From d2a42aa235aef95922ff21be36b773d730a8b516 Mon Sep 17 00:00:00 2001 From: Adam Fowler Date: Fri, 7 Jan 2022 11:51:24 +0000 Subject: [PATCH] Update BoringSSL --- Sources/CBigNumBoringSSL/crypto/bio/bio.c | 2 + Sources/CBigNumBoringSSL/crypto/bio/file.c | 14 +- .../CBigNumBoringSSL/crypto/bytestring/ber.c | 21 +- .../CBigNumBoringSSL/crypto/bytestring/cbb.c | 9 + .../CBigNumBoringSSL/crypto/bytestring/cbs.c | 113 +- .../crypto/cpu_aarch64_apple.c | 73 + ...arch64-fuchsia.c => cpu_aarch64_fuchsia.c} | 5 +- ...pu-aarch64-linux.c => cpu_aarch64_linux.c} | 6 +- .../CBigNumBoringSSL/crypto/cpu_aarch64_win.c | 43 + .../crypto/{cpu-arm.c => cpu_arm.c} | 6 +- .../{cpu-arm-linux.c => cpu_arm_linux.c} | 27 +- .../{cpu-arm-linux.h => cpu_arm_linux.h} | 0 .../crypto/{cpu-intel.c => cpu_intel.c} | 0 .../crypto/{cpu-ppc64le.c => cpu_ppc64le.c} | 0 Sources/CBigNumBoringSSL/crypto/crypto.c | 14 + Sources/CBigNumBoringSSL/crypto/err/err.c | 183 +- .../CBigNumBoringSSL/crypto/err/err_data.c | 1496 +++++----- .../crypto/fipsmodule/aes/mode_wrappers.c | 18 +- .../aesni-gcm-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/aesni-gcm-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/aesni-x86.linux.x86.S | 4 +- .../fipsmodule/aesni-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/aesni-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/aesv8-armx32.ios.arm.S | 47 +- .../fipsmodule/aesv8-armx32.linux.arm.S | 47 +- .../fipsmodule/aesv8-armx64.ios.aarch64.S | 55 +- .../fipsmodule/aesv8-armx64.linux.aarch64.S | 55 +- .../fipsmodule/armv8-mont.ios.aarch64.S | 13 + .../fipsmodule/armv8-mont.linux.aarch64.S | 13 + .../crypto/fipsmodule/bn-586.linux.x86.S | 4 +- .../crypto/fipsmodule/bn/bn.c | 33 +- .../crypto/fipsmodule/bn/div.c | 40 +- .../crypto/fipsmodule/bn/gcd_extra.c | 5 +- .../crypto/fipsmodule/bn/internal.h | 31 +- .../crypto/fipsmodule/bn/prime.c | 16 +- .../crypto/fipsmodule/bn/sqrt.c | 14 +- .../crypto/fipsmodule/cipher/cipher.c | 60 +- .../crypto/fipsmodule/cipher/e_aes.c | 245 +- 
.../crypto/fipsmodule/co-586.linux.x86.S | 4 +- .../fipsmodule/ghash-neon-armv8.ios.aarch64.S | 5 + .../ghash-neon-armv8.linux.aarch64.S | 5 + .../fipsmodule/ghash-ssse3-x86.linux.x86.S | 4 +- .../ghash-ssse3-x86_64.linux.x86_64.S | 4 +- .../ghash-ssse3-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/ghash-x86.linux.x86.S | 4 +- .../fipsmodule/ghash-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/ghash-x86_64.mac.x86_64.S | 4 +- .../fipsmodule/ghashv8-armx32.ios.arm.S | 8 +- .../fipsmodule/ghashv8-armx32.linux.arm.S | 8 +- .../fipsmodule/ghashv8-armx64.ios.aarch64.S | 329 ++- .../fipsmodule/ghashv8-armx64.linux.aarch64.S | 329 ++- .../crypto/fipsmodule/md5-586.linux.x86.S | 4 +- .../fipsmodule/md5-x86_64.linux.x86_64.S | 4 +- .../crypto/fipsmodule/md5-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/modes/cbc.c | 55 +- .../crypto/fipsmodule/modes/cfb.c | 17 +- .../crypto/fipsmodule/modes/ctr.c | 17 +- .../crypto/fipsmodule/modes/gcm.c | 30 +- .../crypto/fipsmodule/modes/gcm_nohw.c | 2 +- .../crypto/fipsmodule/modes/internal.h | 23 +- .../crypto/fipsmodule/modes/ofb.c | 3 +- .../fipsmodule/p256-x86_64-asm.linux.x86_64.S | 4 +- .../fipsmodule/p256-x86_64-asm.mac.x86_64.S | 4 +- .../p256_beeu-x86_64-asm.linux.x86_64.S | 4 +- .../p256_beeu-x86_64-asm.mac.x86_64.S | 4 +- .../crypto/fipsmodule/rand/internal.h | 35 +- .../crypto/fipsmodule/rand/rand.c | 167 +- .../crypto/fipsmodule/rand/urandom.c | 125 +- .../fipsmodule/rdrand-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/rdrand-x86_64.mac.x86_64.S | 4 +- .../fipsmodule/rsaz-avx2.linux.x86_64.S | 4 +- .../crypto/fipsmodule/rsaz-avx2.mac.x86_64.S | 4 +- .../crypto/fipsmodule/sha1-586.linux.x86.S | 4 +- .../fipsmodule/sha1-armv8.ios.aarch64.S | 7 +- .../fipsmodule/sha1-armv8.linux.aarch64.S | 7 +- .../fipsmodule/sha1-x86_64.linux.x86_64.S | 1871 +++++++++++- .../fipsmodule/sha1-x86_64.mac.x86_64.S | 1871 +++++++++++- .../crypto/fipsmodule/sha256-586.linux.x86.S | 4 +- .../fipsmodule/sha256-armv8.ios.aarch64.S | 46 +- 
.../fipsmodule/sha256-armv8.linux.aarch64.S | 46 +- .../fipsmodule/sha256-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/sha256-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/sha512-586.linux.x86.S | 4 +- .../fipsmodule/sha512-armv8.ios.aarch64.S | 572 +++- .../fipsmodule/sha512-armv8.linux.aarch64.S | 572 +++- .../fipsmodule/sha512-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/sha512-x86_64.mac.x86_64.S | 4 +- .../fipsmodule/vpaes-armv8.ios.aarch64.S | 21 +- .../fipsmodule/vpaes-armv8.linux.aarch64.S | 21 +- .../crypto/fipsmodule/vpaes-x86.linux.x86.S | 4 +- .../fipsmodule/vpaes-x86_64.linux.x86_64.S | 4 +- .../fipsmodule/vpaes-x86_64.mac.x86_64.S | 4 +- .../crypto/fipsmodule/x86-mont.linux.x86.S | 4 +- .../fipsmodule/x86_64-mont.linux.x86_64.S | 4 +- .../fipsmodule/x86_64-mont.mac.x86_64.S | 4 +- .../fipsmodule/x86_64-mont5.linux.x86_64.S | 4 +- .../fipsmodule/x86_64-mont5.mac.x86_64.S | 4 +- Sources/CBigNumBoringSSL/crypto/internal.h | 125 + Sources/CBigNumBoringSSL/crypto/mem.c | 99 +- .../crypto/rand_extra/deterministic.c | 4 + .../crypto/rand_extra/fuchsia.c | 4 + .../crypto/rand_extra/passive.c | 34 + .../crypto/rand_extra/rand_extra.c | 6 +- .../crypto/rand_extra/windows.c | 4 + Sources/CBigNumBoringSSL/crypto/stack/stack.c | 20 +- .../CBigNumBoringSSL/crypto/thread_pthread.c | 28 - Sources/CBigNumBoringSSL/hash.txt | 2 +- .../include/CBigNumBoringSSL.h | 8 +- .../include/CBigNumBoringSSL_aead.h | 29 +- .../include/CBigNumBoringSSL_aes.h | 20 +- .../include/CBigNumBoringSSL_arm_arch.h | 123 + .../include/CBigNumBoringSSL_asn1.h | 2506 ++++++++++++----- .../include/CBigNumBoringSSL_base.h | 82 +- .../include/CBigNumBoringSSL_bio.h | 27 +- .../include/CBigNumBoringSSL_bn.h | 14 +- ...BigNumBoringSSL_boringssl_prefix_symbols.h | 25 +- ...umBoringSSL_boringssl_prefix_symbols_asm.h | 25 +- .../include/CBigNumBoringSSL_bytestring.h | 39 +- .../include/CBigNumBoringSSL_chacha.h | 2 +- .../include/CBigNumBoringSSL_cipher.h | 49 +- .../include/CBigNumBoringSSL_cpu.h | 
59 +- .../include/CBigNumBoringSSL_crypto.h | 36 +- .../include/CBigNumBoringSSL_err.h | 26 +- .../include/CBigNumBoringSSL_mem.h | 15 +- .../include/CBigNumBoringSSL_opensslconf.h | 3 + .../include/CBigNumBoringSSL_rand.h | 27 +- .../include/CBigNumBoringSSL_span.h | 55 +- .../include/boringssl_prefix_symbols_nasm.inc | 50 +- 128 files changed, 10290 insertions(+), 2308 deletions(-) create mode 100644 Sources/CBigNumBoringSSL/crypto/cpu_aarch64_apple.c rename Sources/CBigNumBoringSSL/crypto/{cpu-aarch64-fuchsia.c => cpu_aarch64_fuchsia.c} (87%) rename Sources/CBigNumBoringSSL/crypto/{cpu-aarch64-linux.c => cpu_aarch64_linux.c} (90%) create mode 100644 Sources/CBigNumBoringSSL/crypto/cpu_aarch64_win.c rename Sources/CBigNumBoringSSL/crypto/{cpu-arm.c => cpu_arm.c} (89%) rename Sources/CBigNumBoringSSL/crypto/{cpu-arm-linux.c => cpu_arm_linux.c} (83%) rename Sources/CBigNumBoringSSL/crypto/{cpu-arm-linux.h => cpu_arm_linux.h} (100%) rename Sources/CBigNumBoringSSL/crypto/{cpu-intel.c => cpu_intel.c} (100%) rename Sources/CBigNumBoringSSL/crypto/{cpu-ppc64le.c => cpu_ppc64le.c} (100%) create mode 100644 Sources/CBigNumBoringSSL/crypto/rand_extra/passive.c diff --git a/Sources/CBigNumBoringSSL/crypto/bio/bio.c b/Sources/CBigNumBoringSSL/crypto/bio/bio.c index d45a6c6..98fa07a 100644 --- a/Sources/CBigNumBoringSSL/crypto/bio/bio.c +++ b/Sources/CBigNumBoringSSL/crypto/bio/bio.c @@ -262,6 +262,8 @@ int BIO_should_io_special(const BIO *bio) { int BIO_get_retry_reason(const BIO *bio) { return bio->retry_reason; } +void BIO_set_retry_reason(BIO *bio, int reason) { bio->retry_reason = reason; } + void BIO_clear_flags(BIO *bio, int flags) { bio->flags &= ~flags; } diff --git a/Sources/CBigNumBoringSSL/crypto/bio/file.c b/Sources/CBigNumBoringSSL/crypto/bio/file.c index d3a0904..73f30ae 100644 --- a/Sources/CBigNumBoringSSL/crypto/bio/file.c +++ b/Sources/CBigNumBoringSSL/crypto/bio/file.c @@ -126,13 +126,7 @@ BIO *BIO_new_fp(FILE *stream, int close_flag) { return ret; } 
-static int file_new(BIO *bio) { return 1; } - static int file_free(BIO *bio) { - if (bio == NULL) { - return 0; - } - if (!bio->shutdown) { return 1; } @@ -279,7 +273,7 @@ static const BIO_METHOD methods_filep = { BIO_TYPE_FILE, "FILE pointer", file_write, file_read, NULL /* puts */, file_gets, - file_ctrl, file_new, + file_ctrl, NULL /* create */, file_free, NULL /* callback_ctrl */, }; @@ -314,4 +308,10 @@ int BIO_rw_filename(BIO *bio, const char *filename) { BIO_CLOSE | BIO_FP_READ | BIO_FP_WRITE, (char *)filename); } +long BIO_tell(BIO *bio) { return BIO_ctrl(bio, BIO_C_FILE_TELL, 0, NULL); } + +long BIO_seek(BIO *bio, long offset) { + return BIO_ctrl(bio, BIO_C_FILE_SEEK, offset, NULL); +} + #endif // OPENSSL_TRUSTY diff --git a/Sources/CBigNumBoringSSL/crypto/bytestring/ber.c b/Sources/CBigNumBoringSSL/crypto/bytestring/ber.c index f846e59..3342030 100644 --- a/Sources/CBigNumBoringSSL/crypto/bytestring/ber.c +++ b/Sources/CBigNumBoringSSL/crypto/bytestring/ber.c @@ -29,8 +29,10 @@ static const unsigned kMaxDepth = 2048; // is_string_type returns one if |tag| is a string type and zero otherwise. It // ignores the constructed bit. static int is_string_type(unsigned tag) { + // While BER supports constructed BIT STRINGS, OpenSSL misparses them. To + // avoid acting on an ambiguous input, we do not support constructed BIT + // STRINGS. See https://github.com/openssl/openssl/issues/12810. switch (tag & ~CBS_ASN1_CONSTRUCTED) { - case CBS_ASN1_BITSTRING: case CBS_ASN1_OCTETSTRING: case CBS_ASN1_UTF8STRING: case CBS_ASN1_NUMERICSTRING: @@ -53,7 +55,7 @@ static int is_string_type(unsigned tag) { // depending on whether an indefinite length element or constructed string was // found. The value of |orig_in| is not changed. It returns one on success (i.e. // |*ber_found| was set) and zero on error. 
-static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { +static int cbs_find_ber(const CBS *orig_in, int *ber_found, unsigned depth) { CBS in; if (depth > kMaxDepth) { @@ -68,14 +70,11 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { unsigned tag; size_t header_len; - if (!CBS_get_any_ber_asn1_element(&in, &contents, &tag, &header_len)) { + if (!CBS_get_any_ber_asn1_element(&in, &contents, &tag, &header_len, + ber_found)) { return 0; } - if (CBS_len(&contents) == header_len && - header_len > 0 && - CBS_data(&contents)[header_len-1] == 0x80) { - // Found an indefinite-length element. - *ber_found = 1; + if (*ber_found) { return 1; } if (tag & CBS_ASN1_CONSTRUCTED) { @@ -120,9 +119,11 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, CBS contents; unsigned tag, child_string_tag = string_tag; size_t header_len; + int ber_found; CBB *out_contents, out_contents_storage; - if (!CBS_get_any_ber_asn1_element(in, &contents, &tag, &header_len)) { + if (!CBS_get_any_ber_asn1_element(in, &contents, &tag, &header_len, + &ber_found)) { return 0; } @@ -194,7 +195,7 @@ int CBS_asn1_ber_to_der(CBS *in, CBS *out, uint8_t **out_storage) { // First, do a quick walk to find any indefinite-length elements. Most of the // time we hope that there aren't any and thus we can quickly return. 
- char conversion_needed; + int conversion_needed; if (!cbs_find_ber(in, &conversion_needed, 0)) { return 0; } diff --git a/Sources/CBigNumBoringSSL/crypto/bytestring/cbb.c b/Sources/CBigNumBoringSSL/crypto/bytestring/cbb.c index 3b3227d..3a029e2 100644 --- a/Sources/CBigNumBoringSSL/crypto/bytestring/cbb.c +++ b/Sources/CBigNumBoringSSL/crypto/bytestring/cbb.c @@ -404,6 +404,15 @@ int CBB_add_bytes(CBB *cbb, const uint8_t *data, size_t len) { return 1; } +int CBB_add_zeros(CBB *cbb, size_t len) { + uint8_t *out; + if (!CBB_add_space(cbb, &out, len)) { + return 0; + } + OPENSSL_memset(out, 0, len); + return 1; +} + int CBB_add_space(CBB *cbb, uint8_t **out_data, size_t len) { if (!CBB_flush(cbb) || !cbb_buffer_add(cbb->base, out_data, len)) { diff --git a/Sources/CBigNumBoringSSL/crypto/bytestring/cbs.c b/Sources/CBigNumBoringSSL/crypto/bytestring/cbs.c index c4c533d..10fc8da 100644 --- a/Sources/CBigNumBoringSSL/crypto/bytestring/cbs.c +++ b/Sources/CBigNumBoringSSL/crypto/bytestring/cbs.c @@ -216,6 +216,14 @@ int CBS_get_u24_length_prefixed(CBS *cbs, CBS *out) { return cbs_get_length_prefixed(cbs, out, 3); } +int CBS_get_until_first(CBS *cbs, CBS *out, uint8_t c) { + const uint8_t *split = OPENSSL_memchr(CBS_data(cbs), c, CBS_len(cbs)); + if (split == NULL) { + return 0; + } + return CBS_get_bytes(cbs, out, split - CBS_data(cbs)); +} + // parse_base128_integer reads a big-endian base-128 integer from |cbs| and sets // |*out| to the result. This is the encoding used in DER for both high tag // number form and OID components. @@ -254,8 +262,7 @@ static int parse_asn1_tag(CBS *cbs, unsigned *out) { // // If the number portion is 31 (0x1f, the largest value that fits in the // allotted bits), then the tag is more than one byte long and the - // continuation bytes contain the tag number. This parser only supports tag - // numbers less than 31 (and thus single-byte tags). + // continuation bytes contain the tag number. 
unsigned tag = ((unsigned)tag_byte & 0xe0) << CBS_ASN1_TAG_SHIFT; unsigned tag_number = tag_byte & 0x1f; if (tag_number == 0x1f) { @@ -263,7 +270,7 @@ static int parse_asn1_tag(CBS *cbs, unsigned *out) { if (!parse_base128_integer(cbs, &v) || // Check the tag number is within our supported bounds. v > CBS_ASN1_TAG_NUMBER_MASK || - // Small tag numbers should have used low tag number form. + // Small tag numbers should have used low tag number form, even in BER. v < 0x1f) { return 0; } @@ -277,13 +284,17 @@ static int parse_asn1_tag(CBS *cbs, unsigned *out) { } static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, - size_t *out_header_len, int ber_ok) { + size_t *out_header_len, int *out_ber_found, + int ber_ok) { CBS header = *cbs; CBS throwaway; if (out == NULL) { out = &throwaway; } + if (ber_ok) { + *out_ber_found = 0; + } unsigned tag; if (!parse_asn1_tag(&header, &tag)) { @@ -321,27 +332,38 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, if (out_header_len != NULL) { *out_header_len = header_len; } + *out_ber_found = 1; return CBS_get_bytes(cbs, out, header_len); } // ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be // used as the first byte of the length. If this parser encounters that - // value, num_bytes will be parsed as 127, which will fail the check below. + // value, num_bytes will be parsed as 127, which will fail this check. if (num_bytes == 0 || num_bytes > 4) { return 0; } if (!cbs_get_u(&header, &len64, num_bytes)) { return 0; } - // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length - // with the minimum number of octets. + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the + // length with the minimum number of octets. BER could, technically, have + // 125 superfluous zero bytes. We do not attempt to handle that and still + // require that the length fit in a |uint32_t| for BER. 
if (len64 < 128) { // Length should have used short-form encoding. - return 0; + if (ber_ok) { + *out_ber_found = 1; + } else { + return 0; + } } - if ((len64 >> ((num_bytes-1)*8)) == 0) { + if ((len64 >> ((num_bytes - 1) * 8)) == 0) { // Length should have been at least one byte shorter. - return 0; + if (ber_ok) { + *out_ber_found = 1; + } else { + return 0; + } } len = len64; if (len + header_len + num_bytes < len) { @@ -374,13 +396,15 @@ int CBS_get_any_asn1(CBS *cbs, CBS *out, unsigned *out_tag) { int CBS_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, size_t *out_header_len) { return cbs_get_any_asn1_element(cbs, out, out_tag, out_header_len, - 0 /* DER only */); + NULL, 0 /* DER only */); } int CBS_get_any_ber_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, - size_t *out_header_len) { - return cbs_get_any_asn1_element(cbs, out, out_tag, out_header_len, - 1 /* BER allowed */); + size_t *out_header_len, int *out_ber_found) { + int ber_found_temp; + return cbs_get_any_asn1_element( + cbs, out, out_tag, out_header_len, + out_ber_found ? out_ber_found : &ber_found_temp, 1 /* BER allowed */); } static int cbs_get_asn1(CBS *cbs, CBS *out, unsigned tag_value, @@ -426,29 +450,14 @@ int CBS_peek_asn1_tag(const CBS *cbs, unsigned tag_value) { int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) { CBS bytes; - if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER)) { + if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER) || + !CBS_is_unsigned_asn1_integer(&bytes)) { return 0; } *out = 0; const uint8_t *data = CBS_data(&bytes); size_t len = CBS_len(&bytes); - - if (len == 0) { - // An INTEGER is encoded with at least one octet. - return 0; - } - - if ((data[0] & 0x80) != 0) { - // Negative number. - return 0; - } - - if (data[0] == 0 && len > 1 && (data[1] & 0x80) == 0) { - // Extra leading zeros. - return 0; - } - for (size_t i = 0; i < len; i++) { if ((*out >> 56) != 0) { // Too large to represent as a uint64_t. 
@@ -462,31 +471,21 @@ int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) { } int CBS_get_asn1_int64(CBS *cbs, int64_t *out) { + int is_negative; CBS bytes; - if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER)) { + if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER) || + !CBS_is_valid_asn1_integer(&bytes, &is_negative)) { return 0; } const uint8_t *data = CBS_data(&bytes); const size_t len = CBS_len(&bytes); - - if (len == 0 || len > sizeof(int64_t)) { - // An INTEGER is encoded with at least one octet. + if (len > sizeof(int64_t)) { return 0; } - if (len > 1) { - if (data[0] == 0 && (data[1] & 0x80) == 0) { - return 0; // Extra leading zeros. - } - if (data[0] == 0xff && (data[1] & 0x80) != 0) { - return 0; // Extra leading 0xff. - } - } - union { int64_t i; uint8_t bytes[sizeof(int64_t)]; } u; - const int is_negative = (data[0] & 0x80); memset(u.bytes, is_negative ? 0xff : 0, sizeof(u.bytes)); // Sign-extend. for (size_t i = 0; i < len; i++) { u.bytes[i] = data[len - i - 1]; @@ -635,6 +634,30 @@ int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit) { (CBS_data(cbs)[byte_num] & (1 << bit_num)) != 0; } +int CBS_is_valid_asn1_integer(const CBS *cbs, int *out_is_negative) { + CBS copy = *cbs; + uint8_t first_byte, second_byte; + if (!CBS_get_u8(©, &first_byte)) { + return 0; // INTEGERs may not be empty. + } + if (out_is_negative != NULL) { + *out_is_negative = (first_byte & 0x80) != 0; + } + if (!CBS_get_u8(©, &second_byte)) { + return 1; // One byte INTEGERs are always minimal. + } + if ((first_byte == 0x00 && (second_byte & 0x80) == 0) || + (first_byte == 0xff && (second_byte & 0x80) != 0)) { + return 0; // The value is minimal iff the first 9 bits are not all equal. 
+ } + return 1; +} + +int CBS_is_unsigned_asn1_integer(const CBS *cbs) { + int is_negative; + return CBS_is_valid_asn1_integer(cbs, &is_negative) && !is_negative; +} + static int add_decimal(CBB *out, uint64_t v) { char buf[DECIMAL_SIZE(uint64_t) + 1]; BIO_snprintf(buf, sizeof(buf), "%" PRIu64, v); diff --git a/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_apple.c b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_apple.c new file mode 100644 index 0000000..b1dc142 --- /dev/null +++ b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_apple.c @@ -0,0 +1,73 @@ +/* Copyright (c) 2021, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#if defined(OPENSSL_AARCH64) && defined(OPENSSL_APPLE) && \ + !defined(OPENSSL_STATIC_ARMCAP) + +#include +#include + +#include + +#include "internal.h" + + +extern uint32_t OPENSSL_armcap_P; + +static int has_hw_feature(const char *name) { + int value; + size_t len = sizeof(value); + if (sysctlbyname(name, &value, &len, NULL, 0) != 0) { + return 0; + } + if (len != sizeof(int)) { + // This should not happen. All the values queried should be integer-valued. + assert(0); + return 0; + } + + // Per sys/sysctl.h: + // + // Selectors that return errors are not support on the system. 
Supported + // features will return 1 if they are recommended or 0 if they are supported + // but are not expected to help performance. Future versions of these + // selectors may return larger values as necessary so it is best to test for + // non zero. + return value != 0; +} + +void OPENSSL_cpuid_setup(void) { + // Apple ARM64 platforms have NEON and cryptography extensions available + // statically, so we do not need to query them. In particular, there sometimes + // are no sysctls corresponding to such features. See below. +#if !defined(__ARM_NEON) || !defined(__ARM_FEATURE_CRYPTO) +#error "NEON and crypto extensions should be statically available." +#endif + OPENSSL_armcap_P = + ARMV7_NEON | ARMV8_AES | ARMV8_PMULL | ARMV8_SHA1 | ARMV8_SHA256; + + // macOS has sysctls named both like "hw.optional.arm.FEAT_SHA512" and like + // "hw.optional.armv8_2_sha512". There does not appear to be documentation on + // which to use. The "armv8_2_sha512" style omits statically-available + // features, while the "FEAT_SHA512" style includes them. However, the + // "FEAT_SHA512" style was added in macOS 12, so we use the older style for + // better compatibility and handle static features above. 
+ if (has_hw_feature("hw.optional.armv8_2_sha512")) { + OPENSSL_armcap_P |= ARMV8_SHA512; + } +} + +#endif // OPENSSL_AARCH64 && OPENSSL_APPLE && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-aarch64-fuchsia.c b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_fuchsia.c similarity index 87% rename from Sources/CBigNumBoringSSL/crypto/cpu-aarch64-fuchsia.c rename to Sources/CBigNumBoringSSL/crypto/cpu_aarch64_fuchsia.c index 203a4de..dd874b3 100644 --- a/Sources/CBigNumBoringSSL/crypto/cpu-aarch64-fuchsia.c +++ b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_fuchsia.c @@ -50,6 +50,9 @@ void OPENSSL_cpuid_setup(void) { if (hwcap & ZX_ARM64_FEATURE_ISA_SHA2) { OPENSSL_armcap_P |= ARMV8_SHA256; } + // As of writing, Fuchsia does not have a flag for ARMv8.2 SHA-512 + // extensions. When it does, add it here. See + // https://bugs.fuchsia.dev/p/fuchsia/issues/detail?id=90759. } -#endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP +#endif // OPENSSL_AARCH64 && OPENSSL_FUCHSIA && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-aarch64-linux.c b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_linux.c similarity index 90% rename from Sources/CBigNumBoringSSL/crypto/cpu-aarch64-linux.c rename to Sources/CBigNumBoringSSL/crypto/cpu_aarch64_linux.c index 1284632..829d303 100644 --- a/Sources/CBigNumBoringSSL/crypto/cpu-aarch64-linux.c +++ b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_linux.c @@ -36,6 +36,7 @@ void OPENSSL_cpuid_setup(void) { static const unsigned long kPMULL = 1 << 4; static const unsigned long kSHA1 = 1 << 5; static const unsigned long kSHA256 = 1 << 6; + static const unsigned long kSHA512 = 1 << 21; if ((hwcap & kNEON) == 0) { // Matching OpenSSL, if NEON is missing, don't report other features @@ -57,6 +58,9 @@ void OPENSSL_cpuid_setup(void) { if (hwcap & kSHA256) { OPENSSL_armcap_P |= ARMV8_SHA256; } + if (hwcap & kSHA512) { + OPENSSL_armcap_P |= ARMV8_SHA512; + } } -#endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP 
+#endif // OPENSSL_AARCH64 && OPENSSL_LINUX && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_win.c b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_win.c new file mode 100644 index 0000000..9bdc2e5 --- /dev/null +++ b/Sources/CBigNumBoringSSL/crypto/cpu_aarch64_win.c @@ -0,0 +1,43 @@ +/* Copyright (c) 2018, Google Inc. + * Copyright (c) 2020, Arm Ltd. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#if defined(OPENSSL_AARCH64) && defined(OPENSSL_WINDOWS) && \ + !defined(OPENSSL_STATIC_ARMCAP) + +#include + +#include + +#include "internal.h" + +extern uint32_t OPENSSL_armcap_P; +void OPENSSL_cpuid_setup(void) { + // We do not need to check for the presence of NEON, as Armv8-A always has it + OPENSSL_armcap_P |= ARMV7_NEON; + + if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) { + // These are all covered by one call in Windows + OPENSSL_armcap_P |= ARMV8_AES; + OPENSSL_armcap_P |= ARMV8_PMULL; + OPENSSL_armcap_P |= ARMV8_SHA1; + OPENSSL_armcap_P |= ARMV8_SHA256; + } + // As of writing, Windows does not have a |PF_*| value for ARMv8.2 SHA-512 + // extensions. When it does, add it here. 
+} + +#endif // OPENSSL_AARCH64 && OPENSSL_WINDOWS && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-arm.c b/Sources/CBigNumBoringSSL/crypto/cpu_arm.c similarity index 89% rename from Sources/CBigNumBoringSSL/crypto/cpu-arm.c rename to Sources/CBigNumBoringSSL/crypto/cpu_arm.c index 2f189cd..f4b8b09 100644 --- a/Sources/CBigNumBoringSSL/crypto/cpu-arm.c +++ b/Sources/CBigNumBoringSSL/crypto/cpu_arm.c @@ -22,15 +22,15 @@ extern uint32_t OPENSSL_armcap_P; -char CRYPTO_is_NEON_capable_at_runtime(void) { +int CRYPTO_is_NEON_capable_at_runtime(void) { return (OPENSSL_armcap_P & ARMV7_NEON) != 0; } -int CRYPTO_is_ARMv8_AES_capable(void) { +int CRYPTO_is_ARMv8_AES_capable_at_runtime(void) { return (OPENSSL_armcap_P & ARMV8_AES) != 0; } -int CRYPTO_is_ARMv8_PMULL_capable(void) { +int CRYPTO_is_ARMv8_PMULL_capable_at_runtime(void) { return (OPENSSL_armcap_P & ARMV8_PMULL) != 0; } diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-arm-linux.c b/Sources/CBigNumBoringSSL/crypto/cpu_arm_linux.c similarity index 83% rename from Sources/CBigNumBoringSSL/crypto/cpu-arm-linux.c rename to Sources/CBigNumBoringSSL/crypto/cpu_arm_linux.c index 620673d..09acf11 100644 --- a/Sources/CBigNumBoringSSL/crypto/cpu-arm-linux.c +++ b/Sources/CBigNumBoringSSL/crypto/cpu_arm_linux.c @@ -23,7 +23,7 @@ #include #include -#include "cpu-arm-linux.h" +#include "cpu_arm_linux.h" #define AT_HWCAP 16 #define AT_HWCAP2 26 @@ -146,11 +146,13 @@ extern uint32_t OPENSSL_armcap_P; static int g_has_broken_neon, g_needs_hwcap2_workaround; void OPENSSL_cpuid_setup(void) { - char *cpuinfo_data; - size_t cpuinfo_len; - if (!read_file(&cpuinfo_data, &cpuinfo_len, "/proc/cpuinfo")) { - return; - } + // We ignore the return value of |read_file| and proceed with an empty + // /proc/cpuinfo on error. If |getauxval| works, we will still detect + // capabilities. There may be a false positive due to + // |crypto_cpuinfo_has_broken_neon|, but this is now rare. 
+ char *cpuinfo_data = NULL; + size_t cpuinfo_len = 0; + read_file(&cpuinfo_data, &cpuinfo_len, "/proc/cpuinfo"); STRING_PIECE cpuinfo; cpuinfo.data = cpuinfo_data; cpuinfo.len = cpuinfo_len; @@ -173,7 +175,13 @@ void OPENSSL_cpuid_setup(void) { hwcap = crypto_get_arm_hwcap_from_cpuinfo(&cpuinfo); } - // Clear NEON support if known broken. + // Clear NEON support if known broken. Note, if NEON is available statically, + // the non-NEON code is dropped and this workaround is a no-op. + // + // TODO(davidben): The Android NDK now builds with NEON statically available + // by default. Cronet still has some consumers that support NEON-less devices + // (b/150371744). Get metrics on whether they still see this CPU and, if not, + // remove this check entirely. g_has_broken_neon = crypto_cpuinfo_has_broken_neon(&cpuinfo); if (g_has_broken_neon) { hwcap &= ~HWCAP_NEON; @@ -184,7 +192,10 @@ void OPENSSL_cpuid_setup(void) { OPENSSL_armcap_P |= ARMV7_NEON; // Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to - // /proc/cpuinfo. See https://crbug.com/596156. + // /proc/cpuinfo. See https://crbug.com/boringssl/46. As of February 2021, + // this is now rare (see Chrome's Net.NeedsHWCAP2Workaround metric), but AES + // and PMULL extensions are very useful, so we still carry the workaround + // for now. 
unsigned long hwcap2 = 0; if (getauxval != NULL) { hwcap2 = getauxval(AT_HWCAP2); diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-arm-linux.h b/Sources/CBigNumBoringSSL/crypto/cpu_arm_linux.h similarity index 100% rename from Sources/CBigNumBoringSSL/crypto/cpu-arm-linux.h rename to Sources/CBigNumBoringSSL/crypto/cpu_arm_linux.h diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-intel.c b/Sources/CBigNumBoringSSL/crypto/cpu_intel.c similarity index 100% rename from Sources/CBigNumBoringSSL/crypto/cpu-intel.c rename to Sources/CBigNumBoringSSL/crypto/cpu_intel.c diff --git a/Sources/CBigNumBoringSSL/crypto/cpu-ppc64le.c b/Sources/CBigNumBoringSSL/crypto/cpu_ppc64le.c similarity index 100% rename from Sources/CBigNumBoringSSL/crypto/cpu-ppc64le.c rename to Sources/CBigNumBoringSSL/crypto/cpu_ppc64le.c diff --git a/Sources/CBigNumBoringSSL/crypto/crypto.c b/Sources/CBigNumBoringSSL/crypto/crypto.c index 813b702..8e2533e 100644 --- a/Sources/CBigNumBoringSSL/crypto/crypto.c +++ b/Sources/CBigNumBoringSSL/crypto/crypto.c @@ -16,6 +16,8 @@ #include +#include "fipsmodule/rand/fork_detect.h" +#include "fipsmodule/rand/internal.h" #include "internal.h" @@ -102,6 +104,9 @@ HIDDEN uint32_t OPENSSL_armcap_P = #endif #if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_CRYPTO) ARMV8_PMULL | +#endif +#if defined(__ARM_FEATURE_SHA512) + ARMV8_SHA512 | #endif 0; @@ -174,6 +179,15 @@ int CRYPTO_has_asm(void) { #endif } +void CRYPTO_pre_sandbox_init(void) { + // Read from /proc/cpuinfo if needed. + CRYPTO_library_init(); + // Open /dev/urandom if needed. + CRYPTO_init_sysrand(); + // Set up MADV_WIPEONFORK state if needed. 
+ CRYPTO_get_fork_generation(); +} + const char *SSLeay_version(int which) { return OpenSSL_version(which); } const char *OpenSSL_version(int which) { diff --git a/Sources/CBigNumBoringSSL/crypto/err/err.c b/Sources/CBigNumBoringSSL/crypto/err/err.c index 5aeab30..85479eb 100644 --- a/Sources/CBigNumBoringSSL/crypto/err/err.c +++ b/Sources/CBigNumBoringSSL/crypto/err/err.c @@ -368,84 +368,6 @@ void ERR_clear_system_error(void) { errno = 0; } -char *ERR_error_string(uint32_t packed_error, char *ret) { - static char buf[ERR_ERROR_STRING_BUF_LEN]; - - if (ret == NULL) { - // TODO(fork): remove this. - ret = buf; - } - -#if !defined(NDEBUG) - // This is aimed to help catch callers who don't provide - // |ERR_ERROR_STRING_BUF_LEN| bytes of space. - OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN); -#endif - - return ERR_error_string_n(packed_error, ret, ERR_ERROR_STRING_BUF_LEN); -} - -char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { - char lib_buf[64], reason_buf[64]; - const char *lib_str, *reason_str; - unsigned lib, reason; - - if (len == 0) { - return NULL; - } - - lib = ERR_GET_LIB(packed_error); - reason = ERR_GET_REASON(packed_error); - - lib_str = ERR_lib_error_string(packed_error); - reason_str = ERR_reason_error_string(packed_error); - - if (lib_str == NULL) { - BIO_snprintf(lib_buf, sizeof(lib_buf), "lib(%u)", lib); - lib_str = lib_buf; - } - - if (reason_str == NULL) { - BIO_snprintf(reason_buf, sizeof(reason_buf), "reason(%u)", reason); - reason_str = reason_buf; - } - - BIO_snprintf(buf, len, "error:%08" PRIx32 ":%s:OPENSSL_internal:%s", - packed_error, lib_str, reason_str); - - if (strlen(buf) == len - 1) { - // output may be truncated; make sure we always have 5 colon-separated - // fields, i.e. 4 colons. - static const unsigned num_colons = 4; - unsigned i; - char *s = buf; - - if (len <= num_colons) { - // In this situation it's not possible to ensure that the correct number - // of colons are included in the output. 
- return buf; - } - - for (i = 0; i < num_colons; i++) { - char *colon = strchr(s, ':'); - char *last_pos = &buf[len - 1] - num_colons + i; - - if (colon == NULL || colon > last_pos) { - // set colon |i| at last possible position (buf[len-1] is the - // terminating 0). If we're setting this colon, then all whole of the - // rest of the string must be colons in order to have the correct - // number. - OPENSSL_memset(last_pos, ':', num_colons - i); - break; - } - - s = colon + 1; - } - } - - return buf; -} - // err_string_cmp is a compare function for searching error values with // |bsearch| in |err_string_lookup|. static int err_string_cmp(const void *a, const void *b) { @@ -530,7 +452,7 @@ static const char *const kLibraryNames[ERR_NUM_LIBS] = { "User defined functions", // ERR_LIB_USER }; -const char *ERR_lib_error_string(uint32_t packed_error) { +static const char *err_lib_error_string(uint32_t packed_error) { const uint32_t lib = ERR_GET_LIB(packed_error); if (lib >= ERR_NUM_LIBS) { @@ -539,11 +461,16 @@ const char *ERR_lib_error_string(uint32_t packed_error) { return kLibraryNames[lib]; } +const char *ERR_lib_error_string(uint32_t packed_error) { + const char *ret = err_lib_error_string(packed_error); + return ret == NULL ? "unknown library" : ret; +} + const char *ERR_func_error_string(uint32_t packed_error) { return "OPENSSL_internal"; } -const char *ERR_reason_error_string(uint32_t packed_error) { +static const char *err_reason_error_string(uint32_t packed_error) { const uint32_t lib = ERR_GET_LIB(packed_error); const uint32_t reason = ERR_GET_REASON(packed_error); @@ -579,6 +506,86 @@ const char *ERR_reason_error_string(uint32_t packed_error) { kOpenSSLReasonValuesLen, kOpenSSLReasonStringData); } +const char *ERR_reason_error_string(uint32_t packed_error) { + const char *ret = err_reason_error_string(packed_error); + return ret == NULL ? 
"unknown error" : ret; +} + +char *ERR_error_string(uint32_t packed_error, char *ret) { + static char buf[ERR_ERROR_STRING_BUF_LEN]; + + if (ret == NULL) { + // TODO(fork): remove this. + ret = buf; + } + +#if !defined(NDEBUG) + // This is aimed to help catch callers who don't provide + // |ERR_ERROR_STRING_BUF_LEN| bytes of space. + OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN); +#endif + + return ERR_error_string_n(packed_error, ret, ERR_ERROR_STRING_BUF_LEN); +} + +char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { + if (len == 0) { + return NULL; + } + + unsigned lib = ERR_GET_LIB(packed_error); + unsigned reason = ERR_GET_REASON(packed_error); + + const char *lib_str = err_lib_error_string(packed_error); + const char *reason_str = err_reason_error_string(packed_error); + + char lib_buf[64], reason_buf[64]; + if (lib_str == NULL) { + BIO_snprintf(lib_buf, sizeof(lib_buf), "lib(%u)", lib); + lib_str = lib_buf; + } + + if (reason_str == NULL) { + BIO_snprintf(reason_buf, sizeof(reason_buf), "reason(%u)", reason); + reason_str = reason_buf; + } + + BIO_snprintf(buf, len, "error:%08" PRIx32 ":%s:OPENSSL_internal:%s", + packed_error, lib_str, reason_str); + + if (strlen(buf) == len - 1) { + // output may be truncated; make sure we always have 5 colon-separated + // fields, i.e. 4 colons. + static const unsigned num_colons = 4; + unsigned i; + char *s = buf; + + if (len <= num_colons) { + // In this situation it's not possible to ensure that the correct number + // of colons are included in the output. + return buf; + } + + for (i = 0; i < num_colons; i++) { + char *colon = strchr(s, ':'); + char *last_pos = &buf[len - 1] - num_colons + i; + + if (colon == NULL || colon > last_pos) { + // set colon |i| at last possible position (buf[len-1] is the + // terminating 0). If we're setting this colon, then all whole of the + // rest of the string must be colons in order to have the correct + // number. 
+ OPENSSL_memset(last_pos, ':', num_colons - i); + break; + } + + s = colon + 1; + } + } + + return buf; +} + void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) { char buf[ERR_ERROR_STRING_BUF_LEN]; char buf2[1024]; @@ -738,6 +745,22 @@ void ERR_add_error_dataf(const char *format, ...) { err_set_error_data(buf); } +void ERR_set_error_data(char *data, int flags) { + if (!(flags & ERR_FLAG_STRING)) { + // We do not support non-string error data. + assert(0); + return; + } + if (flags & ERR_FLAG_MALLOCED) { + err_set_error_data(data); + } else { + char *copy = OPENSSL_strdup(data); + if (copy != NULL) { + err_set_error_data(copy); + } + } +} + int ERR_set_mark(void) { ERR_STATE *const state = err_get_state(); diff --git a/Sources/CBigNumBoringSSL/crypto/err/err_data.c b/Sources/CBigNumBoringSSL/crypto/err/err_data.c index d650b27..123627e 100644 --- a/Sources/CBigNumBoringSSL/crypto/err/err_data.c +++ b/Sources/CBigNumBoringSSL/crypto/err/err_data.c @@ -55,724 +55,751 @@ OPENSSL_STATIC_ASSERT(ERR_LIB_USER == 33, "library value changed"); OPENSSL_STATIC_ASSERT(ERR_NUM_LIBS == 34, "number of libraries changed"); const uint32_t kOpenSSLReasonValues[] = { - 0xc32083a, - 0xc328854, - 0xc330863, - 0xc338873, - 0xc340882, - 0xc34889b, - 0xc3508a7, - 0xc3588c4, - 0xc3608e4, - 0xc3688f2, - 0xc370902, - 0xc37890f, - 0xc38091f, - 0xc38892a, - 0xc390940, - 0xc39894f, - 0xc3a0963, - 0xc3a8847, - 0xc3b00ea, - 0xc3b88d6, - 0x10320847, - 0x1032959f, - 0x103315ab, - 0x103395c4, - 0x103415d7, - 0x10348f27, - 0x10350c60, - 0x103595ea, - 0x10361614, - 0x10369627, - 0x10371646, - 0x1037965f, - 0x10381674, - 0x10389692, - 0x103916a1, - 0x103996bd, - 0x103a16d8, - 0x103a96e7, - 0x103b1703, - 0x103b971e, - 0x103c1744, - 0x103c80ea, - 0x103d1755, - 0x103d9769, - 0x103e1788, - 0x103e9797, - 0x103f17ae, - 0x103f97c1, - 0x10400c24, - 0x104097d4, - 0x104117f2, - 0x10419805, - 0x1042181f, - 0x1042982f, - 0x10431843, - 0x10439859, - 0x10441871, - 0x10449886, - 0x1045189a, - 
0x104598ac, - 0x104605fd, - 0x1046894f, - 0x104718c1, - 0x104798d8, - 0x104818ed, - 0x104898fb, - 0x10490e73, - 0x10499735, - 0x104a15ff, - 0x14320c07, - 0x14328c15, - 0x14330c24, - 0x14338c36, - 0x143400ac, - 0x143480ea, - 0x18320083, - 0x18328f7d, - 0x183300ac, - 0x18338f93, - 0x18340fa7, - 0x183480ea, - 0x18350fbc, - 0x18358fd4, - 0x18360fe9, - 0x18368ffd, - 0x18371021, - 0x18379037, - 0x1838104b, - 0x1838905b, - 0x18390a75, - 0x1839906b, - 0x183a1091, - 0x183a90b7, - 0x183b0c7f, - 0x183b9106, - 0x183c1118, - 0x183c9123, - 0x183d1133, - 0x183d9144, - 0x183e1155, - 0x183e9167, - 0x183f1190, - 0x183f91a9, - 0x184011c1, - 0x184086d5, - 0x184110da, - 0x184190a5, - 0x184210c4, - 0x18428c6c, - 0x18431080, - 0x184390ec, - 0x203211fb, - 0x203291e8, - 0x24321207, - 0x24328995, - 0x24331219, - 0x24339226, - 0x24341233, - 0x24349245, - 0x24351254, - 0x24359271, - 0x2436127e, - 0x2436928c, - 0x2437129a, - 0x243792a8, - 0x243812b1, - 0x243892be, - 0x243912d1, - 0x28320c54, - 0x28328c7f, - 0x28330c24, - 0x28338c92, - 0x28340c60, - 0x283480ac, - 0x283500ea, - 0x28358c6c, - 0x2c323012, - 0x2c3292e8, - 0x2c333020, - 0x2c33b032, - 0x2c343046, - 0x2c34b058, - 0x2c353073, - 0x2c35b085, - 0x2c363098, - 0x2c36832d, - 0x2c3730a5, - 0x2c37b0b7, - 0x2c3830dc, - 0x2c38b0f3, - 0x2c393101, - 0x2c39b111, - 0x2c3a3123, - 0x2c3ab137, - 0x2c3b3148, - 0x2c3bb167, - 0x2c3c12fa, - 0x2c3c9310, - 0x2c3d317b, - 0x2c3d9329, - 0x2c3e3198, - 0x2c3eb1a6, - 0x2c3f31be, - 0x2c3fb1d6, - 0x2c403200, - 0x2c4091fb, - 0x2c413211, - 0x2c41b224, - 0x2c4211c1, - 0x2c42b235, - 0x2c430722, - 0x2c43b159, - 0x2c4430ca, - 0x2c44b1e3, + 0xc320862, + 0xc32887c, + 0xc33088b, + 0xc33889b, + 0xc3408aa, + 0xc3488c3, + 0xc3508cf, + 0xc3588ec, + 0xc36090c, + 0xc36891a, + 0xc37092a, + 0xc378937, + 0xc380947, + 0xc388952, + 0xc390968, + 0xc398977, + 0xc3a098b, + 0xc3a886f, + 0xc3b00f7, + 0xc3b88fe, + 0x1032086f, + 0x103295e5, + 0x103315f1, + 0x1033960a, + 0x1034161d, + 0x10348f4f, + 0x10350c88, + 0x10359630, + 0x1036165a, + 
0x1036966d, + 0x1037168c, + 0x103796a5, + 0x103816ba, + 0x103896d8, + 0x103916e7, + 0x10399703, + 0x103a171e, + 0x103a972d, + 0x103b1749, + 0x103b9764, + 0x103c178a, + 0x103c80f7, + 0x103d179b, + 0x103d97af, + 0x103e17ce, + 0x103e97dd, + 0x103f17f4, + 0x103f9807, + 0x10400c4c, + 0x1040981a, + 0x10411838, + 0x1041984b, + 0x10421865, + 0x10429875, + 0x10431889, + 0x1043989f, + 0x104418b7, + 0x104498cc, + 0x104518e0, + 0x104598f2, + 0x10460625, + 0x10468977, + 0x10471907, + 0x1047991e, + 0x10481933, + 0x10489941, + 0x10490e9b, + 0x1049977b, + 0x104a1645, + 0x14320c2f, + 0x14328c3d, + 0x14330c4c, + 0x14338c5e, + 0x143400b9, + 0x143480f7, + 0x18320090, + 0x18328fa5, + 0x183300b9, + 0x18338fbb, + 0x18340fcf, + 0x183480f7, + 0x18350fee, + 0x18359006, + 0x1836101b, + 0x1836902f, + 0x18371067, + 0x1837907d, + 0x18381091, + 0x183890a1, + 0x18390a9d, + 0x183990b1, + 0x183a10d7, + 0x183a90fd, + 0x183b0ca7, + 0x183b914c, + 0x183c115e, + 0x183c9169, + 0x183d1179, + 0x183d918a, + 0x183e119b, + 0x183e91ad, + 0x183f11d6, + 0x183f91ef, + 0x18401207, + 0x184086fd, + 0x18411120, + 0x184190eb, + 0x1842110a, + 0x18428c94, + 0x184310c6, + 0x18439132, + 0x18440fe4, + 0x18449053, + 0x20321241, + 0x2032922e, + 0x2432124d, + 0x243289bd, + 0x2433125f, + 0x2433926c, + 0x24341279, + 0x2434928b, + 0x2435129a, + 0x243592b7, + 0x243612c4, + 0x243692d2, + 0x243712e0, + 0x243792ee, + 0x243812f7, + 0x24389304, + 0x24391317, + 0x28320c7c, + 0x28328ca7, + 0x28330c4c, + 0x28338cba, + 0x28340c88, + 0x283480b9, + 0x283500f7, + 0x28358c94, + 0x2c323286, + 0x2c32932e, + 0x2c333294, + 0x2c33b2a6, + 0x2c3432ba, + 0x2c34b2cc, + 0x2c3532e7, + 0x2c35b2f9, + 0x2c363329, + 0x2c36833a, + 0x2c373336, + 0x2c37b362, + 0x2c383387, + 0x2c38b39e, + 0x2c3933bc, + 0x2c39b3cc, + 0x2c3a33de, + 0x2c3ab3f2, + 0x2c3b3403, + 0x2c3bb422, + 0x2c3c1340, + 0x2c3c9356, + 0x2c3d3436, + 0x2c3d936f, + 0x2c3e3453, + 0x2c3eb461, + 0x2c3f3479, + 0x2c3fb491, + 0x2c4034bb, + 0x2c409241, + 0x2c4134cc, + 0x2c41b4df, + 0x2c421207, + 0x2c42b4f0, 
+ 0x2c43074a, + 0x2c43b414, + 0x2c443375, + 0x2c44b49e, + 0x2c45330c, + 0x2c45b348, + 0x2c4633ac, 0x30320000, 0x30328015, 0x3033001f, 0x30338038, - 0x3034004a, - 0x30348064, - 0x3035006b, - 0x30358083, - 0x30360094, - 0x303680ac, - 0x303700b9, - 0x303780c8, - 0x303800ea, - 0x303880f7, - 0x3039010a, - 0x30398125, - 0x303a013a, - 0x303a814e, - 0x303b0162, - 0x303b8173, - 0x303c018c, - 0x303c81a9, - 0x303d01b7, - 0x303d81cb, - 0x303e01db, - 0x303e81f4, - 0x303f0204, - 0x303f8217, - 0x30400226, - 0x30408232, - 0x30410247, - 0x30418257, - 0x3042026e, - 0x3042827b, - 0x3043028e, - 0x3043829d, - 0x304402b2, - 0x304482d3, - 0x304502e6, - 0x304582f9, - 0x30460312, - 0x3046832d, - 0x3047034a, - 0x3047835c, - 0x3048036a, - 0x3048837b, - 0x3049038a, - 0x304983a2, - 0x304a03b4, - 0x304a83c8, - 0x304b03e0, - 0x304b83f3, - 0x304c03fe, - 0x304c840f, - 0x304d041b, - 0x304d8431, - 0x304e043f, - 0x304e8455, - 0x304f0467, - 0x304f8479, - 0x3050049c, - 0x305084af, - 0x305104c0, - 0x305184d0, - 0x305204e8, - 0x305284fd, - 0x30530515, - 0x30538529, - 0x30540541, - 0x3054855a, - 0x30550573, - 0x30558590, - 0x3056059b, - 0x305685b3, - 0x305705c3, - 0x305785d4, - 0x305805e7, - 0x305885fd, - 0x30590606, - 0x3059861b, - 0x305a062e, - 0x305a863d, - 0x305b065d, - 0x305b866c, - 0x305c068d, - 0x305c86a9, - 0x305d06b5, - 0x305d86d5, - 0x305e06f1, - 0x305e8702, - 0x305f0718, - 0x305f8722, - 0x3060048c, - 0x34320b65, - 0x34328b79, - 0x34330b96, - 0x34338ba9, - 0x34340bb8, - 0x34348bf1, - 0x34350bd5, - 0x3c320083, - 0x3c328cbc, - 0x3c330cd5, - 0x3c338cf0, - 0x3c340d0d, - 0x3c348d37, - 0x3c350d52, - 0x3c358d78, - 0x3c360d91, - 0x3c368da9, - 0x3c370dba, - 0x3c378dc8, - 0x3c380dd5, - 0x3c388de9, - 0x3c390c7f, - 0x3c398e0c, - 0x3c3a0e20, - 0x3c3a890f, - 0x3c3b0e30, - 0x3c3b8e4b, - 0x3c3c0e5d, - 0x3c3c8e90, - 0x3c3d0e9a, - 0x3c3d8eae, - 0x3c3e0ebc, - 0x3c3e8ee1, - 0x3c3f0ca8, - 0x3c3f8eca, - 0x3c4000ac, - 0x3c4080ea, - 0x3c410d28, - 0x3c418d67, - 0x3c420e73, - 0x3c428dfd, - 0x40321971, - 0x40329987, - 
0x403319b5, - 0x403399bf, - 0x403419d6, - 0x403499f4, - 0x40351a04, - 0x40359a16, - 0x40361a23, - 0x40369a2f, - 0x40371a44, - 0x40379a56, - 0x40381a61, - 0x40389a73, - 0x40390f27, - 0x40399a83, - 0x403a1a96, - 0x403a9ab7, - 0x403b1ac8, - 0x403b9ad8, - 0x403c0064, - 0x403c8083, - 0x403d1b39, - 0x403d9b4f, - 0x403e1b5e, - 0x403e9b96, - 0x403f1bb0, - 0x403f9bd8, - 0x40401bed, - 0x40409c01, - 0x40411c3c, - 0x40419c57, - 0x40421c70, - 0x40429c83, - 0x40431c97, - 0x40439caf, - 0x40441cc6, - 0x404480ac, - 0x40451cdb, - 0x40459ced, - 0x40461d11, - 0x40469d31, - 0x40471d3f, - 0x40479d66, - 0x40481dd7, - 0x40489e0a, - 0x40491e21, - 0x40499e3b, - 0x404a1e52, - 0x404a9e70, - 0x404b1e88, - 0x404b9eb5, - 0x404c1ecb, - 0x404c9edd, - 0x404d1efe, - 0x404d9f37, - 0x404e1f4b, - 0x404e9f58, - 0x404f1f9f, - 0x404f9fe5, - 0x4050203c, - 0x4050a050, - 0x40512083, - 0x40522093, - 0x4052a0b7, - 0x405320cf, - 0x4053a0e2, - 0x405420f7, - 0x4054a11a, - 0x40552128, - 0x4055a165, - 0x40562172, - 0x4056a18b, - 0x405721a3, - 0x4057a1b6, - 0x405821cb, - 0x4058a1f2, - 0x40592221, - 0x4059a24e, - 0x405a2262, - 0x405aa272, - 0x405b228a, - 0x405ba29b, - 0x405c22ae, - 0x405ca2ed, - 0x405d22fa, - 0x405da31f, - 0x405e235d, - 0x405e8ab3, - 0x405f237e, - 0x405fa38b, - 0x40602399, - 0x4060a3bb, - 0x4061241c, - 0x4061a454, - 0x4062246b, - 0x4062a47c, - 0x406324c9, - 0x4063a4de, - 0x406424f5, - 0x4064a521, - 0x4065253c, - 0x4065a553, - 0x4066256b, - 0x4066a595, - 0x406725c0, - 0x4067a605, - 0x4068264d, - 0x4068a66e, - 0x406926a0, - 0x4069a6ce, - 0x406a26ef, - 0x406aa70f, - 0x406b2897, - 0x406ba8ba, - 0x406c28d0, - 0x406cab73, - 0x406d2ba2, - 0x406dabca, - 0x406e2bf8, - 0x406eac45, - 0x406f2c80, - 0x406facb8, - 0x40702ccb, - 0x4070ace8, - 0x40710802, - 0x4071acfa, - 0x40722d0d, - 0x4072ad43, - 0x40732d5b, - 0x407394fa, - 0x40742d6f, - 0x4074ad89, - 0x40752d9a, - 0x4075adae, - 0x40762dbc, - 0x407692be, - 0x40772de1, - 0x4077ae03, - 0x40782e1e, - 0x4078ae57, - 0x40792e6e, - 0x4079ae84, - 0x407a2eb0, - 0x407aaec3, 
- 0x407b2ed8, - 0x407baeea, - 0x407c2f1b, - 0x407caf24, - 0x407d2689, - 0x407d9ff5, - 0x407e2e33, - 0x407ea202, - 0x407f1d53, - 0x407f9e9f, - 0x40801faf, - 0x40809d7b, - 0x408120a5, - 0x40819f89, - 0x40822be3, - 0x40829ae4, - 0x408321dd, - 0x4083a506, - 0x40841d8f, - 0x4084a23a, - 0x408522bf, - 0x4085a3e3, - 0x4086233f, - 0x4086a00f, - 0x40872c29, - 0x4087a431, - 0x40881b22, - 0x4088a618, - 0x40891b71, - 0x40899afe, - 0x408a2908, - 0x408a9912, - 0x408b2eff, - 0x408bac95, - 0x408c22cf, - 0x408c992e, - 0x408d1df0, - 0x408d9dc1, - 0x408e1f20, - 0x408ea145, - 0x408f262c, - 0x408fa3ff, - 0x409025e1, - 0x4090a311, - 0x409128f0, - 0x40919954, - 0x40921bbe, - 0x4092ac64, - 0x40932d26, - 0x4093a020, - 0x40941da3, - 0x4094a921, - 0x4095248d, - 0x4095ae90, - 0x40962c10, - 0x40969fc8, - 0x4097206b, - 0x40979f6f, - 0x40981c1e, - 0x4098a4a1, - 0x41f427c2, - 0x41f92854, - 0x41fe2747, - 0x41fea964, - 0x41ff2a55, - 0x420327db, - 0x420827fd, - 0x4208a839, - 0x4209272b, - 0x4209a873, - 0x420a2782, - 0x420aa762, - 0x420b27a2, - 0x420ba81b, - 0x420c2a71, - 0x420ca931, - 0x420d294b, - 0x420da982, - 0x4212299c, - 0x42172a38, - 0x4217a9de, - 0x421c2a00, - 0x421f29bb, - 0x42212a88, - 0x42262a1b, - 0x422b2b57, - 0x422bab05, - 0x422c2b3f, - 0x422caac4, - 0x422d2aa3, - 0x422dab24, - 0x422e2aea, - 0x4432072d, - 0x4432873c, - 0x44330748, - 0x44338756, - 0x44340769, - 0x4434877a, - 0x44350781, - 0x4435878b, - 0x4436079e, - 0x443687b4, - 0x443707c6, - 0x443787d3, - 0x443807e2, - 0x443887ea, - 0x44390802, - 0x44398810, - 0x443a0823, - 0x483212e8, - 0x483292fa, - 0x48331310, - 0x48339329, - 0x4c32134e, - 0x4c32935e, - 0x4c331371, - 0x4c339391, - 0x4c3400ac, - 0x4c3480ea, - 0x4c35139d, - 0x4c3593ab, - 0x4c3613c7, - 0x4c3693ed, - 0x4c3713fc, - 0x4c37940a, - 0x4c38141f, - 0x4c38942b, - 0x4c39144b, - 0x4c399475, - 0x4c3a148e, - 0x4c3a94a7, - 0x4c3b05fd, - 0x4c3b94c0, - 0x4c3c14d2, - 0x4c3c94e1, - 0x4c3d14fa, - 0x4c3d8c47, - 0x4c3e1567, - 0x4c3e9509, - 0x4c3f1589, - 0x4c3f92be, - 0x4c40151f, - 
0x4c40933a, - 0x4c411557, - 0x4c4193da, - 0x4c421543, - 0x50323247, - 0x5032b256, - 0x50333261, - 0x5033b271, - 0x5034328a, - 0x5034b2a4, - 0x503532b2, - 0x5035b2c8, - 0x503632da, - 0x5036b2f0, - 0x50373309, - 0x5037b31c, - 0x50383334, - 0x5038b345, - 0x5039335a, - 0x5039b36e, - 0x503a338e, - 0x503ab3a4, - 0x503b33bc, - 0x503bb3ce, - 0x503c33ea, - 0x503cb401, - 0x503d341a, - 0x503db430, - 0x503e343d, - 0x503eb453, - 0x503f3465, - 0x503f837b, - 0x50403478, - 0x5040b488, - 0x504134a2, - 0x5041b4b1, - 0x504234cb, - 0x5042b4e8, - 0x504334f8, - 0x5043b508, - 0x50443517, - 0x50448431, - 0x5045352b, - 0x5045b549, - 0x5046355c, - 0x5046b572, - 0x50473584, - 0x5047b599, - 0x504835bf, - 0x5048b5cd, - 0x504935e0, - 0x5049b5f5, - 0x504a360b, - 0x504ab61b, - 0x504b363b, - 0x504bb64e, - 0x504c3671, - 0x504cb69f, - 0x504d36b1, - 0x504db6ce, - 0x504e36e9, - 0x504eb705, - 0x504f3717, - 0x504fb72e, - 0x5050373d, - 0x505086f1, - 0x50513750, - 0x58320f65, - 0x68320f27, - 0x68328c7f, - 0x68330c92, - 0x68338f35, - 0x68340f45, - 0x683480ea, - 0x6c320eed, - 0x6c328c36, - 0x6c330ef8, - 0x6c338f11, - 0x74320a1b, - 0x743280ac, - 0x74330c47, - 0x78320980, - 0x78328995, - 0x783309a1, - 0x78338083, - 0x783409b0, - 0x783489c5, - 0x783509e4, - 0x78358a06, - 0x78360a1b, - 0x78368a31, - 0x78370a41, - 0x78378a62, - 0x78380a75, - 0x78388a87, - 0x78390a94, - 0x78398ab3, - 0x783a0ac8, - 0x783a8ad6, - 0x783b0ae0, - 0x783b8af4, - 0x783c0b0b, - 0x783c8b20, - 0x783d0b37, - 0x783d8b4c, - 0x783e0aa2, - 0x783e8a54, - 0x7c3211d7, - 0x803213ed, - 0x80328083, - 0x80332fe1, - 0x803380ac, - 0x80342ff0, - 0x8034af58, - 0x80352f76, - 0x8035b004, - 0x80362fb8, - 0x8036af67, - 0x80372faa, - 0x8037af45, - 0x80382fcb, - 0x8038af87, - 0x80392f9c, + 0x30340057, + 0x30348071, + 0x30350078, + 0x30358090, + 0x303600a1, + 0x303680b9, + 0x303700c6, + 0x303780d5, + 0x303800f7, + 0x30388104, + 0x30390117, + 0x30398132, + 0x303a0147, + 0x303a815b, + 0x303b016f, + 0x303b8180, + 0x303c0199, + 0x303c81b6, + 0x303d01c4, + 0x303d81d8, 
+ 0x303e01e8, + 0x303e8201, + 0x303f0211, + 0x303f8224, + 0x30400233, + 0x3040823f, + 0x30410254, + 0x30418264, + 0x3042027b, + 0x30428288, + 0x3043029b, + 0x304382aa, + 0x304402bf, + 0x304482e0, + 0x304502f3, + 0x30458306, + 0x3046031f, + 0x3046833a, + 0x30470372, + 0x30478384, + 0x30480392, + 0x304883a3, + 0x304903b2, + 0x304983ca, + 0x304a03dc, + 0x304a83f0, + 0x304b0408, + 0x304b841b, + 0x304c0426, + 0x304c8437, + 0x304d0443, + 0x304d8459, + 0x304e0467, + 0x304e847d, + 0x304f048f, + 0x304f84a1, + 0x305004c4, + 0x305084d7, + 0x305104e8, + 0x305184f8, + 0x30520510, + 0x30528525, + 0x3053053d, + 0x30538551, + 0x30540569, + 0x30548582, + 0x3055059b, + 0x305585b8, + 0x305605c3, + 0x305685db, + 0x305705eb, + 0x305785fc, + 0x3058060f, + 0x30588625, + 0x3059062e, + 0x30598643, + 0x305a0656, + 0x305a8665, + 0x305b0685, + 0x305b8694, + 0x305c06b5, + 0x305c86d1, + 0x305d06dd, + 0x305d86fd, + 0x305e0719, + 0x305e872a, + 0x305f0740, + 0x305f874a, + 0x306004b4, + 0x3060804a, + 0x30610357, + 0x34320b8d, + 0x34328ba1, + 0x34330bbe, + 0x34338bd1, + 0x34340be0, + 0x34348c19, + 0x34350bfd, + 0x3c320090, + 0x3c328ce4, + 0x3c330cfd, + 0x3c338d18, + 0x3c340d35, + 0x3c348d5f, + 0x3c350d7a, + 0x3c358da0, + 0x3c360db9, + 0x3c368dd1, + 0x3c370de2, + 0x3c378df0, + 0x3c380dfd, + 0x3c388e11, + 0x3c390ca7, + 0x3c398e34, + 0x3c3a0e48, + 0x3c3a8937, + 0x3c3b0e58, + 0x3c3b8e73, + 0x3c3c0e85, + 0x3c3c8eb8, + 0x3c3d0ec2, + 0x3c3d8ed6, + 0x3c3e0ee4, + 0x3c3e8f09, + 0x3c3f0cd0, + 0x3c3f8ef2, + 0x3c4000b9, + 0x3c4080f7, + 0x3c410d50, + 0x3c418d8f, + 0x3c420e9b, + 0x3c428e25, + 0x403219d3, + 0x403299e9, + 0x40331a17, + 0x40339a21, + 0x40341a38, + 0x40349a56, + 0x40351a66, + 0x40359a78, + 0x40361a85, + 0x40369a91, + 0x40371aa6, + 0x40379ab8, + 0x40381ac3, + 0x40389ad5, + 0x40390f4f, + 0x40399ae5, + 0x403a1af8, + 0x403a9b19, + 0x403b1b2a, + 0x403b9b3a, + 0x403c0071, + 0x403c8090, + 0x403d1b9b, + 0x403d9bb1, + 0x403e1bc0, + 0x403e9bf8, + 0x403f1c12, + 0x403f9c3a, + 0x40401c4f, + 0x40409c63, + 
0x40411c9e, + 0x40419cb9, + 0x40421cd2, + 0x40429ce5, + 0x40431cf9, + 0x40439d27, + 0x40441d3e, + 0x404480b9, + 0x40451d53, + 0x40459d65, + 0x40461d89, + 0x40469da9, + 0x40471db7, + 0x40479dde, + 0x40481e4f, + 0x40489f09, + 0x40491f20, + 0x40499f3a, + 0x404a1f51, + 0x404a9f6f, + 0x404b1f87, + 0x404b9fb4, + 0x404c1fca, + 0x404c9fdc, + 0x404d1ffd, + 0x404da036, + 0x404e204a, + 0x404ea057, + 0x404f20f1, + 0x404fa167, + 0x405021be, + 0x4050a1d2, + 0x40512205, + 0x40522215, + 0x4052a239, + 0x40532251, + 0x4053a264, + 0x40542279, + 0x4054a29c, + 0x405522c7, + 0x4055a304, + 0x40562329, + 0x4056a342, + 0x4057235a, + 0x4057a36d, + 0x40582382, + 0x4058a3a9, + 0x405923d8, + 0x4059a405, + 0x405a2419, + 0x405aa429, + 0x405b2441, + 0x405ba452, + 0x405c2465, + 0x405ca4a4, + 0x405d24b1, + 0x405da4d6, + 0x405e2514, + 0x405e8adb, + 0x405f254f, + 0x405fa55c, + 0x4060256a, + 0x4060a58c, + 0x406125ed, + 0x4061a625, + 0x4062263c, + 0x4062a64d, + 0x4063269a, + 0x4063a6af, + 0x406426c6, + 0x4064a6f2, + 0x4065270d, + 0x4065a724, + 0x4066273c, + 0x4066a766, + 0x40672791, + 0x4067a7d6, + 0x4068281e, + 0x4068a83f, + 0x40692871, + 0x4069a89f, + 0x406a28c0, + 0x406aa8e0, + 0x406b2a68, + 0x406baa8b, + 0x406c2aa1, + 0x406cadab, + 0x406d2dda, + 0x406dae02, + 0x406e2e30, + 0x406eae7d, + 0x406f2ed6, + 0x406faf0e, + 0x40702f21, + 0x4070af3e, + 0x4071082a, + 0x4071af50, + 0x40722f63, + 0x4072af99, + 0x40732fb1, + 0x40739540, + 0x40742fc5, + 0x4074afdf, + 0x40752ff0, + 0x4075b004, + 0x40763012, + 0x40769304, + 0x40773037, + 0x4077b077, + 0x40783092, + 0x4078b0cb, + 0x407930e2, + 0x4079b0f8, + 0x407a3124, + 0x407ab137, + 0x407b314c, + 0x407bb15e, + 0x407c318f, + 0x407cb198, + 0x407d285a, + 0x407da177, + 0x407e30a7, + 0x407ea3b9, + 0x407f1dcb, + 0x407f9f9e, + 0x40802101, + 0x40809df3, + 0x40812227, + 0x4081a0a5, + 0x40822e1b, + 0x40829b46, + 0x40832394, + 0x4083a6d7, + 0x40841e07, + 0x4084a3f1, + 0x40852476, + 0x4085a5b4, + 0x408624f6, + 0x4086a191, + 0x40872e61, + 0x4087a602, + 0x40881b84, + 0x4088a7e9, 
+ 0x40891bd3, + 0x40899b60, + 0x408a2ad9, + 0x408a9958, + 0x408b3173, + 0x408baeeb, + 0x408c2486, + 0x408c9990, + 0x408d1eef, + 0x408d9e39, + 0x408e201f, + 0x408ea2e4, + 0x408f27fd, + 0x408fa5d0, + 0x409027b2, + 0x4090a4c8, + 0x40912ac1, + 0x409199b6, + 0x40921c20, + 0x4092ae9c, + 0x40932f7c, + 0x4093a1a2, + 0x40941e1b, + 0x4094aaf2, + 0x4095265e, + 0x4095b104, + 0x40962e48, + 0x4096a11a, + 0x409721ed, + 0x4097a06e, + 0x40981c80, + 0x4098a672, + 0x40992eb8, + 0x4099a311, + 0x409a22aa, + 0x409a9974, + 0x409b1e75, + 0x409b9ea0, + 0x409c3059, + 0x409c9ec8, + 0x409d20d6, + 0x409da0bb, + 0x409e1d11, + 0x409ea14f, + 0x409f2137, + 0x409f9e68, + 0x40a02535, + 0x40a0a088, + 0x41f42993, + 0x41f92a25, + 0x41fe2918, + 0x41feabce, + 0x41ff2cfc, + 0x420329ac, + 0x420829ce, + 0x4208aa0a, + 0x420928fc, + 0x4209aa44, + 0x420a2953, + 0x420aa933, + 0x420b2973, + 0x420ba9ec, + 0x420c2d18, + 0x420cab02, + 0x420d2bb5, + 0x420dabec, + 0x42122c1f, + 0x42172cdf, + 0x4217ac61, + 0x421c2c83, + 0x421f2c3e, + 0x42212d90, + 0x42262cc2, + 0x422b2d6e, + 0x422bab90, + 0x422c2d50, + 0x422cab43, + 0x422d2b1c, + 0x422dad2f, + 0x422e2b6f, + 0x42302c9e, + 0x4230ac06, + 0x44320755, + 0x44328764, + 0x44330770, + 0x4433877e, + 0x44340791, + 0x443487a2, + 0x443507a9, + 0x443587b3, + 0x443607c6, + 0x443687dc, + 0x443707ee, + 0x443787fb, + 0x4438080a, + 0x44388812, + 0x4439082a, + 0x44398838, + 0x443a084b, + 0x4832132e, + 0x48329340, + 0x48331356, + 0x4833936f, + 0x4c321394, + 0x4c3293a4, + 0x4c3313b7, + 0x4c3393d7, + 0x4c3400b9, + 0x4c3480f7, + 0x4c3513e3, + 0x4c3593f1, + 0x4c36140d, + 0x4c369433, + 0x4c371442, + 0x4c379450, + 0x4c381465, + 0x4c389471, + 0x4c391491, + 0x4c3994bb, + 0x4c3a14d4, + 0x4c3a94ed, + 0x4c3b0625, + 0x4c3b9506, + 0x4c3c1518, + 0x4c3c9527, + 0x4c3d1540, + 0x4c3d8c6f, + 0x4c3e15ad, + 0x4c3e954f, + 0x4c3f15cf, + 0x4c3f9304, + 0x4c401565, + 0x4c409380, + 0x4c41159d, + 0x4c419420, + 0x4c421589, + 0x50323502, + 0x5032b511, + 0x5033351c, + 0x5033b52c, + 0x50343545, + 0x5034b55f, + 
0x5035356d, + 0x5035b583, + 0x50363595, + 0x5036b5ab, + 0x503735c4, + 0x5037b5d7, + 0x503835ef, + 0x5038b600, + 0x50393615, + 0x5039b629, + 0x503a3649, + 0x503ab65f, + 0x503b3677, + 0x503bb689, + 0x503c36a5, + 0x503cb6bc, + 0x503d36d5, + 0x503db6eb, + 0x503e36f8, + 0x503eb70e, + 0x503f3720, + 0x503f83a3, + 0x50403733, + 0x5040b743, + 0x5041375d, + 0x5041b76c, + 0x50423786, + 0x5042b7a3, + 0x504337b3, + 0x5043b7c3, + 0x504437e0, + 0x50448459, + 0x504537f4, + 0x5045b812, + 0x50463825, + 0x5046b83b, + 0x5047384d, + 0x5047b862, + 0x50483888, + 0x5048b896, + 0x504938a9, + 0x5049b8be, + 0x504a38d4, + 0x504ab8e4, + 0x504b3904, + 0x504bb917, + 0x504c393a, + 0x504cb968, + 0x504d3995, + 0x504db9b2, + 0x504e39cd, + 0x504eb9e9, + 0x504f39fb, + 0x504fba12, + 0x50503a21, + 0x50508719, + 0x50513a34, + 0x5051b7d2, + 0x5052397a, + 0x58320f8d, + 0x68320f4f, + 0x68328ca7, + 0x68330cba, + 0x68338f5d, + 0x68340f6d, + 0x683480f7, + 0x6c320f15, + 0x6c328c5e, + 0x6c330f20, + 0x6c338f39, + 0x74320a43, + 0x743280b9, + 0x74330c6f, + 0x783209a8, + 0x783289bd, + 0x783309c9, + 0x78338090, + 0x783409d8, + 0x783489ed, + 0x78350a0c, + 0x78358a2e, + 0x78360a43, + 0x78368a59, + 0x78370a69, + 0x78378a8a, + 0x78380a9d, + 0x78388aaf, + 0x78390abc, + 0x78398adb, + 0x783a0af0, + 0x783a8afe, + 0x783b0b08, + 0x783b8b1c, + 0x783c0b33, + 0x783c8b48, + 0x783d0b5f, + 0x783d8b74, + 0x783e0aca, + 0x783e8a7c, + 0x7c32121d, + 0x80321433, + 0x80328090, + 0x80333255, + 0x803380b9, + 0x80343264, + 0x8034b1cc, + 0x803531ea, + 0x8035b278, + 0x8036322c, + 0x8036b1db, + 0x8037321e, + 0x8037b1b9, + 0x8038323f, + 0x8038b1fb, + 0x80393210, }; const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); @@ -782,6 +809,7 @@ const char kOpenSSLReasonStringData[] = "AUX_ERROR\0" "BAD_GET_ASN1_OBJECT_CALL\0" "BAD_OBJECT_HEADER\0" + "BAD_TEMPLATE\0" "BMPSTRING_IS_WRONG_LENGTH\0" "BN_LIB\0" "BOOLEAN_IS_WRONG_LENGTH\0" @@ -820,6 +848,7 @@ const char kOpenSSLReasonStringData[] = 
"INTEGER_NOT_ASCII_FORMAT\0" "INTEGER_TOO_LARGE_FOR_LONG\0" "INVALID_BIT_STRING_BITS_LEFT\0" + "INVALID_BIT_STRING_PADDING\0" "INVALID_BMPSTRING\0" "INVALID_DIGIT\0" "INVALID_MODIFIER\0" @@ -988,10 +1017,12 @@ const char kOpenSSLReasonStringData[] = "COMMAND_NOT_SUPPORTED\0" "DIFFERENT_KEY_TYPES\0" "DIFFERENT_PARAMETERS\0" + "EMPTY_PSK\0" "EXPECTING_AN_EC_KEY_KEY\0" "EXPECTING_AN_RSA_KEY\0" "EXPECTING_A_DSA_KEY\0" "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\0" + "INVALID_BUFFER_SIZE\0" "INVALID_DIGEST_LENGTH\0" "INVALID_DIGEST_TYPE\0" "INVALID_KEYBITS\0" @@ -1105,6 +1136,7 @@ const char kOpenSSLReasonStringData[] = "VALUE_MISSING\0" "WRONG_SIGNATURE_LENGTH\0" "ALPN_MISMATCH_ON_EARLY_DATA\0" + "ALPS_MISMATCH_ON_EARLY_DATA\0" "APPLICATION_DATA_INSTEAD_OF_HANDSHAKE\0" "APPLICATION_DATA_ON_SHUTDOWN\0" "APP_DATA_IN_HANDSHAKE\0" @@ -1145,6 +1177,7 @@ const char kOpenSSLReasonStringData[] = "CLIENTHELLO_TLSEXT\0" "CONNECTION_REJECTED\0" "CONNECTION_TYPE_NOT_SET\0" + "COULD_NOT_PARSE_HINTS\0" "CUSTOM_EXTENSION_ERROR\0" "DATA_LENGTH_TOO_LONG\0" "DECRYPTION_FAILED\0" @@ -1159,6 +1192,10 @@ const char kOpenSSLReasonStringData[] = "DUPLICATE_SIGNATURE_ALGORITHM\0" "EARLY_DATA_NOT_IN_USE\0" "ECC_CERT_NOT_FOR_SIGNING\0" + "ECH_REJECTED\0" + "ECH_SERVER_CONFIG_AND_PRIVATE_KEY_MISMATCH\0" + "ECH_SERVER_CONFIG_UNSUPPORTED_EXTENSION\0" + "ECH_SERVER_WOULD_HAVE_NO_RETRY_CONFIGS\0" "EMPTY_HELLO_RETRY_REQUEST\0" "EMS_STATE_INCONSISTENT\0" "ENCRYPTED_LENGTH_TOO_LONG\0" @@ -1176,10 +1213,15 @@ const char kOpenSSLReasonStringData[] = "HTTP_REQUEST\0" "INAPPROPRIATE_FALLBACK\0" "INCONSISTENT_CLIENT_HELLO\0" + "INCONSISTENT_ECH_NEGOTIATION\0" "INVALID_ALPN_PROTOCOL\0" + "INVALID_ALPN_PROTOCOL_LIST\0" + "INVALID_CLIENT_HELLO_INNER\0" "INVALID_COMMAND\0" "INVALID_COMPRESSION_LIST\0" "INVALID_DELEGATED_CREDENTIAL\0" + "INVALID_ECH_CONFIG_LIST\0" + "INVALID_ECH_PUBLIC_NAME\0" "INVALID_MESSAGE\0" "INVALID_OUTER_RECORD_TYPE\0" "INVALID_SCT_LIST\0" @@ -1195,9 +1237,11 @@ const char 
kOpenSSLReasonStringData[] = "MISSING_TMP_ECDH_KEY\0" "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\0" "MTU_TOO_SMALL\0" + "NEGOTIATED_ALPS_WITHOUT_ALPN\0" "NEGOTIATED_BOTH_NPN_AND_ALPN\0" "NEGOTIATED_TB_WITHOUT_EMS_OR_RI\0" "NESTED_GROUP\0" + "NO_APPLICATION_PROTOCOL\0" "NO_CERTIFICATES_RETURNED\0" "NO_CERTIFICATE_ASSIGNED\0" "NO_CERTIFICATE_SET\0" @@ -1222,6 +1266,7 @@ const char kOpenSSLReasonStringData[] = "OLD_SESSION_CIPHER_NOT_RETURNED\0" "OLD_SESSION_PRF_HASH_MISMATCH\0" "OLD_SESSION_VERSION_NOT_RETURNED\0" + "OUTER_EXTENSION_NOT_FOUND\0" "PARSE_TLSEXT\0" "PATH_TOO_LONG\0" "PEER_DID_NOT_RETURN_A_CERTIFICATE\0" @@ -1275,25 +1320,27 @@ const char kOpenSSLReasonStringData[] = "TICKET_ENCRYPTION_FAILED\0" "TLS13_DOWNGRADE\0" "TLSV1_ALERT_ACCESS_DENIED\0" + "TLSV1_ALERT_BAD_CERTIFICATE_HASH_VALUE\0" + "TLSV1_ALERT_BAD_CERTIFICATE_STATUS_RESPONSE\0" + "TLSV1_ALERT_CERTIFICATE_REQUIRED\0" + "TLSV1_ALERT_CERTIFICATE_UNOBTAINABLE\0" "TLSV1_ALERT_DECODE_ERROR\0" "TLSV1_ALERT_DECRYPTION_FAILED\0" "TLSV1_ALERT_DECRYPT_ERROR\0" + "TLSV1_ALERT_ECH_REQUIRED\0" "TLSV1_ALERT_EXPORT_RESTRICTION\0" "TLSV1_ALERT_INAPPROPRIATE_FALLBACK\0" "TLSV1_ALERT_INSUFFICIENT_SECURITY\0" "TLSV1_ALERT_INTERNAL_ERROR\0" + "TLSV1_ALERT_NO_APPLICATION_PROTOCOL\0" "TLSV1_ALERT_NO_RENEGOTIATION\0" "TLSV1_ALERT_PROTOCOL_VERSION\0" "TLSV1_ALERT_RECORD_OVERFLOW\0" "TLSV1_ALERT_UNKNOWN_CA\0" + "TLSV1_ALERT_UNKNOWN_PSK_IDENTITY\0" + "TLSV1_ALERT_UNRECOGNIZED_NAME\0" + "TLSV1_ALERT_UNSUPPORTED_EXTENSION\0" "TLSV1_ALERT_USER_CANCELLED\0" - "TLSV1_BAD_CERTIFICATE_HASH_VALUE\0" - "TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\0" - "TLSV1_CERTIFICATE_REQUIRED\0" - "TLSV1_CERTIFICATE_UNOBTAINABLE\0" - "TLSV1_UNKNOWN_PSK_IDENTITY\0" - "TLSV1_UNRECOGNIZED_NAME\0" - "TLSV1_UNSUPPORTED_EXTENSION\0" "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\0" "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\0" "TOO_MANY_EMPTY_FRAGMENTS\0" @@ -1303,6 +1350,7 @@ const char kOpenSSLReasonStringData[] = "TOO_MUCH_SKIPPED_EARLY_DATA\0" 
"UNABLE_TO_FIND_ECDH_PARAMETERS\0" "UNCOMPRESSED_CERT_TOO_LARGE\0" + "UNEXPECTED_COMPATIBILITY_MODE\0" "UNEXPECTED_EXTENSION\0" "UNEXPECTED_EXTENSION_ON_EARLY_DATA\0" "UNEXPECTED_MESSAGE\0" @@ -1319,6 +1367,7 @@ const char kOpenSSLReasonStringData[] = "UNKNOWN_STATE\0" "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\0" "UNSUPPORTED_COMPRESSION_ALGORITHM\0" + "UNSUPPORTED_ECH_SERVER_CONFIG\0" "UNSUPPORTED_ELLIPTIC_CURVE\0" "UNSUPPORTED_PROTOCOL\0" "UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY\0" @@ -1352,12 +1401,15 @@ const char kOpenSSLReasonStringData[] = "CERT_ALREADY_IN_HASH_TABLE\0" "CRL_ALREADY_DELTA\0" "CRL_VERIFY_FAILURE\0" + "DELTA_CRL_WITHOUT_CRL_NUMBER\0" "IDP_MISMATCH\0" "INVALID_DIRECTORY\0" + "INVALID_FIELD_FOR_VERSION\0" "INVALID_FIELD_NAME\0" "INVALID_PARAMETER\0" "INVALID_PSS_PARAMETERS\0" "INVALID_TRUST\0" + "INVALID_VERSION\0" "ISSUER_MISMATCH\0" "KEY_TYPE_MISMATCH\0" "KEY_VALUES_MISMATCH\0" @@ -1410,6 +1462,7 @@ const char kOpenSSLReasonStringData[] = "INVALID_PURPOSE\0" "INVALID_SECTION\0" "INVALID_SYNTAX\0" + "INVALID_VALUE\0" "ISSUER_DECODE_ERROR\0" "NEED_ORGANIZATION_AND_NUMBERS\0" "NO_CONFIG_DATABASE\0" @@ -1427,6 +1480,7 @@ const char kOpenSSLReasonStringData[] = "POLICY_PATH_LENGTH_ALREADY_DEFINED\0" "POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY\0" "SECTION_NOT_FOUND\0" + "TRAILING_DATA_IN_EXTENSION\0" "UNABLE_TO_GET_ISSUER_DETAILS\0" "UNABLE_TO_GET_ISSUER_KEYID\0" "UNKNOWN_BIT_STRING_ARGUMENT\0" diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aes/mode_wrappers.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aes/mode_wrappers.c index c1933e2..1a9260d 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aes/mode_wrappers.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aes/mode_wrappers.c @@ -57,7 +57,23 @@ void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE], uint8_t ecount_buf[AES_BLOCK_SIZE], unsigned int *num) { - CRYPTO_ctr128_encrypt(in, out, len, key, ivec, ecount_buf, num, 
AES_encrypt); + if (hwaes_capable()) { + CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, + aes_hw_ctr32_encrypt_blocks); + } else if (vpaes_capable()) { +#if defined(VPAES_CTR32) + // TODO(davidben): On ARM, where |BSAES| is additionally defined, this could + // use |vpaes_ctr32_encrypt_blocks_with_bsaes|. + CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, + vpaes_ctr32_encrypt_blocks); +#else + CRYPTO_ctr128_encrypt(in, out, len, key, ivec, ecount_buf, num, + vpaes_encrypt); +#endif + } else { + CRYPTO_ctr128_encrypt_ctr32(in, out, len, key, ivec, ecount_buf, num, + aes_nohw_ctr32_encrypt_blocks); + } } void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key, diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.linux.x86_64.S index c3927cd..2f89659 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.mac.x86_64.S index ecb9527..c672795 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-gcm-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86.linux.x86.S index e3a0270..9541a0a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.linux.x86_64.S index c5befd2..854231b 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.mac.x86_64.S index d84b3ae..aa36f59 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesni-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.ios.arm.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.ios.arm.S index b24a614..1d3863c 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.ios.arm.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.ios.arm.S @@ -250,6 +250,7 @@ Ldec_key_abort: #endif .align 5 _aes_hw_encrypt: + AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] @@ -282,6 +283,7 @@ Loop_enc: #endif .align 5 _aes_hw_decrypt: + AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] @@ -630,20 +632,34 @@ _aes_hw_ctr32_encrypt_blocks: add r7,r3,#32 mov r6,r5 movlo r12,#0 + + @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are + @ affected by silicon errata #1742098 [0] and #1655431 [1], + @ respectively, where the second instruction of an aese/aesmc + @ instruction pair may execute twice if an interrupt is taken right + @ after the first instruction consumes an input register of which a + @ single 32-bit lane has been updated the last time it was modified. + @ + @ This function uses a counter in one 32-bit lane. The + @ could write to q1 and q10 directly, but that trips this bugs. + @ We write to q6 and copy to the final register as a workaround. 
+ @ + @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice + @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev r8, r8 #endif - vorr q1,q0,q0 add r10, r8, #1 - vorr q10,q0,q0 - add r8, r8, #2 vorr q6,q0,q0 rev r10, r10 - vmov.32 d3[1],r10 + vmov.32 d13[1],r10 + add r8, r8, #2 + vorr q1,q6,q6 bls Lctr32_tail rev r12, r8 + vmov.32 d13[1],r12 sub r2,r2,#3 @ bias - vmov.32 d21[1],r12 + vorr q10,q6,q6 b Loop3x_ctr32 .align 4 @@ -670,11 +686,11 @@ Loop3x_ctr32: .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 vld1.8 {q2},[r0]! - vorr q0,q6,q6 + add r9,r8,#1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.8 {q3},[r0]! - vorr q1,q6,q6 + rev r9,r9 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 @@ -683,8 +699,6 @@ Loop3x_ctr32: mov r7,r3 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 - vorr q10,q6,q6 - add r9,r8,#1 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 @@ -699,21 +713,26 @@ Loop3x_ctr32: .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 + @ Note the logic to update q0, q1, and q1 is written to work + @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in + @ 32-bit mode. See the comment above. 
veor q11,q11,q7 - rev r9,r9 + vmov.32 d13[1], r9 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d1[1], r9 + vorr q0,q6,q6 rev r10,r10 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 + vmov.32 d13[1], r10 + rev r12,r8 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vmov.32 d3[1], r10 - rev r12,r8 + vorr q1,q6,q6 + vmov.32 d13[1], r12 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d21[1], r12 + vorr q10,q6,q6 subs r2,r2,#3 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.linux.arm.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.linux.arm.S index 40453c0..f020596 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.linux.arm.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx32.linux.arm.S @@ -245,6 +245,7 @@ aes_hw_set_decrypt_key: .type aes_hw_encrypt,%function .align 5 aes_hw_encrypt: + AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] @@ -275,6 +276,7 @@ aes_hw_encrypt: .type aes_hw_decrypt,%function .align 5 aes_hw_decrypt: + AARCH64_VALID_CALL_TARGET ldr r3,[r2,#240] vld1.32 {q0},[r2]! vld1.8 {q2},[r0] @@ -619,20 +621,34 @@ aes_hw_ctr32_encrypt_blocks: add r7,r3,#32 mov r6,r5 movlo r12,#0 + + @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are + @ affected by silicon errata #1742098 [0] and #1655431 [1], + @ respectively, where the second instruction of an aese/aesmc + @ instruction pair may execute twice if an interrupt is taken right + @ after the first instruction consumes an input register of which a + @ single 32-bit lane has been updated the last time it was modified. + @ + @ This function uses a counter in one 32-bit lane. The + @ could write to q1 and q10 directly, but that trips this bugs. 
+ @ We write to q6 and copy to the final register as a workaround. + @ + @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice + @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev r8, r8 #endif - vorr q1,q0,q0 add r10, r8, #1 - vorr q10,q0,q0 - add r8, r8, #2 vorr q6,q0,q0 rev r10, r10 - vmov.32 d3[1],r10 + vmov.32 d13[1],r10 + add r8, r8, #2 + vorr q1,q6,q6 bls .Lctr32_tail rev r12, r8 + vmov.32 d13[1],r12 sub r2,r2,#3 @ bias - vmov.32 d21[1],r12 + vorr q10,q6,q6 b .Loop3x_ctr32 .align 4 @@ -659,11 +675,11 @@ aes_hw_ctr32_encrypt_blocks: .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8 .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1 vld1.8 {q2},[r0]! - vorr q0,q6,q6 + add r9,r8,#1 .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8 .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10 vld1.8 {q3},[r0]! - vorr q1,q6,q6 + rev r9,r9 .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9 @@ -672,8 +688,6 @@ aes_hw_ctr32_encrypt_blocks: mov r7,r3 .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9 .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10 - vorr q10,q6,q6 - add r9,r8,#1 .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12 @@ -688,21 +702,26 @@ aes_hw_ctr32_encrypt_blocks: .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 + @ Note the logic to update q0, q1, and q1 is written to work + @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in + @ 32-bit mode. See the comment above. 
veor q11,q11,q7 - rev r9,r9 + vmov.32 d13[1], r9 .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d1[1], r9 + vorr q0,q6,q6 rev r10,r10 .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14 .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4 + vmov.32 d13[1], r10 + rev r12,r8 .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14 .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5 - vmov.32 d3[1], r10 - rev r12,r8 + vorr q1,q6,q6 + vmov.32 d13[1], r12 .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14 .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9 - vmov.32 d21[1], r12 + vorr q10,q6,q6 subs r2,r2,#3 .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15 .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.ios.aarch64.S index cde7a64..11b7766 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.ios.aarch64.S @@ -34,6 +34,8 @@ Lrcon: .align 5 _aes_hw_set_encrypt_key: Lenc_key: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-1 @@ -202,6 +204,7 @@ Lenc_key_abort: .align 5 _aes_hw_set_decrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 bl Lenc_key @@ -235,6 +238,7 @@ Loop_imc: eor x0,x0,x0 // return value Ldec_key_abort: ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .globl _aes_hw_encrypt @@ -242,6 +246,7 @@ Ldec_key_abort: .align 5 _aes_hw_encrypt: + AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] @@ -272,6 +277,7 @@ Loop_enc: .align 5 _aes_hw_decrypt: + AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] @@ -302,6 +308,8 @@ Loop_dec: .align 5 _aes_hw_cbc_encrypt: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. 
+ AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 @@ -593,6 +601,8 @@ Lcbc_abort: .align 5 _aes_hw_ctr32_encrypt_blocks: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] @@ -612,20 +622,34 @@ _aes_hw_ctr32_encrypt_blocks: add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo + + // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are + // affected by silicon errata #1742098 [0] and #1655431 [1], + // respectively, where the second instruction of an aese/aesmc + // instruction pair may execute twice if an interrupt is taken right + // after the first instruction consumes an input register of which a + // single 32-bit lane has been updated the last time it was modified. + // + // This function uses a counter in one 32-bit lane. The vmov lines + // could write to v1.16b and v18.16b directly, but that trips this bugs. + // We write to v6.16b and copy to the final register as a workaround. 
+ // + // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice + // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev w8, w8 #endif - orr v1.16b,v0.16b,v0.16b add w10, w8, #1 - orr v18.16b,v0.16b,v0.16b - add w8, w8, #2 orr v6.16b,v0.16b,v0.16b rev w10, w10 - mov v1.s[3],w10 + mov v6.s[3],w10 + add w8, w8, #2 + orr v1.16b,v6.16b,v6.16b b.ls Lctr32_tail rev w12, w8 + mov v6.s[3],w12 sub x2,x2,#3 // bias - mov v18.s[3],w12 + orr v18.16b,v6.16b,v6.16b b Loop3x_ctr32 .align 4 @@ -652,11 +676,11 @@ Loop3x_ctr32: aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 - orr v0.16b,v6.16b,v6.16b + add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 - orr v1.16b,v6.16b,v6.16b + rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b @@ -665,8 +689,6 @@ Loop3x_ctr32: mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b - orr v18.16b,v6.16b,v6.16b - add w9,w8,#1 aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b @@ -681,21 +703,26 @@ Loop3x_ctr32: aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b + // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work + // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in + // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b - rev w9,w9 + mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b - mov v0.s[3], w9 + orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b + mov v6.s[3], w10 + rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b - mov v1.s[3], w10 - rev w12,w8 + orr v1.16b,v6.16b,v6.16b + mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b - mov v18.s[3], w12 + orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.linux.aarch64.S index 9211b8a..e2d2e4a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/aesv8-armx64.linux.aarch64.S @@ -35,6 +35,8 @@ .align 5 aes_hw_set_encrypt_key: .Lenc_key: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 mov x3,#-1 @@ -203,6 +205,7 @@ aes_hw_set_encrypt_key: .type aes_hw_set_decrypt_key,%function .align 5 aes_hw_set_decrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! 
add x29,sp,#0 bl .Lenc_key @@ -236,6 +239,7 @@ aes_hw_set_decrypt_key: eor x0,x0,x0 // return value .Ldec_key_abort: ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size aes_hw_set_decrypt_key,.-aes_hw_set_decrypt_key .globl aes_hw_encrypt @@ -243,6 +247,7 @@ aes_hw_set_decrypt_key: .type aes_hw_encrypt,%function .align 5 aes_hw_encrypt: + AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] @@ -273,6 +278,7 @@ aes_hw_encrypt: .type aes_hw_decrypt,%function .align 5 aes_hw_decrypt: + AARCH64_VALID_CALL_TARGET ldr w3,[x2,#240] ld1 {v0.4s},[x2],#16 ld1 {v2.16b},[x0] @@ -303,6 +309,8 @@ aes_hw_decrypt: .type aes_hw_cbc_encrypt,%function .align 5 aes_hw_cbc_encrypt: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 subs x2,x2,#16 @@ -594,6 +602,8 @@ aes_hw_cbc_encrypt: .type aes_hw_ctr32_encrypt_blocks,%function .align 5 aes_hw_ctr32_encrypt_blocks: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET stp x29,x30,[sp,#-16]! add x29,sp,#0 ldr w5,[x3,#240] @@ -613,20 +623,34 @@ aes_hw_ctr32_encrypt_blocks: add x7,x3,#32 mov w6,w5 csel x12,xzr,x12,lo + + // ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are + // affected by silicon errata #1742098 [0] and #1655431 [1], + // respectively, where the second instruction of an aese/aesmc + // instruction pair may execute twice if an interrupt is taken right + // after the first instruction consumes an input register of which a + // single 32-bit lane has been updated the last time it was modified. + // + // This function uses a counter in one 32-bit lane. The vmov lines + // could write to v1.16b and v18.16b directly, but that trips this bugs. + // We write to v6.16b and copy to the final register as a workaround. 
+ // + // [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice + // [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice #ifndef __ARMEB__ rev w8, w8 #endif - orr v1.16b,v0.16b,v0.16b add w10, w8, #1 - orr v18.16b,v0.16b,v0.16b - add w8, w8, #2 orr v6.16b,v0.16b,v0.16b rev w10, w10 - mov v1.s[3],w10 + mov v6.s[3],w10 + add w8, w8, #2 + orr v1.16b,v6.16b,v6.16b b.ls .Lctr32_tail rev w12, w8 + mov v6.s[3],w12 sub x2,x2,#3 // bias - mov v18.s[3],w12 + orr v18.16b,v6.16b,v6.16b b .Loop3x_ctr32 .align 4 @@ -653,11 +677,11 @@ aes_hw_ctr32_encrypt_blocks: aese v1.16b,v16.16b aesmc v5.16b,v1.16b ld1 {v2.16b},[x0],#16 - orr v0.16b,v6.16b,v6.16b + add w9,w8,#1 aese v18.16b,v16.16b aesmc v18.16b,v18.16b ld1 {v3.16b},[x0],#16 - orr v1.16b,v6.16b,v6.16b + rev w9,w9 aese v4.16b,v17.16b aesmc v4.16b,v4.16b aese v5.16b,v17.16b @@ -666,8 +690,6 @@ aes_hw_ctr32_encrypt_blocks: mov x7,x3 aese v18.16b,v17.16b aesmc v17.16b,v18.16b - orr v18.16b,v6.16b,v6.16b - add w9,w8,#1 aese v4.16b,v20.16b aesmc v4.16b,v4.16b aese v5.16b,v20.16b @@ -682,21 +704,26 @@ aes_hw_ctr32_encrypt_blocks: aesmc v4.16b,v4.16b aese v5.16b,v21.16b aesmc v5.16b,v5.16b + // Note the logic to update v0.16b, v1.16b, and v1.16b is written to work + // around a bug in ARM Cortex-A57 and Cortex-A72 cores running in + // 32-bit mode. See the comment above. 
eor v19.16b,v19.16b,v7.16b - rev w9,w9 + mov v6.s[3], w9 aese v17.16b,v21.16b aesmc v17.16b,v17.16b - mov v0.s[3], w9 + orr v0.16b,v6.16b,v6.16b rev w10,w10 aese v4.16b,v22.16b aesmc v4.16b,v4.16b + mov v6.s[3], w10 + rev w12,w8 aese v5.16b,v22.16b aesmc v5.16b,v5.16b - mov v1.s[3], w10 - rev w12,w8 + orr v1.16b,v6.16b,v6.16b + mov v6.s[3], w12 aese v17.16b,v22.16b aesmc v17.16b,v17.16b - mov v18.s[3], w12 + orr v18.16b,v6.16b,v6.16b subs x2,x2,#3 aese v4.16b,v23.16b aese v5.16b,v23.16b diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.ios.aarch64.S index 47d837e..f67029f 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.ios.aarch64.S @@ -14,6 +14,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .text .globl _bn_mul_mont @@ -21,6 +23,7 @@ .align 5 _bn_mul_mont: + AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 @@ -218,11 +221,14 @@ Lcond_copy: mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 + AARCH64_VALIDATE_LINK_REGISTER ret .align 5 __bn_sqr8x_mont: + // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to + // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont Lsqr8x_mont: @@ -976,11 +982,16 @@ Lsqr8x_done: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 + // x30 is popped earlier + AARCH64_VALIDATE_LINK_REGISTER ret .align 5 __bn_mul4x_mont: + // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to + // only from bn_mul_mont or __bn_mul8x_mont which have already signed the + // return address. stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] @@ -1414,6 +1425,8 @@ Lmul4x_done: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 + // x30 is popped earlier + AARCH64_VALIDATE_LINK_REGISTER ret .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.linux.aarch64.S index dbd1d6a..038f0e1 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/armv8-mont.linux.aarch64.S @@ -15,6 +15,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .text .globl bn_mul_mont @@ -22,6 +24,7 @@ .type bn_mul_mont,%function .align 5 bn_mul_mont: + AARCH64_SIGN_LINK_REGISTER tst x5,#7 b.eq __bn_sqr8x_mont tst x5,#3 @@ -219,11 +222,14 @@ bn_mul_mont: mov x0,#1 ldp x23,x24,[x29,#48] ldr x29,[sp],#64 + AARCH64_VALIDATE_LINK_REGISTER ret .size bn_mul_mont,.-bn_mul_mont .type __bn_sqr8x_mont,%function .align 5 __bn_sqr8x_mont: + // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_sqr8x_mont is jumped to + // only from bn_mul_mont which has already signed the return address. cmp x1,x2 b.ne __bn_mul4x_mont .Lsqr8x_mont: @@ -977,11 +983,16 @@ __bn_sqr8x_mont: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 + // x30 is popped earlier + AARCH64_VALIDATE_LINK_REGISTER ret .size __bn_sqr8x_mont,.-__bn_sqr8x_mont .type __bn_mul4x_mont,%function .align 5 __bn_mul4x_mont: + // Not adding AARCH64_SIGN_LINK_REGISTER here because __bn_mul4x_mont is jumped to + // only from bn_mul_mont or __bn_mul8x_mont which have already signed the + // return address. stp x29,x30,[sp,#-128]! 
add x29,sp,#0 stp x19,x20,[sp,#16] @@ -1415,6 +1426,8 @@ __bn_mul4x_mont: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldr x29,[sp],#128 + // x30 is popped earlier + AARCH64_VALIDATE_LINK_REGISTER ret .size __bn_mul4x_mont,.-__bn_mul4x_mont .byte 77,111,110,116,103,111,109,101,114,121,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn-586.linux.x86.S index e20dde8..03a4dde 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn-586.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/bn.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/bn.c index 9e0b010..5c77550 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/bn.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/bn.c @@ -101,26 +101,7 @@ void BN_free(BIGNUM *bn) { } void BN_clear_free(BIGNUM *bn) { - char should_free; - - if (bn == NULL) { - return; - } - - if (bn->d != NULL) { - if ((bn->flags & BN_FLG_STATIC_DATA) == 0) { - OPENSSL_free(bn->d); - } else { - OPENSSL_cleanse(bn->d, bn->dmax * sizeof(bn->d[0])); - } - } - - should_free = (bn->flags & BN_FLG_MALLOCED) != 0; - if (should_free) { - OPENSSL_free(bn); - } else { - OPENSSL_cleanse(bn, sizeof(BIGNUM)); - } + BN_free(bn); } BIGNUM *BN_dup(const BIGNUM *src) { @@ -302,6 +283,18 @@ int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num) { return 1; } +void bn_set_static_words(BIGNUM *bn, const BN_ULONG *words, size_t num) { + if ((bn->flags & BN_FLG_STATIC_DATA) == 0) { + OPENSSL_free(bn->d); + } + bn->d = (BN_ULONG *)words; + + bn->width = num; + bn->dmax = num; + bn->neg = 0; + bn->flags |= BN_FLG_STATIC_DATA; +} + int bn_fits_in_words(const BIGNUM *bn, size_t num) { // All words beyond |num| must be zero. BN_ULONG mask = 0; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/div.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/div.c index 403dcd5..25fdd54 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/div.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/div.c @@ -64,10 +64,10 @@ #include "internal.h" -#if !defined(BN_CAN_DIVIDE_ULLONG) && !defined(BN_CAN_USE_INLINE_ASM) // bn_div_words divides a double-width |h|,|l| by |d| and returns the result, // which must fit in a |BN_ULONG|. 
-static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { +OPENSSL_UNUSED static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, + BN_ULONG d) { BN_ULONG dh, dl, q, ret = 0, th, tl, t; int i, count = 2; @@ -135,7 +135,6 @@ static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { ret |= q; return ret; } -#endif // !defined(BN_CAN_DIVIDE_ULLONG) && !defined(BN_CAN_USE_INLINE_ASM) static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out, BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) { @@ -286,8 +285,10 @@ int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator, // pointer to the 'top' of snum wnump = &(snum->d[num_n - 1]); - // Setup to 'res' - res->neg = (numerator->neg ^ divisor->neg); + // Setup |res|. |numerator| and |res| may alias, so we save |numerator->neg| + // for later. + const int numerator_neg = numerator->neg; + res->neg = (numerator_neg ^ divisor->neg); if (!bn_wexpand(res, loop + 1)) { goto err; } @@ -380,14 +381,11 @@ int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator, bn_set_minimal_width(snum); if (rem != NULL) { - // Keep a copy of the neg flag in numerator because if |rem| == |numerator| - // |BN_rshift| will overwrite it. - int neg = numerator->neg; if (!BN_rshift(rem, snum, norm_shift)) { goto err; } if (!BN_is_zero(rem)) { - rem->neg = neg; + rem->neg = numerator_neg; } } @@ -458,7 +456,7 @@ void bn_mod_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int bn_div_consttime(BIGNUM *quotient, BIGNUM *remainder, const BIGNUM *numerator, const BIGNUM *divisor, - BN_CTX *ctx) { + unsigned divisor_min_bits, BN_CTX *ctx) { if (BN_is_negative(numerator) || BN_is_negative(divisor)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; @@ -498,8 +496,26 @@ int bn_div_consttime(BIGNUM *quotient, BIGNUM *remainder, r->neg = 0; // Incorporate |numerator| into |r|, one bit at a time, reducing after each - // step. 
At the start of each loop iteration, |r| < |divisor| - for (int i = numerator->width - 1; i >= 0; i--) { + // step. We maintain the invariant that |0 <= r < divisor| and + // |q * divisor + r = n| where |n| is the portion of |numerator| incorporated + // so far. + // + // First, we short-circuit the loop: if we know |divisor| has at least + // |divisor_min_bits| bits, the top |divisor_min_bits - 1| can be incorporated + // without reductions. This significantly speeds up |RSA_check_key|. For + // simplicity, we round down to a whole number of words. + assert(divisor_min_bits <= BN_num_bits(divisor)); + int initial_words = 0; + if (divisor_min_bits > 0) { + initial_words = (divisor_min_bits - 1) / BN_BITS2; + if (initial_words > numerator->width) { + initial_words = numerator->width; + } + OPENSSL_memcpy(r->d, numerator->d + numerator->width - initial_words, + initial_words * sizeof(BN_ULONG)); + } + + for (int i = numerator->width - initial_words - 1; i >= 0; i--) { for (int bit = BN_BITS2 - 1; bit >= 0; bit--) { // Incorporate the next bit of the numerator, by computing // r = 2*r or 2*r + 1. Note the result fits in one more word. We store the diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/gcd_extra.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/gcd_extra.c index 23b0618..768a117 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/gcd_extra.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/gcd_extra.c @@ -157,10 +157,11 @@ int bn_lcm_consttime(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { BN_CTX_start(ctx); unsigned shift; BIGNUM *gcd = BN_CTX_get(ctx); - int ret = gcd != NULL && + int ret = gcd != NULL && // bn_mul_consttime(r, a, b, ctx) && bn_gcd_consttime(gcd, &shift, a, b, ctx) && - bn_div_consttime(r, NULL, r, gcd, ctx) && + // |gcd| has a secret bit width. 
+ bn_div_consttime(r, NULL, r, gcd, /*divisor_min_bits=*/0, ctx) && bn_rshift_secret_shift(r, r, shift, ctx); BN_CTX_end(ctx); return ret; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/internal.h b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/internal.h index b491fb4..04e899c 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/internal.h +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/internal.h @@ -123,7 +123,7 @@ #ifndef OPENSSL_HEADER_BN_INTERNAL_H #define OPENSSL_HEADER_BN_INTERNAL_H -#include +#include #if defined(OPENSSL_X86_64) && defined(_MSC_VER) OPENSSL_MSVC_PRAGMA(warning(push, 3)) @@ -241,6 +241,14 @@ void bn_select_words(BN_ULONG *r, BN_ULONG mask, const BN_ULONG *a, // least significant word first. int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num); +// bn_set_static_words acts like |bn_set_words|, but doesn't copy the data. A +// flag is set on |bn| so that |BN_free| won't attempt to free the data. +// +// The |STATIC_BIGNUM| macro is probably a better solution for this outside of +// the FIPS module. Inside of the FIPS module that macro generates rel.ro data, +// which doesn't work with FIPS requirements. +void bn_set_static_words(BIGNUM *bn, const BN_ULONG *words, size_t num); + // bn_fits_in_words returns one if |bn| may be represented in |num| words, plus // a sign bit, and zero otherwise. int bn_fits_in_words(const BIGNUM *bn, size_t num); @@ -289,7 +297,7 @@ void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]); void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]); // bn_sqr_comba8 sets |r| to |a|^2. -void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[4]); +void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]); // bn_sqr_comba4 sets |r| to |a|^2. 
void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]); @@ -404,9 +412,19 @@ uint64_t bn_mont_n0(const BIGNUM *n); int bn_mod_exp_base_2_consttime(BIGNUM *r, unsigned p, const BIGNUM *n, BN_CTX *ctx); -#if defined(OPENSSL_X86_64) && defined(_MSC_VER) +#if defined(_MSC_VER) +#if defined(OPENSSL_X86_64) #define BN_UMULT_LOHI(low, high, a, b) ((low) = _umul128((a), (b), &(high))) +#elif defined(OPENSSL_AARCH64) +#define BN_UMULT_LOHI(low, high, a, b) \ + do { \ + const BN_ULONG _a = (a); \ + const BN_ULONG _b = (b); \ + (low) = _a * _b; \ + (high) = __umulh(_a, _b); \ + } while (0) #endif +#endif // _MSC_VER #if !defined(BN_ULLONG) && !defined(BN_UMULT_LOHI) #error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform." @@ -534,12 +552,15 @@ int bn_sqr_consttime(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx); // bn_div_consttime behaves like |BN_div|, but it rejects negative inputs and // treats both inputs, including their magnitudes, as secret. It is, as a // result, much slower than |BN_div| and should only be used for rare operations -// where Montgomery reduction is not available. +// where Montgomery reduction is not available. |divisor_min_bits| is a +// public lower bound for |BN_num_bits(divisor)|. When |divisor|'s bit width is +// public, this can speed up the operation. // // Note that |quotient->width| will be set pessimally to |numerator->width|. OPENSSL_EXPORT int bn_div_consttime(BIGNUM *quotient, BIGNUM *remainder, const BIGNUM *numerator, - const BIGNUM *divisor, BN_CTX *ctx); + const BIGNUM *divisor, + unsigned divisor_min_bits, BN_CTX *ctx); // bn_is_relatively_prime checks whether GCD(|x|, |y|) is one. 
On success, it // returns one and sets |*out_relatively_prime| to one if the GCD was one and diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/prime.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/prime.c index edc82c6..b61c4fd 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/prime.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/prime.c @@ -115,10 +115,6 @@ #include "../../internal.h" -// The quick sieve algorithm approach to weeding out primes is Philip -// Zimmermann's, as implemented in PGP. I have had a read of his comments and -// implemented my own version. - // kPrimes contains the first 1024 primes. static const uint16_t kPrimes[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, @@ -363,6 +359,18 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, static int probable_prime_dh_safe(BIGNUM *rnd, int bits, const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx); +BN_GENCB *BN_GENCB_new(void) { + BN_GENCB *callback = OPENSSL_malloc(sizeof(BN_GENCB)); + if (callback == NULL) { + OPENSSL_PUT_ERROR(BN, ERR_R_MALLOC_FAILURE); + return NULL; + } + OPENSSL_memset(callback, 0, sizeof(BN_GENCB)); + return callback; +} + +void BN_GENCB_free(BN_GENCB *callback) { OPENSSL_free(callback); } + void BN_GENCB_set(BN_GENCB *callback, int (*f)(int event, int n, struct bn_gencb_st *), void *arg) { diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/sqrt.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/sqrt.c index 4e78400..24e1a02 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/sqrt.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/bn/sqrt.c @@ -75,10 +75,8 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { if (ret == NULL) { ret = BN_new(); } - if (ret == NULL) { - goto end; - } - if (!BN_set_word(ret, BN_is_bit_set(a, 0))) { + if (ret == NULL || + !BN_set_word(ret, BN_is_bit_set(a, 0))) { if (ret != in) { BN_free(ret); } @@ -88,17 +86,15 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const 
BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); - return (NULL); + return NULL; } if (BN_is_zero(a) || BN_is_one(a)) { if (ret == NULL) { ret = BN_new(); } - if (ret == NULL) { - goto end; - } - if (!BN_set_word(ret, BN_is_one(a))) { + if (ret == NULL || + !BN_set_word(ret, BN_is_one(a))) { if (ret != in) { BN_free(ret); } diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/cipher.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/cipher.c index 8d85492..1ada0da 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/cipher.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/cipher.c @@ -57,6 +57,7 @@ #include #include +#include #include #include @@ -224,7 +225,6 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ctx->buf_len = 0; ctx->final_used = 0; - ctx->block_mask = ctx->cipher->block_size - 1; return 1; } @@ -238,16 +238,31 @@ int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, return EVP_CipherInit_ex(ctx, cipher, impl, key, iv, 0); } +// block_remainder returns the number of bytes to remove from |len| to get a +// multiple of |ctx|'s block size. +static int block_remainder(const EVP_CIPHER_CTX *ctx, int len) { + // |block_size| must be a power of two. + assert(ctx->cipher->block_size != 0); + assert((ctx->cipher->block_size & (ctx->cipher->block_size - 1)) == 0); + return len & (ctx->cipher->block_size - 1); +} + int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len) { - int i, j, bl; + // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output + // does not overflow |*out_len|. 
+ int bl = ctx->cipher->block_size; + if (bl > 1 && in_len > INT_MAX - bl) { + OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW); + return 0; + } if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { - i = ctx->cipher->cipher(ctx, out, in, in_len); - if (i < 0) { + int ret = ctx->cipher->cipher(ctx, out, in, in_len); + if (ret < 0) { return 0; } else { - *out_len = i; + *out_len = ret; } return 1; } @@ -257,7 +272,7 @@ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, return in_len == 0; } - if (ctx->buf_len == 0 && (in_len & ctx->block_mask) == 0) { + if (ctx->buf_len == 0 && block_remainder(ctx, in_len) == 0) { if (ctx->cipher->cipher(ctx, out, in, in_len)) { *out_len = in_len; return 1; @@ -267,8 +282,7 @@ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, } } - i = ctx->buf_len; - bl = ctx->cipher->block_size; + int i = ctx->buf_len; assert(bl <= (int)sizeof(ctx->buf)); if (i != 0) { if (bl - i > in_len) { @@ -277,7 +291,7 @@ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, *out_len = 0; return 1; } else { - j = bl - i; + int j = bl - i; OPENSSL_memcpy(&ctx->buf[i], in, j); if (!ctx->cipher->cipher(ctx, out, ctx->buf, bl)) { return 0; @@ -291,7 +305,7 @@ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, *out_len = 0; } - i = in_len & ctx->block_mask; + i = block_remainder(ctx, in_len); in_len -= i; if (in_len > 0) { if (!ctx->cipher->cipher(ctx, out, in, in_len)) { @@ -353,8 +367,13 @@ int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) { int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len) { - int fix_len; - unsigned int b; + // Ciphers that use blocks may write up to |bl| extra bytes. Ensure the output + // does not overflow |*out_len|. 
+ unsigned int b = ctx->cipher->block_size; + if (b > 1 && in_len > INT_MAX - (int)b) { + OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW); + return 0; + } if (ctx->cipher->flags & EVP_CIPH_FLAG_CUSTOM_CIPHER) { int r = ctx->cipher->cipher(ctx, out, in, in_len); @@ -376,15 +395,12 @@ int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, return EVP_EncryptUpdate(ctx, out, out_len, in, in_len); } - b = ctx->cipher->block_size; assert(b <= sizeof(ctx->final)); - + int fix_len = 0; if (ctx->final_used) { OPENSSL_memcpy(out, ctx->final, b); out += b; fix_len = 1; - } else { - fix_len = 0; } if (!EVP_EncryptUpdate(ctx, out, out_len, in, in_len)) { @@ -613,6 +629,18 @@ int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, return EVP_CipherInit(ctx, cipher, key, iv, 0); } +int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) { + return EVP_CipherFinal_ex(ctx, out, out_len); +} + +int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) { + return EVP_EncryptFinal_ex(ctx, out, out_len); +} + +int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len) { + return EVP_DecryptFinal_ex(ctx, out, out_len); +} + int EVP_add_cipher_alias(const char *a, const char *b) { return 1; } diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/e_aes.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/e_aes.c index 0a15d1b..97091ef 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/e_aes.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/cipher/e_aes.c @@ -68,6 +68,8 @@ OPENSSL_MSVC_PRAGMA(warning(push)) OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code. 
+#define AES_GCM_NONCE_LENGTH 12 + #if defined(BSAES) static void vpaes_ctr32_encrypt_blocks_with_bsaes(const uint8_t *in, uint8_t *out, size_t blocks, @@ -139,10 +141,22 @@ typedef struct { static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { - int ret, mode; + int ret; EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + const int mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; + + if (mode == EVP_CIPH_CTR_MODE) { + switch (ctx->key_len) { + case 16: + boringssl_fips_inc_counter(fips_counter_evp_aes_128_ctr); + break; + + case 32: + boringssl_fips_inc_counter(fips_counter_evp_aes_256_ctr); + break; + } + } - mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { if (hwaes_capable()) { ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); @@ -351,6 +365,17 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, if (!iv && !key) { return 1; } + + switch (ctx->key_len) { + case 16: + boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm); + break; + + case 32: + boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm); + break; + } + if (key) { OPENSSL_memset(&gctx->gcm, 0, sizeof(gctx->gcm)); gctx->ctr = aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm.gcm_key, NULL, key, @@ -630,7 +655,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_gcm_generic) { out->nid = NID_aes_128_gcm; out->block_size = 1; out->key_len = 16; - out->iv_len = 12; + out->iv_len = AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX) + EVP_AES_GCM_CTX_PADDING; out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | @@ -698,7 +723,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_gcm_generic) { out->nid = NID_aes_192_gcm; out->block_size = 1; out->key_len = 24; - out->iv_len = 12; + out->iv_len = AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX) + EVP_AES_GCM_CTX_PADDING; out->flags 
= EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | @@ -766,7 +791,7 @@ DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_gcm_generic) { out->nid = NID_aes_256_gcm; out->block_size = 1; out->key_len = 32; - out->iv_len = 12; + out->iv_len = AES_GCM_NONCE_LENGTH; out->ctx_size = sizeof(EVP_AES_GCM_CTX) + EVP_AES_GCM_CTX_PADDING; out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | @@ -886,6 +911,16 @@ static int aead_aes_gcm_init_impl(struct aead_aes_gcm_ctx *gcm_ctx, size_t key_len, size_t tag_len) { const size_t key_bits = key_len * 8; + switch (key_bits) { + case 128: + boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm); + break; + + case 256: + boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm); + break; + } + if (key_bits != 128 && key_bits != 192 && key_bits != 256) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); return 0; // EVP_AEAD_CTX_init should catch this. 
@@ -931,21 +966,19 @@ static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) {} -static int aead_aes_gcm_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, - uint8_t *out_tag, size_t *out_tag_len, - size_t max_out_tag_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *extra_in, - size_t extra_in_len, - const uint8_t *ad, size_t ad_len) { - struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *) &ctx->state; - - if (extra_in_len + ctx->tag_len < ctx->tag_len) { +static int aead_aes_gcm_seal_scatter_impl( + const struct aead_aes_gcm_ctx *gcm_ctx, + uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *extra_in, size_t extra_in_len, + const uint8_t *ad, size_t ad_len, + size_t tag_len) { + if (extra_in_len + tag_len < tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - if (max_out_tag_len < extra_in_len + ctx->tag_len) { + if (max_out_tag_len < extra_in_len + tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -989,18 +1022,35 @@ static int aead_aes_gcm_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, } } - CRYPTO_gcm128_tag(&gcm, out_tag + extra_in_len, ctx->tag_len); - *out_tag_len = ctx->tag_len + extra_in_len; + CRYPTO_gcm128_tag(&gcm, out_tag + extra_in_len, tag_len); + *out_tag_len = tag_len + extra_in_len; return 1; } -static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *in_tag, size_t in_tag_len, - const uint8_t *ad, size_t ad_len) { - struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *) &ctx->state; +static int aead_aes_gcm_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, + uint8_t *out_tag, size_t *out_tag_len, + size_t max_out_tag_len, + const 
uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *extra_in, + size_t extra_in_len, + const uint8_t *ad, size_t ad_len) { + const struct aead_aes_gcm_ctx *gcm_ctx = + (const struct aead_aes_gcm_ctx *)&ctx->state; + return aead_aes_gcm_seal_scatter_impl( + gcm_ctx, out, out_tag, out_tag_len, max_out_tag_len, nonce, nonce_len, in, + in_len, extra_in, extra_in_len, ad, ad_len, ctx->tag_len); +} + +static int aead_aes_gcm_open_gather_impl(const struct aead_aes_gcm_ctx *gcm_ctx, + uint8_t *out, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, + size_t in_tag_len, + const uint8_t *ad, size_t ad_len, + size_t tag_len) { uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN]; if (nonce_len == 0) { @@ -1008,7 +1058,7 @@ static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - if (in_tag_len != ctx->tag_len) { + if (in_tag_len != tag_len) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } @@ -1035,8 +1085,8 @@ static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, } } - CRYPTO_gcm128_tag(&gcm, tag, ctx->tag_len); - if (CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) { + CRYPTO_gcm128_tag(&gcm, tag, tag_len); + if (CRYPTO_memcmp(tag, in_tag, tag_len) != 0) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } @@ -1044,11 +1094,22 @@ static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, return 1; } +static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, size_t in_tag_len, + const uint8_t *ad, size_t ad_len) { + struct aead_aes_gcm_ctx *gcm_ctx = (struct aead_aes_gcm_ctx *)&ctx->state; + return aead_aes_gcm_open_gather_impl(gcm_ctx, out, nonce, nonce_len, in, + in_len, in_tag, in_tag_len, ad, ad_len, + ctx->tag_len); +} + DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm) { memset(out, 0, 
sizeof(EVP_AEAD)); out->key_len = 16; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1063,7 +1124,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_192_gcm) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 24; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1078,7 +1139,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 32; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1089,6 +1150,116 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm) { out->open_gather = aead_aes_gcm_open_gather; } +static int aead_aes_gcm_init_randnonce(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, + size_t requested_tag_len) { + if (requested_tag_len != EVP_AEAD_DEFAULT_TAG_LENGTH) { + if (requested_tag_len < AES_GCM_NONCE_LENGTH) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + requested_tag_len -= AES_GCM_NONCE_LENGTH; + } + + if (!aead_aes_gcm_init(ctx, key, key_len, requested_tag_len)) { + return 0; + } + + ctx->tag_len += AES_GCM_NONCE_LENGTH; + return 1; +} + +static int aead_aes_gcm_seal_scatter_randnonce( + const EVP_AEAD_CTX *ctx, + uint8_t *out, uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, + const uint8_t *external_nonce, size_t external_nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *extra_in, size_t extra_in_len, + const uint8_t *ad, size_t ad_len) { + if (external_nonce_len != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); + return 0; + } + + uint8_t nonce[AES_GCM_NONCE_LENGTH]; + if 
(max_out_tag_len < sizeof(nonce)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + RAND_bytes(nonce, sizeof(nonce)); + const struct aead_aes_gcm_ctx *gcm_ctx = + (const struct aead_aes_gcm_ctx *)&ctx->state; + if (!aead_aes_gcm_seal_scatter_impl(gcm_ctx, out, out_tag, out_tag_len, + max_out_tag_len - AES_GCM_NONCE_LENGTH, + nonce, sizeof(nonce), in, in_len, + extra_in, extra_in_len, ad, ad_len, + ctx->tag_len - AES_GCM_NONCE_LENGTH)) { + return 0; + } + + assert(*out_tag_len + sizeof(nonce) <= max_out_tag_len); + memcpy(out_tag + *out_tag_len, nonce, sizeof(nonce)); + *out_tag_len += sizeof(nonce); + + return 1; +} + +static int aead_aes_gcm_open_gather_randnonce( + const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *external_nonce, size_t external_nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, size_t in_tag_len, + const uint8_t *ad, size_t ad_len) { + if (external_nonce_len != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); + return 0; + } + + if (in_tag_len < AES_GCM_NONCE_LENGTH) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + const uint8_t *nonce = in_tag + in_tag_len - AES_GCM_NONCE_LENGTH; + + const struct aead_aes_gcm_ctx *gcm_ctx = + (const struct aead_aes_gcm_ctx *)&ctx->state; + return aead_aes_gcm_open_gather_impl( + gcm_ctx, out, nonce, AES_GCM_NONCE_LENGTH, in, in_len, in_tag, + in_tag_len - AES_GCM_NONCE_LENGTH, ad, ad_len, + ctx->tag_len - AES_GCM_NONCE_LENGTH); +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_randnonce) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 16; + out->nonce_len = 0; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_init_randnonce; + out->cleanup = aead_aes_gcm_cleanup; + out->seal_scatter = aead_aes_gcm_seal_scatter_randnonce; + out->open_gather = 
aead_aes_gcm_open_gather_randnonce; +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_randnonce) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 32; + out->nonce_len = 0; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN + AES_GCM_NONCE_LENGTH; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_init_randnonce; + out->cleanup = aead_aes_gcm_cleanup; + out->seal_scatter = aead_aes_gcm_seal_scatter_randnonce; + out->open_gather = aead_aes_gcm_open_gather_randnonce; +} + struct aead_aes_gcm_tls12_ctx { struct aead_aes_gcm_ctx gcm_ctx; uint64_t min_next_nonce; @@ -1128,7 +1299,7 @@ static int aead_aes_gcm_tls12_seal_scatter( struct aead_aes_gcm_tls12_ctx *gcm_ctx = (struct aead_aes_gcm_tls12_ctx *) &ctx->state; - if (nonce_len != 12) { + if (nonce_len != AES_GCM_NONCE_LENGTH) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } @@ -1155,7 +1326,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_tls12) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 16; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1170,7 +1341,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_tls12) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 32; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1223,7 +1394,7 @@ static int aead_aes_gcm_tls13_seal_scatter( struct aead_aes_gcm_tls13_ctx *gcm_ctx = (struct aead_aes_gcm_tls13_ctx *) &ctx->state; - if (nonce_len != 12) { + if (nonce_len != AES_GCM_NONCE_LENGTH) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); return 0; } @@ -1261,7 +1432,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_tls13) { 
memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 16; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; @@ -1276,7 +1447,7 @@ DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_tls13) { memset(out, 0, sizeof(EVP_AEAD)); out->key_len = 32; - out->nonce_len = 12; + out->nonce_len = AES_GCM_NONCE_LENGTH; out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; out->seal_scatter_supports_extra_in = 1; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/co-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/co-586.linux.x86.S index 588c53a..8029d44 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/co-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/co-586.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.ios.aarch64.S index 9b53e28..fe9b4f4 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.ios.aarch64.S @@ -14,6 +14,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .text .globl _gcm_init_neon @@ -21,6 +23,7 @@ .align 4 _gcm_init_neon: + AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. 
ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 @@ -46,6 +49,7 @@ _gcm_init_neon: .align 4 _gcm_gmult_neon: + AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] @@ -65,6 +69,7 @@ _gcm_gmult_neon: .align 4 _gcm_ghash_neon: + AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.linux.aarch64.S index 11fa839..82944dc 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-neon-armv8.linux.aarch64.S @@ -15,6 +15,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .text .globl gcm_init_neon @@ -22,6 +24,7 @@ .type gcm_init_neon,%function .align 4 gcm_init_neon: + AARCH64_VALID_CALL_TARGET // This function is adapted from gcm_init_v8. xC2 is t3. 
ld1 {v17.2d}, [x1] // load H movi v19.16b, #0xe1 @@ -47,6 +50,7 @@ gcm_init_neon: .type gcm_gmult_neon,%function .align 4 gcm_gmult_neon: + AARCH64_VALID_CALL_TARGET ld1 {v3.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] @@ -66,6 +70,7 @@ gcm_gmult_neon: .type gcm_ghash_neon,%function .align 4 gcm_ghash_neon: + AARCH64_VALID_CALL_TARGET ld1 {v0.16b}, [x0] // load Xi ld1 {v5.1d}, [x1], #8 // load twisted H ld1 {v6.1d}, [x1] diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86.linux.x86.S index 2899ac4..e9d7e4b 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.linux.x86_64.S index df19521..b99377c 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.mac.x86_64.S index 1cf5894..232d78a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-ssse3-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86.linux.x86.S index b212cf4..c4f837f 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.linux.x86_64.S index 3d4dbd2..421251d 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.mac.x86_64.S index 3f540b2..367869f 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghash-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.ios.arm.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.ios.arm.S index d00df7f..c74e753 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.ios.arm.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.ios.arm.S @@ -16,6 +16,7 @@ #endif #include +#if __ARM_MAX_ARCH__>=7 .text .code 32 @@ -27,6 +28,7 @@ #endif .align 4 _gcm_init_v8: + AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r1] @ load input H vmov.i8 q11,#0xe1 vshl.i64 q11,q11,#57 @ 0xc2.0 @@ -69,8 +71,7 @@ _gcm_init_v8: vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing veor q9,q9,q14 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed - vst1.64 {q13,q14},[r0] @ store Htable[1..2] - + vst1.64 {q13,q14},[r0]! @ store Htable[1..2] bx lr .globl _gcm_gmult_v8 @@ -80,6 +81,7 @@ _gcm_init_v8: #endif .align 4 _gcm_gmult_v8: + AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r0] @ load Xi vmov.i8 q11,#0xe1 vld1.64 {q12,q13},[r1] @ load twisted H, ... 
@@ -124,6 +126,7 @@ _gcm_gmult_v8: #endif .align 4 _gcm_ghash_v8: + AARCH64_VALID_CALL_TARGET vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so vld1.64 {q0},[r0] @ load [rotated] Xi @ "[rotated]" means that @@ -255,6 +258,7 @@ Ldone_v8: .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 +#endif #endif // !OPENSSL_NO_ASM #endif // defined(__arm__) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.linux.arm.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.linux.arm.S index 8bff7d2..f5cde89 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.linux.arm.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx32.linux.arm.S @@ -17,6 +17,7 @@ #endif #include +#if __ARM_MAX_ARCH__>=7 .text .fpu neon .code 32 @@ -26,6 +27,7 @@ .type gcm_init_v8,%function .align 4 gcm_init_v8: + AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r1] @ load input H vmov.i8 q11,#0xe1 vshl.i64 q11,q11,#57 @ 0xc2.0 @@ -68,8 +70,7 @@ gcm_init_v8: vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing veor q9,q9,q14 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed - vst1.64 {q13,q14},[r0] @ store Htable[1..2] - + vst1.64 {q13,q14},[r0]! @ store Htable[1..2] bx lr .size gcm_init_v8,.-gcm_init_v8 .globl gcm_gmult_v8 @@ -77,6 +78,7 @@ gcm_init_v8: .type gcm_gmult_v8,%function .align 4 gcm_gmult_v8: + AARCH64_VALID_CALL_TARGET vld1.64 {q9},[r0] @ load Xi vmov.i8 q11,#0xe1 vld1.64 {q12,q13},[r1] @ load twisted H, ... 
@@ -119,6 +121,7 @@ gcm_gmult_v8: .type gcm_ghash_v8,%function .align 4 gcm_ghash_v8: + AARCH64_VALID_CALL_TARGET vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so vld1.64 {q0},[r0] @ load [rotated] Xi @ "[rotated]" means that @@ -251,6 +254,7 @@ gcm_ghash_v8: .align 2 .align 2 #endif +#endif #endif // !OPENSSL_NO_ASM .section .note.GNU-stack,"",%progbits #endif // defined(__arm__) && defined(__linux__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.ios.aarch64.S index 1713643..b0f9e54 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.ios.aarch64.S @@ -16,6 +16,7 @@ #endif #include +#if __ARM_MAX_ARCH__>=7 .text .globl _gcm_init_v8 @@ -23,6 +24,7 @@ .align 4 _gcm_init_v8: + AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 @@ -65,8 +67,48 @@ _gcm_init_v8: ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed - st1 {v21.2d,v22.2d},[x0] //store Htable[1..2] + st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] + //calculate H^3 and H^4 + pmull v0.1q,v20.1d, v22.1d + pmull v5.1q,v22.1d,v22.1d + pmull2 v2.1q,v20.2d, v22.2d + pmull2 v7.1q,v22.2d,v22.2d + pmull v1.1q,v16.1d,v17.1d + pmull v6.1q,v17.1d,v17.1d + ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + ext v17.16b,v5.16b,v7.16b,#8 + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v16.16b + eor v4.16b,v5.16b,v7.16b + eor v6.16b,v6.16b,v17.16b + eor v1.16b,v1.16b,v18.16b + pmull v18.1q,v0.1d,v19.1d //1st phase + eor v6.16b,v6.16b,v4.16b + pmull v4.1q,v5.1d,v19.1d + + ins v2.d[0],v1.d[1] + ins v7.d[0],v6.d[1] + ins v1.d[1],v0.d[0] + ins v6.d[1],v5.d[0] + eor v0.16b,v1.16b,v18.16b + eor v5.16b,v6.16b,v4.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase + 
ext v4.16b,v5.16b,v5.16b,#8 + pmull v0.1q,v0.1d,v19.1d + pmull v5.1q,v5.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v4.16b,v4.16b,v7.16b + eor v20.16b, v0.16b,v18.16b //H^3 + eor v22.16b,v5.16b,v4.16b //H^4 + + ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing + ext v17.16b,v22.16b,v22.16b,#8 + eor v16.16b,v16.16b,v20.16b + eor v17.16b,v17.16b,v22.16b + ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed + st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .globl _gcm_gmult_v8 @@ -74,6 +116,7 @@ _gcm_init_v8: .align 4 _gcm_gmult_v8: + AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... @@ -116,6 +159,9 @@ _gcm_gmult_v8: .align 4 _gcm_ghash_v8: + AARCH64_VALID_CALL_TARGET + cmp x3,#64 + b.hs Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have @@ -242,9 +288,290 @@ Ldone_v8: ret + +.align 4 +gcm_ghash_v8_4x: +Lgcm_ghash_v8_4x: + ld1 {v0.2d},[x0] //load [rotated] Xi + ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 + movi v19.16b,#0xe1 + ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 + shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant + + ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 +#ifndef __ARMEB__ + rev64 v0.16b,v0.16b + rev64 v5.16b,v5.16b + rev64 v6.16b,v6.16b + rev64 v7.16b,v7.16b + rev64 v4.16b,v4.16b +#endif + ext v25.16b,v7.16b,v7.16b,#8 + ext v24.16b,v6.16b,v6.16b,#8 + ext v23.16b,v5.16b,v5.16b,#8 + + pmull v29.1q,v20.1d,v25.1d //H·Ii+3 + eor v7.16b,v7.16b,v25.16b + pmull2 v31.1q,v20.2d,v25.2d + pmull v30.1q,v21.1d,v7.1d + + pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 + eor v6.16b,v6.16b,v24.16b + pmull2 v24.1q,v22.2d,v24.2d + pmull2 v6.1q,v21.2d,v6.2d + + eor v29.16b,v29.16b,v16.16b + eor v31.16b,v31.16b,v24.16b + eor v30.16b,v30.16b,v6.16b + + pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 + eor v5.16b,v5.16b,v23.16b + pmull2 v23.1q,v26.2d,v23.2d + pmull v5.1q,v27.1d,v5.1d + + eor v29.16b,v29.16b,v7.16b + 
eor v31.16b,v31.16b,v23.16b + eor v30.16b,v30.16b,v5.16b + + subs x3,x3,#128 + b.lo Ltail4x + + b Loop4x + +.align 4 +Loop4x: + eor v16.16b,v4.16b,v0.16b + ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 + ext v3.16b,v16.16b,v16.16b,#8 +#ifndef __ARMEB__ + rev64 v5.16b,v5.16b + rev64 v6.16b,v6.16b + rev64 v7.16b,v7.16b + rev64 v4.16b,v4.16b +#endif + + pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v28.2d,v3.2d + ext v25.16b,v7.16b,v7.16b,#8 + pmull2 v1.1q,v27.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + ext v24.16b,v6.16b,v6.16b,#8 + eor v1.16b,v1.16b,v30.16b + ext v23.16b,v5.16b,v5.16b,#8 + + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + pmull v29.1q,v20.1d,v25.1d //H·Ii+3 + eor v7.16b,v7.16b,v25.16b + eor v1.16b,v1.16b,v17.16b + pmull2 v31.1q,v20.2d,v25.2d + eor v1.16b,v1.16b,v18.16b + pmull v30.1q,v21.1d,v7.1d + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 + eor v6.16b,v6.16b,v24.16b + pmull2 v24.1q,v22.2d,v24.2d + eor v0.16b,v1.16b,v18.16b + pmull2 v6.1q,v21.2d,v6.2d + + eor v29.16b,v29.16b,v16.16b + eor v31.16b,v31.16b,v24.16b + eor v30.16b,v30.16b,v6.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 + eor v5.16b,v5.16b,v23.16b + eor v18.16b,v18.16b,v2.16b + pmull2 v23.1q,v26.2d,v23.2d + pmull v5.1q,v27.1d,v5.1d + + eor v0.16b,v0.16b,v18.16b + eor v29.16b,v29.16b,v7.16b + eor v31.16b,v31.16b,v23.16b + ext v0.16b,v0.16b,v0.16b,#8 + eor v30.16b,v30.16b,v5.16b + + subs x3,x3,#64 + b.hs Loop4x + +Ltail4x: + eor v16.16b,v4.16b,v0.16b + ext v3.16b,v16.16b,v16.16b,#8 + + pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v28.2d,v3.2d + pmull2 v1.1q,v27.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + + adds x3,x3,#64 + b.eq 
Ldone4x + + cmp x3,#32 + b.lo Lone + b.eq Ltwo +Lthree: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d,v5.2d,v6.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 v5.16b,v5.16b + rev64 v6.16b,v6.16b + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + ext v24.16b,v6.16b,v6.16b,#8 + ext v23.16b,v5.16b,v5.16b,#8 + eor v0.16b,v1.16b,v18.16b + + pmull v29.1q,v20.1d,v24.1d //H·Ii+2 + eor v6.16b,v6.16b,v24.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + pmull2 v31.1q,v20.2d,v24.2d + pmull v30.1q,v21.1d,v6.1d + eor v0.16b,v0.16b,v18.16b + pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 + eor v5.16b,v5.16b,v23.16b + ext v0.16b,v0.16b,v0.16b,#8 + + pmull2 v23.1q,v22.2d,v23.2d + eor v16.16b,v4.16b,v0.16b + pmull2 v5.1q,v21.2d,v5.2d + ext v3.16b,v16.16b,v16.16b,#8 + + eor v29.16b,v29.16b,v7.16b + eor v31.16b,v31.16b,v23.16b + eor v30.16b,v30.16b,v5.16b + + pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v26.2d,v3.2d + pmull v1.1q,v27.1d,v16.1d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + b Ldone4x + +.align 4 +Ltwo: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d,v5.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 v5.16b,v5.16b + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + ext v23.16b,v5.16b,v5.16b,#8 + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + + pmull v29.1q,v20.1d,v23.1d //H·Ii+1 + eor v5.16b,v5.16b,v23.16b + + eor v16.16b,v4.16b,v0.16b 
+ ext v3.16b,v16.16b,v16.16b,#8 + + pmull2 v31.1q,v20.2d,v23.2d + pmull v30.1q,v21.1d,v5.1d + + pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v22.2d,v3.2d + pmull2 v1.1q,v21.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + b Ldone4x + +.align 4 +Lone: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + + eor v16.16b,v4.16b,v0.16b + ext v3.16b,v16.16b,v16.16b,#8 + + pmull v0.1q,v20.1d,v3.1d + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v20.2d,v3.2d + pmull v1.1q,v21.1d,v16.1d + +Ldone4x: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + eor v1.16b,v1.16b,v18.16b + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + +#ifndef __ARMEB__ + rev64 v0.16b,v0.16b +#endif + st1 {v0.2d},[x0] //write out Xi + + ret + .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 +#endif #endif // !OPENSSL_NO_ASM #endif // defined(__aarch64__) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.linux.aarch64.S 
b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.linux.aarch64.S index ad8788c..79f58f8 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/ghashv8-armx64.linux.aarch64.S @@ -17,6 +17,7 @@ #endif #include +#if __ARM_MAX_ARCH__>=7 .text .arch armv8-a+crypto .globl gcm_init_v8 @@ -24,6 +25,7 @@ .type gcm_init_v8,%function .align 4 gcm_init_v8: + AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x1] //load input H movi v19.16b,#0xe1 shl v19.2d,v19.2d,#57 //0xc2.0 @@ -66,8 +68,48 @@ gcm_init_v8: ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing eor v17.16b,v17.16b,v22.16b ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed - st1 {v21.2d,v22.2d},[x0] //store Htable[1..2] + st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2] + //calculate H^3 and H^4 + pmull v0.1q,v20.1d, v22.1d + pmull v5.1q,v22.1d,v22.1d + pmull2 v2.1q,v20.2d, v22.2d + pmull2 v7.1q,v22.2d,v22.2d + pmull v1.1q,v16.1d,v17.1d + pmull v6.1q,v17.1d,v17.1d + ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + ext v17.16b,v5.16b,v7.16b,#8 + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v16.16b + eor v4.16b,v5.16b,v7.16b + eor v6.16b,v6.16b,v17.16b + eor v1.16b,v1.16b,v18.16b + pmull v18.1q,v0.1d,v19.1d //1st phase + eor v6.16b,v6.16b,v4.16b + pmull v4.1q,v5.1d,v19.1d + + ins v2.d[0],v1.d[1] + ins v7.d[0],v6.d[1] + ins v1.d[1],v0.d[0] + ins v6.d[1],v5.d[0] + eor v0.16b,v1.16b,v18.16b + eor v5.16b,v6.16b,v4.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase + ext v4.16b,v5.16b,v5.16b,#8 + pmull v0.1q,v0.1d,v19.1d + pmull v5.1q,v5.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v4.16b,v4.16b,v7.16b + eor v20.16b, v0.16b,v18.16b //H^3 + eor v22.16b,v5.16b,v4.16b //H^4 + + ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing + ext v17.16b,v22.16b,v22.16b,#8 + eor v16.16b,v16.16b,v20.16b + eor v17.16b,v17.16b,v22.16b + ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed + st1 
{v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5] ret .size gcm_init_v8,.-gcm_init_v8 .globl gcm_gmult_v8 @@ -75,6 +117,7 @@ gcm_init_v8: .type gcm_gmult_v8,%function .align 4 gcm_gmult_v8: + AARCH64_VALID_CALL_TARGET ld1 {v17.2d},[x0] //load Xi movi v19.16b,#0xe1 ld1 {v20.2d,v21.2d},[x1] //load twisted H, ... @@ -117,6 +160,9 @@ gcm_gmult_v8: .type gcm_ghash_v8,%function .align 4 gcm_ghash_v8: + AARCH64_VALID_CALL_TARGET + cmp x3,#64 + b.hs .Lgcm_ghash_v8_4x ld1 {v0.2d},[x0] //load [rotated] Xi //"[rotated]" means that //loaded value would have @@ -243,10 +289,291 @@ gcm_ghash_v8: ret .size gcm_ghash_v8,.-gcm_ghash_v8 +.type gcm_ghash_v8_4x,%function +.align 4 +gcm_ghash_v8_4x: +.Lgcm_ghash_v8_4x: + ld1 {v0.2d},[x0] //load [rotated] Xi + ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2 + movi v19.16b,#0xe1 + ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4 + shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant + + ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 +#ifndef __ARMEB__ + rev64 v0.16b,v0.16b + rev64 v5.16b,v5.16b + rev64 v6.16b,v6.16b + rev64 v7.16b,v7.16b + rev64 v4.16b,v4.16b +#endif + ext v25.16b,v7.16b,v7.16b,#8 + ext v24.16b,v6.16b,v6.16b,#8 + ext v23.16b,v5.16b,v5.16b,#8 + + pmull v29.1q,v20.1d,v25.1d //H·Ii+3 + eor v7.16b,v7.16b,v25.16b + pmull2 v31.1q,v20.2d,v25.2d + pmull v30.1q,v21.1d,v7.1d + + pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 + eor v6.16b,v6.16b,v24.16b + pmull2 v24.1q,v22.2d,v24.2d + pmull2 v6.1q,v21.2d,v6.2d + + eor v29.16b,v29.16b,v16.16b + eor v31.16b,v31.16b,v24.16b + eor v30.16b,v30.16b,v6.16b + + pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 + eor v5.16b,v5.16b,v23.16b + pmull2 v23.1q,v26.2d,v23.2d + pmull v5.1q,v27.1d,v5.1d + + eor v29.16b,v29.16b,v7.16b + eor v31.16b,v31.16b,v23.16b + eor v30.16b,v30.16b,v5.16b + + subs x3,x3,#128 + b.lo .Ltail4x + + b .Loop4x + +.align 4 +.Loop4x: + eor v16.16b,v4.16b,v0.16b + ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64 + ext v3.16b,v16.16b,v16.16b,#8 +#ifndef __ARMEB__ + rev64 v5.16b,v5.16b + 
rev64 v6.16b,v6.16b + rev64 v7.16b,v7.16b + rev64 v4.16b,v4.16b +#endif + + pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v28.2d,v3.2d + ext v25.16b,v7.16b,v7.16b,#8 + pmull2 v1.1q,v27.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + ext v24.16b,v6.16b,v6.16b,#8 + eor v1.16b,v1.16b,v30.16b + ext v23.16b,v5.16b,v5.16b,#8 + + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + pmull v29.1q,v20.1d,v25.1d //H·Ii+3 + eor v7.16b,v7.16b,v25.16b + eor v1.16b,v1.16b,v17.16b + pmull2 v31.1q,v20.2d,v25.2d + eor v1.16b,v1.16b,v18.16b + pmull v30.1q,v21.1d,v7.1d + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2 + eor v6.16b,v6.16b,v24.16b + pmull2 v24.1q,v22.2d,v24.2d + eor v0.16b,v1.16b,v18.16b + pmull2 v6.1q,v21.2d,v6.2d + + eor v29.16b,v29.16b,v16.16b + eor v31.16b,v31.16b,v24.16b + eor v30.16b,v30.16b,v6.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1 + eor v5.16b,v5.16b,v23.16b + eor v18.16b,v18.16b,v2.16b + pmull2 v23.1q,v26.2d,v23.2d + pmull v5.1q,v27.1d,v5.1d + + eor v0.16b,v0.16b,v18.16b + eor v29.16b,v29.16b,v7.16b + eor v31.16b,v31.16b,v23.16b + ext v0.16b,v0.16b,v0.16b,#8 + eor v30.16b,v30.16b,v5.16b + + subs x3,x3,#64 + b.hs .Loop4x + +.Ltail4x: + eor v16.16b,v4.16b,v0.16b + ext v3.16b,v16.16b,v16.16b,#8 + + pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v28.2d,v3.2d + pmull2 v1.1q,v27.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + + adds x3,x3,#64 + b.eq .Ldone4x + + cmp x3,#32 + b.lo .Lone + b.eq .Ltwo +.Lthree: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d,v5.2d,v6.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 
v5.16b,v5.16b + rev64 v6.16b,v6.16b + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + ext v24.16b,v6.16b,v6.16b,#8 + ext v23.16b,v5.16b,v5.16b,#8 + eor v0.16b,v1.16b,v18.16b + + pmull v29.1q,v20.1d,v24.1d //H·Ii+2 + eor v6.16b,v6.16b,v24.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + pmull2 v31.1q,v20.2d,v24.2d + pmull v30.1q,v21.1d,v6.1d + eor v0.16b,v0.16b,v18.16b + pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1 + eor v5.16b,v5.16b,v23.16b + ext v0.16b,v0.16b,v0.16b,#8 + + pmull2 v23.1q,v22.2d,v23.2d + eor v16.16b,v4.16b,v0.16b + pmull2 v5.1q,v21.2d,v5.2d + ext v3.16b,v16.16b,v16.16b,#8 + + eor v29.16b,v29.16b,v7.16b + eor v31.16b,v31.16b,v23.16b + eor v30.16b,v30.16b,v5.16b + + pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v26.2d,v3.2d + pmull v1.1q,v27.1d,v16.1d + + eor v0.16b,v0.16b,v29.16b + eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + b .Ldone4x + +.align 4 +.Ltwo: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d,v5.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 v5.16b,v5.16b + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + ext v23.16b,v5.16b,v5.16b,#8 + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + + pmull v29.1q,v20.1d,v23.1d //H·Ii+1 + eor v5.16b,v5.16b,v23.16b + + eor v16.16b,v4.16b,v0.16b + ext v3.16b,v16.16b,v16.16b,#8 + + pmull2 v31.1q,v20.2d,v23.2d + pmull v30.1q,v21.1d,v5.1d + + pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii) + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v22.2d,v3.2d + pmull2 v1.1q,v21.2d,v16.2d + + eor v0.16b,v0.16b,v29.16b + 
eor v2.16b,v2.16b,v31.16b + eor v1.16b,v1.16b,v30.16b + b .Ldone4x + +.align 4 +.Lone: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + ld1 {v4.2d},[x2] + eor v1.16b,v1.16b,v18.16b +#ifndef __ARMEB__ + rev64 v4.16b,v4.16b +#endif + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + + eor v16.16b,v4.16b,v0.16b + ext v3.16b,v16.16b,v16.16b,#8 + + pmull v0.1q,v20.1d,v3.1d + eor v16.16b,v16.16b,v3.16b + pmull2 v2.1q,v20.2d,v3.2d + pmull v1.1q,v21.1d,v16.1d + +.Ldone4x: + ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing + eor v18.16b,v0.16b,v2.16b + eor v1.16b,v1.16b,v17.16b + eor v1.16b,v1.16b,v18.16b + + pmull v18.1q,v0.1d,v19.1d //1st phase of reduction + ins v2.d[0],v1.d[1] + ins v1.d[1],v0.d[0] + eor v0.16b,v1.16b,v18.16b + + ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction + pmull v0.1q,v0.1d,v19.1d + eor v18.16b,v18.16b,v2.16b + eor v0.16b,v0.16b,v18.16b + ext v0.16b,v0.16b,v0.16b,#8 + +#ifndef __ARMEB__ + rev64 v0.16b,v0.16b +#endif + st1 {v0.2d},[x0] //write out Xi + + ret +.size gcm_ghash_v8_4x,.-gcm_ghash_v8_4x .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 #endif +#endif #endif // !OPENSSL_NO_ASM .section .note.GNU-stack,"",%progbits #endif // defined(__aarch64__) && defined(__linux__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-586.linux.x86.S index 085d1f5..9b4ab06 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-586.linux.x86.S 
@@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.linux.x86_64.S index 1959b2a..d68fa7e 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.mac.x86_64.S index 787bdc8..7b6e6bd 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/md5-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cbc.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cbc.c index 9c11d0e..6e8b8d5 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cbc.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cbc.c @@ -52,20 +52,25 @@ #include #include "internal.h" +#include "../../internal.h" void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], block128_f block) { + assert(key != NULL && ivec != NULL); + if (len == 0) { + // Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C. + return; + } + + assert(in != NULL && out != NULL); size_t n; const uint8_t *iv = ivec; - - assert(key != NULL && ivec != NULL); - assert(len == 0 || (in != NULL && out != NULL)); - while (len >= 16) { - for (n = 0; n < 16; n += sizeof(size_t)) { - store_word_le(out + n, load_word_le(in + n) ^ load_word_le(iv + n)); + for (n = 0; n < 16; n += sizeof(crypto_word_t)) { + CRYPTO_store_word_le( + out + n, CRYPTO_load_word_le(in + n) ^ CRYPTO_load_word_le(iv + n)); } (*block)(out, out, key); iv = out; @@ -97,30 +102,36 @@ void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], block128_f block) { - size_t n; - union { - size_t t[16 / sizeof(size_t)]; - uint8_t c[16]; - } tmp; - assert(key != NULL && ivec != NULL); - assert(len == 0 || (in != NULL && out != NULL)); + if (len == 0) { + // Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C. + return; + } + + assert(in != NULL && out != NULL); const uintptr_t inptr = (uintptr_t) in; const uintptr_t outptr = (uintptr_t) out; // If |in| and |out| alias, |in| must be ahead. 
assert(inptr >= outptr || inptr + len <= outptr); + size_t n; + union { + crypto_word_t t[16 / sizeof(crypto_word_t)]; + uint8_t c[16]; + } tmp; + if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) { // If |out| is at least two blocks behind |in| or completely disjoint, there // is no need to decrypt to a temporary block. - OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0, + OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0, "block cannot be evenly divided into words"); const uint8_t *iv = ivec; while (len >= 16) { (*block)(in, out, key); - for (n = 0; n < 16; n += sizeof(size_t)) { - store_word_le(out + n, load_word_le(out + n) ^ load_word_le(iv + n)); + for (n = 0; n < 16; n += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(out + n) ^ + CRYPTO_load_word_le(iv + n)); } iv = in; len -= 16; @@ -129,16 +140,16 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, } OPENSSL_memcpy(ivec, iv, 16); } else { - OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0, + OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0, "block cannot be evenly divided into words"); while (len >= 16) { (*block)(in, tmp.c, key); - for (n = 0; n < 16; n += sizeof(size_t)) { - size_t c = load_word_le(in + n); - store_word_le(out + n, - tmp.t[n / sizeof(size_t)] ^ load_word_le(ivec + n)); - store_word_le(ivec + n, c); + for (n = 0; n < 16; n += sizeof(crypto_word_t)) { + crypto_word_t c = CRYPTO_load_word_le(in + n); + CRYPTO_store_word_le(out + n, tmp.t[n / sizeof(crypto_word_t)] ^ + CRYPTO_load_word_le(ivec + n)); + CRYPTO_store_word_le(ivec + n, c); } len -= 16; in += 16; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cfb.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cfb.c index 93ebfa6..7f2b312 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cfb.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/cfb.c @@ -72,10 +72,11 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, } 
while (len >= 16) { (*block)(ivec, ivec, key); - for (; n < 16; n += sizeof(size_t)) { - size_t tmp = load_word_le(ivec + n) ^ load_word_le(in + n); - store_word_le(ivec + n, tmp); - store_word_le(out + n, tmp); + for (; n < 16; n += sizeof(crypto_word_t)) { + crypto_word_t tmp = + CRYPTO_load_word_le(ivec + n) ^ CRYPTO_load_word_le(in + n); + CRYPTO_store_word_le(ivec + n, tmp); + CRYPTO_store_word_le(out + n, tmp); } len -= 16; out += 16; @@ -101,10 +102,10 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, } while (len >= 16) { (*block)(ivec, ivec, key); - for (; n < 16; n += sizeof(size_t)) { - size_t t = load_word_le(in + n); - store_word_le(out + n, load_word_le(ivec + n) ^ t); - store_word_le(ivec + n, t); + for (; n < 16; n += sizeof(crypto_word_t)) { + crypto_word_t t = CRYPTO_load_word_le(in + n); + CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(ivec + n) ^ t); + CRYPTO_store_word_le(ivec + n, t); } len -= 16; out += 16; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ctr.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ctr.c index 99cf57c..80df78a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ctr.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ctr.c @@ -52,6 +52,7 @@ #include #include "internal.h" +#include "../../internal.h" // NOTE: the IV/counter CTR mode is big-endian. The code itself @@ -69,8 +70,8 @@ static void ctr128_inc(uint8_t *counter) { } while (n); } -OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0, - "block cannot be divided into size_t"); +OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0, + "block cannot be divided into crypto_word_t"); // The input encrypted as though 128bit counter mode is being used. 
The extra // state information to record how much of the 128bit block we have used is @@ -102,9 +103,9 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, while (len >= 16) { (*block)(ivec, ecount_buf, key); ctr128_inc(ivec); - for (n = 0; n < 16; n += sizeof(size_t)) { - store_word_le(out + n, - load_word_le(in + n) ^ load_word_le(ecount_buf + n)); + for (n = 0; n < 16; n += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(in + n) ^ + CRYPTO_load_word_le(ecount_buf + n)); } len -= 16; out += 16; @@ -152,7 +153,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, n = (n + 1) % 16; } - ctr32 = GETU32(ivec + 12); + ctr32 = CRYPTO_load_u32_be(ivec + 12); while (len >= 16) { size_t blocks = len / 16; // 1<<28 is just a not-so-small yet not-so-large number... @@ -172,7 +173,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, } (*func)(in, out, blocks, key, ivec); // (*func) does not update ivec, caller does: - PUTU32(ivec + 12, ctr32); + CRYPTO_store_u32_be(ivec + 12, ctr32); // ... overflow was detected, propogate carry. 
if (ctr32 == 0) { ctr96_inc(ivec); @@ -186,7 +187,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, OPENSSL_memset(ecount_buf, 0, 16); (*func)(ecount_buf, ecount_buf, 1, key, ivec); ++ctr32; - PUTU32(ivec + 12, ctr32); + CRYPTO_store_u32_be(ivec + 12, ctr32); if (ctr32 == 0) { ctr96_inc(ivec); } diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm.c index d263a59..4c751d6 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm.c @@ -73,7 +73,7 @@ static const size_t kSizeTWithoutLower4Bits = (size_t) -16; #if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86) static inline void gcm_reduce_1bit(u128 *V) { - if (sizeof(size_t) == 8) { + if (sizeof(crypto_word_t) == 8) { uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->hi & 1)); V->hi = (V->lo << 63) | (V->hi >> 1); V->lo = (V->lo >> 1) ^ T; @@ -377,9 +377,10 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; ctx->Yi.d[3] = CRYPTO_bswap4(ctr); - for (size_t i = 0; i < 16; i += sizeof(size_t)) { - store_word_le(out + i, - load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); + for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + i, + CRYPTO_load_word_le(in + i) ^ + ctx->EKi.t[i / sizeof(crypto_word_t)]); } out += 16; in += 16; @@ -394,9 +395,10 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; ctx->Yi.d[3] = CRYPTO_bswap4(ctr); - for (size_t i = 0; i < 16; i += sizeof(size_t)) { - store_word_le(out + i, - load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); + for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + i, + CRYPTO_load_word_le(in + i) ^ + ctx->EKi.t[i / sizeof(crypto_word_t)]); } out += 16; in += 16; @@ -468,9 +470,10 @@ int 
CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; ctx->Yi.d[3] = CRYPTO_bswap4(ctr); - for (size_t i = 0; i < 16; i += sizeof(size_t)) { - store_word_le(out + i, - load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); + for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + i, + CRYPTO_load_word_le(in + i) ^ + ctx->EKi.t[i / sizeof(crypto_word_t)]); } out += 16; in += 16; @@ -485,9 +488,10 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key, (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; ctx->Yi.d[3] = CRYPTO_bswap4(ctr); - for (size_t i = 0; i < 16; i += sizeof(size_t)) { - store_word_le(out + i, - load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); + for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) { + CRYPTO_store_word_le(out + i, + CRYPTO_load_word_le(in + i) ^ + ctx->EKi.t[i / sizeof(crypto_word_t)]); } out += 16; in += 16; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm_nohw.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm_nohw.c index a35fd4e..54286fc 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm_nohw.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/gcm_nohw.c @@ -193,7 +193,7 @@ static void gcm_mul64_nohw(uint64_t *out_lo, uint64_t *out_hi, uint64_t a, #endif // BORINGSSL_HAS_UINT128 void gcm_init_nohw(u128 Htable[16], const uint64_t Xi[2]) { - // We implement GHASH in terms of POLYVAL, as described in RFC8452. This + // We implement GHASH in terms of POLYVAL, as described in RFC 8452. This // avoids a shift by 1 in the multiplication, needed to account for bit // reversal losing a bit after multiplication, that is, // rev128(X) * rev128(Y) = rev255(X*Y). 
diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/internal.h b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/internal.h index db6dc35..83b58d4 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/internal.h +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/internal.h @@ -64,27 +64,6 @@ extern "C" { #endif -static inline uint32_t GETU32(const void *in) { - uint32_t v; - OPENSSL_memcpy(&v, in, sizeof(v)); - return CRYPTO_bswap4(v); -} - -static inline void PUTU32(void *out, uint32_t v) { - v = CRYPTO_bswap4(v); - OPENSSL_memcpy(out, &v, sizeof(v)); -} - -static inline size_t load_word_le(const void *in) { - size_t v; - OPENSSL_memcpy(&v, in, sizeof(v)); - return v; -} - -static inline void store_word_le(void *out, size_t v) { - OPENSSL_memcpy(out, &v, sizeof(v)); -} - // block128_f is the type of an AES block cipher implementation. // // Unlike upstream OpenSSL, it and the other functions in this file hard-code @@ -171,7 +150,7 @@ typedef struct { uint64_t u[2]; uint32_t d[4]; uint8_t c[16]; - size_t t[16 / sizeof(size_t)]; + crypto_word_t t[16 / sizeof(crypto_word_t)]; } Yi, EKi, EK0, len, Xi; // Note that the order of |Xi| and |gcm_key| is fixed by the MOVBE-based, diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ofb.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ofb.c index 5e1d899..994ee4e 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ofb.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/modes/ofb.c @@ -60,7 +60,8 @@ OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0, void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[16], unsigned *num, block128_f block) { - assert(in && out && key && ivec && num); + assert(key != NULL && ivec != NULL && num != NULL); + assert(len == 0 || (in != NULL && out != NULL)); unsigned n = *num; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.linux.x86_64.S 
b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.linux.x86_64.S index 6b111fd..e0558e5 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.mac.x86_64.S index 462abc0..3bbadfa 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256-x86_64-asm.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.linux.x86_64.S index 832af95..446e286 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.mac.x86_64.S index 7de187a..1ee437b 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/p256_beeu-x86_64-asm.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/internal.h b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/internal.h index 0f2d074..a4c42b2 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/internal.h +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/internal.h @@ -36,16 +36,45 @@ extern "C" { void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, const uint8_t user_additional_data[32]); +#if defined(BORINGSSL_FIPS) + +// We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to whiten. +#define BORINGSSL_FIPS_OVERREAD 10 + +// CRYPTO_get_seed_entropy writes |out_entropy_len| bytes of entropy, suitable +// for seeding a DRBG, to |out_entropy|. It sets |*out_used_cpu| to one if the +// entropy came directly from the CPU and zero if it came from the OS. It +// actively obtains entropy from the CPU/OS and so should not be called from +// within the FIPS module. +void CRYPTO_get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len, + int *out_used_cpu); + +// RAND_load_entropy supplies |entropy_len| bytes of entropy to the module. The +// |from_cpu| parameter is true iff the entropy was obtained directly from the +// CPU. +void RAND_load_entropy(const uint8_t *entropy, size_t entropy_len, + int from_cpu); + +// RAND_need_entropy is implemented outside of the FIPS module and is called +// when the module has stopped because it has run out of entropy. +void RAND_need_entropy(size_t bytes_needed); + +#endif // BORINGSSL_FIPS + // CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating // system. void CRYPTO_sysrand(uint8_t *buf, size_t len); -#if defined(OPENSSL_URANDOM) // CRYPTO_sysrand_for_seed fills |len| bytes at |buf| with entropy from the // operating system. It may draw from the |GRND_RANDOM| pool on Android, // depending on the vendor's configuration. 
void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len); +#if defined(OPENSSL_URANDOM) +// CRYPTO_init_sysrand initializes long-lived resources needed to draw entropy +// from the operating system. +void CRYPTO_init_sysrand(void); + // CRYPTO_sysrand_if_available fills |len| bytes at |buf| with entropy from the // operating system, or early /dev/urandom data, and returns 1, _if_ the entropy // pool is initialized or if getrandom() is not available and not in FIPS mode. @@ -53,9 +82,7 @@ void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len); // return 0. int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len); #else -OPENSSL_INLINE void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len) { - CRYPTO_sysrand(buf, len); -} +OPENSSL_INLINE void CRYPTO_init_sysrand(void) {} OPENSSL_INLINE int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) { CRYPTO_sysrand(buf, len); diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/rand.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/rand.c index 30dda4d..f95fc70 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/rand.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/rand.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "internal.h" #include "fork_detect.h" @@ -63,11 +64,11 @@ struct rand_thread_state { // (re)seeded. This is bound by |kReseedInterval|. unsigned calls; // last_block_valid is non-zero iff |last_block| contains data from - // |CRYPTO_sysrand_for_seed|. + // |get_seed_entropy|. int last_block_valid; #if defined(BORINGSSL_FIPS) - // last_block contains the previous block from |CRYPTO_sysrand_for_seed|. + // last_block contains the previous block from |get_seed_entropy|. uint8_t last_block[CRNGT_BLOCK_SIZE]; // next and prev form a NULL-terminated, double-linked list of all states in // a process. @@ -82,16 +83,18 @@ struct rand_thread_state { // called when the whole process is exiting. 
DEFINE_BSS_GET(struct rand_thread_state *, thread_states_list); DEFINE_STATIC_MUTEX(thread_states_list_lock); +DEFINE_STATIC_MUTEX(state_clear_all_lock); static void rand_thread_state_clear_all(void) __attribute__((destructor)); static void rand_thread_state_clear_all(void) { CRYPTO_STATIC_MUTEX_lock_write(thread_states_list_lock_bss_get()); + CRYPTO_STATIC_MUTEX_lock_write(state_clear_all_lock_bss_get()); for (struct rand_thread_state *cur = *thread_states_list_bss_get(); cur != NULL; cur = cur->next) { CTR_DRBG_clear(&cur->drbg); } - // |thread_states_list_lock is deliberately left locked so that any threads - // that are still running will hang if they try to call |RAND_bytes|. + // The locks are deliberately left locked so that any threads that are still + // running will hang if they try to call |RAND_bytes|. } #endif @@ -146,12 +149,6 @@ static int rdrand(uint8_t *buf, const size_t len) { OPENSSL_memcpy(buf + len_multiple8, rand_buf, remainder); } -#if defined(BORINGSSL_FIPS_BREAK_CRNG) - // This breaks the "continuous random number generator test" defined in FIPS - // 140-2, section 4.9.2, and implemented in rand_get_seed(). - OPENSSL_memset(buf, 0, len); -#endif - return 1; } @@ -165,25 +162,97 @@ static int rdrand(uint8_t *buf, size_t len) { #if defined(BORINGSSL_FIPS) +void CRYPTO_get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len, + int *out_used_cpu) { + *out_used_cpu = 0; + if (have_rdrand() && rdrand(out_entropy, out_entropy_len)) { + *out_used_cpu = 1; + } else { + CRYPTO_sysrand_for_seed(out_entropy, out_entropy_len); + } + +#if defined(BORINGSSL_FIPS_BREAK_CRNG) + // This breaks the "continuous random number generator test" defined in FIPS + // 140-2, section 4.9.2, and implemented in |rand_get_seed|. + OPENSSL_memset(out_entropy, 0, out_entropy_len); +#endif +} + +// In passive entropy mode, entropy is supplied from outside of the module via +// |RAND_load_entropy| and is stored in global instance of the following +// structure. 
+ +struct entropy_buffer { + // bytes contains entropy suitable for seeding a DRBG. + uint8_t bytes[CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD]; + // bytes_valid indicates the number of bytes of |bytes| that contain valid + // data. + size_t bytes_valid; + // from_cpu is true if any of the contents of |bytes| were obtained directly + // from the CPU. + int from_cpu; +}; + +DEFINE_BSS_GET(struct entropy_buffer, entropy_buffer); +DEFINE_STATIC_MUTEX(entropy_buffer_lock); + +void RAND_load_entropy(const uint8_t *entropy, size_t entropy_len, + int from_cpu) { + struct entropy_buffer *const buffer = entropy_buffer_bss_get(); + + CRYPTO_STATIC_MUTEX_lock_write(entropy_buffer_lock_bss_get()); + const size_t space = sizeof(buffer->bytes) - buffer->bytes_valid; + if (entropy_len > space) { + entropy_len = space; + } + + OPENSSL_memcpy(&buffer->bytes[buffer->bytes_valid], entropy, entropy_len); + buffer->bytes_valid += entropy_len; + buffer->from_cpu |= from_cpu && (entropy_len != 0); + CRYPTO_STATIC_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); +} + +// get_seed_entropy fills |out_entropy_len| bytes of |out_entropy| from the +// global |entropy_buffer|. 
+static void get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len, + int *out_used_cpu) { + struct entropy_buffer *const buffer = entropy_buffer_bss_get(); + if (out_entropy_len > sizeof(buffer->bytes)) { + abort(); + } + + CRYPTO_STATIC_MUTEX_lock_write(entropy_buffer_lock_bss_get()); + while (buffer->bytes_valid < out_entropy_len) { + CRYPTO_STATIC_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); + RAND_need_entropy(out_entropy_len - buffer->bytes_valid); + CRYPTO_STATIC_MUTEX_lock_write(entropy_buffer_lock_bss_get()); + } + + *out_used_cpu = buffer->from_cpu; + OPENSSL_memcpy(out_entropy, buffer->bytes, out_entropy_len); + OPENSSL_memmove(buffer->bytes, &buffer->bytes[out_entropy_len], + buffer->bytes_valid - out_entropy_len); + buffer->bytes_valid -= out_entropy_len; + if (buffer->bytes_valid == 0) { + buffer->from_cpu = 0; + } + + CRYPTO_STATIC_MUTEX_unlock_write(entropy_buffer_lock_bss_get()); +} + +// rand_get_seed fills |seed| with entropy and sets |*out_used_cpu| to one if +// that entropy came directly from the CPU and zero otherwise. static void rand_get_seed(struct rand_thread_state *state, - uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { + uint8_t seed[CTR_DRBG_ENTROPY_LEN], + int *out_used_cpu) { if (!state->last_block_valid) { - if (!have_rdrand() || - !rdrand(state->last_block, sizeof(state->last_block))) { - CRYPTO_sysrand_for_seed(state->last_block, sizeof(state->last_block)); - } + int unused; + get_seed_entropy(state->last_block, sizeof(state->last_block), &unused); state->last_block_valid = 1; } - // We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to - // whiten. 
-#define FIPS_OVERREAD 10 - uint8_t entropy[CTR_DRBG_ENTROPY_LEN * FIPS_OVERREAD]; - - int used_rdrand = have_rdrand() && rdrand(entropy, sizeof(entropy)); - if (!used_rdrand) { - CRYPTO_sysrand_for_seed(entropy, sizeof(entropy)); - } + uint8_t entropy[CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD]; + get_seed_entropy(entropy, sizeof(entropy), out_used_cpu); // See FIPS 140-2, section 4.9.2. This is the “continuous random number // generator test” which causes the program to randomly abort. Hopefully the @@ -193,6 +262,7 @@ static void rand_get_seed(struct rand_thread_state *state, BORINGSSL_FIPS_abort(); } + OPENSSL_STATIC_ASSERT(sizeof(entropy) % CRNGT_BLOCK_SIZE == 0, ""); for (size_t i = CRNGT_BLOCK_SIZE; i < sizeof(entropy); i += CRNGT_BLOCK_SIZE) { if (CRYPTO_memcmp(entropy + i - CRNGT_BLOCK_SIZE, entropy + i, @@ -207,31 +277,24 @@ static void rand_get_seed(struct rand_thread_state *state, OPENSSL_memcpy(seed, entropy, CTR_DRBG_ENTROPY_LEN); - for (size_t i = 1; i < FIPS_OVERREAD; i++) { + for (size_t i = 1; i < BORINGSSL_FIPS_OVERREAD; i++) { for (size_t j = 0; j < CTR_DRBG_ENTROPY_LEN; j++) { seed[j] ^= entropy[CTR_DRBG_ENTROPY_LEN * i + j]; } } - -#if defined(OPENSSL_URANDOM) - // If we used RDRAND, also opportunistically read from the system. This avoids - // solely relying on the hardware once the entropy pool has been initialized. - if (used_rdrand) { - CRYPTO_sysrand_if_available(entropy, CTR_DRBG_ENTROPY_LEN); - for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i++) { - seed[i] ^= entropy[i]; - } - } -#endif } #else +// rand_get_seed fills |seed| with entropy and sets |*out_used_cpu| to one if +// that entropy came directly from the CPU and zero otherwise. static void rand_get_seed(struct rand_thread_state *state, - uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { + uint8_t seed[CTR_DRBG_ENTROPY_LEN], + int *out_used_cpu) { // If not in FIPS mode, we don't overread from the system entropy source and // we don't depend only on the hardware RDRAND. 
- CRYPTO_sysrand(seed, CTR_DRBG_ENTROPY_LEN); + CRYPTO_sysrand_for_seed(seed, CTR_DRBG_ENTROPY_LEN); + *out_used_cpu = 0; } #endif @@ -290,8 +353,23 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, state->last_block_valid = 0; uint8_t seed[CTR_DRBG_ENTROPY_LEN]; - rand_get_seed(state, seed); - if (!CTR_DRBG_init(&state->drbg, seed, NULL, 0)) { + int used_cpu; + rand_get_seed(state, seed, &used_cpu); + + uint8_t personalization[CTR_DRBG_ENTROPY_LEN] = {0}; + size_t personalization_len = 0; +#if defined(OPENSSL_URANDOM) + // If we used RDRAND, also opportunistically read from the system. This + // avoids solely relying on the hardware once the entropy pool has been + // initialized. + if (used_cpu && + CRYPTO_sysrand_if_available(personalization, sizeof(personalization))) { + personalization_len = sizeof(personalization); + } +#endif + + if (!CTR_DRBG_init(&state->drbg, seed, personalization, + personalization_len)) { abort(); } state->calls = 0; @@ -315,7 +393,8 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, if (state->calls >= kReseedInterval || state->fork_generation != fork_generation) { uint8_t seed[CTR_DRBG_ENTROPY_LEN]; - rand_get_seed(state, seed); + int used_cpu; + rand_get_seed(state, seed, &used_cpu); #if defined(BORINGSSL_FIPS) // Take a read lock around accesses to |state->drbg|. This is needed to // avoid returning bad entropy if we race with @@ -325,7 +404,7 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, // bug on ppc64le. glibc may implement pthread locks by wrapping user code // in a hardware transaction, but, on some older versions of glibc and the // kernel, syscalls made with |syscall| did not abort the transaction. 
- CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get()); + CRYPTO_STATIC_MUTEX_lock_read(state_clear_all_lock_bss_get()); #endif if (!CTR_DRBG_reseed(&state->drbg, seed, NULL, 0)) { abort(); @@ -334,7 +413,7 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, state->fork_generation = fork_generation; } else { #if defined(BORINGSSL_FIPS) - CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get()); + CRYPTO_STATIC_MUTEX_lock_read(state_clear_all_lock_bss_get()); #endif } @@ -363,7 +442,7 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, } #if defined(BORINGSSL_FIPS) - CRYPTO_STATIC_MUTEX_unlock_read(thread_states_list_lock_bss_get()); + CRYPTO_STATIC_MUTEX_unlock_read(state_clear_all_lock_bss_get()); #endif } diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/urandom.c b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/urandom.c index 04d1cc9..520a9c5 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/urandom.c +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rand/urandom.c @@ -62,6 +62,15 @@ #include #endif +#if defined(OPENSSL_FREEBSD) +#define URANDOM_BLOCKS_FOR_ENTROPY +#if __FreeBSD__ >= 12 +// getrandom is supported in FreeBSD 12 and up. +#define FREEBSD_GETRANDOM +#include +#endif +#endif + #include #include @@ -95,17 +104,10 @@ static ssize_t boringssl_getrandom(void *buf, size_t buf_len, unsigned flags) { #endif // USE_NR_getrandom -// rand_lock is used to protect the |*_requested| variables. -DEFINE_STATIC_MUTEX(rand_lock) - -// The following constants are magic values of |urandom_fd|. -static const int kUnset = 0; +// kHaveGetrandom in |urandom_fd| signals that |getrandom| or |getentropy| is +// available and should be used instead. static const int kHaveGetrandom = -3; -// urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by -// |rand_lock|. -DEFINE_BSS_GET(int, urandom_fd_requested) - // urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. 
DEFINE_BSS_GET(int, urandom_fd) @@ -144,14 +146,9 @@ static void maybe_set_extra_getrandom_flags(void) { DEFINE_STATIC_ONCE(rand_once) // init_once initializes the state of this module to values previously -// requested. This is the only function that modifies |urandom_fd| and -// |urandom_buffering|, whose values may be read safely after calling the -// once. +// requested. This is the only function that modifies |urandom_fd|, which may be +// read safely after calling the once. static void init_once(void) { - CRYPTO_STATIC_MUTEX_lock_read(rand_lock_bss_get()); - int fd = *urandom_fd_requested_bss_get(); - CRYPTO_STATIC_MUTEX_unlock_read(rand_lock_bss_get()); - #if defined(USE_NR_getrandom) int have_getrandom; uint8_t dummy; @@ -188,37 +185,27 @@ static void init_once(void) { } #endif +#if defined(FREEBSD_GETRANDOM) + *urandom_fd_bss_get() = kHaveGetrandom; + return; +#endif + // Android FIPS builds must support getrandom. #if defined(BORINGSSL_FIPS) && defined(OPENSSL_ANDROID) perror("getrandom not found"); abort(); #endif - if (fd == kUnset) { - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - } + int fd; + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); if (fd < 0) { perror("failed to open /dev/urandom"); abort(); } - assert(kUnset == 0); - if (fd == kUnset) { - // Because we want to keep |urandom_fd| in the BSS, we have to initialise - // it to zero. But zero is a valid file descriptor too. Thus if open - // returns zero for /dev/urandom, we dup it to get a non-zero number. - fd = dup(fd); - close(kUnset); - - if (fd <= 0) { - perror("failed to dup /dev/urandom fd"); - abort(); - } - } - int flags = fcntl(fd, F_GETFD); if (flags == -1) { // Native Client doesn't implement |fcntl|. @@ -283,11 +270,11 @@ static void wait_for_entropy(void) { return; } -#if defined(BORINGSSL_FIPS) - // In FIPS mode we ensure that the kernel has sufficient entropy before - // continuing. 
This is automatically handled by getrandom, which requires - // that the entropy pool has been initialised, but for urandom we have to - // poll. +#if defined(BORINGSSL_FIPS) && !defined(URANDOM_BLOCKS_FOR_ENTROPY) + // In FIPS mode on platforms where urandom doesn't block at startup, we ensure + // that the kernel has sufficient entropy before continuing. This is + // automatically handled by getrandom, which requires that the entropy pool + // has been initialised, but for urandom we have to poll. for (;;) { int entropy_bits; if (ioctl(fd, RNDGETENTCNT, &entropy_bits)) { @@ -304,41 +291,7 @@ static void wait_for_entropy(void) { usleep(250000); } -#endif // BORINGSSL_FIPS -} - -void RAND_set_urandom_fd(int fd) { - fd = dup(fd); - if (fd < 0) { - perror("failed to dup supplied urandom fd"); - abort(); - } - - assert(kUnset == 0); - if (fd == kUnset) { - // Because we want to keep |urandom_fd| in the BSS, we have to initialise - // it to zero. But zero is a valid file descriptor too. Thus if dup - // returned zero we dup it again to get a non-zero number. - fd = dup(fd); - close(kUnset); - - if (fd <= 0) { - perror("failed to dup supplied urandom fd"); - abort(); - } - } - - CRYPTO_STATIC_MUTEX_lock_write(rand_lock_bss_get()); - *urandom_fd_requested_bss_get() = fd; - CRYPTO_STATIC_MUTEX_unlock_write(rand_lock_bss_get()); - - CRYPTO_once(rand_once_bss_get(), init_once); - if (*urandom_fd_bss_get() == kHaveGetrandom) { - close(fd); - } else if (*urandom_fd_bss_get() != fd) { - fprintf(stderr, "RAND_set_urandom_fd called after initialisation.\n"); - abort(); - } +#endif // BORINGSSL_FIPS && !URANDOM_BLOCKS_FOR_ENTROPY } // fill_with_entropy writes |len| bytes of entropy into |out|. 
It returns one @@ -352,17 +305,20 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) { return 1; } -#if defined(USE_NR_getrandom) +#if defined(USE_NR_getrandom) || defined(FREEBSD_GETRANDOM) int getrandom_flags = 0; if (!block) { getrandom_flags |= GRND_NONBLOCK; } +#endif + +#if defined (USE_NR_getrandom) if (seed) { getrandom_flags |= *extra_getrandom_flags_for_seed_bss_get(); } #endif - CRYPTO_once(rand_once_bss_get(), init_once); + CRYPTO_init_sysrand(); if (block) { CRYPTO_once(wait_for_entropy_once_bss_get(), wait_for_entropy); } @@ -376,6 +332,8 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) { if (*urandom_fd_bss_get() == kHaveGetrandom) { #if defined(USE_NR_getrandom) r = boringssl_getrandom(out, len, getrandom_flags); +#elif defined(FREEBSD_GETRANDOM) + r = getrandom(out, len, getrandom_flags); #elif defined(OPENSSL_MACOS) if (__builtin_available(macos 10.12, *)) { // |getentropy| can only request 256 bytes at a time. @@ -409,6 +367,10 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) { return 1; } +void CRYPTO_init_sysrand(void) { + CRYPTO_once(rand_once_bss_get(), init_once); +} + // CRYPTO_sysrand puts |requested| random bytes into |out|. void CRYPTO_sysrand(uint8_t *out, size_t requested) { if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/0)) { @@ -417,22 +379,13 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { } } -#if defined(BORINGSSL_FIPS) void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/1)) { perror("entropy fill failed"); abort(); } - -#if defined(BORINGSSL_FIPS_BREAK_CRNG) - // This breaks the "continuous random number generator test" defined in FIPS - // 140-2, section 4.9.2, and implemented in rand_get_seed(). 
- OPENSSL_memset(out, 0, requested); -#endif } -#endif // BORINGSSL_FIPS - int CRYPTO_sysrand_if_available(uint8_t *out, size_t requested) { if (fill_with_entropy(out, requested, /*block=*/0, /*seed=*/0)) { return 1; diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.linux.x86_64.S index 4c098ab..1212a14 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.mac.x86_64.S index e9279d7..1f81eaa 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rdrand-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.linux.x86_64.S index 755133e..3200eec 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.mac.x86_64.S index cd2c1d5..45aade9 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/rsaz-avx2.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-586.linux.x86.S index 7e68c79..b90dff1 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-586.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.ios.aarch64.S index 75ce4ae..ce9a8ab 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.ios.aarch64.S @@ -19,11 +19,14 @@ .text +.private_extern _OPENSSL_armcap_P .globl _sha1_block_data_order .private_extern _sha1_block_data_order .align 6 _sha1_block_data_order: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET #if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P #else @@ -1091,6 +1094,8 @@ Loop: .align 6 sha1_block_armv8: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET Lv8_entry: stp x29,x30,[sp,#-16]! 
add x29,sp,#0 @@ -1229,8 +1234,6 @@ Lconst: .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P #endif // !OPENSSL_NO_ASM #endif // defined(__aarch64__) && defined(__APPLE__) #if defined(__linux__) && defined(__ELF__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.linux.aarch64.S index 73f3658..9ccbcac 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-armv8.linux.aarch64.S @@ -20,11 +20,14 @@ .text +.hidden OPENSSL_armcap_P .globl sha1_block_data_order .hidden sha1_block_data_order .type sha1_block_data_order,%function .align 6 sha1_block_data_order: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET #if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 adrp x16,:pg_hi21_nc:OPENSSL_armcap_P #else @@ -1092,6 +1095,8 @@ sha1_block_data_order: .type sha1_block_armv8,%function .align 6 sha1_block_armv8: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. + AARCH64_VALID_CALL_TARGET .Lv8_entry: stp x29,x30,[sp,#-16]! 
add x29,sp,#0 @@ -1230,8 +1235,6 @@ sha1_block_armv8: .byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P #endif #endif // !OPENSSL_NO_ASM .section .note.GNU-stack,"",%progbits diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.linux.x86_64.S index 72ca1ec..ac31639 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) @@ -29,6 +29,11 @@ sha1_block_data_order: movl 8(%r10),%r10d testl $512,%r8d jz .Lialu + testl $536870912,%r10d + jnz _shaext_shortcut + andl $296,%r10d + cmpl $296,%r10d + je _avx2_shortcut andl $268435456,%r8d andl $1073741824,%r9d orl %r9d,%r8d @@ -1268,6 +1273,175 @@ sha1_block_data_order: .byte 0xf3,0xc3 .cfi_endproc .size sha1_block_data_order,.-sha1_block_data_order +.type sha1_block_data_order_shaext,@function +.align 32 +sha1_block_data_order_shaext: +_shaext_shortcut: +.cfi_startproc + movdqu (%rdi),%xmm0 + movd 16(%rdi),%xmm1 + movdqa K_XX_XX+160(%rip),%xmm3 + + movdqu (%rsi),%xmm4 + pshufd $27,%xmm0,%xmm0 + movdqu 16(%rsi),%xmm5 + pshufd $27,%xmm1,%xmm1 + movdqu 32(%rsi),%xmm6 +.byte 102,15,56,0,227 + movdqu 48(%rsi),%xmm7 +.byte 102,15,56,0,235 +.byte 102,15,56,0,243 + movdqa %xmm1,%xmm9 +.byte 102,15,56,0,251 + jmp .Loop_shaext + +.align 16 +.Loop_shaext: + decq %rdx + leaq 64(%rsi),%r8 + paddd %xmm4,%xmm1 + cmovneq %r8,%rsi + movdqa %xmm0,%xmm8 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 
+.byte 15,58,204,194,1 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 + movdqu (%rsi),%xmm4 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,213 + movdqu 16(%rsi),%xmm5 +.byte 102,15,56,0,227 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,206 + movdqu 32(%rsi),%xmm6 +.byte 102,15,56,0,235 + + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,215 + movdqu 48(%rsi),%xmm7 +.byte 102,15,56,0,243 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 65,15,56,200,201 +.byte 102,15,56,0,251 + + paddd %xmm8,%xmm0 + movdqa %xmm1,%xmm9 + + jnz .Loop_shaext + + pshufd $27,%xmm0,%xmm0 + pshufd $27,%xmm1,%xmm1 + movdqu %xmm0,(%rdi) + movd %xmm1,16(%rdi) + .byte 0xf3,0xc3 +.cfi_endproc +.size sha1_block_data_order_shaext,.-sha1_block_data_order_shaext .type sha1_block_data_order_ssse3,@function .align 16 sha1_block_data_order_ssse3: @@ -3584,6 +3758,1699 @@ _avx_shortcut: .byte 0xf3,0xc3 .cfi_endproc .size 
sha1_block_data_order_avx,.-sha1_block_data_order_avx +.type sha1_block_data_order_avx2,@function +.align 16 +sha1_block_data_order_avx2: +_avx2_shortcut: +.cfi_startproc + movq %rsp,%r11 +.cfi_def_cfa_register %r11 + pushq %rbx +.cfi_offset %rbx,-16 + pushq %rbp +.cfi_offset %rbp,-24 + pushq %r12 +.cfi_offset %r12,-32 + pushq %r13 +.cfi_offset %r13,-40 + pushq %r14 +.cfi_offset %r14,-48 + vzeroupper + movq %rdi,%r8 + movq %rsi,%r9 + movq %rdx,%r10 + + leaq -640(%rsp),%rsp + shlq $6,%r10 + leaq 64(%r9),%r13 + andq $-128,%rsp + addq %r9,%r10 + leaq K_XX_XX+64(%rip),%r14 + + movl 0(%r8),%eax + cmpq %r10,%r13 + cmovaeq %r9,%r13 + movl 4(%r8),%ebp + movl 8(%r8),%ecx + movl 12(%r8),%edx + movl 16(%r8),%esi + vmovdqu 64(%r14),%ymm6 + + vmovdqu (%r9),%xmm0 + vmovdqu 16(%r9),%xmm1 + vmovdqu 32(%r9),%xmm2 + vmovdqu 48(%r9),%xmm3 + leaq 64(%r9),%r9 + vinserti128 $1,(%r13),%ymm0,%ymm0 + vinserti128 $1,16(%r13),%ymm1,%ymm1 + vpshufb %ymm6,%ymm0,%ymm0 + vinserti128 $1,32(%r13),%ymm2,%ymm2 + vpshufb %ymm6,%ymm1,%ymm1 + vinserti128 $1,48(%r13),%ymm3,%ymm3 + vpshufb %ymm6,%ymm2,%ymm2 + vmovdqu -64(%r14),%ymm11 + vpshufb %ymm6,%ymm3,%ymm3 + + vpaddd %ymm11,%ymm0,%ymm4 + vpaddd %ymm11,%ymm1,%ymm5 + vmovdqu %ymm4,0(%rsp) + vpaddd %ymm11,%ymm2,%ymm6 + vmovdqu %ymm5,32(%rsp) + vpaddd %ymm11,%ymm3,%ymm7 + vmovdqu %ymm6,64(%rsp) + vmovdqu %ymm7,96(%rsp) + vpalignr $8,%ymm0,%ymm1,%ymm4 + vpsrldq $4,%ymm3,%ymm8 + vpxor %ymm0,%ymm4,%ymm4 + vpxor %ymm2,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpsrld $31,%ymm4,%ymm8 + vpslldq $12,%ymm4,%ymm10 + vpaddd %ymm4,%ymm4,%ymm4 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm4,%ymm4 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm4,%ymm4 + vpxor %ymm10,%ymm4,%ymm4 + vpaddd %ymm11,%ymm4,%ymm9 + vmovdqu %ymm9,128(%rsp) + vpalignr $8,%ymm1,%ymm2,%ymm5 + vpsrldq $4,%ymm4,%ymm8 + vpxor %ymm1,%ymm5,%ymm5 + vpxor %ymm3,%ymm8,%ymm8 + vpxor %ymm8,%ymm5,%ymm5 + vpsrld $31,%ymm5,%ymm8 + vmovdqu -32(%r14),%ymm11 + vpslldq $12,%ymm5,%ymm10 + vpaddd %ymm5,%ymm5,%ymm5 + 
vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm5,%ymm5 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm5,%ymm5 + vpxor %ymm10,%ymm5,%ymm5 + vpaddd %ymm11,%ymm5,%ymm9 + vmovdqu %ymm9,160(%rsp) + vpalignr $8,%ymm2,%ymm3,%ymm6 + vpsrldq $4,%ymm5,%ymm8 + vpxor %ymm2,%ymm6,%ymm6 + vpxor %ymm4,%ymm8,%ymm8 + vpxor %ymm8,%ymm6,%ymm6 + vpsrld $31,%ymm6,%ymm8 + vpslldq $12,%ymm6,%ymm10 + vpaddd %ymm6,%ymm6,%ymm6 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm6,%ymm6 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm6,%ymm6 + vpxor %ymm10,%ymm6,%ymm6 + vpaddd %ymm11,%ymm6,%ymm9 + vmovdqu %ymm9,192(%rsp) + vpalignr $8,%ymm3,%ymm4,%ymm7 + vpsrldq $4,%ymm6,%ymm8 + vpxor %ymm3,%ymm7,%ymm7 + vpxor %ymm5,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpsrld $31,%ymm7,%ymm8 + vpslldq $12,%ymm7,%ymm10 + vpaddd %ymm7,%ymm7,%ymm7 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm7,%ymm7 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm7,%ymm7 + vpxor %ymm10,%ymm7,%ymm7 + vpaddd %ymm11,%ymm7,%ymm9 + vmovdqu %ymm9,224(%rsp) + leaq 128(%rsp),%r13 + jmp .Loop_avx2 +.align 32 +.Loop_avx2: + rorxl $2,%ebp,%ebx + andnl %edx,%ebp,%edi + andl %ecx,%ebp + xorl %edi,%ebp + jmp .Lalign32_1 +.align 32 +.Lalign32_1: + vpalignr $8,%ymm6,%ymm7,%ymm8 + vpxor %ymm4,%ymm0,%ymm0 + addl -128(%r13),%esi + andnl %ecx,%eax,%edi + vpxor %ymm1,%ymm0,%ymm0 + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpxor %ymm8,%ymm0,%ymm0 + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + vpsrld $30,%ymm0,%ymm8 + vpslld $2,%ymm0,%ymm0 + addl -124(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + vpor %ymm8,%ymm0,%ymm0 + addl %r12d,%edx + xorl %edi,%esi + addl -120(%r13),%ecx + andnl %ebp,%edx,%edi + vpaddd %ymm11,%ymm0,%ymm9 + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + vmovdqu %ymm9,256(%rsp) + addl %r12d,%ecx + xorl %edi,%edx + addl -116(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl 
%esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -96(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + vpalignr $8,%ymm7,%ymm0,%ymm8 + vpxor %ymm5,%ymm1,%ymm1 + addl -92(%r13),%eax + andnl %edx,%ebp,%edi + vpxor %ymm2,%ymm1,%ymm1 + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + vpxor %ymm8,%ymm1,%ymm1 + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + vpsrld $30,%ymm1,%ymm8 + vpslld $2,%ymm1,%ymm1 + addl -88(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + vpor %ymm8,%ymm1,%ymm1 + addl %r12d,%esi + xorl %edi,%eax + addl -84(%r13),%edx + andnl %ebx,%esi,%edi + vpaddd %ymm11,%ymm1,%ymm9 + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + vmovdqu %ymm9,288(%rsp) + addl %r12d,%edx + xorl %edi,%esi + addl -64(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -60(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + vpalignr $8,%ymm0,%ymm1,%ymm8 + vpxor %ymm6,%ymm2,%ymm2 + addl -56(%r13),%ebp + andnl %esi,%ebx,%edi + vpxor %ymm3,%ymm2,%ymm2 + vmovdqu 0(%r14),%ymm11 + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpxor %ymm8,%ymm2,%ymm2 + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + vpsrld $30,%ymm2,%ymm8 + vpslld $2,%ymm2,%ymm2 + addl -52(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + vpor %ymm8,%ymm2,%ymm2 + addl %r12d,%eax + xorl %edi,%ebp + addl -32(%r13),%esi + andnl %ecx,%eax,%edi + vpaddd %ymm11,%ymm2,%ymm9 + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + vmovdqu %ymm9,320(%rsp) + addl %r12d,%esi + xorl %edi,%eax + addl -28(%r13),%edx + andnl 
%ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -24(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + vpalignr $8,%ymm1,%ymm2,%ymm8 + vpxor %ymm7,%ymm3,%ymm3 + addl -20(%r13),%ebx + andnl %eax,%ecx,%edi + vpxor %ymm4,%ymm3,%ymm3 + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpxor %ymm8,%ymm3,%ymm3 + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + vpsrld $30,%ymm3,%ymm8 + vpslld $2,%ymm3,%ymm3 + addl 0(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + vpor %ymm8,%ymm3,%ymm3 + addl %r12d,%ebp + xorl %edi,%ebx + addl 4(%r13),%eax + andnl %edx,%ebp,%edi + vpaddd %ymm11,%ymm3,%ymm9 + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + vmovdqu %ymm9,352(%rsp) + addl %r12d,%eax + xorl %edi,%ebp + addl 8(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl 12(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vpalignr $8,%ymm2,%ymm3,%ymm8 + vpxor %ymm0,%ymm4,%ymm4 + addl 32(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + vpxor %ymm5,%ymm4,%ymm4 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpxor %ymm8,%ymm4,%ymm4 + addl %r12d,%ecx + xorl %ebp,%edx + addl 36(%r13),%ebx + vpsrld $30,%ymm4,%ymm8 + vpslld $2,%ymm4,%ymm4 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vpor %ymm8,%ymm4,%ymm4 + addl 40(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpaddd %ymm11,%ymm4,%ymm9 + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 44(%r13),%eax + vmovdqu %ymm9,384(%rsp) + leal 
(%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpalignr $8,%ymm3,%ymm4,%ymm8 + vpxor %ymm1,%ymm5,%ymm5 + addl 68(%r13),%edx + leal (%rdx,%rax,1),%edx + vpxor %ymm6,%ymm5,%ymm5 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + vpxor %ymm8,%ymm5,%ymm5 + addl %r12d,%edx + xorl %ebx,%esi + addl 72(%r13),%ecx + vpsrld $30,%ymm5,%ymm8 + vpslld $2,%ymm5,%ymm5 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + vpor %ymm8,%ymm5,%ymm5 + addl 76(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpaddd %ymm11,%ymm5,%ymm9 + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 96(%r13),%ebp + vmovdqu %ymm9,416(%rsp) + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 100(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpalignr $8,%ymm4,%ymm5,%ymm8 + vpxor %ymm2,%ymm6,%ymm6 + addl 104(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpxor %ymm7,%ymm6,%ymm6 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + vpxor %ymm8,%ymm6,%ymm6 + addl %r12d,%esi + xorl %ecx,%eax + addl 108(%r13),%edx + leaq 256(%r13),%r13 + vpsrld $30,%ymm6,%ymm8 + vpslld $2,%ymm6,%ymm6 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vpor %ymm8,%ymm6,%ymm6 + addl -128(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpaddd %ymm11,%ymm6,%ymm9 + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -124(%r13),%ebx + vmovdqu %ymm9,448(%rsp) + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + 
xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -120(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpalignr $8,%ymm5,%ymm6,%ymm8 + vpxor %ymm3,%ymm7,%ymm7 + addl -116(%r13),%eax + leal (%rax,%rbx,1),%eax + vpxor %ymm0,%ymm7,%ymm7 + vmovdqu 32(%r14),%ymm11 + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + vpxor %ymm8,%ymm7,%ymm7 + addl %r12d,%eax + xorl %edx,%ebp + addl -96(%r13),%esi + vpsrld $30,%ymm7,%ymm8 + vpslld $2,%ymm7,%ymm7 + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpor %ymm8,%ymm7,%ymm7 + addl -92(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpaddd %ymm11,%ymm7,%ymm9 + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -88(%r13),%ecx + vmovdqu %ymm9,480(%rsp) + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -84(%r13),%ebx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + jmp .Lalign32_2 +.align 32 +.Lalign32_2: + vpalignr $8,%ymm6,%ymm7,%ymm8 + vpxor %ymm4,%ymm0,%ymm0 + addl -64(%r13),%ebp + xorl %esi,%ecx + vpxor %ymm1,%ymm0,%ymm0 + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + vpxor %ymm8,%ymm0,%ymm0 + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + vpsrld $30,%ymm0,%ymm8 + vpslld $2,%ymm0,%ymm0 + addl %r12d,%ebp + andl %edi,%ebx + addl -60(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + vpor %ymm8,%ymm0,%ymm0 + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + vpaddd %ymm11,%ymm0,%ymm9 + addl %r12d,%eax + andl %edi,%ebp + addl -56(%r13),%esi + xorl %ecx,%ebp + vmovdqu %ymm9,512(%rsp) + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl 
$27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl -52(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + addl -32(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + vpalignr $8,%ymm7,%ymm0,%ymm8 + vpxor %ymm5,%ymm1,%ymm1 + addl -28(%r13),%ebx + xorl %eax,%edx + vpxor %ymm2,%ymm1,%ymm1 + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + vpxor %ymm8,%ymm1,%ymm1 + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vpsrld $30,%ymm1,%ymm8 + vpslld $2,%ymm1,%ymm1 + addl %r12d,%ebx + andl %edi,%ecx + addl -24(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + vpor %ymm8,%ymm1,%ymm1 + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + vpaddd %ymm11,%ymm1,%ymm9 + addl %r12d,%ebp + andl %edi,%ebx + addl -20(%r13),%eax + xorl %edx,%ebx + vmovdqu %ymm9,544(%rsp) + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 0(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl 4(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + vpalignr $8,%ymm0,%ymm1,%ymm8 + vpxor %ymm6,%ymm2,%ymm2 + addl 8(%r13),%ecx + xorl %ebp,%esi + vpxor %ymm3,%ymm2,%ymm2 + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + vpxor %ymm8,%ymm2,%ymm2 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpsrld $30,%ymm2,%ymm8 + vpslld 
$2,%ymm2,%ymm2 + addl %r12d,%ecx + andl %edi,%edx + addl 12(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + vpor %ymm8,%ymm2,%ymm2 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vpaddd %ymm11,%ymm2,%ymm9 + addl %r12d,%ebx + andl %edi,%ecx + addl 32(%r13),%ebp + xorl %esi,%ecx + vmovdqu %ymm9,576(%rsp) + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 36(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 40(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + vpalignr $8,%ymm1,%ymm2,%ymm8 + vpxor %ymm7,%ymm3,%ymm3 + addl 44(%r13),%edx + xorl %ebx,%eax + vpxor %ymm4,%ymm3,%ymm3 + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + vpxor %ymm8,%ymm3,%ymm3 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + vpsrld $30,%ymm3,%ymm8 + vpslld $2,%ymm3,%ymm3 + addl %r12d,%edx + andl %edi,%esi + addl 64(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + vpor %ymm8,%ymm3,%ymm3 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpaddd %ymm11,%ymm3,%ymm9 + addl %r12d,%ecx + andl %edi,%edx + addl 68(%r13),%ebx + xorl %eax,%edx + vmovdqu %ymm9,608(%rsp) + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl 72(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 76(%r13),%eax + xorl %edx,%ebx + leal (%rax,%rbx,1),%eax + rorxl 
$27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl 100(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl 104(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 108(%r13),%ebx + leaq 256(%r13),%r13 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -128(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -124(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -120(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -116(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -96(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -92(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -88(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -84(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl 
-60(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -56(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -52(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -32(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -28(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -24(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -20(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + addl %r12d,%edx + leaq 128(%r9),%r13 + leaq 128(%r9),%rdi + cmpq %r10,%r13 + cmovaeq %r9,%r13 + + + addl 0(%r8),%edx + addl 4(%r8),%esi + addl 8(%r8),%ebp + movl %edx,0(%r8) + addl 12(%r8),%ebx + movl %esi,4(%r8) + movl %edx,%eax + addl 16(%r8),%ecx + movl %ebp,%r12d + movl %ebp,8(%r8) + movl %ebx,%edx + + movl %ebx,12(%r8) + movl %esi,%ebp + movl %ecx,16(%r8) + + movl %ecx,%esi + movl %r12d,%ecx + + + cmpq %r10,%r9 + je .Ldone_avx2 + vmovdqu 64(%r14),%ymm6 + cmpq %r10,%rdi + ja .Last_avx2 + + vmovdqu -64(%rdi),%xmm0 + vmovdqu -48(%rdi),%xmm1 + vmovdqu -32(%rdi),%xmm2 + vmovdqu -16(%rdi),%xmm3 + vinserti128 $1,0(%r13),%ymm0,%ymm0 + vinserti128 $1,16(%r13),%ymm1,%ymm1 + vinserti128 $1,32(%r13),%ymm2,%ymm2 + vinserti128 $1,48(%r13),%ymm3,%ymm3 + jmp .Last_avx2 + +.align 32 +.Last_avx2: + leaq 128+16(%rsp),%r13 + rorxl $2,%ebp,%ebx + andnl %edx,%ebp,%edi + andl %ecx,%ebp + xorl %edi,%ebp + subq $-128,%r9 + addl -128(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl 
%edi,%eax + addl -124(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -120(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -116(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -96(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl -92(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl -88(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl -84(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -64(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -60(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -56(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl -52(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl -32(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl -28(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + 
rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -24(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -20(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl 0(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl 4(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl 8(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl 12(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl 32(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 36(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 40(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 44(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vmovdqu -64(%r14),%ymm11 + vpshufb %ymm6,%ymm0,%ymm0 + addl 68(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl 72(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl 
$2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 76(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 96(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 100(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpshufb %ymm6,%ymm1,%ymm1 + vpaddd %ymm11,%ymm0,%ymm8 + addl 104(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl 108(%r13),%edx + leaq 256(%r13),%r13 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -128(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -124(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -120(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vmovdqu %ymm8,0(%rsp) + vpshufb %ymm6,%ymm2,%ymm2 + vpaddd %ymm11,%ymm1,%ymm9 + addl -116(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -92(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -88(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -84(%r13),%ebx + movl %esi,%edi + xorl 
%eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + vmovdqu %ymm9,32(%rsp) + vpshufb %ymm6,%ymm3,%ymm3 + vpaddd %ymm11,%ymm2,%ymm6 + addl -64(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl -60(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl -56(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl -52(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + addl -32(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + jmp .Lalign32_3 +.align 32 +.Lalign32_3: + vmovdqu %ymm6,64(%rsp) + vpaddd %ymm11,%ymm3,%ymm7 + addl -28(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl -24(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl -20(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 0(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl 
$2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl 4(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + vmovdqu %ymm7,96(%rsp) + addl 8(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + addl 12(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl 32(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 36(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 40(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + vpalignr $8,%ymm0,%ymm1,%ymm4 + addl 44(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + vpsrldq $4,%ymm3,%ymm8 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpxor %ymm0,%ymm4,%ymm4 + vpxor %ymm2,%ymm8,%ymm8 + xorl %ebp,%esi + addl %r12d,%edx + vpxor %ymm8,%ymm4,%ymm4 + andl %edi,%esi + addl 64(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + vpsrld $31,%ymm4,%ymm8 + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + vpslldq $12,%ymm4,%ymm10 + vpaddd %ymm4,%ymm4,%ymm4 + rorxl $2,%edx,%esi + xorl %eax,%edx + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm4,%ymm4 + addl %r12d,%ecx + andl %edi,%edx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm4,%ymm4 + addl 68(%r13),%ebx + xorl %eax,%edx + 
vpxor %ymm10,%ymm4,%ymm4 + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + vpaddd %ymm11,%ymm4,%ymm9 + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vmovdqu %ymm9,128(%rsp) + addl %r12d,%ebx + andl %edi,%ecx + addl 72(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 76(%r13),%eax + xorl %edx,%ebx + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpalignr $8,%ymm1,%ymm2,%ymm5 + addl 96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpsrldq $4,%ymm4,%ymm8 + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpxor %ymm1,%ymm5,%ymm5 + vpxor %ymm3,%ymm8,%ymm8 + addl 100(%r13),%edx + leal (%rdx,%rax,1),%edx + vpxor %ymm8,%ymm5,%ymm5 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + vpsrld $31,%ymm5,%ymm8 + vmovdqu -32(%r14),%ymm11 + xorl %ebx,%esi + addl 104(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + vpslldq $12,%ymm5,%ymm10 + vpaddd %ymm5,%ymm5,%ymm5 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm5,%ymm5 + xorl %eax,%edx + addl %r12d,%ecx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm5,%ymm5 + xorl %ebp,%edx + addl 108(%r13),%ebx + leaq 256(%r13),%r13 + vpxor %ymm10,%ymm5,%ymm5 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpaddd %ymm11,%ymm5,%ymm9 + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vmovdqu %ymm9,160(%rsp) + addl -128(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpalignr $8,%ymm2,%ymm3,%ymm6 + addl -124(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + vpsrldq $4,%ymm5,%ymm8 + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpxor %ymm2,%ymm6,%ymm6 + vpxor 
%ymm4,%ymm8,%ymm8 + addl -120(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpxor %ymm8,%ymm6,%ymm6 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + vpsrld $31,%ymm6,%ymm8 + xorl %ecx,%eax + addl -116(%r13),%edx + leal (%rdx,%rax,1),%edx + vpslldq $12,%ymm6,%ymm10 + vpaddd %ymm6,%ymm6,%ymm6 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm6,%ymm6 + xorl %ebp,%esi + addl %r12d,%edx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm6,%ymm6 + xorl %ebx,%esi + addl -96(%r13),%ecx + vpxor %ymm10,%ymm6,%ymm6 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpaddd %ymm11,%ymm6,%ymm9 + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + vmovdqu %ymm9,192(%rsp) + addl -92(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vpalignr $8,%ymm3,%ymm4,%ymm7 + addl -88(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpsrldq $4,%ymm6,%ymm8 + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpxor %ymm3,%ymm7,%ymm7 + vpxor %ymm5,%ymm8,%ymm8 + addl -84(%r13),%eax + leal (%rax,%rbx,1),%eax + vpxor %ymm8,%ymm7,%ymm7 + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + vpsrld $31,%ymm7,%ymm8 + xorl %edx,%ebp + addl -64(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpslldq $12,%ymm7,%ymm10 + vpaddd %ymm7,%ymm7,%ymm7 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm7,%ymm7 + xorl %ebx,%eax + addl %r12d,%esi + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm7,%ymm7 + xorl %ecx,%eax + addl -60(%r13),%edx + vpxor %ymm10,%ymm7,%ymm7 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpaddd %ymm11,%ymm7,%ymm9 + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vmovdqu %ymm9,224(%rsp) + addl -56(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl 
%ebp,%edx + addl -52(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -32(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -28(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -24(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -20(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + addl %r12d,%edx + leaq 128(%rsp),%r13 + + + addl 0(%r8),%edx + addl 4(%r8),%esi + addl 8(%r8),%ebp + movl %edx,0(%r8) + addl 12(%r8),%ebx + movl %esi,4(%r8) + movl %edx,%eax + addl 16(%r8),%ecx + movl %ebp,%r12d + movl %ebp,8(%r8) + movl %ebx,%edx + + movl %ebx,12(%r8) + movl %esi,%ebp + movl %ecx,16(%r8) + + movl %ecx,%esi + movl %r12d,%ecx + + + cmpq %r10,%r9 + jbe .Loop_avx2 + +.Ldone_avx2: + vzeroupper + movq -40(%r11),%r14 +.cfi_restore %r14 + movq -32(%r11),%r13 +.cfi_restore %r13 + movq -24(%r11),%r12 +.cfi_restore %r12 + movq -16(%r11),%rbp +.cfi_restore %rbp + movq -8(%r11),%rbx +.cfi_restore %rbx + leaq (%r11),%rsp +.cfi_def_cfa_register %rsp +.Lepilogue_avx2: + .byte 0xf3,0xc3 +.cfi_endproc +.size sha1_block_data_order_avx2,.-sha1_block_data_order_avx2 .align 64 K_XX_XX: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.mac.x86_64.S index 7a06814..935b0c3 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha1-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in 
the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) @@ -28,6 +28,11 @@ _sha1_block_data_order: movl 8(%r10),%r10d testl $512,%r8d jz L$ialu + testl $536870912,%r10d + jnz _shaext_shortcut + andl $296,%r10d + cmpl $296,%r10d + je _avx2_shortcut andl $268435456,%r8d andl $1073741824,%r9d orl %r9d,%r8d @@ -1268,6 +1273,175 @@ L$epilogue: +.p2align 5 +sha1_block_data_order_shaext: +_shaext_shortcut: + + movdqu (%rdi),%xmm0 + movd 16(%rdi),%xmm1 + movdqa K_XX_XX+160(%rip),%xmm3 + + movdqu (%rsi),%xmm4 + pshufd $27,%xmm0,%xmm0 + movdqu 16(%rsi),%xmm5 + pshufd $27,%xmm1,%xmm1 + movdqu 32(%rsi),%xmm6 +.byte 102,15,56,0,227 + movdqu 48(%rsi),%xmm7 +.byte 102,15,56,0,235 +.byte 102,15,56,0,243 + movdqa %xmm1,%xmm9 +.byte 102,15,56,0,251 + jmp L$oop_shaext + +.p2align 4 +L$oop_shaext: + decq %rdx + leaq 64(%rsi),%r8 + paddd %xmm4,%xmm1 + cmovneq %r8,%rsi + movdqa %xmm0,%xmm8 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,0 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,0 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + 
movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,1 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,1 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 +.byte 15,56,201,229 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,213 + pxor %xmm6,%xmm4 +.byte 15,56,201,238 +.byte 15,56,202,231 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,2 +.byte 15,56,200,206 + pxor %xmm7,%xmm5 +.byte 15,56,202,236 +.byte 15,56,201,247 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,2 +.byte 15,56,200,215 + pxor %xmm4,%xmm6 +.byte 15,56,201,252 +.byte 15,56,202,245 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,204 + pxor %xmm5,%xmm7 +.byte 15,56,202,254 + movdqu (%rsi),%xmm4 + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,213 + movdqu 16(%rsi),%xmm5 +.byte 102,15,56,0,227 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 15,56,200,206 + movdqu 32(%rsi),%xmm6 +.byte 102,15,56,0,235 + + movdqa %xmm0,%xmm2 +.byte 15,58,204,193,3 +.byte 15,56,200,215 + movdqu 48(%rsi),%xmm7 +.byte 102,15,56,0,243 + + movdqa %xmm0,%xmm1 +.byte 15,58,204,194,3 +.byte 65,15,56,200,201 +.byte 102,15,56,0,251 + + paddd %xmm8,%xmm0 + movdqa %xmm1,%xmm9 + + jnz L$oop_shaext + + pshufd $27,%xmm0,%xmm0 + pshufd $27,%xmm1,%xmm1 + movdqu %xmm0,(%rdi) + movd %xmm1,16(%rdi) + .byte 0xf3,0xc3 + + + .p2align 4 sha1_block_data_order_ssse3: _ssse3_shortcut: @@ -3583,6 +3757,1699 @@ L$epilogue_avx: .byte 0xf3,0xc3 + +.p2align 4 +sha1_block_data_order_avx2: +_avx2_shortcut: + + movq %rsp,%r11 + + pushq %rbx + + pushq %rbp + + pushq %r12 + + 
pushq %r13 + + pushq %r14 + + vzeroupper + movq %rdi,%r8 + movq %rsi,%r9 + movq %rdx,%r10 + + leaq -640(%rsp),%rsp + shlq $6,%r10 + leaq 64(%r9),%r13 + andq $-128,%rsp + addq %r9,%r10 + leaq K_XX_XX+64(%rip),%r14 + + movl 0(%r8),%eax + cmpq %r10,%r13 + cmovaeq %r9,%r13 + movl 4(%r8),%ebp + movl 8(%r8),%ecx + movl 12(%r8),%edx + movl 16(%r8),%esi + vmovdqu 64(%r14),%ymm6 + + vmovdqu (%r9),%xmm0 + vmovdqu 16(%r9),%xmm1 + vmovdqu 32(%r9),%xmm2 + vmovdqu 48(%r9),%xmm3 + leaq 64(%r9),%r9 + vinserti128 $1,(%r13),%ymm0,%ymm0 + vinserti128 $1,16(%r13),%ymm1,%ymm1 + vpshufb %ymm6,%ymm0,%ymm0 + vinserti128 $1,32(%r13),%ymm2,%ymm2 + vpshufb %ymm6,%ymm1,%ymm1 + vinserti128 $1,48(%r13),%ymm3,%ymm3 + vpshufb %ymm6,%ymm2,%ymm2 + vmovdqu -64(%r14),%ymm11 + vpshufb %ymm6,%ymm3,%ymm3 + + vpaddd %ymm11,%ymm0,%ymm4 + vpaddd %ymm11,%ymm1,%ymm5 + vmovdqu %ymm4,0(%rsp) + vpaddd %ymm11,%ymm2,%ymm6 + vmovdqu %ymm5,32(%rsp) + vpaddd %ymm11,%ymm3,%ymm7 + vmovdqu %ymm6,64(%rsp) + vmovdqu %ymm7,96(%rsp) + vpalignr $8,%ymm0,%ymm1,%ymm4 + vpsrldq $4,%ymm3,%ymm8 + vpxor %ymm0,%ymm4,%ymm4 + vpxor %ymm2,%ymm8,%ymm8 + vpxor %ymm8,%ymm4,%ymm4 + vpsrld $31,%ymm4,%ymm8 + vpslldq $12,%ymm4,%ymm10 + vpaddd %ymm4,%ymm4,%ymm4 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm4,%ymm4 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm4,%ymm4 + vpxor %ymm10,%ymm4,%ymm4 + vpaddd %ymm11,%ymm4,%ymm9 + vmovdqu %ymm9,128(%rsp) + vpalignr $8,%ymm1,%ymm2,%ymm5 + vpsrldq $4,%ymm4,%ymm8 + vpxor %ymm1,%ymm5,%ymm5 + vpxor %ymm3,%ymm8,%ymm8 + vpxor %ymm8,%ymm5,%ymm5 + vpsrld $31,%ymm5,%ymm8 + vmovdqu -32(%r14),%ymm11 + vpslldq $12,%ymm5,%ymm10 + vpaddd %ymm5,%ymm5,%ymm5 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm5,%ymm5 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm5,%ymm5 + vpxor %ymm10,%ymm5,%ymm5 + vpaddd %ymm11,%ymm5,%ymm9 + vmovdqu %ymm9,160(%rsp) + vpalignr $8,%ymm2,%ymm3,%ymm6 + vpsrldq $4,%ymm5,%ymm8 + vpxor %ymm2,%ymm6,%ymm6 + vpxor %ymm4,%ymm8,%ymm8 + vpxor %ymm8,%ymm6,%ymm6 + vpsrld $31,%ymm6,%ymm8 + vpslldq $12,%ymm6,%ymm10 
+ vpaddd %ymm6,%ymm6,%ymm6 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm6,%ymm6 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm6,%ymm6 + vpxor %ymm10,%ymm6,%ymm6 + vpaddd %ymm11,%ymm6,%ymm9 + vmovdqu %ymm9,192(%rsp) + vpalignr $8,%ymm3,%ymm4,%ymm7 + vpsrldq $4,%ymm6,%ymm8 + vpxor %ymm3,%ymm7,%ymm7 + vpxor %ymm5,%ymm8,%ymm8 + vpxor %ymm8,%ymm7,%ymm7 + vpsrld $31,%ymm7,%ymm8 + vpslldq $12,%ymm7,%ymm10 + vpaddd %ymm7,%ymm7,%ymm7 + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm7,%ymm7 + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm7,%ymm7 + vpxor %ymm10,%ymm7,%ymm7 + vpaddd %ymm11,%ymm7,%ymm9 + vmovdqu %ymm9,224(%rsp) + leaq 128(%rsp),%r13 + jmp L$oop_avx2 +.p2align 5 +L$oop_avx2: + rorxl $2,%ebp,%ebx + andnl %edx,%ebp,%edi + andl %ecx,%ebp + xorl %edi,%ebp + jmp L$align32_1 +.p2align 5 +L$align32_1: + vpalignr $8,%ymm6,%ymm7,%ymm8 + vpxor %ymm4,%ymm0,%ymm0 + addl -128(%r13),%esi + andnl %ecx,%eax,%edi + vpxor %ymm1,%ymm0,%ymm0 + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpxor %ymm8,%ymm0,%ymm0 + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + vpsrld $30,%ymm0,%ymm8 + vpslld $2,%ymm0,%ymm0 + addl -124(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + vpor %ymm8,%ymm0,%ymm0 + addl %r12d,%edx + xorl %edi,%esi + addl -120(%r13),%ecx + andnl %ebp,%edx,%edi + vpaddd %ymm11,%ymm0,%ymm9 + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + vmovdqu %ymm9,256(%rsp) + addl %r12d,%ecx + xorl %edi,%edx + addl -116(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -96(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + vpalignr $8,%ymm7,%ymm0,%ymm8 + vpxor %ymm5,%ymm1,%ymm1 + addl -92(%r13),%eax + andnl %edx,%ebp,%edi + vpxor %ymm2,%ymm1,%ymm1 + addl %ebx,%eax + rorxl $27,%ebp,%r12d + 
rorxl $2,%ebp,%ebx + vpxor %ymm8,%ymm1,%ymm1 + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + vpsrld $30,%ymm1,%ymm8 + vpslld $2,%ymm1,%ymm1 + addl -88(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + vpor %ymm8,%ymm1,%ymm1 + addl %r12d,%esi + xorl %edi,%eax + addl -84(%r13),%edx + andnl %ebx,%esi,%edi + vpaddd %ymm11,%ymm1,%ymm9 + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + vmovdqu %ymm9,288(%rsp) + addl %r12d,%edx + xorl %edi,%esi + addl -64(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -60(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + vpalignr $8,%ymm0,%ymm1,%ymm8 + vpxor %ymm6,%ymm2,%ymm2 + addl -56(%r13),%ebp + andnl %esi,%ebx,%edi + vpxor %ymm3,%ymm2,%ymm2 + vmovdqu 0(%r14),%ymm11 + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpxor %ymm8,%ymm2,%ymm2 + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + vpsrld $30,%ymm2,%ymm8 + vpslld $2,%ymm2,%ymm2 + addl -52(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + vpor %ymm8,%ymm2,%ymm2 + addl %r12d,%eax + xorl %edi,%ebp + addl -32(%r13),%esi + andnl %ecx,%eax,%edi + vpaddd %ymm11,%ymm2,%ymm9 + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + vmovdqu %ymm9,320(%rsp) + addl %r12d,%esi + xorl %edi,%eax + addl -28(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -24(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + vpalignr $8,%ymm1,%ymm2,%ymm8 + vpxor %ymm7,%ymm3,%ymm3 + addl -20(%r13),%ebx + andnl 
%eax,%ecx,%edi + vpxor %ymm4,%ymm3,%ymm3 + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpxor %ymm8,%ymm3,%ymm3 + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + vpsrld $30,%ymm3,%ymm8 + vpslld $2,%ymm3,%ymm3 + addl 0(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + vpor %ymm8,%ymm3,%ymm3 + addl %r12d,%ebp + xorl %edi,%ebx + addl 4(%r13),%eax + andnl %edx,%ebp,%edi + vpaddd %ymm11,%ymm3,%ymm9 + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + vmovdqu %ymm9,352(%rsp) + addl %r12d,%eax + xorl %edi,%ebp + addl 8(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl 12(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vpalignr $8,%ymm2,%ymm3,%ymm8 + vpxor %ymm0,%ymm4,%ymm4 + addl 32(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + vpxor %ymm5,%ymm4,%ymm4 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpxor %ymm8,%ymm4,%ymm4 + addl %r12d,%ecx + xorl %ebp,%edx + addl 36(%r13),%ebx + vpsrld $30,%ymm4,%ymm8 + vpslld $2,%ymm4,%ymm4 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vpor %ymm8,%ymm4,%ymm4 + addl 40(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpaddd %ymm11,%ymm4,%ymm9 + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 44(%r13),%eax + vmovdqu %ymm9,384(%rsp) + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpalignr $8,%ymm3,%ymm4,%ymm8 + vpxor %ymm1,%ymm5,%ymm5 + addl 68(%r13),%edx + leal (%rdx,%rax,1),%edx + vpxor 
%ymm6,%ymm5,%ymm5 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + vpxor %ymm8,%ymm5,%ymm5 + addl %r12d,%edx + xorl %ebx,%esi + addl 72(%r13),%ecx + vpsrld $30,%ymm5,%ymm8 + vpslld $2,%ymm5,%ymm5 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + vpor %ymm8,%ymm5,%ymm5 + addl 76(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpaddd %ymm11,%ymm5,%ymm9 + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 96(%r13),%ebp + vmovdqu %ymm9,416(%rsp) + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 100(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpalignr $8,%ymm4,%ymm5,%ymm8 + vpxor %ymm2,%ymm6,%ymm6 + addl 104(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpxor %ymm7,%ymm6,%ymm6 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + vpxor %ymm8,%ymm6,%ymm6 + addl %r12d,%esi + xorl %ecx,%eax + addl 108(%r13),%edx + leaq 256(%r13),%r13 + vpsrld $30,%ymm6,%ymm8 + vpslld $2,%ymm6,%ymm6 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vpor %ymm8,%ymm6,%ymm6 + addl -128(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpaddd %ymm11,%ymm6,%ymm9 + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -124(%r13),%ebx + vmovdqu %ymm9,448(%rsp) + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -120(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpalignr $8,%ymm5,%ymm6,%ymm8 + vpxor %ymm3,%ymm7,%ymm7 + addl -116(%r13),%eax + leal (%rax,%rbx,1),%eax + vpxor %ymm0,%ymm7,%ymm7 + vmovdqu 32(%r14),%ymm11 + rorxl 
$27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + vpxor %ymm8,%ymm7,%ymm7 + addl %r12d,%eax + xorl %edx,%ebp + addl -96(%r13),%esi + vpsrld $30,%ymm7,%ymm8 + vpslld $2,%ymm7,%ymm7 + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpor %ymm8,%ymm7,%ymm7 + addl -92(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpaddd %ymm11,%ymm7,%ymm9 + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -88(%r13),%ecx + vmovdqu %ymm9,480(%rsp) + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -84(%r13),%ebx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + jmp L$align32_2 +.p2align 5 +L$align32_2: + vpalignr $8,%ymm6,%ymm7,%ymm8 + vpxor %ymm4,%ymm0,%ymm0 + addl -64(%r13),%ebp + xorl %esi,%ecx + vpxor %ymm1,%ymm0,%ymm0 + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + vpxor %ymm8,%ymm0,%ymm0 + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + vpsrld $30,%ymm0,%ymm8 + vpslld $2,%ymm0,%ymm0 + addl %r12d,%ebp + andl %edi,%ebx + addl -60(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + vpor %ymm8,%ymm0,%ymm0 + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + vpaddd %ymm11,%ymm0,%ymm9 + addl %r12d,%eax + andl %edi,%ebp + addl -56(%r13),%esi + xorl %ecx,%ebp + vmovdqu %ymm9,512(%rsp) + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl -52(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + addl -32(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal 
(%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + vpalignr $8,%ymm7,%ymm0,%ymm8 + vpxor %ymm5,%ymm1,%ymm1 + addl -28(%r13),%ebx + xorl %eax,%edx + vpxor %ymm2,%ymm1,%ymm1 + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + vpxor %ymm8,%ymm1,%ymm1 + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vpsrld $30,%ymm1,%ymm8 + vpslld $2,%ymm1,%ymm1 + addl %r12d,%ebx + andl %edi,%ecx + addl -24(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + vpor %ymm8,%ymm1,%ymm1 + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + vpaddd %ymm11,%ymm1,%ymm9 + addl %r12d,%ebp + andl %edi,%ebx + addl -20(%r13),%eax + xorl %edx,%ebx + vmovdqu %ymm9,544(%rsp) + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 0(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl 4(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + vpalignr $8,%ymm0,%ymm1,%ymm8 + vpxor %ymm6,%ymm2,%ymm2 + addl 8(%r13),%ecx + xorl %ebp,%esi + vpxor %ymm3,%ymm2,%ymm2 + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + vpxor %ymm8,%ymm2,%ymm2 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpsrld $30,%ymm2,%ymm8 + vpslld $2,%ymm2,%ymm2 + addl %r12d,%ecx + andl %edi,%edx + addl 12(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + vpor %ymm8,%ymm2,%ymm2 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vpaddd %ymm11,%ymm2,%ymm9 + addl %r12d,%ebx + andl %edi,%ecx + addl 32(%r13),%ebp + xorl %esi,%ecx + vmovdqu %ymm9,576(%rsp) + movl 
%edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 36(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 40(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + vpalignr $8,%ymm1,%ymm2,%ymm8 + vpxor %ymm7,%ymm3,%ymm3 + addl 44(%r13),%edx + xorl %ebx,%eax + vpxor %ymm4,%ymm3,%ymm3 + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + vpxor %ymm8,%ymm3,%ymm3 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + vpsrld $30,%ymm3,%ymm8 + vpslld $2,%ymm3,%ymm3 + addl %r12d,%edx + andl %edi,%esi + addl 64(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + vpor %ymm8,%ymm3,%ymm3 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + vpaddd %ymm11,%ymm3,%ymm9 + addl %r12d,%ecx + andl %edi,%edx + addl 68(%r13),%ebx + xorl %eax,%edx + vmovdqu %ymm9,608(%rsp) + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl 72(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 76(%r13),%eax + xorl %edx,%ebx + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl 100(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + 
addl 104(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 108(%r13),%ebx + leaq 256(%r13),%r13 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -128(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -124(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -120(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -116(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -96(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -92(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -88(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -84(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -60(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -56(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -52(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl 
%esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -32(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -28(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -24(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -20(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + addl %r12d,%edx + leaq 128(%r9),%r13 + leaq 128(%r9),%rdi + cmpq %r10,%r13 + cmovaeq %r9,%r13 + + + addl 0(%r8),%edx + addl 4(%r8),%esi + addl 8(%r8),%ebp + movl %edx,0(%r8) + addl 12(%r8),%ebx + movl %esi,4(%r8) + movl %edx,%eax + addl 16(%r8),%ecx + movl %ebp,%r12d + movl %ebp,8(%r8) + movl %ebx,%edx + + movl %ebx,12(%r8) + movl %esi,%ebp + movl %ecx,16(%r8) + + movl %ecx,%esi + movl %r12d,%ecx + + + cmpq %r10,%r9 + je L$done_avx2 + vmovdqu 64(%r14),%ymm6 + cmpq %r10,%rdi + ja L$ast_avx2 + + vmovdqu -64(%rdi),%xmm0 + vmovdqu -48(%rdi),%xmm1 + vmovdqu -32(%rdi),%xmm2 + vmovdqu -16(%rdi),%xmm3 + vinserti128 $1,0(%r13),%ymm0,%ymm0 + vinserti128 $1,16(%r13),%ymm1,%ymm1 + vinserti128 $1,32(%r13),%ymm2,%ymm2 + vinserti128 $1,48(%r13),%ymm3,%ymm3 + jmp L$ast_avx2 + +.p2align 5 +L$ast_avx2: + leaq 128+16(%rsp),%r13 + rorxl $2,%ebp,%ebx + andnl %edx,%ebp,%edi + andl %ecx,%ebp + xorl %edi,%ebp + subq $-128,%r9 + addl -128(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl -124(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -120(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -116(%r13),%ebx + andnl %eax,%ecx,%edi + addl 
%edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -96(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl -92(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl -88(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl -84(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -64(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -60(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl %edi,%ecx + addl -56(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl -52(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl -32(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl -28(%r13),%edx + andnl %ebx,%esi,%edi + addl %eax,%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + andl %ebp,%esi + addl %r12d,%edx + xorl %edi,%esi + addl -24(%r13),%ecx + andnl %ebp,%edx,%edi + addl %esi,%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + andl %eax,%edx + addl %r12d,%ecx + xorl %edi,%edx + addl -20(%r13),%ebx + andnl %eax,%ecx,%edi + addl %edx,%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + andl %esi,%ecx + addl %r12d,%ebx + xorl 
%edi,%ecx + addl 0(%r13),%ebp + andnl %esi,%ebx,%edi + addl %ecx,%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + andl %edx,%ebx + addl %r12d,%ebp + xorl %edi,%ebx + addl 4(%r13),%eax + andnl %edx,%ebp,%edi + addl %ebx,%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + andl %ecx,%ebp + addl %r12d,%eax + xorl %edi,%ebp + addl 8(%r13),%esi + andnl %ecx,%eax,%edi + addl %ebp,%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + andl %ebx,%eax + addl %r12d,%esi + xorl %edi,%eax + addl 12(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl 32(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 36(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 40(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 44(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl 64(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vmovdqu -64(%r14),%ymm11 + vpshufb %ymm6,%ymm0,%ymm0 + addl 68(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl 72(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl 76(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl 96(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl 100(%r13),%eax + leal 
(%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpshufb %ymm6,%ymm1,%ymm1 + vpaddd %ymm11,%ymm0,%ymm8 + addl 104(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl 108(%r13),%edx + leaq 256(%r13),%r13 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -128(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -124(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -120(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vmovdqu %ymm8,0(%rsp) + vpshufb %ymm6,%ymm2,%ymm2 + vpaddd %ymm11,%ymm1,%ymm9 + addl -116(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -92(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + addl -88(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -84(%r13),%ebx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + vmovdqu %ymm9,32(%rsp) + vpshufb %ymm6,%ymm3,%ymm3 + vpaddd %ymm11,%ymm2,%ymm6 + addl -64(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl 
%r12d,%ebp + andl %edi,%ebx + addl -60(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl -56(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl -52(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + addl -32(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + jmp L$align32_3 +.p2align 5 +L$align32_3: + vmovdqu %ymm6,64(%rsp) + vpaddd %ymm11,%ymm3,%ymm7 + addl -28(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl -24(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl -20(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 0(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + addl 4(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + andl %edi,%esi + vmovdqu %ymm7,96(%rsp) + addl 8(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + 
rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + andl %edi,%edx + addl 12(%r13),%ebx + xorl %eax,%edx + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + andl %edi,%ecx + addl 32(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 36(%r13),%eax + xorl %edx,%ebx + movl %ecx,%edi + xorl %edx,%edi + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + andl %edi,%ebp + addl 40(%r13),%esi + xorl %ecx,%ebp + movl %ebx,%edi + xorl %ecx,%edi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + andl %edi,%eax + vpalignr $8,%ymm0,%ymm1,%ymm4 + addl 44(%r13),%edx + xorl %ebx,%eax + movl %ebp,%edi + xorl %ebx,%edi + vpsrldq $4,%ymm3,%ymm8 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpxor %ymm0,%ymm4,%ymm4 + vpxor %ymm2,%ymm8,%ymm8 + xorl %ebp,%esi + addl %r12d,%edx + vpxor %ymm8,%ymm4,%ymm4 + andl %edi,%esi + addl 64(%r13),%ecx + xorl %ebp,%esi + movl %eax,%edi + vpsrld $31,%ymm4,%ymm8 + xorl %ebp,%edi + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + vpslldq $12,%ymm4,%ymm10 + vpaddd %ymm4,%ymm4,%ymm4 + rorxl $2,%edx,%esi + xorl %eax,%edx + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm4,%ymm4 + addl %r12d,%ecx + andl %edi,%edx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm4,%ymm4 + addl 68(%r13),%ebx + xorl %eax,%edx + vpxor %ymm10,%ymm4,%ymm4 + movl %esi,%edi + xorl %eax,%edi + leal (%rbx,%rdx,1),%ebx + vpaddd %ymm11,%ymm4,%ymm9 + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + vmovdqu %ymm9,128(%rsp) + addl %r12d,%ebx + andl %edi,%ecx + addl 72(%r13),%ebp + xorl %esi,%ecx + movl %edx,%edi + xorl %esi,%edi + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl 
%edx,%ebx + addl %r12d,%ebp + andl %edi,%ebx + addl 76(%r13),%eax + xorl %edx,%ebx + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpalignr $8,%ymm1,%ymm2,%ymm5 + addl 96(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpsrldq $4,%ymm4,%ymm8 + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + vpxor %ymm1,%ymm5,%ymm5 + vpxor %ymm3,%ymm8,%ymm8 + addl 100(%r13),%edx + leal (%rdx,%rax,1),%edx + vpxor %ymm8,%ymm5,%ymm5 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + xorl %ebp,%esi + addl %r12d,%edx + vpsrld $31,%ymm5,%ymm8 + vmovdqu -32(%r14),%ymm11 + xorl %ebx,%esi + addl 104(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + vpslldq $12,%ymm5,%ymm10 + vpaddd %ymm5,%ymm5,%ymm5 + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm5,%ymm5 + xorl %eax,%edx + addl %r12d,%ecx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm5,%ymm5 + xorl %ebp,%edx + addl 108(%r13),%ebx + leaq 256(%r13),%r13 + vpxor %ymm10,%ymm5,%ymm5 + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + vpaddd %ymm11,%ymm5,%ymm9 + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vmovdqu %ymm9,160(%rsp) + addl -128(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpalignr $8,%ymm2,%ymm3,%ymm6 + addl -124(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + vpsrldq $4,%ymm5,%ymm8 + xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + vpxor %ymm2,%ymm6,%ymm6 + vpxor %ymm4,%ymm8,%ymm8 + addl -120(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpxor %ymm8,%ymm6,%ymm6 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + vpsrld $31,%ymm6,%ymm8 + xorl %ecx,%eax + addl -116(%r13),%edx + leal (%rdx,%rax,1),%edx + vpslldq $12,%ymm6,%ymm10 + vpaddd %ymm6,%ymm6,%ymm6 + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpsrld $30,%ymm10,%ymm9 + 
vpor %ymm8,%ymm6,%ymm6 + xorl %ebp,%esi + addl %r12d,%edx + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm6,%ymm6 + xorl %ebx,%esi + addl -96(%r13),%ecx + vpxor %ymm10,%ymm6,%ymm6 + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + vpaddd %ymm11,%ymm6,%ymm9 + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + vmovdqu %ymm9,192(%rsp) + addl -92(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + vpalignr $8,%ymm3,%ymm4,%ymm7 + addl -88(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + vpsrldq $4,%ymm6,%ymm8 + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + vpxor %ymm3,%ymm7,%ymm7 + vpxor %ymm5,%ymm8,%ymm8 + addl -84(%r13),%eax + leal (%rax,%rbx,1),%eax + vpxor %ymm8,%ymm7,%ymm7 + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + xorl %ecx,%ebp + addl %r12d,%eax + vpsrld $31,%ymm7,%ymm8 + xorl %edx,%ebp + addl -64(%r13),%esi + leal (%rsi,%rbp,1),%esi + vpslldq $12,%ymm7,%ymm10 + vpaddd %ymm7,%ymm7,%ymm7 + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + vpsrld $30,%ymm10,%ymm9 + vpor %ymm8,%ymm7,%ymm7 + xorl %ebx,%eax + addl %r12d,%esi + vpslld $2,%ymm10,%ymm10 + vpxor %ymm9,%ymm7,%ymm7 + xorl %ecx,%eax + addl -60(%r13),%edx + vpxor %ymm10,%ymm7,%ymm7 + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + rorxl $2,%esi,%eax + vpaddd %ymm11,%ymm7,%ymm9 + xorl %ebp,%esi + addl %r12d,%edx + xorl %ebx,%esi + vmovdqu %ymm9,224(%rsp) + addl -56(%r13),%ecx + leal (%rcx,%rsi,1),%ecx + rorxl $27,%edx,%r12d + rorxl $2,%edx,%esi + xorl %eax,%edx + addl %r12d,%ecx + xorl %ebp,%edx + addl -52(%r13),%ebx + leal (%rbx,%rdx,1),%ebx + rorxl $27,%ecx,%r12d + rorxl $2,%ecx,%edx + xorl %esi,%ecx + addl %r12d,%ebx + xorl %eax,%ecx + addl -32(%r13),%ebp + leal (%rcx,%rbp,1),%ebp + rorxl $27,%ebx,%r12d + rorxl $2,%ebx,%ecx + xorl %edx,%ebx + addl %r12d,%ebp + xorl %esi,%ebx + addl -28(%r13),%eax + leal (%rax,%rbx,1),%eax + rorxl $27,%ebp,%r12d + rorxl $2,%ebp,%ebx + 
xorl %ecx,%ebp + addl %r12d,%eax + xorl %edx,%ebp + addl -24(%r13),%esi + leal (%rsi,%rbp,1),%esi + rorxl $27,%eax,%r12d + rorxl $2,%eax,%ebp + xorl %ebx,%eax + addl %r12d,%esi + xorl %ecx,%eax + addl -20(%r13),%edx + leal (%rdx,%rax,1),%edx + rorxl $27,%esi,%r12d + addl %r12d,%edx + leaq 128(%rsp),%r13 + + + addl 0(%r8),%edx + addl 4(%r8),%esi + addl 8(%r8),%ebp + movl %edx,0(%r8) + addl 12(%r8),%ebx + movl %esi,4(%r8) + movl %edx,%eax + addl 16(%r8),%ecx + movl %ebp,%r12d + movl %ebp,8(%r8) + movl %ebx,%edx + + movl %ebx,12(%r8) + movl %esi,%ebp + movl %ecx,16(%r8) + + movl %ecx,%esi + movl %r12d,%ecx + + + cmpq %r10,%r9 + jbe L$oop_avx2 + +L$done_avx2: + vzeroupper + movq -40(%r11),%r14 + + movq -32(%r11),%r13 + + movq -24(%r11),%r12 + + movq -16(%r11),%rbp + + movq -8(%r11),%rbx + + leaq (%r11),%rsp + +L$epilogue_avx2: + .byte 0xf3,0xc3 + + .p2align 6 K_XX_XX: .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-586.linux.x86.S index 2f1853a..92c6b5c 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-586.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.ios.aarch64.S index 40d1613..6d4303a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.ios.aarch64.S @@ -14,7 +14,7 @@ #if defined(BORINGSSL_PREFIX) #include #endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. +// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy @@ -42,6 +42,7 @@ // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) +// Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. @@ -50,7 +51,7 @@ // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster +// generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ @@ -60,11 +61,13 @@ .text +.private_extern _OPENSSL_armcap_P .globl _sha256_block_data_order .private_extern _sha256_block_data_order .align 6 _sha256_block_data_order: + AARCH64_VALID_CALL_TARGET #ifndef __KERNEL__ #if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P @@ -75,6 +78,7 @@ _sha256_block_data_order: tst w16,#ARMV8_SHA256 b.ne Lv8_entry #endif + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 @@ -99,7 +103,7 @@ Loop: ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 @@ -122,7 +126,7 @@ Loop: add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 @@ -147,7 +151,7 @@ Loop: add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) @@ -171,7 +175,7 @@ Loop: add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 @@ -196,7 +200,7 @@ Loop: add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) @@ -220,7 +224,7 @@ Loop: add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 @@ -245,7 +249,7 @@ Loop: add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) @@ -269,7 +273,7 @@ Loop: add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 @@ -294,7 +298,7 @@ Loop: add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 
// h+=Sigma0(a) @@ -318,7 +322,7 @@ Loop: add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 @@ -343,7 +347,7 @@ Loop: add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) @@ -367,7 +371,7 @@ Loop: add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 @@ -393,7 +397,7 @@ Loop: add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) @@ -418,7 +422,7 @@ Loop: add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] @@ -444,7 +448,7 @@ Loop: add w22,w22,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] @@ -470,7 +474,7 @@ Loop: add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] @@ -1035,6 +1039,7 @@ Loop_16_xx: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -1069,6 +1074,7 @@ LK256: .align 6 sha256_block_armv8: Lv8_entry: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-16]! 
add x29,sp,#0 @@ -1204,10 +1210,6 @@ Loop_hw: ldr x29,[sp],#16 ret -#endif -#ifndef __KERNEL__ -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P #endif #endif // !OPENSSL_NO_ASM #endif // defined(__aarch64__) && defined(__APPLE__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.linux.aarch64.S index a096b3f..12a365e 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-armv8.linux.aarch64.S @@ -15,7 +15,7 @@ #if defined(BORINGSSL_PREFIX) #include #endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. +// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy @@ -43,6 +43,7 @@ // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) +// Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. @@ -51,7 +52,7 @@ // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster +// generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. 
#ifndef __KERNEL__ @@ -61,11 +62,13 @@ .text +.hidden OPENSSL_armcap_P .globl sha256_block_data_order .hidden sha256_block_data_order .type sha256_block_data_order,%function .align 6 sha256_block_data_order: + AARCH64_VALID_CALL_TARGET #ifndef __KERNEL__ #if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 adrp x16,:pg_hi21_nc:OPENSSL_armcap_P @@ -76,6 +79,7 @@ sha256_block_data_order: tst w16,#ARMV8_SHA256 b.ne .Lv8_entry #endif + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 @@ -100,7 +104,7 @@ sha256_block_data_order: ldr w19,[x30],#4 // *K++ eor w28,w21,w22 // magic seed str x1,[x29,#112] -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w3,w3 // 0 #endif ror w16,w24,#6 @@ -123,7 +127,7 @@ sha256_block_data_order: add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w4,w4 // 1 #endif ldp w5,w6,[x1],#2*4 @@ -148,7 +152,7 @@ sha256_block_data_order: add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w5,w5 // 2 #endif add w26,w26,w17 // h+=Sigma0(a) @@ -172,7 +176,7 @@ sha256_block_data_order: add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w6,w6 // 3 #endif ldp w7,w8,[x1],#2*4 @@ -197,7 +201,7 @@ sha256_block_data_order: add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w7,w7 // 4 #endif add w24,w24,w17 // h+=Sigma0(a) @@ -221,7 +225,7 @@ sha256_block_data_order: add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w8,w8 // 5 #endif ldp w9,w10,[x1],#2*4 @@ -246,7 +250,7 @@ sha256_block_data_order: add w22,w22,w19 // 
h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w9,w9 // 6 #endif add w22,w22,w17 // h+=Sigma0(a) @@ -270,7 +274,7 @@ sha256_block_data_order: add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w10,w10 // 7 #endif ldp w11,w12,[x1],#2*4 @@ -295,7 +299,7 @@ sha256_block_data_order: add w20,w20,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w20,w20,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w11,w11 // 8 #endif add w20,w20,w17 // h+=Sigma0(a) @@ -319,7 +323,7 @@ sha256_block_data_order: add w27,w27,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w27,w27,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w12,w12 // 9 #endif ldp w13,w14,[x1],#2*4 @@ -344,7 +348,7 @@ sha256_block_data_order: add w26,w26,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w26,w26,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w13,w13 // 10 #endif add w26,w26,w17 // h+=Sigma0(a) @@ -368,7 +372,7 @@ sha256_block_data_order: add w25,w25,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w25,w25,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w14,w14 // 11 #endif ldp w15,w0,[x1],#2*4 @@ -394,7 +398,7 @@ sha256_block_data_order: add w24,w24,w19 // h+=Maj(a,b,c) ldr w19,[x30],#4 // *K++, w28 in next round //add w24,w24,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w15,w15 // 12 #endif add w24,w24,w17 // h+=Sigma0(a) @@ -419,7 +423,7 @@ sha256_block_data_order: add w23,w23,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w23,w23,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w0,w0 // 13 #endif ldp w1,w2,[x1] @@ -445,7 +449,7 @@ sha256_block_data_order: add w22,w22,w19 // h+=Maj(a,b,c) 
ldr w19,[x30],#4 // *K++, w28 in next round //add w22,w22,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w1,w1 // 14 #endif ldr w6,[sp,#12] @@ -471,7 +475,7 @@ sha256_block_data_order: add w21,w21,w28 // h+=Maj(a,b,c) ldr w28,[x30],#4 // *K++, w19 in next round //add w21,w21,w17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev w2,w2 // 15 #endif ldr w7,[sp,#0] @@ -1036,6 +1040,7 @@ sha256_block_data_order: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 + AARCH64_VALIDATE_LINK_REGISTER ret .size sha256_block_data_order,.-sha256_block_data_order @@ -1070,6 +1075,7 @@ sha256_block_data_order: .align 6 sha256_block_armv8: .Lv8_entry: + // Armv8.3-A PAuth: even though x30 is pushed to stack it is not popped later. stp x29,x30,[sp,#-16]! add x29,sp,#0 @@ -1206,10 +1212,6 @@ sha256_block_armv8: ret .size sha256_block_armv8,.-sha256_block_armv8 #endif -#ifndef __KERNEL__ -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P -#endif #endif #endif // !OPENSSL_NO_ASM .section .note.GNU-stack,"",%progbits diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.linux.x86_64.S index 9d51c53..84e06cd 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.mac.x86_64.S index f596399..130e979 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha256-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-586.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-586.linux.x86.S index 3de12d1..b207f80 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-586.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-586.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.ios.aarch64.S index c6ac349..d1692b5 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.ios.aarch64.S @@ -14,7 +14,7 @@ #if defined(BORINGSSL_PREFIX) #include #endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. +// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy @@ -42,6 +42,7 @@ // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) +// Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. @@ -50,7 +51,7 @@ // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster +// generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ @@ -60,11 +61,24 @@ .text +.private_extern _OPENSSL_armcap_P .globl _sha512_block_data_order .private_extern _sha512_block_data_order .align 6 _sha512_block_data_order: + AARCH64_VALID_CALL_TARGET +#ifndef __KERNEL__ +#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 + adrp x16,:pg_hi21_nc:_OPENSSL_armcap_P +#else + adrp x16,_OPENSSL_armcap_P@PAGE +#endif + ldr w16,[x16,_OPENSSL_armcap_P@PAGEOFF] + tst w16,#ARMV8_SHA512 + b.ne Lv8_entry +#endif + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! 
add x29,sp,#0 @@ -89,7 +103,7 @@ Loop: ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 @@ -112,7 +126,7 @@ Loop: add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 @@ -137,7 +151,7 @@ Loop: add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) @@ -161,7 +175,7 @@ Loop: add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 @@ -186,7 +200,7 @@ Loop: add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) @@ -210,7 +224,7 @@ Loop: add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 @@ -235,7 +249,7 @@ Loop: add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) @@ -259,7 +273,7 @@ Loop: add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 @@ -284,7 +298,7 @@ Loop: add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 
// h+=Sigma0(a) @@ -308,7 +322,7 @@ Loop: add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 @@ -333,7 +347,7 @@ Loop: add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) @@ -357,7 +371,7 @@ Loop: add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 @@ -383,7 +397,7 @@ Loop: add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) @@ -408,7 +422,7 @@ Loop: add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] @@ -434,7 +448,7 @@ Loop: add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] @@ -460,7 +474,7 @@ Loop: add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] @@ -1025,6 +1039,7 @@ Loop_16_xx: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -1077,9 +1092,526 @@ LK512: .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 
.align 2 .align 2 +.text #ifndef __KERNEL__ -.comm _OPENSSL_armcap_P,4,4 -.private_extern _OPENSSL_armcap_P + +.align 6 +sha512_block_armv8: +Lv8_entry: + stp x29,x30,[sp,#-16]! + add x29,sp,#0 + + ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input + ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 + + ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context + adrp x3,LK512@PAGE + add x3,x3,LK512@PAGEOFF + + rev64 v16.16b,v16.16b + rev64 v17.16b,v17.16b + rev64 v18.16b,v18.16b + rev64 v19.16b,v19.16b + rev64 v20.16b,v20.16b + rev64 v21.16b,v21.16b + rev64 v22.16b,v22.16b + rev64 v23.16b,v23.16b + b Loop_hw + +.align 4 +Loop_hw: + ld1 {v24.2d},[x3],#16 + subs x2,x2,#1 + sub x4,x1,#128 + orr v26.16b,v0.16b,v0.16b // offload + orr v27.16b,v1.16b,v1.16b + orr v28.16b,v2.16b,v2.16b + orr v29.16b,v3.16b,v3.16b + csel x1,x1,x4,ne // conditional rewind + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08272 //sha512su0 
v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add 
v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext 
v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08217 //sha512su0 v23.16b,v16.16b + ext 
v7.16b,v19.16b,v20.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 
0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext 
v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.long 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.long 0xce6680a1 
//sha512h v1.16b,v5.16b,v6.16b +.long 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.long 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.long 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.long 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.long 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.long 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v16.2d + ld1 {v16.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b + rev64 v16.16b,v16.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v17.2d + ld1 
{v17.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b + rev64 v17.16b,v17.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v18.2d + ld1 {v18.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b + rev64 v18.16b,v18.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v19.2d + ld1 {v19.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.long 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b + rev64 v19.16b,v19.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.long 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v20.2d + ld1 {v20.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.long 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b + rev64 v20.16b,v20.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.long 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v21.2d + ld1 {v21.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.long 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b + rev64 v21.16b,v21.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.long 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v22.2d + ld1 {v22.16b},[x1],#16 // load 
next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.long 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b + rev64 v22.16b,v22.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.long 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + sub x3,x3,#80*8 // rewind + add v25.2d,v25.2d,v23.2d + ld1 {v23.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.long 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b + rev64 v23.16b,v23.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.long 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v0.2d,v0.2d,v26.2d // accumulate + add v1.2d,v1.2d,v27.2d + add v2.2d,v2.2d,v28.2d + add v3.2d,v3.2d,v29.2d + + cbnz x2,Loop_hw + + st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context + + ldr x29,[sp],#16 + ret + #endif #endif // !OPENSSL_NO_ASM #endif // defined(__aarch64__) && defined(__APPLE__) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.linux.aarch64.S index 0b90237..a8e5b67 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-armv8.linux.aarch64.S @@ -15,7 +15,7 @@ #if defined(BORINGSSL_PREFIX) #include #endif -// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved. +// Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved. // // Licensed under the OpenSSL license (the "License"). You may not use // this file except in compliance with the License. You can obtain a copy @@ -43,6 +43,7 @@ // Denver 2.01 10.5 (+26%) 6.70 (+8%) // X-Gene 20.0 (+100%) 12.8 (+300%(***)) // Mongoose 2.36 13.0 (+50%) 8.36 (+33%) +// Kryo 1.92 17.4 (+30%) 11.2 (+8%) // // (*) Software SHA256 results are of lesser relevance, presented // mostly for informational purposes. 
@@ -51,7 +52,7 @@ // on Cortex-A53 (or by 4 cycles per round). // (***) Super-impressive coefficients over gcc-generated code are // indication of some compiler "pathology", most notably code -// generated with -mgeneral-regs-only is significanty faster +// generated with -mgeneral-regs-only is significantly faster // and the gap is only 40-90%. #ifndef __KERNEL__ @@ -61,11 +62,24 @@ .text +.hidden OPENSSL_armcap_P .globl sha512_block_data_order .hidden sha512_block_data_order .type sha512_block_data_order,%function .align 6 sha512_block_data_order: + AARCH64_VALID_CALL_TARGET +#ifndef __KERNEL__ +#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10 + adrp x16,:pg_hi21_nc:OPENSSL_armcap_P +#else + adrp x16,OPENSSL_armcap_P +#endif + ldr w16,[x16,:lo12:OPENSSL_armcap_P] + tst w16,#ARMV8_SHA512 + b.ne .Lv8_entry +#endif + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-128]! add x29,sp,#0 @@ -90,7 +104,7 @@ sha512_block_data_order: ldr x19,[x30],#8 // *K++ eor x28,x21,x22 // magic seed str x1,[x29,#112] -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x3,x3 // 0 #endif ror x16,x24,#14 @@ -113,7 +127,7 @@ sha512_block_data_order: add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x4,x4 // 1 #endif ldp x5,x6,[x1],#2*8 @@ -138,7 +152,7 @@ sha512_block_data_order: add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x5,x5 // 2 #endif add x26,x26,x17 // h+=Sigma0(a) @@ -162,7 +176,7 @@ sha512_block_data_order: add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x6,x6 // 3 #endif ldp x7,x8,[x1],#2*8 @@ -187,7 +201,7 @@ sha512_block_data_order: add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) 
-#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x7,x7 // 4 #endif add x24,x24,x17 // h+=Sigma0(a) @@ -211,7 +225,7 @@ sha512_block_data_order: add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x8,x8 // 5 #endif ldp x9,x10,[x1],#2*8 @@ -236,7 +250,7 @@ sha512_block_data_order: add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x9,x9 // 6 #endif add x22,x22,x17 // h+=Sigma0(a) @@ -260,7 +274,7 @@ sha512_block_data_order: add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x10,x10 // 7 #endif ldp x11,x12,[x1],#2*8 @@ -285,7 +299,7 @@ sha512_block_data_order: add x20,x20,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x20,x20,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x11,x11 // 8 #endif add x20,x20,x17 // h+=Sigma0(a) @@ -309,7 +323,7 @@ sha512_block_data_order: add x27,x27,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x27,x27,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x12,x12 // 9 #endif ldp x13,x14,[x1],#2*8 @@ -334,7 +348,7 @@ sha512_block_data_order: add x26,x26,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x26,x26,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x13,x13 // 10 #endif add x26,x26,x17 // h+=Sigma0(a) @@ -358,7 +372,7 @@ sha512_block_data_order: add x25,x25,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x25,x25,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x14,x14 // 11 #endif ldp x15,x0,[x1],#2*8 @@ -384,7 +398,7 @@ sha512_block_data_order: add x24,x24,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x24,x24,x17 // h+=Sigma0(a) -#ifndef 
__ARMEB__ +#ifndef __AARCH64EB__ rev x15,x15 // 12 #endif add x24,x24,x17 // h+=Sigma0(a) @@ -409,7 +423,7 @@ sha512_block_data_order: add x23,x23,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x23,x23,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x0,x0 // 13 #endif ldp x1,x2,[x1] @@ -435,7 +449,7 @@ sha512_block_data_order: add x22,x22,x19 // h+=Maj(a,b,c) ldr x19,[x30],#8 // *K++, x28 in next round //add x22,x22,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x1,x1 // 14 #endif ldr x6,[sp,#24] @@ -461,7 +475,7 @@ sha512_block_data_order: add x21,x21,x28 // h+=Maj(a,b,c) ldr x28,[x30],#8 // *K++, x19 in next round //add x21,x21,x17 // h+=Sigma0(a) -#ifndef __ARMEB__ +#ifndef __AARCH64EB__ rev x2,x2 // 15 #endif ldr x7,[sp,#0] @@ -1026,6 +1040,7 @@ sha512_block_data_order: ldp x25,x26,[x29,#64] ldp x27,x28,[x29,#80] ldp x29,x30,[sp],#128 + AARCH64_VALIDATE_LINK_REGISTER ret .size sha512_block_data_order,.-sha512_block_data_order @@ -1078,9 +1093,526 @@ sha512_block_data_order: .byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 .align 2 .align 2 +.text #ifndef __KERNEL__ -.comm OPENSSL_armcap_P,4,4 -.hidden OPENSSL_armcap_P +.type sha512_block_armv8,%function +.align 6 +sha512_block_armv8: +.Lv8_entry: + stp x29,x30,[sp,#-16]! 
+ add x29,sp,#0 + + ld1 {v16.16b,v17.16b,v18.16b,v19.16b},[x1],#64 // load input + ld1 {v20.16b,v21.16b,v22.16b,v23.16b},[x1],#64 + + ld1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // load context + adrp x3,.LK512 + add x3,x3,:lo12:.LK512 + + rev64 v16.16b,v16.16b + rev64 v17.16b,v17.16b + rev64 v18.16b,v18.16b + rev64 v19.16b,v19.16b + rev64 v20.16b,v20.16b + rev64 v21.16b,v21.16b + rev64 v22.16b,v22.16b + rev64 v23.16b,v23.16b + b .Loop_hw + +.align 4 +.Loop_hw: + ld1 {v24.2d},[x3],#16 + subs x2,x2,#1 + sub x4,x1,#128 + orr v26.16b,v0.16b,v0.16b // offload + orr v27.16b,v1.16b,v1.16b + orr v28.16b,v2.16b,v2.16b + orr v29.16b,v3.16b,v3.16b + csel x1,x1,x4,ne // conditional rewind + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" 
+.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext 
v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.inst 0xce6680a2 
//sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + 
add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678af0 //sha512su1 v16.16b,v23.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // 
"T1 + H + K512[i]" +.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v24.2d,v24.2d,v16.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08230 //sha512su0 v16.16b,v17.16b + ext v7.16b,v20.16b,v21.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678af0 
//sha512su1 v16.16b,v23.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v25.2d,v25.2d,v17.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08251 //sha512su0 v17.16b,v18.16b + ext v7.16b,v21.16b,v22.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678a11 //sha512su1 v17.16b,v16.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v24.2d,v24.2d,v18.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec08272 //sha512su0 v18.16b,v19.16b + ext v7.16b,v22.16b,v23.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678a32 //sha512su1 v18.16b,v17.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + add v25.2d,v25.2d,v19.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08293 //sha512su0 v19.16b,v20.16b + ext v7.16b,v23.16b,v16.16b,#8 +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b +.inst 0xce678a53 //sha512su1 v19.16b,v18.16b,v7.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + add v24.2d,v24.2d,v20.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082b4 //sha512su0 v20.16b,v21.16b + ext v7.16b,v16.16b,v17.16b,#8 +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b +.inst 0xce678a74 //sha512su1 v20.16b,v19.16b,v7.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + add v25.2d,v25.2d,v21.2d + ld1 {v24.2d},[x3],#16 
+ ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec082d5 //sha512su0 v21.16b,v22.16b + ext v7.16b,v17.16b,v18.16b,#8 +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b +.inst 0xce678a95 //sha512su1 v21.16b,v20.16b,v7.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v24.2d,v24.2d,v22.2d + ld1 {v25.2d},[x3],#16 + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xcec082f6 //sha512su0 v22.16b,v23.16b + ext v7.16b,v18.16b,v19.16b,#8 +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b +.inst 0xce678ab6 //sha512su1 v22.16b,v21.16b,v7.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + add v25.2d,v25.2d,v23.2d + ld1 {v24.2d},[x3],#16 + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xcec08217 //sha512su0 v23.16b,v16.16b + ext v7.16b,v19.16b,v20.16b,#8 +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b +.inst 0xce678ad7 //sha512su1 v23.16b,v22.16b,v7.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v16.2d + ld1 {v16.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b + rev64 v16.16b,v16.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v17.2d + ld1 {v17.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b + 
rev64 v17.16b,v17.16b + add v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v18.2d + ld1 {v18.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b + rev64 v18.16b,v18.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v19.2d + ld1 {v19.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v2.16b,v3.16b,#8 + ext v6.16b,v1.16b,v2.16b,#8 + add v3.2d,v3.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xce6680a3 //sha512h v3.16b,v5.16b,v6.16b + rev64 v19.16b,v19.16b + add v4.2d,v1.2d,v3.2d // "D + T1" +.inst 0xce608423 //sha512h2 v3.16b,v1.16b,v0.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v20.2d + ld1 {v20.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v4.16b,v2.16b,#8 + ext v6.16b,v0.16b,v4.16b,#8 + add v2.2d,v2.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xce6680a2 //sha512h v2.16b,v5.16b,v6.16b + rev64 v20.16b,v20.16b + add v1.2d,v0.2d,v2.2d // "D + T1" +.inst 0xce638402 //sha512h2 v2.16b,v0.16b,v3.16b + ld1 {v24.2d},[x3],#16 + add v25.2d,v25.2d,v21.2d + ld1 {v21.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v1.16b,v4.16b,#8 + ext v6.16b,v3.16b,v1.16b,#8 + add v4.2d,v4.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xce6680a4 //sha512h v4.16b,v5.16b,v6.16b + rev64 v21.16b,v21.16b + add v0.2d,v3.2d,v4.2d // "D + T1" +.inst 0xce628464 //sha512h2 v4.16b,v3.16b,v2.16b + ld1 {v25.2d},[x3],#16 + add v24.2d,v24.2d,v22.2d + ld1 {v22.16b},[x1],#16 // load next input + ext v24.16b,v24.16b,v24.16b,#8 + ext v5.16b,v0.16b,v1.16b,#8 + ext v6.16b,v2.16b,v0.16b,#8 + add v1.2d,v1.2d,v24.2d // "T1 + H + K512[i]" +.inst 0xce6680a1 //sha512h v1.16b,v5.16b,v6.16b + rev64 v22.16b,v22.16b + add 
v3.2d,v2.2d,v1.2d // "D + T1" +.inst 0xce648441 //sha512h2 v1.16b,v2.16b,v4.16b + sub x3,x3,#80*8 // rewind + add v25.2d,v25.2d,v23.2d + ld1 {v23.16b},[x1],#16 // load next input + ext v25.16b,v25.16b,v25.16b,#8 + ext v5.16b,v3.16b,v0.16b,#8 + ext v6.16b,v4.16b,v3.16b,#8 + add v0.2d,v0.2d,v25.2d // "T1 + H + K512[i]" +.inst 0xce6680a0 //sha512h v0.16b,v5.16b,v6.16b + rev64 v23.16b,v23.16b + add v2.2d,v4.2d,v0.2d // "D + T1" +.inst 0xce618480 //sha512h2 v0.16b,v4.16b,v1.16b + add v0.2d,v0.2d,v26.2d // accumulate + add v1.2d,v1.2d,v27.2d + add v2.2d,v2.2d,v28.2d + add v3.2d,v3.2d,v29.2d + + cbnz x2,.Loop_hw + + st1 {v0.2d,v1.2d,v2.2d,v3.2d},[x0] // store context + + ldr x29,[sp],#16 + ret +.size sha512_block_armv8,.-sha512_block_armv8 #endif #endif #endif // !OPENSSL_NO_ASM diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.linux.x86_64.S index e69fc33..eaf34d6 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.mac.x86_64.S index f1c4046..26cca5a 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/sha512-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.ios.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.ios.aarch64.S index 4158cfb..bf79ab5 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.ios.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.ios.aarch64.S @@ -14,6 +14,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .section __TEXT,__const @@ -216,6 +218,7 @@ Lenc_entry: .align 4 _vpaes_encrypt: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 @@ -225,6 +228,7 @@ _vpaes_encrypt: st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -453,6 +457,7 @@ Ldec_entry: .align 4 _vpaes_decrypt: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 @@ -462,6 +467,7 @@ _vpaes_decrypt: st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -631,6 +637,7 @@ _vpaes_key_preheat: .align 4 _vpaes_schedule_core: + AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! 
add x29,sp,#0 @@ -800,6 +807,7 @@ Lschedule_mangle_last_dec: eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -1002,7 +1010,7 @@ Lschedule_mangle_dec: Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - add x8, x8, #64-16 // add $-16, %r8 + add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret @@ -1013,6 +1021,7 @@ Lschedule_mangle_both: .align 4 _vpaes_set_encrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1028,6 +1037,7 @@ _vpaes_set_encrypt_key: ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret @@ -1036,6 +1046,7 @@ _vpaes_set_encrypt_key: .align 4 _vpaes_set_decrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1055,6 +1066,7 @@ _vpaes_set_decrypt_key: ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_cbc_encrypt @@ -1062,6 +1074,7 @@ _vpaes_set_decrypt_key: .align 4 _vpaes_cbc_encrypt: + AARCH64_SIGN_LINK_REGISTER cbz x2, Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt @@ -1089,12 +1102,15 @@ Lcbc_enc_loop: ldp x29,x30,[sp],#16 Lcbc_abort: + AARCH64_VALIDATE_LINK_REGISTER ret .align 4 vpaes_cbc_decrypt: + // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to + // only from vpaes_cbc_encrypt which has already signed the return address. stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! 
// ABI spec says so @@ -1136,6 +1152,7 @@ Lcbc_dec_done: ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .globl _vpaes_ctr32_encrypt_blocks @@ -1143,6 +1160,7 @@ Lcbc_dec_done: .align 4 _vpaes_ctr32_encrypt_blocks: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1210,6 +1228,7 @@ Lctr32_done: ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret #endif // !OPENSSL_NO_ASM diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.linux.aarch64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.linux.aarch64.S index f92d540..d944667 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.linux.aarch64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-armv8.linux.aarch64.S @@ -15,6 +15,8 @@ #if defined(BORINGSSL_PREFIX) #include #endif +#include + .section .rodata .type _vpaes_consts,%object @@ -217,6 +219,7 @@ _vpaes_encrypt_core: .type vpaes_encrypt,%function .align 4 vpaes_encrypt: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 @@ -226,6 +229,7 @@ vpaes_encrypt: st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_encrypt,.-vpaes_encrypt @@ -454,6 +458,7 @@ _vpaes_decrypt_core: .type vpaes_decrypt,%function .align 4 vpaes_decrypt: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 @@ -463,6 +468,7 @@ vpaes_decrypt: st1 {v0.16b}, [x1] ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_decrypt,.-vpaes_decrypt @@ -632,6 +638,7 @@ _vpaes_key_preheat: .type _vpaes_schedule_core,%function .align 4 _vpaes_schedule_core: + AARCH64_SIGN_LINK_REGISTER stp x29, x30, [sp,#-16]! 
add x29,sp,#0 @@ -801,6 +808,7 @@ _vpaes_schedule_core: eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7 ldp x29, x30, [sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size _vpaes_schedule_core,.-_vpaes_schedule_core @@ -1003,7 +1011,7 @@ _vpaes_schedule_mangle: .Lschedule_mangle_both: tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3 - add x8, x8, #64-16 // add $-16, %r8 + add x8, x8, #48 // add $-16, %r8 and x8, x8, #~(1<<6) // and $0x30, %r8 st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx) ret @@ -1014,6 +1022,7 @@ _vpaes_schedule_mangle: .type vpaes_set_encrypt_key,%function .align 4 vpaes_set_encrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1029,6 +1038,7 @@ vpaes_set_encrypt_key: ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key @@ -1037,6 +1047,7 @@ vpaes_set_encrypt_key: .type vpaes_set_decrypt_key,%function .align 4 vpaes_set_decrypt_key: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1056,6 +1067,7 @@ vpaes_set_decrypt_key: ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key .globl vpaes_cbc_encrypt @@ -1063,6 +1075,7 @@ vpaes_set_decrypt_key: .type vpaes_cbc_encrypt,%function .align 4 vpaes_cbc_encrypt: + AARCH64_SIGN_LINK_REGISTER cbz x2, .Lcbc_abort cmp w5, #0 // check direction b.eq vpaes_cbc_decrypt @@ -1090,12 +1103,15 @@ vpaes_cbc_encrypt: ldp x29,x30,[sp],#16 .Lcbc_abort: + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt .type vpaes_cbc_decrypt,%function .align 4 vpaes_cbc_decrypt: + // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to + // only from vpaes_cbc_encrypt which has already signed the return address. 
stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1137,6 +1153,7 @@ vpaes_cbc_decrypt: ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt .globl vpaes_ctr32_encrypt_blocks @@ -1144,6 +1161,7 @@ vpaes_cbc_decrypt: .type vpaes_ctr32_encrypt_blocks,%function .align 4 vpaes_ctr32_encrypt_blocks: + AARCH64_SIGN_LINK_REGISTER stp x29,x30,[sp,#-16]! add x29,sp,#0 stp d8,d9,[sp,#-16]! // ABI spec says so @@ -1211,6 +1229,7 @@ vpaes_ctr32_encrypt_blocks: ldp d10,d11,[sp],#16 ldp d8,d9,[sp],#16 ldp x29,x30,[sp],#16 + AARCH64_VALIDATE_LINK_REGISTER ret .size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks #endif diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86.linux.x86.S index 91ae1d9..19c5838 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.linux.x86_64.S index fa4d319..09baf3e 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. 
Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.mac.x86_64.S index 15217b2..d9bff33 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/vpaes-x86_64.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86-mont.linux.x86.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86-mont.linux.x86.S index 56c84c8..2d71a5f 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86-mont.linux.x86.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86-mont.linux.x86.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__i386__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__i386__) #if defined(BORINGSSL_PREFIX) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.linux.x86_64.S index a4fff73..078ac41 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.mac.x86_64.S index 658aaaf..e14bf03 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.linux.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.linux.x86_64.S index a65faa1..2dd9020 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.linux.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.linux.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__linux__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. #if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.mac.x86_64.S b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.mac.x86_64.S index 3c034a5..0a1ff36 100644 --- a/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.mac.x86_64.S +++ b/Sources/CBigNumBoringSSL/crypto/fipsmodule/x86_64-mont5.mac.x86_64.S @@ -1,7 +1,7 @@ #define BORINGSSL_PREFIX CBigNumBoringSSL #if defined(__x86_64__) && defined(__APPLE__) -# This file is generated from a similarly-named Perl script in the BoringSSL -# source tree. Do not edit by hand. +// This file is generated from a similarly-named Perl script in the BoringSSL +// source tree. Do not edit by hand. 
#if defined(__has_feature) #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM) diff --git a/Sources/CBigNumBoringSSL/crypto/internal.h b/Sources/CBigNumBoringSSL/crypto/internal.h index 7d6f18b..e8bfb25 100644 --- a/Sources/CBigNumBoringSSL/crypto/internal.h +++ b/Sources/CBigNumBoringSSL/crypto/internal.h @@ -109,6 +109,7 @@ #ifndef OPENSSL_HEADER_CRYPTO_INTERNAL_H #define OPENSSL_HEADER_CRYPTO_INTERNAL_H +#include #include #include #include @@ -208,6 +209,9 @@ typedef __uint128_t uint128_t; #define OPENSSL_SSE2 #endif + +// Pointer utility functions. + // buffers_alias returns one if |a| and |b| alias and zero otherwise. static inline int buffers_alias(const uint8_t *a, size_t a_len, const uint8_t *b, size_t b_len) { @@ -220,6 +224,23 @@ static inline int buffers_alias(const uint8_t *a, size_t a_len, return a_u + a_len > b_u && b_u + b_len > a_u; } +// align_pointer returns |ptr|, advanced to |alignment|. |alignment| must be a +// power of two, and |ptr| must have at least |alignment - 1| bytes of scratch +// space. +static inline void *align_pointer(void *ptr, size_t alignment) { + // |alignment| must be a power of two. + assert(alignment != 0 && (alignment & (alignment - 1)) == 0); + // Instead of aligning |ptr| as a |uintptr_t| and casting back, compute the + // offset and advance in pointer space. C guarantees that casting from pointer + // to |uintptr_t| and back gives the same pointer, but general + // integer-to-pointer conversions are implementation-defined. GCC does define + // it in the useful way, but this makes fewer assumptions. + uintptr_t offset = (0u - (uintptr_t)ptr) & (alignment - 1); + ptr = (char *)ptr + offset; + assert(((uintptr_t)ptr & (alignment - 1)) == 0); + return ptr; +} + // Constant-time utility functions. // @@ -470,6 +491,13 @@ OPENSSL_EXPORT void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)); // Reference counting. +// Automatically enable C11 atomics if implemented. 
+#if !defined(OPENSSL_C11_ATOMIC) && defined(OPENSSL_THREADS) && \ + !defined(__STDC_NO_ATOMICS__) && defined(__STDC_VERSION__) && \ + __STDC_VERSION__ >= 201112L +#define OPENSSL_C11_ATOMIC +#endif + // CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. #define CRYPTO_REFCOUNT_MAX 0xffffffff @@ -607,6 +635,7 @@ BSSL_NAMESPACE_END typedef enum { OPENSSL_THREAD_LOCAL_ERR = 0, OPENSSL_THREAD_LOCAL_RAND, + OPENSSL_THREAD_LOCAL_FIPS_COUNTERS, OPENSSL_THREAD_LOCAL_TEST, NUM_OPENSSL_THREAD_LOCALS, } thread_local_data_t; @@ -811,6 +840,97 @@ static inline void *OPENSSL_memset(void *dst, int c, size_t n) { return memset(dst, c, n); } + +// Loads and stores. +// +// The following functions load and store sized integers with the specified +// endianness. They use |memcpy|, and so avoid alignment or strict aliasing +// requirements on the input and output pointers. + +static inline uint32_t CRYPTO_load_u32_le(const void *in) { + uint32_t v; + OPENSSL_memcpy(&v, in, sizeof(v)); + return v; +} + +static inline void CRYPTO_store_u32_le(void *out, uint32_t v) { + OPENSSL_memcpy(out, &v, sizeof(v)); +} + +static inline uint32_t CRYPTO_load_u32_be(const void *in) { + uint32_t v; + OPENSSL_memcpy(&v, in, sizeof(v)); + return CRYPTO_bswap4(v); +} + +static inline void CRYPTO_store_u32_be(void *out, uint32_t v) { + v = CRYPTO_bswap4(v); + OPENSSL_memcpy(out, &v, sizeof(v)); +} + +static inline uint64_t CRYPTO_load_u64_be(const void *ptr) { + uint64_t ret; + OPENSSL_memcpy(&ret, ptr, sizeof(ret)); + return CRYPTO_bswap8(ret); +} + +static inline void CRYPTO_store_u64_be(void *out, uint64_t v) { + v = CRYPTO_bswap8(v); + OPENSSL_memcpy(out, &v, sizeof(v)); +} + +static inline crypto_word_t CRYPTO_load_word_le(const void *in) { + crypto_word_t v; + OPENSSL_memcpy(&v, in, sizeof(v)); + return v; +} + +static inline void CRYPTO_store_word_le(void *out, crypto_word_t v) { + OPENSSL_memcpy(out, &v, sizeof(v)); +} + + +// Bit rotation functions. 
+// +// Note these functions use |(-shift) & 31|, etc., because shifting by the bit +// width is undefined. Both Clang and GCC recognize this pattern as a rotation, +// but MSVC does not. Instead, we call MSVC's built-in functions. + +static inline uint32_t CRYPTO_rotl_u32(uint32_t value, int shift) { +#if defined(_MSC_VER) + return _rotl(value, shift); +#else + return (value << shift) | (value >> ((-shift) & 31)); +#endif +} + +static inline uint32_t CRYPTO_rotr_u32(uint32_t value, int shift) { +#if defined(_MSC_VER) + return _rotr(value, shift); +#else + return (value >> shift) | (value << ((-shift) & 31)); +#endif +} + +static inline uint64_t CRYPTO_rotl_u64(uint64_t value, int shift) { +#if defined(_MSC_VER) + return _rotl64(value, shift); +#else + return (value << shift) | (value >> ((-shift) & 63)); +#endif +} + +static inline uint64_t CRYPTO_rotr_u64(uint64_t value, int shift) { +#if defined(_MSC_VER) + return _rotr64(value, shift); +#else + return (value >> shift) | (value << ((-shift) & 63)); +#endif +} + + +// FIPS functions. + #if defined(BORINGSSL_FIPS) // BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test // fails. 
It prevents any further cryptographic operations by the current @@ -826,6 +946,11 @@ void BORINGSSL_FIPS_abort(void) __attribute__((noreturn)); int boringssl_fips_self_test(const uint8_t *module_hash, size_t module_hash_len); +#if defined(BORINGSSL_FIPS_COUNTERS) +void boringssl_fips_inc_counter(enum fips_counter_t counter); +#else +OPENSSL_INLINE void boringssl_fips_inc_counter(enum fips_counter_t counter) {} +#endif #if defined(__cplusplus) } // extern C diff --git a/Sources/CBigNumBoringSSL/crypto/mem.c b/Sources/CBigNumBoringSSL/crypto/mem.c index b0b3d4b..0392690 100644 --- a/Sources/CBigNumBoringSSL/crypto/mem.c +++ b/Sources/CBigNumBoringSSL/crypto/mem.c @@ -72,6 +72,8 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #define OPENSSL_MALLOC_PREFIX 8 +OPENSSL_STATIC_ASSERT(OPENSSL_MALLOC_PREFIX >= sizeof(size_t), + "size_t too large"); #if defined(OPENSSL_ASAN) void __asan_poison_memory_region(const volatile void *addr, size_t size); @@ -101,14 +103,54 @@ static void __asan_unpoison_memory_region(const void *addr, size_t size) {} // linked. This isn't an ideal result, but its helps in some cases. WEAK_SYMBOL_FUNC(void, sdallocx, (void *ptr, size_t size, int flags)); -// The following two functions are for memory tracking. They are no-ops by -// default but can be overridden at link time if the application needs to -// observe heap operations. -WEAK_SYMBOL_FUNC(void, OPENSSL_track_memory_alloc, (void *ptr, size_t size)); -WEAK_SYMBOL_FUNC(void, OPENSSL_track_memory_free, (void *ptr, size_t size)); +// The following three functions can be defined to override default heap +// allocation and freeing. If defined, it is the responsibility of +// |OPENSSL_memory_free| to zero out the memory before returning it to the +// system. |OPENSSL_memory_free| will not be passed NULL pointers. +// +// WARNING: These functions are called on every allocation and free in +// BoringSSL across the entire process. 
They may be called by any code in the +// process which calls BoringSSL, including in process initializers and thread +// destructors. When called, BoringSSL may hold pthreads locks. Any other code +// in the process which, directly or indirectly, calls BoringSSL may be on the +// call stack and may itself be using arbitrary synchronization primitives. +// +// As a result, these functions may not have the usual programming environment +// available to most C or C++ code. In particular, they may not call into +// BoringSSL, or any library which depends on BoringSSL. Any synchronization +// primitives used must tolerate every other synchronization primitive linked +// into the process, including pthreads locks. Failing to meet these constraints +// may result in deadlocks, crashes, or memory corruption. +WEAK_SYMBOL_FUNC(void*, OPENSSL_memory_alloc, (size_t size)); +WEAK_SYMBOL_FUNC(void, OPENSSL_memory_free, (void *ptr)); +WEAK_SYMBOL_FUNC(size_t, OPENSSL_memory_get_size, (void *ptr)); + +// kBoringSSLBinaryTag is a distinctive byte sequence to identify binaries that +// are linking in BoringSSL and, roughly, what version they are using. +static const uint8_t kBoringSSLBinaryTag[18] = { + // 16 bytes of magic tag. + 0x8c, 0x62, 0x20, 0x0b, 0xd2, 0xa0, 0x72, 0x58, + 0x44, 0xa8, 0x96, 0x69, 0xad, 0x55, 0x7e, 0xec, + // Current source iteration. Incremented ~monthly. + 2, 0, +}; void *OPENSSL_malloc(size_t size) { + if (OPENSSL_memory_alloc != NULL) { + assert(OPENSSL_memory_free != NULL); + assert(OPENSSL_memory_get_size != NULL); + return OPENSSL_memory_alloc(size); + } + if (size + OPENSSL_MALLOC_PREFIX < size) { + // |OPENSSL_malloc| is a central function in BoringSSL thus a reference to + // |kBoringSSLBinaryTag| is created here so that the tag isn't discarded by + // the linker. The following is sufficient to stop GCC, Clang, and MSVC + // optimising away the reference at the time of writing. 
Since this + // probably results in an actual memory reference, it is put in this very + // rare code path. + uint8_t unused = *(volatile uint8_t *)kBoringSSLBinaryTag; + (void) unused; return NULL; } @@ -120,9 +162,6 @@ void *OPENSSL_malloc(size_t size) { *(size_t *)ptr = size; __asan_poison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); - if (OPENSSL_track_memory_alloc) { - OPENSSL_track_memory_alloc(ptr, size + OPENSSL_MALLOC_PREFIX); - } return ((uint8_t *)ptr) + OPENSSL_MALLOC_PREFIX; } @@ -131,13 +170,15 @@ void OPENSSL_free(void *orig_ptr) { return; } + if (OPENSSL_memory_free != NULL) { + OPENSSL_memory_free(orig_ptr); + return; + } + void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; __asan_unpoison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); size_t size = *(size_t *)ptr; - if (OPENSSL_track_memory_free) { - OPENSSL_track_memory_free(ptr, size + OPENSSL_MALLOC_PREFIX); - } OPENSSL_cleanse(ptr, size + OPENSSL_MALLOC_PREFIX); if (sdallocx) { sdallocx(ptr, size + OPENSSL_MALLOC_PREFIX, 0 /* flags */); @@ -151,10 +192,15 @@ void *OPENSSL_realloc(void *orig_ptr, size_t new_size) { return OPENSSL_malloc(new_size); } - void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; - __asan_unpoison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); - size_t old_size = *(size_t *)ptr; - __asan_poison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); + size_t old_size; + if (OPENSSL_memory_get_size != NULL) { + old_size = OPENSSL_memory_get_size(orig_ptr); + } else { + void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; + __asan_unpoison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); + old_size = *(size_t *)ptr; + __asan_poison_memory_region(ptr, OPENSSL_MALLOC_PREFIX); + } void *ret = OPENSSL_malloc(new_size); if (ret == NULL) { @@ -219,6 +265,8 @@ uint32_t OPENSSL_hash32(const void *ptr, size_t len) { return h; } +uint32_t OPENSSL_strhash(const char *s) { return OPENSSL_hash32(s, strlen(s)); } + size_t OPENSSL_strnlen(const char *s, size_t len) { for (size_t i = 0; i < len; 
i++) { if (s[i] == 0) { @@ -294,22 +342,15 @@ int BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args) { } char *OPENSSL_strndup(const char *str, size_t size) { - char *ret; - size_t alloc_size; - - if (str == NULL) { - return NULL; - } - size = OPENSSL_strnlen(str, size); - alloc_size = size + 1; + size_t alloc_size = size + 1; if (alloc_size < size) { // overflow OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); return NULL; } - ret = OPENSSL_malloc(alloc_size); + char *ret = OPENSSL_malloc(alloc_size); if (ret == NULL) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); return NULL; @@ -357,3 +398,13 @@ void *OPENSSL_memdup(const void *data, size_t size) { OPENSSL_memcpy(ret, data, size); return ret; } + +void *CRYPTO_malloc(size_t size, const char *file, int line) { + return OPENSSL_malloc(size); +} + +void *CRYPTO_realloc(void *ptr, size_t new_size, const char *file, int line) { + return OPENSSL_realloc(ptr, new_size); +} + +void CRYPTO_free(void *ptr, const char *file, int line) { OPENSSL_free(ptr); } diff --git a/Sources/CBigNumBoringSSL/crypto/rand_extra/deterministic.c b/Sources/CBigNumBoringSSL/crypto/rand_extra/deterministic.c index 08b68b8..f45ddd5 100644 --- a/Sources/CBigNumBoringSSL/crypto/rand_extra/deterministic.c +++ b/Sources/CBigNumBoringSSL/crypto/rand_extra/deterministic.c @@ -49,4 +49,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { CRYPTO_chacha_20(out, out, requested, kZeroKey, nonce, 0); } +void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { + CRYPTO_sysrand(out, requested); +} + #endif // BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/CBigNumBoringSSL/crypto/rand_extra/fuchsia.c b/Sources/CBigNumBoringSSL/crypto/rand_extra/fuchsia.c index b5e9eb3..688a754 100644 --- a/Sources/CBigNumBoringSSL/crypto/rand_extra/fuchsia.c +++ b/Sources/CBigNumBoringSSL/crypto/rand_extra/fuchsia.c @@ -27,4 +27,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { zx_cprng_draw(out, requested); } +void 
CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { + CRYPTO_sysrand(out, requested); +} + #endif // OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/CBigNumBoringSSL/crypto/rand_extra/passive.c b/Sources/CBigNumBoringSSL/crypto/rand_extra/passive.c new file mode 100644 index 0000000..bd8180a --- /dev/null +++ b/Sources/CBigNumBoringSSL/crypto/rand_extra/passive.c @@ -0,0 +1,34 @@ +/* Copyright (c) 2020, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include +#include "../fipsmodule/rand/internal.h" + +#if defined(BORINGSSL_FIPS) + +// RAND_need_entropy is called by the FIPS module when it has blocked because of +// a lack of entropy. This signal is used as an indication to feed it more. 
+void RAND_need_entropy(size_t bytes_needed) { + uint8_t buf[CTR_DRBG_ENTROPY_LEN * BORINGSSL_FIPS_OVERREAD]; + size_t todo = sizeof(buf); + if (todo > bytes_needed) { + todo = bytes_needed; + } + + int used_cpu; + CRYPTO_get_seed_entropy(buf, todo, &used_cpu); + RAND_load_entropy(buf, todo, used_cpu); +} + +#endif // FIPS diff --git a/Sources/CBigNumBoringSSL/crypto/rand_extra/rand_extra.c b/Sources/CBigNumBoringSSL/crypto/rand_extra/rand_extra.c index 7f63fc6..1ee2ad2 100644 --- a/Sources/CBigNumBoringSSL/crypto/rand_extra/rand_extra.c +++ b/Sources/CBigNumBoringSSL/crypto/rand_extra/rand_extra.c @@ -63,8 +63,12 @@ RAND_METHOD *RAND_SSLeay(void) { return (RAND_METHOD*) &kSSLeayMethod; } +RAND_METHOD *RAND_OpenSSL(void) { + return RAND_SSLeay(); +} + const RAND_METHOD *RAND_get_rand_method(void) { return RAND_SSLeay(); } -void RAND_set_rand_method(const RAND_METHOD *method) {} +int RAND_set_rand_method(const RAND_METHOD *method) { return 1; } void RAND_cleanup(void) {} diff --git a/Sources/CBigNumBoringSSL/crypto/rand_extra/windows.c b/Sources/CBigNumBoringSSL/crypto/rand_extra/windows.c index 3c8700d..7ce4c6a 100644 --- a/Sources/CBigNumBoringSSL/crypto/rand_extra/windows.c +++ b/Sources/CBigNumBoringSSL/crypto/rand_extra/windows.c @@ -66,4 +66,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { return; } +void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) { + CRYPTO_sysrand(out, requested); +} + #endif // OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/CBigNumBoringSSL/crypto/stack/stack.c b/Sources/CBigNumBoringSSL/crypto/stack/stack.c index 14334a0..41fb5a0 100644 --- a/Sources/CBigNumBoringSSL/crypto/stack/stack.c +++ b/Sources/CBigNumBoringSSL/crypto/stack/stack.c @@ -57,7 +57,6 @@ #include #include -#include #include @@ -69,11 +68,9 @@ static const size_t kMinSize = 4; _STACK *sk_new(stack_cmp_func comp) { - _STACK *ret; - - ret = OPENSSL_malloc(sizeof(_STACK)); + _STACK *ret = OPENSSL_malloc(sizeof(_STACK)); if 
(ret == NULL) { - goto err; + return NULL; } OPENSSL_memset(ret, 0, sizeof(_STACK)); @@ -331,23 +328,20 @@ void *sk_pop(_STACK *sk) { } _STACK *sk_dup(const _STACK *sk) { - _STACK *ret; - void **s; - if (sk == NULL) { return NULL; } - ret = sk_new(sk->comp); + _STACK *ret = OPENSSL_malloc(sizeof(_STACK)); if (ret == NULL) { - goto err; + return NULL; } + OPENSSL_memset(ret, 0, sizeof(_STACK)); - s = (void **)OPENSSL_realloc(ret->data, sizeof(void *) * sk->num_alloc); - if (s == NULL) { + ret->data = OPENSSL_malloc(sizeof(void *) * sk->num_alloc); + if (ret->data == NULL) { goto err; } - ret->data = s; ret->num = sk->num; OPENSSL_memcpy(ret->data, sk->data, sizeof(void *) * sk->num); diff --git a/Sources/CBigNumBoringSSL/crypto/thread_pthread.c b/Sources/CBigNumBoringSSL/crypto/thread_pthread.c index d272065..c11f92d 100644 --- a/Sources/CBigNumBoringSSL/crypto/thread_pthread.c +++ b/Sources/CBigNumBoringSSL/crypto/thread_pthread.c @@ -127,34 +127,6 @@ static pthread_once_t g_thread_local_init_once = PTHREAD_ONCE_INIT; static pthread_key_t g_thread_local_key; static int g_thread_local_key_created = 0; -// OPENSSL_DANGEROUS_RELEASE_PTHREAD_KEY can be defined to cause -// |pthread_key_delete| to be called in a destructor function. This can be -// useful for programs that dlclose BoringSSL. -// -// Note that dlclose()ing BoringSSL is not supported and will leak memory: -// thread-local values will be leaked as well as anything initialised via a -// once. The |pthread_key_t| is destroyed because they run out very quickly, -// while the other leaks are slow, and this allows code that happens to use -// dlclose() despite all the problems to continue functioning. -// -// This is marked "dangerous" because it can cause multi-threaded processes to -// crash (even if they don't use dlclose): if the destructor runs while other -// threads are still executing then they may end up using an invalid key to -// access thread-local variables. 
-// -// This may be removed after February 2020. -#if defined(OPENSSL_DANGEROUS_RELEASE_PTHREAD_KEY) && \ - (defined(__GNUC__) || defined(__clang__)) -// thread_key_destructor is called when the library is unloaded with dlclose. -static void thread_key_destructor(void) __attribute__((destructor, unused)); -static void thread_key_destructor(void) { - if (g_thread_local_key_created) { - g_thread_local_key_created = 0; - pthread_key_delete(g_thread_local_key); - } -} -#endif - static void thread_local_init(void) { g_thread_local_key_created = pthread_key_create(&g_thread_local_key, thread_local_destructor) == 0; diff --git a/Sources/CBigNumBoringSSL/hash.txt b/Sources/CBigNumBoringSSL/hash.txt index 1978f4b..0afbd98 100644 --- a/Sources/CBigNumBoringSSL/hash.txt +++ b/Sources/CBigNumBoringSSL/hash.txt @@ -1 +1 @@ -This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision 73e0401e3d434b17c6799d6ce2dcd4de76e885b1 +This directory is derived from BoringSSL cloned from https://boringssl.googlesource.com/boringssl at revision 295b31324f8c557dcd3c1c831857e33a7f23bc52 diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL.h index 68ecd5d..bbed77c 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL.h @@ -1,14 +1,20 @@ #ifndef C_BIGNUM_BORINGSSL_H #define C_BIGNUM_BORINGSSL_H +#include "CBigNumBoringSSL_aead.h" #include "CBigNumBoringSSL_aes.h" +#include "CBigNumBoringSSL_arm_arch.h" +#include "CBigNumBoringSSL_asn1.h" #include "CBigNumBoringSSL_bio.h" #include "CBigNumBoringSSL_bn.h" +#include "CBigNumBoringSSL_boringssl_prefix_symbols_asm.h" +#include "CBigNumBoringSSL_bytestring.h" +#include "CBigNumBoringSSL_chacha.h" #include "CBigNumBoringSSL_cipher.h" #include "CBigNumBoringSSL_cpu.h" #include "CBigNumBoringSSL_crypto.h" -#include "CBigNumBoringSSL_bytestring.h" #include 
"CBigNumBoringSSL_err.h" +#include "CBigNumBoringSSL_nid.h" #include "CBigNumBoringSSL_rand.h" diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aead.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aead.h index da3142a..71bc407 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aead.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aead.h @@ -122,7 +122,7 @@ OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_192_gcm(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm(void); // EVP_aead_chacha20_poly1305 is the AEAD built from ChaCha20 and -// Poly1305 as described in RFC 7539. +// Poly1305 as described in RFC 8439. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_chacha20_poly1305(void); // EVP_aead_xchacha20_poly1305 is ChaCha20-Poly1305 with an extended nonce that @@ -146,6 +146,30 @@ OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void); // https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02 OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void); +// EVP_aead_aes_128_gcm_randnonce is AES-128 in Galois Counter Mode with +// internal nonce generation. The 12-byte nonce is appended to the tag +// and is generated internally. The "tag", for the purpurses of the API, is thus +// 12 bytes larger. The nonce parameter when using this AEAD must be +// zero-length. Since the nonce is random, a single key should not be used for +// more than 2^32 seal operations. +// +// Warning: this is for use for FIPS compliance only. It is probably not +// suitable for other uses. Using standard AES-GCM AEADs allows one to achieve +// the same effect, but gives more control over nonce storage. +OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_randnonce(void); + +// EVP_aead_aes_256_gcm_randnonce is AES-256 in Galois Counter Mode with +// internal nonce generation. The 12-byte nonce is appended to the tag +// and is generated internally. The "tag", for the purpurses of the API, is thus +// 12 bytes larger. 
The nonce parameter when using this AEAD must be +// zero-length. Since the nonce is random, a single key should not be used for +// more than 2^32 seal operations. +// +// Warning: this is for use for FIPS compliance only. It is probably not +// suitable for other uses. Using standard AES-GCM AEADs allows one to achieve +// the same effect, but gives more control over nonce storage. +OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_randnonce(void); + // EVP_aead_aes_128_ccm_bluetooth is AES-128-CCM with M=4 and L=2 (4-byte tags // and 13-byte nonces), as decribed in the Bluetooth Core Specification v5.0, // Volume 6, Part E, Section 1. @@ -373,12 +397,9 @@ OPENSSL_EXPORT const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void); -OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void); -OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void); -OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void); diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aes.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aes.h index aa58374..9d123c8 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aes.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_aes.h @@ -106,7 +106,10 @@ OPENSSL_EXPORT void AES_decrypt(const uint8_t *in, uint8_t *out, // AES_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the -// first call and |ivec| will be incremented. 
+// first call and |ivec| will be incremented. This function may be called +// in-place with |in| equal to |out|, but otherwise the buffers may not +// partially overlap. A partial overlap may overwrite input data before it is +// read. OPENSSL_EXPORT void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE], @@ -114,26 +117,35 @@ OPENSSL_EXPORT void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, unsigned int *num); // AES_ecb_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) a single, -// 16 byte block from |in| to |out|. +// 16 byte block from |in| to |out|. This function may be called in-place with +// |in| equal to |out|, but otherwise the buffers may not partially overlap. A +// partial overlap may overwrite input data before it is read. OPENSSL_EXPORT void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key, const int enc); // AES_cbc_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| // bytes from |in| to |out|. The length must be a multiple of the block size. +// This function may be called in-place with |in| equal to |out|, but otherwise +// the buffers may not partially overlap. A partial overlap may overwrite input +// data before it is read. OPENSSL_EXPORT void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, const int enc); // AES_ofb128_encrypt encrypts (or decrypts, it's the same in OFB mode) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the -// first call. +// first call. This function may be called in-place with |in| equal to |out|, +// but otherwise the buffers may not partially overlap. A partial overlap may +// overwrite input data before it is read. 
OPENSSL_EXPORT void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num); // AES_cfb128_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| // bytes from |in| to |out|. The |num| parameter must be set to zero on the -// first call. +// first call. This function may be called in-place with |in| equal to |out|, +// but otherwise the buffers may not partially overlap. A partial overlap may +// overwrite input data before it is read. OPENSSL_EXPORT void AES_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num, int enc); diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_arm_arch.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_arm_arch.h index ea917d9..8e4a86a 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_arm_arch.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_arm_arch.h @@ -49,6 +49,7 @@ * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). */ + #if __arm__ || __arm64__ || __aarch64__ #ifndef OPENSSL_HEADER_ARM_ARCH_H #define OPENSSL_HEADER_ARM_ARCH_H @@ -117,6 +118,128 @@ // ARMV8_PMULL indicates support for carryless multiplication. #define ARMV8_PMULL (1 << 5) +// ARMV8_SHA512 indicates support for hardware SHA-512 instructions. +#define ARMV8_SHA512 (1 << 6) + +#if defined(__ASSEMBLER__) + +// Support macros for +// - Armv8.3-A Pointer Authentication and +// - Armv8.5-A Branch Target Identification +// features which require emitting a .note.gnu.property section with the +// appropriate architecture-dependent feature bits set. +// +// |AARCH64_SIGN_LINK_REGISTER| and |AARCH64_VALIDATE_LINK_REGISTER| expand to +// PACIxSP and AUTIxSP, respectively. |AARCH64_SIGN_LINK_REGISTER| should be +// used immediately before saving the LR register (x30) to the stack. 
+// |AARCH64_VALIDATE_LINK_REGISTER| should be used immediately after restoring +// it. Note |AARCH64_SIGN_LINK_REGISTER|'s modifications to LR must be undone +// with |AARCH64_VALIDATE_LINK_REGISTER| before RET. The SP register must also +// have the same value at the two points. For example: +// +// .global f +// f: +// AARCH64_SIGN_LINK_REGISTER +// stp x29, x30, [sp, #-96]! +// mov x29, sp +// ... +// ldp x29, x30, [sp], #96 +// AARCH64_VALIDATE_LINK_REGISTER +// ret +// +// |AARCH64_VALID_CALL_TARGET| expands to BTI 'c'. Either it, or +// |AARCH64_SIGN_LINK_REGISTER|, must be used at every point that may be an +// indirect call target. In particular, all symbols exported from a file must +// begin with one of these macros. For example, a leaf function that does not +// save LR can instead use |AARCH64_VALID_CALL_TARGET|: +// +// .globl return_zero +// return_zero: +// AARCH64_VALID_CALL_TARGET +// mov x0, #0 +// ret +// +// A non-leaf function which does not immediately save LR may need both macros +// because |AARCH64_SIGN_LINK_REGISTER| appears late. For example, the function +// may jump to an alternate implementation before setting up the stack: +// +// .globl with_early_jump +// with_early_jump: +// AARCH64_VALID_CALL_TARGET +// cmp x0, #128 +// b.lt .Lwith_early_jump_128 +// AARCH64_SIGN_LINK_REGISTER +// stp x29, x30, [sp, #-96]! +// mov x29, sp +// ... +// ldp x29, x30, [sp], #96 +// AARCH64_VALIDATE_LINK_REGISTER +// ret +// +// .Lwith_early_jump_128: +// ... +// ret +// +// These annotations are only required with indirect calls. Private symbols that +// are only the target of direct calls do not require annotations. Also note +// that |AARCH64_VALID_CALL_TARGET| is only valid for indirect calls (BLR), not +// indirect jumps (BR). Indirect jumps in assembly are currently not supported +// and would require a macro for BTI 'j'. +// +// Although not necessary, it is safe to use these macros in 32-bit ARM +// assembly. 
This may be used to simplify dual 32-bit and 64-bit files. +// +// References: +// - "ELF for the Arm® 64-bit Architecture" +// https://github.com/ARM-software/abi-aa/blob/master/aaelf64/aaelf64.rst +// - "Providing protection for complex software" +// https://developer.arm.com/architectures/learn-the-architecture/providing-protection-for-complex-software + +#if defined(__ARM_FEATURE_BTI_DEFAULT) && __ARM_FEATURE_BTI_DEFAULT == 1 +#define GNU_PROPERTY_AARCH64_BTI (1 << 0) // Has Branch Target Identification +#define AARCH64_VALID_CALL_TARGET hint #34 // BTI 'c' +#else +#define GNU_PROPERTY_AARCH64_BTI 0 // No Branch Target Identification +#define AARCH64_VALID_CALL_TARGET +#endif + +#if defined(__ARM_FEATURE_PAC_DEFAULT) && \ + (__ARM_FEATURE_PAC_DEFAULT & 1) == 1 // Signed with A-key +#define GNU_PROPERTY_AARCH64_POINTER_AUTH \ + (1 << 1) // Has Pointer Authentication +#define AARCH64_SIGN_LINK_REGISTER hint #25 // PACIASP +#define AARCH64_VALIDATE_LINK_REGISTER hint #29 // AUTIASP +#elif defined(__ARM_FEATURE_PAC_DEFAULT) && \ + (__ARM_FEATURE_PAC_DEFAULT & 2) == 2 // Signed with B-key +#define GNU_PROPERTY_AARCH64_POINTER_AUTH \ + (1 << 1) // Has Pointer Authentication +#define AARCH64_SIGN_LINK_REGISTER hint #27 // PACIBSP +#define AARCH64_VALIDATE_LINK_REGISTER hint #31 // AUTIBSP +#else +#define GNU_PROPERTY_AARCH64_POINTER_AUTH 0 // No Pointer Authentication +#if GNU_PROPERTY_AARCH64_BTI != 0 +#define AARCH64_SIGN_LINK_REGISTER AARCH64_VALID_CALL_TARGET +#else +#define AARCH64_SIGN_LINK_REGISTER +#endif +#define AARCH64_VALIDATE_LINK_REGISTER +#endif + +#if GNU_PROPERTY_AARCH64_POINTER_AUTH != 0 || GNU_PROPERTY_AARCH64_BTI != 0 +.pushsection .note.gnu.property, "a"; +.balign 8; +.long 4; +.long 0x10; +.long 0x5; +.asciz "GNU"; +.long 0xc0000000; /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ +.long 4; +.long (GNU_PROPERTY_AARCH64_POINTER_AUTH | GNU_PROPERTY_AARCH64_BTI); +.long 0; +.popsection; +#endif + +#endif /* defined __ASSEMBLER__ */ #endif // 
OPENSSL_HEADER_ARM_ARCH_H #endif // __arm__ || __arm64__ || __aarch64__ diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_asn1.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_asn1.h index 1107ffd..bc0303d 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_asn1.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_asn1.h @@ -4,21 +4,21 @@ * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. - * + * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * + * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -33,10 +33,10 @@ * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from + * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * + * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE @@ -48,7 +48,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence @@ -63,742 +63,1868 @@ #include #include "CBigNumBoringSSL_bio.h" +#include "CBigNumBoringSSL_bn.h" #include "CBigNumBoringSSL_stack.h" -#include "CBigNumBoringSSL_bn.h" - -#ifdef __cplusplus +#if defined(__cplusplus) extern "C" { #endif -/* Legacy ASN.1 library. - * - * This header is part of OpenSSL's ASN.1 implementation. It is retained for - * compatibility but otherwise underdocumented and not actively maintained. Use - * the new |CBS| and |CBB| library in instead. */ +// Legacy ASN.1 library. +// +// This header is part of OpenSSL's ASN.1 implementation. It is retained for +// compatibility but should not be used by new code. The functions are difficult +// to use correctly, and have buggy or non-standard behaviors. They are thus +// particularly prone to behavior changes and API removals, as BoringSSL +// iterates on these issues. +// +// Use the new |CBS| and |CBB| library in instead. -#define V_ASN1_UNIVERSAL 0x00 -#define V_ASN1_APPLICATION 0x40 -#define V_ASN1_CONTEXT_SPECIFIC 0x80 -#define V_ASN1_PRIVATE 0xc0 +// Tag constants. +// +// These constants are used in various APIs to specify ASN.1 types and tag +// components. 
See the specific API's documentation for details on which values +// are used and how. -#define V_ASN1_CONSTRUCTED 0x20 -#define V_ASN1_PRIMITIVE_TAG 0x1f +// The following constants are tag classes. +#define V_ASN1_UNIVERSAL 0x00 +#define V_ASN1_APPLICATION 0x40 +#define V_ASN1_CONTEXT_SPECIFIC 0x80 +#define V_ASN1_PRIVATE 0xc0 -#define V_ASN1_APP_CHOOSE -2 /* let the recipient choose */ -#define V_ASN1_OTHER -3 /* used in ASN1_TYPE */ -#define V_ASN1_ANY -4 /* used in ASN1 template code */ +// V_ASN1_CONSTRUCTED indicates an element is constructed, rather than +// primitive. +#define V_ASN1_CONSTRUCTED 0x20 -#define V_ASN1_NEG 0x100 /* negative flag */ -/* No supported universal tags may exceed this value, to avoid ambiguity with - * V_ASN1_NEG. */ -#define V_ASN1_MAX_UNIVERSAL 0xff +// V_ASN1_PRIMITIVE_TAG is the highest tag number which can be encoded in a +// single byte. Note this is unrelated to whether an element is constructed or +// primitive. +// +// TODO(davidben): Make this private. 
+#define V_ASN1_PRIMITIVE_TAG 0x1f -#define V_ASN1_UNDEF -1 -#define V_ASN1_EOC 0 -#define V_ASN1_BOOLEAN 1 /**/ -#define V_ASN1_INTEGER 2 -#define V_ASN1_NEG_INTEGER (2 | V_ASN1_NEG) -#define V_ASN1_BIT_STRING 3 -#define V_ASN1_OCTET_STRING 4 -#define V_ASN1_NULL 5 -#define V_ASN1_OBJECT 6 -#define V_ASN1_OBJECT_DESCRIPTOR 7 -#define V_ASN1_EXTERNAL 8 -#define V_ASN1_REAL 9 -#define V_ASN1_ENUMERATED 10 -#define V_ASN1_NEG_ENUMERATED (10 | V_ASN1_NEG) -#define V_ASN1_UTF8STRING 12 -#define V_ASN1_SEQUENCE 16 -#define V_ASN1_SET 17 -#define V_ASN1_NUMERICSTRING 18 /**/ -#define V_ASN1_PRINTABLESTRING 19 -#define V_ASN1_T61STRING 20 -#define V_ASN1_TELETEXSTRING 20 /* alias */ -#define V_ASN1_VIDEOTEXSTRING 21 /**/ -#define V_ASN1_IA5STRING 22 -#define V_ASN1_UTCTIME 23 -#define V_ASN1_GENERALIZEDTIME 24 /**/ -#define V_ASN1_GRAPHICSTRING 25 /**/ -#define V_ASN1_ISO64STRING 26 /**/ -#define V_ASN1_VISIBLESTRING 26 /* alias */ -#define V_ASN1_GENERALSTRING 27 /**/ -#define V_ASN1_UNIVERSALSTRING 28 /**/ -#define V_ASN1_BMPSTRING 30 +// V_ASN1_MAX_UNIVERSAL is the highest supported universal tag number. It is +// necessary to avoid ambiguity with |V_ASN1_NEG| and |MBSTRING_FLAG|. +// +// TODO(davidben): Make this private. 
+#define V_ASN1_MAX_UNIVERSAL 0xff -/* For use with d2i_ASN1_type_bytes() */ -#define B_ASN1_NUMERICSTRING 0x0001 -#define B_ASN1_PRINTABLESTRING 0x0002 -#define B_ASN1_T61STRING 0x0004 -#define B_ASN1_TELETEXSTRING 0x0004 -#define B_ASN1_VIDEOTEXSTRING 0x0008 -#define B_ASN1_IA5STRING 0x0010 -#define B_ASN1_GRAPHICSTRING 0x0020 -#define B_ASN1_ISO64STRING 0x0040 -#define B_ASN1_VISIBLESTRING 0x0040 -#define B_ASN1_GENERALSTRING 0x0080 -#define B_ASN1_UNIVERSALSTRING 0x0100 -#define B_ASN1_OCTET_STRING 0x0200 -#define B_ASN1_BIT_STRING 0x0400 -#define B_ASN1_BMPSTRING 0x0800 -#define B_ASN1_UNKNOWN 0x1000 -#define B_ASN1_UTF8STRING 0x2000 -#define B_ASN1_UTCTIME 0x4000 -#define B_ASN1_GENERALIZEDTIME 0x8000 -#define B_ASN1_SEQUENCE 0x10000 +// V_ASN1_UNDEF is used in some APIs to indicate an ASN.1 element is omitted. +#define V_ASN1_UNDEF (-1) -/* For use with ASN1_mbstring_copy() */ -#define MBSTRING_FLAG 0x1000 -#define MBSTRING_UTF8 (MBSTRING_FLAG) -/* |MBSTRING_ASC| refers to Latin-1, not ASCII. It is used with TeletexString - * which, in turn, is treated as Latin-1 rather than T.61 by OpenSSL and most - * other software. */ -#define MBSTRING_ASC (MBSTRING_FLAG|1) -#define MBSTRING_BMP (MBSTRING_FLAG|2) -#define MBSTRING_UNIV (MBSTRING_FLAG|4) +// V_ASN1_OTHER is used in |ASN1_TYPE| to indicate a non-universal ASN.1 type. +#define V_ASN1_OTHER (-3) -#define DECLARE_ASN1_SET_OF(type) /* filled in by mkstack.pl */ -#define IMPLEMENT_ASN1_SET_OF(type) /* nothing, no longer needed */ +// V_ASN1_ANY is used by the ASN.1 templates to indicate an ANY type. 
+#define V_ASN1_ANY (-4) -/* These are used internally in the ASN1_OBJECT to keep track of - * whether the names and data need to be free()ed */ -#define ASN1_OBJECT_FLAG_DYNAMIC 0x01 /* internal use */ -#define ASN1_OBJECT_FLAG_DYNAMIC_STRINGS 0x04 /* internal use */ -#define ASN1_OBJECT_FLAG_DYNAMIC_DATA 0x08 /* internal use */ -struct asn1_object_st - { - const char *sn,*ln; - int nid; - int length; - const unsigned char *data; /* data remains const after init */ - int flags; /* Should we free this one */ - }; +// The following constants are tag numbers for universal types. +#define V_ASN1_EOC 0 +#define V_ASN1_BOOLEAN 1 +#define V_ASN1_INTEGER 2 +#define V_ASN1_BIT_STRING 3 +#define V_ASN1_OCTET_STRING 4 +#define V_ASN1_NULL 5 +#define V_ASN1_OBJECT 6 +#define V_ASN1_OBJECT_DESCRIPTOR 7 +#define V_ASN1_EXTERNAL 8 +#define V_ASN1_REAL 9 +#define V_ASN1_ENUMERATED 10 +#define V_ASN1_UTF8STRING 12 +#define V_ASN1_SEQUENCE 16 +#define V_ASN1_SET 17 +#define V_ASN1_NUMERICSTRING 18 +#define V_ASN1_PRINTABLESTRING 19 +#define V_ASN1_T61STRING 20 +#define V_ASN1_TELETEXSTRING 20 +#define V_ASN1_VIDEOTEXSTRING 21 +#define V_ASN1_IA5STRING 22 +#define V_ASN1_UTCTIME 23 +#define V_ASN1_GENERALIZEDTIME 24 +#define V_ASN1_GRAPHICSTRING 25 +#define V_ASN1_ISO64STRING 26 +#define V_ASN1_VISIBLESTRING 26 +#define V_ASN1_GENERALSTRING 27 +#define V_ASN1_UNIVERSALSTRING 28 +#define V_ASN1_BMPSTRING 30 + +// The following constants are used for |ASN1_STRING| values that represent +// negative INTEGER and ENUMERATED values. See |ASN1_STRING| for more details. +#define V_ASN1_NEG 0x100 +#define V_ASN1_NEG_INTEGER (V_ASN1_INTEGER | V_ASN1_NEG) +#define V_ASN1_NEG_ENUMERATED (V_ASN1_ENUMERATED | V_ASN1_NEG) + +// The following constants are bitmask representations of ASN.1 types. 
+#define B_ASN1_NUMERICSTRING 0x0001 +#define B_ASN1_PRINTABLESTRING 0x0002 +#define B_ASN1_T61STRING 0x0004 +#define B_ASN1_TELETEXSTRING 0x0004 +#define B_ASN1_VIDEOTEXSTRING 0x0008 +#define B_ASN1_IA5STRING 0x0010 +#define B_ASN1_GRAPHICSTRING 0x0020 +#define B_ASN1_ISO64STRING 0x0040 +#define B_ASN1_VISIBLESTRING 0x0040 +#define B_ASN1_GENERALSTRING 0x0080 +#define B_ASN1_UNIVERSALSTRING 0x0100 +#define B_ASN1_OCTET_STRING 0x0200 +#define B_ASN1_BIT_STRING 0x0400 +#define B_ASN1_BMPSTRING 0x0800 +#define B_ASN1_UNKNOWN 0x1000 +#define B_ASN1_UTF8STRING 0x2000 +#define B_ASN1_UTCTIME 0x4000 +#define B_ASN1_GENERALIZEDTIME 0x8000 +#define B_ASN1_SEQUENCE 0x10000 + +// ASN1_tag2bit converts |tag| from the tag number of a universal type to a +// corresponding |B_ASN1_*| constant, |B_ASN1_UNKNOWN|, or zero. If the +// |B_ASN1_*| constant above is defined, it will map the corresponding +// |V_ASN1_*| constant to it. Otherwise, whether it returns |B_ASN1_UNKNOWN| or +// zero is ill-defined and callers should not rely on it. +// +// TODO(https://crbug.com/boringssl/412): Figure out what |B_ASN1_UNNOWN| vs +// zero is meant to be. The main impact is what values go in |B_ASN1_PRINTABLE|. +// To that end, we must return zero on types that can't go in |ASN1_STRING|. +OPENSSL_EXPORT unsigned long ASN1_tag2bit(int tag); + +// ASN1_tag2str returns a string representation of |tag|, interpret as a tag +// number for a universal type, or |V_ASN1_NEG_*|. +OPENSSL_EXPORT const char *ASN1_tag2str(int tag); + + +// API conventions. +// +// The following sample functions document the calling conventions used by +// legacy ASN.1 APIs. + +#if 0 // Sample functions + +// d2i_SAMPLE parses a structure from up to |len| bytes at |*inp|. On success, +// it advances |*inp| by the number of bytes read and returns a newly-allocated +// |SAMPLE| object containing the parsed structure. 
If |out| is non-NULL, it +// additionally frees the previous value at |*out| and updates |*out| to the +// result. If parsing or allocating the result fails, it returns NULL. +// +// This function does not reject trailing data in the input. This allows the +// caller to parse a sequence of concatenated structures. Callers parsing only +// one structure should check for trailing data by comparing the updated |*inp| +// with the end of the input. +// +// Note: If |out| and |*out| are both non-NULL, the object at |*out| is not +// updated in-place. Instead, it is freed, and the pointer is updated to the +// new object. This differs from OpenSSL, which behaves more like +// |d2i_SAMPLE_with_reuse|. Callers are recommended to set |out| to NULL and +// instead use the return value. +SAMPLE *d2i_SAMPLE(SAMPLE **out, const uint8_t **inp, long len); + +// d2i_SAMPLE_with_reuse parses a structure from up to |len| bytes at |*inp|. On +// success, it advances |*inp| by the number of bytes read and returns a +// non-NULL pointer to an object containing the parsed structure. The object is +// determined from |out| as follows: +// +// If |out| is NULL, the function places the result in a newly-allocated +// |SAMPLE| object and returns it. This mode is recommended. +// +// If |out| is non-NULL, but |*out| is NULL, the function also places the result +// in a newly-allocated |SAMPLE| object. It sets |*out| to this object and also +// returns it. +// +// If |out| and |*out| are both non-NULL, the function updates the object at +// |*out| in-place with the result and returns |*out|. +// +// If any of the above fail, the function returns NULL. +// +// This function does not reject trailing data in the input. This allows the +// caller to parse a sequence of concatenated structures. Callers parsing only +// one structure should check for trailing data by comparing the updated |*inp| +// with the end of the input. +// +// WARNING: Callers should not rely on the in-place update mode. 
It often +// produces the wrong result or breaks the type's internal invariants. Future +// revisions of BoringSSL may standardize on the |d2i_SAMPLE| behavior. +SAMPLE *d2i_SAMPLE_with_reuse(SAMPLE **out, const uint8_t **inp, long len); + +// i2d_SAMPLE marshals |in|. On error, it returns a negative value. On success, +// it returns the length of the result and outputs it via |outp| as follows: +// +// If |outp| is NULL, the function writes nothing. This mode can be used to size +// buffers. +// +// If |outp| is non-NULL but |*outp| is NULL, the function sets |*outp| to a +// newly-allocated buffer containing the result. The caller is responsible for +// releasing |*outp| with |OPENSSL_free|. This mode is recommended for most +// callers. +// +// If |outp| and |*outp| are non-NULL, the function writes the result to +// |*outp|, which must have enough space available, and advances |*outp| just +// past the output. +// +// WARNING: In the third mode, the function does not internally check output +// bounds. Failing to correctly size the buffer will result in a potentially +// exploitable memory error. +int i2d_SAMPLE(const SAMPLE *in, uint8_t **outp); + +#endif // Sample functions + +// The following typedefs are sometimes used for pointers to functions like +// |d2i_SAMPLE| and |i2d_SAMPLE|. Note, however, that these act on |void*|. +// Calling a function with a different pointer type is undefined in C, so this +// is only valid with a wrapper. +typedef void *d2i_of_void(void **, const unsigned char **, long); +typedef int i2d_of_void(const void *, unsigned char **); + + +// ASN.1 types. +// +// An |ASN1_ITEM| represents an ASN.1 type and allows working with ASN.1 types +// generically. +// +// |ASN1_ITEM|s use a different namespace from C types and are accessed via +// |ASN1_ITEM_*| macros. So, for example, |ASN1_OCTET_STRING| is both a C type +// and the name of an |ASN1_ITEM|, referenced as +// |ASN1_ITEM_rptr(ASN1_OCTET_STRING)|. 
+// +// Each |ASN1_ITEM| has a corresponding C type, typically with the same name, +// which represents values in the ASN.1 type. This type is either a pointer type +// or |ASN1_BOOLEAN|. When it is a pointer, NULL pointers represent omitted +// values. For example, an OCTET STRING value is declared with the C type +// |ASN1_OCTET_STRING*| and uses the |ASN1_ITEM| named |ASN1_OCTET_STRING|. An +// OPTIONAL OCTET STRING uses the same C type and represents an omitted value +// with a NULL pointer. |ASN1_BOOLEAN| is described in a later section. + +// DECLARE_ASN1_ITEM declares an |ASN1_ITEM| with name |name|. The |ASN1_ITEM| +// may be referenced with |ASN1_ITEM_rptr|. Uses of this macro should document +// the corresponding ASN.1 and C types. +#define DECLARE_ASN1_ITEM(name) extern OPENSSL_EXPORT const ASN1_ITEM name##_it; + +// ASN1_ITEM_rptr returns the |const ASN1_ITEM *| named |name|. +#define ASN1_ITEM_rptr(name) (&(name##_it)) + +// ASN1_ITEM_EXP is an abstraction for referencing an |ASN1_ITEM| in a +// constant-initialized structure, such as a method table. It exists because, on +// some OpenSSL platforms, |ASN1_ITEM| references are indirected through +// functions. Structures reference the |ASN1_ITEM| by declaring a field like +// |ASN1_ITEM_EXP *item| and initializing it with |ASN1_ITEM_ref|. +typedef const ASN1_ITEM ASN1_ITEM_EXP; + +// ASN1_ITEM_ref returns an |ASN1_ITEM_EXP*| for the |ASN1_ITEM| named |name|. +#define ASN1_ITEM_ref(name) (&(name##_it)) + +// ASN1_ITEM_ptr converts |iptr|, which must be an |ASN1_ITEM_EXP*| to a +// |const ASN1_ITEM*|. +#define ASN1_ITEM_ptr(iptr) (iptr) + +// ASN1_VALUE_st (aka |ASN1_VALUE|) is an opaque type used as a placeholder for +// the C type corresponding to an |ASN1_ITEM|. +typedef struct ASN1_VALUE_st ASN1_VALUE; + +// ASN1_item_new allocates a new value of the C type corresponding to |it|, or +// NULL on error. 
On success, the caller must release the value with +// |ASN1_item_free|, or the corresponding C type's free function, when done. The +// new value will initialize fields of the value to some default state, such as +// an empty string. Note, however, that this default state sometimes omits +// required values, such as with CHOICE types. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Casting the result of this function to the wrong type is a +// potentially exploitable memory error. Callers must ensure the value is used +// consistently with |it|. Prefer using type-specific functions such as +// |ASN1_OCTET_STRING_new|. +OPENSSL_EXPORT ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it); + +// ASN1_item_free releases memory associated with |val|, which must be an object +// of the C type corresponding to |it|. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Passing a pointer of the wrong type into this function is a +// potentially exploitable memory error. Callers must ensure |val| is consistent +// with |it|. Prefer using type-specific functions such as +// |ASN1_OCTET_STRING_free|. +OPENSSL_EXPORT void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it); + +// ASN1_item_d2i parses the ASN.1 type |it| from up to |len| bytes at |*inp|. +// It behaves like |d2i_SAMPLE_with_reuse|, except that |out| and the return +// value are cast to |ASN1_VALUE| pointers. +// +// TODO(https://crbug.com/boringssl/444): C strict aliasing forbids type-punning +// |T*| and |ASN1_VALUE*| the way this function signature does. When that bug is +// resolved, we will need to pick which type |*out| is (probably |T*|). Do not +// use a non-NULL |out| to avoid ending up on the wrong side of this question. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. 
+// +// WARNING: Casting the result of this function to the wrong type, or passing a +// pointer of the wrong type into this function, are potentially exploitable +// memory errors. Callers must ensure |out| is consistent with |it|. Prefer +// using type-specific functions such as |d2i_ASN1_OCTET_STRING|. +OPENSSL_EXPORT ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **out, + const unsigned char **inp, long len, + const ASN1_ITEM *it); + +// ASN1_item_i2d marshals |val| as the ASN.1 type associated with |it|, as +// described in |i2d_SAMPLE|. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Passing a pointer of the wrong type into this function is a +// potentially exploitable memory error. Callers must ensure |val| is consistent +// with |it|. Prefer using type-specific functions such as +// |i2d_ASN1_OCTET_STRING|. +OPENSSL_EXPORT int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **outp, + const ASN1_ITEM *it); + +// ASN1_item_dup returns a newly-allocated copy of |x|, or NULL on error. |x| +// must be an object of |it|'s C type. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Casting the result of this function to the wrong type, or passing a +// pointer of the wrong type into this function, are potentially exploitable +// memory errors. Prefer using type-specific functions such as +// |ASN1_STRING_dup|. +OPENSSL_EXPORT void *ASN1_item_dup(const ASN1_ITEM *it, void *x); + +// The following functions behave like |ASN1_item_d2i| but read from |in| +// instead. |out| is the same parameter as in |ASN1_item_d2i|, but written with +// |void*| instead. The return values similarly match. +// +// These functions may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: These functions do not bound how much data is read from |in|. +// Parsing an untrusted input could consume unbounded memory. 
+OPENSSL_EXPORT void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *out); +OPENSSL_EXPORT void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *out); + +// The following functions behave like |ASN1_item_i2d| but write to |out| +// instead. |in| is the same parameter as in |ASN1_item_i2d|, but written with +// |void*| instead. +// +// These functions may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +OPENSSL_EXPORT int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *in); +OPENSSL_EXPORT int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *in); + +// ASN1_item_unpack parses |oct|'s contents as |it|'s ASN.1 type. It returns a +// newly-allocated instance of |it|'s C type on success, or NULL on error. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Casting the result of this function to the wrong type is a +// potentially exploitable memory error. Callers must ensure the value is used +// consistently with |it|. +OPENSSL_EXPORT void *ASN1_item_unpack(const ASN1_STRING *oct, + const ASN1_ITEM *it); + +// ASN1_item_pack marshals |obj| as |it|'s ASN.1 type. If |out| is NULL, it +// returns a newly-allocated |ASN1_STRING| with the result, or NULL on error. +// If |out| is non-NULL, but |*out| is NULL, it does the same but additionally +// sets |*out| to the result. If both |out| and |*out| are non-NULL, it writes +// the result to |*out| and returns |*out| on success or NULL on error. +// +// This function may not be used with |ASN1_ITEM|s whose C type is +// |ASN1_BOOLEAN|. +// +// WARNING: Passing a pointer of the wrong type into this function is a +// potentially exploitable memory error. Callers must ensure |val| is consistent +// with |it|. +OPENSSL_EXPORT ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, + ASN1_STRING **out); + + +// Booleans. +// +// This library represents ASN.1 BOOLEAN values with |ASN1_BOOLEAN|, which is an +// integer type. 
FALSE is zero, TRUE is 0xff, and an omitted OPTIONAL BOOLEAN is +// -1. + +// d2i_ASN1_BOOLEAN parses a DER-encoded ASN.1 BOOLEAN from up to |len| bytes at +// |*inp|. On success, it advances |*inp| by the number of bytes read and +// returns the result. If |out| is non-NULL, it additionally writes the result +// to |*out|. On error, it returns -1. +// +// This function does not reject trailing data in the input. This allows the +// caller to parse a sequence of concatenated structures. Callers parsing only +// one structure should check for trailing data by comparing the updated |*inp| +// with the end of the input. +// +// WARNING: This function's is slightly different from other |d2i_*| functions +// because |ASN1_BOOLEAN| is not a pointer type. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_BOOLEAN d2i_ASN1_BOOLEAN(ASN1_BOOLEAN *out, + const unsigned char **inp, + long len); + +// i2d_ASN1_BOOLEAN marshals |a| as a DER-encoded ASN.1 BOOLEAN, as described in +// |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_BOOLEAN(ASN1_BOOLEAN a, unsigned char **outp); + +// The following |ASN1_ITEM|s have ASN.1 type BOOLEAN and C type |ASN1_BOOLEAN|. +// |ASN1_TBOOLEAN| and |ASN1_FBOOLEAN| must be marked OPTIONAL. When omitted, +// they are parsed as TRUE and FALSE, respectively, rather than -1. +DECLARE_ASN1_ITEM(ASN1_BOOLEAN) +DECLARE_ASN1_ITEM(ASN1_TBOOLEAN) +DECLARE_ASN1_ITEM(ASN1_FBOOLEAN) + + +// Strings. +// +// ASN.1 contains a myriad of string types, as well as types that contain data +// that may be encoded into a string. This library uses a single type, +// |ASN1_STRING|, to represent most values. + +// An asn1_string_st (aka |ASN1_STRING|) represents a value of a string-like +// ASN.1 type. It contains a type field, and a byte string data field with a +// type-specific representation. 
+// +// When representing a string value, the type field is one of +// |V_ASN1_OCTET_STRING|, |V_ASN1_UTF8STRING|, |V_ASN1_NUMERICSTRING|, +// |V_ASN1_PRINTABLESTRING|, |V_ASN1_T61STRING|, |V_ASN1_VIDEOTEXSTRING|, +// |V_ASN1_IA5STRING|, |V_ASN1_GRAPHICSTRING|, |V_ASN1_ISO64STRING|, +// |V_ASN1_VISIBLESTRING|, |V_ASN1_GENERALSTRING|, |V_ASN1_UNIVERSALSTRING|, or +// |V_ASN1_BMPSTRING|. The data contains the byte representation of of the +// string. +// +// When representing a BIT STRING value, the type field is |V_ASN1_BIT_STRING|. +// See bit string documentation below for how the data and flags are used. +// +// When representing an INTEGER or ENUMERATED value, the type field is one of +// |V_ASN1_INTEGER|, |V_ASN1_NEG_INTEGER|, |V_ASN1_ENUMERATED|, or +// |V_ASN1_NEG_ENUMERATED|. See integer documentation below for details. +// +// When representing a GeneralizedTime or UTCTime value, the type field is +// |V_ASN1_GENERALIZEDTIME| or |V_ASN1_UTCTIME|, respectively. The data contains +// the DER encoding of the value. For example, the UNIX epoch would be +// "19700101000000Z" for a GeneralizedTime and "700101000000Z" for a UTCTime. +// +// |ASN1_STRING|, when stored in an |ASN1_TYPE|, may also represent an element +// with tag not directly supported by this library. See |ASN1_TYPE| for details. +// +// |ASN1_STRING| additionally has the following typedefs: |ASN1_BIT_STRING|, +// |ASN1_BMPSTRING|, |ASN1_ENUMERATED|, |ASN1_GENERALIZEDTIME|, +// |ASN1_GENERALSTRING|, |ASN1_IA5STRING|, |ASN1_INTEGER|, |ASN1_OCTET_STRING|, +// |ASN1_PRINTABLESTRING|, |ASN1_T61STRING|, |ASN1_TIME|, +// |ASN1_UNIVERSALSTRING|, |ASN1_UTCTIME|, |ASN1_UTF8STRING|, and +// |ASN1_VISIBLESTRING|. Other than |ASN1_TIME|, these correspond to universal +// ASN.1 types. |ASN1_TIME| represents a CHOICE of UTCTime and GeneralizedTime, +// with a cutoff of 2049, as used in Section 4.1.2.5 of RFC 5280. +// +// For clarity, callers are encouraged to use the appropriate typedef when +// available. 
They are the same type as |ASN1_STRING|, so a caller may freely +// pass them into functions expecting |ASN1_STRING|, such as +// |ASN1_STRING_length|. +// +// If a function returns an |ASN1_STRING| where the typedef or ASN.1 structure +// implies constraints on the type field, callers may assume that the type field +// is correct. However, if a function takes an |ASN1_STRING| as input, callers +// must ensure the type field matches. These invariants are not captured by the +// C type system and may not be checked at runtime. For example, callers may +// assume the output of |X509_get0_serialNumber| has type |V_ASN1_INTEGER| or +// |V_ASN1_NEG_INTEGER|. Callers must not pass a string of type +// |V_ASN1_OCTET_STRING| to |X509_set_serialNumber|. Doing so may break +// invariants on the |X509| object and break the |X509_get0_serialNumber| +// invariant. +// +// TODO(https://crbug.com/boringssl/445): This is very unfriendly. Getting the +// type field wrong should not cause memory errors, but it may do strange +// things. We should add runtime checks to anything that consumes |ASN1_STRING|s +// from the caller. +struct asn1_string_st { + int length; + int type; + unsigned char *data; + long flags; +}; + +// ASN1_STRING_FLAG_BITS_LEFT indicates, in a BIT STRING |ASN1_STRING|, that +// flags & 0x7 contains the number of padding bits added to the BIT STRING +// value. When not set, all trailing zero bits in the last byte are implicitly +// treated as padding. This behavior is deprecated and should not be used. +#define ASN1_STRING_FLAG_BITS_LEFT 0x08 + +// ASN1_STRING_type_new returns a newly-allocated empty |ASN1_STRING| object of +// type |type|, or NULL on error. +OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_type_new(int type); + +// ASN1_STRING_new returns a newly-allocated empty |ASN1_STRING| object with an +// arbitrary type. Prefer one of the type-specific constructors, such as +// |ASN1_OCTET_STRING_new|, or |ASN1_STRING_type_new|. 
+OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_new(void); + +// ASN1_STRING_free releases memory associated with |str|. +OPENSSL_EXPORT void ASN1_STRING_free(ASN1_STRING *str); + +// ASN1_STRING_copy sets |dst| to a copy of |str|. It returns one on success and +// zero on error. +OPENSSL_EXPORT int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str); + +// ASN1_STRING_dup returns a newly-allocated copy of |str|, or NULL on error. +OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *str); + +// ASN1_STRING_type returns the type of |str|. This value will be one of the +// |V_ASN1_*| constants. +OPENSSL_EXPORT int ASN1_STRING_type(const ASN1_STRING *str); + +// ASN1_STRING_get0_data returns a pointer to |str|'s contents. Callers should +// use |ASN1_STRING_length| to determine the length of the string. The string +// may have embedded NUL bytes and may not be NUL-terminated. +OPENSSL_EXPORT const unsigned char *ASN1_STRING_get0_data( + const ASN1_STRING *str); + +// ASN1_STRING_data returns a mutable pointer to |str|'s contents. Callers +// should use |ASN1_STRING_length| to determine the length of the string. The +// string may have embedded NUL bytes and may not be NUL-terminated. +// +// Prefer |ASN1_STRING_get0_data|. +OPENSSL_EXPORT unsigned char *ASN1_STRING_data(ASN1_STRING *str); + +// ASN1_STRING_length returns the length of |str|, in bytes. +OPENSSL_EXPORT int ASN1_STRING_length(const ASN1_STRING *str); + +// ASN1_STRING_cmp compares |a| and |b|'s type and contents. It returns an +// integer equal to, less than, or greater than zero if |a| is equal to, less +// than, or greater than |b|, respectively. This function compares by length, +// then data, then type. Note the data compared is the |ASN1_STRING| internal +// representation and the type order is arbitrary. While this comparison is +// suitable for sorting, callers should not rely on the exact order when |a| +// and |b| are different types. 
+// +// Note that, if |a| and |b| are INTEGERs, this comparison does not order the +// values numerically. For a numerical comparison, use |ASN1_INTEGER_cmp|. +OPENSSL_EXPORT int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b); + +// ASN1_STRING_set sets the contents of |str| to a copy of |len| bytes from +// |data|. It returns one on success and zero on error. +OPENSSL_EXPORT int ASN1_STRING_set(ASN1_STRING *str, const void *data, int len); + +// ASN1_STRING_set0 sets the contents of |str| to |len| bytes from |data|. It +// takes ownership of |data|, which must have been allocated with +// |OPENSSL_malloc|. +OPENSSL_EXPORT void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len); + +// The following functions call |ASN1_STRING_type_new| with the corresponding +// |V_ASN1_*| constant. +OPENSSL_EXPORT ASN1_BMPSTRING *ASN1_BMPSTRING_new(void); +OPENSSL_EXPORT ASN1_GENERALSTRING *ASN1_GENERALSTRING_new(void); +OPENSSL_EXPORT ASN1_IA5STRING *ASN1_IA5STRING_new(void); +OPENSSL_EXPORT ASN1_OCTET_STRING *ASN1_OCTET_STRING_new(void); +OPENSSL_EXPORT ASN1_PRINTABLESTRING *ASN1_PRINTABLESTRING_new(void); +OPENSSL_EXPORT ASN1_T61STRING *ASN1_T61STRING_new(void); +OPENSSL_EXPORT ASN1_UNIVERSALSTRING *ASN1_UNIVERSALSTRING_new(void); +OPENSSL_EXPORT ASN1_UTF8STRING *ASN1_UTF8STRING_new(void); +OPENSSL_EXPORT ASN1_VISIBLESTRING *ASN1_VISIBLESTRING_new(void); + +// The following functions call |ASN1_STRING_free|. 
+OPENSSL_EXPORT void ASN1_BMPSTRING_free(ASN1_BMPSTRING *str); +OPENSSL_EXPORT void ASN1_GENERALSTRING_free(ASN1_GENERALSTRING *str); +OPENSSL_EXPORT void ASN1_IA5STRING_free(ASN1_IA5STRING *str); +OPENSSL_EXPORT void ASN1_OCTET_STRING_free(ASN1_OCTET_STRING *str); +OPENSSL_EXPORT void ASN1_PRINTABLESTRING_free(ASN1_PRINTABLESTRING *str); +OPENSSL_EXPORT void ASN1_T61STRING_free(ASN1_T61STRING *str); +OPENSSL_EXPORT void ASN1_UNIVERSALSTRING_free(ASN1_UNIVERSALSTRING *str); +OPENSSL_EXPORT void ASN1_UTF8STRING_free(ASN1_UTF8STRING *str); +OPENSSL_EXPORT void ASN1_VISIBLESTRING_free(ASN1_VISIBLESTRING *str); + +// The following functions parse up to |len| bytes from |*inp| as a +// DER-encoded ASN.1 value of the corresponding type, as described in +// |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_BMPSTRING *d2i_ASN1_BMPSTRING(ASN1_BMPSTRING **out, + const uint8_t **inp, + long len); +OPENSSL_EXPORT ASN1_GENERALSTRING *d2i_ASN1_GENERALSTRING( + ASN1_GENERALSTRING **out, const uint8_t **inp, long len); +OPENSSL_EXPORT ASN1_IA5STRING *d2i_ASN1_IA5STRING(ASN1_IA5STRING **out, + const uint8_t **inp, + long len); +OPENSSL_EXPORT ASN1_OCTET_STRING *d2i_ASN1_OCTET_STRING(ASN1_OCTET_STRING **out, + const uint8_t **inp, + long len); +OPENSSL_EXPORT ASN1_PRINTABLESTRING *d2i_ASN1_PRINTABLESTRING( + ASN1_PRINTABLESTRING **out, const uint8_t **inp, long len); +OPENSSL_EXPORT ASN1_T61STRING *d2i_ASN1_T61STRING(ASN1_T61STRING **out, + const uint8_t **inp, + long len); +OPENSSL_EXPORT ASN1_UNIVERSALSTRING *d2i_ASN1_UNIVERSALSTRING( + ASN1_UNIVERSALSTRING **out, const uint8_t **inp, long len); +OPENSSL_EXPORT ASN1_UTF8STRING *d2i_ASN1_UTF8STRING(ASN1_UTF8STRING **out, + const uint8_t **inp, + long len); +OPENSSL_EXPORT ASN1_VISIBLESTRING *d2i_ASN1_VISIBLESTRING( + ASN1_VISIBLESTRING **out, const uint8_t **inp, long len); + +// The following functions 
marshal |in| as a DER-encoded ASN.1 value of the +// corresponding type, as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_BMPSTRING(const ASN1_BMPSTRING *in, uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_GENERALSTRING(const ASN1_GENERALSTRING *in, + uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_IA5STRING(const ASN1_IA5STRING *in, uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_OCTET_STRING(const ASN1_OCTET_STRING *in, + uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_PRINTABLESTRING(const ASN1_PRINTABLESTRING *in, + uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_T61STRING(const ASN1_T61STRING *in, uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_UNIVERSALSTRING(const ASN1_UNIVERSALSTRING *in, + uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_UTF8STRING(const ASN1_UTF8STRING *in, + uint8_t **outp); +OPENSSL_EXPORT int i2d_ASN1_VISIBLESTRING(const ASN1_VISIBLESTRING *in, + uint8_t **outp); + +// The following |ASN1_ITEM|s have the ASN.1 type referred to in their name and +// C type |ASN1_STRING*|. The C type may also be written as the corresponding +// typedef. +DECLARE_ASN1_ITEM(ASN1_BMPSTRING) +DECLARE_ASN1_ITEM(ASN1_GENERALSTRING) +DECLARE_ASN1_ITEM(ASN1_IA5STRING) +DECLARE_ASN1_ITEM(ASN1_OCTET_STRING) +DECLARE_ASN1_ITEM(ASN1_PRINTABLESTRING) +DECLARE_ASN1_ITEM(ASN1_T61STRING) +DECLARE_ASN1_ITEM(ASN1_UNIVERSALSTRING) +DECLARE_ASN1_ITEM(ASN1_UTF8STRING) +DECLARE_ASN1_ITEM(ASN1_VISIBLESTRING) + +// ASN1_OCTET_STRING_dup calls |ASN1_STRING_dup|. +OPENSSL_EXPORT ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup( + const ASN1_OCTET_STRING *a); + +// ASN1_OCTET_STRING_cmp calls |ASN1_STRING_cmp|. +OPENSSL_EXPORT int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a, + const ASN1_OCTET_STRING *b); + +// ASN1_OCTET_STRING_set calls |ASN1_STRING_set|. +OPENSSL_EXPORT int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *str, + const unsigned char *data, int len); + +// ASN1_STRING_to_UTF8 converts |in| to UTF-8. 
On success, sets |*out| to a +// newly-allocated buffer containing the resulting string and returns the length +// of the string. The caller must call |OPENSSL_free| to release |*out| when +// done. On error, it returns a negative number. +OPENSSL_EXPORT int ASN1_STRING_to_UTF8(unsigned char **out, + const ASN1_STRING *in); + +// The following formats define encodings for use with functions like +// |ASN1_mbstring_copy|. Note |MBSTRING_ASC| refers to Latin-1, not ASCII. +#define MBSTRING_FLAG 0x1000 +#define MBSTRING_UTF8 (MBSTRING_FLAG) +#define MBSTRING_ASC (MBSTRING_FLAG | 1) +#define MBSTRING_BMP (MBSTRING_FLAG | 2) +#define MBSTRING_UNIV (MBSTRING_FLAG | 4) + +// DIRSTRING_TYPE contains the valid string types in an X.509 DirectoryString. +#define DIRSTRING_TYPE \ + (B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | B_ASN1_BMPSTRING | \ + B_ASN1_UTF8STRING) + +// PKCS9STRING_TYPE contains the valid string types in a PKCS9String. +#define PKCS9STRING_TYPE (DIRSTRING_TYPE | B_ASN1_IA5STRING) + +// ASN1_mbstring_copy converts |len| bytes from |in| to an ASN.1 string. If +// |len| is -1, |in| must be NUL-terminated and the length is determined by +// |strlen|. |in| is decoded according to |inform|, which must be one of +// |MBSTRING_*|. |mask| determines the set of valid output types and is a +// bitmask containing a subset of |B_ASN1_PRINTABLESTRING|, |B_ASN1_IA5STRING|, +// |B_ASN1_T61STRING|, |B_ASN1_BMPSTRING|, |B_ASN1_UNIVERSALSTRING|, and +// |B_ASN1_UTF8STRING|, in that preference order. This function chooses the +// first output type in |mask| which can represent |in|. It interprets T61String +// as Latin-1, rather than T.61. +// +// If |mask| is zero, |DIRSTRING_TYPE| is used by default. +// +// On success, this function returns the |V_ASN1_*| constant corresponding to +// the selected output type and, if |out| and |*out| are both non-NULL, updates +// the object at |*out| with the result. 
If |out| is non-NULL and |*out| is +// NULL, it instead sets |*out| to a newly-allocated |ASN1_STRING| containing +// the result. If |out| is NULL, it returns the selected output type without +// constructing an |ASN1_STRING|. On error, this function returns -1. +OPENSSL_EXPORT int ASN1_mbstring_copy(ASN1_STRING **out, const uint8_t *in, + int len, int inform, unsigned long mask); + +// ASN1_mbstring_ncopy behaves like |ASN1_mbstring_copy| but returns an error if +// the input is less than |minsize| or greater than |maxsize| codepoints long. A +// |maxsize| value of zero is ignored. Note the sizes are measured in +// codepoints, not output bytes. +OPENSSL_EXPORT int ASN1_mbstring_ncopy(ASN1_STRING **out, const uint8_t *in, + int len, int inform, unsigned long mask, + long minsize, long maxsize); + +// ASN1_STRING_set_by_NID behaves like |ASN1_mbstring_ncopy|, but determines +// |mask|, |minsize|, and |maxsize| based on |nid|. When |nid| is a recognized +// X.509 attribute type, it will pick a suitable ASN.1 string type and bounds. +// For most attribute types, it preferentially chooses UTF8String. If |nid| is +// unrecognized, it uses UTF8String by default. +// +// Slightly unlike |ASN1_mbstring_ncopy|, this function interprets |out| and +// returns its result as follows: If |out| is NULL, it returns a newly-allocated +// |ASN1_STRING| containing the result. If |out| is non-NULL and +// |*out| is NULL, it additionally sets |*out| to the result. If both |out| and +// |*out| are non-NULL, it instead updates the object at |*out| and returns +// |*out|. In all cases, it returns NULL on error. 
+// +// This function supports the following NIDs: |NID_countryName|, +// |NID_dnQualifier|, |NID_domainComponent|, |NID_friendlyName|, +// |NID_givenName|, |NID_initials|, |NID_localityName|, |NID_ms_csp_name|, +// |NID_name|, |NID_organizationalUnitName|, |NID_organizationName|, +// |NID_pkcs9_challengePassword|, |NID_pkcs9_emailAddress|, +// |NID_pkcs9_unstructuredAddress|, |NID_pkcs9_unstructuredName|, +// |NID_serialNumber|, |NID_stateOrProvinceName|, and |NID_surname|. Additional +// NIDs may be registered with |ASN1_STRING_set_by_NID|, but it is recommended +// to call |ASN1_mbstring_ncopy| directly instead. +OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out, + const unsigned char *in, + int len, int inform, + int nid); + +// STABLE_NO_MASK causes |ASN1_STRING_TABLE_add| to allow types other than +// UTF8String. +#define STABLE_NO_MASK 0x02 + +// ASN1_STRING_TABLE_add registers the corresponding parameters with |nid|, for +// use with |ASN1_STRING_set_by_NID|. It returns one on success and zero on +// error. It is an error to call this function if |nid| is a built-in NID, or +// was already registered by a previous call. +// +// WARNING: This function affects global state in the library. If two libraries +// in the same address space register information for the same OID, one call +// will fail. Prefer directly passing the desired parameters to +// |ASN1_mbstring_copy| or |ASN1_mbstring_ncopy| instead. +OPENSSL_EXPORT int ASN1_STRING_TABLE_add(int nid, long minsize, long maxsize, + unsigned long mask, + unsigned long flags); + + +// Multi-strings. +// +// A multi-string, or "MSTRING", is an |ASN1_STRING| that represents a CHOICE of +// several string or string-like types, such as X.509's DirectoryString. The +// |ASN1_STRING|'s type field determines which type is used. +// +// Multi-string types are associated with a bitmask, using the |B_ASN1_*| +// constants, which defines which types are valid. 
+ +// B_ASN1_DIRECTORYSTRING is a bitmask of types allowed in an X.509 +// DirectoryString (RFC 5280). +#define B_ASN1_DIRECTORYSTRING \ + (B_ASN1_PRINTABLESTRING | B_ASN1_TELETEXSTRING | B_ASN1_BMPSTRING | \ + B_ASN1_UNIVERSALSTRING | B_ASN1_UTF8STRING) + +// DIRECTORYSTRING_new returns a newly-allocated |ASN1_STRING| with type -1, or +// NULL on error. The resulting |ASN1_STRING| is not a valid X.509 +// DirectoryString until initialized with a value. +OPENSSL_EXPORT ASN1_STRING *DIRECTORYSTRING_new(void); + +// DIRECTORYSTRING_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void DIRECTORYSTRING_free(ASN1_STRING *str); + +// d2i_DIRECTORYSTRING parses up to |len| bytes from |*inp| as a DER-encoded +// X.509 DirectoryString (RFC 5280), as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +// +// TODO(https://crbug.com/boringssl/449): DirectoryString's non-empty string +// requirement is not currently enforced. +OPENSSL_EXPORT ASN1_STRING *d2i_DIRECTORYSTRING(ASN1_STRING **out, + const uint8_t **inp, long len); + +// i2d_DIRECTORYSTRING marshals |in| as a DER-encoded X.509 DirectoryString (RFC +// 5280), as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_DIRECTORYSTRING(const ASN1_STRING *in, uint8_t **outp); + +// DIRECTORYSTRING is an |ASN1_ITEM| whose ASN.1 type is X.509 DirectoryString +// (RFC 5280) and C type is |ASN1_STRING*|. +DECLARE_ASN1_ITEM(DIRECTORYSTRING) + +// B_ASN1_DISPLAYTEXT is a bitmask of types allowed in an X.509 DisplayText (RFC +// 5280). +#define B_ASN1_DISPLAYTEXT \ + (B_ASN1_IA5STRING | B_ASN1_VISIBLESTRING | B_ASN1_BMPSTRING | \ + B_ASN1_UTF8STRING) + +// DISPLAYTEXT_new returns a newly-allocated |ASN1_STRING| with type -1, or NULL +// on error. The resulting |ASN1_STRING| is not a valid X.509 DisplayText until +// initialized with a value. 
+OPENSSL_EXPORT ASN1_STRING *DISPLAYTEXT_new(void); + +// DISPLAYTEXT_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void DISPLAYTEXT_free(ASN1_STRING *str); + +// d2i_DISPLAYTEXT parses up to |len| bytes from |*inp| as a DER-encoded X.509 +// DisplayText (RFC 5280), as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +// +// TODO(https://crbug.com/boringssl/449): DisplayText's size limits are not +// currently enforced. +OPENSSL_EXPORT ASN1_STRING *d2i_DISPLAYTEXT(ASN1_STRING **out, + const uint8_t **inp, long len); + +// i2d_DISPLAYTEXT marshals |in| as a DER-encoded X.509 DisplayText (RFC 5280), +// as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_DISPLAYTEXT(const ASN1_STRING *in, uint8_t **outp); + +// DISPLAYTEXT is an |ASN1_ITEM| whose ASN.1 type is X.509 DisplayText (RFC +// 5280) and C type is |ASN1_STRING*|. +DECLARE_ASN1_ITEM(DISPLAYTEXT) + + +// Bit strings. +// +// An ASN.1 BIT STRING type represents a string of bits. The string may not +// necessarily be a whole number of bytes. BIT STRINGs occur in ASN.1 structures +// in several forms: +// +// Some BIT STRINGs represent a bitmask of named bits, such as the X.509 key +// usage extension in RFC 5280, section 4.2.1.3. For such bit strings, DER +// imposes an additional restriction that trailing zero bits are removed. Some +// functions like |ASN1_BIT_STRING_set_bit| help in maintaining this. +// +// Other BIT STRINGs are arbitrary strings of bits used as identifiers and do +// not have this constraint, such as the X.509 issuerUniqueID field. +// +// Finally, some structures use BIT STRINGs as a container for byte strings. For +// example, the signatureValue field in X.509 and the subjectPublicKey field in +// SubjectPublicKeyInfo are defined as BIT STRINGs with a value specific to the +// AlgorithmIdentifier. 
While some unknown algorithm could choose to store +// arbitrary bit strings, all supported algorithms use a byte string, with bit +// order matching the DER encoding. Callers interpreting a BIT STRING as a byte +// string should use |ASN1_BIT_STRING_num_bytes| instead of |ASN1_STRING_length| +// and reject bit strings that are not a whole number of bytes. +// +// This library represents BIT STRINGs as |ASN1_STRING|s with type +// |V_ASN1_BIT_STRING|. The data contains the encoded form of the BIT STRING, +// including any padding bits added to round to a whole number of bytes, but +// excluding the leading byte containing the number of padding bits. If +// |ASN1_STRING_FLAG_BITS_LEFT| is set, the bottom three bits contain the +// number of padding bits. For example, DER encodes the BIT STRING {1, 0} as +// {0x06, 0x80 = 0b10_000000}. The |ASN1_STRING| representation has data of +// {0x80} and flags of ASN1_STRING_FLAG_BITS_LEFT | 6. If +// |ASN1_STRING_FLAG_BITS_LEFT| is unset, trailing zero bits are implicitly +// removed. Callers should not rely on this representation when constructing bit +// strings. The padding bits in the |ASN1_STRING| data must be zero. + +// ASN1_BIT_STRING_new calls |ASN1_STRING_type_new| with |V_ASN1_BIT_STRING|. +OPENSSL_EXPORT ASN1_BIT_STRING *ASN1_BIT_STRING_new(void); + +// ASN1_BIT_STRING_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_BIT_STRING_free(ASN1_BIT_STRING *str); + +// d2i_ASN1_BIT_STRING parses up to |len| bytes from |*inp| as a DER-encoded +// ASN.1 BIT STRING, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_BIT_STRING *d2i_ASN1_BIT_STRING(ASN1_BIT_STRING **out, + const uint8_t **inp, + long len); + +// i2d_ASN1_BIT_STRING marshals |in| as a DER-encoded ASN.1 BIT STRING, as +// described in |i2d_SAMPLE|. 
+OPENSSL_EXPORT int i2d_ASN1_BIT_STRING(const ASN1_BIT_STRING *in, + uint8_t **outp); + +// c2i_ASN1_BIT_STRING decodes |len| bytes from |*inp| as the contents of a +// DER-encoded BIT STRING, excluding the tag and length. It behaves like +// |d2i_SAMPLE_with_reuse| except, on success, it always consumes all |len| +// bytes. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **out, + const uint8_t **inp, + long len); + +// i2c_ASN1_BIT_STRING encodes |in| as the contents of a DER-encoded BIT STRING, +// excluding the tag and length. If |outp| is non-NULL, it writes the result to +// |*outp|, advances |*outp| just past the output, and returns the number of +// bytes written. |*outp| must have space available for the result. If |outp| is +// NULL, it returns the number of bytes without writing anything. On error, it +// returns a value <= 0. +// +// Note this function differs slightly from |i2d_SAMPLE|. If |outp| is non-NULL +// and |*outp| is NULL, it does not allocate a new buffer. +// +// TODO(davidben): This function currently returns zero on error instead of -1, +// but it is also mostly infallible. I've currently documented <= 0 to suggest +// callers work with both. +OPENSSL_EXPORT int i2c_ASN1_BIT_STRING(const ASN1_BIT_STRING *in, + uint8_t **outp); + +// ASN1_BIT_STRING is an |ASN1_ITEM| with ASN.1 type BIT STRING and C type +// |ASN1_BIT_STRING*|. +DECLARE_ASN1_ITEM(ASN1_BIT_STRING) + +// ASN1_BIT_STRING_num_bytes computes the length of |str| in bytes. If |str|'s +// bit length is a multiple of 8, it sets |*out| to the byte length and returns +// one. Otherwise, it returns zero. +// +// This function may be used with |ASN1_STRING_get0_data| to interpret |str| as +// a byte string. 
+OPENSSL_EXPORT int ASN1_BIT_STRING_num_bytes(const ASN1_BIT_STRING *str, + size_t *out); + +// ASN1_BIT_STRING_set calls |ASN1_STRING_set|. It leaves flags unchanged, so +// the caller must set the number of unused bits. +// +// TODO(davidben): Maybe it should? Wrapping a byte string in a bit string is a +// common use case. +OPENSSL_EXPORT int ASN1_BIT_STRING_set(ASN1_BIT_STRING *str, + const unsigned char *d, int length); + +// ASN1_BIT_STRING_set_bit sets bit |n| of |str| to one if |value| is non-zero +// and zero if |value| is zero, resizing |str| as needed. It then truncates +// trailing zeros in |str| to align with the DER representation for a bit string +// with named bits. It returns one on success and zero on error. |n| is indexed +// beginning from zero. +OPENSSL_EXPORT int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *str, int n, + int value); + +// ASN1_BIT_STRING_get_bit returns one if bit |n| of |str| is in bounds and set, +// and zero otherwise. |n| is indexed beginning from zero. +OPENSSL_EXPORT int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *str, int n); + +// ASN1_BIT_STRING_check returns one if |str| only contains bits that are set in +// the |flags_len| bytes pointed to by |flags|. Otherwise it returns zero. Bits in +// |flags| are arranged according to the DER representation, so bit 0 +// corresponds to the MSB of |flags[0]|. +OPENSSL_EXPORT int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *str, + const unsigned char *flags, + int flags_len); + + +// Integers and enumerated values. +// +// INTEGER and ENUMERATED values are represented as |ASN1_STRING|s where the +// data contains the big-endian encoding of the absolute value of the integer. +// The sign bit is encoded in the type: non-negative values have a type of +// |V_ASN1_INTEGER| or |V_ASN1_ENUMERATED|, while negative values have a type of +// |V_ASN1_NEG_INTEGER| or |V_ASN1_NEG_ENUMERATED|. Note this differs from DER's +// two's complement representation. 
+ +DEFINE_STACK_OF(ASN1_INTEGER) + +// ASN1_INTEGER_new calls |ASN1_STRING_type_new| with |V_ASN1_INTEGER|. The +// resulting object has value zero. +OPENSSL_EXPORT ASN1_INTEGER *ASN1_INTEGER_new(void); + +// ASN1_INTEGER_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_INTEGER_free(ASN1_INTEGER *str); + +// ASN1_INTEGER_dup calls |ASN1_STRING_dup|. +OPENSSL_EXPORT ASN1_INTEGER *ASN1_INTEGER_dup(const ASN1_INTEGER *x); + +// d2i_ASN1_INTEGER parses up to |len| bytes from |*inp| as a DER-encoded +// ASN.1 INTEGER, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_INTEGER *d2i_ASN1_INTEGER(ASN1_INTEGER **out, + const uint8_t **inp, long len); + +// i2d_ASN1_INTEGER marshals |in| as a DER-encoded ASN.1 INTEGER, as +// described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_INTEGER(const ASN1_INTEGER *in, uint8_t **outp); + +// c2i_ASN1_INTEGER decodes |len| bytes from |*inp| as the contents of a +// DER-encoded INTEGER, excluding the tag and length. It behaves like +// |d2i_SAMPLE_with_reuse| except, on success, it always consumes all |len| +// bytes. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// some invalid inputs, but this will be removed in the future. +OPENSSL_EXPORT ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **in, + const uint8_t **outp, long len); + +// i2c_ASN1_INTEGER encodes |in| as the contents of a DER-encoded INTEGER, +// excluding the tag and length. If |outp| is non-NULL, it writes the result to +// |*outp|, advances |*outp| just past the output, and returns the number of +// bytes written. |*outp| must have space available for the result. If |outp| is +// NULL, it returns the number of bytes without writing anything. On error, it +// returns a value <= 0. +// +// Note this function differs slightly from |i2d_SAMPLE|. 
If |outp| is non-NULL +// and |*outp| is NULL, it does not allocate a new buffer. +// +// TODO(davidben): This function currently returns zero on error instead of -1, +// but it is also mostly infallible. I've currently documented <= 0 to suggest +// callers work with both. +OPENSSL_EXPORT int i2c_ASN1_INTEGER(const ASN1_INTEGER *in, uint8_t **outp); + +// ASN1_INTEGER is an |ASN1_ITEM| with ASN.1 type INTEGER and C type +// |ASN1_INTEGER*|. +DECLARE_ASN1_ITEM(ASN1_INTEGER) + +// ASN1_INTEGER_set sets |a| to an INTEGER with value |v|. It returns one on +// success and zero on error. +OPENSSL_EXPORT int ASN1_INTEGER_set(ASN1_INTEGER *a, long v); + +// ASN1_INTEGER_set_uint64 sets |a| to an INTEGER with value |v|. It returns one +// on success and zero on error. +OPENSSL_EXPORT int ASN1_INTEGER_set_uint64(ASN1_INTEGER *out, uint64_t v); + +// ASN1_INTEGER_get returns the value of |a| as a |long|, or -1 if |a| is out of +// range or the wrong type. +OPENSSL_EXPORT long ASN1_INTEGER_get(const ASN1_INTEGER *a); + +// BN_to_ASN1_INTEGER sets |ai| to an INTEGER with value |bn| and returns |ai| +// on success or NULL on error. If |ai| is NULL, it returns a newly-allocated +// |ASN1_INTEGER| on success instead, which the caller must release with +// |ASN1_INTEGER_free|. +OPENSSL_EXPORT ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, + ASN1_INTEGER *ai); + +// ASN1_INTEGER_to_BN sets |bn| to the value of |ai| and returns |bn| on success +// or NULL on error. If |bn| is NULL, it returns a newly-allocated |BIGNUM| on +// success instead, which the caller must release with |BN_free|. +OPENSSL_EXPORT BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai, BIGNUM *bn); + +// ASN1_INTEGER_cmp compares the values of |x| and |y|. It returns an integer +// equal to, less than, or greater than zero if |x| is equal to, less than, or +// greater than |y|, respectively. 
+OPENSSL_EXPORT int ASN1_INTEGER_cmp(const ASN1_INTEGER *x, + const ASN1_INTEGER *y); + +// ASN1_ENUMERATED_new calls |ASN1_STRING_type_new| with |V_ASN1_ENUMERATED|. +// The resulting object has value zero. +OPENSSL_EXPORT ASN1_ENUMERATED *ASN1_ENUMERATED_new(void); + +// ASN1_ENUMERATED_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_ENUMERATED_free(ASN1_ENUMERATED *str); + +// d2i_ASN1_ENUMERATED parses up to |len| bytes from |*inp| as a DER-encoded +// ASN.1 ENUMERATED, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_ENUMERATED *d2i_ASN1_ENUMERATED(ASN1_ENUMERATED **out, + const uint8_t **inp, + long len); + +// i2d_ASN1_ENUMERATED marshals |in| as a DER-encoded ASN.1 ENUMERATED, as +// described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_ENUMERATED(const ASN1_ENUMERATED *in, + uint8_t **outp); + +// ASN1_ENUMERATED is an |ASN1_ITEM| with ASN.1 type ENUMERATED and C type +// |ASN1_ENUMERATED*|. +DECLARE_ASN1_ITEM(ASN1_ENUMERATED) + +// ASN1_ENUMERATED_set sets |a| to an ENUMERATED with value |v|. It returns one +// on success and zero on error. +OPENSSL_EXPORT int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v); + +// ASN1_ENUMERATED_get returns the value of |a| as a |long|, or -1 if |a| is out +// of range or the wrong type. +OPENSSL_EXPORT long ASN1_ENUMERATED_get(const ASN1_ENUMERATED *a); + +// BN_to_ASN1_ENUMERATED sets |ai| to an ENUMERATED with value |bn| and returns +// |ai| on success or NULL on error. If |ai| is NULL, it returns a +// newly-allocated |ASN1_INTEGER| on success instead, which the caller must +// release with |ASN1_INTEGER_free|. +OPENSSL_EXPORT ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(const BIGNUM *bn, + ASN1_ENUMERATED *ai); + +// ASN1_ENUMERATED_to_BN sets |bn| to the value of |ai| and returns |bn| on +// success or NULL on error. 
If |bn| is NULL, it returns a newly-allocated +// |BIGNUM| on success instead, which the caller must release with |BN_free|. +OPENSSL_EXPORT BIGNUM *ASN1_ENUMERATED_to_BN(const ASN1_ENUMERATED *ai, + BIGNUM *bn); + + +// Time. +// +// GeneralizedTime and UTCTime values are represented as |ASN1_STRING|s. The +// type field is |V_ASN1_GENERALIZEDTIME| or |V_ASN1_UTCTIME|, respectively. The +// data field contains the DER encoding of the value. For example, the UNIX +// epoch would be "19700101000000Z" for a GeneralizedTime and "700101000000Z" +// for a UTCTime. +// +// ASN.1 does not define how to interpret UTCTime's two-digit year. RFC 5280 +// defines it as a range from 1950 to 2049 for X.509. The library uses the +// RFC 5280 interpretation. It does not currently enforce the restrictions from +// BER, and the additional restrictions from RFC 5280, but future versions may. +// Callers should not rely on fractional seconds and non-UTC time zones. +// +// The |ASN1_TIME| typedef is a multi-string representing the X.509 Time type, +// which is a CHOICE of GeneralizedTime and UTCTime, using UTCTime when the +// value is in range. + +// ASN1_UTCTIME_new calls |ASN1_STRING_type_new| with |V_ASN1_UTCTIME|. The +// resulting object contains empty contents and must be initialized to be a +// valid UTCTime. +OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_new(void); + +// ASN1_UTCTIME_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_UTCTIME_free(ASN1_UTCTIME *str); + +// d2i_ASN1_UTCTIME parses up to |len| bytes from |*inp| as a DER-encoded +// ASN.1 UTCTime, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_UTCTIME *d2i_ASN1_UTCTIME(ASN1_UTCTIME **out, + const uint8_t **inp, long len); + +// i2d_ASN1_UTCTIME marshals |in| as a DER-encoded ASN.1 UTCTime, as +// described in |i2d_SAMPLE|. 
+OPENSSL_EXPORT int i2d_ASN1_UTCTIME(const ASN1_UTCTIME *in, uint8_t **outp); + +// ASN1_UTCTIME is an |ASN1_ITEM| with ASN.1 type UTCTime and C type +// |ASN1_UTCTIME*|. +DECLARE_ASN1_ITEM(ASN1_UTCTIME) + +// ASN1_UTCTIME_check returns one if |a| is a valid UTCTime and zero otherwise. +OPENSSL_EXPORT int ASN1_UTCTIME_check(const ASN1_UTCTIME *a); + +// ASN1_UTCTIME_set represents |t| as a UTCTime and writes the result to |s|. It +// returns |s| on success and NULL on error. If |s| is NULL, it returns a +// newly-allocated |ASN1_UTCTIME| instead. +// +// Note this function may fail if the time is out of range for UTCTime. +OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s, time_t t); + +// ASN1_UTCTIME_adj adds |offset_day| days and |offset_sec| seconds to |t| and +// writes the result to |s| as a UTCTime. It returns |s| on success and NULL on +// error. If |s| is NULL, it returns a newly-allocated |ASN1_UTCTIME| instead. +// +// Note this function may fail if the time overflows or is out of range for +// UTCTime. +OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, time_t t, + int offset_day, long offset_sec); + +// ASN1_UTCTIME_set_string sets |s| to a UTCTime whose contents are a copy of +// |str|. It returns one on success and zero on error or if |str| is not a valid +// UTCTime. +// +// If |s| is NULL, this function validates |str| without copying it. +OPENSSL_EXPORT int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str); + +// ASN1_UTCTIME_cmp_time_t compares |s| to |t|. It returns -1 if |s| < |t|, 0 if +// they are equal, 1 if |s| > |t|, and -2 on error. +OPENSSL_EXPORT int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t); + +// ASN1_GENERALIZEDTIME_new calls |ASN1_STRING_type_new| with +// |V_ASN1_GENERALIZEDTIME|. The resulting object contains empty contents and +// must be initialized to be a valid GeneralizedTime. 
+OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_new(void); + +// ASN1_GENERALIZEDTIME_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_GENERALIZEDTIME_free(ASN1_GENERALIZEDTIME *str); + +// d2i_ASN1_GENERALIZEDTIME parses up to |len| bytes from |*inp| as a +// DER-encoded ASN.1 GeneralizedTime, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_GENERALIZEDTIME *d2i_ASN1_GENERALIZEDTIME( + ASN1_GENERALIZEDTIME **out, const uint8_t **inp, long len); + +// i2d_ASN1_GENERALIZEDTIME marshals |in| as a DER-encoded ASN.1 +// GeneralizedTime, as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_GENERALIZEDTIME(const ASN1_GENERALIZEDTIME *in, + uint8_t **outp); + +// ASN1_GENERALIZEDTIME is an |ASN1_ITEM| with ASN.1 type GeneralizedTime and C +// type |ASN1_GENERALIZEDTIME*|. +DECLARE_ASN1_ITEM(ASN1_GENERALIZEDTIME) + +// ASN1_GENERALIZEDTIME_check returns one if |a| is a valid GeneralizedTime and +// zero otherwise. +OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *a); + +// ASN1_GENERALIZEDTIME_set represents |t| as a GeneralizedTime and writes the +// result to |s|. It returns |s| on success and NULL on error. If |s| is NULL, +// it returns a newly-allocated |ASN1_GENERALIZEDTIME| instead. +// +// Note this function may fail if the time is out of range for GeneralizedTime. +OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set( + ASN1_GENERALIZEDTIME *s, time_t t); + +// ASN1_GENERALIZEDTIME_adj adds |offset_day| days and |offset_sec| seconds to +// |t| and writes the result to |s| as a GeneralizedTime. It returns |s| on +// success and NULL on error. If |s| is NULL, it returns a newly-allocated +// |ASN1_GENERALIZEDTIME| instead. +// +// Note this function may fail if the time overflows or is out of range for +// GeneralizedTime. 
+OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj( + ASN1_GENERALIZEDTIME *s, time_t t, int offset_day, long offset_sec); + +// ASN1_GENERALIZEDTIME_set_string sets |s| to a GeneralizedTime whose contents +// are a copy of |str|. It returns one on success and zero on error or if |str| +// is not a valid GeneralizedTime. +// +// If |s| is NULL, this function validates |str| without copying it. +OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, + const char *str); + +// B_ASN1_TIME is a bitmask of types allowed in an X.509 Time. +#define B_ASN1_TIME (B_ASN1_UTCTIME | B_ASN1_GENERALIZEDTIME) + +// ASN1_TIME_new returns a newly-allocated |ASN1_TIME| with type -1, or NULL on +// error. The resulting |ASN1_TIME| is not a valid X.509 Time until initialized +// with a value. +OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_new(void); + +// ASN1_TIME_free releases memory associated with |str|. +OPENSSL_EXPORT void ASN1_TIME_free(ASN1_TIME *str); + +// d2i_ASN1_TIME parses up to |len| bytes from |*inp| as a DER-encoded X.509 +// Time (RFC 5280), as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_TIME *d2i_ASN1_TIME(ASN1_TIME **out, const uint8_t **inp, + long len); + +// i2d_ASN1_TIME marshals |in| as a DER-encoded X.509 Time (RFC 5280), as +// described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_TIME(const ASN1_TIME *in, uint8_t **outp); + +// ASN1_TIME is an |ASN1_ITEM| whose ASN.1 type is X.509 Time (RFC 5280) and C +// type is |ASN1_TIME*|. +DECLARE_ASN1_ITEM(ASN1_TIME) + +// ASN1_TIME_diff computes |to| - |from|. On success, it sets |*out_days| to the +// difference in days, rounded towards zero, sets |*out_seconds| to the +// remainder, and returns one. On error, it returns zero. +// +// If |from| is before |to|, both outputs will be <= 0, with at least one +// negative. 
If |from| is after |to|, both will be >= 0, with at least one +// positive. If they are equal, ignoring fractional seconds, both will be zero. +// +// Note this function may fail on overflow, or if |from| or |to| cannot be +// decoded. +OPENSSL_EXPORT int ASN1_TIME_diff(int *out_days, int *out_seconds, + const ASN1_TIME *from, const ASN1_TIME *to); + +// ASN1_TIME_set represents |t| as a GeneralizedTime or UTCTime and writes +// the result to |s|. As in RFC 5280, section 4.1.2.5, it uses UTCTime when the +// time fits and GeneralizedTime otherwise. It returns |s| on success and NULL +// on error. If |s| is NULL, it returns a newly-allocated |ASN1_TIME| instead. +// +// Note this function may fail if the time is out of range for GeneralizedTime. +OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t t); + +// ASN1_TIME_adj adds |offset_day| days and |offset_sec| seconds to +// |t| and writes the result to |s|. As in RFC 5280, section 4.1.2.5, it uses +// UTCTime when the time fits and GeneralizedTime otherwise. It returns |s| on +// success and NULL on error. If |s| is NULL, it returns a newly-allocated +// |ASN1_GENERALIZEDTIME| instead. +// +// Note this function may fail if the time overflows or is out of range for +// GeneralizedTime. +OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, time_t t, int offset_day, + long offset_sec); + +// ASN1_TIME_check returns one if |t| is a valid UTCTime or GeneralizedTime, and +// zero otherwise. |t|'s type determines which check is performed. This +// function does not enforce that UTCTime was used when possible. +OPENSSL_EXPORT int ASN1_TIME_check(const ASN1_TIME *t); + +// ASN1_TIME_to_generalizedtime converts |t| to a GeneralizedTime. If |out| is +// NULL, it returns a newly-allocated |ASN1_GENERALIZEDTIME| on success, or NULL +// on error. If |out| is non-NULL and |*out| is NULL, it additionally sets +// |*out| to the result. 
If |out| and |*out| are non-NULL, it instead updates +// the object pointed by |*out| and returns |*out| on success or NULL on error. +OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime( + const ASN1_TIME *t, ASN1_GENERALIZEDTIME **out); + +// ASN1_TIME_set_string behaves like |ASN1_UTCTIME_set_string| if |str| is a +// valid UTCTime, and |ASN1_GENERALIZEDTIME_set_string| if |str| is a valid +// GeneralizedTime. If |str| is neither, it returns zero. +OPENSSL_EXPORT int ASN1_TIME_set_string(ASN1_TIME *s, const char *str); + +// TODO(davidben): Expand and document function prototypes generated in macros. + + +// NULL values. +// +// This library represents the ASN.1 NULL value by a non-NULL pointer to the +// opaque type |ASN1_NULL|. An omitted OPTIONAL ASN.1 NULL value is a NULL +// pointer. Unlike other pointer types, it is not necessary to free |ASN1_NULL| +// pointers, but it is safe to do so. + +// ASN1_NULL_new returns an opaque, non-NULL pointer. It is safe to call +// |ASN1_NULL_free| on the result, but not necessary. +OPENSSL_EXPORT ASN1_NULL *ASN1_NULL_new(void); + +// ASN1_NULL_free does nothing. +OPENSSL_EXPORT void ASN1_NULL_free(ASN1_NULL *null); + +// d2i_ASN1_NULL parses a DER-encoded ASN.1 NULL value from up to |len| bytes +// at |*inp|, as described in |d2i_SAMPLE|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_NULL *d2i_ASN1_NULL(ASN1_NULL **out, const uint8_t **inp, + long len); + +// i2d_ASN1_NULL marshals |in| as a DER-encoded ASN.1 NULL value, as described +// in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_NULL(const ASN1_NULL *in, uint8_t **outp); + +// ASN1_NULL is an |ASN1_ITEM| with ASN.1 type NULL and C type |ASN1_NULL*|. +DECLARE_ASN1_ITEM(ASN1_NULL) + + +// Object identifiers. +// +// An |ASN1_OBJECT| represents a ASN.1 OBJECT IDENTIFIER. See also obj.h for +// additional functions relating to |ASN1_OBJECT|. 
+// +// TODO(davidben): What's the relationship between asn1.h and obj.h? Most of +// obj.h deals with the large NID table, but then functions like |OBJ_get0_data| +// or |OBJ_dup| are general |ASN1_OBJECT| functions. DEFINE_STACK_OF(ASN1_OBJECT) -#define ASN1_STRING_FLAG_BITS_LEFT 0x08 /* Set if 0x07 has bits left value */ -/* This indicates that the ASN1_STRING is not a real value but just a place - * holder for the location where indefinite length constructed data should - * be inserted in the memory buffer - */ -#define ASN1_STRING_FLAG_NDEF 0x010 +// ASN1_OBJECT_create returns a newly-allocated |ASN1_OBJECT| with |len| bytes +// from |data| as the encoded OID, or NULL on error. |data| should contain the +// DER-encoded identifier, excluding the tag and length. +// +// |nid| should be |NID_undef|. Passing a NID value that does not match |data| +// will cause some functions to misbehave. |sn| and |ln| should be NULL. If +// non-NULL, they are stored as short and long names, respectively, but these +// values have no effect for |ASN1_OBJECT|s created through this function. +// +// TODO(davidben): Should we just ignore all those parameters? NIDs and names +// are only relevant for |ASN1_OBJECT|s in the obj.h table. +OPENSSL_EXPORT ASN1_OBJECT *ASN1_OBJECT_create(int nid, const uint8_t *data, + int len, const char *sn, + const char *ln); -/* This flag is used by ASN1 code to indicate an ASN1_STRING is an MSTRING - * type. - */ -#define ASN1_STRING_FLAG_MSTRING 0x040 -/* This is the base type that holds just about everything :-) */ -struct asn1_string_st - { - int length; - int type; - unsigned char *data; - /* The value of the following field depends on the type being - * held. It is mostly being used for BIT_STRING so if the - * input data has a non-zero 'unused bits' value, it will be - * handled correctly */ - long flags; - }; +// ASN1_OBJECT_free releases memory associated with |a|. 
If |a| is a static +// |ASN1_OBJECT|, returned from |OBJ_nid2obj|, this function does nothing. +OPENSSL_EXPORT void ASN1_OBJECT_free(ASN1_OBJECT *a); -/* ASN1_ENCODING structure: this is used to save the received - * encoding of an ASN1 type. This is useful to get round - * problems with invalid encodings which can break signatures. - */ +// d2i_ASN1_OBJECT parses a DER-encoded ASN.1 OBJECT IDENTIFIER from up to |len| +// bytes at |*inp|, as described in |d2i_SAMPLE_with_reuse|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **out, + const uint8_t **inp, long len); -typedef struct ASN1_ENCODING_st - { - unsigned char *enc; /* DER encoding */ - long len; /* Length of encoding */ - int modified; /* set to 1 if 'enc' is invalid */ - /* alias_only is zero if |enc| owns the buffer that it points to - * (although |enc| may still be NULL). If one, |enc| points into a - * buffer that is owned elsewhere. */ - unsigned alias_only:1; - /* alias_only_on_next_parse is one iff the next parsing operation - * should avoid taking a copy of the input and rather set - * |alias_only|. */ - unsigned alias_only_on_next_parse:1; - } ASN1_ENCODING; +// i2d_ASN1_OBJECT marshals |in| as a DER-encoded ASN.1 OBJECT IDENTIFIER, as +// described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_OBJECT(const ASN1_OBJECT *a, uint8_t **outp); -#define STABLE_FLAGS_MALLOC 0x01 -#define STABLE_NO_MASK 0x02 -#define DIRSTRING_TYPE \ - (B_ASN1_PRINTABLESTRING|B_ASN1_T61STRING|B_ASN1_BMPSTRING|B_ASN1_UTF8STRING) -#define PKCS9STRING_TYPE (DIRSTRING_TYPE|B_ASN1_IA5STRING) +// c2i_ASN1_OBJECT decodes |len| bytes from |*inp| as the contents of a +// DER-encoded OBJECT IDENTIFIER, excluding the tag and length. It behaves like +// |d2i_SAMPLE_with_reuse| except, on success, it always consumes all |len| +// bytes. 
+OPENSSL_EXPORT ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **out, + const uint8_t **inp, long len); -typedef struct asn1_string_table_st { - int nid; - long minsize; - long maxsize; - unsigned long mask; - unsigned long flags; -} ASN1_STRING_TABLE; +// ASN1_OBJECT is an |ASN1_ITEM| with ASN.1 type OBJECT IDENTIFIER and C type +// |ASN1_OBJECT*|. +DECLARE_ASN1_ITEM(ASN1_OBJECT) -/* size limits: this stuff is taken straight from RFC2459 */ -#define ub_name 32768 -#define ub_common_name 64 -#define ub_locality_name 128 -#define ub_state_name 128 -#define ub_organization_name 64 -#define ub_organization_unit_name 64 -#define ub_title 64 -#define ub_email_address 128 +// Arbitrary elements. -/* Declarations for template structures: for full definitions - * see asn1t.h - */ -typedef struct ASN1_TEMPLATE_st ASN1_TEMPLATE; -typedef struct ASN1_TLC_st ASN1_TLC; -/* This is just an opaque pointer */ -typedef struct ASN1_VALUE_st ASN1_VALUE; +// An asn1_type_st (aka |ASN1_TYPE|) represents an arbitrary ASN.1 element, +// typically used for ANY types. It contains a |type| field and a |value| union +// dependent on |type|. +// +// WARNING: This struct has a complex representation. Callers must not construct +// |ASN1_TYPE| values manually. Use |ASN1_TYPE_set| and |ASN1_TYPE_set1| +// instead. Additionally, callers performing non-trivial operations on this type +// are encouraged to use |CBS| and |CBB| from , and +// convert to or from |ASN1_TYPE| with |d2i_ASN1_TYPE| or |i2d_ASN1_TYPE|. +// +// The |type| field corresponds to the tag of the ASN.1 element being +// represented: +// +// If |type| is a |V_ASN1_*| constant for an ASN.1 string-like type, as defined +// by |ASN1_STRING|, the tag matches the constant. |value| contains an +// |ASN1_STRING| pointer (equivalently, one of the more specific typedefs). See +// |ASN1_STRING| for details on the representation. Unlike |ASN1_STRING|, +// |ASN1_TYPE| does not use the |V_ASN1_NEG| flag for negative INTEGER and +// ENUMERATE values. 
For a negative value, the |ASN1_TYPE|'s |type| will be +// |V_ASN1_INTEGER| or |V_ASN1_ENUMERATED|, but |value| will an |ASN1_STRING| +// whose |type| is |V_ASN1_NEG_INTEGER| or |V_ASN1_NEG_ENUMERATED|. +// +// If |type| is |V_ASN1_OBJECT|, the tag is OBJECT IDENTIFIER and |value| +// contains an |ASN1_OBJECT| pointer. +// +// If |type| is |V_ASN1_NULL|, the tag is NULL. |value| contains a NULL pointer. +// +// If |type| is |V_ASN1_BOOLEAN|, the tag is BOOLEAN. |value| contains an +// |ASN1_BOOLEAN|. +// +// If |type| is |V_ASN1_SEQUENCE|, |V_ASN1_SET|, or |V_ASN1_OTHER|, the tag is +// SEQUENCE, SET, or some non-universal tag, respectively. |value| is an +// |ASN1_STRING| containing the entire element, including the tag and length. +// The |ASN1_STRING|'s |type| field matches the containing |ASN1_TYPE|'s |type|. +// +// Other positive values of |type|, up to |V_ASN1_MAX_UNIVERSAL|, correspond to +// universal primitive tags not directly supported by this library. |value| is +// an |ASN1_STRING| containing the body of the element, excluding the tag +// and length. The |ASN1_STRING|'s |type| field matches the containing +// |ASN1_TYPE|'s |type|. +struct asn1_type_st { + int type; + union { + char *ptr; + ASN1_BOOLEAN boolean; + ASN1_STRING *asn1_string; + ASN1_OBJECT *object; + ASN1_INTEGER *integer; + ASN1_ENUMERATED *enumerated; + ASN1_BIT_STRING *bit_string; + ASN1_OCTET_STRING *octet_string; + ASN1_PRINTABLESTRING *printablestring; + ASN1_T61STRING *t61string; + ASN1_IA5STRING *ia5string; + ASN1_GENERALSTRING *generalstring; + ASN1_BMPSTRING *bmpstring; + ASN1_UNIVERSALSTRING *universalstring; + ASN1_UTCTIME *utctime; + ASN1_GENERALIZEDTIME *generalizedtime; + ASN1_VISIBLESTRING *visiblestring; + ASN1_UTF8STRING *utf8string; + // set and sequence are left complete and still contain the entire element. 
+ ASN1_STRING *set; + ASN1_STRING *sequence; + ASN1_VALUE *asn1_value; + } value; +}; -/* Declare ASN1 functions: the implement macro in in asn1t.h */ +DEFINE_STACK_OF(ASN1_TYPE) + +// ASN1_TYPE_new returns a newly-allocated |ASN1_TYPE|, or NULL on allocation +// failure. The resulting object has type -1 and must be initialized to be +// a valid ANY value. +OPENSSL_EXPORT ASN1_TYPE *ASN1_TYPE_new(void); + +// ASN1_TYPE_free releases memory associated with |a|. +OPENSSL_EXPORT void ASN1_TYPE_free(ASN1_TYPE *a); + +// d2i_ASN1_TYPE parses up to |len| bytes from |*inp| as an ASN.1 value of any +// type, as described in |d2i_SAMPLE_with_reuse|. Note this function only +// validates primitive, universal types supported by this library. Values of +// type |V_ASN1_SEQUENCE|, |V_ASN1_SET|, |V_ASN1_OTHER|, or an unsupported +// primitive type must be validated by the caller when interpreting. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_TYPE *d2i_ASN1_TYPE(ASN1_TYPE **out, const uint8_t **inp, + long len); + +// i2d_ASN1_TYPE marshals |in| as DER, as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_TYPE(const ASN1_TYPE *in, uint8_t **outp); + +// ASN1_ANY is an |ASN1_ITEM| with ASN.1 type ANY and C type |ASN1_TYPE*|. Note +// the |ASN1_ITEM| name and C type do not match. +DECLARE_ASN1_ITEM(ASN1_ANY) + +// ASN1_TYPE_get returns the type of |a|, which will be one of the |V_ASN1_*| +// constants, or zero if |a| is not fully initialized. +OPENSSL_EXPORT int ASN1_TYPE_get(const ASN1_TYPE *a); + +// ASN1_TYPE_set sets |a| to an |ASN1_TYPE| of type |type| and value |value|, +// releasing the previous contents of |a|. +// +// If |type| is |V_ASN1_BOOLEAN|, |a| is set to FALSE if |value| is NULL and +// TRUE otherwise. If setting |a| to TRUE, |value| may be an invalid pointer, +// such as (void*)1. +// +// If |type| is |V_ASN1_NULL|, |value| must be NULL. 
+// +// For other values of |type|, this function takes ownership of |value|, which +// must point to an object of the corresponding type. See |ASN1_TYPE| for +// details. +OPENSSL_EXPORT void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value); + +// ASN1_TYPE_set1 behaves like |ASN1_TYPE_set| except it does not take ownership +// of |value|. It returns one on success and zero on error. +OPENSSL_EXPORT int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value); + +// ASN1_TYPE_cmp returns zero if |a| and |b| are equal and some non-zero value +// otherwise. Note this function can only be used for equality checks, not an +// ordering. +OPENSSL_EXPORT int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b); + +typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY; + +// d2i_ASN1_SEQUENCE_ANY parses up to |len| bytes from |*inp| as a DER-encoded +// ASN.1 SEQUENCE OF ANY structure, as described in |d2i_SAMPLE_with_reuse|. The +// resulting |ASN1_SEQUENCE_ANY| owns its contents and thus must be released +// with |sk_ASN1_TYPE_pop_free| and |ASN1_TYPE_free|, not |sk_ASN1_TYPE_free|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_SEQUENCE_ANY *d2i_ASN1_SEQUENCE_ANY(ASN1_SEQUENCE_ANY **out, + const uint8_t **inp, + long len); + +// i2d_ASN1_SEQUENCE_ANY marshals |in| as a DER-encoded SEQUENCE OF ANY +// structure, as described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_SEQUENCE_ANY(const ASN1_SEQUENCE_ANY *in, + uint8_t **outp); + +// d2i_ASN1_SET_ANY parses up to |len| bytes from |*inp| as a DER-encoded ASN.1 +// SET OF ANY structure, as described in |d2i_SAMPLE_with_reuse|. The resulting +// |ASN1_SEQUENCE_ANY| owns its contents and thus must be released with +// |sk_ASN1_TYPE_pop_free| and |ASN1_TYPE_free|, not |sk_ASN1_TYPE_free|. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. 
+OPENSSL_EXPORT ASN1_SEQUENCE_ANY *d2i_ASN1_SET_ANY(ASN1_SEQUENCE_ANY **out, + const uint8_t **inp, + long len); + +// i2d_ASN1_SET_ANY marshals |in| as a DER-encoded SET OF ANY structure, as +// described in |i2d_SAMPLE|. +OPENSSL_EXPORT int i2d_ASN1_SET_ANY(const ASN1_SEQUENCE_ANY *in, + uint8_t **outp); + + +// Human-readable output. +// +// The following functions output types in some human-readable format. These +// functions may be used for debugging and logging. However, the output should +// not be consumed programmatically. They may be ambiguous or lose information. + +// ASN1_UTCTIME_print writes a human-readable representation of |a| to |out|. It +// returns one on success and zero on error. +OPENSSL_EXPORT int ASN1_UTCTIME_print(BIO *out, const ASN1_UTCTIME *a); + +// ASN1_GENERALIZEDTIME_print writes a human-readable representation of |a| to +// |out|. It returns one on success and zero on error. +OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_print(BIO *out, + const ASN1_GENERALIZEDTIME *a); + +// ASN1_TIME_print writes a human-readable representation of |a| to |out|. It +// returns one on success and zero on error. +OPENSSL_EXPORT int ASN1_TIME_print(BIO *out, const ASN1_TIME *a); + +// ASN1_STRING_print writes a human-readable representation of |str| to |out|. +// It returns one on success and zero on error. Unprintable characters are +// replaced with '.'. +OPENSSL_EXPORT int ASN1_STRING_print(BIO *out, const ASN1_STRING *str); + +// ASN1_STRFLGS_ESC_2253 causes characters to be escaped as in RFC 2253, section +// 2.4. +#define ASN1_STRFLGS_ESC_2253 1 + +// ASN1_STRFLGS_ESC_CTRL causes all control characters to be escaped. +#define ASN1_STRFLGS_ESC_CTRL 2 + +// ASN1_STRFLGS_ESC_MSB causes all characters above 127 to be escaped. +#define ASN1_STRFLGS_ESC_MSB 4 + +// ASN1_STRFLGS_ESC_QUOTE causes the string to be surrounded by quotes, rather +// than using backslashes, when characters are escaped. Fewer characters will +// require escapes in this case. 
+#define ASN1_STRFLGS_ESC_QUOTE 8 + +// ASN1_STRFLGS_UTF8_CONVERT causes the string to be encoded as UTF-8, with each +// byte in the UTF-8 encoding treated as an individual character for purposes of +// escape sequences. If not set, each Unicode codepoint in the string is treated +// as a character, with wide characters escaped as "\Uxxxx" or "\Wxxxxxxxx". +// Note this can be ambiguous if |ASN1_STRFLGS_ESC_*| are all unset. In that +// case, backslashes are not escaped, but wide characters are. +#define ASN1_STRFLGS_UTF8_CONVERT 0x10 + +// ASN1_STRFLGS_IGNORE_TYPE causes the string type to be ignored. The +// |ASN1_STRING| in-memory representation will be printed directly. +#define ASN1_STRFLGS_IGNORE_TYPE 0x20 + +// ASN1_STRFLGS_SHOW_TYPE causes the string type to be included in the output. +#define ASN1_STRFLGS_SHOW_TYPE 0x40 + +// ASN1_STRFLGS_DUMP_ALL causes all strings to be printed as a hexdump, using +// RFC 2253 hexstring notation, such as "#0123456789ABCDEF". +#define ASN1_STRFLGS_DUMP_ALL 0x80 + +// ASN1_STRFLGS_DUMP_UNKNOWN behaves like |ASN1_STRFLGS_DUMP_ALL| but only +// applies to values of unknown type. If unset, unknown values will print +// their contents as single-byte characters with escape sequences. +#define ASN1_STRFLGS_DUMP_UNKNOWN 0x100 + +// ASN1_STRFLGS_DUMP_DER causes hexdumped strings (as determined by +// |ASN1_STRFLGS_DUMP_ALL| or |ASN1_STRFLGS_DUMP_UNKNOWN|) to print the entire +// DER element as in RFC 2253, rather than only the contents of the +// |ASN1_STRING|. +#define ASN1_STRFLGS_DUMP_DER 0x200 + +// ASN1_STRFLGS_RFC2253 causes the string to be escaped as in RFC 2253, +// additionally escaping control characters. +#define ASN1_STRFLGS_RFC2253 \ + (ASN1_STRFLGS_ESC_2253 | ASN1_STRFLGS_ESC_CTRL | ASN1_STRFLGS_ESC_MSB | \ + ASN1_STRFLGS_UTF8_CONVERT | ASN1_STRFLGS_DUMP_UNKNOWN | \ + ASN1_STRFLGS_DUMP_DER) + +// ASN1_STRING_print_ex writes a human-readable representation of |str| to +// |out|. 
It returns the number of bytes written on success and -1 on error. If +// |out| is NULL, it returns the number of bytes it would have written, without +// writing anything. +// +// The |flags| should be a combination of combination of |ASN1_STRFLGS_*| +// constants. See the documentation for each flag for how it controls the +// output. If unsure, use |ASN1_STRFLGS_RFC2253|. +OPENSSL_EXPORT int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str, + unsigned long flags); + +// ASN1_STRING_print_ex_fp behaves like |ASN1_STRING_print_ex| but writes to a +// |FILE| rather than a |BIO|. +OPENSSL_EXPORT int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str, + unsigned long flags); + +// i2a_ASN1_INTEGER writes a human-readable representation of |a| to |bp|. It +// returns the number of bytes written on success, or a negative number on +// error. On error, this function may have written a partial output to |bp|. +OPENSSL_EXPORT int i2a_ASN1_INTEGER(BIO *bp, const ASN1_INTEGER *a); + +// i2a_ASN1_ENUMERATED writes a human-readable representation of |a| to |bp|. It +// returns the number of bytes written on success, or a negative number on +// error. On error, this function may have written a partial output to |bp|. +OPENSSL_EXPORT int i2a_ASN1_ENUMERATED(BIO *bp, const ASN1_ENUMERATED *a); + +// i2a_ASN1_OBJECT writes a human-readable representation of |a| to |bp|. It +// returns the number of bytes written on success, or a negative number on +// error. On error, this function may have written a partial output to |bp|. +OPENSSL_EXPORT int i2a_ASN1_OBJECT(BIO *bp, const ASN1_OBJECT *a); + +// i2a_ASN1_STRING writes a text representation of |a|'s contents to |bp|. It +// returns the number of bytes written on success, or a negative number on +// error. On error, this function may have written a partial output to |bp|. +// |type| is ignored. +// +// This function does not decode |a| into a Unicode string. It only hex-encodes +// the internal representation of |a|. 
This is suitable for printing an OCTET +// STRING, but may not be human-readable for any other string type. +OPENSSL_EXPORT int i2a_ASN1_STRING(BIO *bp, const ASN1_STRING *a, int type); + +// i2t_ASN1_OBJECT calls |OBJ_obj2txt| with |always_return_oid| set to zero. +OPENSSL_EXPORT int i2t_ASN1_OBJECT(char *buf, int buf_len, + const ASN1_OBJECT *a); + + +// Low-level encoding functions. + +// ASN1_get_object parses a BER element from up to |max_len| bytes at |*inp|. It +// returns |V_ASN1_CONSTRUCTED| if it successfully parsed a constructed element, +// zero if it successfully parsed a primitive element, and 0x80 on error. On +// success, it additionally advances |*inp| to the element body, sets +// |*out_length|, |*out_tag|, and |*out_class| to the element's length, tag +// number, and tag class, respectively, +// +// Unlike OpenSSL, this function does not support indefinite-length elements. +// +// This function is difficult to use correctly. Use |CBS_get_asn1| and related +// functions from bytestring.h. +// +// TODO(https://crbug.com/boringssl/354): Remove support for non-minimal +// lengths. +OPENSSL_EXPORT int ASN1_get_object(const unsigned char **inp, long *out_length, + int *out_tag, int *out_class, long max_len); + +// ASN1_put_object writes the header for a DER or BER element to |*outp| and +// advances |*outp| by the number of bytes written. The caller is responsible +// for ensuring |*outp| has enough space for the output. The header describes an +// element with length |length|, tag number |tag|, and class |xclass|. |xclass| +// should be one of the |V_ASN1_*| tag class constants. The element is primitive +// if |constructed| is zero and constructed if it is one or two. If +// |constructed| is two, |length| is ignored and the element uses +// indefinite-length encoding. +// +// Use |CBB_add_asn1| instead. 
+OPENSSL_EXPORT void ASN1_put_object(unsigned char **outp, int constructed, + int length, int tag, int xclass); + +// ASN1_put_eoc writes two zero bytes to |*outp|, advances |*outp| to point past +// those bytes, and returns two. +// +// Use definite-length encoding instead. +OPENSSL_EXPORT int ASN1_put_eoc(unsigned char **outp); + +// ASN1_object_size returns the number of bytes needed to encode a DER or BER +// value with length |length| and tag number |tag|, or -1 on error. |tag| should +// not include the constructed bit or tag class. If |constructed| is zero or +// one, the result uses a definite-length encoding with minimally-encoded +// length, as in DER. If |constructed| is two, the result uses BER +// indefinite-length encoding. +// +// Use |CBB_add_asn1| instead. +OPENSSL_EXPORT int ASN1_object_size(int constructed, int length, int tag); + + +// Function declaration macros. +// +// The following macros declare functions for ASN.1 types. Prefer writing the +// prototypes directly. Particularly when |type|, |itname|, or |name| differ, +// the macros can be difficult to understand. 
#define DECLARE_ASN1_FUNCTIONS(type) DECLARE_ASN1_FUNCTIONS_name(type, type) #define DECLARE_ASN1_ALLOC_FUNCTIONS(type) \ - DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, type) + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, type) #define DECLARE_ASN1_FUNCTIONS_name(type, name) \ - DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ - DECLARE_ASN1_ENCODE_FUNCTIONS(type, name, name) + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ + DECLARE_ASN1_ENCODE_FUNCTIONS(type, name, name) #define DECLARE_ASN1_FUNCTIONS_fname(type, itname, name) \ - DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ - DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ + DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) -#define DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) \ - OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, long len); \ - OPENSSL_EXPORT int i2d_##name(type *a, unsigned char **out); \ - DECLARE_ASN1_ITEM(itname) +#define DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) \ + OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, \ + long len); \ + OPENSSL_EXPORT int i2d_##name(type *a, unsigned char **out); \ + DECLARE_ASN1_ITEM(itname) -#define DECLARE_ASN1_ENCODE_FUNCTIONS_const(type, name) \ - OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, long len); \ - OPENSSL_EXPORT int i2d_##name(const type *a, unsigned char **out); \ - DECLARE_ASN1_ITEM(name) - -#define DECLARE_ASN1_NDEF_FUNCTION(name) \ - OPENSSL_EXPORT int i2d_##name##_NDEF(name *a, unsigned char **out); +#define DECLARE_ASN1_ENCODE_FUNCTIONS_const(type, name) \ + OPENSSL_EXPORT type *d2i_##name(type **a, const unsigned char **in, \ + long len); \ + OPENSSL_EXPORT int i2d_##name(const type *a, unsigned char **out); \ + DECLARE_ASN1_ITEM(name) #define DECLARE_ASN1_FUNCTIONS_const(name) \ - DECLARE_ASN1_ALLOC_FUNCTIONS(name) \ - DECLARE_ASN1_ENCODE_FUNCTIONS_const(name, name) + DECLARE_ASN1_ALLOC_FUNCTIONS(name) \ + 
DECLARE_ASN1_ENCODE_FUNCTIONS_const(name, name) #define DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ - OPENSSL_EXPORT type *name##_new(void); \ - OPENSSL_EXPORT void name##_free(type *a); + OPENSSL_EXPORT type *name##_new(void); \ + OPENSSL_EXPORT void name##_free(type *a); -#define DECLARE_ASN1_PRINT_FUNCTION(stname) \ - DECLARE_ASN1_PRINT_FUNCTION_fname(stname, stname) -#define DECLARE_ASN1_PRINT_FUNCTION_fname(stname, fname) \ - OPENSSL_EXPORT int fname##_print_ctx(BIO *out, stname *x, int indent, \ - const ASN1_PCTX *pctx); +// Deprecated functions. -typedef void *d2i_of_void(void **, const unsigned char **, long); -typedef int i2d_of_void(const void *, unsigned char **); - -/* The following macros and typedefs allow an ASN1_ITEM - * to be embedded in a structure and referenced. Since - * the ASN1_ITEM pointers need to be globally accessible - * (possibly from shared libraries) they may exist in - * different forms. On platforms that support it the - * ASN1_ITEM structure itself will be globally exported. - * Other platforms will export a function that returns - * an ASN1_ITEM pointer. - * - * To handle both cases transparently the macros below - * should be used instead of hard coding an ASN1_ITEM - * pointer in a structure. - * - * The structure will look like this: - * - * typedef struct SOMETHING_st { - * ... - * ASN1_ITEM_EXP *iptr; - * ... - * } SOMETHING; - * - * It would be initialised as e.g.: - * - * SOMETHING somevar = {...,ASN1_ITEM_ref(X509),...}; - * - * and the actual pointer extracted with: - * - * const ASN1_ITEM *it = ASN1_ITEM_ptr(somevar.iptr); - * - * Finally an ASN1_ITEM pointer can be extracted from an - * appropriate reference with: ASN1_ITEM_rptr(X509). This - * would be used when a function takes an ASN1_ITEM * argument. 
- * - */ - -/* ASN1_ITEM pointer exported type */ -typedef const ASN1_ITEM ASN1_ITEM_EXP; - -/* Macro to obtain ASN1_ITEM pointer from exported type */ -#define ASN1_ITEM_ptr(iptr) (iptr) - -/* Macro to include ASN1_ITEM pointer from base type */ -#define ASN1_ITEM_ref(iptr) (&(iptr##_it)) - -#define ASN1_ITEM_rptr(ref) (&(ref##_it)) - -#define DECLARE_ASN1_ITEM(name) \ - extern OPENSSL_EXPORT const ASN1_ITEM name##_it; - -/* Parameters used by ASN1_STRING_print_ex() */ - -/* These determine which characters to escape: - * RFC2253 special characters, control characters and - * MSB set characters - */ - -#define ASN1_STRFLGS_ESC_2253 1 -#define ASN1_STRFLGS_ESC_CTRL 2 -#define ASN1_STRFLGS_ESC_MSB 4 - - -/* This flag determines how we do escaping: normally - * RC2253 backslash only, set this to use backslash and - * quote. - */ - -#define ASN1_STRFLGS_ESC_QUOTE 8 - - -/* These three flags are internal use only. */ - -/* Character is a valid PrintableString character */ -#define CHARTYPE_PRINTABLESTRING 0x10 -/* Character needs escaping if it is the first character */ -#define CHARTYPE_FIRST_ESC_2253 0x20 -/* Character needs escaping if it is the last character */ -#define CHARTYPE_LAST_ESC_2253 0x40 - -/* NB the internal flags are safely reused below by flags - * handled at the top level. - */ - -/* If this is set we convert all character strings - * to UTF8 first - */ - -#define ASN1_STRFLGS_UTF8_CONVERT 0x10 - -/* If this is set we don't attempt to interpret content: - * just assume all strings are 1 byte per character. This - * will produce some pretty odd looking output! - */ - -#define ASN1_STRFLGS_IGNORE_TYPE 0x20 - -/* If this is set we include the string type in the output */ -#define ASN1_STRFLGS_SHOW_TYPE 0x40 - -/* This determines which strings to display and which to - * 'dump' (hex dump of content octets or DER encoding). We can - * only dump non character strings or everything. 
If we - * don't dump 'unknown' they are interpreted as character - * strings with 1 octet per character and are subject to - * the usual escaping options. - */ - -#define ASN1_STRFLGS_DUMP_ALL 0x80 -#define ASN1_STRFLGS_DUMP_UNKNOWN 0x100 - -/* These determine what 'dumping' does, we can dump the - * content octets or the DER encoding: both use the - * RFC2253 #XXXXX notation. - */ - -#define ASN1_STRFLGS_DUMP_DER 0x200 - -/* All the string flags consistent with RFC2253, - * escaping control characters isn't essential in - * RFC2253 but it is advisable anyway. - */ - -#define ASN1_STRFLGS_RFC2253 (ASN1_STRFLGS_ESC_2253 | \ - ASN1_STRFLGS_ESC_CTRL | \ - ASN1_STRFLGS_ESC_MSB | \ - ASN1_STRFLGS_UTF8_CONVERT | \ - ASN1_STRFLGS_DUMP_UNKNOWN | \ - ASN1_STRFLGS_DUMP_DER) - -DEFINE_STACK_OF(ASN1_INTEGER) -DECLARE_ASN1_SET_OF(ASN1_INTEGER) - -struct asn1_type_st - { - int type; - union { - char *ptr; - ASN1_BOOLEAN boolean; - ASN1_STRING * asn1_string; - ASN1_OBJECT * object; - ASN1_INTEGER * integer; - ASN1_ENUMERATED * enumerated; - ASN1_BIT_STRING * bit_string; - ASN1_OCTET_STRING * octet_string; - ASN1_PRINTABLESTRING * printablestring; - ASN1_T61STRING * t61string; - ASN1_IA5STRING * ia5string; - ASN1_GENERALSTRING * generalstring; - ASN1_BMPSTRING * bmpstring; - ASN1_UNIVERSALSTRING * universalstring; - ASN1_UTCTIME * utctime; - ASN1_GENERALIZEDTIME * generalizedtime; - ASN1_VISIBLESTRING * visiblestring; - ASN1_UTF8STRING * utf8string; - /* set and sequence are left complete and still - * contain the set or sequence bytes */ - ASN1_STRING * set; - ASN1_STRING * sequence; - ASN1_VALUE * asn1_value; - } value; - }; - -DEFINE_STACK_OF(ASN1_TYPE) -DECLARE_ASN1_SET_OF(ASN1_TYPE) - -typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY; - -DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SEQUENCE_ANY) -DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SET_ANY) - -struct X509_algor_st - { - ASN1_OBJECT *algorithm; - ASN1_TYPE *parameter; - } /* X509_ALGOR */; - 
-DECLARE_ASN1_FUNCTIONS(X509_ALGOR) - -/* This is used to contain a list of bit names */ -typedef struct BIT_STRING_BITNAME_st { - int bitnum; - const char *lname; - const char *sname; -} BIT_STRING_BITNAME; - - -#define M_ASN1_STRING_length(x) ((x)->length) -#define M_ASN1_STRING_length_set(x, n) ((x)->length = (n)) -#define M_ASN1_STRING_type(x) ((x)->type) -#define M_ASN1_STRING_data(x) ((x)->data) - -/* Macros for string operations */ -#define M_ASN1_BIT_STRING_new() (ASN1_BIT_STRING *)\ - ASN1_STRING_type_new(V_ASN1_BIT_STRING) -#define M_ASN1_BIT_STRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_BIT_STRING_dup(a) (ASN1_BIT_STRING *)\ - ASN1_STRING_dup((const ASN1_STRING *)a) -#define M_ASN1_BIT_STRING_cmp(a,b) ASN1_STRING_cmp(\ - (const ASN1_STRING *)a,(const ASN1_STRING *)b) -#define M_ASN1_BIT_STRING_set(a,b,c) ASN1_STRING_set((ASN1_STRING *)a,b,c) - -#define M_ASN1_INTEGER_new() (ASN1_INTEGER *)\ - ASN1_STRING_type_new(V_ASN1_INTEGER) -#define M_ASN1_INTEGER_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_INTEGER_dup(a) (ASN1_INTEGER *)\ - ASN1_STRING_dup((const ASN1_STRING *)a) -#define M_ASN1_INTEGER_cmp(a,b) ASN1_STRING_cmp(\ - (const ASN1_STRING *)a,(const ASN1_STRING *)b) - -#define M_ASN1_ENUMERATED_new() (ASN1_ENUMERATED *)\ - ASN1_STRING_type_new(V_ASN1_ENUMERATED) -#define M_ASN1_ENUMERATED_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_ENUMERATED_dup(a) (ASN1_ENUMERATED *)\ - ASN1_STRING_dup((const ASN1_STRING *)a) -#define M_ASN1_ENUMERATED_cmp(a,b) ASN1_STRING_cmp(\ - (const ASN1_STRING *)a,(const ASN1_STRING *)b) - -#define M_ASN1_OCTET_STRING_new() (ASN1_OCTET_STRING *)\ - ASN1_STRING_type_new(V_ASN1_OCTET_STRING) -#define M_ASN1_OCTET_STRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_OCTET_STRING_dup(a) (ASN1_OCTET_STRING *)\ - ASN1_STRING_dup((const ASN1_STRING *)a) -#define M_ASN1_OCTET_STRING_cmp(a,b) ASN1_STRING_cmp(\ - (const ASN1_STRING *)a,(const ASN1_STRING *)b) -#define 
M_ASN1_OCTET_STRING_set(a,b,c) ASN1_STRING_set((ASN1_STRING *)a,b,c) -#define M_ASN1_OCTET_STRING_print(a,b) ASN1_STRING_print(a,(ASN1_STRING *)b) - -#define B_ASN1_TIME \ - B_ASN1_UTCTIME | \ - B_ASN1_GENERALIZEDTIME - -#define B_ASN1_PRINTABLE \ - B_ASN1_NUMERICSTRING| \ - B_ASN1_PRINTABLESTRING| \ - B_ASN1_T61STRING| \ - B_ASN1_IA5STRING| \ - B_ASN1_BIT_STRING| \ - B_ASN1_UNIVERSALSTRING|\ - B_ASN1_BMPSTRING|\ - B_ASN1_UTF8STRING|\ - B_ASN1_SEQUENCE|\ - B_ASN1_UNKNOWN - -#define B_ASN1_DIRECTORYSTRING \ - B_ASN1_PRINTABLESTRING| \ - B_ASN1_TELETEXSTRING|\ - B_ASN1_BMPSTRING|\ - B_ASN1_UNIVERSALSTRING|\ - B_ASN1_UTF8STRING - -#define B_ASN1_DISPLAYTEXT \ - B_ASN1_IA5STRING| \ - B_ASN1_VISIBLESTRING| \ - B_ASN1_BMPSTRING|\ - B_ASN1_UTF8STRING - -#define M_ASN1_PRINTABLE_new() ASN1_STRING_type_new(V_ASN1_T61STRING) -#define M_ASN1_PRINTABLE_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_DIRECTORYSTRING_new() ASN1_STRING_type_new(V_ASN1_PRINTABLESTRING) -#define M_DIRECTORYSTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_DISPLAYTEXT_new() ASN1_STRING_type_new(V_ASN1_VISIBLESTRING) -#define M_DISPLAYTEXT_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_PRINTABLESTRING_new() (ASN1_PRINTABLESTRING *)\ - ASN1_STRING_type_new(V_ASN1_PRINTABLESTRING) -#define M_ASN1_PRINTABLESTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_T61STRING_new() (ASN1_T61STRING *)\ - ASN1_STRING_type_new(V_ASN1_T61STRING) -#define M_ASN1_T61STRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_IA5STRING_new() (ASN1_IA5STRING *)\ - ASN1_STRING_type_new(V_ASN1_IA5STRING) -#define M_ASN1_IA5STRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_IA5STRING_dup(a) \ - (ASN1_IA5STRING *)ASN1_STRING_dup((const ASN1_STRING *)a) - -#define M_ASN1_UTCTIME_new() (ASN1_UTCTIME *)\ - ASN1_STRING_type_new(V_ASN1_UTCTIME) -#define M_ASN1_UTCTIME_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_UTCTIME_dup(a) (ASN1_UTCTIME 
*)\ - ASN1_STRING_dup((const ASN1_STRING *)a) - -#define M_ASN1_GENERALIZEDTIME_new() (ASN1_GENERALIZEDTIME *)\ - ASN1_STRING_type_new(V_ASN1_GENERALIZEDTIME) -#define M_ASN1_GENERALIZEDTIME_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_GENERALIZEDTIME_dup(a) (ASN1_GENERALIZEDTIME *)ASN1_STRING_dup(\ - (const ASN1_STRING *)a) - -#define M_ASN1_TIME_new() (ASN1_TIME *)\ - ASN1_STRING_type_new(V_ASN1_UTCTIME) -#define M_ASN1_TIME_free(a) ASN1_STRING_free((ASN1_STRING *)a) -#define M_ASN1_TIME_dup(a) (ASN1_TIME *)\ - ASN1_STRING_dup((const ASN1_STRING *)a) - -#define M_ASN1_GENERALSTRING_new() (ASN1_GENERALSTRING *)\ - ASN1_STRING_type_new(V_ASN1_GENERALSTRING) -#define M_ASN1_GENERALSTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_UNIVERSALSTRING_new() (ASN1_UNIVERSALSTRING *)\ - ASN1_STRING_type_new(V_ASN1_UNIVERSALSTRING) -#define M_ASN1_UNIVERSALSTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_BMPSTRING_new() (ASN1_BMPSTRING *)\ - ASN1_STRING_type_new(V_ASN1_BMPSTRING) -#define M_ASN1_BMPSTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_VISIBLESTRING_new() (ASN1_VISIBLESTRING *)\ - ASN1_STRING_type_new(V_ASN1_VISIBLESTRING) -#define M_ASN1_VISIBLESTRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -#define M_ASN1_UTF8STRING_new() (ASN1_UTF8STRING *)\ - ASN1_STRING_type_new(V_ASN1_UTF8STRING) -#define M_ASN1_UTF8STRING_free(a) ASN1_STRING_free((ASN1_STRING *)a) - -DECLARE_ASN1_FUNCTIONS_fname(ASN1_TYPE, ASN1_ANY, ASN1_TYPE) - -OPENSSL_EXPORT int ASN1_TYPE_get(ASN1_TYPE *a); -OPENSSL_EXPORT void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value); -OPENSSL_EXPORT int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value); -OPENSSL_EXPORT int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b); - -OPENSSL_EXPORT ASN1_OBJECT * ASN1_OBJECT_new(void ); -OPENSSL_EXPORT void ASN1_OBJECT_free(ASN1_OBJECT *a); -OPENSSL_EXPORT int i2d_ASN1_OBJECT(ASN1_OBJECT *a,unsigned char **pp); -OPENSSL_EXPORT 
ASN1_OBJECT * c2i_ASN1_OBJECT(ASN1_OBJECT **a,const unsigned char **pp, - long length); -OPENSSL_EXPORT ASN1_OBJECT * d2i_ASN1_OBJECT(ASN1_OBJECT **a,const unsigned char **pp, - long length); - -DECLARE_ASN1_ITEM(ASN1_OBJECT) - -DECLARE_ASN1_SET_OF(ASN1_OBJECT) - -OPENSSL_EXPORT ASN1_STRING * ASN1_STRING_new(void); -OPENSSL_EXPORT void ASN1_STRING_free(ASN1_STRING *a); -OPENSSL_EXPORT int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str); -OPENSSL_EXPORT ASN1_STRING * ASN1_STRING_dup(const ASN1_STRING *a); -OPENSSL_EXPORT ASN1_STRING * ASN1_STRING_type_new(int type ); -OPENSSL_EXPORT int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b); - /* Since this is used to store all sorts of things, via macros, for now, make - its data void * */ -OPENSSL_EXPORT int ASN1_STRING_set(ASN1_STRING *str, const void *data, int len); -OPENSSL_EXPORT void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len); -OPENSSL_EXPORT int ASN1_STRING_length(const ASN1_STRING *x); -OPENSSL_EXPORT void ASN1_STRING_length_set(ASN1_STRING *x, int n); -OPENSSL_EXPORT int ASN1_STRING_type(ASN1_STRING *x); -OPENSSL_EXPORT unsigned char * ASN1_STRING_data(ASN1_STRING *x); -OPENSSL_EXPORT const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x); - -DECLARE_ASN1_FUNCTIONS(ASN1_BIT_STRING) -OPENSSL_EXPORT int i2c_ASN1_BIT_STRING(ASN1_BIT_STRING *a,unsigned char **pp); -OPENSSL_EXPORT ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a,const unsigned char **pp, long length); -OPENSSL_EXPORT int ASN1_BIT_STRING_set(ASN1_BIT_STRING *a, unsigned char *d, int length ); -OPENSSL_EXPORT int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value); -OPENSSL_EXPORT int ASN1_BIT_STRING_get_bit(ASN1_BIT_STRING *a, int n); -OPENSSL_EXPORT int ASN1_BIT_STRING_check(ASN1_BIT_STRING *a, unsigned char *flags, int flags_len); - -OPENSSL_EXPORT int i2d_ASN1_BOOLEAN(int a,unsigned char **pp); -OPENSSL_EXPORT int d2i_ASN1_BOOLEAN(int *a,const unsigned char **pp,long length); - 
-DECLARE_ASN1_FUNCTIONS(ASN1_INTEGER) -OPENSSL_EXPORT int i2c_ASN1_INTEGER(ASN1_INTEGER *a,unsigned char **pp); -OPENSSL_EXPORT ASN1_INTEGER *c2i_ASN1_INTEGER(ASN1_INTEGER **a,const unsigned char **pp, long length); -OPENSSL_EXPORT ASN1_INTEGER * ASN1_INTEGER_dup(const ASN1_INTEGER *x); -OPENSSL_EXPORT int ASN1_INTEGER_cmp(const ASN1_INTEGER *x, const ASN1_INTEGER *y); - -DECLARE_ASN1_FUNCTIONS(ASN1_ENUMERATED) - -OPENSSL_EXPORT int ASN1_UTCTIME_check(const ASN1_UTCTIME *a); -OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s,time_t t); -OPENSSL_EXPORT ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, time_t t, int offset_day, long offset_sec); -OPENSSL_EXPORT int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str); -OPENSSL_EXPORT int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t); -#if 0 -time_t ASN1_UTCTIME_get(const ASN1_UTCTIME *s); -#endif - -OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *a); -OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set(ASN1_GENERALIZEDTIME *s,time_t t); -OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj(ASN1_GENERALIZEDTIME *s, time_t t, int offset_day, long offset_sec); -OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str); -OPENSSL_EXPORT int ASN1_TIME_diff(int *pday, int *psec, const ASN1_TIME *from, const ASN1_TIME *to); - -DECLARE_ASN1_FUNCTIONS(ASN1_OCTET_STRING) -OPENSSL_EXPORT ASN1_OCTET_STRING * ASN1_OCTET_STRING_dup(const ASN1_OCTET_STRING *a); -OPENSSL_EXPORT int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a, const ASN1_OCTET_STRING *b); -OPENSSL_EXPORT int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *str, const unsigned char *data, int len); - -DECLARE_ASN1_FUNCTIONS(ASN1_VISIBLESTRING) -DECLARE_ASN1_FUNCTIONS(ASN1_UNIVERSALSTRING) -DECLARE_ASN1_FUNCTIONS(ASN1_UTF8STRING) -DECLARE_ASN1_FUNCTIONS(ASN1_NULL) -DECLARE_ASN1_FUNCTIONS(ASN1_BMPSTRING) - -DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, ASN1_PRINTABLE) - 
-DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, DIRECTORYSTRING) -DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, DISPLAYTEXT) -DECLARE_ASN1_FUNCTIONS(ASN1_PRINTABLESTRING) -DECLARE_ASN1_FUNCTIONS(ASN1_T61STRING) -DECLARE_ASN1_FUNCTIONS(ASN1_IA5STRING) -DECLARE_ASN1_FUNCTIONS(ASN1_GENERALSTRING) -DECLARE_ASN1_FUNCTIONS(ASN1_UTCTIME) -DECLARE_ASN1_FUNCTIONS(ASN1_GENERALIZEDTIME) -DECLARE_ASN1_FUNCTIONS(ASN1_TIME) - -DECLARE_ASN1_ITEM(ASN1_OCTET_STRING_NDEF) - -OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s,time_t t); -OPENSSL_EXPORT ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s,time_t t, int offset_day, long offset_sec); -OPENSSL_EXPORT int ASN1_TIME_check(ASN1_TIME *t); -OPENSSL_EXPORT ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *t, ASN1_GENERALIZEDTIME **out); -OPENSSL_EXPORT int ASN1_TIME_set_string(ASN1_TIME *s, const char *str); - -OPENSSL_EXPORT int i2a_ASN1_INTEGER(BIO *bp, ASN1_INTEGER *a); -OPENSSL_EXPORT int i2a_ASN1_ENUMERATED(BIO *bp, ASN1_ENUMERATED *a); -OPENSSL_EXPORT int i2a_ASN1_OBJECT(BIO *bp,ASN1_OBJECT *a); -OPENSSL_EXPORT int i2a_ASN1_STRING(BIO *bp, ASN1_STRING *a, int type); -OPENSSL_EXPORT int i2t_ASN1_OBJECT(char *buf,int buf_len,ASN1_OBJECT *a); - -OPENSSL_EXPORT ASN1_OBJECT *ASN1_OBJECT_create(int nid, unsigned char *data,int len, const char *sn, const char *ln); - -OPENSSL_EXPORT int ASN1_INTEGER_set(ASN1_INTEGER *a, long v); -OPENSSL_EXPORT int ASN1_INTEGER_set_uint64(ASN1_INTEGER *out, uint64_t v); -OPENSSL_EXPORT long ASN1_INTEGER_get(const ASN1_INTEGER *a); -OPENSSL_EXPORT ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, ASN1_INTEGER *ai); -OPENSSL_EXPORT BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai,BIGNUM *bn); - -OPENSSL_EXPORT int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v); -OPENSSL_EXPORT long ASN1_ENUMERATED_get(ASN1_ENUMERATED *a); -OPENSSL_EXPORT ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(BIGNUM *bn, ASN1_ENUMERATED *ai); -OPENSSL_EXPORT BIGNUM *ASN1_ENUMERATED_to_BN(ASN1_ENUMERATED *ai,BIGNUM *bn); - -/* General */ 
-/* given a string, return the correct type, max is the maximum length */ -OPENSSL_EXPORT int ASN1_PRINTABLE_type(const unsigned char *s, int max); - -OPENSSL_EXPORT unsigned long ASN1_tag2bit(int tag); - -/* SPECIALS */ -OPENSSL_EXPORT int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag, int *pclass, long omax); -OPENSSL_EXPORT void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag, int xclass); -OPENSSL_EXPORT int ASN1_put_eoc(unsigned char **pp); -OPENSSL_EXPORT int ASN1_object_size(int constructed, int length, int tag); - -OPENSSL_EXPORT void *ASN1_item_dup(const ASN1_ITEM *it, void *x); - -#ifndef OPENSSL_NO_FP_API -OPENSSL_EXPORT void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x); -OPENSSL_EXPORT int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *x); -OPENSSL_EXPORT int ASN1_STRING_print_ex_fp(FILE *fp, ASN1_STRING *str, unsigned long flags); -#endif - -OPENSSL_EXPORT int ASN1_STRING_to_UTF8(unsigned char **out, ASN1_STRING *in); - -OPENSSL_EXPORT void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x); -OPENSSL_EXPORT int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *x); -OPENSSL_EXPORT int ASN1_UTCTIME_print(BIO *fp, const ASN1_UTCTIME *a); -OPENSSL_EXPORT int ASN1_GENERALIZEDTIME_print(BIO *fp, const ASN1_GENERALIZEDTIME *a); -OPENSSL_EXPORT int ASN1_TIME_print(BIO *fp, const ASN1_TIME *a); -OPENSSL_EXPORT int ASN1_STRING_print(BIO *bp, const ASN1_STRING *v); -OPENSSL_EXPORT int ASN1_STRING_print_ex(BIO *out, ASN1_STRING *str, unsigned long flags); -OPENSSL_EXPORT const char *ASN1_tag2str(int tag); - -/* Used to load and write netscape format cert */ - -OPENSSL_EXPORT void *ASN1_item_unpack(ASN1_STRING *oct, const ASN1_ITEM *it); - -OPENSSL_EXPORT ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, ASN1_OCTET_STRING **oct); +// ASN1_PRINTABLE_type interprets |len| bytes from |s| as a Latin-1 string. 
It +// returns the first of |V_ASN1_PRINTABLESTRING|, |V_ASN1_IA5STRING|, or +// |V_ASN1_T61STRING| that can represent every character. If |len| is negative, +// |strlen(s)| is used instead. +// +// TODO(davidben): Remove this once all copies of Conscrypt have been updated +// past https://github.com/google/conscrypt/pull/1032. +OPENSSL_EXPORT int ASN1_PRINTABLE_type(const unsigned char *s, int len); +// ASN1_STRING_set_default_mask does nothing. OPENSSL_EXPORT void ASN1_STRING_set_default_mask(unsigned long mask); -OPENSSL_EXPORT int ASN1_STRING_set_default_mask_asc(const char *p); -OPENSSL_EXPORT unsigned long ASN1_STRING_get_default_mask(void); -OPENSSL_EXPORT int ASN1_mbstring_copy(ASN1_STRING **out, const unsigned char *in, int len, int inform, unsigned long mask); -OPENSSL_EXPORT int ASN1_mbstring_ncopy(ASN1_STRING **out, const unsigned char *in, int len, int inform, unsigned long mask, long minsize, long maxsize); -OPENSSL_EXPORT ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out, const unsigned char *in, int inlen, int inform, int nid); -OPENSSL_EXPORT ASN1_STRING_TABLE *ASN1_STRING_TABLE_get(int nid); -OPENSSL_EXPORT int ASN1_STRING_TABLE_add(int, long, long, unsigned long, unsigned long); +// ASN1_STRING_set_default_mask_asc returns one. +OPENSSL_EXPORT int ASN1_STRING_set_default_mask_asc(const char *p); + +// ASN1_STRING_get_default_mask returns |B_ASN1_UTF8STRING|. +OPENSSL_EXPORT unsigned long ASN1_STRING_get_default_mask(void); + +// ASN1_STRING_TABLE_cleanup does nothing. OPENSSL_EXPORT void ASN1_STRING_TABLE_cleanup(void); -/* ASN1 template functions */ +// M_ASN1_* are legacy aliases for various |ASN1_STRING| functions. Use the +// functions themselves. 
+#define M_ASN1_STRING_length(x) ASN1_STRING_length(x) +#define M_ASN1_STRING_type(x) ASN1_STRING_type(x) +#define M_ASN1_STRING_data(x) ASN1_STRING_data(x) +#define M_ASN1_BIT_STRING_new() ASN1_BIT_STRING_new() +#define M_ASN1_BIT_STRING_free(a) ASN1_BIT_STRING_free(a) +#define M_ASN1_BIT_STRING_dup(a) ASN1_STRING_dup(a) +#define M_ASN1_BIT_STRING_cmp(a, b) ASN1_STRING_cmp(a, b) +#define M_ASN1_BIT_STRING_set(a, b, c) ASN1_BIT_STRING_set(a, b, c) +#define M_ASN1_INTEGER_new() ASN1_INTEGER_new() +#define M_ASN1_INTEGER_free(a) ASN1_INTEGER_free(a) +#define M_ASN1_INTEGER_dup(a) ASN1_INTEGER_dup(a) +#define M_ASN1_INTEGER_cmp(a, b) ASN1_INTEGER_cmp(a, b) +#define M_ASN1_ENUMERATED_new() ASN1_ENUMERATED_new() +#define M_ASN1_ENUMERATED_free(a) ASN1_ENUMERATED_free(a) +#define M_ASN1_ENUMERATED_dup(a) ASN1_STRING_dup(a) +#define M_ASN1_ENUMERATED_cmp(a, b) ASN1_STRING_cmp(a, b) +#define M_ASN1_OCTET_STRING_new() ASN1_OCTET_STRING_new() +#define M_ASN1_OCTET_STRING_free(a) ASN1_OCTET_STRING_free() +#define M_ASN1_OCTET_STRING_dup(a) ASN1_OCTET_STRING_dup(a) +#define M_ASN1_OCTET_STRING_cmp(a, b) ASN1_OCTET_STRING_cmp(a, b) +#define M_ASN1_OCTET_STRING_set(a, b, c) ASN1_OCTET_STRING_set(a, b, c) +#define M_ASN1_OCTET_STRING_print(a, b) ASN1_STRING_print(a, b) +#define M_ASN1_PRINTABLESTRING_new() ASN1_PRINTABLESTRING_new() +#define M_ASN1_PRINTABLESTRING_free(a) ASN1_PRINTABLESTRING_free(a) +#define M_ASN1_IA5STRING_new() ASN1_IA5STRING_new() +#define M_ASN1_IA5STRING_free(a) ASN1_IA5STRING_free(a) +#define M_ASN1_IA5STRING_dup(a) ASN1_STRING_dup(a) +#define M_ASN1_UTCTIME_new() ASN1_UTCTIME_new() +#define M_ASN1_UTCTIME_free(a) ASN1_UTCTIME_free(a) +#define M_ASN1_UTCTIME_dup(a) ASN1_STRING_dup(a) +#define M_ASN1_T61STRING_new() ASN1_T61STRING_new() +#define M_ASN1_T61STRING_free(a) ASN1_T61STRING_free(a) +#define M_ASN1_GENERALIZEDTIME_new() ASN1_GENERALIZEDTIME_new() +#define M_ASN1_GENERALIZEDTIME_free(a) ASN1_GENERALIZEDTIME_free(a) +#define 
M_ASN1_GENERALIZEDTIME_dup(a) ASN1_STRING_dup(a) +#define M_ASN1_GENERALSTRING_new() ASN1_GENERALSTRING_new() +#define M_ASN1_GENERALSTRING_free(a) ASN1_GENERALSTRING_free(a) +#define M_ASN1_UNIVERSALSTRING_new() ASN1_UNIVERSALSTRING_new() +#define M_ASN1_UNIVERSALSTRING_free(a) ASN1_UNIVERSALSTRING_free(a) +#define M_ASN1_BMPSTRING_new() ASN1_BMPSTRING_new() +#define M_ASN1_BMPSTRING_free(a) ASN1_BMPSTRING_free(a) +#define M_ASN1_VISIBLESTRING_new() ASN1_VISIBLESTRING_new() +#define M_ASN1_VISIBLESTRING_free(a) ASN1_VISIBLESTRING_free(a) +#define M_ASN1_UTF8STRING_new() ASN1_UTF8STRING_new() +#define M_ASN1_UTF8STRING_free(a) ASN1_UTF8STRING_free(a) -/* Old API compatible functions */ -OPENSSL_EXPORT ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it); -OPENSSL_EXPORT void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it); -OPENSSL_EXPORT ASN1_VALUE * ASN1_item_d2i(ASN1_VALUE **val, const unsigned char **in, long len, const ASN1_ITEM *it); -OPENSSL_EXPORT int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it); -OPENSSL_EXPORT int ASN1_item_ndef_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it); +// B_ASN1_PRINTABLE is a bitmask for an ad-hoc subset of string-like types. Note +// the presence of |B_ASN1_UNKNOWN| means it includes types which |ASN1_tag2bit| +// maps to |B_ASN1_UNKNOWN|. +// +// Do not use this. Despite the name, it has no connection to PrintableString or +// printable characters. See https://crbug.com/boringssl/412. +#define B_ASN1_PRINTABLE \ + (B_ASN1_NUMERICSTRING | B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | \ + B_ASN1_IA5STRING | B_ASN1_BIT_STRING | B_ASN1_UNIVERSALSTRING | \ + B_ASN1_BMPSTRING | B_ASN1_UTF8STRING | B_ASN1_SEQUENCE | B_ASN1_UNKNOWN) -OPENSSL_EXPORT ASN1_TYPE *ASN1_generate_nconf(char *str, CONF *nconf); -OPENSSL_EXPORT ASN1_TYPE *ASN1_generate_v3(char *str, X509V3_CTX *cnf); +// ASN1_PRINTABLE_new returns a newly-allocated |ASN1_STRING| with type -1, or +// NULL on error. 
The resulting |ASN1_STRING| is not a valid ASN.1 value until +// initialized with a value. +OPENSSL_EXPORT ASN1_STRING *ASN1_PRINTABLE_new(void); + +// ASN1_PRINTABLE_free calls |ASN1_STRING_free|. +OPENSSL_EXPORT void ASN1_PRINTABLE_free(ASN1_STRING *str); + +// d2i_ASN1_PRINTABLE parses up to |len| bytes from |*inp| as a DER-encoded +// CHOICE of an ad-hoc subset of string-like types, as described in +// |d2i_SAMPLE_with_reuse|. +// +// Do not use this. Despite, the name it has no connection to PrintableString or +// printable characters. See https://crbug.com/boringssl/412. +// +// TODO(https://crbug.com/boringssl/354): This function currently also accepts +// BER, but this will be removed in the future. +OPENSSL_EXPORT ASN1_STRING *d2i_ASN1_PRINTABLE(ASN1_STRING **out, + const uint8_t **inp, long len); + +// i2d_ASN1_PRINTABLE marshals |in| as DER, as described in |i2d_SAMPLE|. +// +// Do not use this. Despite the name, it has no connection to PrintableString or +// printable characters. See https://crbug.com/boringssl/412. +OPENSSL_EXPORT int i2d_ASN1_PRINTABLE(const ASN1_STRING *in, uint8_t **outp); + +// ASN1_PRINTABLE is an |ASN1_ITEM| whose ASN.1 type is a CHOICE of an ad-hoc +// subset of string-like types, and whose C type is |ASN1_STRING*|. +// +// Do not use this. Despite the name, it has no connection to PrintableString or +// printable characters. See https://crbug.com/boringssl/412. 
+DECLARE_ASN1_ITEM(ASN1_PRINTABLE) -#ifdef __cplusplus -} +#if defined(__cplusplus) +} // extern C extern "C++" { @@ -810,7 +1936,7 @@ BORINGSSL_MAKE_DELETER(ASN1_TYPE, ASN1_TYPE_free) BSSL_NAMESPACE_END -} /* extern C++ */ +} // extern C++ #endif @@ -907,5 +2033,7 @@ BSSL_NAMESPACE_END #define ASN1_R_WRONG_TAG 190 #define ASN1_R_WRONG_TYPE 191 #define ASN1_R_NESTED_TOO_DEEP 192 +#define ASN1_R_BAD_TEMPLATE 193 +#define ASN1_R_INVALID_BIT_STRING_PADDING 194 #endif diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_base.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_base.h index 6063c5c..e32e786 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_base.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_base.h @@ -95,21 +95,25 @@ extern "C" { #elif defined(__x86) || defined(__i386) || defined(__i386__) || defined(_M_IX86) #define OPENSSL_32_BIT #define OPENSSL_X86 -#elif defined(__aarch64__) +#elif defined(__AARCH64EL__) || defined(_M_ARM64) #define OPENSSL_64_BIT #define OPENSSL_AARCH64 -#elif defined(__arm) || defined(__arm__) || defined(_M_ARM) +#elif defined(__ARMEL__) || defined(_M_ARM) #define OPENSSL_32_BIT #define OPENSSL_ARM #elif (defined(__PPC64__) || defined(__powerpc64__)) && defined(_LITTLE_ENDIAN) #define OPENSSL_64_BIT #define OPENSSL_PPC64LE -#elif defined(__mips__) && !defined(__LP64__) +#elif defined(__MIPSEL__) && !defined(__LP64__) #define OPENSSL_32_BIT #define OPENSSL_MIPS -#elif defined(__mips__) && defined(__LP64__) +#elif defined(__MIPSEL__) && defined(__LP64__) #define OPENSSL_64_BIT #define OPENSSL_MIPS64 +#elif defined(__riscv) && __SIZEOF_POINTER__ == 8 +#define OPENSSL_64_BIT +#elif defined(__riscv) && __SIZEOF_POINTER__ == 4 +#define OPENSSL_32_BIT #elif defined(__pnacl__) #define OPENSSL_32_BIT #define OPENSSL_PNACL @@ -143,7 +147,10 @@ extern "C" { #define OPENSSL_WINDOWS #endif -#if defined(__linux__) +// Trusty isn't Linux but currently defines __linux__. As a workaround, we +// exclude it here. 
+// TODO(b/169780122): Remove this workaround once Trusty no longer defines it. +#if defined(__linux__) && !defined(__TRUSTY__) #define OPENSSL_LINUX #endif @@ -151,7 +158,7 @@ extern "C" { #define OPENSSL_FUCHSIA #endif -#if defined(TRUSTY) +#if defined(__TRUSTY__) #define OPENSSL_TRUSTY #define OPENSSL_NO_THREADS_CORRUPT_MEMORY_AND_LEAK_SECRETS_IF_THREADED #endif @@ -160,6 +167,10 @@ extern "C" { #define OPENSSL_ANDROID #endif +#if defined(__FreeBSD__) +#define OPENSSL_FREEBSD +#endif + // BoringSSL requires platform's locking APIs to make internal global state // thread-safe, including the PRNG. On some single-threaded embedded platforms, // locking APIs may not exist, so this dependency may be disabled with the @@ -178,7 +189,7 @@ extern "C" { #endif #define OPENSSL_IS_BORINGSSL -#define OPENSSL_VERSION_NUMBER 0x1010007f +#define OPENSSL_VERSION_NUMBER 0x1010107f #define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER // BORINGSSL_API_VERSION is a positive integer that increments as BoringSSL @@ -189,7 +200,7 @@ extern "C" { // A consumer may use this symbol in the preprocessor to temporarily build // against multiple revisions of BoringSSL at the same time. It is not // recommended to do so for longer than is necessary. -#define BORINGSSL_API_VERSION 10 +#define BORINGSSL_API_VERSION 16 #if defined(BORINGSSL_SHARED_LIBRARY) @@ -322,8 +333,11 @@ enum ssl_verify_result_t BORINGSSL_ENUM_INT; // CRYPTO_THREADID is a dummy value. typedef int CRYPTO_THREADID; +// An |ASN1_NULL| is an opaque type. asn1.h represents the ASN.1 NULL value as +// an opaque, non-NULL |ASN1_NULL*| pointer. 
+typedef struct asn1_null_st ASN1_NULL; + typedef int ASN1_BOOLEAN; -typedef int ASN1_NULL; typedef struct ASN1_ITEM_st ASN1_ITEM; typedef struct asn1_object_st ASN1_OBJECT; typedef struct asn1_pctx_st ASN1_PCTX; @@ -359,21 +373,19 @@ typedef struct X509_POLICY_NODE_st X509_POLICY_NODE; typedef struct X509_POLICY_TREE_st X509_POLICY_TREE; typedef struct X509_VERIFY_PARAM_st X509_VERIFY_PARAM; typedef struct X509_algor_st X509_ALGOR; -typedef struct X509_crl_info_st X509_CRL_INFO; typedef struct X509_crl_st X509_CRL; typedef struct X509_extension_st X509_EXTENSION; typedef struct X509_info_st X509_INFO; typedef struct X509_name_entry_st X509_NAME_ENTRY; typedef struct X509_name_st X509_NAME; typedef struct X509_pubkey_st X509_PUBKEY; -typedef struct X509_req_info_st X509_REQ_INFO; typedef struct X509_req_st X509_REQ; typedef struct X509_sig_st X509_SIG; -typedef struct X509_val_st X509_VAL; typedef struct bignum_ctx BN_CTX; typedef struct bignum_st BIGNUM; typedef struct bio_method_st BIO_METHOD; typedef struct bio_st BIO; +typedef struct blake2b_state_st BLAKE2B_CTX; typedef struct bn_gencb_st BN_GENCB; typedef struct bn_mont_ctx_st BN_MONT_CTX; typedef struct buf_mem_st BUF_MEM; @@ -398,6 +410,11 @@ typedef struct evp_aead_st EVP_AEAD; typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX; typedef struct evp_cipher_st EVP_CIPHER; typedef struct evp_encode_ctx_st EVP_ENCODE_CTX; +typedef struct evp_hpke_aead_st EVP_HPKE_AEAD; +typedef struct evp_hpke_ctx_st EVP_HPKE_CTX; +typedef struct evp_hpke_kdf_st EVP_HPKE_KDF; +typedef struct evp_hpke_kem_st EVP_HPKE_KEM; +typedef struct evp_hpke_key_st EVP_HPKE_KEY; typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD; typedef struct evp_pkey_ctx_st EVP_PKEY_CTX; typedef struct evp_pkey_method_st EVP_PKEY_METHOD; @@ -412,6 +429,7 @@ typedef struct private_key_st X509_PKEY; typedef struct rand_meth_st RAND_METHOD; typedef struct rc4_key_st RC4_KEY; typedef struct rsa_meth_st RSA_METHOD; +typedef struct rsa_pss_params_st 
RSA_PSS_PARAMS; typedef struct rsa_st RSA; typedef struct sha256_state_st SHA256_CTX; typedef struct sha512_state_st SHA512_CTX; @@ -420,6 +438,8 @@ typedef struct spake2_ctx_st SPAKE2_CTX; typedef struct srtp_protection_profile_st SRTP_PROTECTION_PROFILE; typedef struct ssl_cipher_st SSL_CIPHER; typedef struct ssl_ctx_st SSL_CTX; +typedef struct ssl_early_callback_ctx SSL_CLIENT_HELLO; +typedef struct ssl_ech_keys_st SSL_ECH_KEYS; typedef struct ssl_method_st SSL_METHOD; typedef struct ssl_private_key_method_st SSL_PRIVATE_KEY_METHOD; typedef struct ssl_quic_method_st SSL_QUIC_METHOD; @@ -434,9 +454,10 @@ typedef struct trust_token_method_st TRUST_TOKEN_METHOD; typedef struct v3_ext_ctx X509V3_CTX; typedef struct x509_attributes_st X509_ATTRIBUTE; typedef struct x509_cert_aux_st X509_CERT_AUX; -typedef struct x509_cinf_st X509_CINF; typedef struct x509_crl_method_st X509_CRL_METHOD; typedef struct x509_lookup_st X509_LOOKUP; +typedef struct x509_lookup_method_st X509_LOOKUP_METHOD; +typedef struct x509_object_st X509_OBJECT; typedef struct x509_revoked_st X509_REVOKED; typedef struct x509_st X509; typedef struct x509_store_ctx_st X509_STORE_CTX; @@ -525,8 +546,39 @@ class StackAllocated { StackAllocated() { init(&ctx_); } ~StackAllocated() { cleanup(&ctx_); } - StackAllocated(const StackAllocated &) = delete; - T& operator=(const StackAllocated &) = delete; + StackAllocated(const StackAllocated &) = delete; + StackAllocated& operator=(const StackAllocated &) = delete; + + T *get() { return &ctx_; } + const T *get() const { return &ctx_; } + + T *operator->() { return &ctx_; } + const T *operator->() const { return &ctx_; } + + void Reset() { + cleanup(&ctx_); + init(&ctx_); + } + + private: + T ctx_; +}; + +template +class StackAllocatedMovable { + public: + StackAllocatedMovable() { init(&ctx_); } + ~StackAllocatedMovable() { cleanup(&ctx_); } + + StackAllocatedMovable(StackAllocatedMovable &&other) { + init(&ctx_); + move(&ctx_, &other.ctx_); + } + 
StackAllocatedMovable &operator=(StackAllocatedMovable &&other) { + move(&ctx_, &other.ctx_); + return *this; + } T *get() { return &ctx_; } const T *get() const { return &ctx_; } diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bio.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bio.h index 31e1881..cdd8615 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bio.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bio.h @@ -199,6 +199,10 @@ OPENSSL_EXPORT int BIO_should_io_special(const BIO *bio); // retried. The return value is one of the |BIO_RR_*| values. OPENSSL_EXPORT int BIO_get_retry_reason(const BIO *bio); +// BIO_set_retry_reason sets the special I/O operation that needs to be retried +// to |reason|, which should be one of the |BIO_RR_*| values. +OPENSSL_EXPORT void BIO_set_retry_reason(BIO *bio, int reason); + // BIO_clear_flags ANDs |bio->flags| with the bitwise-complement of |flags|. OPENSSL_EXPORT void BIO_clear_flags(BIO *bio, int flags); @@ -373,7 +377,9 @@ OPENSSL_EXPORT int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, OPENSSL_EXPORT const BIO_METHOD *BIO_s_mem(void); // BIO_new_mem_buf creates read-only BIO that reads from |len| bytes at |buf|. -// It does not take ownership of |buf|. It returns the BIO or NULL on error. +// It returns the BIO or NULL on error. This function does not copy or take +// ownership of |buf|. The caller must ensure the memory pointed to by |buf| +// outlives the |BIO|. // // If |len| is negative, then |buf| is treated as a NUL-terminated string, but // don't depend on this in new code. @@ -502,6 +508,25 @@ OPENSSL_EXPORT int BIO_append_filename(BIO *bio, const char *filename); // |FILE| will be closed when |bio| is freed. OPENSSL_EXPORT int BIO_rw_filename(BIO *bio, const char *filename); +// BIO_tell returns the file offset of |bio|, or a negative number on error or +// if |bio| does not support the operation. 
+// +// TODO(https://crbug.com/boringssl/465): On platforms where |long| is 32-bit, +// this function cannot report 64-bit offsets. +OPENSSL_EXPORT long BIO_tell(BIO *bio); + +// BIO_seek sets the file offset of |bio| to |offset|. It returns a non-negative +// number on success and a negative number on error. If |bio| is a file +// descriptor |BIO|, it returns the resulting file offset on success. If |bio| +// is a file |BIO|, it returns zero on success. +// +// WARNING: This function's return value conventions differs from most functions +// in this library. +// +// TODO(https://crbug.com/boringssl/465): On platforms where |long| is 32-bit, +// this function cannot handle 64-bit offsets. +OPENSSL_EXPORT long BIO_seek(BIO *bio, long offset); + // Socket BIOs. // diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bn.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bn.h index e13cc39..b64ebfb 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bn.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bn.h @@ -658,6 +658,14 @@ struct bn_gencb_st { int (*callback)(int event, int n, struct bn_gencb_st *); }; +// BN_GENCB_new returns a newly-allocated |BN_GENCB| object, or NULL on +// allocation failure. The result must be released with |BN_GENCB_free| when +// done. +OPENSSL_EXPORT BN_GENCB *BN_GENCB_new(void); + +// BN_GENCB_free releases memory associated with |callback|. +OPENSSL_EXPORT void BN_GENCB_free(BN_GENCB *callback); + // BN_GENCB_set configures |callback| to call |f| and sets |callout->arg| to // |arg|. OPENSSL_EXPORT void BN_GENCB_set(BN_GENCB *callback, @@ -687,9 +695,9 @@ OPENSSL_EXPORT int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, // BN_prime_checks_for_validation can be used as the |checks| argument to the // primarily testing functions when validating an externally-supplied candidate // prime. It gives a false positive rate of at most 2^{-128}. 
(The worst case -// false positive rate for a single iteration is 1/4, so we perform 32 -// iterations.) -#define BN_prime_checks_for_validation 32 +// false positive rate for a single iteration is 1/4 per +// https://eprint.iacr.org/2018/749. (1/4)^64 = 2^{-128}.) +#define BN_prime_checks_for_validation 64 // BN_prime_checks_for_generation can be used as the |checks| argument to the // primality testing functions when generating random primes. It gives a false diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols.h index 1019a88..c13e426 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols.h @@ -79,12 +79,14 @@ #define BIO_reset BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_reset) #define BIO_rw_filename BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_rw_filename) #define BIO_s_file BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_s_file) +#define BIO_seek BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_seek) #define BIO_set_close BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_close) #define BIO_set_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_data) #define BIO_set_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_flags) #define BIO_set_fp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_fp) #define BIO_set_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_init) #define BIO_set_retry_read BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_read) +#define BIO_set_retry_reason BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_reason) #define BIO_set_retry_special BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_special) #define BIO_set_retry_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_retry_write) #define BIO_set_shutdown BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_set_shutdown) @@ -94,6 +96,7 @@ #define BIO_should_retry 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_retry) #define BIO_should_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_should_write) #define BIO_snprintf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_snprintf) +#define BIO_tell BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_tell) #define BIO_test_flags BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_test_flags) #define BIO_up_ref BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_up_ref) #define BIO_vfree BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BIO_vfree) @@ -108,6 +111,8 @@ #define BN_CTX_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_new) #define BN_CTX_start BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_CTX_start) #define BN_GENCB_call BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_call) +#define BN_GENCB_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_free) +#define BN_GENCB_new BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_new) #define BN_GENCB_set BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_GENCB_set) #define BN_MONT_CTX_copy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_copy) #define BN_MONT_CTX_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, BN_MONT_CTX_free) @@ -239,6 +244,7 @@ #define CBB_add_u64le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u64le) #define CBB_add_u8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u8) #define CBB_add_u8_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_u8_length_prefixed) +#define CBB_add_zeros BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_add_zeros) #define CBB_cleanup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_cleanup) #define CBB_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_data) #define CBB_did_write BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBB_did_write) @@ -284,8 +290,11 @@ #define CBS_get_u64le BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u64le) #define CBS_get_u8 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u8) #define CBS_get_u8_length_prefixed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_u8_length_prefixed) +#define CBS_get_until_first 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_get_until_first) #define CBS_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_init) +#define CBS_is_unsigned_asn1_integer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_unsigned_asn1_integer) #define CBS_is_valid_asn1_bitstring BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_valid_asn1_bitstring) +#define CBS_is_valid_asn1_integer BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_is_valid_asn1_integer) #define CBS_len BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_len) #define CBS_mem_equal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_mem_equal) #define CBS_peek_asn1_tag BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CBS_peek_asn1_tag) @@ -318,6 +327,7 @@ #define CRYPTO_ctr128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt) #define CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) #define CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing) +#define CRYPTO_free BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_free) #define CRYPTO_free_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_free_ex_data) #define CRYPTO_gcm128_aad BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_aad) #define CRYPTO_gcm128_decrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_gcm128_decrypt) @@ -339,16 +349,20 @@ #define CRYPTO_get_thread_local BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_get_thread_local) #define CRYPTO_ghash_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ghash_init) #define CRYPTO_has_asm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_has_asm) +#define CRYPTO_init_sysrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_init_sysrand) #define CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_is_confidential_build) #define CRYPTO_library_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_library_init) +#define CRYPTO_malloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, 
CRYPTO_malloc) #define CRYPTO_malloc_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_malloc_init) #define CRYPTO_memcmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_memcmp) #define CRYPTO_new_ex_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_new_ex_data) #define CRYPTO_num_locks BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_num_locks) #define CRYPTO_ofb128_encrypt BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_ofb128_encrypt) #define CRYPTO_once BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_once) +#define CRYPTO_pre_sandbox_init BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_pre_sandbox_init) #define CRYPTO_rdrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_rdrand) #define CRYPTO_rdrand_multiple8_buf BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_rdrand_multiple8_buf) +#define CRYPTO_realloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_realloc) #define CRYPTO_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_refcount_dec_and_test_zero) #define CRYPTO_refcount_inc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_refcount_inc) #define CRYPTO_set_add_lock_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_add_lock_callback) @@ -360,6 +374,7 @@ #define CRYPTO_set_locking_callback BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_locking_callback) #define CRYPTO_set_thread_local BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_set_thread_local) #define CRYPTO_sysrand BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand) +#define CRYPTO_sysrand_for_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand_for_seed) #define CRYPTO_sysrand_if_available BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CRYPTO_sysrand_if_available) #define CTR_DRBG_clear BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_clear) #define CTR_DRBG_generate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, CTR_DRBG_generate) @@ -401,6 +416,7 @@ #define ERR_remove_thread_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_remove_thread_state) #define ERR_restore_state 
BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_restore_state) #define ERR_save_state BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_save_state) +#define ERR_set_error_data BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_set_error_data) #define ERR_set_mark BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, ERR_set_mark) #define EVP_CIPHER_CTX_block_size BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_block_size) #define EVP_CIPHER_CTX_cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cipher) @@ -429,24 +445,29 @@ #define EVP_CIPHER_mode BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_mode) #define EVP_CIPHER_nid BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CIPHER_nid) #define EVP_Cipher BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_Cipher) +#define EVP_CipherFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherFinal) #define EVP_CipherFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherFinal_ex) #define EVP_CipherInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherInit) #define EVP_CipherInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherInit_ex) #define EVP_CipherUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_CipherUpdate) +#define EVP_DecryptFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptFinal) #define EVP_DecryptFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptFinal_ex) #define EVP_DecryptInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptInit) #define EVP_DecryptInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptInit_ex) #define EVP_DecryptUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_DecryptUpdate) +#define EVP_EncryptFinal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptFinal) #define EVP_EncryptFinal_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptFinal_ex) #define EVP_EncryptInit BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptInit) #define EVP_EncryptInit_ex BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptInit_ex) #define EVP_EncryptUpdate BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_EncryptUpdate) #define 
EVP_add_cipher_alias BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_add_cipher_alias) #define EVP_aead_aes_128_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm) +#define EVP_aead_aes_128_gcm_randnonce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_randnonce) #define EVP_aead_aes_128_gcm_tls12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls12) #define EVP_aead_aes_128_gcm_tls13 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls13) #define EVP_aead_aes_192_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_192_gcm) #define EVP_aead_aes_256_gcm BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm) +#define EVP_aead_aes_256_gcm_randnonce BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_randnonce) #define EVP_aead_aes_256_gcm_tls12 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls12) #define EVP_aead_aes_256_gcm_tls13 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls13) #define EVP_aes_128_cbc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, EVP_aes_128_cbc) @@ -480,6 +501,7 @@ #define OPENSSL_realloc BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_realloc) #define OPENSSL_strcasecmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strcasecmp) #define OPENSSL_strdup BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strdup) +#define OPENSSL_strhash BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strhash) #define OPENSSL_strlcat BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strlcat) #define OPENSSL_strlcpy BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strlcpy) #define OPENSSL_strncasecmp BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_strncasecmp) @@ -488,6 +510,7 @@ #define OPENSSL_tolower BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OPENSSL_tolower) #define OpenSSL_version BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_version) #define OpenSSL_version_num BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, OpenSSL_version_num) +#define RAND_OpenSSL BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_OpenSSL) 
#define RAND_SSLeay BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_SSLeay) #define RAND_add BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_add) #define RAND_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_bytes) @@ -502,7 +525,6 @@ #define RAND_pseudo_bytes BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_pseudo_bytes) #define RAND_seed BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_seed) #define RAND_set_rand_method BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_set_rand_method) -#define RAND_set_urandom_fd BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_set_urandom_fd) #define RAND_status BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RAND_status) #define RSAZ_1024_mod_exp_avx2 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, RSAZ_1024_mod_exp_avx2) #define SSLeay BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, SSLeay) @@ -581,6 +603,7 @@ #define bn_scatter5 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_scatter5) #define bn_select_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_select_words) #define bn_set_minimal_width BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_minimal_width) +#define bn_set_static_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_static_words) #define bn_set_words BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_set_words) #define bn_sqr8x_internal BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr8x_internal) #define bn_sqr_comba4 BORINGSSL_ADD_PREFIX(BORINGSSL_PREFIX, bn_sqr_comba4) diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols_asm.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols_asm.h index d699ea9..a4225ad 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols_asm.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_boringssl_prefix_symbols_asm.h @@ -84,12 +84,14 @@ #define _BIO_reset BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_reset) #define _BIO_rw_filename BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_rw_filename) #define _BIO_s_file 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_s_file) +#define _BIO_seek BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_seek) #define _BIO_set_close BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_close) #define _BIO_set_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_data) #define _BIO_set_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_flags) #define _BIO_set_fp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_fp) #define _BIO_set_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_init) #define _BIO_set_retry_read BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_read) +#define _BIO_set_retry_reason BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_reason) #define _BIO_set_retry_special BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_special) #define _BIO_set_retry_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_retry_write) #define _BIO_set_shutdown BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_set_shutdown) @@ -99,6 +101,7 @@ #define _BIO_should_retry BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_retry) #define _BIO_should_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_should_write) #define _BIO_snprintf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_snprintf) +#define _BIO_tell BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_tell) #define _BIO_test_flags BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_test_flags) #define _BIO_up_ref BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_up_ref) #define _BIO_vfree BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BIO_vfree) @@ -113,6 +116,8 @@ #define _BN_CTX_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_new) #define _BN_CTX_start BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_CTX_start) #define _BN_GENCB_call BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_call) +#define _BN_GENCB_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_free) +#define 
_BN_GENCB_new BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_new) #define _BN_GENCB_set BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_GENCB_set) #define _BN_MONT_CTX_copy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_copy) #define _BN_MONT_CTX_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, BN_MONT_CTX_free) @@ -244,6 +249,7 @@ #define _CBB_add_u64le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u64le) #define _CBB_add_u8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u8) #define _CBB_add_u8_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_u8_length_prefixed) +#define _CBB_add_zeros BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_add_zeros) #define _CBB_cleanup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_cleanup) #define _CBB_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_data) #define _CBB_did_write BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBB_did_write) @@ -289,8 +295,11 @@ #define _CBS_get_u64le BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u64le) #define _CBS_get_u8 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u8) #define _CBS_get_u8_length_prefixed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_u8_length_prefixed) +#define _CBS_get_until_first BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_get_until_first) #define _CBS_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_init) +#define _CBS_is_unsigned_asn1_integer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_unsigned_asn1_integer) #define _CBS_is_valid_asn1_bitstring BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_valid_asn1_bitstring) +#define _CBS_is_valid_asn1_integer BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_is_valid_asn1_integer) #define _CBS_len BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_len) #define _CBS_mem_equal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_mem_equal) #define _CBS_peek_asn1_tag 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CBS_peek_asn1_tag) @@ -323,6 +332,7 @@ #define _CRYPTO_ctr128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt) #define _CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ctr128_encrypt_ctr32) #define _CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing) +#define _CRYPTO_free BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_free) #define _CRYPTO_free_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_free_ex_data) #define _CRYPTO_gcm128_aad BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_aad) #define _CRYPTO_gcm128_decrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_gcm128_decrypt) @@ -344,16 +354,20 @@ #define _CRYPTO_get_thread_local BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_get_thread_local) #define _CRYPTO_ghash_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_ghash_init) #define _CRYPTO_has_asm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_has_asm) +#define _CRYPTO_init_sysrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_init_sysrand) #define _CRYPTO_is_confidential_build BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_is_confidential_build) #define _CRYPTO_library_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_library_init) +#define _CRYPTO_malloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_malloc) #define _CRYPTO_malloc_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_malloc_init) #define _CRYPTO_memcmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_memcmp) #define _CRYPTO_new_ex_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_new_ex_data) #define _CRYPTO_num_locks BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_num_locks) #define _CRYPTO_ofb128_encrypt BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, 
CRYPTO_ofb128_encrypt) #define _CRYPTO_once BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_once) +#define _CRYPTO_pre_sandbox_init BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_pre_sandbox_init) #define _CRYPTO_rdrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_rdrand) #define _CRYPTO_rdrand_multiple8_buf BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_rdrand_multiple8_buf) +#define _CRYPTO_realloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_realloc) #define _CRYPTO_refcount_dec_and_test_zero BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_refcount_dec_and_test_zero) #define _CRYPTO_refcount_inc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_refcount_inc) #define _CRYPTO_set_add_lock_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_add_lock_callback) @@ -365,6 +379,7 @@ #define _CRYPTO_set_locking_callback BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_locking_callback) #define _CRYPTO_set_thread_local BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_set_thread_local) #define _CRYPTO_sysrand BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand) +#define _CRYPTO_sysrand_for_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand_for_seed) #define _CRYPTO_sysrand_if_available BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CRYPTO_sysrand_if_available) #define _CTR_DRBG_clear BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_clear) #define _CTR_DRBG_generate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, CTR_DRBG_generate) @@ -406,6 +421,7 @@ #define _ERR_remove_thread_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_remove_thread_state) #define _ERR_restore_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_restore_state) #define _ERR_save_state BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_save_state) +#define _ERR_set_error_data BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_set_error_data) #define _ERR_set_mark 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, ERR_set_mark) #define _EVP_CIPHER_CTX_block_size BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_block_size) #define _EVP_CIPHER_CTX_cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_CTX_cipher) @@ -434,24 +450,29 @@ #define _EVP_CIPHER_mode BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_mode) #define _EVP_CIPHER_nid BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CIPHER_nid) #define _EVP_Cipher BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_Cipher) +#define _EVP_CipherFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherFinal) #define _EVP_CipherFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherFinal_ex) #define _EVP_CipherInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherInit) #define _EVP_CipherInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherInit_ex) #define _EVP_CipherUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_CipherUpdate) +#define _EVP_DecryptFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptFinal) #define _EVP_DecryptFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptFinal_ex) #define _EVP_DecryptInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptInit) #define _EVP_DecryptInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptInit_ex) #define _EVP_DecryptUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_DecryptUpdate) +#define _EVP_EncryptFinal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptFinal) #define _EVP_EncryptFinal_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptFinal_ex) #define _EVP_EncryptInit BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptInit) #define _EVP_EncryptInit_ex BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptInit_ex) #define _EVP_EncryptUpdate BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_EncryptUpdate) #define _EVP_add_cipher_alias 
BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_add_cipher_alias) #define _EVP_aead_aes_128_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm) +#define _EVP_aead_aes_128_gcm_randnonce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_randnonce) #define _EVP_aead_aes_128_gcm_tls12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls12) #define _EVP_aead_aes_128_gcm_tls13 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_128_gcm_tls13) #define _EVP_aead_aes_192_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_192_gcm) #define _EVP_aead_aes_256_gcm BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm) +#define _EVP_aead_aes_256_gcm_randnonce BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_randnonce) #define _EVP_aead_aes_256_gcm_tls12 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls12) #define _EVP_aead_aes_256_gcm_tls13 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aead_aes_256_gcm_tls13) #define _EVP_aes_128_cbc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, EVP_aes_128_cbc) @@ -485,6 +506,7 @@ #define _OPENSSL_realloc BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_realloc) #define _OPENSSL_strcasecmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strcasecmp) #define _OPENSSL_strdup BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strdup) +#define _OPENSSL_strhash BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strhash) #define _OPENSSL_strlcat BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strlcat) #define _OPENSSL_strlcpy BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strlcpy) #define _OPENSSL_strncasecmp BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_strncasecmp) @@ -493,6 +515,7 @@ #define _OPENSSL_tolower BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OPENSSL_tolower) #define _OpenSSL_version BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_version) 
#define _OpenSSL_version_num BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, OpenSSL_version_num) +#define _RAND_OpenSSL BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_OpenSSL) #define _RAND_SSLeay BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_SSLeay) #define _RAND_add BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_add) #define _RAND_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_bytes) @@ -507,7 +530,6 @@ #define _RAND_pseudo_bytes BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_pseudo_bytes) #define _RAND_seed BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_seed) #define _RAND_set_rand_method BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_set_rand_method) -#define _RAND_set_urandom_fd BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_set_urandom_fd) #define _RAND_status BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RAND_status) #define _RSAZ_1024_mod_exp_avx2 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, RSAZ_1024_mod_exp_avx2) #define _SSLeay BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, SSLeay) @@ -586,6 +608,7 @@ #define _bn_scatter5 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_scatter5) #define _bn_select_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_select_words) #define _bn_set_minimal_width BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_minimal_width) +#define _bn_set_static_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_static_words) #define _bn_set_words BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_set_words) #define _bn_sqr8x_internal BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr8x_internal) #define _bn_sqr_comba4 BORINGSSL_ADD_PREFIX_MAC_ASM(BORINGSSL_PREFIX, bn_sqr_comba4) diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bytestring.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bytestring.h index 7b87b5a..a0caa4b 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bytestring.h +++ 
b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_bytestring.h @@ -51,6 +51,7 @@ struct cbs_st { // Defining any constructors requires we explicitly default the others. cbs_st() = default; cbs_st(const cbs_st &) = default; + cbs_st &operator=(const cbs_st &) = default; #endif }; @@ -153,6 +154,11 @@ OPENSSL_EXPORT int CBS_get_u16_length_prefixed(CBS *cbs, CBS *out); // returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u24_length_prefixed(CBS *cbs, CBS *out); +// CBS_get_until_first finds the first instance of |c| in |cbs|. If found, it +// sets |*out| to the text before the match, advances |cbs| over it, and returns +// one. Otherwise, it returns zero and leaves |cbs| unmodified. +OPENSSL_EXPORT int CBS_get_until_first(CBS *cbs, CBS *out, uint8_t c); + // Parsing ASN.1 // @@ -252,12 +258,16 @@ OPENSSL_EXPORT int CBS_get_any_asn1_element(CBS *cbs, CBS *out, size_t *out_header_len); // CBS_get_any_ber_asn1_element acts the same as |CBS_get_any_asn1_element| but -// also allows indefinite-length elements to be returned. In that case, -// |*out_header_len| and |CBS_len(out)| will both be two as only the header is -// returned, otherwise it behaves the same as the previous function. +// also allows indefinite-length elements to be returned and does not enforce +// that lengths are minimal. For indefinite-lengths, |*out_header_len| and +// |CBS_len(out)| will be equal as only the header is returned (although this is +// also true for empty elements so the length must be checked too). If +// |out_ber_found| is not NULL then it is set to one if any case of invalid DER +// but valid BER is found, and to zero otherwise. OPENSSL_EXPORT int CBS_get_any_ber_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, - size_t *out_header_len); + size_t *out_header_len, + int *out_ber_found); // CBS_get_asn1_uint64 gets an ASN.1 INTEGER from |cbs| using |CBS_get_asn1| // and sets |*out| to its value. 
It returns one on success and zero on error, @@ -310,14 +320,25 @@ OPENSSL_EXPORT int CBS_get_optional_asn1_bool(CBS *cbs, int *out, unsigned tag, int default_value); // CBS_is_valid_asn1_bitstring returns one if |cbs| is a valid ASN.1 BIT STRING -// and zero otherwise. +// body and zero otherwise. OPENSSL_EXPORT int CBS_is_valid_asn1_bitstring(const CBS *cbs); // CBS_asn1_bitstring_has_bit returns one if |cbs| is a valid ASN.1 BIT STRING -// and the specified bit is present and set. Otherwise, it returns zero. |bit| -// is indexed starting from zero. +// body and the specified bit is present and set. Otherwise, it returns zero. +// |bit| is indexed starting from zero. OPENSSL_EXPORT int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit); +// CBS_is_valid_asn1_integer returns one if |cbs| is a valid ASN.1 INTEGER, +// body and zero otherwise. On success, if |out_is_negative| is non-NULL, +// |*out_is_negative| will be set to one if |cbs| is negative and zero +// otherwise. +OPENSSL_EXPORT int CBS_is_valid_asn1_integer(const CBS *cbs, + int *out_is_negative); + +// CBS_is_unsigned_asn1_integer returns one if |cbs| is a valid non-negative +// ASN.1 INTEGER body and zero otherwise. +OPENSSL_EXPORT int CBS_is_unsigned_asn1_integer(const CBS *cbs); + // CBS_asn1_oid_to_text interprets |cbs| as DER-encoded ASN.1 OBJECT IDENTIFIER // contents (not including the element framing) and returns the ASCII // representation (e.g., "1.2.840.113554.4.1.72585") in a newly-allocated @@ -447,6 +468,10 @@ OPENSSL_EXPORT int CBB_add_asn1(CBB *cbb, CBB *out_contents, unsigned tag); // success and zero otherwise. OPENSSL_EXPORT int CBB_add_bytes(CBB *cbb, const uint8_t *data, size_t len); +// CBB_add_zeros append |len| bytes with value zero to |cbb|. It returns one on +// success and zero otherwise. +OPENSSL_EXPORT int CBB_add_zeros(CBB *cbb, size_t len); + // CBB_add_space appends |len| bytes to |cbb| and sets |*out_data| to point to // the beginning of that space. 
The caller must then write |len| bytes of // actual contents to |*out_data|. It returns one on success and zero diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_chacha.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_chacha.h index abcfd81..aa1cf70 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_chacha.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_chacha.h @@ -23,7 +23,7 @@ extern "C" { // ChaCha20. // -// ChaCha20 is a stream cipher. See https://tools.ietf.org/html/rfc7539. +// ChaCha20 is a stream cipher. See https://tools.ietf.org/html/rfc8439. // CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cipher.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cipher.h index 2e366d3..66553bc 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cipher.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cipher.h @@ -106,7 +106,10 @@ OPENSSL_EXPORT const EVP_CIPHER *EVP_rc2_cbc(void); const EVP_CIPHER *EVP_rc2_40_cbc(void); // EVP_get_cipherbynid returns the cipher corresponding to the given NID, or -// NULL if no such cipher is known. +// NULL if no such cipher is known. Note using this function links almost every +// cipher implemented by BoringSSL into the binary, whether the caller uses them +// or not. Size-conscious callers, such as client software, should not use this +// function. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbynid(int nid); @@ -198,7 +201,7 @@ OPENSSL_EXPORT int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, // // WARNING: it is unsafe to call this function with unauthenticated // ciphertext if padding is enabled. -OPENSSL_EXPORT int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, +OPENSSL_EXPORT int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); // EVP_Cipher performs a one-shot encryption/decryption operation. 
No partial @@ -380,6 +383,12 @@ OPENSSL_EXPORT int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, // processing. #define EVP_CIPH_CUSTOM_COPY 0x1000 +// EVP_CIPH_FLAG_NON_FIPS_ALLOW is meaningless. In OpenSSL it permits non-FIPS +// algorithms in FIPS mode. But BoringSSL FIPS mode doesn't prohibit algorithms +// (it's up the the caller to use the FIPS module in a fashion compliant with +// their needs). Thus this exists only to allow code to compile. +#define EVP_CIPH_FLAG_NON_FIPS_ALLOW 0 + // Deprecated functions @@ -399,11 +408,26 @@ OPENSSL_EXPORT int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv); +// EVP_CipherFinal calls |EVP_CipherFinal_ex|. +OPENSSL_EXPORT int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, + int *out_len); + +// EVP_EncryptFinal calls |EVP_EncryptFinal_ex|. +OPENSSL_EXPORT int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, + int *out_len); + +// EVP_DecryptFinal calls |EVP_DecryptFinal_ex|. +OPENSSL_EXPORT int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, uint8_t *out, + int *out_len); + // EVP_add_cipher_alias does nothing and returns one. OPENSSL_EXPORT int EVP_add_cipher_alias(const char *a, const char *b); // EVP_get_cipherbyname returns an |EVP_CIPHER| given a human readable name in -// |name|, or NULL if the name is unknown. +// |name|, or NULL if the name is unknown. Note using this function links almost +// every cipher implemented by BoringSSL into the binary, not just the ones the +// caller requests. Size-conscious callers, such as client software, should not +// use this function. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbyname(const char *name); // These AEADs are deprecated AES-GCM implementations that set @@ -425,9 +449,24 @@ OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3_ecb(void); // EVP_aes_128_cfb128 is only available in decrepit. 
OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cfb128(void); +// EVP_aes_128_cfb is an alias for |EVP_aes_128_cfb128| and is only available in +// decrepit. +OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cfb(void); + +// EVP_aes_192_cfb128 is only available in decrepit. +OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cfb128(void); + +// EVP_aes_192_cfb is an alias for |EVP_aes_192_cfb128| and is only available in +// decrepit. +OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cfb(void); + // EVP_aes_256_cfb128 is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_cfb128(void); +// EVP_aes_256_cfb is an alias for |EVP_aes_256_cfb128| and is only available in +// decrepit. +OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_cfb(void); + // EVP_bf_ecb is Blowfish in ECB mode and is only available in decrepit. OPENSSL_EXPORT const EVP_CIPHER *EVP_bf_ecb(void); @@ -535,10 +574,6 @@ struct evp_cipher_ctx_st { // final_used is non-zero if the |final| buffer contains plaintext. int final_used; - // block_mask contains |cipher->block_size| minus one. (The block size - // assumed to be a power of two.) - int block_mask; - uint8_t final[EVP_MAX_BLOCK_LENGTH]; // possible final block } /* EVP_CIPHER_CTX */; diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cpu.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cpu.h index 37c1788..a15bf48 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cpu.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_cpu.h @@ -105,32 +105,25 @@ OPENSSL_INLINE const uint32_t *OPENSSL_ia32cap_get(void) { #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) -#if defined(OPENSSL_APPLE) -// iOS builds use the static ARM configuration. +#if defined(OPENSSL_APPLE) && defined(OPENSSL_ARM) +// We do not detect any features at runtime for Apple's 32-bit ARM platforms. On +// 64-bit ARM, we detect some post-ARMv8.0 features. 
#define OPENSSL_STATIC_ARMCAP #endif #if !defined(OPENSSL_STATIC_ARMCAP) - // CRYPTO_is_NEON_capable_at_runtime returns true if the current CPU has a NEON // unit. Note that |OPENSSL_armcap_P| also exists and contains the same // information in a form that's easier for assembly to use. -OPENSSL_EXPORT char CRYPTO_is_NEON_capable_at_runtime(void); +OPENSSL_EXPORT int CRYPTO_is_NEON_capable_at_runtime(void); -// CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If -// this is known statically then it returns one immediately. -OPENSSL_INLINE int CRYPTO_is_NEON_capable(void) { - // Only statically skip the runtime lookup on aarch64. On arm, one CPU is - // known to have a broken NEON unit which is known to fail with on some - // hand-written NEON assembly. For now, continue to apply the workaround even - // when the compiler is instructed to freely emit NEON code. See - // https://crbug.com/341598 and https://crbug.com/606629. -#if (defined(__ARM_NEON__) || defined(__ARM_NEON)) && !defined(OPENSSL_ARM) - return 1; -#else - return CRYPTO_is_NEON_capable_at_runtime(); -#endif -} +// CRYPTO_is_ARMv8_AES_capable_at_runtime returns true if the current CPU +// supports the ARMv8 AES instruction. +int CRYPTO_is_ARMv8_AES_capable_at_runtime(void); + +// CRYPTO_is_ARMv8_PMULL_capable_at_runtime returns true if the current CPU +// supports the ARMv8 PMULL instruction. +int CRYPTO_is_ARMv8_PMULL_capable_at_runtime(void); #if defined(OPENSSL_ARM) // CRYPTO_has_broken_NEON returns one if the current CPU is known to have a @@ -141,43 +134,41 @@ OPENSSL_EXPORT int CRYPTO_has_broken_NEON(void); // workaround was needed. See https://crbug.com/boringssl/46. OPENSSL_EXPORT int CRYPTO_needs_hwcap2_workaround(void); #endif +#endif // !OPENSSL_STATIC_ARMCAP -// CRYPTO_is_ARMv8_AES_capable returns true if the current CPU supports the -// ARMv8 AES instruction. 
-int CRYPTO_is_ARMv8_AES_capable(void); - -// CRYPTO_is_ARMv8_PMULL_capable returns true if the current CPU supports the -// ARMv8 PMULL instruction. -int CRYPTO_is_ARMv8_PMULL_capable(void); - -#else - +// CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If +// this is known statically, it is a constant inline function. OPENSSL_INLINE int CRYPTO_is_NEON_capable(void) { -#if defined(OPENSSL_STATIC_ARMCAP_NEON) || \ - (defined(__ARM_NEON__) || defined(__ARM_NEON)) +#if defined(__ARM_NEON__) || defined(__ARM_NEON) || \ + defined(OPENSSL_STATIC_ARMCAP_NEON) return 1; -#else +#elif defined(OPENSSL_STATIC_ARMCAP) return 0; +#else + return CRYPTO_is_NEON_capable_at_runtime(); #endif } OPENSSL_INLINE int CRYPTO_is_ARMv8_AES_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_AES) || defined(__ARM_FEATURE_CRYPTO) return 1; -#else +#elif defined(OPENSSL_STATIC_ARMCAP) return 0; +#else + return CRYPTO_is_ARMv8_AES_capable_at_runtime(); #endif } OPENSSL_INLINE int CRYPTO_is_ARMv8_PMULL_capable(void) { #if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_CRYPTO) return 1; -#else +#elif defined(OPENSSL_STATIC_ARMCAP) return 0; +#else + return CRYPTO_is_ARMv8_PMULL_capable_at_runtime(); #endif } -#endif // OPENSSL_STATIC_ARMCAP #endif // OPENSSL_ARM || OPENSSL_AARCH64 #if defined(OPENSSL_PPC64LE) diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_crypto.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_crypto.h index da78b48..d2094f0 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_crypto.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_crypto.h @@ -55,20 +55,48 @@ OPENSSL_EXPORT int CRYPTO_is_confidential_build(void); // in which case it returns zero. OPENSSL_EXPORT int CRYPTO_has_asm(void); +// BORINGSSL_self_test triggers the FIPS KAT-based self tests. It returns one on +// success and zero on error. 
+OPENSSL_EXPORT int BORINGSSL_self_test(void); + +// CRYPTO_pre_sandbox_init initializes the crypto library, pre-acquiring some +// unusual resources to aid running in sandboxed environments. It is safe to +// call this function multiple times and concurrently from multiple threads. +// +// For more details on using BoringSSL in a sandboxed environment, see +// SANDBOXING.md in the source tree. +OPENSSL_EXPORT void CRYPTO_pre_sandbox_init(void); + + +// FIPS monitoring + // FIPS_mode returns zero unless BoringSSL is built with BORINGSSL_FIPS, in // which case it returns one. OPENSSL_EXPORT int FIPS_mode(void); -// BORINGSSL_self_test triggers the FIPS KAT-based self tests. It returns one on -// success and zero on error. -OPENSSL_EXPORT int BORINGSSL_self_test(void); +// fips_counter_t denotes specific APIs/algorithms. A counter is maintained for +// each in FIPS mode so that tests can be written to assert that the expected, +// FIPS functions are being called by a certain peice of code. +enum fips_counter_t { + fips_counter_evp_aes_128_gcm = 0, + fips_counter_evp_aes_256_gcm = 1, + fips_counter_evp_aes_128_ctr = 2, + fips_counter_evp_aes_256_ctr = 3, + + fips_counter_max = 3, +}; + +// FIPS_read_counter returns a counter of the number of times the specific +// function denoted by |counter| has been used. This always returns zero unless +// BoringSSL was built with BORINGSSL_FIPS_COUNTERS defined. +OPENSSL_EXPORT size_t FIPS_read_counter(enum fips_counter_t counter); // Deprecated functions. // OPENSSL_VERSION_TEXT contains a string the identifies the version of // “OpenSSL”. node.js requires a version number in this text. 
-#define OPENSSL_VERSION_TEXT "OpenSSL 1.1.0 (compatible; BoringSSL)" +#define OPENSSL_VERSION_TEXT "OpenSSL 1.1.1 (compatible; BoringSSL)" #define OPENSSL_VERSION 0 #define OPENSSL_CFLAGS 1 diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_err.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_err.h index e5e3a07..fbf3f00 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_err.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_err.h @@ -183,6 +183,11 @@ OPENSSL_EXPORT uint32_t ERR_get_error_line(const char **file, int *line); // can be printed. This is always set if |data| is non-NULL. #define ERR_FLAG_STRING 1 +// ERR_FLAG_MALLOCED is passed into |ERR_set_error_data| to indicate that |data| +// was allocated with |OPENSSL_malloc|. It is never returned from +// |ERR_get_error_line_data|. +#define ERR_FLAG_MALLOCED 2 + // ERR_get_error_line_data acts like |ERR_get_error_line|, but also returns the // error-specific data pointer and flags. The flags are a bitwise-OR of // |ERR_FLAG_*| values. The error-specific data is owned by the error queue @@ -223,11 +228,12 @@ OPENSSL_EXPORT char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len); // ERR_lib_error_string returns a string representation of the library that -// generated |packed_error|. +// generated |packed_error|, or a placeholder string is the library is +// unrecognized. OPENSSL_EXPORT const char *ERR_lib_error_string(uint32_t packed_error); // ERR_reason_error_string returns a string representation of the reason for -// |packed_error|. +// |packed_error|, or a placeholder string if the reason is unrecognized. OPENSSL_EXPORT const char *ERR_reason_error_string(uint32_t packed_error); // ERR_print_errors_callback_t is the type of a function used by @@ -407,9 +413,10 @@ OPENSSL_EXPORT char *ERR_error_string(uint32_t packed_error, char *buf); // ERR_GET_FUNC returns zero. BoringSSL errors do not report a function code. 
#define ERR_GET_FUNC(packed_error) 0 -// ERR_TXT_STRING is provided for compatibility with code that assumes that -// it's using OpenSSL. +// ERR_TXT_* are provided for compatibility with code that assumes that it's +// using OpenSSL. #define ERR_TXT_STRING ERR_FLAG_STRING +#define ERR_TXT_MALLOCED ERR_FLAG_MALLOCED // Private functions. @@ -443,6 +450,17 @@ OPENSSL_EXPORT void ERR_add_error_data(unsigned count, ...); OPENSSL_EXPORT void ERR_add_error_dataf(const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(1, 2); +// ERR_set_error_data sets the data on the most recent error to |data|, which +// must be a NUL-terminated string. |flags| must contain |ERR_FLAG_STRING|. If +// |flags| contains |ERR_FLAG_MALLOCED|, this function takes ownership of +// |data|, which must have been allocated with |OPENSSL_malloc|. Otherwise, it +// saves a copy of |data|. +// +// Note this differs from OpenSSL which, when |ERR_FLAG_MALLOCED| is unset, +// saves the pointer as-is and requires it remain valid for the lifetime of the +// address space. +OPENSSL_EXPORT void ERR_set_error_data(char *data, int flags); + // ERR_NUM_ERRORS is one more than the limit of the number of errors in the // queue. #define ERR_NUM_ERRORS 16 diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_mem.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_mem.h index 1654cf6..a1f6684 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_mem.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_mem.h @@ -101,6 +101,9 @@ OPENSSL_EXPORT int CRYPTO_memcmp(const void *a, const void *b, size_t len); // OPENSSL_hash32 implements the 32 bit, FNV-1a hash. OPENSSL_EXPORT uint32_t OPENSSL_hash32(const void *ptr, size_t len); +// OPENSSL_strhash calls |OPENSSL_hash32| on the NUL-terminated string |s|. +OPENSSL_EXPORT uint32_t OPENSSL_strhash(const char *s); + // OPENSSL_strdup has the same behaviour as strdup(3). 
OPENSSL_EXPORT char *OPENSSL_strdup(const char *s); @@ -147,9 +150,15 @@ OPENSSL_EXPORT size_t OPENSSL_strlcat(char *dst, const char *src, // Deprecated functions. -#define CRYPTO_malloc OPENSSL_malloc -#define CRYPTO_realloc OPENSSL_realloc -#define CRYPTO_free OPENSSL_free +// CRYPTO_malloc calls |OPENSSL_malloc|. |file| and |line| are ignored. +OPENSSL_EXPORT void *CRYPTO_malloc(size_t size, const char *file, int line); + +// CRYPTO_realloc calls |OPENSSL_realloc|. |file| and |line| are ignored. +OPENSSL_EXPORT void *CRYPTO_realloc(void *ptr, size_t new_size, + const char *file, int line); + +// CRYPTO_free calls |OPENSSL_free|. |file| and |line| are ignored. +OPENSSL_EXPORT void CRYPTO_free(void *ptr, const char *file, int line); // OPENSSL_clear_free calls |OPENSSL_free|. BoringSSL automatically clears all // allocations on free, but we define |OPENSSL_clear_free| for compatibility. diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_opensslconf.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_opensslconf.h index 3c6ffd8..3f1faf3 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_opensslconf.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_opensslconf.h @@ -55,6 +55,9 @@ #define OPENSSL_NO_RMD160 #define OPENSSL_NO_SCTP #define OPENSSL_NO_SEED +#define OPENSSL_NO_SM2 +#define OPENSSL_NO_SM3 +#define OPENSSL_NO_SM4 #define OPENSSL_NO_SRP #define OPENSSL_NO_SSL2 #define OPENSSL_NO_SSL3 diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_rand.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_rand.h index 4bd25ae..0239fe2 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_rand.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_rand.h @@ -36,26 +36,12 @@ OPENSSL_EXPORT void RAND_cleanup(void); // Obscure functions. #if !defined(OPENSSL_WINDOWS) -// RAND_set_urandom_fd causes the module to use a copy of |fd| for system -// randomness rather opening /dev/urandom internally. 
The caller retains -// ownership of |fd| and is at liberty to close it at any time. This is useful -// if, due to a sandbox, /dev/urandom isn't available. If used, it must be -// called before the first call to |RAND_bytes|, and it is mutually exclusive -// with |RAND_enable_fork_unsafe_buffering|. -// -// |RAND_set_urandom_fd| does not buffer any entropy, so it is safe to call -// |fork| at any time after calling |RAND_set_urandom_fd|. -OPENSSL_EXPORT void RAND_set_urandom_fd(int fd); - // RAND_enable_fork_unsafe_buffering enables efficient buffered reading of // /dev/urandom. It adds an overhead of a few KB of memory per thread. It must -// be called before the first call to |RAND_bytes| and it is mutually exclusive -// with calls to |RAND_set_urandom_fd|. +// be called before the first call to |RAND_bytes|. // -// If |fd| is non-negative then a copy of |fd| will be used rather than opening -// /dev/urandom internally. Like |RAND_set_urandom_fd|, the caller retains -// ownership of |fd|. If |fd| is negative then /dev/urandom will be opened and -// any error from open(2) crashes the address space. +// |fd| must be -1. We no longer support setting the file descriptor with this +// function. // // It has an unusual name because the buffer is unsafe across calls to |fork|. // Hence, this function should never be called by libraries. @@ -111,11 +97,14 @@ struct rand_meth_st { // RAND_SSLeay returns a pointer to a dummy |RAND_METHOD|. OPENSSL_EXPORT RAND_METHOD *RAND_SSLeay(void); +// RAND_OpenSSL returns a pointer to a dummy |RAND_METHOD|. +OPENSSL_EXPORT RAND_METHOD *RAND_OpenSSL(void); + // RAND_get_rand_method returns |RAND_SSLeay()|. OPENSSL_EXPORT const RAND_METHOD *RAND_get_rand_method(void); -// RAND_set_rand_method does nothing. -OPENSSL_EXPORT void RAND_set_rand_method(const RAND_METHOD *); +// RAND_set_rand_method returns one. 
+OPENSSL_EXPORT int RAND_set_rand_method(const RAND_METHOD *); #if defined(__cplusplus) diff --git a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_span.h b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_span.h index 38282bc..29ee97e 100644 --- a/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_span.h +++ b/Sources/CBigNumBoringSSL/include/CBigNumBoringSSL_span.h @@ -21,8 +21,9 @@ extern "C++" { +#include + #include -#include #include BSSL_NAMESPACE_BEGIN @@ -93,18 +94,6 @@ class SpanBase { template class Span : private internal::SpanBase { private: - // Heuristically test whether C is a container type that can be converted into - // a Span by checking for data() and size() member functions. - // - // TODO(davidben): Switch everything to std::enable_if_t when we remove - // support for MSVC 2015. Although we could write our own enable_if_t and MSVC - // 2015 has std::enable_if_t anyway, MSVC 2015's SFINAE implementation is - // problematic and does not work below unless we write the ::type at use. - template - using EnableIfContainer = std::enable_if< - std::is_convertible().data()), T *>::value && - std::is_integral().size())>::value>; - static const size_t npos = static_cast(-1); public: @@ -115,12 +104,27 @@ class Span : private internal::SpanBase { constexpr Span(T (&array)[N]) : Span(array, N) {} template < - typename C, typename = typename EnableIfContainer::type, + typename C, + // TODO(davidben): Switch everything to std::enable_if_t when we remove + // support for MSVC 2015. Although we could write our own enable_if_t and + // MSVC 2015 has std::enable_if_t anyway, MSVC 2015's SFINAE + // implementation is problematic and does not work below unless we write + // the ::type at use. + // + // TODO(davidben): Move this and the identical copy below into an + // EnableIfContainer alias when we drop MSVC 2015 support. MSVC 2015's + // SFINAE support cannot handle type aliases. 
+ typename = typename std::enable_if< + std::is_convertible().data()), T *>::value && + std::is_integral().size())>::value>::type, typename = typename std::enable_if::value, C>::type> Span(const C &container) : data_(container.data()), size_(container.size()) {} template < - typename C, typename = typename EnableIfContainer::type, + typename C, + typename = typename std::enable_if< + std::is_convertible().data()), T *>::value && + std::is_integral().size())>::value>::type, typename = typename std::enable_if::value, C>::type> explicit Span(C &container) : data_(container.data()), size_(container.size()) {} @@ -157,11 +161,30 @@ class Span : private internal::SpanBase { Span subspan(size_t pos = 0, size_t len = npos) const { if (pos > size_) { - abort(); // absl::Span throws an exception here. + // absl::Span throws an exception here. Note std::span and Chromium + // base::span additionally forbid pos + len being out of range, with a + // special case at npos/dynamic_extent, while absl::Span::subspan clips + // the span. For now, we align with absl::Span in case we switch to it in + // the future. 
+ abort(); } return Span(data_ + pos, std::min(size_ - pos, len)); } + Span first(size_t len) { + if (len > size_) { + abort(); + } + return Span(data_, len); + } + + Span last(size_t len) { + if (len > size_) { + abort(); + } + return Span(data_ + size_ - len, len); + } + private: T *data_; size_t size_; diff --git a/Sources/CBigNumBoringSSL/include/boringssl_prefix_symbols_nasm.inc b/Sources/CBigNumBoringSSL/include/boringssl_prefix_symbols_nasm.inc index 581531e..fdaeac0 100644 --- a/Sources/CBigNumBoringSSL/include/boringssl_prefix_symbols_nasm.inc +++ b/Sources/CBigNumBoringSSL/include/boringssl_prefix_symbols_nasm.inc @@ -76,12 +76,14 @@ %xdefine _BIO_reset _ %+ BORINGSSL_PREFIX %+ _BIO_reset %xdefine _BIO_rw_filename _ %+ BORINGSSL_PREFIX %+ _BIO_rw_filename %xdefine _BIO_s_file _ %+ BORINGSSL_PREFIX %+ _BIO_s_file +%xdefine _BIO_seek _ %+ BORINGSSL_PREFIX %+ _BIO_seek %xdefine _BIO_set_close _ %+ BORINGSSL_PREFIX %+ _BIO_set_close %xdefine _BIO_set_data _ %+ BORINGSSL_PREFIX %+ _BIO_set_data %xdefine _BIO_set_flags _ %+ BORINGSSL_PREFIX %+ _BIO_set_flags %xdefine _BIO_set_fp _ %+ BORINGSSL_PREFIX %+ _BIO_set_fp %xdefine _BIO_set_init _ %+ BORINGSSL_PREFIX %+ _BIO_set_init %xdefine _BIO_set_retry_read _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_read +%xdefine _BIO_set_retry_reason _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine _BIO_set_retry_special _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_special %xdefine _BIO_set_retry_write _ %+ BORINGSSL_PREFIX %+ _BIO_set_retry_write %xdefine _BIO_set_shutdown _ %+ BORINGSSL_PREFIX %+ _BIO_set_shutdown @@ -91,6 +93,7 @@ %xdefine _BIO_should_retry _ %+ BORINGSSL_PREFIX %+ _BIO_should_retry %xdefine _BIO_should_write _ %+ BORINGSSL_PREFIX %+ _BIO_should_write %xdefine _BIO_snprintf _ %+ BORINGSSL_PREFIX %+ _BIO_snprintf +%xdefine _BIO_tell _ %+ BORINGSSL_PREFIX %+ _BIO_tell %xdefine _BIO_test_flags _ %+ BORINGSSL_PREFIX %+ _BIO_test_flags %xdefine _BIO_up_ref _ %+ BORINGSSL_PREFIX %+ _BIO_up_ref %xdefine _BIO_vfree _ 
%+ BORINGSSL_PREFIX %+ _BIO_vfree @@ -105,6 +108,8 @@ %xdefine _BN_CTX_new _ %+ BORINGSSL_PREFIX %+ _BN_CTX_new %xdefine _BN_CTX_start _ %+ BORINGSSL_PREFIX %+ _BN_CTX_start %xdefine _BN_GENCB_call _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_call +%xdefine _BN_GENCB_free _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_free +%xdefine _BN_GENCB_new _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_new %xdefine _BN_GENCB_set _ %+ BORINGSSL_PREFIX %+ _BN_GENCB_set %xdefine _BN_MONT_CTX_copy _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_copy %xdefine _BN_MONT_CTX_free _ %+ BORINGSSL_PREFIX %+ _BN_MONT_CTX_free @@ -236,6 +241,7 @@ %xdefine _CBB_add_u64le _ %+ BORINGSSL_PREFIX %+ _CBB_add_u64le %xdefine _CBB_add_u8 _ %+ BORINGSSL_PREFIX %+ _CBB_add_u8 %xdefine _CBB_add_u8_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBB_add_u8_length_prefixed +%xdefine _CBB_add_zeros _ %+ BORINGSSL_PREFIX %+ _CBB_add_zeros %xdefine _CBB_cleanup _ %+ BORINGSSL_PREFIX %+ _CBB_cleanup %xdefine _CBB_data _ %+ BORINGSSL_PREFIX %+ _CBB_data %xdefine _CBB_did_write _ %+ BORINGSSL_PREFIX %+ _CBB_did_write @@ -281,8 +287,11 @@ %xdefine _CBS_get_u64le _ %+ BORINGSSL_PREFIX %+ _CBS_get_u64le %xdefine _CBS_get_u8 _ %+ BORINGSSL_PREFIX %+ _CBS_get_u8 %xdefine _CBS_get_u8_length_prefixed _ %+ BORINGSSL_PREFIX %+ _CBS_get_u8_length_prefixed +%xdefine _CBS_get_until_first _ %+ BORINGSSL_PREFIX %+ _CBS_get_until_first %xdefine _CBS_init _ %+ BORINGSSL_PREFIX %+ _CBS_init +%xdefine _CBS_is_unsigned_asn1_integer _ %+ BORINGSSL_PREFIX %+ _CBS_is_unsigned_asn1_integer %xdefine _CBS_is_valid_asn1_bitstring _ %+ BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_bitstring +%xdefine _CBS_is_valid_asn1_integer _ %+ BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_integer %xdefine _CBS_len _ %+ BORINGSSL_PREFIX %+ _CBS_len %xdefine _CBS_mem_equal _ %+ BORINGSSL_PREFIX %+ _CBS_mem_equal %xdefine _CBS_peek_asn1_tag _ %+ BORINGSSL_PREFIX %+ _CBS_peek_asn1_tag @@ -315,6 +324,7 @@ %xdefine _CRYPTO_ctr128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt %xdefine 
_CRYPTO_ctr128_encrypt_ctr32 _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 %xdefine _CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing _ %+ BORINGSSL_PREFIX %+ _CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing +%xdefine _CRYPTO_free _ %+ BORINGSSL_PREFIX %+ _CRYPTO_free %xdefine _CRYPTO_free_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_free_ex_data %xdefine _CRYPTO_gcm128_aad _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_aad %xdefine _CRYPTO_gcm128_decrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_gcm128_decrypt @@ -336,16 +346,20 @@ %xdefine _CRYPTO_get_thread_local _ %+ BORINGSSL_PREFIX %+ _CRYPTO_get_thread_local %xdefine _CRYPTO_ghash_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ghash_init %xdefine _CRYPTO_has_asm _ %+ BORINGSSL_PREFIX %+ _CRYPTO_has_asm +%xdefine _CRYPTO_init_sysrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_init_sysrand %xdefine _CRYPTO_is_confidential_build _ %+ BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine _CRYPTO_library_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_library_init +%xdefine _CRYPTO_malloc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_malloc %xdefine _CRYPTO_malloc_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_malloc_init %xdefine _CRYPTO_memcmp _ %+ BORINGSSL_PREFIX %+ _CRYPTO_memcmp %xdefine _CRYPTO_new_ex_data _ %+ BORINGSSL_PREFIX %+ _CRYPTO_new_ex_data %xdefine _CRYPTO_num_locks _ %+ BORINGSSL_PREFIX %+ _CRYPTO_num_locks %xdefine _CRYPTO_ofb128_encrypt _ %+ BORINGSSL_PREFIX %+ _CRYPTO_ofb128_encrypt %xdefine _CRYPTO_once _ %+ BORINGSSL_PREFIX %+ _CRYPTO_once +%xdefine _CRYPTO_pre_sandbox_init _ %+ BORINGSSL_PREFIX %+ _CRYPTO_pre_sandbox_init %xdefine _CRYPTO_rdrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_rdrand %xdefine _CRYPTO_rdrand_multiple8_buf _ %+ BORINGSSL_PREFIX %+ _CRYPTO_rdrand_multiple8_buf +%xdefine _CRYPTO_realloc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_realloc %xdefine _CRYPTO_refcount_dec_and_test_zero _ %+ BORINGSSL_PREFIX %+ _CRYPTO_refcount_dec_and_test_zero %xdefine _CRYPTO_refcount_inc _ %+ BORINGSSL_PREFIX %+ _CRYPTO_refcount_inc %xdefine 
_CRYPTO_set_add_lock_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_add_lock_callback @@ -357,6 +371,7 @@ %xdefine _CRYPTO_set_locking_callback _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_locking_callback %xdefine _CRYPTO_set_thread_local _ %+ BORINGSSL_PREFIX %+ _CRYPTO_set_thread_local %xdefine _CRYPTO_sysrand _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand +%xdefine _CRYPTO_sysrand_for_seed _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand_for_seed %xdefine _CRYPTO_sysrand_if_available _ %+ BORINGSSL_PREFIX %+ _CRYPTO_sysrand_if_available %xdefine _CTR_DRBG_clear _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_clear %xdefine _CTR_DRBG_generate _ %+ BORINGSSL_PREFIX %+ _CTR_DRBG_generate @@ -398,6 +413,7 @@ %xdefine _ERR_remove_thread_state _ %+ BORINGSSL_PREFIX %+ _ERR_remove_thread_state %xdefine _ERR_restore_state _ %+ BORINGSSL_PREFIX %+ _ERR_restore_state %xdefine _ERR_save_state _ %+ BORINGSSL_PREFIX %+ _ERR_save_state +%xdefine _ERR_set_error_data _ %+ BORINGSSL_PREFIX %+ _ERR_set_error_data %xdefine _ERR_set_mark _ %+ BORINGSSL_PREFIX %+ _ERR_set_mark %xdefine _EVP_CIPHER_CTX_block_size _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_block_size %xdefine _EVP_CIPHER_CTX_cipher _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_cipher @@ -426,24 +442,29 @@ %xdefine _EVP_CIPHER_mode _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_mode %xdefine _EVP_CIPHER_nid _ %+ BORINGSSL_PREFIX %+ _EVP_CIPHER_nid %xdefine _EVP_Cipher _ %+ BORINGSSL_PREFIX %+ _EVP_Cipher +%xdefine _EVP_CipherFinal _ %+ BORINGSSL_PREFIX %+ _EVP_CipherFinal %xdefine _EVP_CipherFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_CipherFinal_ex %xdefine _EVP_CipherInit _ %+ BORINGSSL_PREFIX %+ _EVP_CipherInit %xdefine _EVP_CipherInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_CipherInit_ex %xdefine _EVP_CipherUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_CipherUpdate +%xdefine _EVP_DecryptFinal _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptFinal %xdefine _EVP_DecryptFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptFinal_ex %xdefine _EVP_DecryptInit _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptInit %xdefine 
_EVP_DecryptInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptInit_ex %xdefine _EVP_DecryptUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_DecryptUpdate +%xdefine _EVP_EncryptFinal _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptFinal %xdefine _EVP_EncryptFinal_ex _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptFinal_ex %xdefine _EVP_EncryptInit _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptInit %xdefine _EVP_EncryptInit_ex _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptInit_ex %xdefine _EVP_EncryptUpdate _ %+ BORINGSSL_PREFIX %+ _EVP_EncryptUpdate %xdefine _EVP_add_cipher_alias _ %+ BORINGSSL_PREFIX %+ _EVP_add_cipher_alias %xdefine _EVP_aead_aes_128_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm +%xdefine _EVP_aead_aes_128_gcm_randnonce _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_randnonce %xdefine _EVP_aead_aes_128_gcm_tls12 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls12 %xdefine _EVP_aead_aes_128_gcm_tls13 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls13 %xdefine _EVP_aead_aes_192_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_192_gcm %xdefine _EVP_aead_aes_256_gcm _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm +%xdefine _EVP_aead_aes_256_gcm_randnonce _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_randnonce %xdefine _EVP_aead_aes_256_gcm_tls12 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls12 %xdefine _EVP_aead_aes_256_gcm_tls13 _ %+ BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls13 %xdefine _EVP_aes_128_cbc _ %+ BORINGSSL_PREFIX %+ _EVP_aes_128_cbc @@ -477,6 +498,7 @@ %xdefine _OPENSSL_realloc _ %+ BORINGSSL_PREFIX %+ _OPENSSL_realloc %xdefine _OPENSSL_strcasecmp _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strcasecmp %xdefine _OPENSSL_strdup _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strdup +%xdefine _OPENSSL_strhash _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strhash %xdefine _OPENSSL_strlcat _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strlcat %xdefine _OPENSSL_strlcpy _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strlcpy %xdefine _OPENSSL_strncasecmp _ %+ BORINGSSL_PREFIX %+ _OPENSSL_strncasecmp @@ -485,6 +507,7 @@ %xdefine _OPENSSL_tolower _ %+ 
BORINGSSL_PREFIX %+ _OPENSSL_tolower %xdefine _OpenSSL_version _ %+ BORINGSSL_PREFIX %+ _OpenSSL_version %xdefine _OpenSSL_version_num _ %+ BORINGSSL_PREFIX %+ _OpenSSL_version_num +%xdefine _RAND_OpenSSL _ %+ BORINGSSL_PREFIX %+ _RAND_OpenSSL %xdefine _RAND_SSLeay _ %+ BORINGSSL_PREFIX %+ _RAND_SSLeay %xdefine _RAND_add _ %+ BORINGSSL_PREFIX %+ _RAND_add %xdefine _RAND_bytes _ %+ BORINGSSL_PREFIX %+ _RAND_bytes @@ -499,7 +522,6 @@ %xdefine _RAND_pseudo_bytes _ %+ BORINGSSL_PREFIX %+ _RAND_pseudo_bytes %xdefine _RAND_seed _ %+ BORINGSSL_PREFIX %+ _RAND_seed %xdefine _RAND_set_rand_method _ %+ BORINGSSL_PREFIX %+ _RAND_set_rand_method -%xdefine _RAND_set_urandom_fd _ %+ BORINGSSL_PREFIX %+ _RAND_set_urandom_fd %xdefine _RAND_status _ %+ BORINGSSL_PREFIX %+ _RAND_status %xdefine _RSAZ_1024_mod_exp_avx2 _ %+ BORINGSSL_PREFIX %+ _RSAZ_1024_mod_exp_avx2 %xdefine _SSLeay _ %+ BORINGSSL_PREFIX %+ _SSLeay @@ -578,6 +600,7 @@ %xdefine _bn_scatter5 _ %+ BORINGSSL_PREFIX %+ _bn_scatter5 %xdefine _bn_select_words _ %+ BORINGSSL_PREFIX %+ _bn_select_words %xdefine _bn_set_minimal_width _ %+ BORINGSSL_PREFIX %+ _bn_set_minimal_width +%xdefine _bn_set_static_words _ %+ BORINGSSL_PREFIX %+ _bn_set_static_words %xdefine _bn_set_words _ %+ BORINGSSL_PREFIX %+ _bn_set_words %xdefine _bn_sqr8x_internal _ %+ BORINGSSL_PREFIX %+ _bn_sqr8x_internal %xdefine _bn_sqr_comba4 _ %+ BORINGSSL_PREFIX %+ _bn_sqr_comba4 @@ -728,12 +751,14 @@ %xdefine BIO_reset BORINGSSL_PREFIX %+ _BIO_reset %xdefine BIO_rw_filename BORINGSSL_PREFIX %+ _BIO_rw_filename %xdefine BIO_s_file BORINGSSL_PREFIX %+ _BIO_s_file +%xdefine BIO_seek BORINGSSL_PREFIX %+ _BIO_seek %xdefine BIO_set_close BORINGSSL_PREFIX %+ _BIO_set_close %xdefine BIO_set_data BORINGSSL_PREFIX %+ _BIO_set_data %xdefine BIO_set_flags BORINGSSL_PREFIX %+ _BIO_set_flags %xdefine BIO_set_fp BORINGSSL_PREFIX %+ _BIO_set_fp %xdefine BIO_set_init BORINGSSL_PREFIX %+ _BIO_set_init %xdefine BIO_set_retry_read BORINGSSL_PREFIX %+ _BIO_set_retry_read 
+%xdefine BIO_set_retry_reason BORINGSSL_PREFIX %+ _BIO_set_retry_reason %xdefine BIO_set_retry_special BORINGSSL_PREFIX %+ _BIO_set_retry_special %xdefine BIO_set_retry_write BORINGSSL_PREFIX %+ _BIO_set_retry_write %xdefine BIO_set_shutdown BORINGSSL_PREFIX %+ _BIO_set_shutdown @@ -743,6 +768,7 @@ %xdefine BIO_should_retry BORINGSSL_PREFIX %+ _BIO_should_retry %xdefine BIO_should_write BORINGSSL_PREFIX %+ _BIO_should_write %xdefine BIO_snprintf BORINGSSL_PREFIX %+ _BIO_snprintf +%xdefine BIO_tell BORINGSSL_PREFIX %+ _BIO_tell %xdefine BIO_test_flags BORINGSSL_PREFIX %+ _BIO_test_flags %xdefine BIO_up_ref BORINGSSL_PREFIX %+ _BIO_up_ref %xdefine BIO_vfree BORINGSSL_PREFIX %+ _BIO_vfree @@ -757,6 +783,8 @@ %xdefine BN_CTX_new BORINGSSL_PREFIX %+ _BN_CTX_new %xdefine BN_CTX_start BORINGSSL_PREFIX %+ _BN_CTX_start %xdefine BN_GENCB_call BORINGSSL_PREFIX %+ _BN_GENCB_call +%xdefine BN_GENCB_free BORINGSSL_PREFIX %+ _BN_GENCB_free +%xdefine BN_GENCB_new BORINGSSL_PREFIX %+ _BN_GENCB_new %xdefine BN_GENCB_set BORINGSSL_PREFIX %+ _BN_GENCB_set %xdefine BN_MONT_CTX_copy BORINGSSL_PREFIX %+ _BN_MONT_CTX_copy %xdefine BN_MONT_CTX_free BORINGSSL_PREFIX %+ _BN_MONT_CTX_free @@ -888,6 +916,7 @@ %xdefine CBB_add_u64le BORINGSSL_PREFIX %+ _CBB_add_u64le %xdefine CBB_add_u8 BORINGSSL_PREFIX %+ _CBB_add_u8 %xdefine CBB_add_u8_length_prefixed BORINGSSL_PREFIX %+ _CBB_add_u8_length_prefixed +%xdefine CBB_add_zeros BORINGSSL_PREFIX %+ _CBB_add_zeros %xdefine CBB_cleanup BORINGSSL_PREFIX %+ _CBB_cleanup %xdefine CBB_data BORINGSSL_PREFIX %+ _CBB_data %xdefine CBB_did_write BORINGSSL_PREFIX %+ _CBB_did_write @@ -933,8 +962,11 @@ %xdefine CBS_get_u64le BORINGSSL_PREFIX %+ _CBS_get_u64le %xdefine CBS_get_u8 BORINGSSL_PREFIX %+ _CBS_get_u8 %xdefine CBS_get_u8_length_prefixed BORINGSSL_PREFIX %+ _CBS_get_u8_length_prefixed +%xdefine CBS_get_until_first BORINGSSL_PREFIX %+ _CBS_get_until_first %xdefine CBS_init BORINGSSL_PREFIX %+ _CBS_init +%xdefine CBS_is_unsigned_asn1_integer 
BORINGSSL_PREFIX %+ _CBS_is_unsigned_asn1_integer %xdefine CBS_is_valid_asn1_bitstring BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_bitstring +%xdefine CBS_is_valid_asn1_integer BORINGSSL_PREFIX %+ _CBS_is_valid_asn1_integer %xdefine CBS_len BORINGSSL_PREFIX %+ _CBS_len %xdefine CBS_mem_equal BORINGSSL_PREFIX %+ _CBS_mem_equal %xdefine CBS_peek_asn1_tag BORINGSSL_PREFIX %+ _CBS_peek_asn1_tag @@ -967,6 +999,7 @@ %xdefine CRYPTO_ctr128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt %xdefine CRYPTO_ctr128_encrypt_ctr32 BORINGSSL_PREFIX %+ _CRYPTO_ctr128_encrypt_ctr32 %xdefine CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing BORINGSSL_PREFIX %+ _CRYPTO_fork_detect_ignore_madv_wipeonfork_for_testing +%xdefine CRYPTO_free BORINGSSL_PREFIX %+ _CRYPTO_free %xdefine CRYPTO_free_ex_data BORINGSSL_PREFIX %+ _CRYPTO_free_ex_data %xdefine CRYPTO_gcm128_aad BORINGSSL_PREFIX %+ _CRYPTO_gcm128_aad %xdefine CRYPTO_gcm128_decrypt BORINGSSL_PREFIX %+ _CRYPTO_gcm128_decrypt @@ -988,16 +1021,20 @@ %xdefine CRYPTO_get_thread_local BORINGSSL_PREFIX %+ _CRYPTO_get_thread_local %xdefine CRYPTO_ghash_init BORINGSSL_PREFIX %+ _CRYPTO_ghash_init %xdefine CRYPTO_has_asm BORINGSSL_PREFIX %+ _CRYPTO_has_asm +%xdefine CRYPTO_init_sysrand BORINGSSL_PREFIX %+ _CRYPTO_init_sysrand %xdefine CRYPTO_is_confidential_build BORINGSSL_PREFIX %+ _CRYPTO_is_confidential_build %xdefine CRYPTO_library_init BORINGSSL_PREFIX %+ _CRYPTO_library_init +%xdefine CRYPTO_malloc BORINGSSL_PREFIX %+ _CRYPTO_malloc %xdefine CRYPTO_malloc_init BORINGSSL_PREFIX %+ _CRYPTO_malloc_init %xdefine CRYPTO_memcmp BORINGSSL_PREFIX %+ _CRYPTO_memcmp %xdefine CRYPTO_new_ex_data BORINGSSL_PREFIX %+ _CRYPTO_new_ex_data %xdefine CRYPTO_num_locks BORINGSSL_PREFIX %+ _CRYPTO_num_locks %xdefine CRYPTO_ofb128_encrypt BORINGSSL_PREFIX %+ _CRYPTO_ofb128_encrypt %xdefine CRYPTO_once BORINGSSL_PREFIX %+ _CRYPTO_once +%xdefine CRYPTO_pre_sandbox_init BORINGSSL_PREFIX %+ _CRYPTO_pre_sandbox_init %xdefine CRYPTO_rdrand BORINGSSL_PREFIX %+ 
_CRYPTO_rdrand %xdefine CRYPTO_rdrand_multiple8_buf BORINGSSL_PREFIX %+ _CRYPTO_rdrand_multiple8_buf +%xdefine CRYPTO_realloc BORINGSSL_PREFIX %+ _CRYPTO_realloc %xdefine CRYPTO_refcount_dec_and_test_zero BORINGSSL_PREFIX %+ _CRYPTO_refcount_dec_and_test_zero %xdefine CRYPTO_refcount_inc BORINGSSL_PREFIX %+ _CRYPTO_refcount_inc %xdefine CRYPTO_set_add_lock_callback BORINGSSL_PREFIX %+ _CRYPTO_set_add_lock_callback @@ -1009,6 +1046,7 @@ %xdefine CRYPTO_set_locking_callback BORINGSSL_PREFIX %+ _CRYPTO_set_locking_callback %xdefine CRYPTO_set_thread_local BORINGSSL_PREFIX %+ _CRYPTO_set_thread_local %xdefine CRYPTO_sysrand BORINGSSL_PREFIX %+ _CRYPTO_sysrand +%xdefine CRYPTO_sysrand_for_seed BORINGSSL_PREFIX %+ _CRYPTO_sysrand_for_seed %xdefine CRYPTO_sysrand_if_available BORINGSSL_PREFIX %+ _CRYPTO_sysrand_if_available %xdefine CTR_DRBG_clear BORINGSSL_PREFIX %+ _CTR_DRBG_clear %xdefine CTR_DRBG_generate BORINGSSL_PREFIX %+ _CTR_DRBG_generate @@ -1050,6 +1088,7 @@ %xdefine ERR_remove_thread_state BORINGSSL_PREFIX %+ _ERR_remove_thread_state %xdefine ERR_restore_state BORINGSSL_PREFIX %+ _ERR_restore_state %xdefine ERR_save_state BORINGSSL_PREFIX %+ _ERR_save_state +%xdefine ERR_set_error_data BORINGSSL_PREFIX %+ _ERR_set_error_data %xdefine ERR_set_mark BORINGSSL_PREFIX %+ _ERR_set_mark %xdefine EVP_CIPHER_CTX_block_size BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_block_size %xdefine EVP_CIPHER_CTX_cipher BORINGSSL_PREFIX %+ _EVP_CIPHER_CTX_cipher @@ -1078,24 +1117,29 @@ %xdefine EVP_CIPHER_mode BORINGSSL_PREFIX %+ _EVP_CIPHER_mode %xdefine EVP_CIPHER_nid BORINGSSL_PREFIX %+ _EVP_CIPHER_nid %xdefine EVP_Cipher BORINGSSL_PREFIX %+ _EVP_Cipher +%xdefine EVP_CipherFinal BORINGSSL_PREFIX %+ _EVP_CipherFinal %xdefine EVP_CipherFinal_ex BORINGSSL_PREFIX %+ _EVP_CipherFinal_ex %xdefine EVP_CipherInit BORINGSSL_PREFIX %+ _EVP_CipherInit %xdefine EVP_CipherInit_ex BORINGSSL_PREFIX %+ _EVP_CipherInit_ex %xdefine EVP_CipherUpdate BORINGSSL_PREFIX %+ _EVP_CipherUpdate +%xdefine 
EVP_DecryptFinal BORINGSSL_PREFIX %+ _EVP_DecryptFinal %xdefine EVP_DecryptFinal_ex BORINGSSL_PREFIX %+ _EVP_DecryptFinal_ex %xdefine EVP_DecryptInit BORINGSSL_PREFIX %+ _EVP_DecryptInit %xdefine EVP_DecryptInit_ex BORINGSSL_PREFIX %+ _EVP_DecryptInit_ex %xdefine EVP_DecryptUpdate BORINGSSL_PREFIX %+ _EVP_DecryptUpdate +%xdefine EVP_EncryptFinal BORINGSSL_PREFIX %+ _EVP_EncryptFinal %xdefine EVP_EncryptFinal_ex BORINGSSL_PREFIX %+ _EVP_EncryptFinal_ex %xdefine EVP_EncryptInit BORINGSSL_PREFIX %+ _EVP_EncryptInit %xdefine EVP_EncryptInit_ex BORINGSSL_PREFIX %+ _EVP_EncryptInit_ex %xdefine EVP_EncryptUpdate BORINGSSL_PREFIX %+ _EVP_EncryptUpdate %xdefine EVP_add_cipher_alias BORINGSSL_PREFIX %+ _EVP_add_cipher_alias %xdefine EVP_aead_aes_128_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm +%xdefine EVP_aead_aes_128_gcm_randnonce BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_randnonce %xdefine EVP_aead_aes_128_gcm_tls12 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls12 %xdefine EVP_aead_aes_128_gcm_tls13 BORINGSSL_PREFIX %+ _EVP_aead_aes_128_gcm_tls13 %xdefine EVP_aead_aes_192_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_192_gcm %xdefine EVP_aead_aes_256_gcm BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm +%xdefine EVP_aead_aes_256_gcm_randnonce BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_randnonce %xdefine EVP_aead_aes_256_gcm_tls12 BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls12 %xdefine EVP_aead_aes_256_gcm_tls13 BORINGSSL_PREFIX %+ _EVP_aead_aes_256_gcm_tls13 %xdefine EVP_aes_128_cbc BORINGSSL_PREFIX %+ _EVP_aes_128_cbc @@ -1129,6 +1173,7 @@ %xdefine OPENSSL_realloc BORINGSSL_PREFIX %+ _OPENSSL_realloc %xdefine OPENSSL_strcasecmp BORINGSSL_PREFIX %+ _OPENSSL_strcasecmp %xdefine OPENSSL_strdup BORINGSSL_PREFIX %+ _OPENSSL_strdup +%xdefine OPENSSL_strhash BORINGSSL_PREFIX %+ _OPENSSL_strhash %xdefine OPENSSL_strlcat BORINGSSL_PREFIX %+ _OPENSSL_strlcat %xdefine OPENSSL_strlcpy BORINGSSL_PREFIX %+ _OPENSSL_strlcpy %xdefine OPENSSL_strncasecmp BORINGSSL_PREFIX %+ _OPENSSL_strncasecmp @@ 
-1137,6 +1182,7 @@ %xdefine OPENSSL_tolower BORINGSSL_PREFIX %+ _OPENSSL_tolower %xdefine OpenSSL_version BORINGSSL_PREFIX %+ _OpenSSL_version %xdefine OpenSSL_version_num BORINGSSL_PREFIX %+ _OpenSSL_version_num +%xdefine RAND_OpenSSL BORINGSSL_PREFIX %+ _RAND_OpenSSL %xdefine RAND_SSLeay BORINGSSL_PREFIX %+ _RAND_SSLeay %xdefine RAND_add BORINGSSL_PREFIX %+ _RAND_add %xdefine RAND_bytes BORINGSSL_PREFIX %+ _RAND_bytes @@ -1151,7 +1197,6 @@ %xdefine RAND_pseudo_bytes BORINGSSL_PREFIX %+ _RAND_pseudo_bytes %xdefine RAND_seed BORINGSSL_PREFIX %+ _RAND_seed %xdefine RAND_set_rand_method BORINGSSL_PREFIX %+ _RAND_set_rand_method -%xdefine RAND_set_urandom_fd BORINGSSL_PREFIX %+ _RAND_set_urandom_fd %xdefine RAND_status BORINGSSL_PREFIX %+ _RAND_status %xdefine RSAZ_1024_mod_exp_avx2 BORINGSSL_PREFIX %+ _RSAZ_1024_mod_exp_avx2 %xdefine SSLeay BORINGSSL_PREFIX %+ _SSLeay @@ -1230,6 +1275,7 @@ %xdefine bn_scatter5 BORINGSSL_PREFIX %+ _bn_scatter5 %xdefine bn_select_words BORINGSSL_PREFIX %+ _bn_select_words %xdefine bn_set_minimal_width BORINGSSL_PREFIX %+ _bn_set_minimal_width +%xdefine bn_set_static_words BORINGSSL_PREFIX %+ _bn_set_static_words %xdefine bn_set_words BORINGSSL_PREFIX %+ _bn_set_words %xdefine bn_sqr8x_internal BORINGSSL_PREFIX %+ _bn_sqr8x_internal %xdefine bn_sqr_comba4 BORINGSSL_PREFIX %+ _bn_sqr_comba4