// llvm-project/clang/test/CodeGen/aarch64-neon-sha3.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
// RUN: -target-feature +sha3 -S -emit-llvm -o - %s \
// RUN: | FileCheck %s
// REQUIRES: aarch64-registered-target || arm-registered-target
#include <arm_neon.h>
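// Each test below applies one SHA3/SHA512 NEON intrinsic from <arm_neon.h> and
// checks that Clang lowers it to the matching llvm.aarch64.crypto.* intrinsic.
// The SHA512 hash and message-schedule intrinsics (vsha512hq_u64,
// vsha512h2q_u64, vsha512su0q_u64, vsha512su1q_u64) come first.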
// CHECK-LABEL: @test_vsha512h(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h
//
void test_vsha512h(uint64x2_t hash_ed, uint64x2_t hash_gf, uint64x2_t kwh_kwh2) {
  uint64x2_t result = vsha512hq_u64(hash_ed, hash_gf, kwh_kwh2);
}
// CHECK-LABEL: @test_vsha512h2(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h2
//
void test_vsha512h2(uint64x2_t sum_ab, uint64x2_t hash_c_, uint64x2_t hash_ab) {
  uint64x2_t result = vsha512h2q_u64(sum_ab, hash_c_, hash_ab);
}
// CHECK-LABEL: @test_vsha512su0(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su0
//
void test_vsha512su0(uint64x2_t w0_1, uint64x2_t w2_) {
  uint64x2_t result = vsha512su0q_u64(w0_1, w2_);
}
// CHECK-LABEL: @test_vsha512su1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su1
//
void test_vsha512su1(uint64x2_t s01_s02, uint64x2_t w14_15, uint64x2_t w9_10) {
  uint64x2_t result = vsha512su1q_u64(s01_s02, w14_15, w9_10);
}
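// vrax1q_u64 exercises the RAX1 (rotate-and-XOR) operation from the SHA3
// extension; it should lower to llvm.aarch64.crypto.rax1.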
// CHECK-LABEL: @test_vrax1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.rax1
//
void test_vrax1(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vrax1q_u64(a, b);
}
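// The third argument of vxarq_u64 is the rotate amount for the XAR
// (XOR-and-rotate) operation and must be an integer constant; 10 here is a
// representative in-range value.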
// CHECK-LABEL: @test_xar(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.xar
//
void test_xar(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vxarq_u64(a, b, 10);
}
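// vbcaxq_* (BCAX, bit clear and XOR) is tested for every 128-bit integer
// element type; unsigned variants lower to bcaxu.* and signed variants to
// bcaxs.* intrinsics of the corresponding vector width.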
// CHECK-LABEL: @test_vbcax_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxu.v16i8
//
void test_vbcax_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = vbcaxq_u8(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxu.v8i16
//
void test_vbcax_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = vbcaxq_u16(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxu.v4i32
//
void test_vbcax_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = vbcaxq_u32(a, b, c);
}
// CHECK-LABEL: @test_vbcax_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxu.v2i64
//
void test_vbcax_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = vbcaxq_u64(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxs.v16i8
//
void test_vbcax_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = vbcaxq_s8(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxs.v8i16
//
void test_vbcax_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = vbcaxq_s16(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxs.v4i32
//
void test_vbcax_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = vbcaxq_s32(a, b, c);
}
// CHECK-LABEL: @test_vbcax_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxs.v2i64
//
void test_vbcax_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = vbcaxq_s64(a, b, c);
}
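// veor3q_* (EOR3, three-way XOR) mirrors the BCAX coverage: one test per
// 128-bit integer element type, lowering to the matching eor3u.*/eor3s.*
// intrinsic.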
// CHECK-LABEL: @test_veor3_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3u.v16i8
//
void test_veor3_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = veor3q_u8(a, b, c);
}
// CHECK-LABEL: @test_veor3_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3u.v8i16
//
void test_veor3_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = veor3q_u16(a, b, c);
}
// CHECK-LABEL: @test_veor3_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3u.v4i32
//
void test_veor3_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = veor3q_u32(a, b, c);
}
// CHECK-LABEL: @test_veor3_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3u.v2i64
//
void test_veor3_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = veor3q_u64(a, b, c);
}
// CHECK-LABEL: @test_veor3_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3s.v16i8
//
void test_veor3_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = veor3q_s8(a, b, c);
}
// CHECK-LABEL: @test_veor3_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3s.v8i16
//
void test_veor3_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = veor3q_s16(a, b, c);
}
// CHECK-LABEL: @test_veor3_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3s.v4i32
//
void test_veor3_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = veor3q_s32(a, b, c);
}
// CHECK-LABEL: @test_veor3_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3s.v2i64
//
void test_veor3_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = veor3q_s64(a, b, c);
}