[WebAssembly] Fix constness of pointer params to load intrinsics

Update the SIMD builtin load functions to take pointers to const data
and update the intrinsics themselves to not cast away constness.

Differential Revision: https://reviews.llvm.org/D101884

parent 627a526955
commit 602f318cfd
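For context, a minimal sketch of the user-facing effect (the function name here is hypothetical): with the old signatures, wasm_simd128.h cast the caller's pointer to a non-const type, so reading SIMD data from const memory produced a -Wcast-qual warning even though the load itself is perfectly valid.

#include <wasm_simd128.h>

// Load one byte from read-only memory into lane 0 of `vec`.
// Previously this warned under -Wcast-qual because the header's macro
// cast `src` to plain `signed char *`; after this change it is clean.
v128_t load_first_byte(const uint8_t *src, v128_t vec) {
  return wasm_v128_load8_lane(src, vec, 0);
}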
@@ -194,13 +194,13 @@ TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "si
 TARGET_BUILTIN(__builtin_wasm_demote_zero_f64x2_f32x4, "V4fV2d", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_promote_low_f32x4_f64x2, "V2dV4f", "nc", "simd128")

-TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4iiC*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLiC*", "n", "simd128")

-TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*V16ScIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*V8sIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*V4iIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*V2LLiIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScScC*V16ScIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ssC*V8sIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4iiC*V4iIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLiC*V2LLiIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store8_lane, "vSc*V16ScIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store16_lane, "vs*V8sIi", "n", "simd128")
 TARGET_BUILTIN(__builtin_wasm_store32_lane, "vi*V4iIi", "n", "simd128")
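A note on the signature strings above, which use Clang's standard Builtins.def type encoding (nothing this patch invents): "V4iiC*" reads as a vector-of-4-int return value followed by one parameter, iC*, i.e. a pointer to const int. The added C qualifier on the pointee is the entire change. A sketch of what that means in C terms (hypothetical names):

typedef int i32x4_t __attribute__((__vector_size__(16)));

// "V4iiC*": returns a 4 x i32 vector, takes `const int *`.
// The builtin loads *p into lane 0 and zeroes the other three lanes.
i32x4_t load_one(const int *p) {
  return __builtin_wasm_load32_zero(p);
}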
@@ -169,18 +169,19 @@ wasm_v128_load64_zero(const void *__mem) {
 }

 #define wasm_v128_load8_lane(__ptr, __vec, __i) \
-  ((v128_t)__builtin_wasm_load8_lane((signed char *)(__ptr), (__i8x16)(__vec), \
-                                     (__i)))
+  ((v128_t)__builtin_wasm_load8_lane((const signed char *)(__ptr), \
+                                     (__i8x16)(__vec), (__i)))

 #define wasm_v128_load16_lane(__ptr, __vec, __i) \
-  ((v128_t)__builtin_wasm_load16_lane((short *)(__ptr), (__i16x8)(__vec), \
-                                      (__i)))
+  ((v128_t)__builtin_wasm_load16_lane((const short *)(__ptr), \
+                                      (__i16x8)(__vec), (__i)))

 #define wasm_v128_load32_lane(__ptr, __vec, __i) \
-  ((v128_t)__builtin_wasm_load32_lane((int *)(__ptr), (__i32x4)(__vec), (__i)))
+  ((v128_t)__builtin_wasm_load32_lane((const int *)(__ptr), (__i32x4)(__vec), \
+                                      (__i)))

 #define wasm_v128_load64_lane(__ptr, __vec, __i) \
-  ((v128_t)__builtin_wasm_load64_lane((long long int *)(__ptr), \
+  ((v128_t)__builtin_wasm_load64_lane((const long long int *)(__ptr), \
                                       (__i64x2)(__vec), (__i)))

 static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
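To see the header fix in action, a small usage sketch (assuming compilation with -msimd128; names are illustrative). Because the macro now casts to `const int *` rather than `int *`, pointing it at a const table no longer drops a qualifier:

#include <wasm_simd128.h>

static const int table[4] = {10, 20, 30, 40};

// Replace lane 3 of `vec` with table[2]; this compiles cleanly under
// -Wcast-qual -Werror now that the macro preserves const-ness.
v128_t patch_lane3(v128_t vec) {
  return wasm_v128_load32_lane(&table[2], vec, 3);
}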
@@ -284,28 +284,28 @@ f64x2 replace_lane_f64x2(f64x2 v, double x) {
 // WEBASSEMBLY-NEXT: ret
 }

-i8x16 load8_lane(signed char *p, i8x16 v) {
+i8x16 load8_lane(const signed char *p, i8x16 v) {
   return __builtin_wasm_load8_lane(p, v, 0);
   // WEBASSEMBLY: tail call <16 x i8> @llvm.wasm.load8.lane(
   // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }

-i16x8 load16_lane(short *p, i16x8 v) {
+i16x8 load16_lane(const short *p, i16x8 v) {
   return __builtin_wasm_load16_lane(p, v, 0);
   // WEBASSEMBLY: tail call <8 x i16> @llvm.wasm.load16.lane(
   // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }

-i32x4 load32_lane(int *p, i32x4 v) {
+i32x4 load32_lane(const int *p, i32x4 v) {
   return __builtin_wasm_load32_lane(p, v, 0);
   // WEBASSEMBLY: tail call <4 x i32> @llvm.wasm.load32.lane(
   // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0)
   // WEBASSEMBLY-NEXT: ret
 }

-i64x2 load64_lane(long long *p, i64x2 v) {
+i64x2 load64_lane(const long long *p, i64x2 v) {
   return __builtin_wasm_load64_lane(p, v, 0);
   // WEBASSEMBLY: tail call <2 x i64> @llvm.wasm.load64.lane(
   // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0)
@@ -904,13 +904,13 @@ f64x2 wasm_promote_low_f32x4_f64x2(f32x4 x) {
 // WEBASSEMBLY: ret
 }

-i32x4 load32_zero(int *p) {
+i32x4 load32_zero(const int *p) {
   return __builtin_wasm_load32_zero(p);
   // WEBASSEMBLY: call <4 x i32> @llvm.wasm.load32.zero(i32* %p)
   // WEBASSEMBLY: ret
 }

-i64x2 load64_zero(long long *p) {
+i64x2 load64_zero(const long long *p) {
   return __builtin_wasm_load64_zero(p);
   // WEBASSEMBLY: call <2 x i64> @llvm.wasm.load64.zero(i64* %p)
   // WEBASSEMBLY: ret
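Note that only the C-level signatures change in these tests: const is a C type-system property, so the expected IR is untouched and the CHECK lines still match a plain i32*/i64* argument to @llvm.wasm.load32.zero and @llvm.wasm.load64.zero. A standalone sketch mirroring the updated test (names hypothetical):

typedef long long i64x2_t __attribute__((__vector_size__(16)));

// Emits the same IR as before the patch; the builtin simply no longer
// requires a non-const `long long *` at the C level.
i64x2_t load_low(const long long *p) {
  return __builtin_wasm_load64_zero(p);
}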
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --force-update
 // REQUIRES: webassembly-registered-target, asserts

-// RUN: %clang %s -O2 -emit-llvm -S -o - -target wasm32-unknown-unknown -msimd128 | FileCheck %s
+// RUN: %clang %s -O2 -emit-llvm -S -o - -target wasm32-unknown-unknown -msimd128 -Wcast-qual -Werror | FileCheck %s

 #include <wasm_simd128.h>

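Adding -Wcast-qual -Werror to the RUN line turns this header test into a regression guard: if wasm_simd128.h ever reintroduces a cast that strips const, compilation fails rather than silently warning. For reference, the kind of cast -Wcast-qual diagnoses:

void example(const char *ro) {
  char *rw = (char *)ro; // warning: cast drops 'const' qualifier
  (void)rw;
}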
@@ -165,7 +165,7 @@ v128_t test_v128_load64_zero(const void *mem) {
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
 //
-v128_t test_v128_load8_lane(uint8_t *ptr, v128_t vec) {
+v128_t test_v128_load8_lane(const uint8_t *ptr, v128_t vec) {
   return wasm_v128_load8_lane(ptr, vec, 15);
 }

@@ -176,7 +176,7 @@ v128_t test_v128_load8_lane(uint8_t *ptr, v128_t vec) {
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[TMP1]] to <4 x i32>
 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
 //
-v128_t test_v128_load16_lane(uint16_t *ptr, v128_t vec) {
+v128_t test_v128_load16_lane(const uint16_t *ptr, v128_t vec) {
   return wasm_v128_load16_lane(ptr, vec, 7);
 }

|
@ -185,7 +185,7 @@ v128_t test_v128_load16_lane(uint16_t *ptr, v128_t vec) {
|
|||
// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i32> @llvm.wasm.load32.lane(i32* [[PTR:%.*]], <4 x i32> [[VEC:%.*]], i32 3)
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
|
||||
//
|
||||
v128_t test_v128_load32_lane(uint32_t *ptr, v128_t vec) {
|
||||
v128_t test_v128_load32_lane(const uint32_t *ptr, v128_t vec) {
|
||||
return wasm_v128_load32_lane(ptr, vec, 3);
|
||||
}
|
||||
|
||||
|
@ -196,7 +196,7 @@ v128_t test_v128_load32_lane(uint32_t *ptr, v128_t vec) {
|
|||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP1]] to <4 x i32>
|
||||
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
|
||||
//
|
||||
v128_t test_v128_load64_lane(uint64_t *ptr, v128_t vec) {
|
||||
v128_t test_v128_load64_lane(const uint64_t *ptr, v128_t vec) {
|
||||
return wasm_v128_load64_lane(ptr, vec, 1);
|
||||
}
|
||||
|
||||
|