llvm-project/clang/test/SemaCXX/complex-folding.cpp

[complex] Teach Clang to preserve different-type operands to arithmetic operators where one type is a C complex type, and to emit both the efficient and correct implementation for complex arithmetic according to C11 Annex G using this extra information.

For both multiply and divide, the old code was writing a long-hand reduced version of the math without any of the special handling of inf and NaN recommended by the standard here. Instead of putting more complexity here, this change does what GCC does, which is to emit a libcall for the fully general case.

However, the old code also failed to do the proper minimization of the set of operations when there was a mixed complex and real operation. In those cases, C provides a spec for much more minimal operations that are valid. Clang now emits the exact suggested operations. This change isn't *just* about performance, though: without minimizing these operations, we again lose the correct handling of infinities and NaNs. It is critical that this happen in the frontend based on asymmetric-type operands to complex math operations.

The performance implications of this change aren't trivial either. I've run a set of benchmarks in Eigen, an open source mathematics library that makes heavy use of complex. While a few slowed down due to the libcall being introduced, most sped up, some by a huge amount: up to 100% and 140%.

In order to make all of this work, also match the algorithm in the constant evaluator to the one in the runtime library. Currently it is a broken port of the simplifications from C's Annex G to the long-hand formulation of the algorithm.

Splitting this patch up is very hard because none of this works without the AST change to preserve non-complex operands. Sorry for the enormous change. Follow-up changes will include support for sinking the libcalls onto cold paths in common cases and fastmath improvements to allow more aggressive backend folding.

Differential Revision: http://reviews.llvm.org/D5698
llvm-svn: 219557
2014-10-11 08:57:18 +08:00
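
As a rough illustration of the mixed complex/real minimization the commit message describes, here is a small standalone sketch of the Annex G-style reduced forms. It is not part of the test file below; the struct and helper names are invented for the example, and it does not mirror Clang's actual CodeGen or the compiler-rt libcalls. It only shows why keeping the real operand real avoids the spurious NaNs that a promote-then-multiply formulation produces.

// Illustrative sketch only: reduced mixed-operand arithmetic in the spirit of
// C11 Annex G. Not Clang's implementation.
#include <cmath>
#include <cstdio>

struct Cplx { double re, im; };

// (a + bi) * r: two multiplies. Promoting r to (r + 0.0i) and doing the full
// long-hand multiply would compute a*r - b*0.0 and a*0.0 + b*r, and the 0.0
// terms turn an infinite component into a NaN.
Cplx mul_complex_real(Cplx z, double r) {
  return {z.re * r, z.im * r};
}

// (a + bi) / r: two divides, again with no way to manufacture a NaN from a
// nonexistent imaginary part of r.
Cplx div_complex_real(Cplx z, double r) {
  return {z.re / r, z.im / r};
}

// r / (c + di) has no similarly tiny reduction and is omitted from this sketch.

int main() {
  Cplx z = {INFINITY, 1.0};
  Cplx p = mul_complex_real(z, 1.0);
  // With the reduced form both components survive: the real part stays +inf
  // and the imaginary part stays 1, matching the expectations in the test.
  std::printf("%g %g\n", p.re, p.im);
  return 0;
}
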
// RUN: %clang_cc1 %s -std=c++1z -fsyntax-only -verify
//
// Test the constant folding of builtin complex numbers.
static_assert((0.0 + 0.0j) == (0.0 + 0.0j));
static_assert((0.0 + 0.0j) != (0.0 + 0.0j)); // expected-error {{static_assert}}
static_assert((0.0 + 0.0j) == 0.0);
static_assert(0.0 == (0.0 + 0.0j));
static_assert(0.0 == 0.0j);
static_assert((0.0 + 1.0j) != 0.0);
static_assert(1.0 != (0.0 + 0.0j));
static_assert(0.0 != 1.0j);
// Walk around the complex plane stepping between angular differences and
// equality.
static_assert((1.0 + 0.0j) == (0.0 + 0.0j)); // expected-error {{static_assert}}
static_assert((1.0 + 0.0j) == (1.0 + 0.0j));
static_assert((1.0 + 1.0j) == (1.0 + 0.0j)); // expected-error {{static_assert}}
static_assert((1.0 + 1.0j) == (1.0 + 1.0j));
static_assert((0.0 + 1.0j) == (1.0 + 1.0j)); // expected-error {{static_assert}}
static_assert((0.0 + 1.0j) == (0.0 + 1.0j));
static_assert((-1.0 + 1.0j) == (0.0 + 1.0j)); // expected-error {{static_assert}}
static_assert((-1.0 + 1.0j) == (-1.0 + 1.0j));
static_assert((-1.0 + 0.0j) == (-1.0 + 1.0j)); // expected-error {{static_assert}}
static_assert((-1.0 + 0.0j) == (-1.0 + 0.0j));
static_assert((-1.0 - 1.0j) == (-1.0 + 0.0j)); // expected-error {{static_assert}}
static_assert((-1.0 - 1.0j) == (-1.0 - 1.0j));
static_assert((0.0 - 1.0j) == (-1.0 - 1.0j)); // expected-error {{static_assert}}
static_assert((0.0 - 1.0j) == (0.0 - 1.0j));
static_assert((1.0 - 1.0j) == (0.0 - 1.0j)); // expected-error {{static_assert}}
static_assert((1.0 - 1.0j) == (1.0 - 1.0j));
// Test basic mathematical folding of both complex and real operands.
static_assert(((1.0 + 0.5j) + (0.25 - 0.75j)) == (1.25 - 0.25j));
static_assert(((1.0 + 0.5j) + 0.25) == (1.25 + 0.5j));
static_assert((1.0 + (0.25 - 0.75j)) == (1.25 - 0.75j));
static_assert(((1.0 + 0.5j) - (0.25 - 0.75j)) == (0.75 + 1.25j));
static_assert(((1.0 + 0.5j) - 0.25) == (0.75 + 0.5j));
static_assert((1.0 - (0.25 - 0.75j)) == (0.75 + 0.75j));
static_assert(((1.25 + 0.5j) * (0.25 - 0.75j)) == (0.6875 - 0.8125j));
static_assert(((1.25 + 0.5j) * 0.25) == (0.3125 + 0.125j));
static_assert((1.25 * (0.25 - 0.75j)) == (0.3125 - 0.9375j));
static_assert(((1.25 + 0.5j) / (0.25 - 0.75j)) == (-0.1 + 1.7j));
static_assert(((1.25 + 0.5j) / 0.25) == (5.0 + 2.0j));
static_assert((1.25 / (0.25 - 0.75j)) == (0.5 + 1.5j));
// Test that infinities are preserved, don't turn into NaNs, and do form zeros
// when they appear in the divisor.
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * 1.0)) == 1);
static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) * 1.0)) == 1);
static_assert(__builtin_isinf_sign(__real__(1.0 * (__builtin_inf() + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__imag__(1.0 * (1.0 + __builtin_inf() * 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) * (__builtin_inf() + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * (__builtin_inf() + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + __builtin_inf() * 1.0j) * (1.0 + 1.0j))) == -1);
static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) * (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == -1);
static_assert(__builtin_isinf_sign(__imag__((1.0 + 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + __builtin_inf() * 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == -1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + __builtin_inf() * 1.0j) * (__builtin_inf() + __builtin_inf() * 1.0j))) == -1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__imag__(1.0 + (__builtin_inf() * 1.0j) / (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / 1.0)) == 1);
static_assert(__builtin_isinf_sign(__imag__(1.0 + (__builtin_inf() * 1.0j) / 1.0)) == 1);
static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / 1.0)) == 1);
static_assert(((1.0 + 1.0j) / (__builtin_inf() + 1.0j)) == (0.0 + 0.0j));
static_assert(((1.0 + 1.0j) / (1.0 + __builtin_inf() * 1.0j)) == (0.0 + 0.0j));
static_assert(((1.0 + 1.0j) / (__builtin_inf() + __builtin_inf() * 1.0j)) == (0.0 + 0.0j));
static_assert(((1.0 + 1.0j) / __builtin_inf()) == (0.0 + 0.0j));
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) / (0.0 + 0.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) / 0.0)) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / (0.0 + 0.0j))) == 1);
static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) / (0.0 + 0.0j))) == 1);
static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / (0.0 + 0.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / 0.0)) == 1);
static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) / 0.0)) == 1);
static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / 0.0)) == 1);