[CUDA][HIP] Fix delete operator for -fopenmp

When the new operator is called in an OpenMP parallel region,
the delete operator is resolved and checked as well. Due to an
issue similar to the one fixed by https://reviews.llvm.org/D121765,
the caller was not determined correctly when resolving the delete
operator, which resulted in the error shown in
https://godbolt.org/z/jKhd8qKos.

This patch fixes the issue in the same way as
https://reviews.llvm.org/D121765.
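
For illustration, a minimal, hypothetical sketch (not the exact
code behind the godbolt link) of a CUDA/HIP source compiled with
-fopenmp that exercises this path:

// Hypothetical reproducer sketch, compiled as CUDA/HIP with -fopenmp.
int main() {
#pragma omp parallel
  for (int i = 0; i < 100; i++) {
    int *p = new int; // resolving 'new' also checks 'operator delete';
                      // the caller here must be main(), a host function,
                      // not the OpenMP captured region (a CapturedDecl).
    delete p;
  }
}

Inside the captured region, dyn_cast<FunctionDecl>(CurContext)
returns null because CurContext is a CapturedDecl, while
getCurFunctionDecl(/*AllowLambda=*/true) walks out to the
enclosing FunctionDecl, as D121765 already does for ordinary calls.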

Reviewed by: Artem Belevich

Differential Revision: https://reviews.llvm.org/D123976
Author: Yaxun (Sam) Liu
Date:   2022-04-18 22:21:47 -04:00
commit 800f26386c (parent 3de29ad209)

2 files changed, 13 insertions(+), 7 deletions(-)


@@ -1597,7 +1597,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
 bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
   // [CUDA] Ignore this function, if we can't call it.
-  const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+  const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
   if (getLangOpts().CUDA) {
     auto CallPreference = IdentifyCUDAPreference(Caller, Method);
     // If it's not callable at all, it's not the right function.
@@ -1691,7 +1691,7 @@ namespace {
     // In CUDA, determine how much we'd like / dislike to call this.
     if (S.getLangOpts().CUDA)
-      if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+      if (auto *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true))
        CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
   }
@@ -2830,7 +2830,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
     }
     if (getLangOpts().CUDA)
-      EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+      EraseUnwantedCUDAMatches(getCurFunctionDecl(/*AllowLambda=*/true),
+                               Matches);
   } else {
     // C++1y [expr.new]p22:
     //   For a non-placement allocation function, the normal deallocation


@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fopenmp -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fopenmp -fexceptions -fsyntax-only -verify %s
 #include "Inputs/cuda.h"
@@ -7,13 +8,17 @@ __device__ void foo(int) {} // expected-note {{candidate function not viable: ca
 int main() {
 #pragma omp parallel
-  for (int i = 0; i < 100; i++)
+  for (int i = 0; i < 100; i++) {
     foo(1); // expected-error {{no matching function for call to 'foo'}}
+    new int;
+  }
   auto Lambda = []() {
 #pragma omp parallel
-    for (int i = 0; i < 100; i++)
+    for (int i = 0; i < 100; i++) {
      foo(1); // expected-error {{reference to __device__ function 'foo' in __host__ __device__ function}}
+      new int;
+    }
   };
   Lambda(); // expected-note {{called by 'main'}}
 }