!9368 support 1-2-3dim reshape op of opencl

From: @ddwsky
Reviewed-by: @HilbertDavid, @zhanghaibo5
Signed-off-by: @HilbertDavid
Committed by mindspore-ci-bot on 2020-12-03 18:58:02 +08:00 (via Gitee)
commit 2a9e0b0521
25 changed files with 128 additions and 9 deletions
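
Summary of the change: every OpenCL kernel's CheckSpecs() gains an up-front guard on the number of input and output tensors, and the Reshape kernel now accepts tensor ranks 1 through 4 instead of only 2 or 4. The standalone C++ sketch below models that validation pattern for Reshape; Tensor, ReshapeCheckSpecs, and the return codes are simplified stand-ins for the MindSpore Lite types, not the project's real API.

// Minimal sketch of the guard pattern this commit adds; all names here are
// simplified stand-ins, not MindSpore Lite API.
#include <cstdio>
#include <vector>

enum { RET_OK = 0, RET_ERROR = 1 };

struct Tensor {
  std::vector<int> shape;  // tensor dimensions, e.g. {5, 3, 8}
};

int ReshapeCheckSpecs(const std::vector<Tensor> &in, const std::vector<Tensor> &out) {
  // Added to nearly every kernel: exactly one input and one output tensor.
  if (in.size() != 1 || out.size() != 1) {
    std::fprintf(stderr, "in size: %zu, out size: %zu\n", in.size(), out.size());
    return RET_ERROR;
  }
  // Reshape-specific: ranks 1-4 are now accepted (previously only 2 or 4).
  if (in[0].shape.empty() || in[0].shape.size() > 4) {
    return RET_ERROR;
  }
  if (out[0].shape.empty() || out[0].shape.size() > 4) {
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  Tensor in{{5, 3, 8}};  // 3-D input, as exercised by the new 3D_2D_test6
  Tensor out{{8, 15}};   // 2-D output holding the same 120 elements
  std::printf("CheckSpecs: %d\n", ReshapeCheckSpecs({in}, {out}));  // prints 0
  return 0;
}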

@@ -56,6 +56,10 @@ std::string ActivationOpenCLKernel::GetActTypeString(int act_type) {
 }
 int ActivationOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (GetActTypeString(type_).empty()) {
     MS_LOG(ERROR) << "schema::ActivationType:" << type_ << "not found";
     return RET_ERROR;

@@ -34,6 +34,10 @@ using mindspore::schema::PrimitiveType_ArgMin;
 namespace mindspore::kernel {
 int ArgMinMaxOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;

@@ -58,6 +58,10 @@ std::set<schema::PrimitiveType> SupportedOpenCLArithmetics = {PrimitiveType_Mul,
                                                               PrimitiveType_Eltwise};
 int ArithmeticOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto *param = reinterpret_cast<const ArithmeticParameter *>(op_parameter_);
   if (param->broadcasting_ && out_tensors_[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "Broadcasting don't support N > 1";

@@ -90,6 +90,10 @@ void ArithmeticSelfOpenCLKernel::GetKernelName(std::string *kernel_name, Arithme
 }
 int ArithmeticSelfOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->shape().size() != 4 && in_tensors_[0]->shape().size() != 2) {
     MS_LOG(ERROR) << " only support dim = 4 or 2 but your dim = " << in_tensors_[0]->shape().size();
     return RET_ERROR;

@@ -32,6 +32,10 @@ using mindspore::schema::PrimitiveType_BatchToSpaceND;
 namespace mindspore::kernel {
 int BatchToSpaceNDOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;

@@ -31,6 +31,10 @@ using mindspore::schema::PrimitiveType_BatchNorm;
 namespace mindspore::kernel {
 int BatchNormOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 5 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_.at(0)->shape()[0] > 1) {
     MS_LOG(ERROR) << " Unsupported batch_size >1 ";
     return RET_ERROR;

@@ -36,6 +36,10 @@ using mindspore::schema::PrimitiveType_BiasAdd;
 namespace mindspore::kernel {
 int BiasAddOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "Reshape in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_.size() == 0) {
     MS_LOG(ERROR) << "Input data size must be greater than 0, but your size is " << in_tensors_.size();
     return RET_ERROR;

@@ -43,7 +43,13 @@ int CastOpenCLKernel::GetKernelName(std::string *kernel_name, CastParameter *par
   return RET_OK;
 }
-int CastOpenCLKernel::CheckSpecs() { return RET_OK; }
+int CastOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 void CastOpenCLKernel::SetConstArgs() {
   auto input_shape = in_tensors_[0]->shape();

@@ -64,6 +64,10 @@ void ConcatGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *
 }
 int ConcatOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() < 2 || in_tensors_.size() > 6) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<ConcatParameter *>(this->op_parameter_);
   MS_LOG(DEBUG) << " concat at axis=: " << param->axis_;
   if (out_tensors_[0]->shape().size() > 4) {

@@ -35,6 +35,10 @@ using mindspore::schema::PrimitiveType_DeConv2D;
 namespace mindspore::kernel {
 int Conv2dTransposeOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_);
   if (param->pad_l_ != param->pad_r_ || param->kernel_h_ - param->stride_h_ != 2 * param->pad_l_ ||
       param->pad_u_ != param->pad_d_ || param->kernel_w_ - param->stride_w_ != 2 * param->pad_u_) {

@@ -43,6 +43,10 @@ using mindspore::schema::PrimitiveType_DepthwiseConv2D;
 namespace mindspore::kernel {
 int DepthwiseConv2dOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;

@@ -67,6 +67,10 @@ void FillOpenCLKernel::SetConstArgs() {}
 void FillOpenCLKernel::SetGlobalLocal() {}
 int FillOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = this->op_parameter_;
   if (out_tensors_[0]->shape().size() > 4) {

@@ -37,6 +37,10 @@ using mindspore::schema::PrimitiveType_FullConnection;
 namespace mindspore::kernel {
 int FullConnectionOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() != 2 && in_tensors_.size() != 3) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
   if (param->a_transpose_) {
     MS_LOG(ERROR) << "fullconnection only support a_transpose_=false yet.";

@@ -31,6 +31,10 @@ using mindspore::schema::PrimitiveType_MatMul;
 namespace mindspore::kernel {
 int MatMulOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto param = reinterpret_cast<MatMulParameter *>(op_parameter_);
   transposeA = param->a_transpose_;
   if (transposeA) {

@@ -29,7 +29,13 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_OneHot;
 namespace mindspore::kernel {
-int OneHotOpenCLKernel::CheckSpecs() { return RET_OK; }
+int OneHotOpenCLKernel::CheckSpecs() {
+  if ((in_tensors_.size() < 2 || in_tensors_.size() > 4) || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 int OneHotOpenCLKernel::Prepare() {
   std::string kernel_name = "OneHot";
@@ -59,9 +65,6 @@ int OneHotOpenCLKernel::Prepare() {
 }
 int OneHotOpenCLKernel::InitWeights() {
-  if (in_tensors_.size() <= 1) {
-    return RET_ERROR;
-  }
   depth_ = static_cast<int32_t *>(in_tensors_[1]->data_c())[0];
   if (in_tensors_.size() > 2) {
     on_value_ = static_cast<float *>(in_tensors_[2]->data_c())[0];

@@ -37,6 +37,10 @@ namespace mindspore {
 namespace kernel {
 int PoolingOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (parameter_->pool_mode_ != PoolMode_MaxPool && parameter_->pool_mode_ != PoolMode_AvgPool) {
     MS_LOG(ERROR) << "Init `Pooling2d` kernel failed, unsupported pool mode!";
     return RET_ERROR;

@@ -67,6 +67,10 @@ cl_float4 ReduceOpenCLKernel::GenC4Mask() {
 }
 int ReduceOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->shape()[0] > 1) {
     MS_LOG(ERROR) << "reduce op only support n = 1";
     return RET_PARAM_INVALID;

@@ -31,12 +31,20 @@ using mindspore::schema::PrimitiveType_Squeeze;
 namespace mindspore::kernel {
 int ReshapeOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 && out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "Reshape in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;
   }
-  if (out_tensors_[0]->shape().size() != 2 && out_tensors_[0]->shape().size() != 4) {
-    MS_LOG(ERROR) << "Reshape output size should in 2,4";
+  if (in_tensors_[0]->shape().size() == 0 || in_tensors_[0]->shape().size() > 4) {
+    MS_LOG(ERROR) << "Reshape input size should in 1-4, actual: " << in_tensors_[0]->shape();
+    return RET_ERROR;
+  }
+  if (out_tensors_[0]->shape().size() == 0 || out_tensors_[0]->shape().size() > 4) {
+    MS_LOG(ERROR) << "Reshape output size should in 1-4, actual: " << out_tensors_[0]->shape();
     return RET_ERROR;
   }
   return RET_OK;

@@ -33,6 +33,10 @@ using mindspore::schema::PrimitiveType_Resize;
 namespace mindspore::kernel {
 int ResizeOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto in_shape = in_tensors_[0]->shape();
   auto out_shape = out_tensors_[0]->shape();
   if (in_shape.size() != 4 || out_shape.size() != 4 || in_shape[0] != out_shape[0] || in_shape[3] != out_shape[3]) {

@@ -43,6 +43,10 @@ std::vector<float> SoftmaxOpenCLKernel::GetMaskForLastChannel(int channels) {
 }
 int SoftmaxOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   axis_ = parameter_->axis_;
   auto in_shape = in_tensors_[0]->shape();
   if (in_shape.size() > 4) {

@@ -32,6 +32,10 @@ using mindspore::schema::PrimitiveType_SpaceToBatchND;
 namespace mindspore::kernel {
 int SpaceToBatchNDOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   if (in_tensors_[0]->data_type() != kNumberTypeFloat32 && in_tensors_[0]->data_type() != kNumberTypeFloat16) {
     MS_LOG(ERROR) << "Unsupported data type " << in_tensors_[0]->data_type();
     return RET_ERROR;

@@ -31,7 +31,13 @@ using mindspore::lite::RET_PARAM_INVALID;
 using mindspore::schema::PrimitiveType_SpaceToDepth;
 namespace mindspore::kernel {
-int SpaceToDepthOpenCLKernel::CheckSpecs() { return RET_OK; }
+int SpaceToDepthOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 int SpaceToDepthOpenCLKernel::Prepare() {
   std::string kernel_name;

@@ -33,6 +33,10 @@ using mindspore::schema::PrimitiveType_ToFormat;
 namespace mindspore::kernel {
 int ToFormatOpenCLKernel::CheckSpecs() {
+  if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+    MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
+    return RET_ERROR;
+  }
   auto data_type = in_tensors_.front()->data_type();
   if (data_type != kNumberTypeFloat32 && data_type != kNumberTypeFloat16 && data_type != kNumberTypeInt32) {
     MS_LOG(ERROR) << "Unsupported data type " << data_type;

@@ -271,7 +271,7 @@ int OpenCLRuntime::Init() {
 }
 int OpenCLRuntime::Uninit() {
-  if (enable_cache_) {
+  if (enable_cache_ && !binary_map_.empty()) {
     StoreCache();
   }
   binary_map_.clear();

@@ -135,4 +135,24 @@ TEST_F(TestOpenCL_Reshape, 4D_4D_test5) {
   }
 }
+TEST_F(TestOpenCL_Reshape, 3D_2D_test6) {
+  std::vector<int> shape_in = {5, 3, 8};
+  std::vector<int> shape_out = {8, 15};
+  float input_data[] = {
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+    72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
+  float output_data[] = {
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+    72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
+  for (auto fp16_enable : {false, true}) {
+    TestMain({{shape_in, input_data, VAR}}, {shape_out, output_data}, CreateParameter(), fp16_enable);
+  }
+}
 }  // namespace mindspore::lite::opencl::test
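
The new test above exercises the 3-D to 2-D path; the commit title also claims 1-dim support. A companion case for that path would presumably look like the sketch below. It is illustrative only (not part of this commit); it reuses the TestMain harness, the VAR tag, and the CreateParameter helper visible in the hunk above, and would sit alongside 3D_2D_test6 inside the same namespace.

// Hypothetical companion case (not in the commit): 1-D input reshaped to 2-D.
TEST_F(TestOpenCL_Reshape, 1D_2D_test7) {
  std::vector<int> shape_in = {6};
  std::vector<int> shape_out = {2, 3};
  // Reshape only relayouts data, so input and output payloads are identical.
  float input_data[] = {0, 1, 2, 3, 4, 5};
  float output_data[] = {0, 1, 2, 3, 4, 5};
  for (auto fp16_enable : {false, true}) {
    TestMain({{shape_in, input_data, VAR}}, {shape_out, output_data}, CreateParameter(), fp16_enable);
  }
}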