!5905 [MS][LITE][Develop]support batch_to_space_nd
Merge pull request !5905 from chenjianping/lite_dev
commit 59775dbf6d
@@ -598,6 +598,7 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
     case schema::PrimitiveType_Unsqueeze:
       return new Unsqueeze(primitive);
     case schema::PrimitiveType_BatchToSpace:
+    case schema::PrimitiveType_BatchToSpaceND:
       return new BatchToSpace(primitive);
     case schema::PrimitiveType_SpaceToBatch:
       return new SpaceToBatch(primitive);
@@ -857,6 +858,7 @@ PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) {
     case schema::PrimitiveType_Unsqueeze:
       return NewPrimitiveC<Unsqueeze>(primitive);
     case schema::PrimitiveType_BatchToSpace:
+    case schema::PrimitiveType_BatchToSpaceND:
       return NewPrimitiveC<BatchToSpace>(primitive);
     case schema::PrimitiveType_SpaceToBatch:
       return NewPrimitiveC<SpaceToBatch>(primitive);
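Note: both Create overloads route the new PrimitiveType_BatchToSpaceND through a case fall-through to the existing BatchToSpace primitive, since the 4-D NHWC case of batch_to_space_nd is exactly what BatchToSpace already implements. For intuition, a rough sketch of the output shape a 4-D NHWC batch_to_space produces (illustrative only, hypothetical helper name, not project code):

    #include <array>
    // Illustrative only: output shape of a 4-D NHWC batch_to_space given
    // block sizes (block_h, block_w) and crops {top, bottom, left, right}.
    std::array<int, 4> BatchToSpaceOutShape(const std::array<int, 4> &in,  // {N, H, W, C}
                                            int block_h, int block_w,
                                            const std::array<int, 4> &crops) {
      return {in[0] / (block_h * block_w),            // batch shrinks by the block area
              in[1] * block_h - crops[0] - crops[1],  // height expands, then is cropped
              in[2] * block_w - crops[2] - crops[3],  // width expands, then is cropped
              in[3]};                                 // channels unchanged
    }
    // e.g. input {4, 1, 1, 1}, 2x2 blocks, zero crops -> {1, 2, 2, 1}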
@@ -80,7 +80,7 @@ int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output
   auto input_shape = input->shape();

   std::vector<int32_t> output_shape = input_shape;
-  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis();
+  auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() + 1 : GetAxis();
   if (axis < 0 || axis > input_shape.size()) {
     MS_LOG(ERROR) << "Invalid axis " << GetAxis();
     return RET_PARAM_INVALID;
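Note: Stack inserts a new dimension, so the output rank is the input rank plus one; a negative axis therefore has to be offset by input_shape.size() + 1, not input_shape.size(). A minimal sketch of the intended normalization (illustrative only, not the project's code):

    // For stack, valid axes are [-(rank + 1), rank]; the output rank is rank + 1.
    int NormalizeStackAxis(int axis, int input_rank) {
      return axis < 0 ? axis + input_rank + 1 : axis;
    }
    // e.g. for a rank-2 input: axis -1 -> 2 (append a trailing dimension),
    //      axis -3 -> 0 (prepend a leading dimension)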
@@ -1676,6 +1676,7 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
   populate_parameter_funcs_[schema::PrimitiveType_Pad] = PopulatePadParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Resize] = PopulateResizeParameter;
   populate_parameter_funcs_[schema::PrimitiveType_BatchToSpace] = PopulateBatchToSpaceParameter;
+  populate_parameter_funcs_[schema::PrimitiveType_BatchToSpaceND] = PopulateBatchToSpaceParameter;
   populate_parameter_funcs_[schema::PrimitiveType_SpaceToDepth] = PopulateSpaceToDepthParameter;
   populate_parameter_funcs_[schema::PrimitiveType_SpaceToBatch] = PopulateSpaceToBatchParameter;
   populate_parameter_funcs_[schema::PrimitiveType_SpaceToBatchND] = PopulateSpaceToBatchNDParameter;
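Note: PrimitiveType_BatchToSpaceND reuses PopulateBatchToSpaceParameter, so both op types produce the same BatchToSpaceParameter (including the crops_ array the kernel checks below) and the runtime handles them with one code path.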
@@ -27,9 +27,14 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_FORMAT_ERR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_BatchToSpace;
+using mindspore::schema::PrimitiveType_BatchToSpaceND;

 namespace mindspore::kernel {
 int BatchToSpaceBaseCPUKernel::Init() {
+  if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) {
+    MS_LOG(ERROR) << "batch_to_space only support NHWC now!";
+    return RET_FORMAT_ERR;
+  }
   BatchToSpaceParameter *param = reinterpret_cast<BatchToSpaceParameter *>(this->op_parameter_);
   for (int i = 0; i < BATCH_TO_SPACE_CROPS_SIZE; ++i) {
     if (param->crops_[i] != 0) {
@@ -40,9 +45,10 @@ int BatchToSpaceBaseCPUKernel::Init() {
 }

 int BatchToSpaceBaseCPUKernel::ReSize() {
-  if (in_tensors_[0]->GetFormat() != schema::Format::Format_NHWC) {
-    MS_LOG(ERROR) << "batch_to_space only support NHWC now!";
-    return RET_FORMAT_ERR;
+  auto shape = in_tensors_[0]->shape();
+  if (shape.size() != 4) {
+    MS_LOG(ERROR) << "Unsupport shape size: " << shape.size();
+    return RET_ERROR;
   }
   return RET_OK;
 }
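Note: the NHWC check now runs once in Init(), while ReSize(), which is re-run when input shapes change, validates that the input is rank-4, as the underlying C implementation only handles 4-D tensors. Taken together the checks amount to something like this hypothetical helper (a sketch for illustration, not the project's code):

    // Sketch of the validation performed across Init() and ReSize().
    int ValidateBatchToSpaceInput(const lite::Tensor &input) {
      if (input.GetFormat() != schema::Format::Format_NHWC) {
        return RET_FORMAT_ERR;  // only NHWC is supported for now
      }
      if (input.shape().size() != 4) {
        return RET_ERROR;  // the kernel expects a rank-4 tensor
      }
      return RET_OK;
    }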
@@ -52,7 +58,6 @@ kernel::LiteKernel *CpuBatchToSpaceInt8KernelCreator(const std::vector<lite::Ten
                                                      OpParameter *op_parameter, const lite::Context *ctx,
                                                      const kernel::KernelKey &desc,
                                                      const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(desc.type == schema::PrimitiveType_BatchToSpace);
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
     return nullptr;
@@ -78,7 +83,6 @@ kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector<lite::Ten
                                                      OpParameter *op_parameter, const lite::Context *ctx,
                                                      const kernel::KernelKey &desc,
                                                      const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(desc.type == schema::PrimitiveType_BatchToSpace);
   if (op_parameter == nullptr) {
     MS_LOG(ERROR) << "Input op_parameter is nullptr!";
     return nullptr;
@@ -100,5 +104,7 @@ kernel::LiteKernel *CpuBatchToSpaceFp32KernelCreator(const std::vector<lite::Ten
 }

 REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpace, CpuBatchToSpaceInt8KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpaceND, CpuBatchToSpaceInt8KernelCreator)
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpace, CpuBatchToSpaceFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchToSpaceND, CpuBatchToSpaceFp32KernelCreator)
 }  // namespace mindspore::kernel
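Note: registering PrimitiveType_BatchToSpaceND against the same Int8 and Fp32 creators means no new kernel implementation is needed; the registry simply gains two more (device, dtype, op type) keys that resolve to the existing BatchToSpace kernels.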
@@ -29,7 +29,7 @@ namespace mindspore::kernel {
 int StackCPUKernel::ReSize() {
   StackParameter *param = reinterpret_cast<StackParameter *>(op_parameter_);
   auto input0_shape = in_tensors_[0]->shape();
-  axis_ = param->axis_ < 0 ? param->axis_ + input0_shape.size() : param->axis_;
+  axis_ = param->axis_ < 0 ? param->axis_ + input0_shape.size() + 1 : param->axis_;
   return RET_OK;
 }

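Note: this mirrors the Stack::InferShape fix above; axis_ is normalized against the output rank (input rank + 1), so for a rank-2 input an axis of -1 now resolves to 2 instead of 1.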
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/fp32/stack.h"
+
+namespace mindspore {
+class StackTestFp32 : public mindspore::CommonTest {
+ public:
+  StackTestFp32() = default;
+};
+
+TEST_F(StackTestFp32, StackTest1) {
+  float input0[6] = {1, 2, 3, 10, 20, 30};
+  float input1[6] = {4, 5, 6, 40, 50, 60};
+  float input2[6] = {7, 8, 9, 70, 80, 90};
+  float *input[3];
+  input[0] = input0;
+  input[1] = input1;
+  input[2] = input2;
+  std::vector<int> shape = {2, 3};
+  int axis = 2;
+  constexpr int kOutSize = 18;
+  float expect_out[kOutSize] = {1, 4, 7, 2, 5, 8, 3, 6, 9,
+                                10, 40, 70, 20, 50, 80, 30, 60, 90};
+  float output[kOutSize];
+  DoStack(input, 3, shape.data(), shape.size(), axis, output);
+  for (int i = 0; i < kOutSize; ++i) {
+    std::cout << output[i] << " ";
+  }
+  std::cout << "\n";
+  CompareOutputData(output, expect_out, kOutSize, 0.000001);
+}
+
+
+}  // namespace mindspore
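Note: the test stacks three 2x3 tensors along axis 2, so the output has shape {2, 3, 3} with the inputs interleaved element by element. A reference version of that behaviour might look like the sketch below (illustrative only, not the nnacl implementation; DoStack's actual signature comes from the included stack.h):

    #include <algorithm>
    // Illustrative reference: stack `count` tensors of identical shape along
    // `axis` (0 <= axis <= rank); the output gains one dimension of size `count`.
    void ReferenceStack(const float *const *inputs, int count, const int *shape,
                        int rank, int axis, float *output) {
      int outer = 1;
      for (int i = 0; i < axis; ++i) outer *= shape[i];         // elements before the new axis
      int copy_size = 1;
      for (int i = axis; i < rank; ++i) copy_size *= shape[i];  // contiguous block per copy
      for (int o = 0; o < outer; ++o) {
        for (int n = 0; n < count; ++n) {
          const float *src = inputs[n] + o * copy_size;
          float *dst = output + (o * count + n) * copy_size;
          std::copy(src, src + copy_size, dst);
        }
      }
    }
    // For the test's shape {2, 3} and axis 2 (outer = 6, copy_size = 1) this yields
    // exactly expect_out: {1, 4, 7, 2, 5, 8, 3, 6, 9, 10, 40, 70, ...}.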