forked from mindspore-Ecosystem/mindspore

delete mean

This commit is contained in:
parent ec3983b77d
commit 2083dd686d
@@ -72,7 +72,7 @@ union PrimitiveType {
     Resize,
     DetectionPostProcess,
     FullConnection,
-    Mean,
+    Mean, // DEPRECATED
     DeConv2D,
     Scale,
     Reshape,
@@ -457,7 +457,7 @@ table FullConnection {
 }
 
 // Mean(input_tensor, axis, keep_dims)
-table Mean {
+table Mean { // DEPRECATED
     axis: [int];
     keepDims: bool = false;
 }
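The schema keeps `Mean` only as a deprecated placeholder; per the `// Mean(input_tensor, axis, keep_dims)` comment, the op averages the input over the listed axes, and `keepDims` only decides whether the reduced axes survive as size-1 dimensions. A minimal standalone illustration of those semantics (plain C++, not the MindSpore Lite API; `MeanAxis0` is a hypothetical helper):

```cpp
#include <cstdio>
#include <vector>

// Hypothetical helper: mean over axis 0 of a row-major [rows x cols] matrix.
// keep_dims changes only the reported shape, never the values.
std::vector<float> MeanAxis0(const std::vector<float> &data, int rows, int cols) {
  std::vector<float> out(cols, 0.0f);
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      out[c] += data[r * cols + c] / rows;
    }
  }
  return out;
}

int main() {
  // input shape [2, 3], axis = {0}
  std::vector<float> x = {1, 2, 3, 4, 5, 6};
  auto m = MeanAxis0(x, 2, 3);  // values {2.5, 3.5, 4.5}
  printf("%.1f %.1f %.1f\n", m[0], m[1], m[2]);
  // keepDims = true  -> output shape [1, 3]
  // keepDims = false -> output shape [3]
  return 0;
}
```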
@@ -1,123 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/mean.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-std::vector<int> Mean::GetAxis() const { return this->primitive_->value.AsMean()->axis; }
-bool Mean::GetKeepDims() const { return this->primitive_->value.AsMean()->keepDims; }
-
-void Mean::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsMean()->axis = axis; }
-void Mean::SetKeepDims(bool keep_dims) { this->primitive_->value.AsMean()->keepDims = keep_dims; }
-
-#else
-
-std::vector<int> Mean::GetAxis() const {
-  auto fb_vector = this->primitive_->value_as_Mean()->axis();
-  return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-bool Mean::GetKeepDims() const { return this->primitive_->value_as_Mean()->keepDims(); }
-
-int Mean::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Mean();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Mean return nullptr";
-    return RET_ERROR;
-  }
-  std::vector<int32_t> axis;
-  if (attr->axis() != nullptr) {
-    for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) {
-      axis.push_back(attr->axis()->data()[i]);
-    }
-  }
-  auto val_offset = schema::CreateMeanDirect(*fbb, &axis, attr->keepDims());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Mean, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *MeanCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Mean>(primitive); }
-Registry MeanRegistry(schema::PrimitiveType_Mean, MeanCreator);
-#endif
-
-namespace {
-constexpr size_t kInputSize = 1;
-constexpr size_t kOutputSize = 1;
-} // namespace
-int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) {
-    return RET_ERROR;
-  }
-  auto input = inputs_.front();
-  auto output = outputs_.front();
-  if (input == nullptr || output == nullptr) {
-    return RET_NULL_PTR;
-  }
-  output->set_data_type(input->data_type());
-  output->set_format(input->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  if (this->primitive_ == nullptr) {
-    return RET_NULL_PTR;
-  }
-
-  bool keep_dims = static_cast<bool>(GetKeepDims());
-  std::vector<int> in_shape = input->shape();
-  std::vector<int> out_shape;
-  const auto &axes = GetAxis();
-  auto num_axes = axes.size();
-  // reduce on all axes
-  if (num_axes == 0) {
-    if (keep_dims) {
-      for (size_t i = 0; i < in_shape.size(); i++) {
-        out_shape.push_back(1);
-      }
-    }
-    output->set_shape(out_shape);
-    output->set_data_type(input->data_type());
-    return RET_OK;
-  }
-  // reduce on selected axes
-  for (size_t i = 0; i < in_shape.size(); i++) {
-    bool reduce_axis = false;
-    for (size_t idx = 0; idx < num_axes; ++idx) {
-      if (static_cast<size_t>(axes.at(idx)) == i) {
-        reduce_axis = true;
-        break;
-      }
-    }
-    if (reduce_axis) {
-      if (keep_dims) {
-        out_shape.push_back(1);
-      }
-    } else {
-      out_shape.push_back(in_shape.at(i));
-    }
-  }
-  output->set_shape(out_shape);
-  return RET_OK;
-}
-} // namespace lite
-} // namespace mindspore
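The removed `Mean::InferShape` boils down to a shape rule shared with `Reduce`: axes listed in `axis` are dropped, or kept as size 1 when `keepDims` is set, and an empty axis list reduces every dimension. A standalone sketch of that rule (hypothetical `ReduceOutputShape` helper; the real code above additionally propagates data type and format):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical helper mirroring the shape logic of the removed Mean::InferShape.
std::vector<int> ReduceOutputShape(const std::vector<int> &in_shape, const std::vector<int> &axes, bool keep_dims) {
  std::vector<int> out_shape;
  if (axes.empty()) {  // reduce on all axes
    if (keep_dims) {
      out_shape.assign(in_shape.size(), 1);
    }
    return out_shape;  // empty shape (scalar) when keep_dims is false
  }
  for (size_t i = 0; i < in_shape.size(); ++i) {
    bool reduce_axis = false;
    for (int axis : axes) {
      if (static_cast<size_t>(axis) == i) {
        reduce_axis = true;
        break;
      }
    }
    if (reduce_axis) {
      if (keep_dims) {
        out_shape.push_back(1);
      }
    } else {
      out_shape.push_back(in_shape[i]);
    }
  }
  return out_shape;
}

// Example: ReduceOutputShape({4, 3, 2}, {1}, false) == {4, 2}
//          ReduceOutputShape({4, 3, 2}, {1}, true)  == {4, 1, 2}
```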
@@ -1,48 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_MEAN_H_
-#define LITE_MINDSPORE_LITE_C_OPS_MEAN_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include <memory>
-
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Mean : public PrimitiveC {
- public:
-  Mean() = default;
-  ~Mean() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Mean, PrimitiveC);
-  explicit Mean(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetAxis(const std::vector<int> &axis);
-  void SetKeepDims(bool keep_dims);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  std::vector<int> GetAxis() const;
-  bool GetKeepDims() const;
-};
-} // namespace lite
-} // namespace mindspore
-
-#endif // LITE_MINDSPORE_LITE_C_OPS_MEAN_H_
@@ -1,52 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/mean.h"
-#include "src/ops/primitive_c.h"
-#include "src/ops/populate/populate_register.h"
-#include "nnacl/reduce_parameter.h"
-
-namespace mindspore {
-namespace lite {
-
-OpParameter *PopulateMeanParameter(const mindspore::lite::PrimitiveC *primitive) {
-  ReduceParameter *mean_param = reinterpret_cast<ReduceParameter *>(malloc(sizeof(ReduceParameter)));
-  if (mean_param == nullptr) {
-    MS_LOG(ERROR) << "malloc ReduceParameter failed.";
-    return nullptr;
-  }
-  memset(mean_param, 0, sizeof(ReduceParameter));
-  mean_param->op_parameter_.type_ = primitive->Type();
-  auto mean = reinterpret_cast<mindspore::lite::Mean *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
-  mean_param->keep_dims_ = mean->GetKeepDims();
-  auto axisVector = mean->GetAxis();
-  if (axisVector.size() > REDUCE_MAX_AXES_NUM) {
-    MS_LOG(ERROR) << "Reduce axes size " << axisVector.size() << " exceed limit " << REDUCE_MAX_AXES_NUM;
-    free(mean_param);
-    return nullptr;
-  }
-  mean_param->num_axes_ = static_cast<int>(axisVector.size());
-  int i = 0;
-  for (auto iter = axisVector.begin(); iter != axisVector.end(); iter++) {
-    mean_param->axes_[i++] = *iter;
-  }
-  mean_param->mode_ = static_cast<int>(schema::ReduceMode_ReduceMean);
-  return reinterpret_cast<OpParameter *>(mean_param);
-}
-Registry MeanParameterRegistry(schema::PrimitiveType_Mean, PopulateMeanParameter);
-
-} // namespace lite
-} // namespace mindspore
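With `PopulateMeanParameter` deleted, a mean reduction reaches the runtime only through the generic `Reduce` populate path, which fills the same `ReduceParameter` with `mode_` set to `ReduceMode_ReduceMean`. Below is a hedged sketch of that configuration, reusing only the fields and constants visible in the removed file above; the function name is hypothetical, the includes mirror the deleted file, and this is not the actual Reduce populate code:

```cpp
#include <cstdlib>
#include <cstring>
#include <vector>

#include "src/ops/primitive_c.h"    // as in the deleted file; provides the schema:: enums used below
#include "nnacl/reduce_parameter.h"

// Hypothetical illustration: build the ReduceParameter that a mean reduction
// now relies on, the same way the deleted PopulateMeanParameter did.
ReduceParameter *MakeMeanAsReduceParameter(const std::vector<int> &axes, bool keep_dims) {
  auto *param = reinterpret_cast<ReduceParameter *>(malloc(sizeof(ReduceParameter)));
  if (param == nullptr) {
    return nullptr;
  }
  memset(param, 0, sizeof(ReduceParameter));
  if (axes.size() > REDUCE_MAX_AXES_NUM) {  // same axis-count limit as above
    free(param);
    return nullptr;
  }
  param->op_parameter_.type_ = schema::PrimitiveType_Reduce;  // no PrimitiveType_Mean any more
  param->keep_dims_ = keep_dims;
  param->num_axes_ = static_cast<int>(axes.size());
  for (size_t i = 0; i < axes.size(); ++i) {
    param->axes_[i] = axes[i];
  }
  param->mode_ = static_cast<int>(schema::ReduceMode_ReduceMean);
  return param;
}
```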
@@ -67,7 +67,6 @@
 #include "src/ops/slice.h"
 #include "src/ops/squeeze.h"
 #include "src/ops/flatten.h"
-#include "src/ops/mean.h"
 #include "src/ops/nhwc2nchw.h"
 #include "src/ops/stack.h"
 #include "src/ops/crop.h"
@@ -710,8 +709,6 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
       return new (std::nothrow) Squeeze(primitive);
     case schema::PrimitiveType_Flatten:
       return new (std::nothrow) Flatten(primitive);
-    case schema::PrimitiveType_Mean:
-      return new (std::nothrow) Mean(primitive);
     case schema::PrimitiveType_Stack:
       return new (std::nothrow) Stack(primitive);
     case schema::PrimitiveType_Crop:
@@ -26,7 +26,6 @@ using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Mean;
 using mindspore::schema::PrimitiveType_Reduce;
 
 namespace mindspore::kernel {
@@ -206,17 +205,6 @@ kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector<lite::Tensor *> &
                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                              const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Mean);
-  if (opParameter == nullptr) {
-    MS_LOG(ERROR) << "Reduce opParameter nullptr";
-    return nullptr;
-  }
-  if (desc.type != schema::PrimitiveType_Mean) {
-    MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Mean, got " << desc.type;
-    free(opParameter);
-    return nullptr;
-  }
   auto *kernel = new (std::nothrow) ReduceCPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
@@ -236,6 +224,4 @@ kernel::LiteKernel *CpuMeanFp32KernelCreator(const std::vector<lite::Tensor *> &
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
 REG_KERNEL(kCPU, kNumberTypeInt, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
 REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reduce, CpuReduceFp32KernelCreator)
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Mean, CpuMeanFp32KernelCreator)
-
 } // namespace mindspore::kernel
@@ -28,7 +28,6 @@ using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Mean;
 using mindspore::schema::PrimitiveType_Reduce;
 using mindspore::schema::ReduceMode;
 using mindspore::schema::ReduceMode_ReduceMax;
@@ -191,17 +190,6 @@ kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector<lite::Tensor *> &
                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                              const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                              const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Mean);
-  if (opParameter == nullptr) {
-    MS_LOG(ERROR) << "Reduce opParameter nullptr";
-    return nullptr;
-  }
-  if (desc.type != schema::PrimitiveType_Mean) {
-    MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Mean, got " << desc.type;
-    free(opParameter);
-    return nullptr;
-  }
   auto *kernel = new (std::nothrow) ReduceFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
@@ -219,5 +207,4 @@ kernel::LiteKernel *CpuMeanFp16KernelCreator(const std::vector<lite::Tensor *> &
 }
 
 REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Reduce, CpuReduceFp16KernelCreator)
-REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mean, CpuMeanFp16KernelCreator)
 } // namespace mindspore::kernel
@@ -28,7 +28,6 @@ using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Mean;
 using mindspore::schema::PrimitiveType_Reduce;
 using mindspore::schema::ReduceMode;
 using mindspore::schema::ReduceMode_ReduceAll;
@@ -35,7 +35,6 @@ using mindspore::schema::ReduceMode_ReduceSum;
 using mindspore::schema::ReduceMode_ReduceSumSquare;
 
 using mindspore::kernel::KERNEL_ARCH::kCPU;
-using mindspore::schema::PrimitiveType_Mean;
 using mindspore::schema::PrimitiveType_Reduce;
 
 namespace mindspore::kernel {
@@ -536,17 +535,6 @@ kernel::LiteKernel *CpuReduceInt8KernelCreator(const std::vector<lite::Tensor *>
                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                                const lite::InnerContext *ctx, const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Reduce);
-  if (opParameter == nullptr) {
-    MS_LOG(ERROR) << "Reduce opParameter nullptr";
-    return nullptr;
-  }
-  if (desc.type != schema::PrimitiveType_Reduce) {
-    MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Reduce, got " << desc.type;
-    free(opParameter);
-    return nullptr;
-  }
   auto *kernel = new (std::nothrow) ReduceInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed.";
@@ -563,6 +551,4 @@ kernel::LiteKernel *CpuReduceInt8KernelCreator(const std::vector<lite::Tensor *>
   return kernel;
 }
 REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Reduce, CpuReduceInt8KernelCreator)
-REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Mean, CpuReduceInt8KernelCreator)
-
 } // namespace mindspore::kernel
@@ -28,7 +28,6 @@ using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_NULL_PTR;
 using mindspore::lite::RET_OK;
 using mindspore::lite::RET_PARAM_INVALID;
-using mindspore::schema::PrimitiveType_Mean;
 using mindspore::schema::PrimitiveType_Reduce;
 using mindspore::schema::ReduceMode;
 using mindspore::schema::ReduceMode_ReduceMax;
@@ -183,8 +182,6 @@ int ReduceOpenCLKernel::Run() {
   return mindspore::lite::RET_OK;
 }
 
-REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Mean, OpenCLKernelCreator<ReduceOpenCLKernel>)
-REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Mean, OpenCLKernelCreator<ReduceOpenCLKernel>)
 REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Reduce, OpenCLKernelCreator<ReduceOpenCLKernel>)
 REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Reduce, OpenCLKernelCreator<ReduceOpenCLKernel>)
 } // namespace mindspore::kernel
@@ -103,7 +103,6 @@ static const std::vector<schema::PrimitiveType> int8OpList = {schema::PrimitiveT
     schema::PrimitiveType_BatchToSpace,
     schema::PrimitiveType_BatchToSpaceND,
     schema::PrimitiveType_Reduce,
-    schema::PrimitiveType_Mean,
     schema::PrimitiveType_Round,
     schema::PrimitiveType_Floor,
     schema::PrimitiveType_Ceil,
@@ -520,7 +520,6 @@ QuantParamCalcRegister::QuantParamCalcRegister() {
   _registerMap[schema::PrimitiveType_RealDiv] = std::make_shared<CalcRealDiv>();
   _registerMap[schema::PrimitiveType_Reduce] = commonCalcer;
   _registerMap[schema::PrimitiveType_BiasAdd] = std::make_shared<BiasAddCalcer>();
-  _registerMap[schema::PrimitiveType_Mean] = linearCalcer;
   _registerMap[schema::PrimitiveType_Transpose] = linearCalcer;
   _registerMap[schema::PrimitiveType_MatMul] = std::make_shared<ConvCalcer>();
   _registerMap[schema::PrimitiveType_FullConnection] = std::make_shared<ConvCalcer>();