forked from mindspore-Ecosystem/mindspore
!5857 [MS][LITE][Develop] support negative, negative grad, log grad
Merge pull request !5857 from chenjianping/lite_dev2
commit acd896cdea
@@ -45,8 +45,14 @@ struct Model {
  /// \return Pointer of MindSpore Lite Model.
  static Model *Import(const char *model_buf, size_t size);

  /// \brief Free all the temporary buffer
  /// \brief Free meta graph temporary buffer
  void Free();

  /// \brief Free all temporary buffer
  void Destroy();

  /// \brief Model destruct, free all memory
  ~Model();
};
}  // namespace mindspore::lite
@@ -113,3 +113,10 @@ int ElementCeil(float *input, float *output, int number) {
  }
  return NNACL_OK;
}

int ElementNegative(float *input, float *output, int element_size) {
  for (int i = 0; i < element_size; ++i) {
    output[i] = -input[i];
  }
  return NNACL_OK;
}
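ElementNegative simply flips the sign of every element. Below is a standalone sketch of the expected behavior; the driver and the re-implemented helper are hypothetical and only mirror the nnacl loop above, which returns NNACL_OK on success.

#include <cstdio>

// Illustration only; mirrors the nnacl ElementNegative loop above.
static int ElementNegativeSketch(const float *input, float *output, int element_size) {
  for (int i = 0; i < element_size; ++i) {
    output[i] = -input[i];
  }
  return 0;  // the real function signals success with NNACL_OK
}

int main() {
  float in[4] = {1.0f, -2.5f, 0.0f, 3.75f};
  float out[4] = {0};
  ElementNegativeSketch(in, out, 4);
  for (int i = 0; i < 4; ++i) printf("%g ", out[i]);  // prints: -1 2.5 -0 -3.75
  return 0;
}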
@@ -47,6 +47,8 @@ int ElementRound(float *input, float *output, int element_size);
int ElementFloor(float *input, float *output, int element_size);

int ElementCeil(float *input, float *output, int number);

int ElementNegative(float *input, float *output, int element_size);
#ifdef __cplusplus
}
#endif
@@ -199,6 +199,9 @@ union PrimitiveType {
  Proposal,
  Custom,
  BlackBox,
  NegGrad,
  LogGrad,
  BatchToSpaceND,
}

enum QuantType: int {
@@ -481,6 +481,9 @@ table Abs {
table Neg {
}

table NegGrad {
}

table Exp {
  base : float = -1.0;
  scale : float = 1.0;
@@ -505,6 +508,9 @@ table Ceil {
table Log {
}

table LogGrad {
}

table Tan {
}
@@ -749,6 +755,11 @@ table BatchToSpace {
  crops: [int];
}

table BatchToSpaceND {
  blockShape: [int];
  crops: [int];
}

table AddN {
  N: int;
}
@@ -124,12 +124,21 @@ void Model::Free() {
    free(this->buf);
    this->buf = nullptr;
  }
}

void Model::Destroy() {
  Free();
  auto nodes_size = this->nodes_.size();
  for (size_t i = 0; i < nodes_size; ++i) {
    auto node = this->nodes_[i];
    MS_ASSERT(node != nullptr);
    MS_ASSERT(node->primitive_ != nullptr);
    delete node->primitive_;
    node->primitive_ = nullptr;
    delete node;
  }
  this->nodes_.clear();
}

Model::~Model() { Destroy(); }
}  // namespace mindspore::lite
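Taken together with the header change above: Free() now releases only the meta-graph buffer, while Destroy() additionally deletes the unpacked nodes, and the destructor delegates to Destroy(). A minimal usage sketch, not part of the commit; it assumes the usual import-compile-run flow and elides session building and error handling:

#include "include/model.h"

// Hedged sketch: the caller owns model_buf; identifiers come from include/model.h above.
void Example(const char *model_buf, size_t size) {
  auto *model = mindspore::lite::Model::Import(model_buf, size);
  if (model == nullptr) {
    return;
  }
  // ... build and compile a session from `model` here ...
  model->Free();  // drop the meta-graph buffer once it is no longer needed
  // ... run inference through the session ...
  delete model;   // ~Model() calls Destroy(), which also frees the nodes
}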
@@ -0,0 +1,37 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/ops/log_grad.h"

namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int LogGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto attr = primitive->value_as_LogGrad();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "value_as_LogGrad returned nullptr";
    return RET_ERROR;
  }
  auto val_offset = schema::CreateLogGrad(*fbb);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogGrad, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}
#endif
}  // namespace lite
}  // namespace mindspore
@@ -0,0 +1,42 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_
#define LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "src/ops/primitive_c.h"

namespace mindspore {
namespace lite {
class LogGrad : public PrimitiveC {
 public:
#ifdef PRIMITIVE_WRITEABLE
  MS_DECLARE_PARENT(LogGrad, PrimitiveC);
  LogGrad() = default;
  explicit LogGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else
  LogGrad() = default;

  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
};
}  // namespace lite
}  // namespace mindspore
#endif  // LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_
@@ -0,0 +1,33 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/ops/neg.h"

namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int Neg::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto val_offset = schema::CreateNeg(*fbb);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Neg, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}

#endif
}  // namespace lite
}  // namespace mindspore
@@ -0,0 +1,43 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_H_
#define LITE_MINDSPORE_LITE_C_OPS_NEG_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "src/ops/arithmetic_self.h"

namespace mindspore {
namespace lite {
class Neg : public ArithmeticSelf {
 public:
#ifdef PRIMITIVE_WRITEABLE
  MS_DECLARE_PARENT(Neg, ArithmeticSelf);
  Neg() = default;
  explicit Neg(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
  Neg() = default;

  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
};
}  // namespace lite
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_NEG_H_
@@ -0,0 +1,33 @@
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/ops/neg_grad.h"

namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int NegGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto val_offset = schema::CreateNegGrad(*fbb);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NegGrad, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}

#endif
}  // namespace lite
}  // namespace mindspore
@@ -0,0 +1,43 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_
#define LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_

#include <vector>
#include <set>
#include <cmath>
#include "ir/dtype/type_id.h"
#include "src/ops/arithmetic_self.h"

namespace mindspore {
namespace lite {
class NegGrad : public ArithmeticSelf {
 public:
#ifdef PRIMITIVE_WRITEABLE
  MS_DECLARE_PARENT(NegGrad, ArithmeticSelf);
  NegGrad() = default;
  explicit NegGrad(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else
  NegGrad() = default;

  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
#endif
};
}  // namespace lite
}  // namespace mindspore

#endif  // LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_
@@ -120,6 +120,7 @@
#include "src/ops/quant.h"
#include "src/ops/tuple_get_item.h"
#include "src/ops/l2_norm.h"
#include "src/ops/neg.h"
#include "src/ops/sparse_to_dense.h"
#include "src/ops/detection_post_process.h"
#include "src/ops/dropout.h"
@@ -128,6 +129,7 @@
#endif

#ifdef SUPPORT_TRAIN
#include "src/ops/neg_grad.h"
#include "src/ops/activation_grad.h"
#include "src/ops/apply_momentum.h"
#include "src/ops/bias_grad.h"
@@ -141,6 +143,7 @@
#include "src/ops/arithmetic_grad.h"
#include "src/ops/depend.h"
#include "src/ops/flatten_grad.h"
#include "src/ops/log_grad.h"
#endif

namespace mindspore {
@@ -383,6 +386,10 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
    return NewPrimitiveC<BiasGrad>(prim, inputs, quantType);
  } else if (op_type == "ApplyMomentum") {
    return NewPrimitiveC<ApplyMomentum>(prim, inputs, quantType);
  } else if (op_type == "NegGrad") {
    return NewPrimitiveC<NegGrad>(prim, inputs, quantType);
  } else if (op_type == "LogGrad") {
    return NewPrimitiveC<LogGrad>(prim, inputs, quantType);
  } else if (op_type == "BatchNormGrad") {
    return NewPrimitiveC<BNGrad>(prim, inputs, quantType);
  } else if (op_type == "Conv2DGradInput") {
@@ -620,6 +627,8 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
      return new DetectionPostProcess(primitive);
    case schema::PrimitiveType_Dropout:
      return new Dropout(primitive);
    case schema::PrimitiveType_Neg:
      return new Neg(primitive);

#ifdef SUPPORT_TRAIN
    case schema::PrimitiveType_ActivationGrad:
@@ -654,6 +663,10 @@ PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) {
      return new Depend(primitive);
    case schema::PrimitiveType_FlattenGrad:
      return new FlattenGrad(primitive);
    case schema::PrimitiveType_NegGrad:
      return new NegGrad(primitive);
    case schema::PrimitiveType_LogGrad:
      return new LogGrad(primitive);
#endif

    default:
@@ -755,6 +768,8 @@ PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) {
      return NewPrimitiveC<Cos>(primitive);
    case schema::PrimitiveType_Log:
      return NewPrimitiveC<Log>(primitive);
    case schema::PrimitiveType_Neg:
      return NewPrimitiveC<Neg>(primitive);
    case schema::PrimitiveType_Sqrt:
      return NewPrimitiveC<Sqrt>(primitive);
    case schema::PrimitiveType_Rsqrt:
@@ -895,6 +910,10 @@ PrimitiveC *PrimitiveC::Create(const schema::Primitive *primitive) {
      return NewPrimitiveC<ArithmeticGrad>(primitive);
    case schema::PrimitiveType_DivGrad:
      return NewPrimitiveC<ArithmeticGrad>(primitive);
    case schema::PrimitiveType_NegGrad:
      return NewPrimitiveC<NegGrad>(primitive);
    case schema::PrimitiveType_LogGrad:
      return NewPrimitiveC<LogGrad>(primitive);
#endif
    default:
      MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(op_type);
@@ -113,6 +113,7 @@
#include "src/ops/round.h"
#include "src/ops/sparse_to_dense.h"
#include "src/ops/l2_norm.h"
#include "src/ops/neg.h"
#include "src/ops/detection_post_process.h"
#include "nnacl/op_base.h"
#include "nnacl/fp32/arg_min_max.h"
@@ -1632,6 +1633,9 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
  populate_parameter_funcs_[schema::PrimitiveType_Sin] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_Exp] = PopulateExpParameter;
  populate_parameter_funcs_[schema::PrimitiveType_Log] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_Neg] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_NegGrad] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_LogGrad] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_Square] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_Sqrt] = PopulateArithmeticSelf;
  populate_parameter_funcs_[schema::PrimitiveType_Rsqrt] = PopulateArithmeticSelf;
@@ -37,7 +37,7 @@ kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector<lite::Tensor
    MS_LOG(ERROR) << "Input opParameter is nullptr!";
    return nullptr;
  }
  MS_ASSERT(desc.type == schema::PrimitiveType_LeakyRelu);

  auto *kernel = new (std::nothrow) LeakyReluInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new LeakyReluInt8CPUKernel fail!";
@@ -169,4 +169,5 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogicalNot, CpuArithmeticSelf
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Floor, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Ceil, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Round, CpuArithmeticSelfFp32KernelCreator)
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Neg, CpuArithmeticSelfFp32KernelCreator)
}  // namespace mindspore::kernel
@@ -36,6 +36,7 @@ using mindspore::schema::PrimitiveType_Rsqrt;
using mindspore::schema::PrimitiveType_Sin;
using mindspore::schema::PrimitiveType_Sqrt;
using mindspore::schema::PrimitiveType_Square;
using mindspore::schema::PrimitiveType_Neg;
static constexpr int kPerTensor = 1;

namespace mindspore::kernel {
@@ -81,6 +82,9 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
      case PrimitiveType_Round:
        arithmeticSelf_run_ = ElementRound;
        break;
      case PrimitiveType_Neg:
        arithmeticSelf_run_ = ElementNegative;
        break;
      default:
        break;
    }
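The kernel selects its elementwise routine once, in the switch above, and stores it in the arithmeticSelf_run_ function pointer, so the hot path is a plain indirect call with no per-element dispatch. A minimal sketch of that pattern, with hypothetical names, not code from the commit:

#include <cstdio>

typedef int (*ElementOp)(const float *, float *, int);

static int ElementNegSketch(const float *in, float *out, int n) {
  for (int i = 0; i < n; ++i) out[i] = -in[i];
  return 0;
}

int main() {
  ElementOp run = ElementNegSketch;  // chosen once, e.g. in a switch on the op type
  float in[2] = {3.0f, -4.0f}, out[2];
  run(in, out, 2);                   // per-call path is one indirect call
  printf("%g %g\n", out[0], out[1]);  // prints: -3 4
  return 0;
}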
@@ -0,0 +1,107 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "nnacl/fp32/arithmetic.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_LogGrad;

namespace mindspore::kernel {
namespace {
int ArithmeticSelfGradRun(void *cdata, int thread_id) {
  MS_ASSERT(cdata != nullptr);
  auto kernel = reinterpret_cast<ArithmeticSelfGradCPUKernel *>(cdata);
  return kernel->DoArithmeticSelfGrad(thread_id);
}
}  // namespace

int ArithmeticSelfGradCPUKernel::Init() {
  auto type = Type();
  switch (type) {
    case PrimitiveType_LogGrad:
      self_grad_operation_ = ElementDiv;
      break;
    default:
      MS_LOG(ERROR) << "Unsupported type: " << type;
      return RET_ERROR;
  }
  return RET_OK;
}

int ArithmeticSelfGradCPUKernel::DoArithmeticSelfGrad(int thread_id) {
  auto dy = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
  auto in_x = reinterpret_cast<float *>(in_tensors_[1]->MutableData());
  auto dx = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
  int dy_size = in_tensors_.at(0)->ElementsNum();
  int size = MSMIN(thread_stride_, static_cast<int>(dy_size - thread_id * thread_stride_));
  if (size <= 0) {
    return RET_OK;
  }
  int offset = thread_id * thread_stride_;
  (*self_grad_operation_)(dy + offset, in_x + offset, dx + offset, size);
  return RET_OK;
}

int ArithmeticSelfGradCPUKernel::ReSize() { return RET_OK; }

int ArithmeticSelfGradCPUKernel::Run() {
  int dy_size = in_tensors_.at(0)->ElementsNum();
  op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, static_cast<int>(dy_size));
  thread_stride_ = UP_DIV(dy_size, op_parameter_->thread_num_);
  auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, ArithmeticSelfGradRun, this, op_parameter_->thread_num_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "parallel launch fail! ret: " << ret;
    return ret;
  }

  return RET_OK;
}

kernel::LiteKernel *CpuArithmeticSelfGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                           const std::vector<lite::Tensor *> &outputs,
                                                           OpParameter *param, const lite::Context *ctx,
                                                           const kernel::KernelKey &desc,
                                                           const mindspore::lite::PrimitiveC *primitive) {
  if (param == nullptr) {
    MS_LOG(ERROR) << "input parameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) ArithmeticSelfGradCPUKernel(param, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new ArithmeticSelfGradCPUKernel fail!";
    return nullptr;
  }

  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogGrad, CpuArithmeticSelfGradFp32KernelCreator)
}  // namespace mindspore::kernel
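Why Init() binds ElementDiv for LogGrad: with y = log(x), the chain rule gives dx = dy / x elementwise, so the backward pass is just a division of the incoming gradient by the forward input. A small self-contained sketch of that rule, with illustrative values not taken from the commit:

#include <cstdio>

int main() {
  float x[3]  = {1.0f, 2.0f, 4.0f};   // forward input
  float dy[3] = {1.0f, 1.0f, 1.0f};   // upstream gradient
  float dx[3];
  // The elementwise rule the kernel delegates to ElementDiv(dy, x, dx, size):
  for (int i = 0; i < 3; ++i) {
    dx[i] = dy[i] / x[i];
  }
  printf("%g %g %g\n", dx[0], dx[1], dx[2]);  // prints: 1 0.5 0.25
  return 0;
}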
@@ -0,0 +1,46 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"
#include "ir/anf.h"

namespace mindspore::kernel {

class ArithmeticSelfGradCPUKernel : public LiteKernel {
  typedef int (*ArithmeticSelfGradOperation)(float *, float *, float *, int);

 public:
  ArithmeticSelfGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                              const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                              const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~ArithmeticSelfGradCPUKernel() override {}
  int Init() override;
  int ReSize() override;
  int Run() override;
  int DoArithmeticSelfGrad(int thread_id);

 private:
  int thread_stride_;
  ArithmeticSelfGradOperation self_grad_operation_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_ARITHMETIC_SELF_GRAD_H_
@@ -0,0 +1,95 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/arm/fp32_grad/neg_grad.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"
#include "src/runtime/runtime_api.h"
#include "nnacl/fp32/arithmetic_self.h"

using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_NegGrad;

namespace mindspore::kernel {
namespace {
int NegGradRun(void *cdata, int thread_id) {
  MS_ASSERT(cdata != nullptr);
  auto kernel = reinterpret_cast<NegGradCPUKernel *>(cdata);
  return kernel->DoNegGrad(thread_id);
}
}  // namespace

int NegGradCPUKernel::Init() { return RET_OK; }

int NegGradCPUKernel::DoNegGrad(int thread_id) {
  auto dy = reinterpret_cast<float *>(in_tensors_[0]->MutableData());
  auto dx = reinterpret_cast<float *>(out_tensors_[0]->MutableData());
  int dy_size = in_tensors_.at(0)->ElementsNum();
  int size = MSMIN(thread_stride_, static_cast<int>(dy_size - thread_id * thread_stride_));
  if (size <= 0) {
    return RET_OK;
  }
  int offset = thread_id * thread_stride_;
  ElementNegative(dy + offset, dx + offset, size);
  return RET_OK;
}

int NegGradCPUKernel::ReSize() { return RET_OK; }

int NegGradCPUKernel::Run() {
  int dy_size = in_tensors_.at(0)->ElementsNum();
  op_parameter_->thread_num_ = MSMIN(op_parameter_->thread_num_, static_cast<int>(dy_size));
  thread_stride_ = UP_DIV(dy_size, op_parameter_->thread_num_);
  auto ret = ParallelLaunch(THREAD_POOL_DEFAULT, NegGradRun, this, op_parameter_->thread_num_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "parallel launch fail! ret: " << ret;
    return ret;
  }

  return RET_OK;
}

kernel::LiteKernel *CpuNegGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                const std::vector<lite::Tensor *> &outputs,
                                                OpParameter *param, const lite::Context *ctx,
                                                const kernel::KernelKey &desc,
                                                const mindspore::lite::PrimitiveC *primitive) {
  if (param == nullptr) {
    MS_LOG(ERROR) << "input parameter is nullptr!";
    return nullptr;
  }
  auto *kernel = new (std::nothrow) NegGradCPUKernel(param, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new NegGradCPUKernel fail!";
    return nullptr;
  }

  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(param->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_NegGrad, CpuNegGradFp32KernelCreator)
}  // namespace mindspore::kernel
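Both grad kernels share the same work split: Run() clamps thread_num_ to the element count, sets thread_stride_ = UP_DIV(dy_size, thread_num_), and each DoNegGrad/DoArithmeticSelfGrad invocation then handles one contiguous stride, with trailing threads getting a short or empty slice. A sketch of that arithmetic, assuming UP_DIV(a, b) is the usual (a + b - 1) / b rounding-up division from nnacl:

#include <algorithm>
#include <cstdio>

int main() {
  int dy_size = 10, thread_num = 4;
  int stride = (dy_size + thread_num - 1) / thread_num;  // assumed UP_DIV -> 3
  for (int t = 0; t < thread_num; ++t) {
    int size = std::min(stride, dy_size - t * stride);
    if (size <= 0) continue;  // mirrors the early return in DoNegGrad
    printf("thread %d handles [%d, %d)\n", t, t * stride, t * stride + size);
  }
  // prints: [0,3) [3,6) [6,9) [9,10)
  return 0;
}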
@@ -0,0 +1,44 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_

#include <vector>
#include "src/lite_kernel.h"
#include "schema/model_generated.h"
#include "ir/anf.h"

namespace mindspore::kernel {

class NegGradCPUKernel : public LiteKernel {
 public:
  explicit NegGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
                            const std::vector<lite::Tensor *> &outputs, const lite::Context *ctx,
                            const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
  ~NegGradCPUKernel() override {}
  int Init() override;
  int ReSize() override;
  int Run() override;
  int DoNegGrad(int thread_id);

 private:
  int thread_stride_;
};
}  // namespace mindspore::kernel

#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_NEG_GRAD_H_