!18164 [MS][LITE] add lite ragged_range kernel
Merge pull request !18164 from sunsuodong/ragged_range
This commit is contained in:
commit
caad7a654c
|
@ -0,0 +1,32 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "nnacl/fp16/ragged_range_fp16.h"
|
||||
|
||||
void RaggedRangeFp16(const float16_t *starts, const float16_t *limits, const float16_t *deltas, int *splits,
|
||||
float16_t *value, RaggedRangeParameter *param) {
|
||||
splits[0] = 0;
|
||||
for (int i = 0; i < param->rows; i++) {
|
||||
float16_t start = param->starts_is_scalar ? starts[0] : starts[i];
|
||||
float16_t limit = param->limits_is_scalar ? limits[0] : limits[i];
|
||||
float16_t delta = param->deltas_is_scalar ? deltas[0] : deltas[i];
|
||||
int len = MSMAX((int)ceil((float16_t)(limit - start) / delta), 0);
|
||||
splits[i + 1] = splits[i] + len;
|
||||
for (int j = 0; j < len; j++) {
|
||||
*value++ = start;
|
||||
start += delta;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_NNACL_RAGGED_RANGE_FP16_H_
|
||||
#define MINDSPORE_NNACL_RAGGED_RANGE_FP16_H_
|
||||
|
||||
#include <math.h>
|
||||
#include "nnacl/op_base.h"
|
||||
#include "nnacl/ragged_range_parameter.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void RaggedRangeFp16(const float16_t *starts, const float16_t *limits, const float16_t *deltas, int *splits,
|
||||
float16_t *value, RaggedRangeParameter *param);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // MINDSPORE_NNACL_RAGGED_RANGE_FP16_H_
|
|
@ -0,0 +1,48 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "nnacl/fp32/ragged_range_fp32.h"
|
||||
|
||||
void RaggedRangeFp32(const float *starts, const float *limits, const float *deltas, int *splits, float *value,
|
||||
RaggedRangeParameter *param) {
|
||||
splits[0] = 0;
|
||||
for (int i = 0; i < param->rows; i++) {
|
||||
float start = param->starts_is_scalar ? starts[0] : starts[i];
|
||||
float limit = param->limits_is_scalar ? limits[0] : limits[i];
|
||||
float delta = param->deltas_is_scalar ? deltas[0] : deltas[i];
|
||||
int len = MSMAX((int)ceil((float)(limit - start) / delta), 0);
|
||||
splits[i + 1] = splits[i] + len;
|
||||
for (int j = 0; j < len; j++) {
|
||||
*value++ = start;
|
||||
start += delta;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RaggedRangeInt(const int *starts, const int *limits, const int *deltas, int *splits, int *value,
|
||||
RaggedRangeParameter *param) {
|
||||
splits[0] = 0;
|
||||
for (int i = 0; i < param->rows; i++) {
|
||||
int start = param->starts_is_scalar ? starts[0] : starts[i];
|
||||
int limit = param->limits_is_scalar ? limits[0] : limits[i];
|
||||
int delta = param->deltas_is_scalar ? deltas[0] : deltas[i];
|
||||
int len = MSMAX((int)ceil((float)(limit - start) / delta), 0);
|
||||
splits[i + 1] = splits[i] + len;
|
||||
for (int j = 0; j < len; j++) {
|
||||
*value++ = start;
|
||||
start += delta;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_NNACL_RAGGED_RANGE_FP32_H_
|
||||
#define MINDSPORE_NNACL_RAGGED_RANGE_FP32_H_
|
||||
|
||||
#include <math.h>
|
||||
#include "nnacl/ragged_range_parameter.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void RaggedRangeFp32(const float *starts, const float *limits, const float *deltas, int *splits, float *value,
|
||||
RaggedRangeParameter *param);
|
||||
void RaggedRangeInt(const int *starts, const int *limits, const int *deltas, int *splits, int *value,
|
||||
RaggedRangeParameter *param);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // MINDSPORE_NNACL_RAGGED_RANGE_FP32_H_
|
|
@ -219,8 +219,9 @@ enum PrimType {
|
|||
PrimType_CumSum = 192,
|
||||
PrimType_SplitWithOverlap = 193,
|
||||
PrimType_GenOP = 194,
|
||||
PrimType_RaggedRange = 195,
|
||||
PrimType_MIN = PrimType_NONE,
|
||||
PrimType_MAX = PrimType_GenOP + 1
|
||||
PrimType_MAX = PrimType_RaggedRange + 1
|
||||
};
|
||||
|
||||
void RegInfer(int prim_type, InferShape func);
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "nnacl/infer/ragged_range_infer.h"
|
||||
#include <math.h>
|
||||
#include "nnacl/infer/infer_register.h"
|
||||
#include "nnacl/ragged_range_parameter.h"
|
||||
|
||||
int CheckInputTensor(const TensorC *const *inputs) {
|
||||
if (inputs[0]->data_ == NULL || inputs[1]->data_ == NULL || inputs[2]->data_ == NULL) {
|
||||
return NNACL_INFER_INVALID;
|
||||
}
|
||||
if (inputs[0]->shape_size_ != 0 && inputs[0]->shape_size_ != 1) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
return NNACL_OK;
|
||||
}
|
||||
|
||||
// Determines the number of ragged rows from whichever of starts/limits/deltas
// are vectors: all vector inputs must share the same length, which becomes
// *rows; if all three are scalars the op produces a single row.
// Returns NNACL_ERR when the vector lengths disagree.
int GetRows(const TensorC *const *inputs, bool starts_is_scalar, bool limits_is_scalar, bool deltas_is_scalar,
            int *rows) {
  const bool is_scalar[3] = {starts_is_scalar, limits_is_scalar, deltas_is_scalar};
  int vec_sizes[3];
  int num_vectors = 0;
  for (int i = 0; i < 3; i++) {
    if (!is_scalar[i]) {
      vec_sizes[num_vectors++] = inputs[i]->shape_[0];
    }
  }
  // Every vector input must agree on its length.
  for (int i = 1; i < num_vectors; i++) {
    if (vec_sizes[i] != vec_sizes[0]) {
      return NNACL_ERR;
    }
  }
  *rows = (num_vectors == 0) ? 1 : vec_sizes[0];
  return NNACL_OK;
}
|
||||
|
||||
// Computes the total number of range values produced across all rows, i.e.
// the required element count of the flat `value` output tensor.
// Returns NNACL_ERR for unsupported input data types.
int GetOutputValueElementNum(const TensorC *const *inputs, RaggedRangeParameter *param, int *output_value_element_num) {
  int count = 0;
  switch (inputs[0]->data_type_) {
    case kNumberTypeInt32: {
      int *starts = (int *)(inputs[0]->data_);
      int *limits = (int *)(inputs[1]->data_);
      int *deltas = (int *)(inputs[2]->data_);
      for (int i = 0; i < param->rows; i++) {
        int start = param->starts_is_scalar ? starts[0] : starts[i];
        int limit = param->limits_is_scalar ? limits[0] : limits[i];
        int delta = param->deltas_is_scalar ? deltas[0] : deltas[i];
        count += MSMAX((int)(ceil((float)(limit - start) / delta)), 0);
      }
    } break;
    case kNumberTypeFloat32: {
      float *starts = (float *)(inputs[0]->data_);
      float *limits = (float *)(inputs[1]->data_);
      float *deltas = (float *)(inputs[2]->data_);
      for (int i = 0; i < param->rows; i++) {
        // BUGFIX: these were declared `int`, truncating the float inputs.
        // That made the count disagree with RaggedRangeFp32() (e.g. start=0,
        // limit=4.5, delta=1 -> kernel writes 5 values but only 4 were
        // counted), under-allocating the output tensor.
        float start = param->starts_is_scalar ? starts[0] : starts[i];
        float limit = param->limits_is_scalar ? limits[0] : limits[i];
        float delta = param->deltas_is_scalar ? deltas[0] : deltas[i];
        count += MSMAX((int)(ceil((float)(limit - start) / delta)), 0);
      }
    } break;
    default: {
      return NNACL_ERR;
    }
  }
  *output_value_element_num = count;
  return NNACL_OK;
}
|
||||
|
||||
// Infers output shapes for RaggedRange: output 0 is the int32 splits vector
// of length rows + 1; output 1 is the flat values vector whose length is the
// sum of all per-row range lengths. Also fills the RaggedRangeParameter
// broadcast flags and row count consumed later by the compute kernels.
int RaggedRangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                          OpParameter *parameter) {
#ifdef Debug
  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }
#endif

  // RaggedRange takes exactly (starts, limits, deltas) and produces
  // (splits, values).
  if (inputs_size != 3 || outputs_size != 2) {
    return NNACL_INPUT_TENSOR_ERROR;
  }

  // Data types/formats are set before the InferFlag check so they are valid
  // even when shape inference has to be deferred to runtime.
  outputs[0]->data_type_ = kNumberTypeInt32;
  outputs[0]->format_ = inputs[0]->format_;
  SetDataTypeFormat(outputs[1], inputs[0]);

  if (!InferFlag(inputs, inputs_size)) {
    return NNACL_INFER_INVALID;
  }
  int ret = CheckInputTensor(inputs);
  if (ret != NNACL_OK) {
    return ret;
  }
  RaggedRangeParameter *param = (RaggedRangeParameter *)parameter;
  // Rank 0 means the input is broadcast to every row.
  param->starts_is_scalar = inputs[0]->shape_size_ == 0;
  param->limits_is_scalar = inputs[1]->shape_size_ == 0;
  param->deltas_is_scalar = inputs[2]->shape_size_ == 0;
  ret = GetRows(inputs, param->starts_is_scalar, param->limits_is_scalar, param->deltas_is_scalar, &param->rows);
  if (ret != NNACL_OK) {
    return ret;
  }
  int output_value_element_num;
  ret = GetOutputValueElementNum(inputs, param, &output_value_element_num);
  if (ret != NNACL_OK) {
    return ret;
  }
  outputs[0]->shape_size_ = 1;
  outputs[0]->shape_[0] = param->rows + 1;
  outputs[1]->shape_size_ = 1;
  outputs[1]->shape_[0] = output_value_element_num;
  return NNACL_OK;
}
|
||||
|
||||
REG_INFER(RaggedRange, PrimType_RaggedRange, RaggedRangeInferShape)
|
|
@ -0,0 +1,32 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_NNACL_RAGGED_RANGE_INFER_H
|
||||
#define MINDSPORE_NNACL_RAGGED_RANGE_INFER_H
|
||||
|
||||
#include "nnacl/infer/common_infer.h"
|
||||
#include "nnacl/fp32/ragged_range_fp32.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int RaggedRangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
|
||||
OpParameter *parameter);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // MINDSPORE_NNACL_RAGGED_RANGE_INFER_H
|
|
@ -0,0 +1,32 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef MINDSPORE_NNACL_RAGGED_RANGE_PARAMETER_H_
|
||||
#define MINDSPORE_NNACL_RAGGED_RANGE_PARAMETER_H_
|
||||
|
||||
#include "nnacl/op_base.h"
|
||||
|
||||
// Runtime parameter for the RaggedRange op, filled in by shape inference
// (RaggedRangeInferShape) and consumed by the fp16/fp32/int kernels.
typedef struct RaggedRangeParameter {
  // Primitive parameter
  OpParameter op_parameter_;
  // Other parameter
  int rows;               // number of ragged rows (vector input length, or 1)
  bool starts_is_scalar;  // true: starts is rank-0 and broadcast to all rows
  bool limits_is_scalar;  // true: limits is rank-0 and broadcast to all rows
  bool deltas_is_scalar;  // true: deltas is rank-0 and broadcast to all rows
} RaggedRangeParameter;
|
||||
|
||||
#endif // MINDSPORE_NNACL_RAGGED_RANGE_PARAMETER_H_
|
|
@ -0,0 +1,23 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "ops/ragged_range.h"
|
||||
#include "ops/primitive_c.h"
|
||||
|
||||
namespace mindspore {
namespace ops {
// Register the RaggedRange primitive under its op name so it can be looked
// up and constructed by the primitive factory.
REGISTER_PRIMITIVE_C(kNameRaggedRange, RaggedRange);
}  // namespace ops
}  // namespace mindspore
|
|
@ -0,0 +1,41 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_CORE_OPS_RAGGED_RANGE_H_
|
||||
#define MINDSPORE_CORE_OPS_RAGGED_RANGE_H_
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include "ops/primitive_c.h"
|
||||
#include "abstract/abstract_value.h"
|
||||
#include "utils/check_convert_utils.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace ops {
|
||||
constexpr auto kNameRaggedRange = "RaggedRange";
|
||||
// Core-ops primitive for RaggedRange. The op carries no attributes: its
// behavior is fully determined by its three inputs (starts, limits, deltas).
class RaggedRange : public PrimitiveC {
 public:
  // Constructs the primitive under its registered name.
  RaggedRange() : PrimitiveC(kNameRaggedRange) {}
  ~RaggedRange() = default;
  MS_DECLARE_PARENT(RaggedRange, PrimitiveC);
  // No attributes to initialize.
  void Init() {}
};
|
||||
|
||||
using PrimRaggedRangePtr = std::shared_ptr<RaggedRange>;
|
||||
} // namespace ops
|
||||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_CORE_OPS_RAGGED_RANGE_H_
|
|
@ -273,9 +273,6 @@ if(ENABLE_MODEL_OBF)
|
|||
endif()
|
||||
|
||||
if(MSLITE_ENABLE_CONVERTER)
|
||||
if(PLATFORM_ARM)
|
||||
MESSAGE(FATAL_ERROR "Cannot build converter in arm platform")
|
||||
endif()
|
||||
include_directories(${PYTHON_INCLUDE_DIRS})
|
||||
include(${TOP_DIR}/cmake/external_libs/eigen.cmake)
|
||||
include(${TOP_DIR}/cmake/external_libs/protobuf.cmake)
|
||||
|
|
|
@ -212,6 +212,7 @@ union PrimitiveType {
|
|||
CumSum,
|
||||
SplitWithOverlap,
|
||||
GenOP,
|
||||
RaggedRange,
|
||||
}
|
||||
|
||||
table Abs {
|
||||
|
@ -1163,3 +1164,6 @@ table GenOP {
|
|||
reduce_to_end: bool;
|
||||
coeff: float;
|
||||
}
|
||||
|
||||
table RaggedRange {
|
||||
}
|
||||
|
|
|
@ -211,6 +211,7 @@ OP_TYPE(Custom)
|
|||
OP_TYPE(CumSum)
|
||||
OP_TYPE(SplitWithOverlap)
|
||||
OP_TYPE(GenOP)
|
||||
OP_TYPE(RaggedRange)
|
||||
OP_TYPE_DEF_END(PrimitiveType)
|
||||
|
||||
OP_SCHEMA_DEF(Abs)
|
||||
|
@ -1162,3 +1163,6 @@ OP_ATTR_ONLY(reduce_mode, ReduceMode)
|
|||
OP_ATTR_ONLY(reduce_to_end, bool)
|
||||
OP_ATTR_ONLY(coeff, float)
|
||||
OP_SCHEMA_DEF_ONLY_END(GenOP)
|
||||
|
||||
OP_SCHEMA_DEF(RaggedRange)
|
||||
OP_SCHEMA_DEF_END(RaggedRange)
|
||||
|
|
|
@ -113,6 +113,7 @@
|
|||
#include "ops/prior_box.h"
|
||||
#include "ops/proposal.h"
|
||||
#include "ops/quant_dtype_cast.h"
|
||||
#include "ops/ragged_range.h"
|
||||
#include "ops/range.h"
|
||||
#include "ops/rank.h"
|
||||
#include "ops/real_div.h"
|
||||
|
@ -363,6 +364,7 @@ FUNC_MSOP2SCHEMAOP_DECLARE(PowerGrad)
|
|||
FUNC_MSOP2SCHEMAOP_DECLARE(PReLUFusion)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(PriorBox)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(Proposal)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(RaggedRange)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(Rank)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(Range)
|
||||
FUNC_MSOP2SCHEMAOP_DECLARE(Rank)
|
||||
|
|
|
@ -493,6 +493,10 @@ std::unique_ptr<schema::PrimitiveT> QuantDTypeCastPrimitiveCreator(const AnfNode
|
|||
auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::QuantDTypeCast>>(node);
|
||||
return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
|
||||
}
|
||||
std::unique_ptr<schema::PrimitiveT> RaggedRangePrimitiveCreator(const AnfNodePtr &node) {
|
||||
auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::RaggedRange>>(node);
|
||||
return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
|
||||
}
|
||||
std::unique_ptr<schema::PrimitiveT> RangePrimitiveCreator(const AnfNodePtr &node) {
|
||||
auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Range>>(node);
|
||||
return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
|
||||
|
@ -908,6 +912,7 @@ RegistryMSOps g_powFusionPrimitiveCreatorRegistry("PowFusion", PowFusionPrimitiv
|
|||
RegistryMSOps g_pReLUFusionPrimitiveCreatorRegistry("PReLUFusion", PReLUFusionPrimitiveCreator);
|
||||
RegistryMSOps g_RandomStandardNormalPrimitiveCreatorRegistry("RandomStandardNormal",
|
||||
RandomStandardNormalPrimitiveCreator);
|
||||
RegistryMSOps g_raggedRangePrimitiveCreatorRegistry("RaggedRange", RaggedRangePrimitiveCreator);
|
||||
RegistryMSOps g_rangePrimitiveCreatorRegistry("Range", RangePrimitiveCreator);
|
||||
RegistryMSOps g_rankPrimitiveCreatorRegistry("Rank", RankPrimitiveCreator);
|
||||
RegistryMSOps g_reciprocalPrimitiveCreatorRegistry("Reciprocal", ReciprocalPrimitiveCreator);
|
||||
|
|
|
@ -0,0 +1,38 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "src/ops/populate/populate_register.h"
|
||||
#include "nnacl/fp32/ragged_range_fp32.h"
|
||||
using mindspore::schema::PrimitiveType_RaggedRange;
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
// Allocates a zero-initialized RaggedRangeParameter for the given flatbuffer
// primitive. The broadcast flags and row count are filled later by shape
// inference; only the op type is taken from the primitive here.
// Returns nullptr when the allocation fails.
OpParameter *PopulateRaggedRangeParameter(const void *prim) {
  auto primitive = static_cast<const schema::Primitive *>(prim);
  MS_ASSERT(primitive != nullptr);

  auto *ragged_param = reinterpret_cast<RaggedRangeParameter *>(malloc(sizeof(RaggedRangeParameter)));
  if (ragged_param == nullptr) {
    MS_LOG(ERROR) << "malloc RaggedRangeParameter failed.";
    return nullptr;
  }
  memset(ragged_param, 0, sizeof(RaggedRangeParameter));

  ragged_param->op_parameter_.type_ = primitive->value_type();
  return reinterpret_cast<OpParameter *>(ragged_param);
}
|
||||
REG_POPULATE(PrimitiveType_RaggedRange, PopulateRaggedRangeParameter, SCHEMA_CUR)
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
|
@ -0,0 +1,46 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "src/runtime/kernel/arm/fp16/ragged_range_fp16.h"
|
||||
#include <vector>
|
||||
#include "nnacl/fp16/ragged_range_fp16.h"
|
||||
#include "src/kernel_registry.h"
|
||||
#include "include/errorcode.h"
|
||||
|
||||
using mindspore::kernel::KERNEL_ARCH;
|
||||
using mindspore::lite::KernelRegistrar;
|
||||
using mindspore::lite::RET_OK;
|
||||
using mindspore::schema::PrimitiveType_RaggedRange;
|
||||
|
||||
namespace mindspore::kernel {
|
||||
int RaggedRangeFp16CPUKernel::Init() {
|
||||
if (!InferShapeDone()) {
|
||||
return RET_OK;
|
||||
}
|
||||
return ReSize();
|
||||
}
|
||||
|
||||
// Nothing is cached per-shape, so resizing is a no-op.
int RaggedRangeFp16CPUKernel::ReSize() { return RET_OK; }
|
||||
|
||||
int RaggedRangeFp16CPUKernel::Run() {
|
||||
RaggedRangeFp16(
|
||||
static_cast<float16_t *>(in_tensors_.at(0)->data_c()), static_cast<float16_t *>(in_tensors_.at(1)->data_c()),
|
||||
static_cast<float16_t *>(in_tensors_.at(2)->data_c()), static_cast<int *>(out_tensors_.at(0)->data_c()),
|
||||
static_cast<float16_t *>(out_tensors_.at(1)->data_c()), reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_RaggedRange, LiteKernelCreator<RaggedRangeFp16CPUKernel>)
|
||||
} // namespace mindspore::kernel
|
|
@ -0,0 +1,36 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_RAGGED_RANGE_FP16_H_
|
||||
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_RAGGED_RANGE_FP16_H_
|
||||
|
||||
#include <vector>
|
||||
#include "src/inner_kernel.h"
|
||||
|
||||
namespace mindspore::kernel {
|
||||
class RaggedRangeFp16CPUKernel : public InnerKernel {
|
||||
public:
|
||||
explicit RaggedRangeFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
|
||||
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
|
||||
: InnerKernel(parameter, inputs, outputs, ctx) {}
|
||||
~RaggedRangeFp16CPUKernel() override = default;
|
||||
|
||||
int Init() override;
|
||||
int ReSize() override;
|
||||
int Run() override;
|
||||
};
|
||||
} // namespace mindspore::kernel
|
||||
|
||||
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_RAGGED_RANGE_FP16_H_
|
|
@ -0,0 +1,54 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "src/runtime/kernel/arm/fp32/ragged_range_fp32.h"
|
||||
#include <vector>
|
||||
#include "nnacl/fp32/ragged_range_fp32.h"
|
||||
#include "src/kernel_registry.h"
|
||||
#include "include/errorcode.h"
|
||||
|
||||
using mindspore::kernel::KERNEL_ARCH;
|
||||
using mindspore::lite::KernelRegistrar;
|
||||
using mindspore::lite::RET_OK;
|
||||
using mindspore::schema::PrimitiveType_RaggedRange;
|
||||
|
||||
namespace mindspore::kernel {
|
||||
int RaggedRangeCPUKernel::Init() {
|
||||
if (!InferShapeDone()) {
|
||||
return RET_OK;
|
||||
}
|
||||
return ReSize();
|
||||
}
|
||||
|
||||
// Nothing is cached per-shape, so resizing is a no-op.
int RaggedRangeCPUKernel::ReSize() { return RET_OK; }
|
||||
|
||||
int RaggedRangeCPUKernel::Run() {
|
||||
if (in_tensors_[0]->data_type() == kNumberTypeFloat32) {
|
||||
RaggedRangeFp32(
|
||||
static_cast<float *>(in_tensors_.at(0)->data_c()), static_cast<float *>(in_tensors_.at(1)->data_c()),
|
||||
static_cast<float *>(in_tensors_.at(2)->data_c()), static_cast<int *>(out_tensors_.at(0)->data_c()),
|
||||
static_cast<float *>(out_tensors_.at(1)->data_c()), reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
|
||||
} else {
|
||||
RaggedRangeInt(static_cast<int *>(in_tensors_.at(0)->data_c()), static_cast<int *>(in_tensors_.at(1)->data_c()),
|
||||
static_cast<int *>(in_tensors_.at(2)->data_c()), static_cast<int *>(out_tensors_.at(0)->data_c()),
|
||||
static_cast<int *>(out_tensors_.at(1)->data_c()),
|
||||
reinterpret_cast<RaggedRangeParameter *>(op_parameter_));
|
||||
}
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_RaggedRange, LiteKernelCreator<RaggedRangeCPUKernel>)
|
||||
REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_RaggedRange, LiteKernelCreator<RaggedRangeCPUKernel>)
|
||||
} // namespace mindspore::kernel
|
|
@ -0,0 +1,36 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_RAGGED_RANGE_FP32_H_
|
||||
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_RAGGED_RANGE_FP32_H_
|
||||
|
||||
#include <vector>
|
||||
#include "src/inner_kernel.h"
|
||||
|
||||
namespace mindspore::kernel {
|
||||
class RaggedRangeCPUKernel : public InnerKernel {
|
||||
public:
|
||||
explicit RaggedRangeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
|
||||
const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
|
||||
: InnerKernel(parameter, inputs, outputs, ctx) {}
|
||||
~RaggedRangeCPUKernel() override = default;
|
||||
|
||||
int Init() override;
|
||||
int ReSize() override;
|
||||
int Run() override;
|
||||
};
|
||||
} // namespace mindspore::kernel
|
||||
|
||||
#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_RAGGED_RANGE_FP32_H_
|
|
@ -0,0 +1,132 @@
|
|||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include "common/common_test.h"
|
||||
#include "nnacl/fp32/ragged_range_fp32.h"
|
||||
#include "src/tensor.h"
|
||||
#include "src/lite_kernel.h"
|
||||
#include "src/kernel_registry.h"
|
||||
|
||||
namespace mindspore {
|
||||
// Test fixture for the fp32/int32 RaggedRange CPU kernel.
class TestRaggedRangeFp32 : public mindspore::CommonTest {
 public:
  TestRaggedRangeFp32() {}
};
|
||||
|
||||
// Scalar-broadcast case: one row with start=0, limit=5, delta=1 must produce
// splits=[0, 5] and values=[0, 1, 2, 3, 4].
TEST_F(TestRaggedRangeFp32, 001) {
  lite::Tensor in_tensor0(kNumberTypeFloat32, {1});
  lite::Tensor in_tensor1(kNumberTypeFloat32, {1});
  lite::Tensor in_tensor2(kNumberTypeFloat32, {1});
  // BUGFIX: output 0 (splits) is int32 and output 1 (values) is fp32 — the
  // declared dtypes were swapped relative to the backing buffers below.
  lite::Tensor out_tensor0(kNumberTypeInt32, {2});
  lite::Tensor out_tensor1(kNumberTypeFloat32, {5});

  float input_data0[] = {0};
  float input_data1[] = {5};
  float input_data2[] = {1};
  int output_data0[2];
  float output_data1[5];
  in_tensor0.set_data(input_data0);
  in_tensor1.set_data(input_data1);
  in_tensor2.set_data(input_data2);
  out_tensor0.set_data(output_data0);
  out_tensor1.set_data(output_data1);
  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1, &in_tensor2};
  std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};

  // rows = 1; all three inputs are broadcast scalars.
  RaggedRangeParameter param = {{}, 1, true, true, true};
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_RaggedRange};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::InnerContext>();
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param), ctx.get(), desc);
  ASSERT_NE(kernel, nullptr);

  auto ret = kernel->Init();
  EXPECT_EQ(0, ret);
  ret = kernel->Run();
  EXPECT_EQ(0, ret);

  int expect0[] = {0, 5};
  float expect1[] = {0, 1, 2, 3, 4};
  EXPECT_EQ(output_data0[0], expect0[0]);
  EXPECT_EQ(output_data0[1], expect0[1]);
  for (int i = 0; i < 5; ++i) {
    EXPECT_EQ(output_data1[i], expect1[i]);
  }

  // Detach the stack buffers so the tensor destructors do not free them,
  // then release the kernel (it was leaked in the original test).
  in_tensor0.set_data(nullptr);
  in_tensor1.set_data(nullptr);
  in_tensor2.set_data(nullptr);
  out_tensor0.set_data(nullptr);
  out_tensor1.set_data(nullptr);
  delete kernel;
}
|
||||
|
||||
// Vector case: four rows with per-row start/limit/delta, including a negative
// delta (row 2: 7 down to 4) and an empty-range row is not present here.
// Expected splits = [0, 3, 7, 10, 11], values concatenate all four ranges.
TEST_F(TestRaggedRangeFp32, 002) {
  lite::Tensor in_tensor0(kNumberTypeFloat32, {4});  // starts
  lite::Tensor in_tensor1(kNumberTypeFloat32, {4});  // limits
  lite::Tensor in_tensor2(kNumberTypeFloat32, {4});  // deltas
  // Output 0 is the int32 nested-splits tensor: rows + 1 = 5 entries, matching
  // int output_data0[5] and the 5 expected splits checked below. Output 1 is
  // the float32 dense-values tensor with 11 entries. Previously the dtypes
  // were swapped and the shapes ({2} and {5}) did not match the data written.
  lite::Tensor out_tensor0(kNumberTypeInt32, {5});
  lite::Tensor out_tensor1(kNumberTypeFloat32, {11});

  float input_data0[] = {0, 1, 7, 3};
  float input_data1[] = {3, 8, 4, 4};
  float input_data2[] = {1, 2, -1, 1};
  int output_data0[5];
  float output_data1[11];
  in_tensor0.set_data(input_data0);
  in_tensor1.set_data(input_data1);
  in_tensor2.set_data(input_data2);
  out_tensor0.set_data(output_data0);
  out_tensor1.set_data(output_data1);
  std::vector<lite::Tensor *> inputs = {&in_tensor0, &in_tensor1, &in_tensor2};
  std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1};

  // rows = 4; starts/limits/deltas are all per-row vectors.
  RaggedRangeParameter param = {{}, 4, false, false, false};
  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_RaggedRange};

  auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
  ASSERT_NE(creator, nullptr);

  auto ctx = std::make_shared<lite::InnerContext>();
  ASSERT_EQ(lite::RET_OK, ctx->Init());
  auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param), ctx.get(), desc);
  ASSERT_NE(kernel, nullptr);

  auto ret = kernel->Init();
  EXPECT_EQ(0, ret);
  ret = kernel->Run();
  EXPECT_EQ(0, ret);

  int expect0[] = {0, 3, 7, 10, 11};
  float expect1[] = {0, 1, 2, 1, 3, 5, 7, 7, 6, 5, 3};
  for (int i = 0; i < 5; ++i) {
    EXPECT_EQ(output_data0[i], expect0[i]);
  }
  for (int i = 0; i < 11; ++i) {
    EXPECT_EQ(output_data1[i], expect1[i]);
  }

  // Detach the stack buffers so the tensor destructors do not free them.
  in_tensor0.set_data(nullptr);
  in_tensor1.set_data(nullptr);
  in_tensor2.set_data(nullptr);
  out_tensor0.set_data(nullptr);
  out_tensor1.set_data(nullptr);
}
|
||||
} // namespace mindspore
|
|
@ -16,58 +16,23 @@
|
|||
#include "tools/converter/parser/tf/tf_ragged_range_parser.h"
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include "tools/converter/parser/tf/tf_node_parser_registry.h"
|
||||
#include "tools/converter/parser/tf/tf_util.h"
|
||||
#include "ops/range.h"
|
||||
#include "ops/ragged_range.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
// Parses a TensorFlow RaggedRange node into a lite RaggedRange primitive.
//
// The span as captured was a corrupted old/new diff merge: `prim` was declared
// twice with different types (ops::Range then ops::RaggedRange), *output_size
// was assigned both 1 and 2, and dead attribute-parsing branches for
// "starts"/"limits"/"deltas" (one with an inverted !FindAttrValue condition)
// remained from the removed Range-based version. This is the coherent new
// version: RaggedRange carries no scalar attributes — starts, limits and
// deltas are forwarded as graph inputs instead.
//
// @param tf_op       the TF NodeDef being converted
// @param tf_node_map map from flattened node name to NodeDef (unused here)
// @param inputs      out: names of the op's input nodes, in operand order
// @param output_size out: number of outputs (RaggedRange produces two:
//                    rt_nested_splits and rt_dense_values)
// @return the primitive on success, nullptr if any input cannot be added
ops::PrimitiveC *TFRaggedRangeParser::Parse(const tensorflow::NodeDef &tf_op,
                                            const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                                            std::vector<std::string> *inputs, int *output_size) {
  auto prim = std::make_unique<ops::RaggedRange>();

  *output_size = 2;
  // Forward all three operands (starts, limits, deltas) as inputs.
  for (int i = 0; i < 3; i++) {
    if (AddOpInput(tf_op, i, inputs) != RET_OK) {
      MS_LOG(ERROR) << "add op input " << i << " failed!";
      return nullptr;
    }
  }

  return prim.release();
}
|
||||
|
||||
|
|
|
@ -15,10 +15,9 @@
|
|||
*/
|
||||
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_RAGFED_RANGE_PARSER_H_
|
||||
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_RAGGED_RANGE_PARSER_H_
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include "tools/converter/parser/tf/tf_node_parser.h"
|
||||
|
||||
namespace mindspore {
|
||||
|
|
Loading…
Reference in New Issue