!48395 add sequence

Merge pull request !48395 from NaCN/add_sequencesssss
i-robot 2023-02-06 03:08:13 +00:00 committed by Gitee
commit fedc32491c
50 changed files with 2800 additions and 151 deletions

View File

@@ -802,108 +802,6 @@ AbstractBasePtr InferImplMakeSlice(const AnalysisEnginePtr &, const PrimitivePtr
slice_args[kMakeSliceInput2]);
}
bool CheckMakeRangeInput(const std::vector<AbstractBasePtr> &input_args, const std::string &prim_name) {
constexpr size_t max_args_size = 3;
constexpr size_t min_args_size = 1;
auto inputs_size = input_args.size();
if (inputs_size > max_args_size || inputs_size < min_args_size) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', the input size should within [" << min_args_size << ", "
<< max_args_size << "] but got" << inputs_size;
}
bool has_variable = false;
for (size_t i = 0; i < input_args.size(); ++i) {
auto element = input_args[i];
MS_EXCEPTION_IF_NULL(element);
auto element_type = element->BuildType();
if (element_type->type_id() != kInt64->type_id()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the " << i << "th input should be a int64 scalar but got "
<< element->ToString();
}
if (!has_variable && element->BuildValue() == kAnyValue) {
has_variable = true;
}
}
return has_variable;
}
abstract::AbstractTuplePtr CalcSlidePara(const std::vector<int64_t> &values, const std::string &prim_name) {
SlideInfo slide = {0, 1, 0};
auto values_size = values.size();
if (values_size == kDim3) {
slide.start = values[kIndex0];
slide.stop = values[kIndex1];
slide.step = values[kIndex2];
} else if (values_size == kDim2) {
slide.start = values[kIndex0];
slide.stop = values[kIndex1];
} else {
slide.stop = values[kIndex0];
}
if (slide.step == 0) {
MS_LOG(EXCEPTION) << "For 'range', the argument 'step' could not be 0.";
}
AbstractBasePtrList args;
if (slide.start <= slide.stop) {
if (slide.step <= 0) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', when the argument 'start' " << slide.start
<< " is less than or equal to the argument 'stop' " << slide.stop << ", "
<< "the argument 'step' must be greater than 0, but the argument 'step' is " << slide.step
<< ".";
}
for (int64_t i = slide.start; i < slide.stop; i += slide.step) {
args.push_back(std::make_shared<abstract::AbstractScalar>(std::make_shared<Int64Imm>(i)));
if (i > 0 && INT_MAX - i < slide.step) {
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
} else {
if (slide.step >= 0) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', while the argument 'start' " << slide.start
<< " is greater than the argument "
<< "'stop' " << slide.stop << ", the argument 'step' must be less than 0, "
<< "but the argument 'step' is " << slide.step << ".";
}
for (int64_t i = slide.start; i > slide.stop; i += slide.step) {
args.push_back(std::make_shared<abstract::AbstractScalar>(std::make_shared<Int64Imm>(i)));
if (i < 0 && INT_MIN - i > slide.step) {
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
}
return std::make_shared<abstract::AbstractTuple>(args);
}
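// Worked examples of the defaulting above ('start' defaults to 0 and 'step'
// to 1, mirroring Python's range()):
//   range(5)        -> start = 0, stop = 5, step = 1  -> (0, 1, 2, 3, 4)
//   range(2, 5)     -> start = 2, stop = 5, step = 1  -> (2, 3, 4)
//   range(5, 0, -2) -> start = 5, stop = 0, step = -2 -> (5, 3, 1)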
AbstractBasePtr InferImplMakeRange(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
bool has_variable = CheckMakeRangeInput(args_spec_list, prim_name);
if (has_variable) {
// If any input to make_range is a variable, the output abstract should be a dynamic-length sequence.
auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64);
auto ret = std::make_shared<abstract::AbstractTuple>(AbstractBasePtrList{element});
ret->CheckAndConvertToDynamicLenSequence();
return ret;
}
std::vector<int64_t> values;
for (size_t i = 0; i < args_spec_list.size(); ++i) {
auto element = args_spec_list[i];
auto element_val = element->BuildValue();
if (!element_val->isa<Int64Imm>()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the " << i << "th input should be a int64 scalar but got "
<< element->ToString();
}
values.push_back(element_val->cast<Int64ImmPtr>()->value());
}
return CalcSlidePara(values, prim_name);
}
AbstractBasePtr InferImplStopGradient(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
// Inputs: any value;
@@ -1210,7 +1108,6 @@ REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(J, prim::kPrimJ, InferImplJ, nullptr);
REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(BroadcastGradientArgs, prim::kPrimBroadcastGradientArgs,
InferImplBroadcastGradientArgs, nullptr);
// Other
REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(MakeRange, prim::kPrimMakeRange, InferImplMakeRange, nullptr);
REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(Taylor, prim::kPrimTaylor, InferImplTaylor, nullptr);
REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(Shard, prim::kPrimShard, InferImplShard, nullptr);
REGISTER_PRIMITIVE_FRONT_EVAL_IMPL(Vmap, prim::kPrimVmap, InferImplVmap, nullptr);
@@ -1273,8 +1170,6 @@ void RegPrimitiveFrontEval() {
prim::kPrimBroadcastGradientArgs, InferImplBroadcastGradientArgs,
nullptr);
// Other
abstract::RegisterStandardPrimitiveEvalHelper(abstract::GetFrontendPrimitiveInferMapPtr(), prim::kPrimMakeRange,
InferImplMakeRange, nullptr);
abstract::RegisterStandardPrimitiveEvalHelper(abstract::GetFrontendPrimitiveInferMapPtr(), prim::kPrimTaylor,
InferImplTaylor, nullptr);
abstract::RegisterStandardPrimitiveEvalHelper(abstract::GetFrontendPrimitiveInferMapPtr(), prim::kPrimShard,

View File

@@ -73,8 +73,6 @@ AbstractBasePtr InferImplJ(const AnalysisEnginePtr &, const PrimitivePtr &primit
AbstractBasePtr InferImplBroadcastGradientArgs(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
// Other
AbstractBasePtr InferImplMakeRange(const AnalysisEnginePtr &, const PrimitivePtr &,
const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplTaylor(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplShard(const AnalysisEnginePtr &, const PrimitivePtr &primitive,

View File

@@ -137,6 +137,9 @@ ShapeVector KernelTensor::GetShapeVector() const {
}
ShapeVector KernelTensor::GetMaxShape() const {
if (meta_type_ != kObjectTypeTensorType) {
return {};
}
auto base_shape_ptr = GetBaseShape();
if (base_shape_ptr == nullptr || !base_shape_ptr->isa<abstract::Shape>()) {
return {};

View File

@@ -0,0 +1,121 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_add_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSequenceAddInputNum = 2;
constexpr size_t kSequenceAddOutputNum = 1;
} // namespace
bool SequenceAddCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', it does not support this kernel data type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[index].second;
return true;
}
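// The Init pattern above is shared by every kernel in this patch: the runtime
// packs the actual input/output dtypes into a KernelAttr, MatchKernelAttr
// searches the attributes from GetOpSupport() for a match, and the function
// pointer paired with the matching func_list_ entry becomes kernel_func_.
// A minimal sketch of the same table-dispatch idea in isolation (all names
// below are invented for illustration):
namespace dispatch_sketch {
using HandlerSketch = bool (*)(void *instance);
struct EntrySketch {
  int type_id;            // stands in for a KernelAttr
  HandlerSketch handler;  // stands in for a typed LaunchKernel<T>
};
inline bool DispatchSketch(const std::vector<EntrySketch> &table, int type_id, void *instance) {
  for (const auto &entry : table) {
    if (entry.type_id == type_id) {
      return entry.handler(instance);  // call through the stored pointer, like kernel_func_
    }
  }
  return false;  // no kernel registered for this dtype
}
}  // namespace dispatch_sketch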
int SequenceAddCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceAddCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
const auto input_0_addr = reinterpret_cast<T *>(inputs[0]->GetData()->addr);
const auto input_1_addr = reinterpret_cast<T *>(inputs[1]->GetData()->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
auto input_0_size = inputs[0]->GetData()->size;
auto input_1_size = inputs[1]->GetData()->size;
auto output_size = outputs[0]->GetData()->size;
if (input_0_size + input_1_size != output_size) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the size of 'input_0 + input_1': {"
<< input_0_size + input_1_size << "} is not equal to the size of output: {" << output_size << "}";
}
auto cp_ret = memcpy_s(output_addr, input_0_size, input_0_addr, input_0_size);
if (cp_ret != EOK) {
MS_LOG(EXCEPTION) << "For " << kernel_name_ << ", memcpy error, errorno: " << cp_ret;
}
cp_ret = memcpy_s(output_addr + input_0_size / sizeof(T), input_1_size, input_1_addr, input_1_size);
if (cp_ret != EOK) {
MS_LOG(EXCEPTION) << "For " << kernel_name_ << ", memcpy error, errorno: " << cp_ret;
}
return true;
}
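// Semantically, SequenceAdd is tuple concatenation: the output holds input_0
// followed by input_1, which is why the second copy above lands at an element
// offset of input_0_size / sizeof(T). A minimal sketch of the same operation
// on std::vector (the helper name is invented for illustration):
template <typename T>
std::vector<T> ConcatSketch(const std::vector<T> &a, const std::vector<T> &b) {
  std::vector<T> out;
  out.reserve(a.size() + b.size());
  out.insert(out.end(), a.begin(), a.end());  // first operand at element offset 0
  out.insert(out.end(), b.begin(), b.end());  // second operand at element offset a.size()
  return out;
}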
bool SequenceAddCpuKernelMod::Launch(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSequenceAddInputNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSequenceAddOutputNum, kernel_name_);
return kernel_func_(this, inputs, outputs, workspace);
}
std::vector<std::pair<KernelAttr, SequenceAddCpuKernelMod::SequenceAddFunc>> SequenceAddCpuKernelMod::func_list_ = {
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat32),
&SequenceAddCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat64),
&SequenceAddCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&SequenceAddCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddCpuKernelMod::LaunchKernel<int64_t>}};
std::vector<KernelAttr> SequenceAddCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, SequenceAddFunc> &item) { return item.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SequenceAdd, SequenceAddCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,65 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "mindspore/core/ops/sequence_add.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceAddCpuKernelMod : public NativeCpuKernelMod {
public:
SequenceAddCpuKernelMod() = default;
explicit SequenceAddCpuKernelMod(const std::string &kernel_type) : kernel_type_(kernel_type) {}
~SequenceAddCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost);
template <typename T>
bool LaunchKernel(const std::vector<kernel::KernelTensorPtr> &inputs,
const std::vector<kernel::KernelTensorPtr> &outputs,
const std::vector<kernel::AddressPtr> &workspace);
protected:
std::vector<KernelAttr> GetOpSupport() override;
using SequenceAddFunc =
std::function<bool(SequenceAddCpuKernelMod *, const std::vector<kernel::KernelTensorPtr> &,
const std::vector<kernel::KernelTensorPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, SequenceAddFunc>> func_list_;
SequenceAddFunc kernel_func_;
private:
std::string kernel_type_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_CPU_KERNEL_H_

View File

@@ -0,0 +1,112 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_add_offset_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSequenceAddOffsetInputNum = 2;
constexpr size_t kSequenceAddOffsetOutputNum = 1;
} // namespace
bool SequenceAddOffsetCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', it does not support this kernel data type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[index].second;
return true;
}
int SequenceAddOffsetCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceAddOffsetCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
auto output_addr = reinterpret_cast<int64_t *>(outputs[0]->GetData()->addr);
auto input_0_size = inputs[0]->GetData()->size / sizeof(T);
output_addr[0] = 0;
output_addr[1] = input_0_size;
return true;
}
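// SequenceAddOffset only reports where each operand starts inside the
// concatenated result: the first input begins at element 0 and the second at
// len(input_0), which is why the byte size is divided by sizeof(T) above.
// For example, concatenating a 3-element tuple with a 5-element tuple yields
// the offset pair {0, 3}. An illustrative one-liner (invented name):
inline std::pair<int64_t, int64_t> AddOffsetsSketch(int64_t len_a) { return {0, len_a}; }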
bool SequenceAddOffsetCpuKernelMod::Launch(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSequenceAddOffsetInputNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSequenceAddOffsetOutputNum, kernel_name_);
return kernel_func_(this, inputs, outputs, workspace);
}
std::vector<std::pair<KernelAttr, SequenceAddOffsetCpuKernelMod::SequenceAddOffsetFunc>>
SequenceAddOffsetCpuKernelMod::func_list_ = {{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddOffsetCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeInt32)
.AddInputAttr(kObjectTypeList, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddOffsetCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddOffsetCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeInt64)
.AddInputAttr(kObjectTypeList, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddOffsetCpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceAddOffsetCpuKernelMod::LaunchKernel<int64_t>}};
std::vector<KernelAttr> SequenceAddOffsetCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, SequenceAddOffsetFunc> &item) { return item.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SequenceAddOffset, SequenceAddOffsetCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,65 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_OFFSET_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_OFFSET_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "mindspore/core/ops/sequence_add_offset.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceAddOffsetCpuKernelMod : public NativeCpuKernelMod {
public:
SequenceAddOffsetCpuKernelMod() = default;
explicit SequenceAddOffsetCpuKernelMod(const std::string &kernel_type) : kernel_type_(kernel_type) {}
~SequenceAddOffsetCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost);
template <typename T>
bool LaunchKernel(const std::vector<kernel::KernelTensorPtr> &inputs,
const std::vector<kernel::KernelTensorPtr> &outputs,
const std::vector<kernel::AddressPtr> &workspace);
protected:
std::vector<KernelAttr> GetOpSupport() override;
using SequenceAddOffsetFunc =
std::function<bool(SequenceAddOffsetCpuKernelMod *, const std::vector<kernel::KernelTensorPtr> &,
const std::vector<kernel::KernelTensorPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, SequenceAddOffsetFunc>> func_list_;
SequenceAddOffsetFunc kernel_func_;
private:
std::string kernel_type_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ADD_OFFSET_CPU_KERNEL_H_

View File

@@ -0,0 +1,112 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_getitem_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr int kInputsNum = 2;
constexpr int kOutputsNum = 1;
} // namespace
bool SequenceGetItemCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
return MatchKernelFunc(base_operator, inputs, outputs);
}
int SequenceGetItemCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceGetItemCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
const auto input_addr = reinterpret_cast<T *>(inputs[0]->GetData()->addr);
const auto index = reinterpret_cast<int64_t *>(inputs[1]->GetData()->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
*output_addr = input_addr[*index];
return true;
}
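// The kernel above reads a single element: output = seq[index], assuming the
// index is non-negative and already validated upstream. A hedged sketch with
// an explicit bounds check and Python-style negative indexing, neither of
// which the kernel itself performs (the helper name is invented):
template <typename T>
bool GetItemSketch(const T *seq, int64_t len, int64_t index, T *out) {
  if (index < -len || index >= len) {
    return false;  // valid indices satisfy -len <= index < len
  }
  *out = seq[index < 0 ? index + len : index];
  return true;
}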
const std::vector<std::pair<KernelAttr, SequenceGetItemCpuKernelMod::KernelRunFunc>>
&SequenceGetItemCpuKernelMod::GetFuncList() const {
static const std::vector<std::pair<KernelAttr, SequenceGetItemCpuKernelMod::KernelRunFunc>> func_list = {
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeFloat32),
&SequenceGetItemCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeFloat64),
&SequenceGetItemCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeInt32),
&SequenceGetItemCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeList, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceGetItemCpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeFloat32),
&SequenceGetItemCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeFloat64),
&SequenceGetItemCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeInt32),
&SequenceGetItemCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceGetItemCpuKernelMod::LaunchKernel<int64_t>}};
return func_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, RealTupleGetItem, SequenceGetItemCpuKernelMod);
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, RealListGetItem, SequenceGetItemCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,59 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_GETITEM_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_GETITEM_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceGetItemCpuKernelMod : public NativeCpuKernelMod,
public MatchKernelHelper<SequenceGetItemCpuKernelMod, KernelTensorPtr> {
public:
SequenceGetItemCpuKernelMod() = default;
~SequenceGetItemCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
MS_EXCEPTION_IF_NULL(kernel_func_);
return kernel_func_(this, inputs, outputs, workspace);
}
const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
protected:
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_GETITEM_CPU_KERNEL_H_

View File

@@ -0,0 +1,77 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_len_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr int kInputsNum = 1;
constexpr int kOutputsNum = 1;
} // namespace
bool SequenceLenCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
return MatchKernelFunc(base_operator, inputs, outputs);
}
int SequenceLenCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
input_shape_ = inputs.at(kIndex0)->GetShapeVector();
return KRET_OK;
}
template <typename T>
bool SequenceLenCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
auto output_addr = reinterpret_cast<int64_t *>(outputs[0]->GetData()->addr);
output_addr[0] = input_shape_.at(kIndex0);
return true;
}
const std::vector<std::pair<KernelAttr, SequenceLenCpuKernelMod::KernelRunFunc>> &SequenceLenCpuKernelMod::GetFuncList()
const {
static const std::vector<std::pair<KernelAttr, SequenceLenCpuKernelMod::KernelRunFunc>> func_list = {
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32).AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceLenCpuKernelMod::LaunchKernel<float>},
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64).AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceLenCpuKernelMod::LaunchKernel<double>},
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeInt32).AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceLenCpuKernelMod::LaunchKernel<int>},
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeInt64).AddOutputAttr(kObjectTypeNumber, kNumberTypeInt64),
&SequenceLenCpuKernelMod::LaunchKernel<int64_t>}};
return func_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, sequence_len, SequenceLenCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,62 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_LEN_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_LEN_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceLenCpuKernelMod : public NativeCpuKernelMod,
public MatchKernelHelper<SequenceLenCpuKernelMod, KernelTensorPtr> {
public:
SequenceLenCpuKernelMod() = default;
~SequenceLenCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
MS_EXCEPTION_IF_NULL(kernel_func_);
return kernel_func_(this, inputs, outputs, workspace);
}
const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
protected:
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
private:
ShapeVector input_shape_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_LEN_CPU_KERNEL_H_

View File

@@ -0,0 +1,113 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_make_range_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr int kOutputsNum = 1;
template <typename T>
T Sign(T num) {
if (num > static_cast<T>(0.0)) {
return static_cast<T>(1.0);
} else if (num == static_cast<T>(0.0)) {
return static_cast<T>(0.0);
} else {
return static_cast<T>(-1.0);
}
}
} // namespace
bool MakeRangeCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
return MatchKernelFunc(base_operator, inputs, outputs);
}
int MakeRangeCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool MakeRangeCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
auto start = inputs.size() == 1 ? 0 : reinterpret_cast<T *>(inputs[0]->GetData()->addr)[0];
auto limit = inputs.size() == 1 ? reinterpret_cast<T *>(inputs[0]->GetData()->addr)[0]
: reinterpret_cast<T *>(inputs[1]->GetData()->addr)[0];
auto delta = inputs.size() <= 2 ? T(1) : reinterpret_cast<T *>(inputs[2]->GetData()->addr)[0];
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
size_t output_size = outputs[0]->GetData()->size / sizeof(T);
if (Sign(delta) * Sign(limit - start) >= 0) {
for (size_t index = 0; index < output_size; index++) {
output_addr[index] = delta * index + start;
}
}
return true;
}
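// The Sign comparison above skips writing when the step direction disagrees
// with (limit - start), i.e. the range is empty; otherwise output_size has
// already been fixed by the framework. For reference, a non-empty
// range(start, limit, delta) holds ceil(|limit - start| / |delta|) elements.
// A minimal sketch of that count (the helper name is invented):
template <typename T>
int64_t RangeSizeSketch(T start, T limit, T delta) {
  if (delta == T(0) || Sign(delta) * Sign(limit - start) <= T(0)) {
    return 0;  // empty range (or start == limit)
  }
  T span = limit > start ? limit - start : start - limit;
  T step = delta > T(0) ? delta : -delta;
  return static_cast<int64_t>((span + step - T(1)) / step);  // ceiling division
}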
const std::vector<std::pair<KernelAttr, MakeRangeCpuKernelMod::KernelRunFunc>> &MakeRangeCpuKernelMod::GetFuncList()
const {
static const std::vector<std::pair<KernelAttr, MakeRangeCpuKernelMod::KernelRunFunc>> func_list = {
{KernelAttr()
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&MakeRangeCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&MakeRangeCpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr()
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&MakeRangeCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&MakeRangeCpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr().AddInputAttr(kObjectTypeNumber, kNumberTypeInt32).AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&MakeRangeCpuKernelMod::LaunchKernel<int>},
{KernelAttr().AddInputAttr(kObjectTypeNumber, kNumberTypeInt64).AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&MakeRangeCpuKernelMod::LaunchKernel<int64_t>},
};
return func_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, make_range, MakeRangeCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,59 @@
/**
* Copyright 2020-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_MAKERANGE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_MAKERANGE_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class MakeRangeCpuKernelMod : public NativeCpuKernelMod,
public MatchKernelHelper<MakeRangeCpuKernelMod, KernelTensorPtr> {
public:
MakeRangeCpuKernelMod() = default;
~MakeRangeCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
MS_EXCEPTION_IF_NULL(kernel_func_);
return kernel_func_(this, inputs, outputs, workspace);
}
const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
protected:
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_MAKERANGE_CPU_KERNEL_H_

View File

@@ -0,0 +1,136 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_setitem_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSequenceSetItemInputNum = 3;
constexpr size_t kSequenceSetItemOutputNum = 1;
constexpr size_t kDataIndex = 0;
constexpr size_t kIdxIndex = 1;
constexpr size_t kValueIndex = 2;
} // namespace
bool SequenceSetItemCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', it does not support this kernel data type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[index].second;
return true;
}
int SequenceSetItemCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceSetItemCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
const auto data_addr = reinterpret_cast<T *>(inputs[kDataIndex]->GetData()->addr);
const auto idx_addr = reinterpret_cast<int64_t *>(inputs[kIdxIndex]->GetData()->addr);
const auto value_addr = reinterpret_cast<T *>(inputs[kValueIndex]->GetData()->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
T value = value_addr[0];
int64_t idx = idx_addr[0];
auto input_size = inputs[kDataIndex]->GetData()->size;
auto output_size = outputs[0]->GetData()->size;
auto len = static_cast<int64_t>(input_size / sizeof(T));
if (input_size != output_size) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', the size of 'input_x': {" << input_size
<< "} is not equal to the size of output: {" << output_size << "}";
}
auto cp_ret = memcpy_s(output_addr, output_size, data_addr, input_size);
if (cp_ret != EOK) {
MS_LOG(EXCEPTION) << "For " << kernel_name_ << ", memcpy error, errorno: " << cp_ret;
}
if (idx < -len || idx >= len) {
MS_EXCEPTION(ValueError) << "idx is out of range: " << -len << " < idx <= " << len << ", but got " << idx << ".";
}
if (idx < 0) {
idx += len;
}
output_addr[idx] = value;
return true;
}
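// Index handling above follows Python semantics: a valid index satisfies
// -len <= idx < len, and a negative idx refers to position idx + len. For
// example, with a 4-element tuple, idx = -1 writes element 3 and idx = -4
// writes element 0.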
bool SequenceSetItemCpuKernelMod::Launch(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSequenceSetItemInputNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSequenceSetItemOutputNum, kernel_name_);
return kernel_func_(this, inputs, outputs, workspace);
}
std::vector<std::pair<KernelAttr, SequenceSetItemCpuKernelMod::SequenceSetItemFunc>>
SequenceSetItemCpuKernelMod::func_list_ = {{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeFloat32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat32),
&SequenceSetItemCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeFloat64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat64),
&SequenceSetItemCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&SequenceSetItemCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceSetItemCpuKernelMod::LaunchKernel<int64_t>}};
std::vector<KernelAttr> SequenceSetItemCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, SequenceSetItemFunc> &item) { return item.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, list_setitem, SequenceSetItemCpuKernelMod);
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, tuple_setitem, SequenceSetItemCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,62 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SETITEM_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SETITEM_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceSetItemCpuKernelMod : public NativeCpuKernelMod {
public:
SequenceSetItemCpuKernelMod() = default;
~SequenceSetItemCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost);
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
protected:
std::vector<KernelAttr> GetOpSupport() override;
using SequenceSetItemFunc =
std::function<bool(SequenceSetItemCpuKernelMod *, const std::vector<kernel::KernelTensorPtr> &,
const std::vector<kernel::KernelTensorPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, SequenceSetItemFunc>> func_list_;
SequenceSetItemFunc kernel_func_;
private:
std::string kernel_type_;
TypeId dtype{kTypeUnknown};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SETITEM_CPU_KERNEL_H_

View File

@@ -0,0 +1,163 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_slice_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSequenceSliceInputNum = 4;
constexpr size_t kSequenceSliceOutputNum = 1;
} // namespace
bool SequenceSliceCpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', it does not support this kernel data type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[index].second;
return true;
}
int SequenceSliceCpuKernelMod::Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceSliceCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
const auto seq_addr = reinterpret_cast<T *>(inputs[0]->GetData()->addr);
const auto start_addr = reinterpret_cast<int64_t *>(inputs[1]->GetData()->addr);
const auto stop_addr = reinterpret_cast<int64_t *>(inputs[2]->GetData()->addr);
const auto step_addr = reinterpret_cast<int64_t *>(inputs[3]->GetData()->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
int64_t len = static_cast<int64_t>(inputs[0]->GetData()->size / sizeof(T));  // element count, not bytes
int64_t start = start_addr[0];
int64_t stop = stop_addr[0];
int64_t step = step_addr[0];
if (step > 0) {
if (start <= -len) {
start = 0;
} else if (start < 0) {
start += len;
}
if (stop > len) {
stop = len;
} else if (stop > -len && stop < 0) {
stop += len;
}
if (start >= stop) {
return true;
}
int64_t idx = 0;
for (int64_t i = start; i < stop; i += step) {
output_addr[idx] = seq_addr[i];
idx++;
}
return true;
}
if (step < 0) {
if (start >= len) {
start = -1;
} else if (start >= 0 && start < len) {
start -= len;
}
if (stop < -len) {
stop = -1 - len;
} else if (stop >= 0 && stop < len) {
stop -= len;
}
if (start <= stop) {
return true;
}
int64_t idx = 0;
for (int64_t i = start; i > stop; i += step) {
// 'i' is negative here (an offset from the end of the sequence), so shift by len,
// as the grad kernel does.
output_addr[idx] = seq_addr[i + len];
idx++;
}
return true;
}
MS_EXCEPTION(ValueError) << "For 'SequenceSlice', step cannot be 0.";
return false;
}
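// The clamping above mirrors Python slice normalization. Worked examples for
// an 8-element tuple:
//   seq[-3:10:1] -> start = 5, stop = 8, copying elements 5, 6, 7;
//   seq[6:-9:-1] -> start = -2, stop = -9 (offsets from the end), copying
//                   elements 6, 5, ..., 0.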
bool SequenceSliceCpuKernelMod::Launch(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSequenceSliceInputNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSequenceSliceOutputNum, kernel_name_);
return kernel_func_(this, inputs, outputs, workspace);
}
std::vector<std::pair<KernelAttr, SequenceSliceCpuKernelMod::SequenceSliceFunc>> SequenceSliceCpuKernelMod::func_list_ =
{{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat32),
&SequenceSliceCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat64),
&SequenceSliceCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&SequenceSliceCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceSliceCpuKernelMod::LaunchKernel<int64_t>}};
std::vector<KernelAttr> SequenceSliceCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, SequenceSliceFunc> &item) { return item.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SequenceSlice, SequenceSliceCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@@ -0,0 +1,64 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "mindspore/core/ops/sequence_slice.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceSliceCpuKernelMod : public NativeCpuKernelMod {
public:
SequenceSliceCpuKernelMod() = default;
explicit SequenceSliceCpuKernelMod(const std::string &kernel_type) : kernel_type_(kernel_type) {}
~SequenceSliceCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost);
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
protected:
std::vector<KernelAttr> GetOpSupport() override;
using SequenceSliceFunc =
std::function<bool(SequenceSliceCpuKernelMod *, const std::vector<kernel::KernelTensorPtr> &,
const std::vector<kernel::KernelTensorPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, SequenceSliceFunc>> func_list_;
SequenceSliceFunc kernel_func_;
private:
std::string kernel_type_;
TypeId dtype{kTypeUnknown};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_CPU_KERNEL_H_

View File

@@ -0,0 +1,174 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_slice_grad_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <complex>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "utils/ms_utils.h"
#include "include/common/thread_pool.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSequenceSliceGradInputNum = 5;
constexpr size_t kSequenceSliceGradOutputNum = 1;
} // namespace
bool SequenceSliceGradCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
MS_LOG(ERROR) << "For '" << kernel_name_ << "', it does not support this kernel data type: " << kernel_attr;
return false;
}
kernel_func_ = func_list_[index].second;
return true;
}
int SequenceSliceGradCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceSliceGradCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
const auto dout_addr = reinterpret_cast<T *>(inputs[0]->GetData()->addr);
const auto start_addr = reinterpret_cast<int64_t *>(inputs[2]->GetData()->addr);
const auto stop_addr = reinterpret_cast<int64_t *>(inputs[3]->GetData()->addr);
const auto step_addr = reinterpret_cast<int64_t *>(inputs[4]->GetData()->addr);
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
int64_t len = static_cast<int64_t>(inputs[1]->GetData()->size / sizeof(T));  // element count of x, not bytes
int64_t start = start_addr[0];
int64_t stop = stop_addr[0];
int64_t step = step_addr[0];
// set output to zeros
for (int64_t i = 0; i < len; i++) {
output_addr[i] = static_cast<T>(0);
}
if (step > 0) {
if (start <= -len) {
start = 0;
} else if (start < 0) {
start += len;
}
if (stop > len) {
stop = len;
} else if (stop > -len && stop < 0) {
stop += len;
}
if (start >= stop) {
return true;
}
int64_t idx = 0;
for (int64_t i = start; i < stop; i += step) {
output_addr[i] = dout_addr[idx];
idx++;
}
return true;
}
if (step < 0) {
if (start >= len) {
start = -1;
} else if (start >= 0 && start < len) {
start -= len;
}
if (stop < -len) {
stop = -1 - len;
} else if (stop >= 0 && stop < len) {
stop -= len;
}
if (start <= stop) {
return true;
}
int64_t idx = static_cast<int64_t>(inputs[0]->GetData()->size / sizeof(T));  // element count of dout
for (int64_t i = start; i > stop; i += step) {
idx--;
output_addr[i + len] = dout_addr[idx];
}
return true;
}
MS_EXCEPTION(ValueError) << "For 'SequenceSliceGrad', step cannot be 0.";
return false;
}
bool SequenceSliceGradCpuKernelMod::Launch(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSequenceSliceGradInputNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSequenceSliceGradOutputNum, kernel_name_);
return kernel_func_(this, inputs, outputs, workspace);
}
std::vector<std::pair<KernelAttr, SequenceSliceGradCpuKernelMod::SequenceSliceGradFunc>>
SequenceSliceGradCpuKernelMod::func_list_ = {{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat32),
&SequenceSliceGradCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat64),
&SequenceSliceGradCpuKernelMod::LaunchKernel<double>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&SequenceSliceGradCpuKernelMod::LaunchKernel<int>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeTuple, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceSliceGradCpuKernelMod::LaunchKernel<int64_t>}};
std::vector<KernelAttr> SequenceSliceGradCpuKernelMod::GetOpSupport() {
std::vector<KernelAttr> support_list;
(void)std::transform(func_list_.begin(), func_list_.end(), std::back_inserter(support_list),
[](const std::pair<KernelAttr, SequenceSliceGradFunc> &item) { return item.first; });
return support_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SequenceSliceGrad, SequenceSliceGradCpuKernelMod);
} // namespace kernel
} // namespace mindspore
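Note on the scatter above: assuming the index normalization in LaunchKernel is meant to match Python slice semantics, the gradient computation can be restated as the following hedged Python sketch (plain lists stand in for KernelTensors; this is not the shipped implementation):

def sequence_slice_grad(dout, x, start, stop, step):
    # zero-filled gradient, like the kernel's "set output to zeros" loop
    dx = [0] * len(x)
    # normalized positions the forward slice selected
    positions = range(len(x))[start:stop:step]
    assert len(positions) == len(dout)
    for pos, grad in zip(positions, dout):
        dx[pos] = grad  # scatter each incoming gradient back to its source slot
    return tuple(dx)

# forward x[1:4:2] picks indices 1 and 3, so the gradients land there:
assert sequence_slice_grad((0.1, 0.2), (5, 6, 7, 8), 1, 4, 2) == (0, 0.1, 0, 0.2)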

View File

@ -0,0 +1,64 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_GRAD_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_GRAD_CPU_KERNEL_H_
#include <vector>
#include <memory>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "mindspore/core/ops/sequence_slice_grad.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceSliceGradCpuKernelMod : public NativeCpuKernelMod {
public:
SequenceSliceGradCpuKernelMod() = default;
explicit SequenceSliceGradCpuKernelMod(const std::string &kernel_type) : kernel_type_(kernel_type) {}
~SequenceSliceGradCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
protected:
std::vector<KernelAttr> GetOpSupport() override;
using SequenceSliceGradFunc =
std::function<bool(SequenceSliceGradCpuKernelMod *, const std::vector<kernel::KernelTensorPtr> &,
const std::vector<kernel::KernelTensorPtr> &, const std::vector<kernel::AddressPtr> &)>;
static std::vector<std::pair<KernelAttr, SequenceSliceGradFunc>> func_list_;
SequenceSliceGradFunc kernel_func_;
private:
std::string kernel_type_;
TypeId dtype{kTypeUnknown};
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_SLICE_GRAD_CPU_KERNEL_H_

View File

@ -0,0 +1,80 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/cpu/kernel/sequence/sequence_zeros_like_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kInputsNum = 1;
constexpr size_t kOutputsNum = 1;
} // namespace
bool SequenceZerosLikeCpuKernelMod::Init(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) {
MS_EXCEPTION_IF_NULL(base_operator);
kernel_name_ = base_operator->name();
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kOutputsNum, kernel_name_);
return MatchKernelFunc(base_operator, inputs, outputs);
}
int SequenceZerosLikeCpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) {
int ret = KernelMod::Resize(base_operator, inputs, outputs, inputsOnHost);
if (ret != 0) {
return ret;
}
return KRET_OK;
}
template <typename T>
bool SequenceZerosLikeCpuKernelMod::LaunchKernel(const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
auto output_addr = reinterpret_cast<T *>(outputs[0]->GetData()->addr);
size_t output_size = outputs[0]->GetData()->size / sizeof(T);
for (size_t i = 0; i < output_size; i++) {
output_addr[i] = T(0);
}
return true;
}
const std::vector<std::pair<KernelAttr, SequenceZerosLikeCpuKernelMod::KernelRunFunc>>
&SequenceZerosLikeCpuKernelMod::GetFuncList() const {
static const std::vector<std::pair<KernelAttr, SequenceZerosLikeCpuKernelMod::KernelRunFunc>> func_list = {
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat32)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat32),
&SequenceZerosLikeCpuKernelMod::LaunchKernel<float>},
{KernelAttr()
.AddInputAttr(kObjectTypeTuple, kNumberTypeFloat64)
.AddOutputAttr(kObjectTypeTuple, kNumberTypeFloat64),
&SequenceZerosLikeCpuKernelMod::LaunchKernel<double>},
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeInt32).AddOutputAttr(kObjectTypeTuple, kNumberTypeInt32),
&SequenceZerosLikeCpuKernelMod::LaunchKernel<int32_t>},
{KernelAttr().AddInputAttr(kObjectTypeTuple, kNumberTypeInt64).AddOutputAttr(kObjectTypeTuple, kNumberTypeInt64),
&SequenceZerosLikeCpuKernelMod::LaunchKernel<int64_t>}};
return func_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SequenceZerosLike, SequenceZerosLikeCpuKernelMod);
} // namespace kernel
} // namespace mindspore

View File

@ -0,0 +1,59 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ZEROS_LIKE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ZEROS_LIKE_CPU_KERNEL_H_
#include <complex>
#include <vector>
#include <utility>
#include <map>
#include <string>
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"
namespace mindspore {
namespace kernel {
class SequenceZerosLikeCpuKernelMod : public NativeCpuKernelMod,
public MatchKernelHelper<SequenceZerosLikeCpuKernelMod, KernelTensorPtr> {
public:
SequenceZerosLikeCpuKernelMod() = default;
~SequenceZerosLikeCpuKernelMod() override = default;
bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs) override;
int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
const std::vector<KernelTensorPtr> &outputs,
const std::map<uint32_t, tensor::TensorPtr> &inputsOnHost) override;
bool Launch(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace) {
MS_EXCEPTION_IF_NULL(kernel_func_);
return kernel_func_(this, inputs, outputs, workspace);
}
const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
protected:
std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
template <typename T>
bool LaunchKernel(const std::vector<KernelTensorPtr> &inputs, const std::vector<KernelTensorPtr> &outputs,
const std::vector<AddressPtr> &workspace);
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PLUGIN_DEVICE_CPU_KERNEL_SEQUENCE_ZEROS_LIKE_CPU_KERNEL_H_

View File

@ -47,14 +47,11 @@ REG_PRIM_INFER_FUNC(UnsortedSegmentMax, true)
REG_PRIM_INFER_FUNC(UnsortedSegmentMin, true)
REG_PRIM_INFER_FUNC(MakeKeywordArg, true)
REG_PRIM_INFER_FUNC(ExtractKeywordArg, true)
REG_PRIM_INFER_FUNC(TupleSetItem, true)
REG_PRIM_INFER_FUNC(ListSetItem, true)
REG_PRIM_INFER_FUNC(DictGetItem, true)
REG_PRIM_INFER_FUNC(DictSetItem, true)
REG_PRIM_INFER_FUNC(DictGetKeys, true)
REG_PRIM_INFER_FUNC(DictGetValues, true)
REG_PRIM_INFER_FUNC(DictItems, true)
REG_PRIM_INFER_FUNC(SequenceLen, true)
REG_PRIM_INFER_FUNC(ArrayLen, true)
REG_PRIM_INFER_FUNC(Mutable, true)
REG_PRIM_INFER_FUNC(GetGrad, true)

View File

@ -74,10 +74,6 @@ MIND_API AbstractBasePtr InferImplMakeKeywordArg(const AnalysisEnginePtr &, cons
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplExtractKeywordArg(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplTupleSetItem(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplListSetItem(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplDictGetItem(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplDictSetItem(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
@ -88,8 +84,6 @@ MIND_API AbstractBasePtr InferImplDictGetValues(const AnalysisEnginePtr &, const
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplDictItems(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplSequenceLen(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplArrayLen(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
MIND_API AbstractBasePtr InferImplMutable(const AnalysisEnginePtr &, const PrimitivePtr &primitive,

View File

@ -289,11 +289,6 @@ AbstractBasePtr InferImplDictItems(const AnalysisEnginePtr &, const PrimitivePtr
return std::make_shared<AbstractList>(items);
}
AbstractBasePtr InferImplSequenceLen(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
return InferTupleOrListOrDictLen<AbstractSequence>(primitive->name(), args_spec_list);
}
AbstractBasePtr InferImplArrayLen(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) {
const std::string op_name = primitive->name();

View File

@ -266,6 +266,11 @@ constexpr auto kSequenceCount = "SequenceCount";
constexpr auto kSequenceIndex = "SequenceIndex";
constexpr auto kSequenceMul = "SequenceMul";
constexpr auto kSequenceSlice = "SequenceSlice";
constexpr auto kSequenceLen = "sequence_len";
constexpr auto kSequenceZerosLike = "SequenceZerosLike";
constexpr auto kMakeRange = "make_range";
constexpr auto kSequenceAddOffset = "SequenceAddOffset";
constexpr auto kSequenceSliceGrad = "SequenceSliceGrad";
constexpr auto kSequenceSliceSetItem = "SequenceSliceSetItem";
// NN
@ -1603,6 +1608,9 @@ GVAR_DEF(PrimitivePtr, kPrimSequenceIndex, std::make_shared<Primitive>(kSequence
GVAR_DEF(PrimitivePtr, kPrimSequenceMul, std::make_shared<Primitive>(kSequenceMul));
GVAR_DEF(PrimitivePtr, kPrimSequenceSlice, std::make_shared<Primitive>(kSequenceSlice));
GVAR_DEF(PrimitivePtr, kPrimSequenceSliceSetItem, std::make_shared<Primitive>(kSequenceSliceSetItem));
GVAR_DEF(PrimitivePtr, kPrimSequenceZerosLike, std::make_shared<Primitive>(kSequenceZerosLike));
GVAR_DEF(PrimitivePtr, kPrimSequenceAddOffset, std::make_shared<Primitive>(kSequenceAddOffset));
GVAR_DEF(PrimitivePtr, kPrimSequenceSliceGrad, std::make_shared<Primitive>(kSequenceSliceGrad));
// Other miscellaneous
GVAR_DEF(PrimitivePtr, kPrimSampleDistortedBoundingBoxV2, std::make_shared<Primitive>(kSampleDistortedBoundingBoxV2));

View File

@ -0,0 +1,50 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/list_setitem.h"
#include <string>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {} // namespace
class ListSetItemInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractList>(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractList>(primitive, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractList>(primitive, input_args);
}
};
MIND_API_OPERATOR_IMPL(list_setitem, BaseOperator);
REGISTER_PRIMITIVE_OP_INFER_IMPL(list_setitem, prim::kPrimListSetItem, ListSetItemInfer, false);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,34 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_LIST_SET_ITEM_H_
#define MINDSPORE_CORE_OPS_LIST_SET_ITEM_H_
#include "ops/base_operator.h"
namespace mindspore {
namespace ops {
constexpr auto kNameListSetItem = "list_setitem";
/// \brief The list_setitem op is used to set one item at a specific position in the list.
class MIND_API list_setitem : public BaseOperator {
public:
MIND_API_BASE_MEMBER(list_setitem);
/// \brief Constructor.
list_setitem() : BaseOperator(kNameListSetItem) {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_LIST_SET_ITEM_H_

View File

@ -0,0 +1,149 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/make_range.h"
#include <vector>
#include <memory>
#include <string>
#include <set>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
bool CheckMakeRangeInput(const std::vector<AbstractBasePtr> &input_args, const std::string &prim_name) {
constexpr size_t max_args_size = 3;
constexpr size_t min_args_size = 1;
auto inputs_size = input_args.size();
if (inputs_size > max_args_size || inputs_size < min_args_size) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', the input size should within [" << min_args_size << ", "
<< max_args_size << "] but got" << inputs_size;
}
bool has_variable = false;
for (size_t i = 0; i < input_args.size(); ++i) {
auto element = input_args[i];
MS_EXCEPTION_IF_NULL(element);
auto element_type = element->BuildType();
if (element_type->type_id() != kInt64->type_id()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the " << i << "th input should be a int64 scalar but got "
<< element->ToString();
}
if (!has_variable && element->BuildValue() == kAnyValue) {
has_variable = true;
}
}
return has_variable;
}
abstract::AbstractTuplePtr CalcSlidePara(const std::vector<int64_t> &values, const std::string &prim_name) {
auto values_size = values.size();
int64_t start = values_size == 1 ? 0LL : values[kIndex0];
int64_t stop = values_size == 1 ? values[kIndex0] : values[kIndex1];
int64_t step = values_size <= kDim2 ? 1LL : values[kIndex2];
if (step == 0) {
MS_LOG(EXCEPTION) << "For 'range', the argument 'step' could not be 0.";
}
AbstractBasePtrList args;
if (start <= stop) {
if (step <= 0) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', when the argument 'start' " << start
<< " is less than or equal to the argument 'stop' " << stop << ", "
<< "the argument 'step' must be greater than 0, but the argument 'step' is " << step << ".";
}
for (int64_t i = start; i < stop; i += step) {
args.push_back(std::make_shared<abstract::AbstractScalar>(std::make_shared<Int64Imm>(i)));
if (i > 0 && INT_MAX - i < step) {
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
} else {
if (step >= 0) {
MS_LOG(EXCEPTION) << "For '" << prim_name << "', while the argument 'start' " << start
<< " is greater than the argument "
<< "'stop' " << stop << ", the argument 'step' must be less than 0, "
<< "but the argument 'step' is " << step << ".";
}
for (int64_t i = start; i > stop; i += step) {
args.push_back(std::make_shared<abstract::AbstractScalar>(std::make_shared<Int64Imm>(i)));
if (i < 0 && INT_MIN - i > step) {
MS_EXCEPTION(ValueError) << "Integer overflow error occurred when traversing the range. "
<< "Please check the inputs of range.";
}
}
}
return std::make_shared<abstract::AbstractTuple>(args);
}
AbstractBasePtr InferImplMakeRange(const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
bool has_variable = CheckMakeRangeInput(args_spec_list, prim_name);
if (has_variable) {
// If any input to make_range is variable, the output abs should be a dynamic length sequence.
auto element = std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64);
auto ret = std::make_shared<abstract::AbstractTuple>(AbstractBasePtrList{element});
ret->CheckAndConvertToDynamicLenSequence();
return ret;
}
std::vector<int64_t> values;
for (size_t i = 0; i < args_spec_list.size(); ++i) {
auto element = args_spec_list[i];
auto element_val = element->BuildValue();
if (!element_val->isa<Int64Imm>()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name << "', the " << i << "th input should be a int64 scalar but got "
<< element->ToString();
}
values.push_back(element_val->cast<Int64ImmPtr>()->value());
}
return CalcSlidePara(values, prim_name);
}
} // namespace
MIND_API_OPERATOR_IMPL(make_range, BaseOperator);
// AG means auto generated
class MIND_API AGMakeRangeInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferImplMakeRange(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override {
return InferImplMakeRange(primitive, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &engine, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferImplMakeRange(primitive, input_args);
}
std::set<int64_t> GetValueDependArgIndices() const override { return {0, 1, 2}; }
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(make_range, prim::kPrimMakeRange, AGMakeRangeInfer, false);
} // namespace ops
} // namespace mindspore
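For constant inputs, CalcSlidePara above behaves like Python's built-in range with explicit step-direction checks. A hedged sketch under that assumption (the INT_MAX/INT_MIN overflow guards are omitted):

def make_range(*args):
    if not 1 <= len(args) <= 3:
        raise ValueError("make_range expects 1 to 3 int64 arguments")
    start, stop, step = {1: (0, args[0], 1),
                         2: (args[0], args[1], 1),
                         3: args}[len(args)]
    if step == 0:
        raise ValueError("the argument 'step' could not be 0")
    if start <= stop and step < 0:
        raise ValueError("'step' must be greater than 0 when 'start' <= 'stop'")
    if start > stop and step > 0:
        raise ValueError("'step' must be less than 0 when 'start' > 'stop'")
    return tuple(range(start, stop, step))

assert make_range(5) == (0, 1, 2, 3, 4)
assert make_range(4, 0, -2) == (4, 2)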

View File

@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_MAKE_RANGE_H_
#define MINDSPORE_CORE_OPS_MAKE_RANGE_H_
#include "ops/base_operator.h"
#include "mindspore/core/ops/core_ops.h"
namespace mindspore {
namespace ops {
/// \brief Make range operation.
class MIND_API make_range : public BaseOperator {
public:
MIND_API_BASE_MEMBER(make_range);
/// \brief Constructor.
make_range() : BaseOperator(prim::kMakeRange) {}
/// \brief Init function.
void Init() const {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_MAKE_RANGE_H_

View File

@ -676,6 +676,84 @@ AbstractBasePtr TensorToSequenceInfer(const PrimitivePtr &primitive, const std::
auto abs = std::make_shared<T>(abs_list);
return abs;
}
void CheckDynamicLengthSequenceSetItem(const std::string &op_name, const abstract::AbstractSequencePtr &queue,
const AbstractBasePtr &target) {
auto element_abs = queue->dynamic_len_element_abs();
if (element_abs == nullptr) {
MS_LOG(EXCEPTION) << "Empty variable len sequence can not setitem.";
}
const auto precondition_log = "For " + op_name + ", when the queue is dynamic length";
const auto standard_abs_description = "element within dynamic length sequence";
const auto differ_abs_description = "target element";
CheckAndConvertUtils::CheckAbstractTypeAndShapeSame(std::vector<AbstractBasePtr>{element_abs, target},
precondition_log, standard_abs_description,
differ_abs_description);
}
template <typename T>
AbstractBasePtr InferSequenceSetItem(const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list) {
// Inputs: a tuple or list, a scalar whose value is an int64 number and an object of a subclass of AbstractBase.
MS_EXCEPTION_IF_NULL(primitive);
auto op_name = primitive->name();
constexpr int args_spec_size = 3;
constexpr size_t kIndex2 = 2;
abstract::CheckArgsSize(op_name, args_spec_list, args_spec_size);
auto queue = abstract::CheckArg<T>(op_name, args_spec_list, 0);
auto index = abstract::CheckArg<abstract::AbstractScalar>(op_name, args_spec_list, 1);
auto index_type = index->BuildType();
MS_EXCEPTION_IF_NULL(index_type);
if (index_type->type_id() != kInt64->type_id()) {
MS_EXCEPTION(IndexError) << op_name << " evaluator index should be an int64 number, but got a "
<< index_type->ToString() << " number.";
}
ValuePtr index_value = index->BuildValue();
MS_EXCEPTION_IF_NULL(index_value);
auto target = args_spec_list[kIndex2];
MS_EXCEPTION_IF_NULL(target);
if (queue->dynamic_len()) {
CheckDynamicLengthSequenceSetItem(op_name, queue, target);
return queue->Clone();
}
if (index_value == kAnyValue) {
// If the index is variable and the sequence is constant length, then all elements within the sequence
// must have the same type and shape as the target input. All elements within the returned sequence
// are broadened.
const auto &elements = queue->elements();
if (elements.size() == 0) {
MS_LOG(EXCEPTION) << "Empty sequence can not setitem.";
}
const auto precondition_log = "For " + op_name + ", when the index is variable and the queue is constant length";
CheckAndConvertUtils::CheckAbstractTypeAndShapeSame(elements, precondition_log);
auto first_element = elements[0];
const auto standard_abs_description = "element within constant length sequence";
const auto differ_abs_description = "target element";
CheckAndConvertUtils::CheckAbstractTypeAndShapeSame(std::vector<AbstractBasePtr>{first_element, target},
precondition_log, standard_abs_description,
differ_abs_description);
return CheckAndConvertUtils::BroadenAllSequenceElements(queue);
}
auto index_int64_value = GetValue<int64_t>(index_value);
AbstractBasePtrList elements = queue->elements();
std::size_t nelems = elements.size();
if (nelems == 0) {
MS_EXCEPTION(IndexError) << "Can not setitem for an empty sequence.";
}
int64_t index_positive_value = index_int64_value >= 0 ? index_int64_value : index_int64_value + SizeToLong(nelems);
if (index_positive_value < 0 || index_positive_value >= SizeToLong(nelems)) {
MS_EXCEPTION(IndexError) << op_name << " evaluator the index: " << index_int64_value << " to set out of range: [-"
<< nelems << "," << (nelems - 1) << "].";
}
size_t index_unsigned_value = LongToSize(index_positive_value);
elements[index_unsigned_value] = args_spec_list[kIndex2];
MS_LOG(DEBUG) << "SetItem use flags, index: " << index_unsigned_value << ", for " << queue->ToString();
return std::make_shared<T>(elements, queue->sequence_nodes());
}
template AbstractBasePtr InferSequenceSetItem<abstract::AbstractList>(const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
template AbstractBasePtr InferSequenceSetItem<abstract::AbstractTuple>(const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
template AbstractBasePtr TensorToSequenceInfer<abstract::AbstractList>(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args);
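For the constant-index, constant-length path of InferSequenceSetItem, the index normalization reduces to ordinary Python indexing. A minimal sketch under that assumption (the dynamic-length and variable-index branches, which broaden abstracts instead, are excluded):

def sequence_setitem(seq, index, target):
    n = len(seq)
    if n == 0:
        raise IndexError("cannot setitem on an empty sequence")
    pos = index if index >= 0 else index + n  # normalize negative index
    if not 0 <= pos < n:
        raise IndexError(f"index {index} out of range: [-{n}, {n - 1}]")
    items = list(seq)
    items[pos] = target
    return type(seq)(items)  # preserve tuple vs. list

assert sequence_setitem((1, 2, 3), -1, 9) == (1, 2, 9)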

View File

@ -112,6 +112,9 @@ std::shared_ptr<T> InferSparseAttr(const PrimitivePtr &primitive, const Abstract
template <typename T>
AbstractBasePtr TensorToSequenceInfer(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args);
template <typename T>
AbstractBasePtr InferSequenceSetItem(const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list);
template <typename T>
T GetScalarValue(const std::string &op_name, const ValuePtr &elem);

View File

@ -59,12 +59,24 @@ AbstractBasePtr SequenceAddInferInner(const PrimitivePtr &primitive, const std::
<< "the first input is: " << input_1->ToString()
<< " and the second input is: " << input_2->ToString();
}
if (!input_1->dynamic_len() && !input_2->dynamic_len()) {
MS_EXCEPTION(TypeError) << "For operator 'SequenceAdd', at least one of the input should be dynamic length.";
}
// All elements of sequence add should have same element type.
auto abs_1 = CheckAndGetElementType(input_1, prim_name);
auto abs_2 = CheckAndGetElementType(input_2, prim_name);
// all elements are known
if (!input_1->dynamic_len() && !input_2->dynamic_len()) {
abstract::AbstractBasePtrList abs;
for (size_t i = 0; i < input_1->size(); i++) {
abs.push_back(input_1->elements()[i]);
}
for (size_t i = 0; i < input_2->size(); i++) {
abs.push_back(input_2->elements()[i]);
}
auto ret = std::make_shared<abstract::AbstractTuple>(abs);
return ret;
}
// abs_1 is nullptr represents that the input_1 is empty.
// input_1 can be either dynamic length sequence or constant length sequence.
if (abs_1 == nullptr) {

View File

@ -0,0 +1,63 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/sequence_add_offset.h"
#include <vector>
#include <string>
#include <memory>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
AbstractBasePtr SequenceAddOffsetInferInner(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
constexpr size_t input_len = 2;
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_len, prim_name);
abstract::AbstractBasePtrList abs{};
abs.push_back(std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64));
abs.push_back(std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64));
auto ret = std::make_shared<abstract::AbstractTuple>(abs);
return ret;
}
MIND_API_OPERATOR_IMPL(SequenceAddOffset, BaseOperator);
class SequenceAddOffsetInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceAddOffsetInferInner(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceAddOffsetInferInner(prim, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &engine, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceAddOffsetInferInner(primitive, input_args);
}
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(SequenceAddOffset, prim::kPrimSequenceAddOffset, SequenceAddOffsetInfer, true);
} // namespace ops
} // namespace mindspore
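The infer above only fixes the output structure: two int64 scalars of unknown value. Consistent with how get_bprop_sequence_add uses the result later in this commit (dx is sliced from offset[0], dy from offset[1]), a hedged runtime sketch of the value would be:

def sequence_add_offset(x, y):
    # offsets of x and y inside the concatenation x + y
    return (0, len(x))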

View File

@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_SEQUENCE_ADD_OFFSET_H_
#define MINDSPORE_CORE_OPS_SEQUENCE_ADD_OFFSET_H_
#include "ops/base_operator.h"
#include "mindspore/core/ops/core_ops.h"
namespace mindspore {
namespace ops {
/// \brief Sequence addition offset operation
class MIND_API SequenceAddOffset : public BaseOperator {
public:
MIND_API_BASE_MEMBER(SequenceAddOffset);
/// \brief Constructor.
SequenceAddOffset() : BaseOperator(prim::kSequenceAddOffset) {}
/// \brief Init function.
void Init() const {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_SEQUENCE_ADD_OFFSET_H_

View File

@ -0,0 +1,62 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/sequence_len.h"
#include <vector>
#include <memory>
#include <string>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
AbstractBasePtr SequenceLenInferInner(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
std::string op_name = primitive->name();
constexpr size_t input_num = 1;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, op_name);
auto arg = input_args[0];
auto seq_abs = arg->cast<abstract::AbstractSequencePtr>();
MS_EXCEPTION_IF_NULL(seq_abs);
if (seq_abs->dynamic_len()) {
return std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64);
}
const auto &seq_elements = seq_abs->elements();
return std::make_shared<abstract::AbstractScalar>(SizeToLong(seq_elements.size()));
}
MIND_API_OPERATOR_IMPL(sequence_len, BaseOperator);
class SequenceLenInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceLenInferInner(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceLenInferInner(prim, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &engine, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceLenInferInner(primitive, input_args);
}
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(sequence_len, prim::kPrimSequenceLen, SequenceLenInfer, false);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_SEQUENCE_LEN_H_
#define MINDSPORE_CORE_OPS_SEQUENCE_LEN_H_
#include "ops/base_operator.h"
#include "mindspore/core/ops/core_ops.h"
namespace mindspore {
namespace ops {
/// \brief Sequence len operation.
class MIND_API sequence_len : public BaseOperator {
public:
MIND_API_BASE_MEMBER(sequence_len);
/// \brief Constructor.
sequence_len() : BaseOperator(prim::kSequenceLen) {}
/// \brief Init function.
void Init() const {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_SEQUENCE_LEN_H_

View File

@ -17,6 +17,9 @@
#include "ops/sequence_slice.h"
#include <vector>
#include <string>
#include <set>
#include <memory>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
@ -53,10 +56,21 @@ AbstractBasePtr SliceInferInner(const PrimitivePtr &primitive, const std::vector
MS_EXCEPTION_IF_NULL(end_abs);
auto step_abs = input_args[step_index];
MS_EXCEPTION_IF_NULL(step_abs);
// all values are known
if (start_abs->BuildValue() != kAnyValue && end_abs->BuildValue() != kAnyValue &&
step_abs->BuildValue() != kAnyValue) {
MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the input sequence should be dynamic length sequence or "
<< "at least one of the start/end/step should be variable, but got all constant.";
auto start_v = GetValue<int64_t>(start_abs->BuildValue());
auto end_v = GetValue<int64_t>(end_abs->BuildValue());
auto step_v = GetValue<int64_t>(step_abs->BuildValue());
int64_t len = SizeToLong(seq_abs->elements().size());
auto output_size = SequenceSliceGetOutputSize(start_v, end_v, step_v, len);
abstract::AbstractBasePtrList abs{};
for (int64_t i = 0; i < output_size; i++) {
abs.push_back(std::make_shared<abstract::AbstractScalar>(kAnyValue, kInt64));
}
auto ret = std::make_shared<abstract::AbstractTuple>(abs);
return ret;
}
auto ret = seq_abs->Clone()->cast<abstract::AbstractSequencePtr>();
ret->CheckAndConvertToDynamicLenSequence();
@ -80,6 +94,7 @@ class SequenceSliceInfer : public abstract::OpInferBase {
const std::vector<AbstractBasePtr> &input_args) const override {
return SliceInferInner(primitive, input_args);
}
std::set<int64_t> GetValueDependArgIndices() const override { return {1, 2, 3}; }
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(SequenceSlice, prim::kPrimSequenceSlice, SequenceSliceInfer, false);
} // namespace ops

View File

@ -31,7 +31,48 @@ class MIND_API SequenceSlice : public BaseOperator {
/// \brief Init function.
void Init() const {}
};
inline static int64_t SequenceSliceGetOutputSize(int64_t start, int64_t stop, int64_t step, int64_t len) {
int64_t idx = 0;
if (step > 0) {
if (start <= -len) {
start = 0;
} else if (start < 0) {
start += len;
}
if (stop > len) {
stop = len;
} else if (stop > -len && stop < 0) {
stop += len;
}
if (start >= stop) {
return 0;
}
for (int64_t i = start; i < stop; i += step) {
idx++;
}
}
if (step < 0) {
if (start >= len) {
start = -1;
} else if (start >= 0 && start < len) {
start -= len;
}
if (stop < -len) {
stop = -1 - len;
} else if (stop >= 0 && stop < len) {
stop -= len;
}
if (start <= stop) {
return 0;
}
for (int64_t i = start; i > stop; i += step) {
idx++;
}
}
return idx;
}
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_SEQUENCE_SLICE_H_
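A minimal cross-check for SequenceSliceGetOutputSize, assuming its clamping is meant to mirror Python slice semantics: the returned count should agree with the length of the corresponding Python slice.

def slice_output_size(start, stop, step, length):
    return len(range(length)[start:stop:step])

assert slice_output_size(-7, 5, 2, 5) == 3   # start clamps to 0 -> 0, 2, 4
assert slice_output_size(4, -6, -2, 5) == 3  # 4, 2, 0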

View File

@ -0,0 +1,87 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/sequence_slice_grad.h"
#include <vector>
#include <set>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
namespace {
AbstractBasePtr SliceGradInferInner(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
constexpr size_t input_num = 5;
constexpr size_t x_index = 1;
constexpr size_t start_index = 2;
constexpr size_t end_index = 3;
constexpr size_t step_index = 4;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, prim_name);
auto first_abs = input_args[x_index];
MS_EXCEPTION_IF_NULL(first_abs);
if (!first_abs->isa<abstract::AbstractSequence>()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name
<< "', the first input should be tuple or list but got: " << first_abs->ToString();
}
auto seq_abs = first_abs->cast<abstract::AbstractSequencePtr>();
MS_EXCEPTION_IF_NULL(seq_abs);
if (seq_abs->dynamic_len()) {
// If the input sequence has dynamic length, the sliced sequence should also have dynamic length.
return seq_abs->Clone();
}
auto start_abs = input_args[start_index];
MS_EXCEPTION_IF_NULL(start_abs);
auto end_abs = input_args[end_index];
MS_EXCEPTION_IF_NULL(end_abs);
auto step_abs = input_args[step_index];
MS_EXCEPTION_IF_NULL(step_abs);
if (start_abs->BuildValue() != kAnyValue && end_abs->BuildValue() != kAnyValue &&
step_abs->BuildValue() != kAnyValue) {
return seq_abs->Clone();
}
auto ret = seq_abs->Clone()->cast<abstract::AbstractSequencePtr>();
ret->CheckAndConvertToDynamicLenSequence();
return ret;
}
} // namespace
MIND_API_OPERATOR_IMPL(SequenceSliceGrad, BaseOperator);
class SequenceSliceGradInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SliceGradInferInner(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) const override {
return SliceGradInferInner(prim, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SliceGradInferInner(primitive, input_args);
}
std::set<int64_t> GetValueDependArgIndices() const override { return {2, 3, 4}; }
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(SequenceSliceGrad, prim::kPrimSequenceSliceGrad, SequenceSliceGradInfer, false);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,39 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_SEQUENCE_SLICE_GRAD_H_
#define MINDSPORE_CORE_OPS_SEQUENCE_SLICE_GRAD_H_
#include "ops/base_operator.h"
#include "mindspore/core/ops/core_ops.h"
namespace mindspore {
namespace ops {
/// \brief Sequence slice grad operation.
class MIND_API SequenceSliceGrad : public BaseOperator {
public:
MIND_API_BASE_MEMBER(SequenceSliceGrad);
/// \brief Constructor.
SequenceSliceGrad() : BaseOperator(prim::kSequenceSliceGrad) {
InitIOName({"dy", "x", "start", "stop", "step"}, {"output"});
}
/// \brief Init function.
void Init() const {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_SEQUENCE_SLICE_GRAD_H_

View File

@ -0,0 +1,70 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/sequence_zeros_like.h"
#include <vector>
#include <memory>
#include <set>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
#include "abstract/ops/primitive_infer_map.h"
namespace mindspore {
namespace ops {
AbstractBasePtr SequenceZerosLikeInferInner(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
constexpr size_t input_len = 1;
constexpr size_t seq_index = 0;
(void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, input_len, prim_name);
auto first_abs = input_args[seq_index];
if (!first_abs->isa<abstract::AbstractSequence>()) {
MS_EXCEPTION(TypeError) << "For '" << prim_name
<< "', the first input should be tuple or list but got: " << first_abs->ToString();
}
auto seq_abs = first_abs->cast<abstract::AbstractSequencePtr>();
if (seq_abs->dynamic_len()) {
return seq_abs;
}
auto ret = seq_abs->Clone()->cast<abstract::AbstractSequencePtr>();
return ret;
}
MIND_API_OPERATOR_IMPL(SequenceZerosLike, BaseOperator);
class SequenceZerosLikeInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceZerosLikeInferInner(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceZerosLikeInferInner(prim, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &engine, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return SequenceZerosLikeInferInner(primitive, input_args);
}
};
REGISTER_PRIMITIVE_OP_INFER_IMPL(SequenceZerosLike, prim::kPrimSequenceZerosLike, SequenceZerosLikeInfer, false);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,37 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_SEQUENCE_ZEROS_LIKE_H_
#define MINDSPORE_CORE_OPS_SEQUENCE_ZEROS_LIKE_H_
#include "ops/base_operator.h"
#include "mindspore/core/ops/core_ops.h"
namespace mindspore {
namespace ops {
/// \brief Sequence zeros like operation.
class MIND_API SequenceZerosLike : public BaseOperator {
public:
MIND_API_BASE_MEMBER(SequenceZerosLike);
/// \brief Constructor.
SequenceZerosLike() : BaseOperator(prim::kSequenceZerosLike) {}
/// \brief Init function.
void Init() const {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_SEQUENCE_ZEROS_LIKE_H_

View File

@ -0,0 +1,48 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops/tuple_setitem.h"
#include <string>
#include <vector>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "include/common/utils/utils.h"
#include "mindapi/src/helper.h"
namespace mindspore {
namespace ops {
class TupleSetItemInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractTuple>(primitive, input_args)->BuildShape();
}
TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractTuple>(primitive, input_args)->BuildType();
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return InferSequenceSetItem<abstract::AbstractTuple>(primitive, input_args);
}
};
MIND_API_OPERATOR_IMPL(tuple_setitem, BaseOperator);
REGISTER_PRIMITIVE_OP_INFER_IMPL(tuple_setitem, prim::kPrimTupleSetItem, TupleSetItemInfer, false);
} // namespace ops
} // namespace mindspore

View File

@ -0,0 +1,34 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_OPS_TUPLE_SET_ITEM_H_
#define MINDSPORE_CORE_OPS_TUPLE_SET_ITEM_H_
#include "ops/base_operator.h"
namespace mindspore {
namespace ops {
constexpr auto kNameTupleSetItem = "tuple_setitem";
/// \brief The tuple_setitem op is used to set one item at a specific position in the tuple.
class MIND_API tuple_setitem : public BaseOperator {
public:
MIND_API_BASE_MEMBER(tuple_setitem);
/// \brief Constructor.
tuple_setitem() : BaseOperator(kNameTupleSetItem) {}
};
} // namespace ops
} // namespace mindspore
#endif // MINDSPORE_CORE_OPS_TUPLE_SET_ITEM_H_

View File

@ -18,6 +18,7 @@ from mindspore.ops import _constants
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.ops.composite import multitype_ops as C
from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
from mindspore.ops._grad.grad_base import bprops
from mindspore.common import dtype as mstype
@ -68,13 +69,13 @@ def bprop_scalar_cast(x, t, out, dout):
@bprops.register(_constants.kTupleGetItem)
def bprop_tuple_getitem(data, idx, out, dout):
"""Backpropagator for primitive `tuple_getitem`."""
return F.tuple_setitem(C.zeros_like(data), idx, dout), C.zeros_like(idx)
return F.tuple_setitem(zeros_like(data), idx, dout), zeros_like(idx)
@bprops.register("ListGetItem")
def bprop_list_getitem(data, idx, out, dout):
"""Backpropagator for primitive `list_getitem`."""
return F.list_setitem(C.zeros_like(data), idx, dout), C.zeros_like(idx)
return F.list_setitem(zeros_like(data), idx, dout), zeros_like(idx)
@bprops.register("identity")

View File

@ -13,7 +13,7 @@
# limitations under the License.
# ============================================================================
"""sequence_ops"""
"""grad_sequence_ops"""
from mindspore.ops.operations import _sequence_ops as seq
from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
@ -31,6 +31,48 @@ def get_bprop_count(self):
return bprop
@bprop_getters.register(seq.sequence_len)
def get_bprop_sequence_len(self):
"""Generate bprop for sequence_len"""
def bprop(x, out, dout):
return (zeros_like(x),)
return bprop
@bprop_getters.register(seq.make_range)
def get_bprop_range(self):
"""Generate bprop for make_range"""
def bprop(start, limit, delta, out, dout):
return (zeros_like(start), zeros_like(limit), zeros_like(delta))
return bprop
@bprop_getters.register(seq.SequenceAdd)
def get_bprop_sequence_add(self):
"""Generate bprop for SequenceAdd"""
def bprop(x, y, out, dout):
out_offset = seq.SequenceAddOffset()(x, y)
dx = seq.SequenceSlice()(dout, out_offset[0], len(x), 1)
dy = seq.SequenceSlice()(dout, out_offset[1], len(x) + len(y), 1)
return (dx, dy)
return bprop
@bprop_getters.register(seq.SequenceSlice)
def get_bprop_slice(self):
"""Generate bprop for SequenceSlice"""
def bprop(x, start, stop, step, out, dout):
dx = seq.SequenceSliceGrad()(dout, x, start, stop, step)
return (dx, zeros_like(start), zeros_like(stop), zeros_like(step))
return bprop
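SequenceSliceGrad is expected to scatter dout back into a zero sequence shaped like x; a pure-Python sketch under that assumption (sequence_slice_grad here is a hypothetical model, not the C++ kernel):

def sequence_slice_grad(dout, x, start, stop, step):
    dx = [0] * len(x)
    # place each incoming gradient at the position it was sliced from
    for g, i in zip(dout, range(start, stop, step)):
        dx[i] = g
    return tuple(dx)

assert sequence_slice_grad((10, 20), (1, 2, 3, 4), 1, 4, 2) == (0, 10, 0, 20)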
@bprop_getters.register(seq.SequenceIndex)
def get_bprop_index(self):
"""Generate bprop for SequenceIndex"""
@ -41,15 +83,31 @@ def get_bprop_index(self):
return bprop
@bprop_getters.register("tuple_setitem")
@bprop_getters.register("list_setitem")
def get_bprop_setitem(self):
"""Generate bprop for TupleSetItem and ListSetItem"""
tuple_setitem = Primitive('tuple_setitem')
def bprop(x, idx, value, out, dout):
d_x = tuple_setitem(dout, idx, 0)
d_value = dout[idx]
d_idx = 0
return d_x, zeros_like(d_idx), d_value
return bprop
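A pure-Python sketch of the setitem bprop above (illustrative only; setitem_bprop is a hypothetical name): out = setitem(x, idx, value), so value receives dout[idx] and x receives dout with position idx zeroed, mirroring d_x = tuple_setitem(dout, idx, 0).

def setitem_bprop(x, idx, value, dout):
    d_x = tuple(0 if i == idx else g for i, g in enumerate(dout))
    d_value = dout[idx]
    return d_x, 0, d_value  # the index gets a zero gradient

assert setitem_bprop((1, 2, 3), 1, 9, (10, 20, 30)) == ((10, 0, 30), 0, 20)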
@bprop_getters.register(seq.SequenceMul)
def get_bprop_mul(self):
"""Generate bprop for SequenceMul"""
tuple_set_item = Primitive("TupleSetItem")
tuple_setitem = Primitive("tuple_setitem")
def bprop(x, y, out, dout):
dx = x
for i in range(len(x)):
dx = tuple_set_item(dx, i, dout[i])
dx = tuple_setitem(dx, i, dout[i])
return (dx, zeros_like(y))
return bprop
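Since the loop overwrites every slot of dx, the SequenceMul bprop amounts to taking the first len(x) entries of dout; a pure-Python sketch (sequence_mul_bprop is a hypothetical name):

def sequence_mul_bprop(x, y, dout):
    dx = tuple(dout[i] for i in range(len(x)))  # each slot replaced by dout[i]
    return dx, 0  # the integer multiplier y gets a zero gradient

assert sequence_mul_bprop((1, 2), 3, (10, 20, 30, 40, 50, 60)) == ((10, 20), 0)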

View File

@ -1,20 +1,20 @@
(binary diff: regenerated serialized MindIR bprop for tuple_getitem in grad_implementations.py; contents are not human-readable)

View File

@ -108,6 +108,15 @@ class SequenceSliceSetItem(Primitive):
self.init_prim_io_names(inputs=['seq', 'target', 'start', 'stop', 'step'], outputs=['output_data'])
class SequenceSliceGrad(Primitive):
"""Reverse of slice."""
@prim_attr_register
def __init__(self):
"""Initialize SequenceSliceGrad"""
self.init_prim_io_names(inputs=['dy', 'x', 'start', 'stop', 'step'], outputs=['dx'])
class SequenceAdd(Primitive):
r"""
Add the elements of two sequences together.
@ -136,6 +145,33 @@ class SequenceAdd(Primitive):
self.init_prim_io_names(inputs=['input_1', 'input_2'], outputs=['output_data'])
class SequenceAddOffset(Primitive):
r"""
Get the offsets of the SequenceAdd inputs within its output; refer to ConcatOffset.
.. note::
This primitive is only for internal use. At least one of the input sequences should be a dynamic-length sequence.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **input_0** (List, Tuple) - A Tuple/List.
- **input_1** (List, Tuple) - A Tuple/List.
Outputs:
A tuple of offsets of SequenceAdd inputs within its output.
Raises:
TypeError: If 'input_0' and 'input_1' are not both list or tuple.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_attr_register
def __init__(self):
"""Initialize SequenceAddOffset"""
self.init_prim_io_names(inputs=['shape_0', 'shape_1'], outputs=['output'])
class TupleToTensor(Primitive):
r"""
Convert tuple to tensor
@ -359,3 +395,87 @@ class SequenceMul(Primitive):
def __init__(self):
"""Initialize SequenceMul"""
self.init_prim_io_names(inputs=['sequence', 'scalar'], outputs=['output_data'])
class SequenceZerosLike(Primitive):
r"""
Returns a sequence filled with zeros, with the same shape and data type as the input.
.. note::
This primitive is only for internal use.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **sequence** (Union[List, Tuple]) - The input sequence.
Outputs:
List or tuple filled with zeros.
Raises:
TypeError: If 'sequence' is not a list or tuple.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_attr_register
def __init__(self):
"""Initialize SequenceZerosLike"""
self.init_prim_io_names(inputs=['sequence'], outputs=['output_data'])
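A pure-Python sketch of the SequenceZerosLike contract (illustrative only; it ignores element shapes and dtypes, which the real kernel preserves):

def sequence_zeros_like(seq):
    zeros = [0] * len(seq)
    # preserve the container type of the input
    return tuple(zeros) if isinstance(seq, tuple) else zeros

assert sequence_zeros_like((3, 7, 9)) == (0, 0, 0)
assert sequence_zeros_like([1.5, 2.5]) == [0, 0]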
class make_range(Primitive):
r"""
Creates a sequence of numbers in range [start, limit) with step size delta.
.. note::
This primitive is only for internal use.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **start** (Union[int, float]) - Start of interval.
- **limit** (Union[int, float]) - End of interval.
- **delta** (Union[int, float]) - Spacing between values.
Outputs:
A 1-D Sequence, with the same type as the inputs.
Raises:
TypeError: If the datatypes of `start`, `limit` and `delta` are not the same.
TypeError: If the datatype of `start`, `limit` or `delta` is not supported.
ValueError: If `delta` is 0.
ValueError: If `start` >= `limit` when `delta` > 0.
ValueError: If `start` <= `limit` when `delta` < 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_attr_register
def __init__(self):
"""Initialize make_range"""
self.init_prim_io_names(inputs=['start', 'limit', 'delta'], outputs=['output_data'])
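A pure-Python sketch of the make_range contract described above (illustrative only; the real primitive additionally validates the input datatypes):

def make_range_model(start, limit, delta):
    if delta == 0:
        raise ValueError("delta must not be 0")
    out, i = [], start
    # walk from start toward limit (exclusive) in steps of delta
    while (delta > 0 and i < limit) or (delta < 0 and i > limit):
        out.append(i)
        i += delta
    return tuple(out)

assert make_range_model(0, 5, 2) == (0, 2, 4)
assert make_range_model(5, 0, -2) == (5, 3, 1)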
class sequence_len(Primitive):
r"""
Returns length of Sequence.
.. note::
This primitive is only for internal use.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **sequence** (Union[List, Tuple]) - The sequence.
Outputs:
Integer, length of Sequence.
Raises:
TypeError: If 'sequence' is not a list or tuple.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_attr_register
def __init__(self):
"""Initialize sequence_len"""
self.init_prim_io_names(inputs=['sequence'], outputs=['output_data'])

View File

@ -1,4 +1,4 @@
# Copyright 2022 Huawei Technologies Co., Ltd
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
# Copyright 2022 Huawei Technologies Co., Ltd
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.