forked from mindspore-Ecosystem/mindspore
!33751 [assistant][ops] Add sparse operator SparseToDenseV2
Merge pull request !33751 from 孟权令/SparseToDense
commit 50d97b9911
@@ -91,6 +91,7 @@ constexpr auto kEnvironSet = "EnvironSet";
constexpr auto kEnvironGet = "EnvironGet";
constexpr auto kEnvironDestroyAll = "EnvironDestroyAll";
constexpr auto kSampleDistortedBoundingBoxV2 = "SampleDistortedBoundingBoxV2";
constexpr auto kSparseToDenseV2 = "SparseToDenseV2";
constexpr auto kPriorityReplayBufferCreate = "PriorityReplayBufferCreate";
constexpr auto kPriorityReplayBufferPush = "PriorityReplayBufferPush";
constexpr auto kPriorityReplayBufferSample = "PriorityReplayBufferSample";

@@ -137,6 +138,7 @@ const std::map<std::string, std::string> kOpNameToAicpuOpNameMap{
  {kCumSum, "Cumsum"},
  {kCumProd, "Cumprod"},
  {kSampleDistortedBoundingBoxV2, "SampleDistortedBoundingBoxExt2"},
  {kSparseToDenseV2, "SparseToDense"},
  {kAvgPoolV1, "AvgPool"},
  {kNonZero, "Where"},
  {kAvgPoolGradV1, "AvgPoolGrad"},

@@ -43,6 +43,7 @@ using CTask = std::function<void(size_t, size_t)>;
namespace mindspore {
namespace kernel {
constexpr char KERNEL_SIZE[] = "kernel_size";
constexpr char VALIDATE_INDICES[] = "validate_indices";
constexpr char STRIDE[] = "stride";
constexpr char STRIDES[] = "strides";
constexpr char DILATION[] = "dilation";
@@ -0,0 +1,276 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "plugin/device/cpu/kernel/sparse_to_dense_v2_cpu_kernel.h"
#include <algorithm>
#include <utility>
#include <vector>
#include <memory>
#include <map>
#include "plugin/device/cpu/hal/device/cpu_device_address.h"

namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kSparseToDenseV2TwoDims = 2;
constexpr size_t kSparseToDenseV2OneDim = 1;
constexpr size_t kSparseToDenseV2ZeroDim = 0;
constexpr size_t kSize0 = 0;
constexpr size_t kSize1 = 1;
constexpr size_t kSize2 = 2;
constexpr size_t kSize3 = 3;
constexpr size_t kSize4 = 4;
constexpr size_t kIndex0 = 0;
constexpr size_t kIndex1 = 1;
constexpr size_t kIndex2 = 2;
constexpr size_t kIndex3 = 3;
#define ADD_KERNEL(t1, t2, t3, t4, t5) \
  KernelAttr()                         \
    .AddInputAttr(kNumberType##t1)     \
    .AddInputAttr(kNumberType##t2)     \
    .AddInputAttr(kNumberType##t3)     \
    .AddInputAttr(kNumberType##t4)     \
    .AddOutputAttr(kNumberType##t5)
}  // namespace

bool SparseToDenseV2CpuKernelMod::Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
                                       const std::vector<KernelTensorPtr> &outputs) {
  kernel_name_ = base_operator->name();
  if (inputs.empty() || outputs.empty()) {
    MS_LOG(ERROR) << "For '" << kernel_name_ << "', got empty inputs or outputs, which is invalid.";
    return false;
  }
  if (!MatchKernelFunc(base_operator, inputs, outputs)) {
    return false;
  }
  return true;
}

int SparseToDenseV2CpuKernelMod::Resize(const BaseOperatorPtr &base_operator,
                                        const std::vector<KernelTensorPtr> &inputs,
                                        const std::vector<KernelTensorPtr> &outputs,
                                        const std::map<uint32_t, tensor::TensorPtr> &) {
  if (auto ret = KernelMod::Resize(base_operator, inputs, outputs); ret != KRET_OK) {
    return ret;
  }
  auto indices_shape = inputs.at(kIndex0)->GetShapeVector();
  indices_shape_ = Convert2SizeT(indices_shape);
  indices_dims_ = indices_shape_.size();
  auto output_shape = inputs.at(kIndex1)->GetShapeVector();
  output_shape_ = Convert2SizeT(output_shape);
  auto values_shape = inputs.at(kIndex2)->GetShapeVector();
  // A scalar values tensor contributes a single element.
  values_size_ = values_shape.empty() ? kSize1 : LongToSize(values_shape[0]);
  if (indices_shape_.size() == 0) {
    if (values_shape.size() != 0 && values_shape[0] != 1) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices_shape[0] (which is 1 for 0-dim indices)"
                               << " should match the number of elements of values: " << values_size_ << ".";
    }
  } else {
    if (values_shape.size() != 0) {
      if (indices_shape_[0] != values_size_) {
        MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices_shape[0] " << indices_shape_[0]
                                 << " should match the number of elements of values: " << values_size_ << ".";
      }
    }
  }
  return KRET_OK;
}

template <typename I, typename T>
void SparseToDenseV2CpuKernelMod::CheckValidateTwoDim(const std::vector<kernel::AddressPtr> &inputs,
                                                      const std::vector<kernel::AddressPtr> &workspace,
                                                      const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSize4, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSize1, kernel_name_);
  if (outputs[0]->size == 0) {
    MS_LOG(WARNING) << "For '" << kernel_name_ << "', output memory size should be greater than 0, but got 0.";
  }
  auto ret = memset_s(outputs[0]->addr, outputs[0]->size, 0, outputs[0]->size);
  if (ret != EOK) {
    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', memset output failed. Error no: " << ret;
  }
  const auto *indices_addr = reinterpret_cast<I *>(inputs[kIndex0]->addr);
  const auto *output_shape_addr = reinterpret_cast<I *>(inputs[kIndex1]->addr);
  bool valid = true;
  bool different = false;
  bool increasing = true;
  // Validate the first index row separately; every index must lie in [0, output_shape[k]).
  for (size_t k = 0; k < indices_shape_[1]; ++k) {
    size_t index = k;
    if (indices_addr[index] < 0 || indices_addr[index] >= output_shape_addr[index]) {
      valid = false;
    }
  }
  if (!valid) {
    MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are out of bounds.";
  }
  // Compare each row with the previous one to verify lexicographic order and uniqueness.
  for (size_t i = 1; i < indices_shape_[0]; ++i) {
    for (size_t j = 0; j < indices_shape_[1]; ++j) {
      size_t index1 = i * indices_shape_[1] + j;
      size_t index2 = (i - 1) * indices_shape_[1] + j;
      if (indices_addr[index1] < 0 || indices_addr[index1] >= output_shape_addr[j]) {
        valid = false;
      }
      I diff = indices_addr[index1] - indices_addr[index2];
      if (diff > 0) {
        different = true;
      }
      if (!different && diff < 0) {
        increasing = false;
      }
    }
    if (!valid) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are out of bounds.";
    }
    if (!increasing) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are out of order.";
    }
    if (!different) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are repeated.";
    }
  }
}

template <typename I, typename T>
void SparseToDenseV2CpuKernelMod::CheckValidateOneDim(const std::vector<kernel::AddressPtr> &inputs,
                                                      const std::vector<kernel::AddressPtr> &workspace,
                                                      const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kSize4, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kSize1, kernel_name_);
  if (outputs[0]->size == 0) {
    MS_LOG(WARNING) << "For '" << kernel_name_ << "', output memory size should be greater than 0, but got 0.";
  }
  auto ret = memset_s(outputs[0]->addr, outputs[0]->size, 0, outputs[0]->size);
  if (ret != EOK) {
    MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', memset output failed. Error no: " << ret;
  }
  const auto *indices_addr = reinterpret_cast<I *>(inputs[kIndex0]->addr);
  const auto *output_shape_addr = reinterpret_cast<I *>(inputs[kIndex1]->addr);
  bool valid = true;
  bool different = false;
  bool increasing = true;
  // Every index must lie in [0, output_shape[0]).
  if (indices_addr[0] < 0 || indices_addr[0] >= output_shape_addr[0]) {
    valid = false;
  }
  for (size_t i = 1; i < indices_shape_[0]; ++i) {
    if (indices_addr[i] < 0 || indices_addr[i] >= output_shape_addr[0]) {
      valid = false;
    }
    I diff = indices_addr[i] - indices_addr[i - 1];
    if (diff > 0) {
      different = true;
    }
    if (!different && diff < 0) {
      increasing = false;
    }
    if (!valid) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are out of bounds.";
    }
    if (!increasing) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are out of order.";
    }
    if (!different) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the indices are repeated.";
    }
  }
}

template <typename I, typename T>
bool SparseToDenseV2CpuKernelMod::LaunchKernel(const std::vector<kernel::AddressPtr> &inputs,
                                               const std::vector<kernel::AddressPtr> &workspace,
                                               const std::vector<kernel::AddressPtr> &outputs) {
  if (validate_indices_ && indices_dims_ == kSparseToDenseV2TwoDims) {
    (void)SparseToDenseV2CpuKernelMod::CheckValidateTwoDim<I, T>(inputs, workspace, outputs);
  } else if (validate_indices_ && indices_dims_ == kSparseToDenseV2OneDim) {
    (void)SparseToDenseV2CpuKernelMod::CheckValidateOneDim<I, T>(inputs, workspace, outputs);
  }
  const auto *indices_addr = reinterpret_cast<I *>(inputs[kIndex0]->addr);
  const auto *output_shape_addr = reinterpret_cast<I *>(inputs[kIndex1]->addr);
  const auto *values_addr = reinterpret_cast<T *>(inputs[kIndex2]->addr);
  const auto *default_value_addr = reinterpret_cast<T *>(inputs[kIndex3]->addr);
  auto *output_addr = reinterpret_cast<T *>(outputs[0]->addr);
  const size_t indices_length = inputs[kIndex0]->size / sizeof(I);
  const size_t output_length = outputs[0]->size / sizeof(T);
  const size_t values_length = inputs[kIndex2]->size / sizeof(T);
  size_t rank = output_shape_[0];
  // Fill the whole dense output with the default value first.
  for (size_t p = 0; p < output_length; ++p) {
    output_addr[p] = default_value_addr[0];
  }
  if (indices_dims_ == kSparseToDenseV2ZeroDim) {
    size_t out_index = 0;
    int index = indices_addr[0];
    if (index >= output_shape_addr[0] || index < 0) {
      MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the 0th value in "
                               << "0th dimension index: " << index << " of 'output' out of bounds: [0, "
                               << output_shape_addr[0] << ")";
    }
    size_t count = 1;
    out_index += IntToSize(index) * count;
    output_addr[out_index] = values_addr[0];
  } else {
    for (size_t i = 0; i < indices_shape_[0]; ++i) {
      if (i >= values_length && values_size_ != 1) {
        MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the index of 'values' is out of bounds.";
      }
      size_t out_index = 0;
      for (size_t j = 0; j < rank; j++) {
        if (i * rank + j >= indices_length) {
          MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the index of 'indices' is out of bounds.";
        }
        int index = indices_addr[i * rank + j];
        if (index >= output_shape_addr[j] || index < 0) {
          MS_EXCEPTION(ValueError) << "For '" << kernel_name_ << "', the " << i << "th value in " << j
                                   << "th dimension index: " << index << " of 'output' out of bounds: [0, "
                                   << output_shape_addr[j] << ")";
        }
        // Row-major stride of dimension j is the product of the trailing dimensions.
        size_t count = 1;
        for (size_t k = j + 1; k < rank; k++) {
          count *= output_shape_addr[k];
        }
        out_index += IntToSize(index) * count;
      }
      if (values_size_ == 1) {
        output_addr[out_index] = values_addr[0];
      } else {
        output_addr[out_index] = values_addr[i];
      }
    }
  }
  return true;
}

const std::vector<std::pair<KernelAttr, SparseToDenseV2CpuKernelMod::KernelRunFunc>>
  &SparseToDenseV2CpuKernelMod::GetFuncList() const {
  static const std::vector<std::pair<KernelAttr, SparseToDenseV2CpuKernelMod::KernelRunFunc>> func_list = {
    {ADD_KERNEL(Int32, Int32, Bool, Bool, Bool), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, bool>},
    {ADD_KERNEL(Int32, Int32, Int8, Int8, Int8), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, int8_t>},
    {ADD_KERNEL(Int32, Int32, Int16, Int16, Int16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, int16_t>},
    {ADD_KERNEL(Int32, Int32, Int32, Int32, Int32), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, int32_t>},
    {ADD_KERNEL(Int32, Int32, Int64, Int64, Int64), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, int64_t>},
    {ADD_KERNEL(Int32, Int32, UInt8, UInt8, UInt8), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, uint8_t>},
    {ADD_KERNEL(Int32, Int32, UInt16, UInt16, UInt16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, uint16_t>},
    {ADD_KERNEL(Int32, Int32, Float16, Float16, Float16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, float16>},
    {ADD_KERNEL(Int32, Int32, Float32, Float32, Float32), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, float>},
    {ADD_KERNEL(Int32, Int32, Float64, Float64, Float64), &SparseToDenseV2CpuKernelMod::LaunchKernel<int32_t, double>},
    {ADD_KERNEL(Int64, Int64, Bool, Bool, Bool), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, bool>},
    {ADD_KERNEL(Int64, Int64, Int8, Int8, Int8), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, int8_t>},
    {ADD_KERNEL(Int64, Int64, Int16, Int16, Int16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, int16_t>},
    {ADD_KERNEL(Int64, Int64, Int32, Int32, Int32), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, int32_t>},
    {ADD_KERNEL(Int64, Int64, Int64, Int64, Int64), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, int64_t>},
    {ADD_KERNEL(Int64, Int64, UInt8, UInt8, UInt8), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, uint8_t>},
    {ADD_KERNEL(Int64, Int64, UInt16, UInt16, UInt16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, uint16_t>},
    {ADD_KERNEL(Int64, Int64, Float16, Float16, Float16), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, float16>},
    {ADD_KERNEL(Int64, Int64, Float32, Float32, Float32), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, float>},
    {ADD_KERNEL(Int64, Int64, Float64, Float64, Float64), &SparseToDenseV2CpuKernelMod::LaunchKernel<int64_t, double>},
  };
  return func_list;
}
MS_KERNEL_FACTORY_REG(NativeCpuKernelMod, SparseToDenseV2, SparseToDenseV2CpuKernelMod);
}  // namespace kernel
}  // namespace mindspore
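
To see the scatter arithmetic in LaunchKernel without the C++ scaffolding, here is a minimal NumPy sketch of the same computation; the helper name and the NumPy framing are illustrative, not part of this commit:

import numpy as np

def sparse_to_dense(indices, output_shape, values, default_value):
    """Fill the dense output with default_value, then scatter values row-major."""
    rank = len(output_shape)
    idx = np.asarray(indices).reshape(-1, rank)          # 0-D/1-D indices become index rows
    vals = np.broadcast_to(np.asarray(values).ravel(), (idx.shape[0],))
    out = np.full(output_shape, default_value, dtype=vals.dtype).ravel()
    for i, row in enumerate(idx):
        flat = 0
        for j, ind in enumerate(row):
            if not 0 <= ind < output_shape[j]:
                raise ValueError(f"index {ind} out of bounds for dimension {j}")
            flat = flat * output_shape[j] + ind          # equals sum(ind_j * prod(shape[j+1:]))
        out[flat] = vals[i]
    return out.reshape(output_shape)

print(sparse_to_dense([[0, 1], [1, 2]], (3, 4), [1.0, 2.0], 0.0))
# [[0. 1. 0. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 0. 0.]]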

@@ -0,0 +1,73 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SPARSE_TO_DENSE_V2_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SPARSE_TO_DENSE_V2_CPU_KERNEL_H_

#include <memory>
#include <unordered_map>
#include <vector>
#include <utility>
#include <map>
#include <functional>
#include "kernel/common_utils.h"
#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/factory/ms_factory.h"

namespace mindspore {
namespace kernel {
class SparseToDenseV2CpuKernelMod : public NativeCpuKernelMod, public MatchKernelHelper<SparseToDenseV2CpuKernelMod> {
 public:
  SparseToDenseV2CpuKernelMod() = default;
  ~SparseToDenseV2CpuKernelMod() override = default;

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override {
    return kernel_func_(this, inputs, workspace, outputs);
  }
  bool Init(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
            const std::vector<KernelTensorPtr> &outputs) override;

  int Resize(const BaseOperatorPtr &base_operator, const std::vector<KernelTensorPtr> &inputs,
             const std::vector<KernelTensorPtr> &outputs, const std::map<uint32_t, tensor::TensorPtr> &) override;

  const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;

 protected:
  std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }

 private:
  template <typename I, typename T>
  void CheckValidateOneDim(const std::vector<kernel::AddressPtr> &inputs,
                           const std::vector<kernel::AddressPtr> &workspace,
                           const std::vector<kernel::AddressPtr> &outputs);
  template <typename I, typename T>
  void CheckValidateTwoDim(const std::vector<kernel::AddressPtr> &inputs,
                           const std::vector<kernel::AddressPtr> &workspace,
                           const std::vector<kernel::AddressPtr> &outputs);
  template <typename I, typename T>
  bool LaunchKernel(const std::vector<kernel::AddressPtr> &inputs, const std::vector<kernel::AddressPtr> &workspace,
                    const std::vector<kernel::AddressPtr> &outputs);

  std::vector<size_t> indices_shape_;
  std::vector<size_t> output_shape_;
  bool validate_indices_{true};
  size_t values_size_{0};
  size_t indices_dims_{0};
};
}  // namespace kernel
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_SPARSE_TO_DENSE_V2_CPU_KERNEL_H_

@@ -67,6 +67,7 @@ PrimShapeDependMap &GetHostDependsMap() {
  static const auto &kDropoutGenMask = prim::kPrimDropoutGenMask->name();
  static const auto &kStridedSlice = prim::kPrimStridedSlice->name();
  static const auto &kStridedSliceGrad = prim::kPrimStridedSliceGrad->name();
  static const auto &kSparseToDenseV2 = prim::kPrimSparseToDenseV2->name();
  static const auto &kResizeBicubic = prim::kPrimResizeBicubic->name();
  static const auto &kRandomCategorical = prim::kPrimRandomCategorical->name();
  static const auto &kMatrixDiagV3 = prim::kPrimMatrixDiagV3->name();

@@ -156,6 +157,7 @@ PrimShapeDependMap &GetHostDependsMap() {
  {kResizeNearestNeighborV2, ShapeSet{1}},
  {kResizeNearestNeighborV2Grad, ShapeSet{1}},
  {kScatterNd, ShapeSet{2}},
  {kSparseToDenseV2, ShapeSet{1}},
  {kSliceGrad, ShapeSet{2, 3}},
  {kFillV2, ShapeSet{0}},
  {kRandomCategorical, ShapeSet{1}},

@@ -290,6 +290,7 @@ constexpr auto kCOOTensorDenseMatmul = "COOTensorDenseMatmul";

// Sparse ops
constexpr auto kSparseTensorDenseMatmul = "SparseTensorDenseMatmul";
constexpr auto kSparseToDenseV2 = "SparseToDenseV2";
constexpr auto kSparseAddmm = "SparseAddmm";
constexpr auto kCSRReduceSum = "CSRReduceSum";
constexpr auto kCSRMV = "CSRMV";

@@ -928,6 +929,7 @@ GVAR_DEF(PrimitivePtr, kPrimCSRTensorGetDenseShape, std::make_shared<Primitive>(

// Sparse ops
GVAR_DEF(PrimitivePtr, kPrimSparseTensorDenseMatmul, std::make_shared<Primitive>(kSparseTensorDenseMatmul));
GVAR_DEF(PrimitivePtr, kPrimSparseToDenseV2, std::make_shared<Primitive>(kSparseToDenseV2));
GVAR_DEF(PrimitivePtr, kPrimSparseAddmm, std::make_shared<Primitive>(kSparseAddmm));
GVAR_DEF(PrimitivePtr, kPrimCOOTensorDenseMatmul, std::make_shared<Primitive>(kCOOTensorDenseMatmul));
GVAR_DEF(PrimitivePtr, kPrimCSRReduceSum, std::make_shared<Primitive>(kCSRReduceSum));

@@ -227,6 +227,7 @@ constexpr auto kUseNesterov = "use_nesterov";
constexpr auto kUseNesteroy = "use_nesteroy";
constexpr auto kUseRegularNms = "use_regular_nms";
constexpr auto kValid = "valid";
constexpr auto kValidateIndices = "validate_indices";
constexpr auto kValue = "value";
constexpr auto kVariances = "variances";
constexpr auto kWeightDecay = "weight_decay";

@@ -0,0 +1,172 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/sparse_to_dense_v2.h"
#include <cmath>
#include <memory>
#include <set>
#include <vector>
#include <map>
#include <string>
#include "abstract/ops/primitive_infer_map.h"
#include "mindapi/src/helper.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
#include "utils/tensor_construct_utils.h"

namespace mindspore {
namespace ops {
namespace {
constexpr size_t kIndiceselement = 2;
constexpr size_t kOutShapeSize = 1;
constexpr size_t kValuesSize = 1;
constexpr size_t kDefaultSize = 0;
constexpr size_t kDefaultElem = 1;

abstract::ShapePtr SparseToDenseV2InferShape(const PrimitivePtr &primitive,
                                             const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  auto max_length_ptr = primitive->GetAttr("max_length");
  MS_EXCEPTION_IF_NULL(max_length_ptr);
  int64_t max_length = GetValue<int64_t>(max_length_ptr);
  auto indices_shape_ptr = input_args[kInputIndex0]->BuildShape();
  auto output_shape_shape_ptr = input_args[kInputIndex1]->BuildShape();
  auto values_shape_ptr = input_args[kInputIndex2]->BuildShape();
  auto default_value_shape_ptr = input_args[kInputIndex3]->BuildShape();
  auto indices_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(indices_shape_ptr)[kShape];
  auto output_shape_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(output_shape_shape_ptr)[kShape];
  auto values_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(values_shape_ptr)[kShape];
  auto default_value_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(default_value_shape_ptr)[kShape];
  (void)CheckAndConvertUtils::CheckInteger("indices dimension", indices_shape.size(), kLessEqual, kIndiceselement,
                                           prim_name);
  (void)CheckAndConvertUtils::CheckInteger("output_shape dimension", output_shape_shape.size(), kEqual, kOutShapeSize,
                                           prim_name);
  (void)CheckAndConvertUtils::CheckInteger("values dimension", values_shape.size(), kLessEqual, kValuesSize, prim_name);
  (void)CheckAndConvertUtils::CheckInteger("default_value dimension", default_value_shape.size(), kEqual, kDefaultSize,
                                           prim_name);
  if (indices_shape.size() == 0) {
    if (values_shape.size() != 0 && values_shape[0] != 1) {
      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the indices_shape[0] (which is 1 for 0-dim indices)"
                               << " should match the number of elements of values: " << values_shape[0] << ".";
    }
  } else {
    if (values_shape.size() != 0) {
      if (indices_shape[0] != values_shape[0]) {
        MS_EXCEPTION(ValueError) << "For '" << prim_name << "', the indices_shape[0] " << indices_shape[0]
                                 << " should match the number of elements of values: " << values_shape[0] << ".";
      }
    }
  }
  size_t output_shape_numelement = LongToSize(output_shape_shape[0]);
  auto output_shape = input_args[1]->cast<abstract::AbstractTensorPtr>();
  MS_EXCEPTION_IF_NULL(output_shape);
  auto output_shape_value_ = output_shape->BuildValue();
  MS_EXCEPTION_IF_NULL(output_shape_value_);
  auto output_shape_tensor = output_shape_value_->cast<tensor::TensorPtr>();
  auto output_shape_type = input_args[1]->BuildType();
  MS_EXCEPTION_IF_NULL(output_shape_type);
  auto output_shape_type_id = output_shape_type->cast<TensorTypePtr>();
  MS_EXCEPTION_IF_NULL(output_shape_type_id);
  auto output_shape_type_element = output_shape_type_id->element();
  MS_EXCEPTION_IF_NULL(output_shape_type_element);
  std::vector<int64_t> y_shape;
  if (!input_args[1]->BuildValue()->isa<AnyValue>() && !input_args[1]->BuildValue()->isa<None>()) {
    if (output_shape_type_element->type_id() == kNumberTypeInt32) {
      auto output_shape_data = reinterpret_cast<int32_t *>(output_shape_tensor->data_c());
      for (size_t i = 0; i < output_shape_numelement; ++i) {
        if (output_shape_data[i] > 0) {
          y_shape.push_back(output_shape_data[i]);
        } else {
          MS_EXCEPTION(ValueError) << "For '" << prim_name << "', each dimension must be greater than 0. But got the "
                                   << i << "th dimension of output " << output_shape_data[i] << ".";
        }
      }
    } else if (output_shape_type_element->type_id() == kNumberTypeInt64) {
      auto output_shape_data = reinterpret_cast<int64_t *>(output_shape_tensor->data_c());
      for (size_t i = 0; i < output_shape_numelement; ++i) {
        if (output_shape_data[i] > 0) {
          y_shape.push_back(output_shape_data[i]);
        } else {
          MS_EXCEPTION(ValueError) << "For '" << prim_name << "', each dimension must be greater than 0. But got the "
                                   << i << "th dimension of output " << output_shape_data[i] << ".";
        }
      }
    }
    return std::make_shared<abstract::Shape>(y_shape);
  } else {
    const uint32_t input_shapes = static_cast<uint32_t>(std::pow(max_length, 1.0 / SizeToInt(output_shape_numelement)));
    ShapeVector shape_min;
    ShapeVector shape_max;
    for (size_t i = 0; i < output_shape_numelement; i++) {
      y_shape.push_back(abstract::Shape::SHP_ANY);
      shape_min.push_back(0);
      shape_max.push_back(input_shapes);
    }
    return std::make_shared<abstract::Shape>(y_shape, shape_min, shape_max);
  }
}

TypePtr SparseToDenseV2InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
  auto prim_name = prim->name();
  auto infer_type_indices = input_args[kInputIndex0]->BuildType();
  auto infer_type_output_shape = input_args[kInputIndex1]->BuildType();
  auto infer_type_values = input_args[kInputIndex2]->BuildType();
  auto infer_type_default_value = input_args[kInputIndex3]->BuildType();
  const std::set<TypePtr> valid_types = {kInt64, kInt32};
  std::map<std::string, TypePtr> types;
  types.insert({"indices", infer_type_indices});
  types.insert({"output_shape", infer_type_output_shape});
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim_name);
  const std::set<TypePtr> valid_types_value = {kInt64, kInt32, kInt16, kInt8, kUInt16,
                                               kUInt8, kFloat16, kFloat32, kFloat64, kBool};
  std::map<std::string, TypePtr> types_value;
  types_value.insert({"values", infer_type_values});
  types_value.insert({"default_value", infer_type_default_value});
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types_value, valid_types_value, prim_name);
  return infer_type_values;
}
}  // namespace

MIND_API_OPERATOR_IMPL(SparseToDenseV2, BaseOperator);

void SparseToDenseV2::set_validate_indices(const bool validate_indices) {
  (void)this->AddAttr(kValidateIndices, api::MakeValue(validate_indices));
}

bool SparseToDenseV2::get_validate_indices() const {
  auto value_ptr = GetAttr(kValidateIndices);
  return GetValue<bool>(value_ptr);
}

void SparseToDenseV2::Init(const bool validate_indices) { this->set_validate_indices(validate_indices); }

AbstractBasePtr SparseToDenseV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                     const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  const size_t input_num = 4;
  (void)CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, prim_name);
  auto infer_type = SparseToDenseV2InferType(primitive, input_args);
  auto infer_shape = SparseToDenseV2InferShape(primitive, input_args);
  return abstract::MakeAbstract(infer_shape, infer_type);
}
REGISTER_PRIMITIVE_EVAL_IMPL(SparseToDenseV2, prim::kPrimSparseToDenseV2, SparseToDenseV2Infer, nullptr, true);
}  // namespace ops
}  // namespace mindspore
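
The two branches of SparseToDenseV2InferShape condense to a small rule: if the `output_shape` input is a compile-time constant, the output shape is simply its (all-positive) values; otherwise every dimension is dynamic, with a per-dimension upper bound derived from the `max_length` attribute. A hedged Python sketch of that rule (the helper name is illustrative; -1 stands in for abstract::Shape::SHP_ANY, and the min/max shape vectors are elided):

import math

def infer_dense_shape(const_output_shape, rank, max_length=1000000):
    """Sketch of the infer-shape rule, assuming rank >= 1."""
    if const_output_shape is not None:            # value known at compile time
        if any(d <= 0 for d in const_output_shape):
            raise ValueError("each dimension must be greater than 0")
        return list(const_output_shape)           # fully static output shape
    # Value unknown: emit dynamic dims whose per-dimension upper bound keeps
    # the total element count at or below max_length.
    per_dim_bound = int(math.pow(max_length, 1.0 / rank))
    return [-1] * rank                            # each dim bounded above by per_dim_bound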

@@ -0,0 +1,52 @@
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_V2_H_
#define MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_V2_H_
#include <vector>
#include <memory>
#include <string>
#include <map>
#include "ops/base_operator.h"
#include "mindapi/base/types.h"

namespace mindspore {
namespace ops {
constexpr auto kNameSparseToDenseV2 = "SparseToDenseV2";
/// \brief Converts a sparse representation into a dense tensor.
/// Refer to Python API @ref mindspore.ops.SparseToDense for more details.
class MIND_API SparseToDenseV2 : public BaseOperator {
 public:
  MIND_API_BASE_MEMBER(SparseToDenseV2);
  /// \brief Constructor.
  SparseToDenseV2() : BaseOperator(kNameSparseToDenseV2) {
    InitIOName({"indices", "output_shape", "values", "default_value"}, {"output"});
  }
  /// \brief Init.
  void Init(const bool validate_indices = true);
  /// \brief Set validate_indices.
  void set_validate_indices(const bool validate_indices);
  /// \brief Get validate_indices.
  ///
  /// \return validate_indices.
  bool get_validate_indices() const;
};
abstract::AbstractBasePtr SparseToDenseV2Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                               const std::vector<abstract::AbstractBasePtr> &input_args);
}  // namespace ops
}  // namespace mindspore

#endif  // MINDSPORE_CORE_OPS_SPARSE_TO_DENSE_V2_H_

@@ -16,6 +16,7 @@
"""bprop primitives"""
from mindspore.ops.operations.sparse_ops import CSRSparseMatrixToSparseTensor
from mindspore.ops.operations.sparse_ops import SparseTensorToCSRSparseMatrix
from mindspore.ops.operations.sparse_ops import SparseToDenseV2
from mindspore.ops.operations.sparse_ops import SparseSegmentSqrtN
from mindspore.ops.operations.sparse_ops import SparseSegmentSqrtNWithNumSegments
from mindspore.common import dtype as mstype

@@ -54,6 +55,19 @@ def get_bprop_csr_sparse_matrix_to_sparse_tensor(self):
    return bprop


@bprop_getters.register(SparseToDenseV2)
def get_bprop_sparse_to_dense_v2(self):
    """Generate bprop for SparseToDenseV2."""

    def bprop(indices, output_shape, values, default_value, out, dout):
        sparse_values_grad = F.gather_nd(dout, indices)
        default_value_grad = F.reduce_sum(dout) - F.reduce_sum(sparse_values_grad)
        result_all = (zeros_like(indices), zeros_like(output_shape), sparse_values_grad, default_value_grad)
        return result_all

    return bprop


@bprop_getters.register(SparseSegmentSqrtN)
def get_bprop_sparse_segment_sqrt_n(self):
    """Grad definition for `SparseSegmentSqrtN` operation."""

@@ -306,5 +306,6 @@ from .sparse_apply_proximal_gradient_descent import _sparse_apply_proximal_gradi
from .sparse_apply_momentum import _sparse_apply_momentum_aicpu
from .linear_sum_assignment import _linear_sum_assignment_aicpu
from .orgqr import _orgqr_aicpu
from .sparse_to_dense_v2 import _sparse_to_dense_v2_aicpu
from .sparse_sparse_minimum import _sparse_sparse_minimum_aicpu
from .broadcast_to import _broadcast_to_aicpu

@@ -0,0 +1,73 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""SparseToDenseV2 op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

sparse_to_dense_v2_op_info = AiCPURegOp("SparseToDenseV2") \
    .fusion_type("OPAQUE") \
    .attr("validate_indices", "bool") \
    .input(0, "indices", "required") \
    .input(1, "output_shape", "required") \
    .input(2, "values", "required") \
    .input(3, "default_value", "required") \
    .output(0, "y", "required") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.F64_Default, DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.F64_Default, DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, \
                  DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default, \
                  DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(sparse_to_dense_v2_op_info)
def _sparse_to_dense_v2_aicpu():
    """SparseToDenseV2 AiCPU register"""
    return
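
The twenty dtype_format rows follow a simple product pattern: two index dtypes crossed with ten value dtypes, with `values`, `default_value`, and `y` always sharing a dtype. A small sketch (not part of the PR) that regenerates the table, handy for cross-checking against the twenty ADD_KERNEL entries in the CPU kernel:

from itertools import product

index_types = ["I32", "I64"]                      # dtypes accepted for indices/output_shape
value_types = ["BOOL", "I8", "I16", "I32", "I64",
               "U8", "U16", "F16", "F32", "F64"]  # dtypes for values/default_value/y

# Each registration row is (indices, output_shape, values, default_value, y).
rows = [(i, i, v, v, v) for i, v in product(index_types, value_types)]
assert len(rows) == 20                            # matches the 20 ADD_KERNEL entries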

@@ -89,6 +89,60 @@ class SparseToDense(PrimitiveWithInfer):
        return out


class SparseToDenseV2(Primitive):
    """
    Converts a sparse representation into a dense tensor.

    Args:
        validate_indices (bool): If true, indices are checked to make sure they are sorted in
            lexicographic order and that there are no repeats. Default: True.

    Inputs:
        - **indices** (Tensor) - A 0-D, 1-D, or 2-D Tensor of type int32 or int64, representing the
          positions of the elements in the sparse tensor.
        - **output_shape** (Tensor) - A 1-D Tensor of the same type as `indices`, representing the shape
          of the dense output tensor.
        - **values** (Tensor) - A 1-D Tensor, representing the values corresponding to the positions
          in `indices`, or a scalar value to be used for all positions.
        - **default_value** (Tensor) - A 0-D Tensor of the same type as `values`, the scalar value to
          set for positions not specified in `indices`.

    Outputs:
        Tensor, converted from the sparse tensor. The dtype is the same as `values`, and the shape is
        `output_shape`.

    Raises:
        TypeError: If the dtype of `indices` is neither int32 nor int64.
        TypeError: If the dtype of `output_shape` is neither int32 nor int64.
        ValueError: If the shapes of `output_shape`, `indices`, `default_value` and `values`
            do not meet the parameter descriptions above.
        ValueError: If any element of `output_shape` is not greater than 0.
        ValueError: If shape[0] of `indices` does not match the number of elements of `values`.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
        >>> output_shape = Tensor([3, 4], dtype=ms.int32)
        >>> values = Tensor([1, 2], dtype=ms.float32)
        >>> default_value = Tensor(0, dtype=ms.float32)
        >>> sparse_to_dense_v2 = ops.SparseToDenseV2()
        >>> out = sparse_to_dense_v2(indices, output_shape, values, default_value)
        >>> print(out)
        [[0. 1. 0. 0.]
         [0. 0. 2. 0.]
         [0. 0. 0. 0.]]
    """

    @prim_attr_register
    def __init__(self, validate_indices=True):
        """Initialize SparseToDenseV2."""
        self.add_prim_attr("max_length", 1000000)
        self.validate_indices = validate_indices
        self.add_prim_attr("validate_indices", self.validate_indices)
        self.init_prim_io_names(inputs=['indices', 'output_shape', 'values', 'default_value'], outputs=['output'])
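
A quick illustration of what validate_indices buys (a hypothetical session, not part of this diff): with the default True, the CPU kernel rejects out-of-order or repeated indices during its validation pass; with False, the scatter loop itself never assumes ordering, so unsorted-but-unique indices still land correctly.

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

indices = Tensor(np.array([[1, 2], [0, 1]]), ms.int32)   # not lexicographically sorted
output_shape = Tensor(np.array([3, 4]), ms.int32)
values = Tensor(np.array([2.0, 1.0]), ms.float32)
default_value = Tensor(0.0, ms.float32)

op = ops.SparseToDenseV2(validate_indices=False)         # True would raise ValueError here
print(op(indices, output_shape, values, default_value))
# [[0. 1. 0. 0.]
#  [0. 0. 2. 0.]
#  [0. 0. 0. 0.]]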


class SparseTensorDenseAdd(Primitive):
    """
    Add a sparse tensor and a dense tensor to get a dense tensor.

@@ -123,6 +123,7 @@ from mindspore.ops.operations.nn_ops import ReLUV3
from mindspore.ops.operations.sparse_ops import CSRSparseMatrixToDense
from mindspore.ops.operations.sparse_ops import DenseToCSRSparseMatrix, Sspaddmm
from mindspore.ops.operations.sparse_ops import SparseTensorDenseMatmul
from mindspore.ops.operations.sparse_ops import SparseToDenseV2
from mindspore.ops.operations.sparse_ops import SparseMatrixNNZ
from mindspore.ops.operations.sparse_ops import SparseTensorDenseAdd
from mindspore.ops.operations.sparse_ops import SparseMatrixTranspose

@@ -3254,6 +3255,13 @@ test_case_array_ops = [
    'block': P.SpaceToDepth(2),
    'desc_inputs': [[1, 3, 2, 2]],
    'desc_bprop': [[1, 12, 1, 1]]}),
('SparseToDenseV2', {
    'block': SparseToDenseV2(),
    'desc_inputs': [Tensor(np.array([[0, 1]]).astype(np.int32)),
                    Tensor(np.array([2, 2]).astype(np.int32)),
                    Tensor(np.array([1.0]).astype(np.float32)),
                    Tensor(0.0, dtype=mstype.float32)],
    'desc_bprop': [Tensor(np.array([[0.0, 1.0], [0.0, 0.0]]).astype(np.float32))]}),
('DepthToSpace', {
    'block': P.DepthToSpace(2),
    'desc_inputs': [[1, 12, 1, 1]],