!62814 View operator: merge the Diagonal dynamic-shape changes
Merge pull request !62814 from wudawei/feature-2.3-view
Commit: 8ddf295604
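What the PR does, in brief: `offset`, `dim1`, and `dim2` move from Diagonal primitive attributes to operator inputs so the view-based Diagonal can participate in dynamic-shape graphs, and the kernels, infer functions, and bprop are updated to match. A minimal user-facing sketch, assuming the functional `ops.diagonal` interface described in the docstrings later in this diff:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# Sketch only: after this change the three scalars reach the Diagonal primitive
# as inputs rather than attributes, which is what enables dynamic-shape support.
x = Tensor(np.arange(24).reshape(2, 3, 4), ms.float32)
y = ops.diagonal(x, offset=1, dim1=-1, dim2=1)
print(y.shape)  # expected (2, 2): dim1/dim2 removed, diagonal length appended last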
@@ -278,8 +278,8 @@ NodePtrList FminFmaxGrad(BpropIRBuilder *ib, bool if_fmin) {
return {brrx1, brrx2};
}

inline NodePtrList GradDiagonal(Emitter *ib, const NodePtr &dout, const NodePtr &dx_trans_shape,
std::tuple<int64_t, int64_t, int64_t, size_t> int_tuple, const TypePtr &x_dtype) {
inline NodePtr GradDiagonal(Emitter *ib, const NodePtr &dout, const NodePtr &dx_trans_shape,
std::tuple<int64_t, int64_t, int64_t, size_t> int_tuple, const TypePtr &x_dtype) {
auto [offset, dim1, dim2, x_dim] = int_tuple;
auto value = ib->Tensor(0, x_dtype);
auto dx = ib->Emit("FillV2", {dx_trans_shape, value});
@@ -300,7 +300,7 @@ inline NodePtrList GradDiagonal(Emitter *ib, const NodePtr &dout, const NodePtr
}
}
dx = ib->Transpose(dx, perm);
return {dx};
return dx;
}

class DiagonalShapeCalc : public ShapeCalcFunctor {
@@ -2240,12 +2240,15 @@ REG_BPROP_BUILDER("Baddbmm").SetUnusedInputs({}).SetBody(BODYFUNC(ib) {
});

REG_BPROP_BUILDER("Diagonal").SetUnusedInputs({i0}).SetBody(BODYFUNC(ib) {
auto offset = GetValue<int64_t>(ib->GetAttr("offset"));
auto dim1 = GetValue<int64_t>(ib->GetAttr("dim1"));
auto dim2 = GetValue<int64_t>(ib->GetAttr("dim2"));
auto x = ib->GetInput(kIndex0);
auto out = ib->GetInput(kIndex1);
auto dout = ib->GetInput(kIndex2);
auto offset_node = ib->GetInput(kIndex1);
auto dim1_node = ib->GetInput(kIndex2);
auto dim2_node = ib->GetInput(kIndex3);
auto offset = GetValue<int64_t>(offset_node->BuildValue());
auto dim1 = GetValue<int64_t>(dim1_node->BuildValue());
auto dim2 = GetValue<int64_t>(dim2_node->BuildValue());
auto out = ib->GetInput(kIndex4);
auto dout = ib->GetInput(kIndex5);
auto x_shape = ib->GetShape(x);
if (IsDynamicRank(x_shape)) {
MS_LOG_EXCEPTION << "Diagonal doesn't support dynamic rank now, because it need rank of x to do transpose";
@@ -2258,16 +2261,20 @@ REG_BPROP_BUILDER("Diagonal").SetUnusedInputs({i0}).SetBody(BODYFUNC(ib) {
if (dim2 < 0) {
dim2 += x_dim;
}
auto true_case = [offset, dim1, dim2, &x, &out, &dout, &x_shape, &x_dtype, x_dim](Emitter *ib) -> NodePtrList {
auto true_case = [offset, dim1, dim2, &x, &out, &dout, &x_shape, &x_dtype, &offset_node, &dim1_node, &dim2_node,
x_dim](Emitter *ib) -> NodePtrList {
auto dx_trans_shape = ib->ShapeCalc(std::make_shared<DiagonalShapeCalc>(dim1, dim2), {x, out})[0];
return GradDiagonal(ib, dout, dx_trans_shape, {offset, dim1, dim2, x_dim}, x_dtype);
auto grad_diagonal = GradDiagonal(ib, dout, dx_trans_shape, {offset, dim1, dim2, x_dim}, x_dtype);
return {grad_diagonal, ib->ZerosLike(offset_node), ib->ZerosLike(dim1_node), ib->ZerosLike(dim2_node)};
};
auto false_case = [&x, &x_dtype, &offset_node, &dim1_node, &dim2_node](Emitter *ib) -> NodePtrList {
return {ib->ZerosLike(x), ib->ZerosLike(offset_node), ib->ZerosLike(dim1_node), ib->ZerosLike(dim2_node)};
};
auto false_case = [&x](Emitter *ib) -> NodePtrList { return {ib->ZerosLike(x)}; };
if (IsDynamic(ib->GetShape(out))) {
auto out_size = ib->Emit("Size", {out});
auto cond = ib->Emit("scalar_eq", {out_size, ib->Value<int64_t>(0)});
auto dx = ib->Conditional(cond, false_case, true_case);
return {dx};
return {dx, ib->OutZeros(offset_node), ib->OutZeros(dim1_node), ib->OutZeros(dim2_node)};
}
if (ib->GetSize(out) > 0) {
return true_case(ib);
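The bprop above builds the gradient by filling a zero tensor of the (transposed) input shape and scattering `dout` onto the selected diagonal, and it now also returns zero gradients for the three new scalar inputs. A rough NumPy sketch of the 2-D case, as an illustration only rather than the framework code:

import numpy as np

def diagonal_grad_2d(dout, x_shape, offset=0):
    """Place dout on the chosen diagonal of a zero tensor (conceptual 2-D version)."""
    dx = np.zeros(x_shape, dtype=dout.dtype)  # analogous to FillV2 with a zero scalar
    idx = np.arange(dout.shape[0])
    if offset >= 0:
        dx[idx, idx + offset] = dout
    else:
        dx[idx - offset, idx] = dout
    return dx

# A (3, 4) input with offset=1 has a diagonal of length min(3, 4 - 1) = 3,
# so a length-3 dout scatters back into a (3, 4) zero array.
print(diagonal_grad_2d(np.array([1., 2., 3.]), (3, 4), offset=1))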
@@ -5330,5 +5330,11 @@ opInfo.userDefined=True
opInfo.workspaceSize=1024
input0.type=DT_BOOL,DT_DOUBLE,DT_FLOAT,DT_FLOAT16,DT_INT16,DT_INT32,DT_INT64,DT_INT8,DT_UINT16,DT_UINT32,DT_UINT64,DT_UINT8
input0.name=x
input1.type=DT_INT64
input1.name=offset
input2.type=DT_INT64
input2.name=dim1
input3.type=DT_INT64
input3.name=dim2
output0.type=DT_BOOL,DT_DOUBLE,DT_FLOAT,DT_FLOAT16,DT_INT16,DT_INT32,DT_INT64,DT_INT8,DT_UINT16,DT_UINT32,DT_UINT64,DT_UINT8
output0.name=y
@@ -22,7 +22,7 @@
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kDiagonalInputsNum = 1;
constexpr size_t kDiagonalInputsNum = 4;
constexpr size_t kDiagonalOutputsNum = 1;
constexpr int64_t N2 = 2;
template <typename T>
@@ -96,9 +96,6 @@ bool DiagonalCpuKernelMod::Init(const std::vector<KernelTensor *> &inputs, const
MS_LOG(ERROR) << "For 'Diagonal', it got empty inputs or outputs, which is invalid.";
return false;
}
offset_ = GetValue<int64_t>(primitive_->GetAttr("offset"));
dim1_ = GetValue<int64_t>(primitive_->GetAttr("dim1"));
dim2_ = GetValue<int64_t>(primitive_->GetAttr("dim2"));
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
@@ -114,6 +111,9 @@ int DiagonalCpuKernelMod::Resize(const std::vector<KernelTensor *> &inputs,
const std::vector<KernelTensor *> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kDiagonalInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kDiagonalOutputsNum, kernel_name_);
offset_ = inputs[kIndex1]->GetValueWithCheck<int64_t>();
dim1_ = inputs[kIndex2]->GetValueWithCheck<int64_t>();
dim2_ = inputs[kIndex3]->GetValueWithCheck<int64_t>();
if (int ret = KernelMod::Resize(inputs, outputs); ret != KRET_OK) {
return ret;
}
@@ -202,29 +202,89 @@ bool DiagonalCpuKernelMod::LaunchKernel(const std::vector<KernelTensor *> &input
}

std::vector<std::pair<KernelAttr, DiagonalCpuKernelMod::DiagonalLaunchFunc>> DiagonalCpuKernelMod::func_list_ = {
{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat32),
&DiagonalCpuKernelMod::LaunchKernel<float>},
{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat64),
&DiagonalCpuKernelMod::LaunchKernel<double>},
{KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool),
{KernelAttr()
.AddInputAttr(kNumberTypeBool)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeBool),
&DiagonalCpuKernelMod::LaunchKernel<bool>},
{KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat16),
&DiagonalCpuKernelMod::LaunchKernel<float16>},
{KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt8),
{KernelAttr()
.AddInputAttr(kNumberTypeInt8)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt8),
&DiagonalCpuKernelMod::LaunchKernel<int8_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt16),
{KernelAttr()
.AddInputAttr(kNumberTypeInt16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt16),
&DiagonalCpuKernelMod::LaunchKernel<int16_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
{KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt32),
&DiagonalCpuKernelMod::LaunchKernel<int32_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64),
{KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
&DiagonalCpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt8),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt8)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt8),
&DiagonalCpuKernelMod::LaunchKernel<uint8_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt16),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt16),
&DiagonalCpuKernelMod::LaunchKernel<uint16_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt32),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt32),
&DiagonalCpuKernelMod::LaunchKernel<uint32_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt64),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt64),
&DiagonalCpuKernelMod::LaunchKernel<uint64_t>}};

std::vector<KernelAttr> DiagonalCpuKernelMod::GetOpSupport() {
@@ -24,7 +24,7 @@
namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kDiagonalInputsNum = 1;
constexpr size_t kDiagonalInputsNum = 4;
constexpr size_t kDiagonalOutputsNum = 1;
constexpr int64_t N2 = 2;
template <typename T>
@@ -53,9 +53,6 @@ bool DiagonalGpuKernelMod::Init(const std::vector<KernelTensor *> &inputs, const
MS_LOG(ERROR) << "For 'Diagonal', it got empty inputs or outputs, which is invalid.";
return false;
}
offset_ = GetValue<int64_t>(primitive_->GetAttr("offset"));
dim1_ = GetValue<int64_t>(primitive_->GetAttr("dim1"));
dim2_ = GetValue<int64_t>(primitive_->GetAttr("dim2"));
auto kernel_attr = GetKernelAttrFromTensors(inputs, outputs);
auto [is_match, index] = MatchKernelAttr(kernel_attr, GetOpSupport());
if (!is_match) {
@@ -71,6 +68,9 @@ int DiagonalGpuKernelMod::Resize(const std::vector<KernelTensor *> &inputs,
const std::vector<KernelTensor *> &outputs) {
CHECK_KERNEL_INPUTS_NUM(inputs.size(), kDiagonalInputsNum, kernel_name_);
CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kDiagonalOutputsNum, kernel_name_);
offset_ = inputs[kIndex1]->GetValueWithCheck<int64_t>();
dim1_ = inputs[kIndex2]->GetValueWithCheck<int64_t>();
dim2_ = inputs[kIndex3]->GetValueWithCheck<int64_t>();
if (int ret = KernelMod::Resize(inputs, outputs); ret != KRET_OK) {
return ret;
}
@@ -118,29 +118,89 @@ bool DiagonalGpuKernelMod::LaunchKernel(const std::vector<KernelTensor *> &input
}

std::vector<std::pair<KernelAttr, DiagonalGpuKernelMod::DiagonalLaunchFunc>> DiagonalGpuKernelMod::func_list_ = {
{KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat32),
&DiagonalGpuKernelMod::LaunchKernel<float>},
{KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat64),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat64),
&DiagonalGpuKernelMod::LaunchKernel<double>},
{KernelAttr().AddInputAttr(kNumberTypeBool).AddOutputAttr(kNumberTypeBool),
{KernelAttr()
.AddInputAttr(kNumberTypeBool)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeBool),
&DiagonalGpuKernelMod::LaunchKernel<bool>},
{KernelAttr().AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
{KernelAttr()
.AddInputAttr(kNumberTypeFloat16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeFloat16),
&DiagonalGpuKernelMod::LaunchKernel<half>},
{KernelAttr().AddInputAttr(kNumberTypeInt8).AddOutputAttr(kNumberTypeInt8),
{KernelAttr()
.AddInputAttr(kNumberTypeInt8)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt8),
&DiagonalGpuKernelMod::LaunchKernel<int8_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt16).AddOutputAttr(kNumberTypeInt16),
{KernelAttr()
.AddInputAttr(kNumberTypeInt16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt16),
&DiagonalGpuKernelMod::LaunchKernel<int16_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
{KernelAttr()
.AddInputAttr(kNumberTypeInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt32),
&DiagonalGpuKernelMod::LaunchKernel<int32_t>},
{KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64),
{KernelAttr()
.AddInputAttr(kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeInt64),
&DiagonalGpuKernelMod::LaunchKernel<int64_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt8).AddOutputAttr(kNumberTypeUInt8),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt8)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt8),
&DiagonalGpuKernelMod::LaunchKernel<uint8_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt16).AddOutputAttr(kNumberTypeUInt16),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt16)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt16),
&DiagonalGpuKernelMod::LaunchKernel<uint16_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt32).AddOutputAttr(kNumberTypeUInt32),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt32)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt32),
&DiagonalGpuKernelMod::LaunchKernel<uint32_t>},
{KernelAttr().AddInputAttr(kNumberTypeUInt64).AddOutputAttr(kNumberTypeUInt64),
{KernelAttr()
.AddInputAttr(kNumberTypeUInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddInputAttr(kObjectTypeNumber, kNumberTypeInt64)
.AddOutputAttr(kNumberTypeUInt64),
&DiagonalGpuKernelMod::LaunchKernel<uint64_t>}};

std::vector<KernelAttr> DiagonalGpuKernelMod::GetOpSupport() {
@@ -115,11 +115,11 @@ REG_CUST_OP(Lgamma)
REG_CUST_OP(Diagonal)
.INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16,
DT_UINT32, DT_UINT64, DT_UINT8, DT_BOOL}))
.OUTPUT(y, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16,
DT_UINT32, DT_UINT64, DT_UINT8, DT_BOOL}))
.REQUIRED_ATTR(offset, Int)
.REQUIRED_ATTR(dim1, Int)
.REQUIRED_ATTR(dim2, Int)
.OUTPUT(y, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT16, DT_INT32, DT_INT64, DT_INT8, DT_UINT16,
DT_UINT32, DT_UINT64, DT_UINT8, DT_BOOL}))
.CUST_OP_END_FACTORY_REG(Diagonal)
} // namespace ge
#endif // MINDSPORE_CCSRC_GRAPH_IR_CUSTOM_OP_PROTO_CUST_MATH_OPS_H_
@@ -354,9 +354,10 @@ REG_ADPT_DESC(Real, prim::kPrimReal->name(), ADPT_DESC(Real))

// Diagonal
CUST_INPUT_MAP(Diagonal) = {{1, INPUT_DESC(x)}};
CUST_ATTR_MAP(Diagonal) = {{"offset", ATTR_DESC(offset, AnyTraits<int64_t>())},
{"dim1", ATTR_DESC(dim1, AnyTraits<int64_t>())},
{"dim2", ATTR_DESC(dim2, AnyTraits<int64_t>())}};
CUST_INPUT_ATTR_MAP(Diagonal) = {{2, ATTR_DESC(offset, AnyTraits<int64_t>())},
{3, ATTR_DESC(dim1, AnyTraits<int64_t>())},
{4, ATTR_DESC(dim2, AnyTraits<int64_t>())}};
CUST_ATTR_MAP(Diagonal) = EMPTY_ATTR_MAP;
CUST_OUTPUT_MAP(Diagonal) = {{0, OUTPUT_DESC(y)}};
REG_ADPT_DESC(Diagonal, prim::kPrimDiagonal->name(), CUST_ADPT_DESC(Diagonal))
} // namespace mindspore::transform
@@ -1,116 +0,0 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <memory>
#include <vector>

#include "mindapi/src/helper.h"
#include "mindspore/core/ops/math_ops.h"
#include "ops/diagonal.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
namespace {
abstract::ShapePtr DiagonalInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
constexpr size_t kDimNum = 2;
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 1;
const int64_t dyn_shape = abstract::Shape::kShapeDimAny;
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, input_num,
prim_name);
auto x = CheckAndConvertUtils::CheckArgsType(prim_name, input_args, 0, kObjectTypeTensorType);
auto x_shape = x->GetShape()->GetShapeVector();
auto x_rank = x_shape.size();
auto offset = GetValue<int64_t>(primitive->GetAttr("offset"));
auto dim1 = GetValue<int64_t>(primitive->GetAttr("dim1"));
auto dim2 = GetValue<int64_t>(primitive->GetAttr("dim2"));

if (IsDynamicRank(x_shape)) {
return std::make_shared<abstract::Shape>(ShapeVector({abstract::Shape::kShapeRankAny}));
}

CheckAndConvertUtils::CheckInRange<int64_t>("dim1", dim1, kIncludeBoth, {-x_rank, x_rank - 1}, prim_name);
CheckAndConvertUtils::CheckInRange<int64_t>("dim2", dim2, kIncludeBoth, {-x_rank, x_rank - 1}, prim_name);
if (x_rank < kDimNum) {
MS_EXCEPTION(ValueError) << "For 'Diagonal', input must be at least 2-dimensional, but got : " << x_rank << ".";
}
auto tmp_dim1 = (dim1 < 0) ? dim1 + x_rank : dim1;
auto tmp_dim2 = (dim2 < 0) ? dim2 + x_rank : dim2;
if (tmp_dim1 == tmp_dim2) {
MS_EXCEPTION(ValueError) << "For 'Diagonal', dim1 and dim2 cannot be identical, but got : dim1 =" << dim1
<< " and dim2 = " << dim2 << ".";
}
std::vector<int64_t> out_shape;
for (size_t tmp_dim = 0; tmp_dim < x_rank; tmp_dim++) {
if (tmp_dim != tmp_dim1 && tmp_dim != tmp_dim2) {
out_shape.push_back(x_shape[tmp_dim]);
}
}
int64_t dsize = dyn_shape;
if (x_shape[tmp_dim1] != dyn_shape && x_shape[tmp_dim2] != dyn_shape) {
if (offset >= 0) {
dsize = std::max<int64_t>(std::min(x_shape[tmp_dim1], x_shape[tmp_dim2] - offset), 0);
} else {
dsize = std::max<int64_t>(std::min(x_shape[tmp_dim1] + offset, x_shape[tmp_dim2]), 0);
}
}
out_shape.push_back(dsize);
return std::make_shared<abstract::Shape>(out_shape);
}

TypePtr DiagonalInferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
auto x = CheckAndConvertUtils::CheckArgsType(prim_name, input_args, 0, kObjectTypeTensorType);
auto x_dtype = x->GetType();
return CheckAndConvertUtils::CheckTensorTypeValid("input type", x_dtype, common_valid_types_with_bool,
primitive->name());
}
} // namespace
AbstractBasePtr DiagonalInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
const int64_t input_num = 1;
CheckAndConvertUtils::CheckInputArgs(input_args, kEqual, input_num, primitive->name());
auto infer_type = DiagonalInferType(primitive, input_args);
auto infer_shape = DiagonalInferShape(primitive, input_args);
return abstract::MakeAbstract(infer_shape, infer_type);
}
MIND_API_OPERATOR_IMPL(Diagonal, BaseOperator);

// AG means auto generated
class MIND_API AGDiagonalInfer : public abstract::OpInferBase {
public:
BaseShapePtr InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return DiagonalInferShape(primitive, input_args);
}

TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override {
return DiagonalInferType(primitive, input_args);
}
AbstractBasePtr InferShapeAndType(const abstract::AnalysisEnginePtr &engine, const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const override {
return DiagonalInfer(engine, primitive, input_args);
}
};

REGISTER_PRIMITIVE_OP_INFER_IMPL(Diagonal, prim::kPrimDiagonal, AGDiagonalInfer, false);
} // namespace ops
} // namespace mindspore
@@ -52,7 +52,6 @@ GVAR_DEF(PrimitivePtr, kPrimBesselK1, std::make_shared<Primitive>("BesselK1"));
GVAR_DEF(PrimitivePtr, kPrimBesselK0e, std::make_shared<Primitive>("BesselK0e"));
GVAR_DEF(PrimitivePtr, kPrimBesselK1e, std::make_shared<Primitive>("BesselK1e"));
GVAR_DEF(PrimitivePtr, kPrimGer, std::make_shared<Primitive>("Ger"));
GVAR_DEF(PrimitivePtr, kPrimDiagonal, std::make_shared<Primitive>(kDiagonalOpName));
GVAR_DEF(PrimitivePtr, kPrimTrunc, std::make_shared<Primitive>("Trunc"));
GVAR_DEF(PrimitivePtr, kPrimLu, std::make_shared<Primitive>("Lu"));
GVAR_DEF(PrimitivePtr, kPrimLuSolve, std::make_shared<Primitive>("LuSolve"));
@@ -0,0 +1,40 @@
diagonal:
description: |
Returns specified diagonals of `input`.

If `input` is 2-D, returns the diagonal of `input` with the given offset.
If `input` has more than two
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.

Args:
input (Tensor): Array from which the diagonals are taken.
offset (int, optional): Offset of the diagonal from the main diagonal.
Can be positive or negative. Default: ``0`` .
dim1 (int, optional): Axis to be used as the first axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
first axis (0). Default: ``0`` .
dim2 (int, optional): Axis to be used as the second axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
second axis (1). Default: ``1`` .

Returns:
Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
and a new axis inserted at the end corresponding to the diagonal.

Raises:
TypeError: if `dim1` or `dim2` are not an int.
ValueError: if the input tensor has less than two dimensions.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
>>> output = ops.diagonal(x)
>>> print(output)
[0 3]
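To make the shape rule in the docstring concrete, a small 3-D sketch; the expected output assumes the same semantics as the 2-D example above:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.arange(12).reshape(3, 2, 2), ms.float32)
out = ops.diagonal(x, offset=0, dim1=1, dim2=2)
print(out.shape)  # expected (3, 2): dims 1 and 2 removed, diagonal of length min(2, 2) appended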
@@ -0,0 +1,20 @@
#operator diagonal
diagonal:
args:
x:
dtype: tensor
offset:
dtype: int
default: 0
prim_init: True
dim1:
dtype: int
default: 0
prim_init: True
dim2:
dtype: int
default: 1
prim_init: True
returns:
output:
dtype: tensor
@@ -0,0 +1,94 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <memory>
#include <vector>

#include "mindapi/src/helper.h"
#include "mindspore/core/ops/math_ops.h"
#include "ops/ops_func_impl/diagonal.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
ShapeVector CalcuateDiagonalShape(size_t dim1, size_t dim2, size_t x_rank, const ShapeVector &input_shape) {
ShapeVector out_shape;
for (size_t tmp_dim = 0; tmp_dim < x_rank; tmp_dim++) {
if (tmp_dim != dim1 && tmp_dim != dim2) {
out_shape.push_back(input_shape[tmp_dim]);
}
}
return out_shape;
}

BaseShapePtr DiagonalFuncImpl::InferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const {
auto input_shape = input_args[0]->GetShape()->GetShapeVector();
constexpr size_t kDimNum = 2;
MS_EXCEPTION_IF_NULL(primitive);
auto prim_name = primitive->name();
const int64_t input_num = 4;
const int64_t dyn_shape = abstract::Shape::kShapeDimAny;
(void)CheckAndConvertUtils::CheckInteger("input numbers", SizeToLong(input_args.size()), kGreaterEqual, input_num,
prim_name);
if (IsDynamicRank(input_shape)) {
return std::make_shared<abstract::Shape>(ShapeVector({abstract::Shape::kShapeRankAny}));
}
auto x_rank = input_shape.size();
auto offset_opt = GetScalarValue<int64_t>(input_args[1]->GetValue());
auto dim1_opt = GetScalarValue<int64_t>(input_args[2]->GetValue());
auto dim2_opt = GetScalarValue<int64_t>(input_args[3]->GetValue());

if (!dim1_opt.has_value() || !dim1_opt.has_value()) {
return std::make_shared<abstract::Shape>(ShapeVector({abstract::Shape::kShapeRankAny}));
}
auto dim1 = dim1_opt.value();
auto dim2 = dim2_opt.value();
CheckAndConvertUtils::CheckInRange<int64_t>("dim1", dim1, kIncludeBoth, {-x_rank, x_rank - 1}, prim_name);
CheckAndConvertUtils::CheckInRange<int64_t>("dim2", dim2, kIncludeBoth, {-x_rank, x_rank - 1}, prim_name);
if (x_rank < kDimNum) {
MS_EXCEPTION(ValueError) << "For 'Diagonal', input must be at least 2-dimensional, but got : " << x_rank << ".";
}
auto tmp_dim1 = (dim1 < 0) ? dim1 + x_rank : dim1;
auto tmp_dim2 = (dim2 < 0) ? dim2 + x_rank : dim2;
if (tmp_dim1 == tmp_dim2) {
MS_EXCEPTION(ValueError) << "For 'Diagonal', dim1 and dim2 cannot be identical, but got : dim1 =" << dim1
<< " and dim2 = " << dim2 << ".";
}
auto out_shape = CalcuateDiagonalShape(tmp_dim1, tmp_dim2, x_rank, input_shape);
int64_t dsize = dyn_shape;
if (offset_opt.has_value()) {
auto offset = offset_opt.value();
if (input_shape[tmp_dim1] != dyn_shape && input_shape[tmp_dim2] != dyn_shape) {
if (offset >= 0) {
dsize = std::max<int64_t>(std::min(input_shape[tmp_dim1], input_shape[tmp_dim2] - offset), 0);
} else {
dsize = std::max<int64_t>(std::min(input_shape[tmp_dim1] + offset, input_shape[tmp_dim2]), 0);
}
}
}
out_shape.push_back(dsize);
return std::make_shared<abstract::Shape>(out_shape);
}

TypePtr DiagonalFuncImpl::InferType(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) const {
MS_EXCEPTION_IF_NULL(input_args[kIndex0]->GetType());
return input_args[kIndex0]->GetType()->Clone();
}
} // namespace ops
} // namespace mindspore
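The InferShape above reduces to a simple rule: drop `dim1` and `dim2` from the input shape and append the diagonal length, which clamps to zero once `offset` moves past the smaller of the two dimensions. A Python sketch of the same arithmetic for fully static shapes, illustration only:

def diagonal_out_shape(x_shape, offset, dim1, dim2):
    """Mirror of the Diagonal shape rule when no dimension is dynamic."""
    rank = len(x_shape)
    dim1, dim2 = dim1 % rank, dim2 % rank  # normalize negative axes
    out = [s for d, s in enumerate(x_shape) if d not in (dim1, dim2)]
    if offset >= 0:
        dsize = max(min(x_shape[dim1], x_shape[dim2] - offset), 0)
    else:
        dsize = max(min(x_shape[dim1] + offset, x_shape[dim2]), 0)
    return out + [dsize]

print(diagonal_out_shape([2, 3, 4], offset=1, dim1=-1, dim2=1))  # expected [2, 2]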
@@ -1,5 +1,5 @@
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,24 +14,22 @@
* limitations under the License.
*/

#ifndef MINDSPORE_CORE_OPS_DIAGONAL_H_
#define MINDSPORE_CORE_OPS_DIAGONAL_H_
#ifndef MINDSPORE_CORE_OPS_OPS_FUNC_IMPL_DIAGONAL_H_
#define MINDSPORE_CORE_OPS_OPS_FUNC_IMPL_DIAGONAL_H_

#include <memory>
#include <vector>
#include "mindapi/base/types.h"
#include "ops/base_operator.h"
#include "ops/ops_func_impl/op_func_impl.h"

namespace mindspore {
namespace ops {
constexpr auto kNameDiagonal = "Diagonal";
class MIND_API Diagonal : public BaseOperator {
class MIND_API DiagonalFuncImpl : public OpFuncImpl {
public:
MIND_API_BASE_MEMBER(Diagonal);
Diagonal() : BaseOperator(kNameDiagonal) { InitIOName({"x"}, {"y"}); }
BaseShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override;
TypePtr InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const override;
};
MIND_API abstract::AbstractBasePtr DiagonalInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
const std::vector<abstract::AbstractBasePtr> &input_args);
} // namespace ops
} // namespace mindspore

#endif // MINDSPORE_CORE_OPS_DIAGONAL_H_
#endif // MINDSPORE_CORE_OPS_OPS_FUNC_IMPL_DIAGONAL_H_
@@ -20,7 +20,7 @@
#include "ops/op_utils.h"

namespace mindspore::ops {
constexpr size_t kDiagonalInputsNum = 1;
constexpr size_t kDiagonalInputsNum = 4;
constexpr int64_t kDimNum = 2;

int64_t ComputeData(int64_t offset, int64_t dim1, int64_t dim2, std::vector<int64_t> old_shape) {
@@ -46,9 +46,9 @@ TensorStorageInfoPtrList DiagonalCalc(const PrimitivePtr &prim, const std::vecto
auto old_shape = old_tensor_info->old_shape;
auto old_strides = old_tensor_info->old_strides;
auto storage_offset = old_tensor_info->old_offset;
auto offset = GetValue<int64_t>(prim->GetAttr("offset"));
auto dim1 = GetValue<int64_t>(prim->GetAttr("dim1"));
auto dim2 = GetValue<int64_t>(prim->GetAttr("dim2"));
auto offset = GetValue<int64_t>(inputs[1]);
auto dim1 = GetValue<int64_t>(inputs[2]);
auto dim2 = GetValue<int64_t>(inputs[3]);
int64_t dim_size = old_shape.size();
(void)CheckAndConvertUtils::CheckInRange<int64_t>("dim1", dim1, kIncludeBoth, {-dim_size, dim_size - 1},
prim->name());
@@ -17,6 +17,8 @@
#include "ops/view/reshape_strides_calc.h"
#include <vector>
#include <memory>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"

namespace mindspore::ops {
constexpr size_t kReshapeInputsNum = 2;
@@ -74,6 +76,9 @@ TensorStorageInfoPtrList ReshapeCalc(const PrimitivePtr &prim, const std::vector
}
auto input_tensor = inputs[0]->cast<tensor::TensorPtr>();
MS_EXCEPTION_IF_NULL(input_tensor);
auto input_type = input_tensor->Dtype();
(void)CheckAndConvertUtils::CheckTypeValid("input", input_type, common_valid_types_with_complex_and_bool,
prim->name());
auto ori_storage_info = input_tensor->storage_info();
if (ori_storage_info != nullptr && !ori_storage_info->is_contiguous) {
return {};
@@ -83,4 +88,6 @@ TensorStorageInfoPtrList ReshapeCalc(const PrimitivePtr &prim, const std::vector

return ReshapeCalcImpl(prim, input_tensor, shape);
}

REG_VIEW_STRIDES_CALC_FUN(Reshape, ReshapeCalc);
} // namespace mindspore::ops
@@ -208,24 +208,11 @@ class _ConstantPadNd(Cell):
raise TypeError(msg)

self.value = value
self.padding = _swap_to_ms_padding_order(padding)
self._name = name
self.padding = padding

def construct(self, x):
"""Construct the pad net."""
input_shape = x.shape
padding = _check(input_shape, self.padding, self._name)
new_padding, start, end = _get_new_padding(padding)
mask = ops.OnesLike()(x)
output = ops.Pad(new_padding)(x)
mask = ops.Pad(new_padding)(mask)
ones = ops.OnesLike()(output)
value = ops.fill(output.dtype, output.shape, self.value)
output = ops.Add()(ops.Mul()(mask, output), ops.Mul()(ops.Sub()(ones, mask), value))
slice_op = ops.Slice()
begin, size = _get_begin_size(output.shape, start, end)
output = slice_op(output, begin, size)
return output
return ops.pad(x, padding=self.padding, mode='constant', value=self.value)


class ConstantPad1d(_ConstantPadNd):
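The padding Cell now delegates the whole constant-pad computation to the functional `ops.pad`. A minimal usage sketch of that call, mirroring the signature used in the rewritten `construct`:

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

x = Tensor(np.array([[1., 2., 3.]]), ms.float32)
y = ops.pad(x, padding=(1, 1), mode='constant', value=0.5)  # pad one element on each side of the last axis
print(y)  # expected [[0.5 1.  2.  3.  0.5]]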
@@ -141,7 +141,6 @@ from .array_func import (
expand,
fold,
unfold,
diagonal,
diagonal_scatter,
lstsq,
mvlgamma,
@@ -711,7 +710,7 @@ from .other_func import (
partial,
)
from ..operations.manually_defined import (rank,)
from ..auto_generate import (assign, masked_fill, minimum, prelu, randperm, real, sin, sinc, sinh, roll_,)
from ..auto_generate import (assign, masked_fill, minimum, prelu, randperm, real, sin, sinc, sinh, roll_, diagonal,)

__all__ = [
'assign',
@@ -724,6 +723,7 @@ __all__ = [
'sin',
'sinc',
'sinh',
'diagonal',
]
__all__.extend(array_func.__all__)
__all__.extend(parameter_func.__all__)
@@ -31,7 +31,6 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
from mindspore.ops.operations._sequence_ops import TupleToTensor
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
from mindspore.ops.operations._sequence_ops import TensorToList
from mindspore.ops.operations.math_ops import Diagonal

from mindspore.ops.operations.array_ops import (
UniqueConsecutive,
@@ -6497,57 +6496,6 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
return axes


def diagonal(input, offset=0, dim1=0, dim2=1):
"""
Returns specified diagonals of `input`.

If `input` is 2-D, returns the diagonal of `input` with the given offset.
If `input` has more than two
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.

Args:
input (Tensor): Array from which the diagonals are taken.
offset (int, optional): Offset of the diagonal from the main diagonal.
Can be positive or negative. Default: ``0`` .
dim1 (int, optional): Axis to be used as the first axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
first axis (0). Default: ``0`` .
dim2 (int, optional): Axis to be used as the second axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
second axis (1). Default: ``1`` .

Returns:
Tensor, if `input` is 2-D, then `input` 1-D array containing the diagonal. If
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
and a new axis inserted at the end corresponding to the diagonal.

Raises:
TypeError: if `dim1` or `dim2` are not an int.
ValueError: if the input tensor has less than two dimensions.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
>>> output = ops.diagonal(x)
>>> print(output)
[0 3]
"""
x_ndim = input.ndim
if x_ndim < 2:
raise ValueError(f"For 'ops.diagonal', the original tensor requires at least two dimensions, but got {x_ndim}")
_check_attr_dtype("dim1", dim1, [int], "diagonal")
_check_attr_dtype("dim2", dim2, [int], "diagonal")

_diagonal = _get_cache_prim(Diagonal)(offset, dim1, dim2)
return _diagonal(input)


def _check_is_tensor(param_name, input, cls_name):
"""Returns True if input is Tensor."""
if not isinstance(input, Tensor):
@@ -7400,7 +7348,6 @@ __all__ = [
'expand',
'fold',
'unfold',
'diagonal',
'diagonal_scatter',
'lstsq',
'mvlgamma',
@@ -40,7 +40,7 @@ from ..auto_generate import (Add, Addcdiv, Addcmul, ReduceMean, ReduceSum, Reduc
LinSpace, MatrixDeterminant, LogMatrixDeterminant, Erfinv, Conj,
Real, Complex, Angle, MatrixExp, CholeskyInverse, Trace, Cholesky,
FFTWithSize, NextAfter, NanToNum, Eig, Qr, Roll, Maximum, Div, CumProd,
CumSum, Less, LessEqual)
CumSum, Less, LessEqual, Diagonal)

def _infer_shape_reduce(x, axis, keep_dims, prim_name):
"""Common infer for reduce operator"""
@@ -1342,68 +1342,6 @@ class Einsum(Primitive):
self.init_prim_io_names(inputs=['inputs'], outputs=['output'])


class Diagonal(Primitive):
"""
Create a tensor with specific diagonal elements of input. This operator extracts the diagonal elements with
offset from the 2-D sub-tensors which specified by dim1 and dim2.
The shape of output tensor can be dertermined by removing dim1 and dim2 form the shape of input and appending
a dimension at the end. The size of the last dimension is the length of diagonal.

Args:
offset (int): The offset of main diagonal, which controls which diagonal to consider. If :math:`offset=0`,
return the main diagonal elements with respect to dim1 and dim2. If :math:`offset>0`, return the
diagonal elements that are `offset` units upward from the main diagonal. If :math:`offset<0`, return the
diagonal elements that are `offset` units downward from the main diagonal. Default: ``0`` .
dim1 (int): The first dimension with respect to which to take diagonal. Default: ``0`` .
dim2 (int): The second dimension with respect to which to take diagonal. Default: ``1`` .

Inputs:
- **x** (Tensor) - The input to take diagonal, with float32 or double data type.
The input must be at least 2-dimensional.
The shape is :math:`(N_{0}, N_{1}, *)` where :math:`*` means, any number of additional dimensions.

Outputs:
- **y** (Tensor) - A tensor whose values are diagonal of input, with the same data type as input.
The shape of the output is one dimension lower than the input.
If the shape of `x` is :math:`(d_{0}, d_{1}, ..., d_{n-1})`, the size of the `dim1` dimension
is :math:`d_{i}` and the size of the `dim2` dimension is :math:`d_{j}`, the shape of `y` is the same
as the input shape with `dim1` and `dim2` dimension removed and the diagonal dimension appended.
If the `offset` is nonnegative, the size of output's last dimension is
:math:`max(min(d_{i}, d_{j}-offset), 0)`. But if the `offset` is negative, the size of output's
last dimension is :math:`max(min(d_{i} + offset, d_{j}), 0)`.

Raises:
TypeError: If dtype of `x` is neither float32 nor double.
TypeError: If `offset` is not an int.
TypeError: If `dim1` is not an int.
TypeError: If `dim2` is not an int.
ValueError: If the dimension of input is less than 2 dimensions.
ValueError: If `dim1` is not in range of [-len(x.shape), len(x.shape)).
ValueError: If `dim2` is not in range of [-len(x.shape), len(x.shape)).
ValueError: If `dim1` and `dim2` are identical.

Supported Platforms:
``Ascend`` ``CPU``

Examples:
>>> x = Tensor(np.array([[[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]],
... [[12., 13., 14., 15.], [16., 17., 18., 19.], [20., 21., 22., 23.]]]), mindspore.float32)
>>> diagonal_ops = ops.Diagonal(offset=1, dim1=-1, dim2=1)
>>> y = diagonal_ops(x)
>>> print(y)
[[ 4. 9.]
[16. 21.]]
"""

@prim_attr_register
def __init__(self, offset=0, dim1=0, dim2=1):
"""Initialize Diagonal"""
self.init_prim_io_names(inputs=['x'], outputs=['y'])
validator.check_is_int(offset, "offset", self.name)
validator.check_is_int(dim1, "dim1", self.name)
validator.check_is_int(dim2, "dim2", self.name)


class Histogram(Primitive):
"""
Computes the histogram of Tensor element distribution.
@@ -0,0 +1,86 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import ops


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_reshape_single_op():
"""
Feature: reshape
Description: Verify the result of reshape
Expectation: success
"""
ms.set_context(mode=ms.GRAPH_MODE)

class Net(nn.Cell):
def construct(self, x, y):
return ops.reshape(x, y)

input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), ms.float32)
input_perm = (2, 2, 3)
net = Net()
expect_output = net(input_x, input_perm).asnumpy()
grad_op = ops.GradOperation(get_all=True, get_by_list=False, sens_param=False)
expect_grad = grad_op(net)(input_x, input_perm)

ms.set_context(mode=ms.PYNATIVE_MODE)
net = Net()
output = net(input_x, input_perm).asnumpy()
grad = grad_op(net)(input_x, input_perm)
np.testing.assert_array_equal(output, expect_output)
np.testing.assert_allclose(grad[0].asnumpy(), expect_grad[0].asnumpy(), 0.00001, 0.00001)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_reshape_multiple_op():
"""
Feature: reshape
Description: Verify the result of reshape
Expectation: success
"""
ms.set_context(mode=ms.GRAPH_MODE)

class Net(nn.Cell):
def construct(self, x, y):
temp = ops.reshape(x, y)
temp = (temp + 1) * 2
return ops.reshape(temp, y)

input_x = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), ms.float32)
input_perm = (2, 2, 3)
net = Net()
expect_output = net(input_x, input_perm).asnumpy()
grad_op = ops.GradOperation(get_all=True, get_by_list=False, sens_param=False)
expect_grad = grad_op(net)(input_x, input_perm)

ms.set_context(mode=ms.PYNATIVE_MODE)
net = Net()
output = net(input_x, input_perm).asnumpy()
grad = grad_op(net)(input_x, input_perm)
np.testing.assert_array_equal(output, expect_output)
np.testing.assert_allclose(grad[0].asnumpy(), expect_grad[0].asnumpy(), 0.00001, 0.00001)
@@ -38,11 +38,11 @@ TEST_F(TestViewDiagonal, View) {
auto offset_ = MakeValue(input_offset);
auto dim1_ = MakeValue(input_dim1);
auto dim2_ = MakeValue(input_dim2);
prim->AddAttr("offset", offset_);
prim->AddAttr("dim1", dim1_);
prim->AddAttr("dim2", dim2_);
std::vector<ValuePtr> inputs_a;
inputs_a.emplace_back(input_tensor);
inputs_a.emplace_back(offset_);
inputs_a.emplace_back(dim1_);
inputs_a.emplace_back(dim2_);
auto storage_info = DiagonalCalc(prim, inputs_a);
std::vector<int64_t> expect_shape({1, 2});
std::vector<int64_t> expect_strides({8, 5});
@@ -0,0 +1,51 @@
/**
* Copyright 2023 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "test_view.h"
#include "mindspore/core/ops/view/reshape_strides_calc.h"

namespace mindspore {
namespace ops {
class TestViewReshape : public TestView {
public:
TestViewReshape() {}
};

/// Feature: Reshape strides calculator
/// Description: Test view Reshape strides calculator is right
/// Expectation: success
TEST_F(TestViewReshape, View) {
auto prim = std::make_shared<Primitive>("Reshape");
std::vector<int64_t> tensor_data = {1, 2, 3, 4, 5, 6, 7, 8};
auto input_tensor = std::make_shared<tensor::Tensor>(tensor_data, kInt64);
input_tensor->set_shape({2, 4});
std::vector<int64_t> input_perm_data = {1, 4, 2};
auto input_perm = MakeValue(input_perm_data);
std::vector<ValuePtr> inputs_a;
inputs_a.emplace_back(input_tensor);
inputs_a.emplace_back(input_perm);
auto storage_info = ReshapeCalc(prim, inputs_a);
std::vector<int64_t> expect_shape({1, 4, 2});
std::vector<int64_t> expect_strides({8, 2, 1});
size_t expect_offset = 0;
ASSERT_FALSE(storage_info.empty());
ASSERT_TRUE(storage_info[0]->is_contiguous);
ASSERT_TRUE(storage_info[0]->shape == expect_shape);
ASSERT_TRUE(storage_info[0]->strides == expect_strides);
ASSERT_TRUE(storage_info[0]->storage_offset == expect_offset);
}
} // namespace ops
} // namespace mindspore