forked from mindspore-Ecosystem/mindspore

commit 25d568f8a1 (parent 8eb65baf62)

    Fix bugs
@@ -755,7 +755,7 @@ constexpr auto kSparseSoftmaxCrossEntropyWithLogitsOpName = "SparseSoftmaxCrossE
 constexpr auto kSparseSoftmaxCrossEntropyWithLogitsV2OpName = "SparseSoftmaxCrossEntropyWithLogitsV2";
 constexpr auto kSparseSparseMinimumOpName = "SparseSparseMinimum";
 constexpr auto kSparseSparseMaximumOpName = "SparseSparseMaximum";
-constexpr auto kSparseTensorDenseMatMulOpName = "SparseTensorDenseMatMul";
+constexpr auto kSparseTensorDenseMatmulOpName = "SparseTensorDenseMatmul";
 constexpr auto kSparseTensorDenseAddOpName = "SparseTensorDenseAdd";
 constexpr auto kSparseTensorToCSRSparseMatrixOpName = "SparseTensorToCSRSparseMatrix";
 constexpr auto kSparseToDenseOpName = "SparseToDense";

@@ -64,6 +64,18 @@ uint32_t SparseDenseCwiseOpKernel<Op>::CheckParams(CpuKernelContext &ctx) {
   KERNEL_CHECK_FALSE((input3_dims <= shape_elements_nums), KERNEL_STATUS_PARAM_INVALID,
                      "The dims of DenseTensor is large than sparseTensor.")
   KERNEL_CHECK_FALSE((indices_0 == value_0), KERNEL_STATUS_PARAM_INVALID, "The num of indices is not equal to value.")
+
+  int64_t indices_num = x1_indices->GetTensorShape()->GetDimSize(0);
+  int64_t dims = x1_indices->GetTensorShape()->GetDimSize(1);
+  auto x1_indices_data = reinterpret_cast<int64_t *>(x1_indices->GetData());
+  auto x1_shape_data = reinterpret_cast<int64_t *>(x1_shape->GetData());
+  for (int64_t i = 0; i < indices_num; ++i) {
+    for (int64_t j = 0; j < dims; ++j) {
+      KERNEL_CHECK_FALSE((x1_indices_data[i * dims + j] >= 0 && x1_indices_data[i * dims + j] < x1_shape_data[j]),
+                         KERNEL_STATUS_PARAM_INVALID, "For SparseDenseCwise%s, indices go out of bounds.",
+                         Op::Name().c_str());
+    }
+  }
   return KERNEL_STATUS_OK;
 }

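The added loop rejects any COO index that falls outside the sparse tensor's dense shape before the element-wise op runs. The same check in isolation, as a minimal standalone sketch assuming int64 indices stored row-major as [nnz, ndims] (the function and argument names here are illustrative, not from this diff):

```cpp
#include <cstdint>
#include <vector>

// Returns true when every index tuple lies inside the bounds given by `shape`.
// `indices` is laid out row-major: indices[i * ndims + j] is dimension j of entry i.
bool IndicesInBounds(const std::vector<int64_t> &indices, const std::vector<int64_t> &shape) {
  const int64_t ndims = static_cast<int64_t>(shape.size());
  if (ndims == 0) return true;  // nothing to check for a scalar shape
  const int64_t nnz = static_cast<int64_t>(indices.size()) / ndims;
  for (int64_t i = 0; i < nnz; ++i) {
    for (int64_t j = 0; j < ndims; ++j) {
      const int64_t idx = indices[i * ndims + j];
      if (idx < 0 || idx >= shape[j]) {
        return false;  // out-of-bounds index found
      }
    }
  }
  return true;
}
```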
@@ -97,7 +97,7 @@ uint32_t SparseSegmentSqrtNGradCpuKernel::ComputeKernal(CpuKernelContext &ctx) {
   }

   for (size_t i = 0; i < m; i++) {
-    if (indices_addr[i] >= ctx.Input(0)->GetTensorShape()->GetDimSize(0)) {
+    if (indices_addr[i] >= k) {
       KERNEL_LOG_ERROR("Indices out of range.");
       return KERNEL_STATUS_PARAM_INVALID;
     }

@@ -118,9 +118,9 @@ uint32_t SparseSegmentSqrtNGradCpuKernel::ComputeKernal(CpuKernelContext &ctx) {
         for (size_t l = 0; l < n; l++) {
           y_addr[indices_addr[i - j] * n + l] += x_addr[beginindex * n + l] / (T)(sqrt(countnum));
         }
-        beginindex = segment_ids_addr[i];
-        countnum = 1;
       }
+      beginindex = segment_ids_addr[i];
+      countnum = 1;
     }

     int i = m;

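The two moved lines reset the running segment state (`beginindex`, `countnum`) on every segment boundary rather than only inside the nested flush branch. The flush-then-reset pattern in isolation, as a minimal sketch of run counting over sorted segment ids (the names and printf output are illustrative, not the kernel's real data layout):

```cpp
#include <cstdio>
#include <vector>

// Counts the length of each run in a sorted segment-id array, flushing a run
// whenever the id changes; the reset happens after the flush, on every boundary.
void CountSegments(const std::vector<int> &segment_ids) {
  if (segment_ids.empty()) return;
  int begin = segment_ids[0];  // id of the run currently being accumulated
  int count = 1;               // length of that run so far
  for (size_t i = 1; i < segment_ids.size(); ++i) {
    if (segment_ids[i] == begin) {
      ++count;
    } else {
      std::printf("segment %d has %d elements\n", begin, count);
      begin = segment_ids[i];  // reset on every boundary, not inside a nested branch
      count = 1;
    }
  }
  std::printf("segment %d has %d elements\n", begin, count);  // flush the final run
}
```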
@@ -46,6 +46,7 @@ constexpr auto kShapes = "shapes";
 constexpr auto kTypes = "types";
 constexpr auto kQueueName = "queue_name";
 constexpr auto kNameRangeV2 = "RangeV2";
+constexpr auto kSparseTensorDenseMatmul = "SparseTensorDenseMatmul";
 constexpr auto kSeed = "seed";
 constexpr auto kSeed0 = "Seed0";
 constexpr auto kSeed1 = "Seed1";

@@ -322,6 +323,7 @@ const std::map<std::string, std::string> kOpNameToAicpuOpNameMap{
   {kUpsampleNearest3DGrad, "UpsampleNearest3dGrad"},
   {kNameRangeV2, "Range"},
   {kReLUV3, "Relu"},
+  {kSparseTensorDenseMatmul, "SparseTensorDenseMatMul"},
   {kFillV2, "Fill"},
   {kUpsampleTrilinear3D, "UpsampleTrilinear3d"},
   {kUpsampleTrilinear3DGrad, "UpsampleTrilinear3dGrad"},

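The new map entry routes the front-end node name to the spelling registered on the aicpu side. A lookup of that kind plausibly falls back to the original name for ops absent from the map; a minimal sketch (the fallback behavior is an assumption, not taken from this diff):

```cpp
#include <map>
#include <string>

// Resolve a front-end op name to its aicpu spelling; names absent from the
// map are assumed to match on both sides and are returned unchanged.
std::string ToAicpuOpName(const std::map<std::string, std::string> &name_map, const std::string &name) {
  auto it = name_map.find(name);
  return it == name_map.end() ? name : it->second;
}
```

With the entry added above, `ToAicpuOpName(kOpNameToAicpuOpNameMap, "SparseTensorDenseMatmul")` would yield "SparseTensorDenseMatMul".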
@@ -140,7 +140,7 @@ bool AICpuLibSelectPass::Process(const AnfNodePtr &node) const {
     mindspore::kTridiagonalSolveOpName,
     mindspore::kSparseTensorToCSRSparseMatrixOpName,
     mindspore::kSparseTensorDenseAddOpName,
-    mindspore::kSparseTensorDenseMatMulOpName,
+    mindspore::kSparseTensorDenseMatmulOpName,
     mindspore::kSparseSoftmaxOpName,
     mindspore::kSparseSliceOpName,
     mindspore::kSparseSliceGradOpName,

@@ -104,6 +104,7 @@ PrimShapeDependMap &GetInferDependsMap() {
   static const auto &kMirrorPad = prim::kPrimMirrorPad->name();
   static const auto &kAdaptiveAvgPool3D = prim::kPrimAdaptiveAvgPool3D->name();
   static const auto &kAdaptiveAvgPool3DGrad = prim::kPrimAdaptiveAvgPool3DGrad->name();
+  static const auto &kAvgPoolGradV1 = prim::kPrimAvgPoolGradV1->name();
   static const auto &kOneHot = prim::kPrimOneHot->name();
   static const auto &kDropoutGenMask = prim::kPrimDropoutGenMask->name();
   static const auto &kStridedSlice = prim::kPrimStridedSlice->name();

@@ -198,6 +199,7 @@ PrimShapeDependMap &GetInferDependsMap() {
   {kConv2DBackpropFilter, ShapeSet{2}},
   {kConv2DBackpropInput, ShapeSet{2}},
   {kCol2Im, ShapeSet{1}},
+  {kAvgPoolGradV1, ShapeSet{0}},
   {kOneHot, ShapeSet{1, 3}},
   {kDropoutGenMask, ShapeSet{0}},
   {prim::kStatelessDropOutGenMask, ShapeSet{0}},

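Registering `{kAvgPoolGradV1, ShapeSet{0}}` marks input 0 as value-dependent: shape inference must read that input's data, not just its shape. A minimal sketch of such a registry and its query; the type layout is illustrative, the real `PrimShapeDependMap` definition is not part of this diff:

```cpp
#include <cstdint>
#include <set>
#include <string>
#include <unordered_map>

// A value-depend registry: for each primitive name, the set of input slots
// whose *values* (not just shapes) the shape inference needs to read.
using ShapeSet = std::set<int64_t>;
using PrimShapeDependMap = std::unordered_map<std::string, ShapeSet>;

bool NeedsInputValue(const PrimShapeDependMap &deps, const std::string &prim, int64_t input_idx) {
  auto it = deps.find(prim);
  return it != deps.end() && it->second.count(input_idx) > 0;
}
```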
@@ -28,7 +28,8 @@ abstract::ShapePtr AvgPoolGradV1InferShape(const PrimitivePtr &primitive,
   std::vector<int64_t> kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));

   auto pad_mode_value = (primitive->GetAttr(kPadMode));
-  auto pad_mode = PadMode(GetValue<int64_t>(pad_mode_value));
+  int64_t pad_mode;
+  CheckAndConvertUtils::GetPadModEnumValue(pad_mode_value, &pad_mode, true);
   if (format == NHWC) {
     std::vector<int64_t> ksize_NHWC = {kernel_size[0], kernel_size[1], kernel_size[2], kernel_size[3]};
     (void)primitive->AddAttr("ksize", MakeValue(ksize_NHWC));

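The replaced line assumed kPadMode always holds an int64 enum and would fail when the attribute is stored as a string such as "SAME"; `GetPadModEnumValue` handles both representations. The string half of that conversion as a minimal sketch, with an integer encoding that is illustrative rather than the real `PadMode` values:

```cpp
#include <cstdint>
#include <string>

// Illustrative normalization: a pad-mode attribute may arrive as a string
// ("SAME"/"VALID") or already as an integer enum. The encoding below is
// arbitrary for this sketch.
bool PadModeToEnum(const std::string &mode, int64_t *out) {
  if (mode == "SAME") { *out = 1; return true; }
  if (mode == "VALID") { *out = 2; return true; }
  return false;  // unknown mode: let the caller report the error
}
```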
@@ -46,16 +47,10 @@ abstract::ShapePtr AvgPoolGradV1InferShape(const PrimitivePtr &primitive,
     (void)primitive->AddAttr("padding", MakeValue("SAME"));
   }

-  auto orig_input_shape = input_args[0]->BuildValue();
-  auto orig_input_shape_tensor = orig_input_shape->cast<tensor::TensorPtr>();
-  auto orig_input_shape_tensor_data_ptr = orig_input_shape_tensor->data_c();
-  int32_t *orig_input_shape_ptr = static_cast<int32_t *>(orig_input_shape_tensor_data_ptr);
-
-  std::vector<int64_t> orig_shape = {orig_input_shape_ptr[0], orig_input_shape_ptr[1], orig_input_shape_ptr[2],
-                                     orig_input_shape_ptr[3]};
+  auto orig_shape = GetShapeValue(primitive, input_args[0]);
   return std::make_shared<abstract::Shape>(orig_shape);
 }

 TypePtr AvgPoolGradV1InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) {
   auto name = prim->name();
   auto orig_input_shape_type = input_args[0]->BuildType();

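The deleted block cast input 0's value to a tensor and read four int32s out of it unconditionally, which presumably fails whenever that value is not a compile-time constant (for example under dynamic shape); `GetShapeValue` covers that case. For contrast, a defensive version of the old constant-read, as a minimal sketch with illustrative names and an empty result standing in for "unknown":

```cpp
#include <cstdint>
#include <vector>

// Stand-in for the deleted logic: read a rank-4 shape from a constant int32
// buffer, but fail soft (empty result = unknown shape) instead of
// dereferencing a null pointer when the value is not yet known.
std::vector<int64_t> ReadConstShape(const int32_t *data, size_t len) {
  std::vector<int64_t> shape;
  if (data == nullptr || len < 4) {
    return shape;  // value unknown at infer time, e.g. a dynamic-shape input
  }
  for (size_t i = 0; i < 4; ++i) {
    shape.push_back(static_cast<int64_t>(data[i]));
  }
  return shape;
}
```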
@@ -37,6 +37,14 @@ void CheckShapeRank(const size_t cur_rank, const size_t expected_rank, const std
                     << "-dimensional tensor, but got a " << cur_rank << "-dimensional tensor.";
   }
 }
+
+void AddAicpuAttr(const PrimitivePtr &primitive) {
+  // SparseTensorDenseMatmul has attr adjoint_a/b instead of adjoint_st/dt on aicpu.
+  // add_prim_attr in the python __init__ function doesn't take effect in the expander bprop,
+  // so add them here.
+  (void)primitive->AddAttr("adjoint_a", primitive->GetAttr("adjoint_st"));
+  (void)primitive->AddAttr("adjoint_b", primitive->GetAttr("adjoint_dt"));
+}
 }  // namespace

 bool checkType(std::string name, TypePtr dtype, const std::set<TypePtr> &vtypes, const PrimitivePtr &primitive) {

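`AddAicpuAttr` mirrors the front-end attributes `adjoint_st`/`adjoint_dt` under the `adjoint_a`/`adjoint_b` names the aicpu kernel reads. The aliasing idea in isolation, as a minimal sketch over a plain attribute map (the real primitive stores attributes differently):

```cpp
#include <map>
#include <string>

// Mirror an attribute under a second key so two consumers that expect
// different spellings both find it; the value object is shared, not copied.
template <typename V>
void AliasAttr(std::map<std::string, V> *attrs, const std::string &from, const std::string &to) {
  auto it = attrs->find(from);
  if (it != attrs->end()) {
    (*attrs)[to] = it->second;
  }
}
```

For example, `AliasAttr(&attrs, std::string("adjoint_st"), std::string("adjoint_a"))` lets the kernel-side name resolve to the same value the Python front end set.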
@@ -258,6 +266,9 @@ AbstractBasePtr SparseTensorDenseMatmulInfer(const abstract::AnalysisEnginePtr &
   auto type = SparseTensorDenseMatmulInferType(primitive, input_args);
   // infer shape
   auto shape = SparseTensorDenseMatmulInferShape(primitive, input_args);
+
+  AddAicpuAttr(primitive);
+
   return std::make_shared<abstract::AbstractTensor>(type, shape);
 }