!43310 refine some infer shape functions to support dynamic shape

Merge pull request !43310 from looop5/dyn_shape_commit_split_core_only
i-robot 2022-10-08 01:38:57 +00:00 committed by Gitee
commit 4cbf97af70
79 changed files with 4457 additions and 157 deletions

View File

@ -1350,6 +1350,17 @@ void AnfRuntimeAlgorithm::UpdateGraphValidRefPair(const KernelGraphPtr &graph) {
graph->set_ref_out_in_map(new_ref_map);
}
bool AnfRuntimeAlgorithm::IsDynamicShapeSkipExecute(const std::string &op_name, const ShapeVector &axes_shape) {
// Skip running ReduceSum when the axis is an empty tensor.
if (op_name != kReduceSumOpName) {
return false;
}
if (std::any_of(axes_shape.begin(), axes_shape.end(), [](int64_t shape) { return shape == 0; })) {
return true;
}
return false;
}
bool AnfRuntimeAlgorithm::IsDynamicShapeSkipExecute(const CNodePtr &cnode) {
// Skip running ReduceSum when the axis is an empty tensor.
MS_EXCEPTION_IF_NULL(cnode);
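
Note: the two IsDynamicShapeSkipExecute overloads above encode one rule: a ReduceSum whose axis input is an empty tensor reduces over nothing, so the launch can be skipped. A minimal standalone sketch of the same predicate, using std::vector<int64_t> in place of MindSpore's ShapeVector (ShouldSkipReduce is an illustrative name, not the framework API):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// An axes tensor with any zero-sized dimension holds no elements, so the
// reduction degenerates to an identity op.
bool ShouldSkipReduce(const std::string &op_name, const std::vector<int64_t> &axes_shape) {
  if (op_name != "ReduceSum") {
    return false;
  }
  return std::any_of(axes_shape.begin(), axes_shape.end(), [](int64_t dim) { return dim == 0; });
}

int main() {
  std::cout << ShouldSkipReduce("ReduceSum", {0}) << "\n";   // 1: empty axes -> skip
  std::cout << ShouldSkipReduce("ReduceSum", {2}) << "\n";   // 0: two reduce axes -> run
  std::cout << ShouldSkipReduce("ReduceMean", {0}) << "\n";  // 0: other ops never skip
}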

View File

@ -170,6 +170,7 @@ class BACKEND_EXPORT AnfRuntimeAlgorithm {
static void CacheAddrForAtomicClean(const AnfNodePtr &node, kernel::KernelMod *kernel_mod);
static void UpdateGraphValidRefPair(const KernelGraphPtr &graph);
static bool IsDynamicShapeSkipExecute(const std::string &op_name, const ShapeVector &axes_shape);
static bool IsDynamicShapeSkipExecute(const CNodePtr &cnode);
// Return true if the output's shape and type need to be updated after launch.
static bool IsNeedUpdateShapeAndTypeAfterLaunch(const AnfNodePtr &cnode);

View File

@ -27,6 +27,8 @@ RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kConcatOpName, 0);
RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kEmbeddingLookupOpName, 2, 3, 4, 5);
RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kExpandDimsOpName, 1);
RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kTransposeOpName, 1);
RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kReduceAllOpName, 1);
RER_ASCEND_DYNAMIC_CONST_TO_ATTR(kReduceAnyOpName, 1);
RER_ASCEND_STATIC_CONST_TO_ATTR(kApplyRMSPropOpname, 5, 6, 7);
RER_ASCEND_STATIC_CONST_TO_ATTR(kArgminV2OpName, 1);

View File

@ -77,6 +77,7 @@ class ReduceCpuKernelFunc : public CpuKernelFunc {
std::function<void(const T *, T *, size_t, size_t, TransposeIterator *)> reduce_func_;
bool simple_execute_{false};
std::string kernel_name_;
bool need_skip_execute_{false};
};
template <typename T>
@ -212,6 +213,12 @@ int ReduceCpuKernelFunc<T>::Resize(const BaseOperatorPtr &base_operator, const s
axis_ = kernel_ptr->get_axis();
}
(void)GetDynamicAttrIntValue(inputs, kAxisIndex_, inputsOnHost, kernel_name_, &axis_);
if (inputs.size() > kAxisIndex_ &&
AnfAlgo::IsDynamicShapeSkipExecute(kernel_name_, inputs[kAxisIndex_]->GetShapeVector())) {
need_skip_execute_ = true;
} else {
need_skip_execute_ = false;
}
HandleInputAxis();
return KRET_OK;
}
@ -276,6 +283,13 @@ bool ReduceCpuKernelFunc<T>::RunFunc(const std::vector<kernel::AddressPtr> &inpu
size_t input_size = inputs[0]->size / sizeof(T);
auto *input_addr = reinterpret_cast<T *>(inputs[0]->addr);
auto *output_addr = reinterpret_cast<T *>(outputs[0]->addr);
if (need_skip_execute_) {
auto ret = memcpy_s(output_addr, outputs[0]->size, input_addr, inputs[0]->size);
if (ret != EOK) {
MS_LOG(EXCEPTION) << "For '" << kernel_name_ << "', launch kernel error: memcpy failed. Error no: " << ret;
}
return true;
}
if (axis_.empty() || input_shape_.empty() || input_shape_.size() == 1) {
if (input_size < kReduceSmallVectorSize) {
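
Note: when Resize records need_skip_execute_, RunFunc above forwards the input buffer unchanged instead of reducing. A sketch of that copy-through path under the same assumption (identity semantics for an empty-axis ReduceSum), with plain memcpy standing in for the bounds-checked memcpy_s used in the tree:

#include <cstring>
#include <iostream>
#include <vector>

// Skipping the kernel just means copying the input bytes to the output buffer.
bool LaunchIdentityReduce(const float *input, size_t input_bytes, float *output, size_t output_bytes) {
  if (output_bytes < input_bytes) {
    return false;  // destination too small; the real kernel raises an exception here
  }
  std::memcpy(output, input, input_bytes);
  return true;
}

int main() {
  std::vector<float> in{1.f, 2.f, 3.f};
  std::vector<float> out(in.size());
  LaunchIdentityReduce(in.data(), in.size() * sizeof(float), out.data(), out.size() * sizeof(float));
  std::cout << out[0] << " " << out[1] << " " << out[2] << "\n";  // 1 2 3
}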

View File

@ -172,6 +172,7 @@ PrimShapeDependMap &GetHostDependsMap() {
{kResizeNearestNeighborV2Grad, ShapeSet{1}},
{kScatterNd, ShapeSet{2}},
{kSparseToDenseV2, ShapeSet{1}},
{prim::kPrimSparseTensorDenseMatmul->name(), ShapeSet{2}},
{kSliceGrad, ShapeSet{2, 3}},
{kFillV2, ShapeSet{0}},
{kRandomCategorical, ShapeSet{1}},

View File

@ -45,6 +45,9 @@ abstract::TupleShapePtr ArgMaxWithValueInferShape(const PrimitivePtr &primitive,
auto x_shape = x_shape_map[kShape];
auto axis = GetValue<int64_t>(primitive->GetAttr("axis"));
auto keep_dims = GetValue<bool>(primitive->GetAttr("keep_dims"));
if (IsDynamicRank(x_shape)) {
return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{x_shape_ptr, x_shape_ptr});
}
auto x_rank = static_cast<int64_t>(x_shape.size());
if (x_rank == 0) {
if (axis != -1 && axis != 0) {
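
Note: this hunk shows the early-out pattern that recurs through the rest of the commit: if the input rank itself is unknown, return a rank-unknown output instead of validating axes against a rank that does not exist yet. A sketch of the two sentinel predicates, assuming MindSpore's convention of -1 for an unknown dimension (kShapeDimAny) and -2 for an unknown rank (kShapeRankAny):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr int64_t kDimAny = -1;   // a single unknown dimension
constexpr int64_t kRankAny = -2;  // the whole rank is unknown

bool IsDynamicRankSketch(const std::vector<int64_t> &shape) {
  return std::any_of(shape.begin(), shape.end(), [](int64_t d) { return d == kRankAny; });
}

bool IsDynamicSketch(const std::vector<int64_t> &shape) {
  return IsDynamicRankSketch(shape) ||
         std::any_of(shape.begin(), shape.end(), [](int64_t d) { return d == kDimAny; });
}

int main() {
  std::cout << IsDynamicRankSketch({kRankAny}) << "\n";  // 1: rank unknown
  std::cout << IsDynamicSketch({3, kDimAny}) << "\n";    // 1: one dim unknown
  std::cout << IsDynamicSketch({3, 4}) << "\n";          // 0: fully static
}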

View File

@ -49,6 +49,9 @@ abstract::TupleShapePtr ArgMinWithValueInferShape(const PrimitivePtr &primitive,
auto keep_dims_value = primitive->GetAttr("keep_dims");
MS_EXCEPTION_IF_NULL(keep_dims_value);
auto keep_dims = GetValue<bool>(keep_dims_value);
if (IsDynamicRank(x_shape)) {
return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{x_shape_ptr, x_shape_ptr});
}
auto x_rank = SizeToLong(x_shape.size());
if (x_rank == 0) {
if (axis != -1 && axis != 0) {

View File

@ -38,6 +38,11 @@ abstract::ShapePtr CholeskySolveInferShape(const PrimitivePtr &primitive,
auto x2_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->BuildShape());
auto x1_shape = x1_shape_map[kShape];
auto x2_shape = x2_shape_map[kShape];
ShapeVector out_shape = {};
if (IsDynamicRank(x1_shape) || IsDynamicRank(x2_shape)) {
out_shape.push_back(abstract::Shape::kShapeRankAny);
return std::make_shared<abstract::Shape>(out_shape);
}
if (x1_shape.size() != kDefalutRank && x1_shape.size() != kBatchRank) {
MS_EXCEPTION(ValueError) << "For CholeskySolve, the rank of x1 must be equal to 2 or 3"
<< ", while got x1 rank " << x1_shape.size() << ".";

View File

@ -39,6 +39,9 @@ abstract::ShapePtr ConcatInferShape(const PrimitivePtr &primitive, const std::ve
auto element0 = elements[0]->cast<abstract::AbstractTensorPtr>();
MS_EXCEPTION_IF_NULL(element0);
auto element0_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(element0->BuildShape())[kShape];
if (IsDynamicRank(element0_shape)) {
return std::make_shared<abstract::Shape>(ShapeVector{abstract::Shape::kShapeRankAny});
}
auto element0_rank = element0_shape.size();
auto axis_temp = GetValue<int64_t>(primitive->GetAttr(kAxis));
CheckAndConvertUtils::CheckInRange<int64_t>("Concat axis", axis_temp, kIncludeBoth,
@ -55,7 +58,7 @@ abstract::ShapePtr ConcatInferShape(const PrimitivePtr &primitive, const std::ve
(void)CheckAndConvertUtils::CheckInteger(elementi + " shape rank", SizeToLong(elementi_shape.size()), kEqual,
SizeToLong(element0_shape.size()), prim_name);
for (size_t j = 0; j < element0_rank; ++j) {
if (j != axis && elementi_shape[j] != element0_shape[j]) {
if (j != axis && elementi_shape[j] != element0_shape[j] && elementi_shape[j] != -1 && element0_shape[j] != -1) {
MS_EXCEPTION(ValueError)
<< "For '" << prim_name << "', element" << i
<< " shape in input can not concat with element0. To perform concat in the axis 0 "

View File

@ -182,14 +182,11 @@ abstract::ShapePtr Conv2dInferShape(const PrimitivePtr &primitive, const std::ve
auto x_shape = x_shape_map[kShape];
auto w_shape = w_shape_map[kShape];
auto x_shape_rank = SizeToLong(x_shape.size());
constexpr int dynamic_rank_len = 1;
constexpr int dynamic_rank_value = -2;
ShapeVector output_shape;
if (x_shape_rank == dynamic_rank_len && x_shape[0] == dynamic_rank_value) {
if (IsDynamicRank(x_shape) || IsDynamicRank(w_shape)) {
std::vector<ValuePtr> pad_list_val = {MakeValue(0), MakeValue(0), MakeValue(0), MakeValue(0)};
primitive->set_attr("pad_list", MakeValue(pad_list_val));
output_shape = {dynamic_rank_value};
output_shape = {abstract::Shape::kShapeRankAny};
return std::make_shared<abstract::Shape>(output_shape);
}

View File

@ -23,6 +23,10 @@
namespace mindspore {
namespace ops {
namespace {
constexpr size_t kIdx1 = 1;
constexpr size_t kIdx2 = 2;
constexpr size_t kIdx3 = 3;
constexpr size_t kIdx4 = 4;
abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
const std::vector<AbstractBasePtr> &input_args) {
MS_EXCEPTION_IF_NULL(primitive);
@ -35,18 +39,19 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
}
auto x_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
auto x_shape = x_shape_map[kShape];
if (IsDynamicRank(x_shape)) {
return std::make_shared<abstract::Shape>(std::vector<int64_t>{-1, -1, -1, -1, -1});
}
constexpr int64_t shape_size = 5;
(void)CheckAndConvertUtils::CheckInteger("input shape", SizeToLong(x_shape.size()), kEqual, shape_size,
primitive->name());
auto x_v = x_shape[2] * x_shape[3] * x_shape[4];
auto x_v = x_shape[kIdx2] * x_shape[kIdx3] * x_shape[kIdx4];
(void)CheckAndConvertUtils::CheckInteger("x_d * x_h * x_w", x_v, kLessEqual, MAX_SHAPE, primitive->name());
std::vector<int64_t> kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
std::vector<int64_t> strides = GetValue<std::vector<int64_t>>(primitive->GetAttr(kStrides));
constexpr int64_t kernel_size_num = 5;
(void)CheckAndConvertUtils::CheckInteger("kernel_size_length", SizeToLong(kernel_size.size()), kEqual,
kernel_size_num, primitive->name());
constexpr int64_t strides_num = 5;
(void)CheckAndConvertUtils::CheckInteger("strides_length", SizeToLong(strides.size()), kEqual, strides_num,
(void)CheckAndConvertUtils::CheckInteger("kernel_size_length", SizeToLong(kernel_size.size()), kEqual, shape_size,
primitive->name());
(void)CheckAndConvertUtils::CheckInteger("strides_length", SizeToLong(strides.size()), kEqual, shape_size,
primitive->name());
auto padding = GetValue<std::string>(primitive->GetAttr(kPadding));
for (auto &item : strides) {
@ -55,21 +60,34 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
for (auto &item : kernel_size) {
CheckAndConvertUtils::Check("kernel_size", item, kGreaterThan, 0, primitive->name());
}
std::vector<int64_t> y_shape(5);
std::vector<int64_t> y_shape(shape_size);
int64_t padding_needed = 0;
y_shape[0] = x_shape[0];
y_shape[1] = x_shape[1] * kernel_size[2] * kernel_size[3] * kernel_size[4];
y_shape[1] = x_shape[1] == abstract::Shape::kShapeDimAny
? abstract::Shape::kShapeDimAny
: x_shape[kIdx1] * kernel_size[kIdx2] * kernel_size[kIdx3] * kernel_size[kIdx4];
if (padding == "VALID") {
for (int i = d; i <= w; ++i) {
y_shape[IntToSize(i)] =
x_shape[IntToSize(i)] == abstract::Shape::kShapeDimAny
? abstract::Shape::kShapeDimAny
: 1 + (SizeToLong(x_shape[IntToSize(i)]) - kernel_size[IntToSize(i)]) / strides[IntToSize(i)];
if (y_shape[IntToSize(i)] == abstract::Shape::kShapeDimAny) {
continue;
}
(void)CheckAndConvertUtils::CheckInteger(
"padding = VALID, input[" + std::to_string(i) + "] - kernel_size[" + std::to_string(i) + "]",
SizeToLong(x_shape[IntToSize(i)]) - kernel_size[IntToSize(i)], kGreaterEqual, 0, primitive->name());
y_shape[IntToSize(i)] =
1 + (SizeToLong(x_shape[IntToSize(i)]) - kernel_size[IntToSize(i)]) / strides[IntToSize(i)];
}
} else {
for (int i = d; i <= w; ++i) {
y_shape[IntToSize(i)] = (SizeToLong(x_shape[IntToSize(i)]) + strides[IntToSize(i)] - 1) / strides[IntToSize(i)];
y_shape[IntToSize(i)] =
x_shape[IntToSize(i)] == abstract::Shape::kShapeDimAny
? abstract::Shape::kShapeDimAny
: (SizeToLong(x_shape[IntToSize(i)]) + strides[IntToSize(i)] - 1) / strides[IntToSize(i)];
if (y_shape[IntToSize(i)] == abstract::Shape::kShapeDimAny) {
continue;
}
int64_t output_size = SizeToLong(y_shape[IntToSize(i)]);
padding_needed =
(output_size - 1) * strides[IntToSize(i)] + kernel_size[IntToSize(i)] - SizeToLong(x_shape[IntToSize(i)]);
@ -80,7 +98,10 @@ abstract::ShapePtr ExtractVolumePatchesInferShape(const PrimitivePtr &primitive,
padding_needed, kGreaterEqual, 0, primitive->name());
}
}
if (y_shape[3] != 1 || y_shape[4] != 1) {
if (IsDynamic(y_shape)) {
return std::make_shared<abstract::Shape>(y_shape);
}
if (y_shape[kIdx3] != 1 || y_shape[kIdx4] != 1) {
(void)CheckAndConvertUtils::CheckInteger("input_w + pad_l + pad_r - kernel_w - stride_w",
x_shape[4] + padding_needed - kernel_size[4] - strides[4], kGreaterEqual,
0, primitive->name());
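
Note: the per-dimension arithmetic above is the usual VALID/SAME output-length computation, now guarded so an unknown input dimension propagates instead of being computed with. The formulas in isolation (a sketch; OutDim is an illustrative name):

#include <cstdint>
#include <iostream>
#include <string>

constexpr int64_t kDimAny = -1;

// VALID: out = 1 + (in - k) / s (requires in >= k, checked upstream).
// SAME:  out = ceil(in / s) = (in + s - 1) / s.
int64_t OutDim(int64_t in, int64_t kernel, int64_t stride, const std::string &padding) {
  if (in == kDimAny) {
    return kDimAny;  // unknown input dim -> unknown output dim
  }
  if (padding == "VALID") {
    return 1 + (in - kernel) / stride;
  }
  return (in + stride - 1) / stride;
}

int main() {
  std::cout << OutDim(10, 3, 2, "VALID") << "\n";      // 4
  std::cout << OutDim(10, 3, 2, "SAME") << "\n";       // 5
  std::cout << OutDim(kDimAny, 3, 2, "SAME") << "\n";  // -1 propagated
}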

View File

@ -40,15 +40,19 @@ abstract::ShapePtr LerpInferShape(const PrimitivePtr &primitive, const std::vect
auto end_shape = end_shape_map[kShape];
auto weight_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex2]->BuildShape());
auto weight_shape = weight_shape_map[kShape];
if (weight_shape.size() > start_shape.size() && weight_shape.size() > end_shape.size()) {
MS_EXCEPTION(RuntimeError) << "weight should be of dimension max(self.dim(), end.dim()) or lesser.";
}
auto broadcast_shape = CalBroadCastShape(start_shape, end_shape, op_name, "start", "end");
if (input_args[kInputIndex2]->isa<abstract::AbstractTensor>()) {
(void)CalBroadCastShape(start_shape, weight_shape, op_name, "start", "weight");
(void)CalBroadCastShape(end_shape, weight_shape, op_name, "end", "weight");
broadcast_shape = CalBroadCastShape(broadcast_shape, weight_shape, op_name);
}
if (IsDynamicRank(weight_shape) || IsDynamicRank(start_shape) || IsDynamicRank(end_shape)) {
return std::make_shared<abstract::Shape>(broadcast_shape);
}
// Do the additional weight-rank check only for the static-rank case.
if (weight_shape.size() > start_shape.size() && weight_shape.size() > end_shape.size()) {
MS_EXCEPTION(RuntimeError) << "weight should be of dimension max(self.dim(), end.dim()) or lesser.";
}
return std::make_shared<abstract::Shape>(broadcast_shape);
}
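
Note: Lerp now computes the broadcast shape first and only applies the weight-rank check when all ranks are static. A sketch of broadcasting two static-rank shapes in the presence of unknown dimensions (BroadcastShape is an illustrative stand-in for CalBroadCastShape; the -1 handling below is one reasonable choice, not necessarily the framework's exact rule):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

constexpr int64_t kDimAny = -1;

std::vector<int64_t> BroadcastShape(std::vector<int64_t> a, std::vector<int64_t> b) {
  if (a.size() < b.size()) std::swap(a, b);
  b.insert(b.begin(), a.size() - b.size(), 1);  // left-pad the shorter shape with 1s
  std::vector<int64_t> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    int64_t x = a[i], y = b[i];
    if (x == kDimAny || y == kDimAny) {
      int64_t known = (x == kDimAny) ? y : x;
      // -1 against d > 1 must resolve to d at runtime; -1 against 1 or -1 stays unknown.
      out[i] = (known == 1 || known == kDimAny) ? kDimAny : known;
    } else if (x == y || y == 1) {
      out[i] = x;
    } else if (x == 1) {
      out[i] = y;
    } else {
      throw std::invalid_argument("shapes are not broadcastable");
    }
  }
  return out;
}

int main() {
  for (int64_t d : BroadcastShape({3, 1, 5}, {kDimAny, 5})) std::cout << d << " ";  // 3 -1 5
  std::cout << "\n";
}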

View File

@ -39,6 +39,10 @@ abstract::TupleShapePtr LogMatrixDeterminantInferShape(const PrimitivePtr &primi
constexpr int64_t number1 = 1;
constexpr int64_t number2 = 2;
constexpr int64_t dy_shape_placeholder = -1;
if (IsDynamicRank(x_shape)) {
abstract::ShapePtr out_shape = std::make_shared<abstract::Shape>(ShapeVector{abstract::Shape::kShapeDimAny});
return std::make_shared<abstract::TupleShape>(std::vector<abstract::BaseShapePtr>{out_shape, out_shape});
}
(void)CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, number2, prim_name);
std::vector<int64_t> shape(x_shape.begin(), (x_shape.end() - number2));
abstract::ShapePtr out_shape = std::make_shared<abstract::Shape>(shape);

View File

@ -38,12 +38,19 @@ abstract::ShapePtr MatrixExpInferShape(const PrimitivePtr &primitive, const std:
auto x_rank = SizeToLong(x_shape.size());
constexpr int64_t number1 = 1;
constexpr int64_t number2 = 2;
(void)CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, number2, prim_name);
if (SizeToLong(x_shape[x_rank - number1]) != SizeToLong(x_shape[x_rank - number2])) {
MS_EXCEPTION(ValueError) << "For " << prim_name << ", the input expects a tensor of squared matrices"
<< ", but got shape " << x_shape << ".";
if (IsDynamicRank(x_shape)) {
return std::make_shared<abstract::Shape>(ShapeVector{abstract::Shape::kShapeRankAny});
}
(void)CheckAndConvertUtils::CheckInteger("x rank", x_rank, kGreaterEqual, number2, prim_name);
if (x_shape[x_rank - number1] != x_shape[x_rank - number2]) {
if (x_shape[x_rank - number1] == -1 || x_shape[x_rank - number2] == -1) {
MS_LOG(WARNING) << "There is one dimension of the " << prim_name << "'s input is dynamic. Please make sure "
<< "the input is a squared matrix whose last two dimensions are equal during runtime.";
} else {
MS_EXCEPTION(ValueError) << "For " << prim_name << ", the input expects a tensor of squared matrices"
<< ", but got shape " << x_shape << ".";
}
}
(void)CheckAndConvertUtils::CheckInteger("row size", x_shape[x_rank - number1], kGreaterEqual, number1, prim_name);
return shape_element;
}

View File

@ -57,24 +57,22 @@ class OneHotInfer : public abstract::OpInferBase {
MS_EXCEPTION_IF_NULL(depth);
int64_t depth_value;
if (depth->isa<tensor::Tensor>()) {
auto depth_shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape());
auto depth_shape = depth_shape_map[kShape];
if (IsDynamic(depth_shape)) {
return std::make_shared<abstract::Shape>(std::vector<int64_t>{abstract::Shape::kShapeRankAny});
auto depth_data = CheckAndConvertUtils::CheckTensorIntValue("depth", depth, op_name);
if (depth_data.size() != 1) {
MS_LOG_EXCEPTION << "For " << op_name << ", size of depth shouble be 1, but got " << depth_data.size();
}
(void)CheckAndConvertUtils::CheckTensorTypeValid("depth", input_args[1]->BuildType(), {kInt64}, op_name);
auto depth_data = depth->cast<tensor::TensorPtr>()->data_c();
MS_EXCEPTION_IF_NULL(depth_data);
auto data_value = reinterpret_cast<int64_t *>(depth_data);
depth_value = *data_value;
depth_value = depth_data[0];
(void)CheckAndConvertUtils::CheckInteger("depth value", depth_value, kGreaterEqual, 0, op_name);
} else if (depth->isa<Int64Imm>()) {
depth_value = GetValue<int64_t>(depth);
(void)CheckAndConvertUtils::CheckInteger("depth value", depth_value, kGreaterEqual, 0, op_name);
} else if (input_args[depth_index]->isa<abstract::AbstractTensor>()) {
depth_value = abstract::Shape::kShapeDimAny;
} else {
MS_EXCEPTION(TypeError) << "For '" << op_name
<< "', 'depth' must be a tensor or number of int64, but got an invalid type.";
}
(void)CheckAndConvertUtils::CheckInteger("depth value", depth_value, kGreaterEqual, 0, op_name);
if (axis >= 0) {
(void)in_shape.insert(in_shape.begin() + axis, depth_value);
} else {
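
Note: the rewritten OneHot infer handles three depth sources: a constant tensor (read via CheckTensorIntValue), an Int64Imm scalar, and a tensor whose value is not yet known, which falls back to the unknown-dim placeholder. The final insertion step, sketched with std::optional modelling the known/unknown split (names and the negative-axis handling here are illustrative):

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

constexpr int64_t kDimAny = -1;

// Insert depth at `axis`; an unknown depth becomes the -1 placeholder.
std::vector<int64_t> OneHotShape(std::vector<int64_t> in_shape, std::optional<int64_t> depth, int64_t axis) {
  int64_t depth_value = depth.value_or(kDimAny);
  if (axis >= 0) {
    in_shape.insert(in_shape.begin() + axis, depth_value);
  } else {
    in_shape.push_back(depth_value);  // negative axis: appended last for illustration
  }
  return in_shape;
}

int main() {
  for (int64_t d : OneHotShape({2, 3}, 10, 1)) std::cout << d << " ";  // 2 10 3
  std::cout << "\n";
  for (int64_t d : OneHotShape({2, 3}, std::nullopt, -1)) std::cout << d << " ";  // 2 3 -1
  std::cout << "\n";
}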

View File

@ -40,36 +40,40 @@ abstract::ShapePtr SparseTensorDenseAddInferShape(const PrimitivePtr &prim,
const size_t kDimensionOne = 1;
const size_t kDimensionTwo = 2;
const size_t kDimensionFive = 5;
if (x1_indices_shape_size != kDimensionTwo) {
if (!IsDynamicRank(x1_indices_shape) && x1_indices_shape_size != kDimensionTwo) {
MS_EXCEPTION(ValueError) << "For " << prim_name
<< ", the 'x1_indices' should have rank 2, but got: " << x1_indices_shape_size;
}
if (x1_shape_shape_size != kDimensionOne) {
if (!IsDynamicRank(x1_shape_shape) && x1_shape_shape_size != kDimensionOne) {
MS_EXCEPTION(ValueError) << "For " << prim_name
<< ", the 'x1_shape' should have rank 1, but got: : " << x1_shape_shape_size;
}
if (x1_values_shape_size != kDimensionOne || x1_values_shape[0] != x1_indices_shape[0]) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the 'x1_values' must be a 1-D tensor and the first dimension length"
<< " must be equal to the first dimension length of 'x1_indices', but got "
<< x1_values_shape[0] << " vs " << x1_indices_shape[0] << ".";
}
if (x1_shape_shape[0] != x1_indices_shape[1]) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the length of 'x1_shape' should be equal to the second dimension"
<< " length of 'x1_indices', but got " << x1_shape_shape[0] << " vs "
<< x1_indices_shape[1] << ".";
}
size_t x1_shape_rank = static_cast<size_t>(x1_shape_shape[0]);
if (x1_shape_rank != x2_shape_size) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the rank of 'x1_shape' should be equal to the rank of 'x2_shape', but got "
<< x1_shape_rank << " vs " << x2_shape_size << ".";
}
if (x2_shape_size > kDimensionFive || x2_shape_size < kDimensionOne) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', Only tensors with ranks between 1 and 5 are currently supported. "
<< "Tensor rank: " << x2_shape_size << ".";
if (!IsDynamic(x1_values_shape) && !IsDynamic(x1_shape_shape) && !IsDynamic(x1_indices_shape) &&
!IsDynamic(x2_shape)) {
if (x1_values_shape_size != kDimensionOne || x1_values_shape[0] != x1_indices_shape[0]) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the 'x1_values' must be a 1-D tensor and the first dimension length"
<< " must be equal to the first dimension length of 'x1_indices', but got "
<< x1_values_shape[0] << " vs " << x1_indices_shape[0] << ".";
}
if (x1_shape_shape[0] != x1_indices_shape[1]) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the length of 'x1_shape' should be equal to the second dimension"
<< " length of 'x1_indices', but got " << x1_shape_shape[0] << " vs "
<< x1_indices_shape[1] << ".";
}
size_t x1_shape_rank = static_cast<size_t>(x1_shape_shape[0]);
if (x1_shape_rank != x2_shape_size) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', the rank of 'x1_shape' should be equal to the rank of 'x2_shape', but got "
<< x1_shape_rank << " vs " << x2_shape_size << ".";
}
if (x2_shape_size > kDimensionFive || x2_shape_size < kDimensionOne) {
MS_EXCEPTION(ValueError) << "For '" << prim_name
<< "', Only tensors with ranks between 1 and 5 are currently supported. "
<< "Tensor rank: " << x2_shape_size << ".";
}
}
ShapeVector output_shape = x2_shape;
return std::make_shared<abstract::Shape>(output_shape);

View File

@ -27,6 +27,16 @@
namespace mindspore {
namespace ops {
namespace {
void CheckShapeRank(const size_t cur_rank, const size_t expected_rank, const std::string &op_name,
const std::string &arg_name) {
if (cur_rank != expected_rank) {
MS_LOG(EXCEPTION) << "For '" << op_name << "', '" << arg_name << "' must be a " << expected_rank
<< "-dimensional tensor, but got a " << cur_rank << "-dimensional tensor.";
}
}
} // namespace
bool checkType(std::string name, TypePtr dtype, std::set<TypePtr> vtypes, const PrimitivePtr &primitive) {
std::map<std::string, TypePtr> types;
(void)types.emplace(name, dtype);
@ -68,82 +78,80 @@ abstract::ShapePtr SparseTensorDenseMatmulInferShape(const PrimitivePtr &primiti
auto x2_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[3]->BuildShape())[kShape];
const int kDimensionTwo = 2;
const int kDimensionOne = 1;
auto input_y = input_args[2];
auto y_value = input_y->BuildValue();
auto x1_shape = input_args[2];
auto x1_shape_value = x1_shape->BuildValue();
std::string info;
if (!checkContainer(primitive, input_args, &info)) {
MS_EXCEPTION(TypeError) << "For " << primitive->name() << info;
}
if (input_y->isa<abstract::AbstractTuple>()) {
int64_t shape_len = static_cast<int64_t>(GetValue<std::vector<int64_t>>(y_value).size());
if (x1_shape->isa<abstract::AbstractTuple>()) {
int64_t shape_len = static_cast<int64_t>(GetValue<std::vector<int64_t>>(x1_shape_value).size());
shape_shape = std::vector<int64_t>{shape_len};
}
if (indices_shape.size() != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input indices should "
<< "have rank 2, but got " << indices_shape.size() << ".";
std::vector<std::vector<int64_t>> all_shapes = {indices_shape, values_shape, shape_shape, x2_shape};
bool is_dynamic = std::any_of(all_shapes.begin(), all_shapes.end(), IsDynamic);
bool is_dynamic_rank = std::any_of(all_shapes.begin(), all_shapes.end(), IsDynamicRank);
if (!is_dynamic_rank) {
CheckShapeRank(indices_shape.size(), kDimensionTwo, primitive->name(), "indices");
CheckShapeRank(values_shape.size(), kDimensionOne, primitive->name(), "values");
CheckShapeRank(shape_shape.size(), kDimensionOne, primitive->name(), "sparse_shape");
CheckShapeRank(x2_shape.size(), kDimensionTwo, primitive->name(), "the shape of input dense");
}
if (indices_shape[1] != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the 2nd dimension of indices "
<< "should be 2, but got " << indices_shape[1] << ".";
}
if (values_shape.size() != kDimensionOne) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input values should "
<< "have rank 1, but got " << values_shape.size() << ".";
}
if (values_shape[0] != indices_shape[0]) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input values' length "
<< "is different from indices' first dimension";
}
if (shape_shape.size() != kDimensionOne) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the sparse_shape should "
<< "have rank 1, but got " << shape_shape.size() << ".";
}
if (shape_shape[0] != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the 1st dimension of sparse_shape "
<< "should be 2, but got " << shape_shape[0] << ".";
}
if (x2_shape.size() != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the shape of input dense "
<< "should be [2], but got [" << x2_shape.size() << "].";
if (!is_dynamic) {
if (indices_shape[1] != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the 2nd dimension of indices "
<< "should be 2, but got " << indices_shape[1] << ".";
}
if (values_shape[0] != indices_shape[0]) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input values' length "
<< "is different from indices' first dimension";
}
if (shape_shape[0] != kDimensionTwo) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the 1st dimension of sparse_shape "
<< "should be 2, but got " << shape_shape[0] << ".";
}
if (x1_shape_value->isa<AnyValue>() || x1_shape_value->isa<None>()) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input sparse_shape "
<< "should be constant.";
}
}
auto adjoint_a = primitive->GetAttr("adjoint_st");
auto adjoint_b = primitive->GetAttr("adjoint_dt");
bool adjoint_av = GetValue<bool>(adjoint_a);
bool adjoint_bv = GetValue<bool>(adjoint_b);
auto x1_shape_value = input_args[2]->BuildValue();
MS_EXCEPTION_IF_NULL(x1_shape_value);
if (x1_shape_value->isa<AnyValue>() || x1_shape_value->isa<None>()) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input sparse_shape "
<< "should be constant.";
int64_t x1_row = -1, x1_col = -1;
int64_t x2_row = -1, x2_col = -1;
if (x2_shape.size() == kDimensionTwo) {
x2_row = x2_shape[0];
x2_col = x2_shape[1];
}
if (input_y->isa<abstract::AbstractTuple>()) {
auto temp = GetValue<std::vector<int64_t>>(y_value);
int64_t x1_row = temp[0], x1_col = temp[1];
int64_t x2_row = x2_shape[0], x2_col = x2_shape[1];
if (x1_shape->isa<abstract::AbstractTuple>()) {
auto temp = GetValue<std::vector<int64_t>>(x1_shape_value);
if (temp.size() == kDimensionTwo) {
x1_row = temp[0];
x1_col = temp[1];
}
if (adjoint_av) std::swap(x1_row, x1_col);
if (adjoint_bv) std::swap(x2_row, x2_col);
if (x1_col != x2_row) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input sparse is "
<< "not compatible with the input dense.";
}
int64_t y_row = x1_row, y_col = x2_col;
std::vector<int64_t> y_shape{y_row, y_col};
return std::make_shared<abstract::Shape>(y_shape);
}
auto x1_shape_tensor = x1_shape_value->cast<tensor::TensorPtr>();
MS_EXCEPTION_IF_NULL(x1_shape_tensor);
// x1_shape has only one type --- int64
int64_t *x1_shape_data = static_cast<int64_t *>(x1_shape_tensor->data_c());
// x1_shape is input[2], right here can use x1_shape_data[0], x1_shape_data[1]
// directly
int64_t x1_row = x1_shape_data[0], x1_col = x1_shape_data[1];
int64_t x2_row = x2_shape[0], x2_col = x2_shape[1];
if (x1_shape_value->isa<tensor::Tensor>()) {
auto shape = CheckAndConvertUtils::CheckTensorIntValue("x1_shape", x1_shape_value, primitive->name());
if (shape.size() == kDimensionTwo) {
x1_row = shape[0];
x1_col = shape[1];
}
}
if (adjoint_av) std::swap(x1_row, x1_col);
if (adjoint_bv) std::swap(x2_row, x2_col);
if (x1_col != x2_row) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input sparse tensor is "
<< "not compatible with the input dense.";
}
int64_t y_row = x1_row, y_col = x2_col;
std::vector<int64_t> y_shape{y_row, y_col};
return std::make_shared<abstract::Shape>(y_shape);
@ -167,7 +175,7 @@ TypePtr SparseTensorDenseMatmulInferType(const PrimitivePtr &primitive,
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input indices "
<< "data type should be int32 or int64.";
}
if (!x1_shape->isa<abstract::AbstractTuple>() && !checkType("shape_type", shape_type, {kInt64}, primitive)) {
if (!x1_shape->isa<abstract::AbstractTuple>() && !checkType("shape_type", shape_type, {kInt64, kInt32}, primitive)) {
MS_LOG(EXCEPTION) << "For '" << primitive->name() << "', the input shape "
<< "data type should be int64.";
}
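
Note: after the rank and value checks, the shape computation earlier in this file reduces to a small matmul rule: take the sparse operand's dense shape and the dense operand's shape, apply the adjoint swaps, verify the inner dimensions only when both are known, and emit {rows, cols} with -1 wherever a side is still unknown. In isolation (a sketch; MatmulOutShape is an illustrative name):

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <utility>
#include <vector>

constexpr int64_t kDimAny = -1;

std::vector<int64_t> MatmulOutShape(std::pair<int64_t, int64_t> x1,  // sparse operand's dense shape
                                    std::pair<int64_t, int64_t> x2,  // dense operand's shape
                                    bool adjoint_st, bool adjoint_dt) {
  if (adjoint_st) std::swap(x1.first, x1.second);
  if (adjoint_dt) std::swap(x2.first, x2.second);
  // The inner-dim check is only meaningful when both sides are known.
  if (x1.second != kDimAny && x2.first != kDimAny && x1.second != x2.first) {
    throw std::invalid_argument("sparse operand is not compatible with dense operand");
  }
  return {x1.first, x2.second};
}

int main() {
  auto s = MatmulOutShape({kDimAny, 4}, {4, 7}, false, false);
  std::cout << s[0] << " " << s[1] << "\n";  // -1 7: unknown rows propagate
}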

View File

@ -35,7 +35,6 @@ abstract::AbstractBasePtr TensorShapeInfer(const abstract::AnalysisEnginePtr &,
}
MS_EXCEPTION_IF_NULL(input->shape());
auto shape = input->shape()->shape();
ShapeVector tensor_shp({static_cast<int64_t>(shape.size())});
if (IsDynamic(shape)) {
if (IsDynamicRank(shape)) {
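
Note: TensorShape's output is a 1-D tensor whose single dimension equals the input's rank, so a dynamic-rank input makes that length itself unknown. A sketch of the rule, under the same -1/-2 sentinel assumption as above:

#include <cstdint>
#include <iostream>
#include <vector>

constexpr int64_t kDimAny = -1;
constexpr int64_t kRankAny = -2;

// Output shape of Shape/TensorShape: {rank}, or {-1} when the rank is unknown.
std::vector<int64_t> TensorShapeOut(const std::vector<int64_t> &in_shape) {
  bool rank_unknown = in_shape.size() == 1 && in_shape[0] == kRankAny;
  return {rank_unknown ? kDimAny : static_cast<int64_t>(in_shape.size())};
}

int main() {
  std::cout << TensorShapeOut({3, 4, 5})[0] << "\n";   // 3: a 3-element shape vector
  std::cout << TensorShapeOut({kRankAny})[0] << "\n";  // -1: length unknown
}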

View File

@ -68,6 +68,9 @@ abstract::ShapePtr TileInferShape(const PrimitivePtr &primitive, const std::vect
MS_EXCEPTION_IF_NULL(multiple_value);
if (multiple_value->isa<tensor::Tensor>()) {
multiples_v = CheckAndConvertUtils::CheckTensorIntValue("multiples", multiple_value, prim_name);
if (IsDynamicRank(multiples_v)) {
return std::make_shared<abstract::Shape>(ShapeVector{abstract::Shape::kShapeRankAny});
}
} else {
multiples_v = CheckAndConvertUtils::CheckTupleInt("input[multiples]", multiple_value, prim_name);
}

View File

@ -0,0 +1,119 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import nn, context, Tensor
from mindspore.ops import operations as P
from .test_grad_of_dynamic import TestDynamicGrad
class NetAdd(nn.Cell):
def __init__(self):
super(NetAdd, self).__init__()
self.add = P.Add()
def construct(self, x, y):
return self.add(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetAdd())
test_dynamic_bc = TestDynamicGrad(NetAdd())
x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]).astype(np.float32))
y = Tensor(np.array([[-1, 2, -3, 0], [8, 6, -9, 1], [8, 10, 0, 12]]).astype(np.float32))
z = Tensor(np.array([7]).astype(np.float32))
test_dynamic.test_dynamic_grad_net((x, y), is_dynamic_rank)
test_dynamic_bc.test_dynamic_grad_net((x, z), is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test Add dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test Add dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test Add dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test Add dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_shape():
"""
Feature: test Add dynamic shape on Ascend.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_rank():
"""
Feature: test Add dynamic rank on Ascend.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(True)

View File

@ -0,0 +1,62 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class TestAddV2(nn.Cell):
def __init__(self):
super(TestAddV2, self).__init__()
self.ops = ops.operations.math_ops.AddV2()
def construct(self, x, y):
return self.ops(x, y)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_addv2_dynamic_shape():
"""
Feature: AddV2 Grad DynamicShape.
Description: Test case of dynamic shape for AddV2 grad operator on CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
test_dynamic = TestDynamicGrad(TestAddV2())
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
input_y = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
x = [input_x, input_y]
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_addv2_dynamic_rank():
"""
Feature: AddV2 Grad DynamicShape.
Description: Test case of dynamic rank for AddV2 grad operator on CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
test_dynamic = TestDynamicGrad(TestAddV2())
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
input_y = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
x = [input_x, input_y]
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,104 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P
from .test_grad_of_dynamic import TestDynamicGrad
class AddcdivNet(nn.Cell):
def __init__(self):
super(AddcdivNet, self).__init__()
self.addcdiv = P.Addcdiv()
def construct(self, input_data, x1, x2, value):
return self.addcdiv(input_data, x1, x2, value)
def dynamic_shape():
type_s = np.float32
test_dynamic = TestDynamicGrad(AddcdivNet())
input_data = Tensor(np.array([12]).astype(type_s))
x1 = Tensor(np.array([7]).astype(type_s))
x2 = Tensor(np.array([3]).astype(type_s))
value = Tensor(np.array([37]).astype(type_s))
test_dynamic.test_dynamic_grad_net((input_data, x1, x2, value))
def dynamic_rank():
type_s = np.float32
test_dynamic = TestDynamicGrad(AddcdivNet())
input_data = Tensor(np.array([12]).astype(type_s))
x1 = Tensor(np.array([7]).astype(type_s))
x2 = Tensor(np.array([3]).astype(type_s))
value = Tensor(np.array([37]).astype(type_s))
test_dynamic.test_dynamic_grad_net((input_data, x1, x2, value), True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_addcdiv_cpu():
"""
Feature: Addcdiv Grad DynamicShape.
Description: Test case of dynamic shape for Addcdiv grad operator on CPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_dynamic_addcdiv_gpu():
"""
Feature: Addcdiv Grad DynamicShape.
Description: Test case of dynamic shape for Addcdiv grad operator on GPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
def test_dynamic_addcdiv_ascend():
"""
Feature: Addcdiv Grad DynamicShape.
Description: Test case of dynamic shape for Addcdiv grad operator on Ascend.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()

View File

@ -0,0 +1,82 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore import ops
from .test_grad_of_dynamic import TestDynamicGrad
class NetArgMaxWithValue(nn.Cell):
def __init__(self, keep_dims=False, axis=0):
super(NetArgMaxWithValue, self).__init__()
self.op = ops.ArgMaxWithValue(keep_dims=keep_dims, axis=axis)
def construct(self, x):
return self.op(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_shape_arg_max_with_value():
"""
Feature: ArgMaxWithValue Grad DynamicShape.
Description: Test case of dynamic shape for ArgMaxWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMaxWithValue(False, -1))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_rank_arg_max_with_value():
"""
Feature: ArgMaxWithValue Grad DynamicRank.
Description: Test case of dynamic rank for ArgMaxWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMaxWithValue(True, 3))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_rank_arg_max_with_value_neg_axis():
"""
Feature: ArgMaxWithValue Grad DynamicRank.
Description: Test case of dynamic rank for ArgMaxWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMaxWithValue(True, -2))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,82 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore import ops
from .test_grad_of_dynamic import TestDynamicGrad
class NetArgMinWithValue(nn.Cell):
def __init__(self, keep_dims=False, axis=0):
super(NetArgMinWithValue, self).__init__()
self.op = ops.ArgMinWithValue(keep_dims=keep_dims, axis=axis)
def construct(self, x):
return self.op(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_shape_arg_min_with_value():
"""
Feature: ArgMinWithValue Grad DynamicShape.
Description: Test case of dynamic shape for ArgMinWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMinWithValue(False, -1))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_rank_arg_min_with_value():
"""
Feature: ArgMinWithValue Grad DynamicRank.
Description: Test case of dynamic rank for ArgMinWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMinWithValue(True, 3))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_rank_arg_min_with_value_neg_axis():
"""
Feature: ArgMinWithValue Grad DynamicRank.
Description: Test case of dynamic rank for ArgMinWithValue grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetArgMinWithValue(False, -2))
x = Tensor(np.arange(2 * 3 * 4 * 5).reshape((2, 3, 4, 5)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -44,6 +44,9 @@ def dynamic_rank():
test_dynamic.test_dynamic_grad_net((x, y), True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_atan2_cpu():
"""
Feature: Atan2 Grad DynamicShape.
@ -61,8 +64,8 @@ def test_dynamic_atan2_cpu():
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_atan2_gpu():
"""
Feature: Atan2 Grad DynamicShape.
@ -80,9 +83,9 @@ def test_dynamic_atan2_gpu():
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_dynamic_atan2_ascend():
"""
Feature: Atan2 Grad DynamicShape.

View File

@ -0,0 +1,62 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore import ops
from .test_grad_of_dynamic import TestDynamicGrad
class NetAvgPool3D(nn.Cell):
def __init__(self):
super(NetAvgPool3D, self).__init__()
self.op = ops.AvgPool3D(kernel_size=2, strides=1, pad_mode="valid")
def construct(self, x):
return self.op(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_shape_avg_pool_3d():
"""
Feature: AvgPool3D Grad DynamicShape.
Description: Test case of dynamic shape for AvgPool3D grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetAvgPool3D())
x = Tensor(np.arange(1 * 2 * 2 * 2 * 3).reshape((1, 2, 2, 2, 3)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_rank_avg_pool_3d():
"""
Feature: AvgPool3D Grad DynamicRank.
Description: Test case of dynamic rank for AvgPool3D grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetAvgPool3D())
x = Tensor(np.arange(1 * 2 * 2 * 2 * 3).reshape((1, 2, 2, 2, 3)).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,68 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class BCEWithLogitsLossNet(nn.Cell):
def __init__(self, reduction):
super(BCEWithLogitsLossNet, self).__init__()
self.op = ops.BCEWithLogitsLoss(reduction=reduction)
def construct(self, predict, target, weight, pos_weight):
return self.op(predict, target, weight, pos_weight)
def dyn_grad_func(dtype=np.float16, is_dynamic_rank=False):
test_dynamic = TestDynamicGrad(BCEWithLogitsLossNet("mean"))
predict = Tensor(np.arange(6).reshape(2, 3).astype(dtype))
target = Tensor(np.arange(34, 40).reshape(2, 3).astype(dtype))
weight = Tensor(np.array([2, 3, 1]).astype(dtype))
pos_weight = Tensor(np.array([6, 3, 4]).astype(dtype))
inputs = [predict, target, weight, pos_weight]
test_dynamic.test_dynamic_grad_net(inputs, is_dynamic_rank=is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_bcewithlogitsloss_dynamic_shape():
"""
Feature: Test the bprop process of BCEWithLogitsLoss in PyNative mode with dynamic shape inputs
Description: The inputs are dynamic shape and the bprop function uses these shapes when reduction mode is mean
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(is_dynamic_rank=False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bcewithlogitsloss_dynamic_rank():
"""
Feature: Test the bprop process of BCEWithLogitsLoss in PyNative mode with dynamic rank inputs
Description: The inputs are dynamic rank and the bprop function uses these shapes when reduction mode is mean
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(dtype=np.float32, is_dynamic_rank=True)

View File

@ -0,0 +1,115 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import BesselI1
from .test_grad_of_dynamic import TestDynamicGrad
class BesselI1Net(nn.Cell):
def __init__(self):
super().__init__()
self.bessel_i1 = BesselI1()
def construct(self, x):
return self.bessel_i1(x)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(BesselI1Net())
x_np = np.array([1, 2, 3, 4]).astype(np.float32)
test_dynamic.test_dynamic_grad_net(Tensor(x_np), is_dynamic_rank)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_1():
"""
Feature: test BesselI1 grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_1():
"""
Feature: test BesselI1 grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.skip(reason="For 'BroadcastTo', input args must be tensor or tuple")
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_2():
"""
Feature: test BesselI1 grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.skip(reason="For 'BroadcastTo', input args must be tensor or tuple")
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_2():
"""
Feature: test BesselI1 grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test BesselI1 grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test BesselI1 grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -0,0 +1,115 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import BesselJ1
from .test_grad_of_dynamic import TestDynamicGrad
class BesselJ1Net(nn.Cell):
def __init__(self):
super().__init__()
self.bessel_j1 = BesselJ1()
def construct(self, x):
return self.bessel_j1(x)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(BesselJ1Net())
x_np = np.array([1, 2, 3, 4]).astype(np.float32)
test_dynamic.test_dynamic_grad_net(Tensor(x_np), is_dynamic_rank)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_1():
"""
Feature: test BesselJ1 grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_1():
"""
Feature: test BesselJ1 grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.skip(reason="For 'BroadcastTo', input args must be tensor or tuple")
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_2():
"""
Feature: test BesselJ1 grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.skip(reason="For 'BroadcastTo', input args must be tensor or tuple")
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_2():
"""
Feature: test BesselJ1 grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test BesselJ1 grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test BesselJ1 grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of the static shape case.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -0,0 +1,60 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetBroadcastTo(nn.Cell):
def __init__(self):
super(NetBroadcastTo, self).__init__()
self.op = ops.BroadcastTo((16, 2, 3))
def construct(self, x):
return self.op(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_shape_broadcast_to():
"""
Feature: BroadcastTo Grad DynamicShape.
Description: Test case of dynamic shape for BroadcastTo grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetBroadcastTo())
x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_rank_broadcast_to():
"""
Feature: BroadcastTo Grad DynamicRank.
Description: Test case of dynamic rank for BroadcastTo grad.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetBroadcastTo())
x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)
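All of these cases route through the shared TestDynamicGrad helper. A minimal sketch of the pattern it automates, assuming the public set_inputs API for dynamic-shape compilation; the GradNet wrapper below is illustrative, not the helper's actual internals:
import numpy as np
import mindspore as ms
from mindspore import nn, ops, Tensor
class GradNet(nn.Cell):
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
        self.grad_op = ops.GradOperation(get_all=True)
    def construct(self, *inputs):
        return self.grad_op(self.net)(*inputs)
# Compile the grad graph against an unknown-dims placeholder, then feed real
# data; the resulting gradients should match a plain static-shape run.
grad_net = GradNet(NetBroadcastTo())
grad_net.set_inputs(Tensor(shape=[None, None], dtype=ms.float16))
x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float16))
dyn_grads = grad_net(x)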

View File

@ -0,0 +1,62 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, Tensor, context
from mindspore.common import dtype as mstype
from mindspore.ops.operations.math_ops import CholeskySolve
from .test_grad_of_dynamic import TestDynamicGrad
class CholeskySolveNet(nn.Cell):
def __init__(self):
super(CholeskySolveNet, self).__init__()
self.cholesky = CholeskySolve()
def construct(self, x1, x2):
return self.cholesky(x1, x2)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(CholeskySolveNet())
x1 = Tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), mstype.float32)
x2 = Tensor(np.array([[2, 0, 0], [4, 1, 0], [-1, 1, 2]]), mstype.float32)
test_dynamic.test_dynamic_grad_net([x1, x2], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_choleskysolve_dynamic_shape():
"""
Feature: Test CholeskySolve on CPU.
Description: The shape of inputs is dynamic.
Expectation: Assert that results are consistent with expect.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_choleskysolve_dynamic_rank():
"""
Feature: Test CholeskySolve on CPU.
Description: The rank of inputs is dynamic.
Expectation: Assert that results are consistent with expect.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -0,0 +1,54 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetConcat(nn.Cell):
def __init__(self):
super(NetConcat, self).__init__()
self.op = ops.Concat()
def construct(self, *args):
return self.op(args)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetConcat())
x = Tensor(np.array([[-1.0, 4.0], [2.0, -5.0]]).astype(np.float32))
y = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
def test_grad_dynamic_shape():
"""
Feature: test Concat dynamic shape.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
def test_grad_dynamic_rank():
"""
Feature: test Concat dynamic rank.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,63 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetConv2d(nn.Cell):
def __init__(self):
super(NetConv2d, self).__init__()
self.op = ops.Conv2D(4, (3, 3))
def construct(self, x, y):
return self.op(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetConv2d())
x = Tensor(np.ones([4, 4, 4, 4]).astype(np.float32))
y = Tensor(np.ones([4, 4, 3, 3]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test Conv2D dynamic shape.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test Conv2D dynamic rank.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,129 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P
from .test_grad_of_dynamic import TestDynamicGrad
class Conv2dTransposeNet(nn.Cell):
def __init__(self):
super(Conv2dTransposeNet, self).__init__()
out_channel = 1
kernel_size = 3
self.conv_input = P.Conv2DTranspose(out_channel,
kernel_size,
pad_mode="valid",
pad=0,
mode=1,
stride=1,
dilation=1,
group=1)
def construct(self, out, w, shape):
return self.conv_input(out, w, shape)
def dynamic_shape():
test_dynamic = TestDynamicGrad(Conv2dTransposeNet(), skip_convert_out_ids=[2])
w = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
x = Tensor(np.array([[[
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
out = Tensor(np.array([[[
[-5, -4, 0, 8],
[-10, -2, 2, 3],
[0, -2, -4, -7],
[-3, -2, -3, -16]]]]).astype(np.float32))
test_dynamic.test_dynamic_grad_net((out, w, x.shape))
def dynamic_rank():
test_dynamic = TestDynamicGrad(Conv2dTransposeNet(), skip_convert_out_ids=[2])
w = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
x = Tensor(np.array([[[
[3, 0, 1, 2, 7, 4],
[1, 5, 8, 9, 3, 1],
[2, 7, 2, 5, 1, 3],
[0, 1, 3, 1, 7, 8],
[4, 2, 1, 6, 2, 8],
[2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
out = Tensor(np.array([[[
[-5, -4, 0, 8],
[-10, -2, 2, 3],
[0, -2, -4, -7],
[-3, -2, -3, -16]]]]).astype(np.float32))
test_dynamic.test_dynamic_grad_net((out, w, x.shape), True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_conv2dtranspose_cpu():
"""
Feature: Conv2dTranspose Grad DynamicShape.
Description: Test case of dynamic shape for Conv2dTranspose grad operator on CPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_dynamic_conv2dtranspose_gpu():
"""
Feature: Conv2dTranspose Grad DynamicShape.
Description: Test case of dynamic shape for Conv2dTranspose grad operator on GPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
def test_dynamic_conv2dtranspose_ascend():
"""
Feature: Conv2dTranspose Grad DynamicShape.
Description: Test case of dynamic shape for Conv2dTranspose grad operator on Ascend.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()
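Input 2 here is a plain Python shape tuple with no tensor gradient, which is why its position is skipped via skip_convert_out_ids=[2]. A hedged sketch of the kind of filtering involved; to_dynamic is a hypothetical helper written for illustration, not the real TestDynamicGrad code:
from mindspore import Tensor
def to_dynamic(inputs, skip_ids):
    # Swap each tensor input for a dynamic-shape placeholder; skipped
    # positions (such as the static shape tuple) stay concrete.
    converted = []
    for i, item in enumerate(inputs):
        if i in skip_ids or not isinstance(item, Tensor):
            converted.append(item)
        else:
            converted.append(Tensor(shape=[None] * item.ndim, dtype=item.dtype))
    return converted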

View File

@ -47,31 +47,33 @@ def grad_dyn_case(is_dynamic_rank):
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_1():
def test_grad_dynamic_shape_1():
"""
Feature: test Conv3D grad dynamic shape on GPU.
Feature: test Conv3D grad dynamic shape.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_1():
def test_grad_dynamic_rank_1():
"""
Feature: test Conv3D grad dynamic rank on GPU.
Feature: test Conv3D grad dynamic rank.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_2():
@ -84,7 +86,7 @@ def test_gpu_grad_dynamic_shape_2():
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_2():
@ -95,31 +97,3 @@ def test_gpu_grad_dynamic_rank_2():
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.skip(reason="CPU无Conv3DBackpropFilter, Conv3DBackpropInput, kernel实现")
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test Conv3D grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.skip(reason="CPU无Conv3DBackpropFilter, Conv3DBackpropInput, kernel实现")
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test Conv3D grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class CumProdNet(nn.Cell):
def __init__(self):
super(CumProdNet, self).__init__()
self.op = ops.CumProd()
def construct(self, x, axis):
return self.op(x, axis)
def dyn_grad_func(dtype=np.float16, is_dynamic_rank=False):
test_dynamic = TestDynamicGrad(CumProdNet())
x = Tensor(np.random.rand(2, 3, 4, 4).astype(dtype))
axis = 0
inputs = [x, axis]
test_dynamic.test_dynamic_grad_net(inputs, is_dynamic_rank=is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_cumprod_dynamic_shape():
"""
Feature: Test the bprop process of CumProd in PyNative mode with dynamic shape inputs
Description: The inputs are dynamic shape and the bprop function invokes the operator itself.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(is_dynamic_rank=False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_cumprod_dynamic_rank():
"""
Feature: Test the bprop process of CumProd in PyNative mode with dynamic rank inputs
Description: The inputs are dynamic rank and the bprop function invokes the operator itself.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(dtype=np.float32, is_dynamic_rank=True)
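CumProd multiplies cumulatively along the given axis, and its bprop invokes the operator again, so the forward kernel's dynamic-shape support is what these cases actually exercise. A quick NumPy check of the semantics:
import numpy as np
x = np.array([1.0, 2.0, 3.0])
assert np.array_equal(np.cumprod(x), np.array([1.0, 2.0, 6.0]))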

View File

@ -0,0 +1,68 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class CumSumNet(nn.Cell):
def __init__(self):
super(CumSumNet, self).__init__()
self.op = ops.CumSum()
def construct(self, x, axis):
return self.op(x, axis)
def dyn_grad_func(dtype=np.float16, is_dynamic_rank=False):
test_dynamic = TestDynamicGrad(CumSumNet())
x = Tensor(np.random.rand(2, 3, 4, 4).astype(dtype))
axis = 0
inputs = [x, axis]
test_dynamic.test_dynamic_grad_net(inputs, is_dynamic_rank=is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_cumsum_dynamic_shape():
"""
Feature: Test the bprop process of CumSum in PyNative mode with dynamic shape inputs
Description: The inputs are dynamic shape and the bprop function invokes the operator itself.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(is_dynamic_rank=False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_cumsum_dynamic_rank():
"""
Feature: Test the bprop process of CumSum in PyNative mode with dynamic rank inputs
Description: The inputs are dynamic rank and the bprop function invokes the operator itself.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(dtype=np.float32, is_dynamic_rank=True)

View File

@ -0,0 +1,102 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import operations as P
from .test_grad_of_dynamic import TestDynamicGrad
class DivNet(nn.Cell):
def __init__(self):
super(DivNet, self).__init__()
self.div = P.Div()
def construct(self, x, y):
return self.div(x, y)
def dynamic_shape():
test_dynamic = TestDynamicGrad(DivNet())
x = Tensor(np.array([4, 4]).astype(np.float32))
y = Tensor(np.array([2, 2]).astype(np.float32))
test_dynamic.test_dynamic_grad_net((x, y))
def dynamic_rank():
test_dynamic = TestDynamicGrad(DivNet())
x = Tensor(np.array([4, 4]).astype(np.float32))
y = Tensor(np.array([2, 2]).astype(np.float32))
test_dynamic.test_dynamic_grad_net((x, y), True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_div_cpu():
"""
Feature: Div Grad DynamicShape.
Description: Test case of dynamic shape for Div grad operator on CPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
dynamic_shape()
dynamic_rank()
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_dynamic_div_gpu():
"""
Feature: Div Grad DynamicShape.
Description: Test case of dynamic shape for Div grad operator on GPU.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
dynamic_shape()
dynamic_rank()
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
def test_dynamic_div_ascend():
"""
Feature: Div Grad DynamicShape.
Description: Test case of dynamic shape for Div grad operator on Ascend.
Expectation: success.
"""
# Graph mode
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()
# PyNative mode
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
dynamic_shape()
dynamic_rank()
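The analytic gradients of z = x / y are dz/dx = 1 / y and dz/dy = -x / y**2, so the expected values for this data can be checked by hand:
# With x = [4, 4], y = [2, 2] and an all-ones upstream gradient:
# dx = 1 / y = [0.5, 0.5]
# dy = -x / y**2 = [-1.0, -1.0]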

View File

@ -0,0 +1,62 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations import _inner_ops as inner
from .test_grad_of_dynamic import TestDynamicGrad
class Net(nn.Cell):
def __init__(self, ksizes, strides, rates, padding="valid"):
super(Net, self).__init__()
self.extractimagepatches = inner.ExtractImagePatches(ksizes, strides, rates, padding)
def construct(self, input_tensor):
return self.extractimagepatches(input_tensor)
def extract_image_patches_test(is_dyn_rank):
net = Net([1, 1, 2, 4], [1, 1, 7, 5], [1, 1, 2, 1], "valid")
input_tensor = Tensor(np.arange(360).reshape(3, 2, 6, 10).astype(np.float32))
tester = TestDynamicGrad(net)
tester.test_dynamic_grad_net([input_tensor], is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_extract_image_patches_dyn_shape():
"""
Feature: ExtractImagePatches Grad DynamicShape.
Description: Test case of dynamic shape for ExtractImagePatches grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
extract_image_patches_test(False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_extract_image_patches_dyn_rank():
"""
Feature: ExtractImagePatches Grad DynamicRank.
Description: Test case of dynamic rank for ExtractImagePatches grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
extract_image_patches_test(True)

View File

@ -0,0 +1,61 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class Net(nn.Cell):
def __init__(self, ksizes, strides, padding="valid"):
super(Net, self).__init__()
self.extractvolumepatches = ops.ExtractVolumePatches(ksizes, strides, padding)
def construct(self, input_tensor):
return self.extractvolumepatches(input_tensor)
def extract_volume_patches_test(is_dyn_rank):
net = Net([1, 1, 2, 2, 2], [1, 1, 1, 1, 1], "VALID")
input_tensor = Tensor(np.random.rand(1, 1, 3, 3, 3).astype(np.float16))
tester = TestDynamicGrad(net)
tester.test_dynamic_grad_net([input_tensor], is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_extract_volume_patches_dyn_shape():
"""
Feature: ExtractVolumePatches Grad DynamicShape.
Description: Test case of dynamic shape for ExtractVolumePatches grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
extract_volume_patches_test(False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_extract_volume_patches_dyn_rank():
"""
Feature: ExtractVolumePatches Grad DynamicRank.
Description: Test case of dynamic rank for ExtractVolumePatches grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
extract_volume_patches_test(True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class FloorNet(nn.Cell):
def __init__(self):
super(FloorNet, self).__init__()
self.floor = ops.Floor()
def construct(self, x):
return self.floor(x)
def floor_test(is_dyn_rank):
x = Tensor(np.random.randn(32, 8, 32).astype(np.float32))
tester = TestDynamicGrad(FloorNet())
tester.test_dynamic_grad_net([x], is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
def test_floor_dyn_shape():
"""
Feature: Floor Grad DynamicShape.
Description: Test case of dynamic shape for Floor grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
floor_test(False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
def test_floor_dyn_rank():
"""
Feature: Floor Grad DynamicRank.
Description: Test case of dynamic rank for Floor grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
floor_test(True)

View File

@ -0,0 +1,65 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class GatherNdNet(nn.Cell):
def __init__(self):
super(GatherNdNet, self).__init__()
self.gathernd = ops.GatherNd()
def construct(self, x, indices):
return self.gathernd(x, indices)
def gathernd_test(is_dyn_rank):
x = Tensor(np.random.randn(3, 2).astype(np.float32))
indices = Tensor(np.array([[1, 1], [0, 1]]).astype(np.int32))
tester = TestDynamicGrad(GatherNdNet(), skip_convert_out_ids=[0])
tester.test_dynamic_grad_net([x, indices], is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
def test_gathernd_dyn_shape():
"""
Feature: GatherNd Grad DynamicShape.
Description: Test case of dynamic shape for GatherND grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
gathernd_test(False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
def test_gathernd_dyn_rank():
"""
Feature: GatherNd Grad DynamicRank.
Description: Test case of dynamic rank for GatherND grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
gathernd_test(True)
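GatherNd reads one element per coordinate row, and its gradient scatters the incoming values back into a zeros-like input. For the test data:
# indices = [[1, 1], [0, 1]] selects [x[1, 1], x[0, 1]];
# the grad w.r.t. x is zero everywhere except those two positions,
# which receive the matching entries of the upstream gradient.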

View File

@ -0,0 +1,62 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore as ms
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import MatrixSolve
from .test_grad_of_dynamic import TestDynamicGrad
class NetMatrixSolve(nn.Cell):
def __init__(self):
super(NetMatrixSolve, self).__init__()
self.sol = MatrixSolve()
def construct(self, matrix, rhs):
return self.sol(matrix, rhs)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bprop_matrix_solve_dynamic_shape():
"""
Features: ensure that matrix_solve can support [dynamic shape] while undergoing its gradient backpropagation (bprop)
Description: the test hides the complete shape information so that operators can only infer the exact shape at runtime
Expectation: success
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetMatrixSolve(), skip_convert_out_ids=[0])
x = Tensor([[5, 4], [3, 1]], ms.float32)
rhs = Tensor([[7], [2]], ms.float32)
test_dynamic.test_dynamic_grad_net([x, rhs])
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_bprop_matrix_solve_dynamic_rank():
"""
Features: ensure that matrix_solve can support [dynamic rank] while undergoing its gradient backpropagation (bprop)
Description: the test hides the rank information (the number of dimensions of a tensor) so that operators
can only infer the exact shape at runtime
Expectation: success
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetMatrixSolve(), skip_convert_out_ids=[0])
x = Tensor([[5, 4], [3, 1]], ms.float32)
rhs = Tensor([[7], [2]], ms.float32)
test_dynamic.test_dynamic_grad_net([x, rhs], True)
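The two cases differ only in how much of the input signature is hidden. A minimal sketch of the two placeholder styles, assuming the Tensor shape-placeholder API these suites rely on:
import mindspore as ms
from mindspore import Tensor
dyn_shape = Tensor(shape=[None, None], dtype=ms.float32)  # rank fixed at 2, dims unknown
dyn_rank = Tensor(shape=None, dtype=ms.float32)           # even the rank is unknown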

View File

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import ops, nn, context, Tensor
@ -108,7 +107,14 @@ def output_compare(outputs, expects):
if not isinstance(expects, (list, tuple)):
expects = [expects]
assert all(list(map(lambda x, y: np.allclose(x.asnumpy(), y.asnumpy()), outputs, expects)))
def _make_sense(element):
if not isinstance(element, Tensor):
return Tensor(element)
return element
new_outputs = [_make_sense(e) for e in outputs]
new_expects = [_make_sense(e) for e in expects]
assert all(list(map(lambda x, y: np.allclose(x.asnumpy(), y.asnumpy()), new_outputs, new_expects)))
class TestDynamicGrad:
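The reworked output_compare wraps plain Python scalars in Tensor before calling np.allclose, so bprops that return a mix of Tensors and scalars compare cleanly; a short usage sketch:
# Mixed Tensor / scalar outputs no longer need manual wrapping:
outputs = [Tensor(np.array([1.0, 2.0])), 3.0]
expects = [Tensor(np.array([1.0, 2.0])), Tensor(3.0)]
output_compare(outputs, expects)  # passes: 3.0 is wrapped by _make_sense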

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore import Tensor
from mindspore import context
from mindspore.ops.operations.math_ops import Igamma
from .test_grad_of_dynamic import TestDynamicGrad
class IgammaNet(nn.Cell):
def __init__(self):
super().__init__()
self.igamma = Igamma()
def construct(self, a, x):
return self.igamma(a, x)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_shape_igamma():
"""
Feature: Igamma Grad DynamicShape.
Description: Test case of dynamic shape for Igamma grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
x_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
test_dynamic = TestDynamicGrad(IgammaNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(a_np), Tensor(x_np)], False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_rank_igamma():
"""
Feature: Igamma Grad DynamicRank.
Description: Test case of dynamic rank for Igamma grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
x_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
test_dynamic = TestDynamicGrad(IgammaNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(a_np), Tensor(x_np)], True)

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore import Tensor
from mindspore import context
from mindspore.ops.operations.math_ops import Igammac
from .test_grad_of_dynamic import TestDynamicGrad
class IgammacNet(nn.Cell):
def __init__(self):
super().__init__()
self.igammac = Igammac()
def construct(self, a, x):
return self.igammac(a, x)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_shape_igammac():
"""
Feature: Igammac Grad DynamicShape.
Description: Test case of dynamic shape for Igammac grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
x_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
test_dynamic = TestDynamicGrad(IgammacNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(a_np), Tensor(x_np)], False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_rank_igammac():
"""
Feature: Igammac Grad DynamicRank.
Description: Test case of dynamic rank for Igammac grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
x_np = np.array([[10, 22], [20, 50]]).astype(np.float32)
test_dynamic = TestDynamicGrad(IgammacNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(a_np), Tensor(x_np)], True)

View File

@ -0,0 +1,76 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore import context
from .test_grad_of_dynamic import TestDynamicGrad
class IndexFillNet(nn.Cell):
def __init__(self):
super(IndexFillNet, self).__init__()
self.index_fill = P.array_ops.IndexFill()
def construct(self, x, dim, index, value):
out = self.index_fill(x, dim, index, value)
return out
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_shape_index_fill():
"""
Feature: IndexFill Grad DynamicShape.
Description: Test case of dynamic shape for IndexFill grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
dim_type = np.int64
data_type = np.int32
dim = Tensor(np.array(1, dtype=dim_type))
value = Tensor(np.array(-10, dtype=data_type))
x_np = np.random.random(size=(5, 5, 5)).astype(data_type)
index_np = np.random.randint(low=0, high=5, size=4).astype(np.int32)
test_dynamic = TestDynamicGrad(IndexFillNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(x_np), dim, Tensor(index_np), value], False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_rank_index_fill():
"""
Feature: IndexFill Grad DynamicRank.
Description: Test case of dynamic rank for IndexFill grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
dim_type = np.int64
data_type = np.int32
dim = Tensor(np.array(1, dtype=dim_type))
value = Tensor(np.array(-10, dtype=data_type))
x_np = np.random.random(size=(5, 5, 5)).astype(data_type)
index_np = np.random.randint(low=0, high=5, size=4).astype(np.int32)
test_dynamic = TestDynamicGrad(IndexFillNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(x_np), dim, Tensor(index_np), value], True)

View File

@ -0,0 +1,94 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore import context
from .test_grad_of_dynamic import TestDynamicGrad
class KLDivLossNet(nn.Cell):
def __init__(self, reduction="none"):
super(KLDivLossNet, self).__init__()
self.kl_div_loss = P.KLDivLoss(reduction)
def construct(self, x, y):
return self.kl_div_loss(x, y)
def grad_dyn_case(is_dynamic_rank, reduction="none"):
prediction = Tensor(np.log(np.array([[0.3, 0.7], [0.5, 0.5]])).astype(np.float32))
target = Tensor(np.array([[-1, 1], [1, -1]]).astype(np.float32))
test_dynamic = TestDynamicGrad(KLDivLossNet(reduction))
test_dynamic.test_dynamic_grad_net([prediction, target], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test KLDivLoss grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
grad_dyn_case(False, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test KLDivLoss grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
grad_dyn_case(True, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test KLDivLoss grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
grad_dyn_case(False, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test KLDivLoss grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
grad_dyn_case(True, "mean")

View File

@ -0,0 +1,222 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations import Lerp
from .test_grad_of_dynamic import TestDynamicGrad
class NetLerp(nn.Cell):
def __init__(self):
super(NetLerp, self).__init__()
self.lerp = Lerp()
def construct(self, x, y, z):
return self.lerp(x, y, z)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetLerp())
x = np.array([[1., -1., 2.], [3.1, 2, 1.]], dtype=np.float32)
y = np.array([[1.2, -1., 2.1], [3., 2., 1.1]], dtype=np.float32)
z = np.array([[1., -1.2, 0.9], [0.1, 2., 1.]], dtype=np.float32)
test_dynamic.test_dynamic_grad_net([Tensor(x), Tensor(y), Tensor(z)], is_dynamic_rank)
def grad_partial_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetLerp())
test_dynamic.skip_convert_in_ids = [2]
x = np.array([[1., -1., 2.], [3.1, 2, 1.]], dtype=np.float32)
y = np.array([[1.2, -1., 2.1], [3., 2., 1.1]], dtype=np.float32)
z = np.array([[1., -1.2, 0.9], [0.1, 2., 1.]], dtype=np.float32)
test_dynamic.test_dynamic_grad_net([Tensor(x), Tensor(y), Tensor(z)], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_1():
"""
Feature: test Lerp grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_1():
"""
Feature: test Lerp grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_2():
"""
Feature: test Lerp grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_2():
"""
Feature: test Lerp grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape_1():
"""
Feature: test Lerp grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank_1():
"""
Feature: test Lerp grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape_2():
"""
Feature: test Lerp grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank_2():
"""
Feature: test Lerp grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_shape_1():
"""
Feature: test Lerp grad dynamic shape on Ascend.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_rank_1():
"""
Feature: test Lerp grad dynamic rank on Ascend.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_shape_2():
"""
Feature: test Lerp grad dynamic shape on Ascend.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_rank_2():
"""
Feature: test Lerp grad dynamic rank on Ascend.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad_dyn_case(True)
grad_partial_dyn_case(True)
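Lerp computes start + weight * (end - start) elementwise, so the expected forward values can be verified by hand; for the first element of the test data:
# x[0][0] = 1.0, y[0][0] = 1.2, z[0][0] = 1.0
# lerp = 1.0 + 1.0 * (1.2 - 1.0) = 1.2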

View File

@ -0,0 +1,66 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore import context
from .test_grad_of_dynamic import TestDynamicGrad
class LogMatrixDeterminantNet(nn.Cell):
def __init__(self):
super(LogMatrixDeterminantNet, self).__init__()
self.log_matrix_determinant = P.LogMatrixDeterminant()
def construct(self, input_x):
output = self.log_matrix_determinant(input_x)
return output
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_shape_log_matrix_determinant():
"""
Feature: LogMatrixDeterminant Grad DynamicShape.
Description: Test case of dynamic shape for LogMatrixDeterminant grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
input_x = np.random.random((4, 4)).astype(np.float32)
test_dynamic = TestDynamicGrad(LogMatrixDeterminantNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(input_x)], False)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
def test_dynamic_rank_log_matrix_determinant():
"""
Feature: LogMatrixDeterminant Grad DynamicRank.
Description: Test case of dynamic rank for LogMatrixDeterminant grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
input_x = np.random.random((4, 4)).astype(np.float32)
test_dynamic = TestDynamicGrad(LogMatrixDeterminantNet())
test_dynamic.test_dynamic_grad_net(
[Tensor(input_x)], True)

View File

@ -0,0 +1,65 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import MatrixExp
from .test_grad_of_dynamic import TestDynamicGrad
class NetMatrixExp(nn.Cell):
def __init__(self):
super(NetMatrixExp, self).__init__()
self.matrix_exp = MatrixExp()
def construct(self, x):
return self.matrix_exp(x)
def matrix_exp_test(is_dyn_rank):
x = Tensor(np.array([[-1.0, 4.0], [2.0, -5.0]]).astype(np.float16))
tester = TestDynamicGrad(NetMatrixExp())
tester.test_dynamic_grad_net(x, is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_matrix_exp_dyn_shape():
"""
Feature: MatrixExp Grad DynamicShape.
Description: Test case of dynamic shape for MatrixExp grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
matrix_exp_test(False)
context.set_context(mode=context.GRAPH_MODE)
matrix_exp_test(False)
@pytest.mark.level2
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def test_matrix_exp_dyn_rank():
"""
Feature: MatrixExp Grad DynamicRank.
Description: Test case of dynamic rank for MatrixExp grad operator.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
matrix_exp_test(True)
context.set_context(mode=context.GRAPH_MODE)
matrix_exp_test(True)

View File

@ -0,0 +1,73 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore import Tensor, context
from mindspore.ops.operations.array_ops import MatrixSetDiagV3
from mindspore import dtype as mstype
from .test_grad_of_dynamic import TestDynamicGrad
class MatrixSetDiagV3Net(nn.Cell):
def __init__(self):
super(MatrixSetDiagV3Net, self).__init__()
self.matrix_set_diag_v3 = MatrixSetDiagV3()
def construct(self, x, diagonal, k):
return self.matrix_set_diag_v3(x, diagonal, k)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(MatrixSetDiagV3Net())
input_x = Tensor(np.array([[[5, 5, 5, 5],
[5, 5, 5, 5],
[5, 5, 5, 5]],
[[5, 5, 5, 5],
[5, 5, 5, 5],
[5, 5, 5, 5]]]), mstype.float32)
diagonal = Tensor(np.array([[1, 2, 3],
[4, 5, 6]]), mstype.float32)
k = Tensor(1, mstype.int32)
test_dynamic.test_dynamic_grad_net([input_x, diagonal, k], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_shape():
"""
Feature: test MatrixSetDiagV3 dynamic shape on GPU and CPU.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_rank():
"""
Feature: test MatrixSetDiagV3 dynamic rank on GPU and CPU.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)
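k selects which diagonal is written: 0 is the main diagonal and positive values move upward. With k = 1, the first superdiagonal of each 3x4 batch matrix is replaced; a hand check for the first batch (diagonal row [1, 2, 3]):
# [[5, 1, 5, 5],
#  [5, 5, 2, 5],
#  [5, 5, 5, 3]]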

View File

@ -0,0 +1,83 @@
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops.operations import _grad_ops as G
from mindspore import Tensor
from mindspore import context
from .test_grad_of_dynamic import TestDynamicGrad
class NetPoolGrad(nn.Cell):
def __init__(self):
super(NetPoolGrad, self).__init__()
self.maxpool_grad_fun = G.MaxPoolGrad(pad_mode="VALID",
kernel_size=2,
strides=2)
def construct(self, x, a, d):
return self.maxpool_grad_fun(x, a, d)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_mod_shape():
"""
Feature: MaxPoolGrad DynamicShape.
Description: Test case of dynamic shape for the MaxPoolGrad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetPoolGrad())
x = Tensor(np.array([[[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]).astype(np.float32))
d = Tensor(np.array([[[
[3, 3, 3],
[3, 3, 3],
[3, 3, 3]
]]]).astype(np.float32))
a = Tensor(np.array([[[
[7, 9, 11],
[19, 21, 23],
[31, 33, 35]
]]]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, a, d])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_mod_rank():
"""
Feature: MaxPoolGrad DynamicRank.
Description: Test case of dynamic rank for the MaxPoolGrad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetPoolGrad())
x = Tensor(np.array([[[
[0, 1, 2, 3, 4, 5],
[6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]
]]]).astype(np.float32))
d = Tensor(np.array([[[
[3, 3, 3],
[3, 3, 3],
[3, 3, 3]
]]]).astype(np.float32))
a = Tensor(np.array([[[
[7, 9, 11],
[19, 21, 23],
[31, 33, 35]
]]]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, a, d], True)
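MaxPoolGrad consumes the forward input, the forward output, and the upstream gradient, in that order. A hedged reconstruction of how a and d relate to a forward MaxPool with the same attributes:
from mindspore import ops
maxpool = ops.MaxPool(pad_mode="VALID", kernel_size=2, strides=2)
a = maxpool(x)  # 3x3 window maxima of the 6x6 ramp: [[7, 9, 11], [19, 21, 23], [31, 33, 35]]
d = Tensor(np.full((1, 1, 3, 3), 3.0, np.float32))  # stand-in upstream gradient of threes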

View File

@ -0,0 +1,49 @@
import numpy as np
import pytest
import mindspore as mp
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import Mod
from .test_grad_of_dynamic import TestDynamicGrad
class NetMod(nn.Cell):
def __init__(self):
super(NetMod, self).__init__()
self.mod = Mod()
def construct(self, x1, x2):
return self.mod(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_mod_shape():
"""
Feature: Mod Grad DynamicShape.
Description: Test case of dynamic shape for Mod grad operator on CPU and GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetMod())
x = Tensor(np.array([-4.0, 5.0, 6.0]), mp.float32)
y = Tensor(np.array([3.0, 2.0, 3.0]), mp.float32)
test_dynamic.test_dynamic_grad_net([x, y])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_mod_rank():
"""
Feature: Mod Grad DynamicRank.
Description: Test case of dynamic rank for the Mod grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetMod())
x = Tensor(np.array([-4.0, 5.0, 6.0]), mp.float32)
y = Tensor(np.array([3.0, 2.0, 3.0]), mp.float32)
test_dynamic.test_dynamic_grad_net([x, y], True)
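For orientation, the harness below is a minimal sketch of what a TestDynamicGrad-style helper is assumed to do: compute the gradient once with fully static shapes as a baseline, re-register the inputs as dynamic via Cell.set_inputs, and compare the two results. GradNet, check_dynamic_grad and the shape=None convention for dynamic rank are assumptions made for illustration; the actual helper lives in test_grad_of_dynamic and may differ. All inputs are assumed to be Tensors here.
import numpy as np
from mindspore import nn, ops, Tensor

class GradNet(nn.Cell):
    """Hypothetical wrapper returning gradients w.r.t. all inputs."""
    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
        self.grad_op = ops.GradOperation(get_all=True)

    def construct(self, *inputs):
        return self.grad_op(self.net)(*inputs)

def check_dynamic_grad(net, inputs, is_dynamic_rank=False):
    grad_net = GradNet(net)
    expected = grad_net(*inputs)  # baseline: fully static shapes
    # Placeholders with unknown dims; using shape=None to mark dynamic rank
    # is an assumption of this sketch.
    placeholders = [Tensor(shape=None if is_dynamic_rank else [None] * x.ndim,
                           dtype=x.dtype) for x in inputs]
    grad_net.set_inputs(*placeholders)
    actual = grad_net(*inputs)  # same data, now traced as dynamic
    for exp, act in zip(expected, actual):
        assert np.allclose(exp.asnumpy(), act.asnumpy(), rtol=1e-4, atol=1e-4)
Each test in this section then runs such a check twice, once with is_dynamic_rank=False and once with True.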

View File

@ -0,0 +1,66 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
context.set_context(mode=context.PYNATIVE_MODE)
class TestMul(nn.Cell):
def __init__(self):
super(TestMul, self).__init__()
self.ops = ops.Mul()
def construct(self, x, y):
return self.ops(x, y)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_mul_dynamic_shape():
"""
Feature: Mul Grad DynamicShape.
Description: Test case of dynamic shape for Mul grad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
test_dynamic = TestDynamicGrad(TestMul())
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
input_y = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
x = [input_x, input_y]
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_mul_dynamic_rank():
"""
Feature: Mul Grad DynamicRank.
Description: Test case of dynamic rank for Mul grad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
test_dynamic = TestDynamicGrad(TestMul())
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
input_y = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
x = [input_x, input_y]
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,49 @@
import numpy as np
import pytest
import mindspore as mp
from mindspore import nn, context, Tensor
from mindspore.ops.operations.math_ops import NextAfter
from .test_grad_of_dynamic import TestDynamicGrad
class NetNextAfter(nn.Cell):
def __init__(self):
super(NetNextAfter, self).__init__()
self.nextafter = NextAfter()
def construct(self, x1, x2):
return self.nextafter(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_nextafter_shape():
"""
Feature: NextAfter Grad DynamicShape.
Description: Test case of dynamic shape for the NextAfter grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetNextAfter())
x1 = Tensor(np.asarray([0.0]), mp.float32)
x2 = Tensor(np.asarray([0.1]), mp.float32)
test_dynamic.test_dynamic_grad_net([x1, x2])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_nextafter_rank():
"""
Feature: NextAfter Grad DynamicRank.
Description: Test case of dynamic rank for the NextAfter grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetNextAfter())
x1 = Tensor(np.asarray([0.0]), mp.float32)
x2 = Tensor(np.asarray([0.1]), mp.float32)
test_dynamic.test_dynamic_grad_net([x1, x2], True)

View File

@ -0,0 +1,53 @@
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetOneHot(nn.Cell):
def __init__(self):
super(NetOneHot, self).__init__()
self.onehot = P.OneHot()
self.depth = 3
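# depth stays a constant attribute, so only indices, on_value and off_value
# are runtime inputs in this test.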
def construct(self, indices, on_value, off_value):
return self.onehot(indices, self.depth, on_value, off_value)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_one_hot_shape():
"""
Feature: OneHot Grad DynamicShape.
Description: Test case of dynamic shape for the OneHot grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetOneHot())
indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
on_value = Tensor(np.array([1.0]).astype(np.float32))
off_value = Tensor(np.array([0.0]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([indices, on_value, off_value])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_one_hot_rank():
"""
Feature: OneHot Grad DynamicRank.
Description: Test case of dynamic rank for the OneHot grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetOneHot())
indices = Tensor(np.array([0, 1, 2]).astype(np.int32))
on_value = Tensor(np.array([1.0]).astype(np.float32))
off_value = Tensor(np.array([0.0]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([indices, on_value, off_value], True)

View File

@ -0,0 +1,63 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetPadding(nn.Cell):
def __init__(self):
super(NetPadding, self).__init__()
self.padding = P.Padding()
def construct(self, x):
return self.padding(x)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_padding_shape():
"""
Feature: Padding Grad DynamicShape.
Description: Test case of dynamic shape for the Padding grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetPadding())
x = Tensor(np.array([[11], [2]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level2
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_padding_rank():
"""
Feature: Padding Grad DynamicRank.
Description: Test case of dynamic rank for the Padding grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetPadding())
x = Tensor(np.array([[11], [2]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetPow(nn.Cell):
def __init__(self):
super(NetPow, self).__init__()
self.op = ops.Pow()
def construct(self, x, y):
return self.op(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetPow())
x = Tensor(np.array([1.0, 2.0, 4.0]).astype(np.float32))
y = Tensor(np.array([2.0, 4.0, 3.0]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test Pow dynamic shape.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test Pow dynamic rank.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetRealDiv(nn.Cell):
def __init__(self):
super(NetRealDiv, self).__init__()
self.real_div = ops.RealDiv()
def construct(self, x, y):
return self.real_div(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetRealDiv())
x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
y = Tensor(np.array([4.0, 5.0, 6.0]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test RealDiv dynamic shape.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test RealDiv dynamic rank.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetReduceAll(nn.Cell):
def __init__(self):
super(NetReduceAll, self).__init__()
self.reduceall = P.ReduceAll()
self.axis = 0
def construct(self, x):
return self.reduceall(x, self.axis)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reduceall_shape():
"""
Feature: ReduceAll Grad DynamicShape.
Description: Test case of dynamic shape for the ReduceAll grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceAll())
x = Tensor(np.random.randn(3, 4, 5).astype(np.bool_))  # np.bool_ replaces the removed np.bool alias
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reduceall_rank():
"""
Feature: ReduceAll Grad DynamicRank.
Description: Test case of dynamic rank for the ReduceAll grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceAll())
x = Tensor(np.random.randn(3, 4, 5).astype(np.bool_))  # np.bool_ replaces the removed np.bool alias
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetReduceAny(nn.Cell):
def __init__(self):
super(NetReduceAny, self).__init__()
self.reduceany = P.ReduceAny()
self.axis = 0
def construct(self, x):
return self.reduceany(x, self.axis)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reduceany_shape():
"""
Feature: ReduceAny Grad DynamicShape.
Description: Test case of dynamic shape for the ReduceAny grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceAny())
x = Tensor(np.random.randn(3, 4, 5).astype(np.bool_))  # np.bool_ replaces the removed np.bool alias
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reduceany_rank():
"""
Feature: ReduceAny Grad DynamicRank.
Description: Test case of dynamic rank for the ReduceAny grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceAny())
x = Tensor(np.random.randn(3, 4, 5).astype(np.bool_))  # np.bool_ replaces the removed np.bool alias
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetReduceMean(nn.Cell):
def __init__(self):
super(NetReduceMean, self).__init__()
self.reducemean = P.ReduceMean()
self.axis = (0,)
def construct(self, x):
return self.reducemean(x, self.axis)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reducemean_shape():
"""
Feature: ReduceMean Grad DynamicShape.
Description: Test case of dynamic shape for the ReduceMean grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceMean())
x = Tensor(np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reducemean_rank():
"""
Feature: ReduceMean Grad DynamicRank.
Description: Test case of dynamic rank for the ReduceMean grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceMean())
x = Tensor(np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetReduceSum(nn.Cell):
def __init__(self):
super(NetReduceSum, self).__init__()
self.reducesum = P.ReduceSum()
self.axis = (0,)
def construct(self, x):
return self.reducesum(x, self.axis)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reducesum_shape():
"""
Feature: ReduceSum Grad DynamicShape.
Description: Test case of dynamic shape for the ReduceSum grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceSum())
x = Tensor(np.random.randn(3, 4, 5).astype(np.float32))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_reducesum_rank():
"""
Feature: ReduceSum Grad DynamicRank.
Description: Test case of dynamic rank for the ReduceSum grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetReduceSum())
x = Tensor(np.random.randn(3, 4, 5).astype(np.float32))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,64 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as mp
from mindspore import nn, context, Tensor
from mindspore.ops.operations.array_ops import ResizeNearestNeighborV2
from .test_grad_of_dynamic import TestDynamicGrad
class NetResizeNearestNeighborV2(nn.Cell):
def __init__(self):
super(NetResizeNearestNeighborV2, self).__init__()
self.resize_nearest_neighbor_v2 = ResizeNearestNeighborV2()
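# size is a runtime Tensor input, so the output shape is only known at execution time.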
def construct(self, input_tensor, size):
return self.resize_nearest_neighbor_v2(input_tensor, size)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_resize_nearest_neighbor_v2_shape():
"""
Feature: ResizeNearestNeighborV2 Grad DynamicShape.
Description: Test case of dynamic shape for the ResizeNearestNeighborV2 grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetResizeNearestNeighborV2())
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(np.float32))
size = Tensor([2, 2], mp.int32)
test_dynamic.test_dynamic_grad_net([input_tensor, size])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_resize_nearest_neighbor_v2_rank():
"""
Feature: ResizeNearestNeighborV2 Grad DynamicRank.
Description: Test case of dynamic rank for the ResizeNearestNeighborV2 grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetResizeNearestNeighborV2())
input_tensor = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]]).astype(np.float32))
size = Tensor([2, 2], mp.int32)
test_dynamic.test_dynamic_grad_net([input_tensor, size], True)

View File

@ -0,0 +1,50 @@
import numpy as np
import pytest
from mindspore import nn
from mindspore import context
from mindspore.ops.operations.array_ops import SegmentMean
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetSegmentMean(nn.Cell):
def __init__(self):
super(NetSegmentMean, self).__init__()
self.segmentmean = SegmentMean()
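# SegmentMean averages rows sharing the same id; segment_ids must be non-decreasing.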
def construct(self, x, segment_ids):
return self.segmentmean(x, segment_ids)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_segment_mean_shape():
"""
Feature: SegmentMean Grad DynamicShape.
Description: Test case of dynamic shape for the SegmentMean grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetSegmentMean())
x = Tensor(np.array([2, 2, 3, 4]).astype(np.float32))
segment_ids = Tensor(np.array([0, 0, 1, 2]).astype(np.int32))
test_dynamic.test_dynamic_grad_net([x, segment_ids])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_segment_mean_rank():
"""
Feature: SegmentMean Grad DynamicRank.
Description: Test case of dynamic rank for the SegmentMean grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetSegmentMean())
x = Tensor(np.array([2, 2, 3, 4]).astype(np.float32))
segment_ids = Tensor(np.array([0, 0, 1, 2]).astype(np.int32))
test_dynamic.test_dynamic_grad_net([x, segment_ids], True)

View File

@ -61,3 +61,29 @@ def test_cpu_grad_dynamic_rank():
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test SparseSegmentSqrtN dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test SparseSegmentSqrtN dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)

View File

@ -62,3 +62,29 @@ def test_cpu_grad_dynamic_rank():
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test SparseSegmentSqrtNWithNumSegments dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test SparseSegmentSqrtNWithNumSegments dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)

View File

@ -0,0 +1,90 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore as ms
from mindspore import nn, context, Tensor
from mindspore.ops.operations.sparse_ops import SparseTensorDenseAdd
from .test_grad_of_dynamic import TestDynamicGrad
class NetSparseTensorDenseAdd(nn.Cell):
def __init__(self):
super(NetSparseTensorDenseAdd, self).__init__()
self.sparse_tensor_dense_add = SparseTensorDenseAdd()
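# x1 is a COO sparse tensor given as (indices, values, dense_shape); x2 is the dense addend.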
def construct(self, x1_indices, x1_values, x1_shape, x2):
return self.sparse_tensor_dense_add(x1_indices, x1_values, x1_shape, x2)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetSparseTensorDenseAdd())
x1_indices = Tensor([[0, 0], [0, 1]], dtype=ms.int64)
x1_values = Tensor([1, 1], dtype=ms.float32)
x1_shape = Tensor([3, 3], dtype=ms.int64)
x2 = Tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=ms.float32)
test_dynamic.test_dynamic_grad_net((x1_indices, x1_values, x1_shape, x2), is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test SparseTensorDenseAdd dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test SparseTensorDenseAdd dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test SparseTensorDenseAdd dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test SparseTensorDenseAdd dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)

View File

@ -0,0 +1,57 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore as ms
from mindspore import nn, context, Tensor
from mindspore.ops.operations.sparse_ops import SparseTensorDenseMatmul
from .test_grad_of_dynamic import TestDynamicGrad
class NetSparseTensorDenseMatmul(nn.Cell):
def __init__(self):
super(NetSparseTensorDenseMatmul, self).__init__()
self.sparse_tensor_dense_matmul = SparseTensorDenseMatmul()
def construct(self, x1_indices, x1_values, x1_shape, x2):
return self.sparse_tensor_dense_matmul(x1_indices, x1_values, x1_shape, x2)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetSparseTensorDenseMatmul())
x1_indices = Tensor([[0, 1], [1, 2]], dtype=ms.int32)
x1_values = Tensor([1, 2], dtype=ms.float32)
x1_shape = (3, 4)
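# Unlike the SparseTensorDenseAdd test, x1_shape here is a static Python tuple rather than a Tensor.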
x2 = Tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=ms.float32)
test_dynamic.test_dynamic_grad_net((x1_indices, x1_values, x1_shape, x2), is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test SparseTensorDenseMatmul dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test SparseTensorDenseMatmul dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -62,6 +62,9 @@ def test_gpu_grad_dynamic_rank():
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test SquaredDifference dynamic shape on CPU.
@ -72,6 +75,9 @@ def test_cpu_grad_dynamic_shape():
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test SquaredDifference dynamic rank on CPU.

View File

@ -0,0 +1,63 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import context
from mindspore import Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetSqueeze(nn.Cell):
def __init__(self):
super(NetSqueeze, self).__init__()
self.squeeze = P.Squeeze()
def construct(self, x):
return self.squeeze(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_squeeze_shape():
"""
Feature: Squeeze Grad DynamicShape.
Description: Test case of dynamic shape for the Squeeze grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetSqueeze())
x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dynamic_squeeze_rank():
"""
Feature: Squeeze Grad DynamicRank.
Description: Test case of dynamic rank for the Squeeze grad operator on GPU and CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE)
test_dynamic = TestDynamicGrad(NetSqueeze())
x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]).astype(np.float16))
test_dynamic.test_dynamic_grad_net(x, True)

View File

@ -0,0 +1,85 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import ops, nn, context, Tensor
from mindspore.ops.operations import _grad_ops as G
from .test_grad_of_dynamic import TestDynamicGrad
class StridedSliceGradNet(nn.Cell):
def __init__(self):
super(StridedSliceGradNet, self).__init__()
self.op = G.StridedSliceGrad()
self.shape_op = ops.Shape()
self.dyn_shape_op = ops.TensorShape()
def construct(self, dy, x, begin, end, strides):
x_shape = self.shape_op(x)
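# A dim of -1 (unknown size) or -2 (unknown rank) means the static Shape
# result is unusable, so fall back to TensorShape, which resolves at runtime.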
if -1 in x_shape or -2 in x_shape:
x_shape = self.dyn_shape_op(x)
return self.op(dy, x_shape, begin, end, strides)
def dyn_grad_func(dtype=np.float16, is_dynamic_rank=False):
test_dynamic = TestDynamicGrad(StridedSliceGradNet())
dy = Tensor(np.ones((2, 1, 1)).astype(dtype))
x = Tensor(
np.array(
[
[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]],
]
).astype(dtype)
)
begin = (1, 0, 2)
end = (3, 1, 3)
strides = (1, 1, 1)
inputs = [dy, x, begin, end, strides]
test_dynamic.test_dynamic_grad_net(inputs, is_dynamic_rank=is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_stridedslicegrad_dynamic_shape():
"""
Feature: Test the bprop process of StridedSliceGrad in PyNative mode with dynamic shape inputs
Description: The inputs are dynamic shape and the bprop function invokes the operator StridedSlice.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(dtype=np.float32, is_dynamic_rank=False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_stridedslicegrad_dynamic_rank():
"""
Feature: Test the bprop process of StridedSliceGrad in PyNative mode with dynamic rank inputs
Description: The inputs are dynamic rank and the bprop function invokes the operator StridedSlice.
Expectation: Assert the result is equal to that of static shape inputs
"""
context.set_context(mode=context.PYNATIVE_MODE)
dyn_grad_func(dtype=np.float32, is_dynamic_rank=True)

View File

@ -0,0 +1,144 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations import linalg_ops
from .test_grad_of_dynamic import TestDynamicGrad
class SvdNet(nn.Cell):
def __init__(self):
super(SvdNet, self).__init__()
self.svd = linalg_ops.Svd(full_matrices=False, compute_uv=False)
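# With compute_uv=False only the singular values s are meaningful, so u and v are discarded.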
def construct(self, a):
s, _, _ = self.svd(a)
return s
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(SvdNet())
np.random.seed(1)
a = np.random.rand(3, 2).astype(np.float32)
tensor_a = Tensor(a)
test_dynamic.test_dynamic_grad_net(tensor_a, is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test Svd grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test Svd grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.skip(reason="MatrixDiagV3 does not support a dynamic `num_rows` in GRAPH_MODE")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape_2():
"""
Feature: test Svd grad dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank_2():
"""
Feature: test Svd grad dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test Svd grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test Svd grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.skip(reason="MatrixDiagV3 does not support a dynamic `num_rows` in GRAPH_MODE")
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape_2():
"""
Feature: test Svd grad dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank_2():
"""
Feature: test Svd grad dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
grad_dyn_case(True)

View File

@ -0,0 +1,101 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
from mindspore import ops, nn, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class TestMax(nn.Cell):
def __init__(self):
super(TestMax, self).__init__()
self.ops = ops.TensorScatterMax()
def construct(self, input_x, indices, updates):
return self.ops(input_x, indices, updates)
class TestMin(nn.Cell):
def __init__(self):
super(TestMin, self).__init__()
self.ops = ops.TensorScatterMin()
def construct(self, input_x, indices, updates):
return self.ops(input_x, indices, updates)
def tensor_scatter_max_min_dynamic_shape(is_dyn_rank=False):
input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
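# Both index pairs point at element (0, 0), so the max/min reduction over
# duplicate indices is exercised.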
updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
x = [input_x, indices, updates]
test_dynamic_max = TestDynamicGrad(TestMax())
test_dynamic_max.test_dynamic_grad_net(x, is_dyn_rank)
test_dynamic_min = TestDynamicGrad(TestMin())
test_dynamic_min.test_dynamic_grad_net(x, is_dyn_rank)
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_gpu_tensor_scatter_max_min_dynamic_shape():
"""
Feature: TensorScatterMax/Min Grad DynamicShape.
Description: Test case of dynamic shape for TensorScatterMax/Min grad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
tensor_scatter_max_min_dynamic_shape()
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
def test_gpu_tensor_scatter_max_min_dynamic_rank():
"""
Feature: TensorScatterMax/Min Grad DynamicRank.
Description: Test case of dynamic rank for TensorScatterMax/Min grad operator on GPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
tensor_scatter_max_min_dynamic_shape(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_tensor_scatter_max_min_dynamic_shape():
"""
Feature: TensorScatterMax/Min Grad DynamicShape.
Description: Test case of dynamic shape for TensorScatterMax/Min grad operator on CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
tensor_scatter_max_min_dynamic_shape()
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_tensor_scatter_max_min_dynamic_rank():
"""
Feature: TensorScatterMax/Min Grad DynamicRank.
Description: Test case of dynamic rank for TensorScatterMax/Min grad operator on CPU.
Expectation: success.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
tensor_scatter_max_min_dynamic_shape(True)

View File

@ -0,0 +1,107 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore import context
from .test_grad_of_dynamic import TestDynamicGrad
class UnsortedSegmentMinNet(nn.Cell):
def __init__(self, num_segments):
super(UnsortedSegmentMinNet, self).__init__()
self.unsorted_segment_min = P.UnsortedSegmentMin()
self.num_segments = num_segments
def construct(self, data, ids):
return self.unsorted_segment_min(data, ids, self.num_segments)
def grad_dyn_case(is_dynamic_rank):
num_segments = 2
test_dynamic = TestDynamicGrad(UnsortedSegmentMinNet(num_segments))
input_x = Tensor(
np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
test_dynamic.test_dynamic_grad_net([input_x, segment_ids], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_shape():
"""
Feature: test UnsortedSegmentMin dynamic shape on GPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_grad_dynamic_rank():
"""
Feature: test UnsortedSegmentMin dynamic rank on GPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_shape():
"""
Feature: test UnsortedSegmentMin dynamic shape on CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cpu_grad_dynamic_rank():
"""
Feature: test UnsortedSegmentMin dynamic rank on CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
grad_dyn_case(True)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_shape():
"""
Feature: test UnsortedSegmentMin dynamic shape on Ascend.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(False)

View File

@ -0,0 +1,68 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn
from mindspore.ops import operations as P
from mindspore import Tensor, context
from .test_grad_of_dynamic import TestDynamicGrad
class UnsortedSegmentProdNet(nn.Cell):
def __init__(self, num_segments):
super(UnsortedSegmentProdNet, self).__init__()
self.unsorted_segment_prod = P.UnsortedSegmentProd()
self.num_segments = num_segments
def construct(self, data, ids):
return self.unsorted_segment_prod(data, ids, self.num_segments)
def grad_dyn_case(is_dynamic_rank):
num_segments = 2
test_dynamic = TestDynamicGrad(UnsortedSegmentProdNet(num_segments))
input_x = Tensor(
np.array([[1, 2, 3], [4, 5, 6], [4, 2, 1]]).astype(np.float32))
segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
test_dynamic.test_dynamic_grad_net([input_x, segment_ids], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unsorted_segment_prod_dynamic_shape():
"""
Feature: test UnsortedSegmentProd dynamic shape on GPU, CPU.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unsorted_segment_prod_dynamic_rank():
"""
Feature: test UnsortedSegmentProd dynamic rank on GPU, CPU.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetXdivy(nn.Cell):
def __init__(self):
super(NetXdivy, self).__init__()
self.op = ops.Xdivy()
def construct(self, x, y):
return self.op(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetXdivy())
x = Tensor(np.array([2, 4, -1]).astype(np.float32))
y = Tensor(np.array([2, 2, 2]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test Xdivy dynamic shape.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test Xdivy dynamic rank.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)

View File

@ -0,0 +1,67 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, ops, context, Tensor
from .test_grad_of_dynamic import TestDynamicGrad
class NetXlogy(nn.Cell):
def __init__(self):
super(NetXlogy, self).__init__()
self.op = ops.Xlogy()
def construct(self, x, y):
return self.op(x, y)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetXlogy())
x = Tensor(np.array([-5, 0, 4]).astype(np.float32))
y = Tensor(np.array([2, 2, 2]).astype(np.float32))
test_dynamic.test_dynamic_grad_net([x, y], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test Xlogy dynamic shape.
Description: input is dynamic shape.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test Xlogy dynamic rank.
Description: input is dynamic rank.
Expectation: the result matches that of static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)