yolov5 dynamic ops
Commit 5adef70624 (parent d1c989ae85)
@@ -33,6 +33,7 @@
 "mindspore/mindspore/ccsrc/frontend/parallel/graph_util/graph_splitter.cc" "knownConditionTrueFalse"
 "mindspore/mindspore/ccsrc/backend/graph_compiler/backend.cc" "knownConditionTrueFalse"
 "mindspore/mindspore/ccsrc/backend/graph_compiler/backend.cc" "variableScope"
+"mindspore/mindspore/core/ops/max_pool.cc" "zerodivcond"
 
 # MindData
 "mindspore/mindspore/ccsrc/minddata/dataset/engine/dataset_iterator.cc" "useStlAlgorithm"
@@ -57,7 +57,8 @@ void ConvertMakeTupleInputToPlantInputs(const FuncGraphPtr &graph, const CNodePt
   MS_EXCEPTION_IF_NULL(cnode_ptr);
   MS_EXCEPTION_IF_NULL(graph);
   if (common::AnfAlgo::CheckPrimitiveType(cnode_ptr, prim::kPrimCall) ||
-      common::AnfAlgo::CheckPrimitiveType(cnode_ptr, prim::kPrimPartial)) {
+      common::AnfAlgo::CheckPrimitiveType(cnode_ptr, prim::kPrimPartial) ||
+      common::AnfAlgo::CheckPrimitiveType(cnode_ptr, prim::kPrimResizeNearestNeighborGrad)) {
     return;
   }
   std::vector<AnfNodePtr> plant_inputs;
@@ -137,6 +137,11 @@ const AnfNodePtr MaxPool2MaxPoolWithArgmax::Process(const FuncGraphPtr &graph, c
   MS_EXCEPTION_IF_NULL(maxpool_grad);
   auto maxpool = GetMaxPool(maxpool_grad);
   MS_EXCEPTION_IF_NULL(maxpool);
+  if (common::AnfAlgo::IsDynamicShape(maxpool)) {
+    // MaxPoolWithArgmax and MaxPoolGradWithArgmax don't support dynamic shape yet, so skip the fusion;
+    // this check can be removed once they do.
+    return nullptr;
+  }
+
   auto maxpool_argmax = CreateMaxPoolWithArgmax(graph, maxpool);
   std::vector<AnfNodePtr> maxpool_argmax_outputs;
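The bail-out above follows MindSpore's convention that a dynamic dimension is recorded as -1. The same guard is easy to state outside the C++ pass; a minimal Python sketch (is_dynamic and try_fuse are hypothetical names, not MindSpore APIs):

def is_dynamic(shape):
    # MindSpore marks unknown dimensions with -1 in shape vectors.
    return any(dim == -1 for dim in shape)

def try_fuse(node_shape):
    # Mirror of the early `return nullptr` above: skip the
    # MaxPool -> MaxPoolWithArgmax rewrite when any dim is unknown.
    if is_dynamic(node_shape):
        return None
    return "fused"

assert try_fuse([32, 16, -1, 8]) is None  # dynamic input: fusion skipped
assert try_fuse([32, 16, 32, 8]) == "fused"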
@@ -101,6 +101,19 @@ class ResizeNearestNeighborGpuKernelMod : public DeprecatedNativeGpuKernelMod {
     input_size_list_.push_back(input_size_);
     output_size_list_.push_back(output_size_);
   }
+  void ResetResource() override {
+    input_size_list_.clear();
+    output_size_list_.clear();
+    workspace_size_list_.clear();
+    input_shape_.clear();
+    output_shape_.clear();
+    align_corners_ = false;
+    is_null_input_ = false;
+    shape_size_ = 0;
+    input_size_ = 0;
+    output_size_ = 0;
+    workspace_size_ = 0;
+  }
 
  private:
   float Scaling(const int in_size, const int out_size, bool align_corners) {
@@ -27,5 +27,17 @@ MS_REG_GPU_KERNEL_ONE(ResizeNearestNeighborGrad,
 MS_REG_GPU_KERNEL_ONE(ResizeNearestNeighborGrad,
                       KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
                       ResizeNearestNeighborGradGpuKernelMod, int)
+MS_REG_GPU_KERNEL_ONE(
+  ResizeNearestNeighborGrad,
+  KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat32),
+  ResizeNearestNeighborGradGpuKernelMod, float)
+MS_REG_GPU_KERNEL_ONE(
+  ResizeNearestNeighborGrad,
+  KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat16),
+  ResizeNearestNeighborGradGpuKernelMod, half)
+MS_REG_GPU_KERNEL_ONE(
+  ResizeNearestNeighborGrad,
+  KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt32),
+  ResizeNearestNeighborGradGpuKernelMod, int)
 } // namespace kernel
 } // namespace mindspore
@@ -18,12 +18,16 @@
 #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ARRAYS_RESIZE_NEAREST_NEIGHBOR_GRAD_GPU_KERNEL_H_
 
 #include <vector>
+#include <algorithm>
 #include "plugin/device/gpu/kernel/gpu_kernel.h"
 #include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
 #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/resize_nearest_neighbor_grad_impl.cuh"
 
 namespace mindspore {
 namespace kernel {
+constexpr size_t kInputNumOne = 1;
+constexpr size_t kInputNumTwo = 2;
+constexpr size_t kSecondInputSize = 2;
 template <typename T>
 class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMod {
  public:
@@ -33,7 +37,8 @@ class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMo
         shape_size_(0),
         input_size_(0),
         output_size_(0),
-        workspace_size_(0) {}
+        workspace_size_(0),
+        input_num_(0) {}
   ~ResizeNearestNeighborGradGpuKernelMod() override = default;
 
   bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
@@ -54,10 +59,12 @@ class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMo
 
   bool Init(const CNodePtr &kernel_node) override {
     auto kernel_name = common::AnfAlgo::GetCNodeName(kernel_node);
-    size_t input_num = common::AnfAlgo::GetInputTensorNum(kernel_node);
+    input_num_ = common::AnfAlgo::GetInputTensorNum(kernel_node);
     kernel_node_ = kernel_node;
-    if (input_num != 1) {
-      MS_LOG(EXCEPTION) << "For '" << kernel_name << "', the number of inputs must be 1, but got " << input_num;
+    if (input_num_ != kInputNumOne && input_num_ != kInputNumTwo) {
+      MS_LOG(EXCEPTION) << "For '" << kernel_name
+                        << "', the number of inputs must be 1 (static shape) or 2 (dynamic shape), but got "
+                        << input_num_;
     }
     size_t output_num = common::AnfAlgo::GetOutputTensorNum(kernel_node);
     if (output_num != 1) {
@@ -68,7 +75,7 @@ class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMo
     auto output_shape = common::AnfAlgo::GetOutputInferShape(kernel_node, 0);
     is_null_input_ =
       CHECK_SHAPE_NULL(input_shape, kernel_name, "input") || CHECK_SHAPE_NULL(output_shape, kernel_name, "output");
-    if (is_null_input_) {
+    if (is_null_input_ || IsDynamic(output_shape)) {
       InitSizeLists();
       return true;
     }
@@ -106,9 +113,28 @@ class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMo
  protected:
   void InitSizeLists() override {
     input_size_list_.push_back(input_size_);
+    if (input_num_ == kInputNumTwo) {
+      // the second input holds 2 int64_t shape values
+      input_size_list_.push_back(sizeof(int64_t) * kSecondInputSize);
+    }
     output_size_list_.push_back(output_size_);
   }
 
+  void ResetResource() override {
+    input_size_list_.clear();
+    output_size_list_.clear();
+    workspace_size_list_.clear();
+    input_shape_.clear();
+    output_shape_.clear();
+    align_corners_ = false;
+    is_null_input_ = false;
+    shape_size_ = 0;
+    input_size_ = 0;
+    output_size_ = 0;
+    workspace_size_ = 0;
+    input_num_ = 0;
+  }
+
  private:
   float Scaling(const int in_size, const int out_size, bool align_corners) {
     return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1)
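The Scaling helper seen in the context lines computes the standard resize scale factor: with align_corners and more than one output point, the corner pixels map exactly, giving (in - 1) / (out - 1); otherwise it is the plain ratio in / out. A small Python restatement of the same formula:

def scaling(in_size: int, out_size: int, align_corners: bool) -> float:
    # Corner-aligned resize spreads (in_size - 1) source intervals
    # over (out_size - 1) output intervals.
    if align_corners and out_size > 1:
        return (in_size - 1) / (out_size - 1)
    return in_size / out_size

print(scaling(4, 2, True))   # 3.0: positions 0 and 3 map to the corners
print(scaling(4, 2, False))  # 2.0: plain ratio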
@@ -123,6 +149,7 @@ class ResizeNearestNeighborGradGpuKernelMod : public DeprecatedNativeGpuKernelMo
   size_t input_size_;
   size_t output_size_;
   size_t workspace_size_;
+  size_t input_num_;
 };
 } // namespace kernel
 } // namespace mindspore
@@ -172,10 +172,13 @@ class PoolingFwdGpuKernelMod : public DeprecatedNativeGpuKernelMod {
  protected:
   void InitResource() {
     cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle();
+
     CHECK_CUDNN_RET_WITH_EXCEPT(kernel_node_, cudnnCreateTensorDescriptor(&input_descriptor_),
                                 "cudnnCreateTensorDescriptor failed");
+
     CHECK_CUDNN_RET_WITH_EXCEPT(kernel_node_, cudnnCreateTensorDescriptor(&output_descriptor_),
                                 "cudnnCreateTensorDescriptor failed");
+
     CHECK_CUDNN_RET_WITH_EXCEPT(kernel_node_, cudnnCreatePoolingDescriptor(&pooling_descriptor_),
                                 "cudnnCreatePoolingDescriptor failed");
   }
@@ -192,6 +195,12 @@ class PoolingFwdGpuKernelMod : public DeprecatedNativeGpuKernelMod {
     output_size_list_.push_back(output_size_);
     workspace_size_list_.push_back(output_size_);
   }
+  void ResetResource() override {
+    input_size_list_.clear();
+    output_size_list_.clear();
+    workspace_size_list_.clear();
+    stride_.clear();
+  }
 
  private:
   void CheckParam(const CNodePtr &kernel_node) {
@@ -84,6 +84,16 @@ class SigmoidCrossEntropyWithLogitsGpuKernelMod : public DeprecatedNativeGpuKern
     output_size_list_.push_back(outputs_size_);
   }
 
+  void ResetResource() override {
+    input_size_list_.clear();
+    output_size_list_.clear();
+    workspace_size_list_.clear();
+    logits_size_ = 0;
+    labels_size_ = 0;
+    outputs_size_ = 0;
+    is_null_input_ = false;
+  }
+
  private:
   size_t logits_size_;
   size_t labels_size_;
@@ -83,6 +83,16 @@ class SigmoidCrossEntropyWithLogitsGradGpuKernelMod : public DeprecatedNativeGpu
     output_size_list_.push_back(outputs_size_);
   }
 
+  void ResetResource() override {
+    input_size_list_.clear();
+    output_size_list_.clear();
+    workspace_size_list_.clear();
+    logits_size_ = 0;
+    labels_size_ = 0;
+    outputs_size_ = 0;
+    is_null_input_ = false;
+  }
+
  private:
   size_t logits_size_;
   size_t labels_size_;
@@ -46,6 +46,8 @@
 #include "ops/rpc_recv.h"
 #include "ops/rpc_send.h"
 #include "ops/tensor_scatter_arithmetic.h"
+#include "ops/max_pool.h"
+#include "ops/grad/max_pool_grad.h"
 
 namespace mindspore {
 namespace abstract {
@@ -102,6 +104,7 @@ PrimShapeDependMap &GetHostDependsMap() {
   static const auto &kSspaddmm = prim::kPrimSspaddmm->name();
   static const auto &kBartlettWindow = prim::kPrimBartlettWindow->name();
   static const auto &kExtractGlimpse = prim::kPrimExtractGlimpse->name();
+  static const auto &kResizeNearestNeighborGrad = prim::kPrimResizeNearestNeighborGrad->name();
 
   // Common host depends.
   static PrimShapeDependMap host_depends{{kExtractGlimpse, ShapeSet{1}},
@@ -154,7 +157,8 @@ PrimShapeDependMap &GetHostDependsMap() {
                                          {kBlackmanWindow, ShapeSet{0}},
                                          {kExpand, ShapeSet{1}},
                                          {kSspaddmm, ShapeSet{0, 2, 3, 5, 7}},
-                                         {kBartlettWindow, ShapeSet{0}}};
+                                         {kBartlettWindow, ShapeSet{0}},
+                                         {kResizeNearestNeighborGrad, ShapeSet{1}}};
   return host_depends;
 }
 std::set<int64_t> GetDependsFormMap(const std::string &prim_name, size_t input_num) {
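An entry such as {kResizeNearestNeighborGrad, ShapeSet{1}} declares that input index 1 (the target size) is value-dependent: its contents, not just its shape, must be available on the host before output-shape inference can run. Conceptually the map is just op name -> set of input indices; a hypothetical Python mirror of the lookup:

# Hypothetical mirror of GetHostDependsMap(): op name -> indices whose
# *values* shape inference needs on the host.
host_depends = {
    "ExtractGlimpse": {1},
    "ResizeNearestNeighborGrad": {1},  # input 1 carries the (h, w) size
}

def depends_for(prim_name, input_num):
    # Report only indices that exist for this particular call.
    return {i for i in host_depends.get(prim_name, set()) if i < input_num}

print(depends_for("ResizeNearestNeighborGrad", 2))  # {1}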
@@ -388,6 +392,8 @@ PrimitiveEvalImplMap &GetPrimitiveToBackendEvalImplMap() {
     {prim::kPrimAdamApplyOne, R{InferImplAdamApplyOne, nullptr, true}},
     {prim::kPrimAdamApplyOneWithDecay, R{InferImplAdamApplyOneWithDecay, nullptr, true}},
     {prim::kPrimTensorScatterUpdate, R{ops::TensorScatterArithmeticInfer, nullptr, true}},
+    {prim::kPrimMaxPool, R{ops::MaxPoolInfer, nullptr, true}},
+    {prim::kPrimMaxPoolGrad, R{ops::MaxPoolGradInfer, nullptr, true}},
   };
   return prim_backend_eval_implement_map;
 }
@@ -21,7 +21,31 @@
 
 namespace mindspore {
 namespace ops {
+namespace {
+abstract::ShapePtr MaxPoolGradInferShape(const PrimitivePtr &primitive,
+                                         const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
+  auto shape = std::make_shared<abstract::Shape>(shape_map[kShape], shape_map[kMinShape], shape_map[kMaxShape]);
+  return shape;
+}
+TypePtr MaxPoolGradInferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
+    MS_LOG(EXCEPTION) << "For '" << primitive->name()
+                      << "', the input args used for infer shape and type are necessary, but missing.";
+  }
+
+  return input_args[0]->BuildType();
+}
+}  // namespace
 MIND_API_OPERATOR_IMPL(MaxPoolGrad, PoolGrad);
 REGISTER_PRIMITIVE_C(kNameMaxPoolGrad, MaxPoolGrad);
+abstract::AbstractBasePtr MaxPoolGradInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                           const std::vector<abstract::AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto infer_type = MaxPoolGradInferType(primitive, input_args);
+  auto infer_shape = MaxPoolGradInferShape(primitive, input_args);
+  return abstract::MakeAbstract(infer_shape, infer_type);
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(MaxPoolGrad, prim::kPrimMaxPoolGrad, MaxPoolGradInfer, nullptr, true);
 } // namespace ops
 } // namespace mindspore
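Because MaxPoolGrad produces the gradient with respect to the pooling input, its output shares the first argument's shape (including min/max shapes in the dynamic case) and dtype; the infer functions above simply forward them. As a one-line statement of the contract (a hypothetical dict-based sketch, not MindSpore's actual abstract types):

def maxpool_grad_infer(x_abstract):
    # Gradient-of-input inherits x's shape and dtype unchanged.
    return {"shape": x_abstract["shape"], "dtype": x_abstract["dtype"]}

print(maxpool_grad_infer({"shape": [32, 16, -1, 8], "dtype": "float32"}))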
@@ -36,7 +36,29 @@ abstract::ShapePtr ResizeNearestNeighborGradInferShape(const PrimitivePtr &primi
   auto grad_shape = grad_shape_ptr->shape();
   auto size_ptr = input_args[1]->BuildValue();
   MS_EXCEPTION_IF_NULL(size_ptr);
-  std::vector<int64_t> size_v = GetValue<std::vector<int64_t>>(size_ptr);
+
+  std::vector<int64_t> size_v;
+  if (size_ptr->isa<tensor::Tensor>()) {
+    auto size_tensor = size_ptr->cast<tensor::TensorPtr>();
+    MS_EXCEPTION_IF_NULL(size_tensor);
+    size_t data_size = size_tensor->DataSize();
+    auto tensor_data = reinterpret_cast<int64_t *>(size_tensor->data_c());
+    MS_EXCEPTION_IF_NULL(tensor_data);
+    for (size_t i = 0; i < data_size; ++i) {
+      size_v.push_back(static_cast<int64_t>(*tensor_data));
+      ++tensor_data;
+    }
+  } else if (size_ptr->isa<ValueTuple>()) {
+    std::vector<ValuePtr> size_vec = size_ptr->cast<ValueTuplePtr>()->value();
+    std::transform(size_vec.begin(), size_vec.end(), std::back_inserter(size_v),
+                   [](const ValuePtr e) { return GetValue<int64_t>(e); });
+  } else if (size_ptr->isa<AnyValue>()) {
+    size_v.push_back(-1);
+    size_v.push_back(-1);
+  } else {
+    size_v = GetValue<std::vector<int64_t>>(size_ptr);
+  }
+
   std::vector<int64_t> ret_shape;
   ret_shape.push_back(grad_shape[0]);
   ret_shape.push_back(grad_shape[1]);
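The new branches accept the size operand in whatever form shape inference encounters it: a materialized Tensor (values fed at runtime), a ValueTuple literal, or an AnyValue whose contents are not yet known, which degrades to (-1, -1). A rough Python analogue of that dispatch (read_size is illustrative; None stands in for C++ AnyValue):

def read_size(value):
    # Tensor-like (has .tolist()), tuple/list literal, or unknown.
    if value is None:
        return [-1, -1]  # value unknown until runtime: keep dims dynamic
    if hasattr(value, "tolist"):
        return [int(v) for v in value.tolist()]
    return [int(v) for v in value]

print(read_size((2, 2)))  # [2, 2]
print(read_size(None))    # [-1, -1]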
@@ -44,14 +66,18 @@ abstract::ShapePtr ResizeNearestNeighborGradInferShape(const PrimitivePtr &primi
   if (grad_shape_ptr->IsDynamic()) {
     auto grad_min_shape = grad_shape_ptr->min_shape();
     std::vector<int64_t> ret_min_shape;
-    ret_min_shape.push_back(grad_min_shape[0]);
-    ret_min_shape.push_back(grad_min_shape[1]);
-    ret_min_shape.insert(ret_min_shape.end(), size_v.begin(), size_v.end());
+    if (!grad_min_shape.empty()) {
+      ret_min_shape.push_back(grad_min_shape[0]);
+      ret_min_shape.push_back(grad_min_shape[1]);
+      ret_min_shape.insert(ret_min_shape.end(), size_v.begin(), size_v.end());
+    }
     auto grad_max_shape = grad_shape_ptr->max_shape();
     std::vector<int64_t> ret_max_shape;
-    ret_max_shape.push_back(grad_max_shape[0]);
-    ret_max_shape.push_back(grad_max_shape[1]);
-    ret_max_shape.insert(ret_max_shape.end(), size_v.begin(), size_v.end());
+    if (!grad_max_shape.empty()) {
+      ret_max_shape.push_back(grad_max_shape[0]);
+      ret_max_shape.push_back(grad_max_shape[1]);
+      ret_max_shape.insert(ret_max_shape.end(), size_v.begin(), size_v.end());
+    }
     return std::make_shared<abstract::Shape>(ret_shape, ret_min_shape, ret_max_shape);
   }
   return std::make_shared<abstract::Shape>(ret_shape);
@@ -20,6 +20,7 @@
 #include <memory>
 #include <set>
 #include <vector>
 #include <cmath>
 #include "ops/op_utils.h"
 #include "utils/check_convert_utils.h"
+#include "abstract/ops/primitive_infer_map.h"
@@ -27,6 +28,11 @@
 
 namespace mindspore {
 namespace ops {
+constexpr size_t kSizeFour = 4;
+constexpr size_t kIndex0 = 0;
+constexpr size_t kIndex1 = 1;
+constexpr size_t kIndex2 = 2;
+constexpr size_t kIndex3 = 3;
 void MaxPool::set_pad_mode(const PadMode &pad_mode) {
   int64_t swi = pad_mode;
   (void)this->AddAttr(kPadMode, api::MakeValue(swi));
@@ -80,7 +86,127 @@ void MaxPool::Init(const std::vector<int64_t> &kernel_size, const std::vector<in
   this->set_round_mode(round_mode);
 }
 
+namespace {
+void ConvertShapeNHWCToNCHW(std::vector<int64_t> *nhwc_shape) {
+  if (nhwc_shape->empty()) {
+    return;
+  }
+  if (nhwc_shape->size() != kSizeFour) {
+    MS_EXCEPTION(ValueError) << "The size of shape should be 4, but got " << nhwc_shape->size();
+  }
+  int64_t tmp = (*nhwc_shape)[kIndex3];
+  (*nhwc_shape)[kIndex3] = (*nhwc_shape)[kIndex2];
+  (*nhwc_shape)[kIndex2] = (*nhwc_shape)[kIndex1];
+  (*nhwc_shape)[kIndex1] = tmp;
+}
+
+int64_t CeilDiv(int64_t a, int64_t b) {
+  if (b == 0) {
+    MS_EXCEPTION(ValueError) << "The number can not be divided by zero.";
+  }
+  int64_t result = a / b;
+  if (a % b != 0) {
+    result += 1;
+  }
+  return result;
+}
+
+abstract::ShapePtr MaxPoolInferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  MS_EXCEPTION_IF_NULL(primitive);
+  auto op_name = primitive->name();
+  std::vector<int64_t> kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
+  std::vector<int64_t> strides = GetValue<std::vector<int64_t>>(primitive->GetAttr(kStrides));
+  int64_t data_format = CheckAndConvertUtils::GetAndCheckFormat(primitive->GetAttr(kFormat));
+  int64_t pad_mode = 0;
+  CheckAndConvertUtils::GetPadModEnumValue(primitive->GetAttr(kPadMode), &pad_mode, true);
+
+  (void)CheckAndConvertUtils::CheckValue<size_t>("length of kernel_size", kernel_size.size(), kEqual, kSizeFour,
+                                                 op_name);
+  (void)CheckAndConvertUtils::CheckValue<size_t>("length of strides", strides.size(), kEqual, kSizeFour, op_name);
+
+  auto shape_map = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape());
+  if (shape_map.empty()) {
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the input should exist, but it is missing.";
+  }
+  auto in_shape = shape_map[kShape];
+  auto min_shape = shape_map[kMinShape];
+  auto max_shape = shape_map[kMaxShape];
+  (void)CheckAndConvertUtils::CheckValue<size_t>("length of input", in_shape.size(), kEqual, kSizeFour, op_name);
+
+  if (data_format == NHWC) {
+    ConvertShapeNHWCToNCHW(&in_shape);
+    ConvertShapeNHWCToNCHW(&min_shape);
+    ConvertShapeNHWCToNCHW(&max_shape);
+  } else if (data_format != NCHW) {
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the input format should be NCHW or NHWC, but got "
+                             << data_format << ".";
+  }
+
+  int64_t out_h = 0, out_w = 0, out_h_min = 0, out_w_min = 0, out_h_max = 0, out_w_max = 0;
+  if (pad_mode == PadMode::SAME) {
+    out_h = in_shape[kIndex2] == -1 ? -1 : CeilDiv(in_shape[kIndex2], strides[kIndex2]);
+    out_w = in_shape[kIndex3] == -1 ? -1 : CeilDiv(in_shape[kIndex3], strides[kIndex3]);
+    if (!min_shape.empty()) {
+      out_h_min = CeilDiv(min_shape[kIndex2], strides[kIndex2]);
+      out_w_min = CeilDiv(min_shape[kIndex3], strides[kIndex3]);
+    }
+    if (!max_shape.empty()) {
+      out_h_max = CeilDiv(max_shape[kIndex2], strides[kIndex2]);
+      out_w_max = CeilDiv(max_shape[kIndex3], strides[kIndex3]);
+    }
+  } else if (pad_mode == PadMode::VALID) {
+    out_h = in_shape[kIndex2] == -1 ? -1 : CeilDiv((in_shape[kIndex2] - (kernel_size[kIndex2] - 1)), strides[kIndex2]);
+    out_w = in_shape[kIndex3] == -1 ? -1 : CeilDiv((in_shape[kIndex3] - (kernel_size[kIndex3] - 1)), strides[kIndex3]);
+    if (!min_shape.empty()) {
+      out_h_min = CeilDiv((min_shape[kIndex2] - (kernel_size[kIndex2] - 1)), strides[kIndex2]);
+      out_w_min = CeilDiv((min_shape[kIndex3] - (kernel_size[kIndex3] - 1)), strides[kIndex3]);
+    }
+    if (!max_shape.empty()) {
+      out_h_max = CeilDiv((max_shape[kIndex2] - (kernel_size[kIndex2] - 1)), strides[kIndex2]);
+      out_w_max = CeilDiv((max_shape[kIndex3] - (kernel_size[kIndex3] - 1)), strides[kIndex3]);
+    }
+  } else {
+    MS_EXCEPTION(ValueError) << "For '" << primitive->name() << "', the pad_mode should be same or valid, but got "
+                             << pad_mode << ".";
+  }
+  abstract::ShapePtr shape;
+  if (data_format == NHWC) {
+    std::vector<int64_t> out_shape = {in_shape[kIndex0], out_h, out_w, in_shape[kIndex1]};
+    if (!min_shape.empty() && !max_shape.empty()) {
+      std::vector<int64_t> out_shape_min = {min_shape[kIndex0], out_h_min, out_w_min, min_shape[kIndex1]};
+      std::vector<int64_t> out_shape_max = {max_shape[kIndex0], out_h_max, out_w_max, max_shape[kIndex1]};
+      shape = std::make_shared<abstract::Shape>(out_shape, out_shape_min, out_shape_max);
+    } else {
+      shape = std::make_shared<abstract::Shape>(out_shape);
+    }
+  } else {
+    std::vector<int64_t> out_shape = {in_shape[kIndex0], in_shape[kIndex1], out_h, out_w};
+    if (!min_shape.empty() && !max_shape.empty()) {
+      std::vector<int64_t> out_shape_min = {min_shape[kIndex0], min_shape[kIndex1], out_h_min, out_w_min};
+      std::vector<int64_t> out_shape_max = {max_shape[kIndex0], max_shape[kIndex1], out_h_max, out_w_max};
+      shape = std::make_shared<abstract::Shape>(out_shape, out_shape_min, out_shape_max);
+    } else {
+      shape = std::make_shared<abstract::Shape>(out_shape);
+    }
+  }
+  return shape;
+}
+
+TypePtr MaxPoolInferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
+  if (std::any_of(input_args.begin(), input_args.end(), [](const AbstractBasePtr &a) { return a == nullptr; })) {
+    MS_EXCEPTION(TypeError) << "For '" << primitive->name()
+                            << "', the input args used for infer shape and type are necessary, but missing.";
+  }
+  return input_args[0]->BuildType();
+}
+}  // namespace
 MIND_API_OPERATOR_IMPL(MaxPool, BaseOperator);
 REGISTER_PRIMITIVE_C(kNameMaxPool, MaxPool);
+abstract::AbstractBasePtr MaxPoolInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                       const std::vector<abstract::AbstractBasePtr> &input_args) {
+  TypePtr type = MaxPoolInferType(primitive, input_args);
+  abstract::ShapePtr shape = MaxPoolInferShape(primitive, input_args);
+  return abstract::MakeAbstract(shape, type);
+}
+REGISTER_PRIMITIVE_EVAL_IMPL(MaxPool, prim::kPrimMaxPool, MaxPoolInfer, nullptr, true);
 } // namespace ops
 } // namespace mindspore
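The output-size arithmetic above reduces to two formulas per spatial dimension, with -1 passed through untouched: SAME gives out = ceil(in / stride), while VALID uses only complete windows, out = ceil((in - (k - 1)) / stride). A compact Python check of both, using math.ceil where the C++ uses CeilDiv:

import math

def pool_out(in_size, kernel, stride, pad_mode):
    if in_size == -1:  # dynamic dimension stays dynamic
        return -1
    if pad_mode == "SAME":
        return math.ceil(in_size / stride)
    if pad_mode == "VALID":
        return math.ceil((in_size - (kernel - 1)) / stride)
    raise ValueError(pad_mode)

assert pool_out(32, 2, 1, "VALID") == 31
assert pool_out(32, 2, 1, "SAME") == 32
assert pool_out(-1, 2, 1, "SAME") == -1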
@@ -54,7 +54,7 @@ abstract::ShapePtr ResizeNearestNeighborInferShape(const PrimitivePtr &primitive
   auto x_shape_ptr = CheckAndConvertUtils::GetTensorInputShape(prim_name, input_args, 0);
   auto x_shape = x_shape_ptr->shape();
   ValuePtr size_ptr;
-  if (x_shape_ptr->IsDynamic()) {
+  if (x_shape_ptr->IsDynamic() && input_args.size() > 1) {
     size_ptr = input_args[1]->BuildValue();
   } else {
     size_ptr = primitive->GetAttr(kSize);
@@ -72,10 +72,12 @@ abstract::ShapePtr ResizeNearestNeighborInferShape(const PrimitivePtr &primitive
   if (x_shape_ptr->IsDynamic()) {
     auto x_min_shape = x_shape_ptr->min_shape();
     auto x_max_shape = x_shape_ptr->max_shape();
-    x_min_shape.erase(x_min_shape.begin() + size_size, x_min_shape.end());
-    x_min_shape.insert(x_min_shape.end(), size_v.begin(), size_v.end());
-    x_max_shape.erase(x_max_shape.begin() + size_size, x_max_shape.end());
-    x_max_shape.insert(x_max_shape.end(), size_v.begin(), size_v.end());
+    if (!x_min_shape.empty() && !x_max_shape.empty()) {
+      x_min_shape.erase(x_min_shape.begin() + size_size, x_min_shape.end());
+      x_min_shape.insert(x_min_shape.end(), size_v.begin(), size_v.end());
+      x_max_shape.erase(x_max_shape.begin() + size_size, x_max_shape.end());
+      x_max_shape.insert(x_max_shape.end(), size_v.begin(), size_v.end());
+    }
     return std::make_shared<abstract::Shape>(x_shape, x_min_shape, x_max_shape);
   }
   return std::make_shared<abstract::Shape>(x_shape);
@@ -767,11 +767,15 @@ def get_bprop_zeroslike(self):
 def get_bprop_resize_nearest_neighbor(self):
     """Generate bprop for ResizeNearestNeighbor"""
     op = G.ResizeNearestNeighborGrad(self.align_corners)
+    tensor_shape = P.TensorShape()
 
     def bprop(inputs, out, dout):
-        shp = shape_op(inputs)
+        if -1 in shape_op(inputs):
+            shp = tensor_shape(inputs)
+        else:
+            shp = shape_op(inputs)
         # 2 and 3 represent the height and width
-        shp = (shp[2], shp[3])
+        shp = shp[2:]
         return (op(dout, shp),)
 
     return bprop
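The reworked bprop picks its shape source at run time: shape_op (P.Shape) yields a Python tuple and suffices when every dimension is known, while tensor_shape (P.TensorShape) yields a Tensor and must be used once any dimension is -1. Replacing the tuple construction (shp[2], shp[3]) with the slice shp[2:] is what lets one line serve both cases. A plain-Python sketch of the selection logic:

def pick_hw(static_shape, runtime_shape):
    # static_shape: tuple as from Shape(); runtime_shape: stand-in for the
    # Tensor returned by TensorShape() (any indexable sequence here).
    shp = runtime_shape if -1 in static_shape else static_shape
    return shp[2:]  # slicing works on tuples and shape Tensors alike

print(pick_hw((32, 16, 8, 8), None))              # (8, 8)
print(pick_hw((32, 16, -1, 8), [32, 16, 24, 8]))  # [24, 8]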
@@ -26,12 +26,14 @@ max_pool_grad_op_info = TBERegOp("MaxPoolGrad") \
     .attr("kernel_size", "required", "listInt", "all") \
     .attr("strides", "required", "listInt", "all") \
     .attr("pad_mode", "required", "str", "all") \
-    .attr("data_format", "optional", "str", "all", "NHWC") \
+    .attr("format", "optional", "str", "all", "NHWC") \
     .input(0, "x1", False, "required", "all") \
     .input(1, "x2", False, "required", "all") \
     .input(2, "grad", False, "required", "all") \
     .output(0, "y", False, "required", "all") \
     .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \
+    .dtype_format(DataType.None_None, DataType.None_None, DataType.None_None, DataType.None_None) \
+    .dynamic_shape(True) \
     .is_dynamic_format(True) \
     .get_op_info()
 
[Eight binary diffs to serialized bprop MindIR caches omitted: the "0.1.1 MindSpore*1.8.0" zeros_like_leaf/MakeTuple bprop graphs were regenerated with renumbered CNode labels and new graph hashes.]
Binary file not shown.
Binary file not shown.
Binary file not shown.
[Three further binary bprop MindIR diffs omitted, including the Select-based bprop graph; all regenerated with renumbered CNode labels.]
@@ -1609,15 +1609,27 @@ class _Pool(PrimitiveWithInfer):
         _, _, stride_h, stride_w = self.strides
 
         if self.pad_mode == "VALID":
-            out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
-            out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
+            if input_h == -1:
+                out_h = -1
+            else:
+                out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)
+            if input_w == -1:
+                out_w = -1
+            else:
+                out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)
         elif self.pad_mode == "SAME":
-            out_h = math.ceil(input_h / stride_h)
-            out_w = math.ceil(input_w / stride_w)
+            if input_h == -1:
+                out_h = -1
+            else:
+                out_h = math.ceil(input_h / stride_h)
+            if input_w == -1:
+                out_w = -1
+            else:
+                out_w = math.ceil(input_w / stride_w)
         out_shape = [batch, channel, out_h, out_w] if self.format == "NCHW" else [batch, out_h, out_w, channel]
 
         for shape_value in out_shape:
-            if shape_value <= 0:
+            if shape_value <= 0 and shape_value != -1:
                 raise ValueError(f"For '{self.name}', each element of the output shape must be larger than 0, "
                                  f"but got output shape: {out_shape}. The input shape: {x_shape}, "
                                  f"kernel size: {self.kernel_size}, strides: {self.strides}."
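With -1 now flowing through the inferred output shape, the final validation must treat it as "unknown" rather than "invalid": only genuinely non-positive sizes (for example, a kernel larger than the input under VALID) should raise. The check in isolation:

def check_out_shape(out_shape):
    for v in out_shape:
        if v <= 0 and v != -1:  # -1 means dynamic, not invalid
            raise ValueError(f"bad output shape: {out_shape}")
    return out_shape

check_out_shape([32, 16, -1, 31])  # ok: dynamic height
# check_out_shape([32, 16, 0, 31]) would raise: kernel exceeded the input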
@@ -80,6 +80,17 @@ class GradNetWrtX(nn.Cell):
         return self.grad(self.network)(input_, output_grad)
 
 
+class GradNetWrtX2inputs(nn.Cell):
+    def __init__(self, network):
+        super(GradNetWrtX2inputs, self).__init__()
+        self.grad = ops.GradOperation(get_all=True, sens_param=True)
+        self.network = network
+
+    def construct(self, input1, input2, output_grad):
+        return self.grad(self.network)(input1, input2, output_grad)
+
+
 def comm_func(dyn_range, input_shp, data_type, op_net, num=None):
     list_data = []
     for i in dyn_range:
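GradOperation(get_all=True, sens_param=True) differentiates with respect to every positional input and takes an explicit output gradient (sensitivity) as the trailing argument, which is why construct threads output_grad through. A usage sketch against this wrapper; AddNet is a hypothetical two-input cell for illustration:

import numpy as np
import mindspore as ms
import mindspore.nn as nn

class AddNet(nn.Cell):  # hypothetical two-input net
    def construct(self, a, b):
        return a + b

grad_net = GradNetWrtX2inputs(AddNet())
a = ms.Tensor(np.ones((2, 3), np.float32))
b = ms.Tensor(np.ones((2, 3), np.float32))
sens = ms.Tensor(np.ones((2, 3), np.float32))  # explicit dL/d(out)
da, db = grad_net(a, b, sens)  # one gradient per input (get_all=True)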
@@ -282,6 +293,16 @@ class Stack(nn.Cell):
         return out
 
 
+class MaxPool(nn.Cell):
+    def __init__(self):
+        super(MaxPool, self).__init__()
+        self.maxpool = ops.MaxPool(pad_mode="VALID", kernel_size=2, strides=1)
+
+    def construct(self, x):
+        out = self.maxpool(x)
+        return out
+
+
 @pytest.mark.level0
 @pytest.mark.platform_arm_ascend_training
 @pytest.mark.platform_x86_ascend_training
@@ -451,3 +472,71 @@ def test_dynamic_stack():
     input_shape = [(None, 5)]
     net = Stack()
     comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool1():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, 16, 32, None)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool2():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, 16, None, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool3():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, None, 32, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool4():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(None, 16, 32, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
@@ -93,6 +93,42 @@ class GradNetWrtX(nn.Cell):
         return gradient_function(*inputs)
 
 
+def comm_func(dyn_range, input_shp, data_type, op_net, num=None, output_compare_idx=None):
+    list_data = []
+    for i in dyn_range:
+        tmp_data = []
+        for data_shp in input_shp:
+            if num is None:
+                cur_shp = [dim if dim is not None else i for dim in data_shp]
+            else:
+                cur_shp = []
+                k = 0
+                for dim in data_shp:
+                    if dim is not None:
+                        cur_shp.append(dim)
+                    elif k == 1:
+                        cur_shp.append(num)
+                    else:
+                        cur_shp.append(i)
+                    k = k + 1
+            tmp_data.append(np.random.random(cur_shp).astype(data_type))
+        list_data.append(tuple(tmp_data))
+
+    data_map = {}
+    for i, val in enumerate(input_shp):
+        data_map["data" + str(i + 1)] = val
+
+    dataset = ds.GeneratorDataset(list_data, list(data_map.keys()))
+    dataset.set_dynamic_columns(columns=data_map)
+
+    gradient = dynamic_shape_sink_process(op_net, dataset)
+    gradient_cmp = fixed_shape_process(op_net, dataset)
+    if output_compare_idx is None:
+        assert compare(gradient, gradient_cmp)
+    else:
+        assert compare(gradient[output_compare_idx], gradient_cmp[output_compare_idx])
+
+
 class ConcatNet(nn.Cell):
     def __init__(self, axis):
         super(ConcatNet, self).__init__()
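comm_func materializes one concrete shape per step by substituting the loop index i for each None (or num for the dimension at position 1 when num is given), runs the network once through the dynamic sink path and once with fixed shapes, and asserts the results match; output_compare_idx restricts the comparison to a single output, which the BatchNorm tests below use since only their first output is compared. The shape substitution on its own:

def concretize(shape, i, num=None):
    # Replace each None by num when it sits at index 1 and num is given,
    # otherwise by the current loop value i.
    out = []
    for k, dim in enumerate(shape):
        if dim is not None:
            out.append(dim)
        elif num is not None and k == 1:
            out.append(num)
        else:
            out.append(i)
    return out

print(concretize((None, 16, 32, None), i=5))  # [5, 16, 32, 5]
print(concretize((64, None), i=7, num=3))     # [64, 3]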
@@ -359,3 +395,229 @@ def test_dynamic_add():
     output = dynamic_shape_sink_process(net, dataset)
     output_cmp = fixed_shape_process(net, dataset)
     assert compare(output, output_cmp)
+
+
+class BatchNorm(nn.Cell):
+    def __init__(self):
+        super(BatchNorm, self).__init__()
+        self.batch_norm = ops.BatchNorm()
+
+    def construct(self, input_x, scale, bias, mean, variance):
+        out = self.batch_norm(input_x, scale, bias, mean, variance)
+        return out
+
+
+class MaxPool(nn.Cell):
+    def __init__(self):
+        super(MaxPool, self).__init__()
+        self.maxpool = ops.MaxPool(pad_mode="VALID", kernel_size=2, strides=1)
+
+    def construct(self, x):
+        out = self.maxpool(x)
+        return out
+
+
+class SigmoidCrossEntropyWithLogits(nn.Cell):
+    def __init__(self):
+        super(SigmoidCrossEntropyWithLogits, self).__init__()
+        self.op = ops.SigmoidCrossEntropyWithLogits()
+
+    def construct(self, x, y):
+        out = self.op(x, y)
+        return out
+
+
+class Sigmoid(nn.Cell):
+    def __init__(self):
+        super(Sigmoid, self).__init__()
+        self.op = ops.Sigmoid()
+
+    def construct(self, x):
+        out = self.op(x)
+        return out
+
+
+class ResizeNearestNeighbor(nn.Cell):
+    def __init__(self):
+        super(ResizeNearestNeighbor, self).__init__()
+        self.op = ops.ResizeNearestNeighbor((2, 2))
+
+    def construct(self, x):
+        out = self.op(x)
+        return out
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_batchnorm():
+    """
+    Feature: Test dynamic BatchNorm and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(None, 64), (64,), (64,), (64,), (64,)]
+    net = BatchNorm()
+    comm_func(dynamic_range, input_shape, data_type, net, output_compare_idx=0)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_batchnorm2():
+    """
+    Feature: Test dynamic BatchNorm and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(64, None), (None,), (None,), (None,), (None,)]
+    net = BatchNorm()
+    comm_func(dynamic_range, input_shape, data_type, net, output_compare_idx=0)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool1():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, 16, 32, None)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool2():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, 16, None, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool3():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(32, None, 32, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_maxpool4():
+    """
+    Feature: Test dynamic MaxPool and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(None, 16, 32, 8)]
+    net = MaxPool()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_sigmoid_cross_entropy_with_logits():
+    """
+    Feature: Test dynamic SigmoidCrossEntropyWithLogits and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(None, 16, 32, 8), (None, 16, 32, 8)]
+    net = SigmoidCrossEntropyWithLogits()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_sigmoid_cross_entropy_with_logits_grad():
+    """
+    Feature: Test dynamic SigmoidCrossEntropyWithLogitsGrad and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(4, 16, None, 8), (4, 16, None, 8), (4, 16, None, 8)]
+    net = GradNetWrtX(SigmoidCrossEntropyWithLogits())
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_sigmoid_grad():
+    """
+    Feature: Test dynamic SigmoidGrad and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(4, 16, None, 8), (4, 16, None, 8)]
+    net = GradNetWrtX(Sigmoid())
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_resize_nearest_neighbor():
+    """
+    Feature: Test dynamic ResizeNearestNeighbor and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(4, 16, None, 8)]
+    net = ResizeNearestNeighbor()
+    comm_func(dynamic_range, input_shape, data_type, net)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_resize_nearest_neighbor_grad():
+    """
+    Feature: Test dynamic ResizeNearestNeighborGrad and its backward.
+    Description: The input shape is dynamic.
+    Expectation: Assert that results are consistent with fixed shape.
+    """
+    dynamic_range = range(2, 64)
+    data_type = np.float32
+    input_shape = [(4, 16, None, 8), (4, 16, 2, 2)]
+    net = GradNetWrtX(ResizeNearestNeighbor())
+    comm_func(dynamic_range, input_shape, data_type, net)