!12092 Change L2Norm, merge from r1.1 to master

From: @liangzhibo
Reviewed-by: @ginfung
Signed-off-by:
mindspore-ci-bot 2021-02-05 14:13:12 +08:00 committed by Gitee
commit c9aaa70b39
11 changed files with 152 additions and 10 deletions

View File

@@ -23,6 +23,7 @@
#include "backend/optimizer/pass/convert_tuple_input_to_dynamic_input.h"
#include "backend/optimizer/pass/const_to_attr_strided_slice_grad.h"
#include "backend/optimizer/pass/convert_const_scalar_to_tensor.h"
#include "backend/optimizer/pass/convert_attr_to_unify_mindir.h"
#include "utils/ms_context.h"
#include "debug/anf_ir_dump.h"
@@ -41,6 +42,7 @@ void BackendCommonOptimization(const std::shared_ptr<session::KernelGraph> &kern
auto optimizer = std::make_shared<GraphOptimizer>();
auto common_pm = std::make_shared<PassManager>("common_pm");
common_pm->AddPass(std::make_shared<ConvertConstInputToAttr>());
+common_pm->AddPass(std::make_shared<ConvertAttrToUnifyMindIR>());
common_pm->AddPass(std::make_shared<ConstToAttrStridedSliceGradPass>());
common_pm->AddPass(std::make_shared<ConvertConstInputToTensorInput>());
common_pm->AddPass(std::make_shared<ConvertTupleOutputToMaketuple>());
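
The new ConvertAttrToUnifyMindIR pass is registered directly after ConvertConstInputToAttr, so attributes created from constant inputs are unified before the later passes run. A minimal pass-manager sketch of that ordering (illustrative Python only; this is not the real MindSpore PassManager API):

# Hypothetical mini pass manager: passes run in registration order, so the
# attr-unification pass sees whatever attrs the previous pass produced.
class MiniPassManager:
    def __init__(self, name):
        self.name = name
        self.passes = []
    def add_pass(self, pass_fn):
        self.passes.append(pass_fn)
    def run(self, graph):
        for pass_fn in self.passes:
            graph = pass_fn(graph) or graph  # a pass may rewrite in place
        return graph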

View File

@@ -0,0 +1,65 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/optimizer/pass/convert_attr_to_unify_mindir.h"
#include <vector>
#include <string>
#include "utils/check_convert_utils.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/common_utils.h"
namespace mindspore {
namespace opt {
const AnfNodePtr ConvertAttrToUnifyMindIR::Process(const FuncGraphPtr &, const AnfNodePtr &node,
const EquivPtr &) const {
if (node == nullptr || !AnfAlgo::IsRealCNodeKernel(node)) {
return nullptr;
}
std::vector<AnfNodePtr> todos;
if (AnfAlgo::IsGraphKernel(node)) {
auto sub_graph = AnfAlgo::GetCNodeFuncGraphPtr(node);
MS_EXCEPTION_IF_NULL(sub_graph);
kernel::GetValidKernelNodes(sub_graph, &todos);
} else {
todos.push_back(node);
}
for (auto &t : todos) {
CNodePtr cnode = t->cast<CNodePtr>();
auto inputs = cnode->inputs();
AnfNodePtr op = inputs[0];
if (IsValueNode<Primitive>(op)) {
auto prim = GetValueNode<PrimitivePtr>(op);
auto attrs = prim->attrs();
std::string type_name = prim->name();
for (auto attr : attrs) {
bool converted = CheckAndConvertUtils::ConvertAttrValueToString(type_name, attr.first, &attr.second);
if (converted) {
prim->set_attr(attr.first, attr.second);
}
bool converted_ir_attr = CheckAndConvertUtils::CheckIrAttrtoOpAttr(type_name, attr.first, &attr.second);
if (converted_ir_attr) {
prim->set_attr(attr.first, attr.second);
}
}
}
}
return node;
}
} // namespace opt
} // namespace mindspore
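
Process above visits every real kernel node (descending into graph kernel subgraphs) and, for each primitive attribute, applies two conversions in turn: enum value to string (e.g. data_format) and IR attr to op attr. A rough Python sketch of that loop, with hypothetical converter callables standing in for the real CheckAndConvertUtils calls:

# Illustrative analogue of ConvertAttrToUnifyMindIR::Process; the converter
# callables are stand-ins that return a (converted, new_value) pair.
def unify_attrs(op_name, attrs, convert_to_string, convert_ir_to_op):
    for name in list(attrs):
        converted, value = convert_to_string(op_name, name, attrs[name])
        if converted:
            attrs[name] = value   # e.g. a data_format enum code -> "NCHW"
        converted, value = convert_ir_to_op(op_name, name, attrs[name])
        if converted:
            attrs[name] = value   # e.g. axis [1] -> 1 for the backend op
    return attrs

The same routine is repeated almost verbatim for PyNative single-op execution further below.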

View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_ATTR_TO_UNIFY_MINDIR_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_ATTR_TO_UNIFY_MINDIR_H_
#include "ir/anf.h"
#include "backend/optimizer/common/optimizer.h"
namespace mindspore {
namespace opt {
class ConvertAttrToUnifyMindIR : public PatternProcessPass {
public:
explicit ConvertAttrToUnifyMindIR(bool multigraph = true)
: PatternProcessPass("convert_attr_to_unify_mindir", multigraph) {}
~ConvertAttrToUnifyMindIR() override = default;
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_CONVERT_ATTR_TO_UNIFY_MINDIR_H_

View File

@@ -296,8 +296,8 @@ Strategys PrepareL2Normalize(const std::vector<std::shared_ptr<OperatorInfo>> &o
auto iter = ops[iter_ops]->attrs().find(AXIS);
if (iter != ops[iter_ops]->attrs().end()) {
MS_EXCEPTION_IF_NULL(iter->second);
-if (iter->second->isa<Int64Imm>()) {
-axis = iter->second->cast<Int64ImmPtr>()->value();
+if (iter->second->isa<ValueSequeue>()) {
+axis = GetValue<std::vector<int64_t>>(iter->second)[0];
} else {
MS_LOG(EXCEPTION) << ops[iter_ops]->name() << " : The value of axis is not int64_t.";
}

View File

@@ -51,8 +51,8 @@ Status L2NormalizeInfo::GetAttrs() {
auto iter = attrs_.find(AXIS);
if (iter != attrs_.end()) {
MS_EXCEPTION_IF_NULL(iter->second);
-if (iter->second->isa<Int64Imm>()) {
-axis_ = iter->second->cast<Int64ImmPtr>()->value();
+if (iter->second->isa<ValueSequeue>()) {
+axis_ = GetValue<std::vector<int64_t>>(iter->second)[0];
} else {
MS_LOG(ERROR) << name_ << " : The value of axis is not int64_t.";
return FAILED;
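
Both auto-parallel call sites (PrepareL2Normalize above and L2NormalizeInfo::GetAttrs here) now read axis as a value sequence and take its first element, matching the frontend change further below that always stores axis as a single-element list. A hedged Python sketch of the new extraction (the real code works on MindSpore Value objects, not plain dicts):

# Sketch of the new axis handling under the single-element-list convention.
def get_single_axis(attrs, op_name):
    value = attrs.get("axis")
    if isinstance(value, (list, tuple)):   # axis now arrives as a sequence
        return value[0]                    # only one axis is supported so far
    raise TypeError(op_name + ": the value of axis is not a value sequence.")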

View File

@@ -33,6 +33,7 @@
#include "utils/any.h"
#include "utils/utils.h"
#include "utils/ms_context.h"
#include "utils/check_convert_utils.h"
#include "utils/context/context_extends.h"
#include "utils/config_manager.h"
#include "utils/convert_utils_py.h"
@@ -458,6 +459,27 @@ void ConstructInputTensor(const OpExecInfoPtr &op_run_info, std::vector<int64_t>
op_prim->EndRecordAddAttr();
}
+void ConvertAttrToUnifyMindIR(const OpExecInfoPtr &op_run_info) {
+MS_EXCEPTION_IF_NULL(op_run_info);
+PrimitivePtr op_prim = op_run_info->py_primitive;
+MS_EXCEPTION_IF_NULL(op_prim);
+std::string op_name = op_run_info->op_name;
+auto attrs = op_prim->attrs();
+for (auto attr : attrs) {
+bool converted = CheckAndConvertUtils::ConvertAttrValueToString(op_name, attr.first, &attr.second);
+if (converted) {
+op_prim->set_attr(attr.first, attr.second);
+}
+bool converted_ir_attr = CheckAndConvertUtils::CheckIrAttrtoOpAttr(op_name, attr.first, &attr.second);
+if (converted_ir_attr) {
+op_prim->set_attr(attr.first, attr.second);
+}
+}
+}
BaseRef TransformBaseRefListToTuple(const BaseRef &base_ref) {
if (utils::isa<VectorRef>(base_ref)) {
auto ref_list = utils::cast<VectorRef>(base_ref);
@@ -1425,6 +1447,7 @@ py::object PynativeExecutor::RunOpInMs(const OpExecInfoPtr &op_exec_info, Pynati
std::vector<tensor::TensorPtr> input_tensors;
std::vector<int64_t> tensors_mask;
ConstructInputTensor(op_exec_info, &tensors_mask, &input_tensors);
+ConvertAttrToUnifyMindIR(op_exec_info);
// get graph info for checking it whether existing in the cache
std::string graph_info = GetSingleOpGraphInfo(op_exec_info, input_tensors);
#if defined(__APPLE__)

View File

@@ -14,6 +14,7 @@
* limitations under the License.
*/
#include "transform/graph_ir/op_adapter.h"
#include "utils/check_convert_utils.h"
namespace mindspore {
namespace transform {
@@ -567,6 +568,9 @@ int OpAdapterImpl::SetNormalOpAttr(const OperatorPtr &op, const PrimitivePtr &pr
for (auto &it : attr_map_) {
auto value = prim->GetAttr(it.first);
if (value != nullptr) {
+// convert parts of attr to str eg. data_format or change ir attr to op attr eg. axis[0]
+CheckAndConvertUtils::ConvertAttrValueToString(prim->name(), it.first, &value);
+CheckAndConvertUtils::CheckIrAttrtoOpAttr(prim->name(), it.first, &value);
// set attr from primitive
int ret = setAttr(op, it.first, value);
if (ret) {

View File

@@ -226,6 +226,7 @@ bool CheckAndConvertUtils::ConvertAttrValueToString(const std::string &op_type,
return true;
}
namespace {
typedef std::map<std::string, std::function<ValuePtr(ValuePtr)>> AttrFunction;
@@ -241,6 +242,7 @@ std::map<std::string, AttrFunction> kIrAttrToOpAttr = {{"L2Normalize", {{"axis",
{"L2NormalizeGrad", {{"axis", L2NormalizeAttrConversion}}}};
} // namespace
bool CheckAndConvertUtils::IsEqualVector(const std::vector<int64_t> &vec_1, const std::vector<int64_t> &vec_2) {
if (vec_1.size() != vec_2.size()) {
return false;
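
kIrAttrToOpAttr maps an op name to per-attribute conversion functions, and this hunk adds an L2NormalizeGrad entry next to the existing L2Normalize one. Judging from the op_adapter comment above ("change ir attr to op attr eg. axis[0]"), L2NormalizeAttrConversion presumably unwraps the single-element axis sequence into a scalar. An illustrative Python analogue; the unwrapping behaviour and return convention are assumptions, not the real implementation:

# Hypothetical analogue of the kIrAttrToOpAttr lookup.
def l2_normalize_attr_conversion(value):
    # Assumed: the IR stores axis as a one-element sequence, while the
    # backend op attr wants the bare scalar.
    return value[0] if isinstance(value, (list, tuple)) else value

IR_ATTR_TO_OP_ATTR = {
    "L2Normalize": {"axis": l2_normalize_attr_conversion},
    "L2NormalizeGrad": {"axis": l2_normalize_attr_conversion},
}

def check_ir_attr_to_op_attr(op_type, attr_name, value):
    # Mirrors CheckIrAttrtoOpAttr: returns (converted, new_value).
    func = IR_ATTR_TO_OP_ATTR.get(op_type, {}).get(attr_name)
    return (True, func(value)) if func else (False, value)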

View File

@@ -1083,7 +1083,7 @@ class L2NormalizeGrad(PrimitiveWithInfer):
Gradients of L2 normalize.
Args:
-axis (int): The begin axis for the input to apply L2 normalize. Default: 0.
+axis (Union[list(int), tuple(int), int]): The begin axis for the input to apply L2 normalize. Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-4.
Inputs:
@@ -1097,8 +1097,13 @@
@prim_attr_register
def __init__(self, axis=0, epsilon=1e-4):
-validator.check_value_type('axis', axis, [int], self.name)
+axis = [axis] if isinstance(axis, int) else axis
+validator.check_value_type('axis', axis, [list, tuple], self.name)
validator.check_value_type('epsilon', epsilon, [int, float], self.name)
+self.add_prim_attr('axis', axis)
+self.init_attrs['axis'] = axis
+if len(axis) != 1:
+raise TypeError("The length of axis must be 1, later will support multiple axis!")
def infer_shape(self, input_x, out, dout):
validator.check('input_x shape', input_x, 'out shape', out, Rel.EQ, self.name)

View File

@@ -2990,7 +2990,8 @@ class L2Normalize(PrimitiveWithInfer):
where :math:`\epsilon` is epsilon.
Args:
-axis (int): The starting axis for the input to apply the L2 normalization. Default: 0.
+axis (Union[list(int), tuple(int), int]): The starting axis for the input to apply the L2 normalization.
+Default: 0.
epsilon (float): A small value added for numerical stability. Default: 1e-4.
Inputs:
@@ -3012,12 +3013,18 @@
@prim_attr_register
def __init__(self, axis=0, epsilon=1e-4):
-validator.check_value_type('axis', axis, [int], self.name)
+axis = [axis] if isinstance(axis, int) else axis
+validator.check_value_type('axis', axis, [list, tuple], self.name)
validator.check_value_type('epsilon', epsilon, [int, float], self.name)
+self.add_prim_attr('axis', axis)
+self.init_attrs['axis'] = axis
+if len(axis) != 1:
+raise TypeError("The length of axis must be 1, later will support multiple axis!")
self.axis = axis
def infer_shape(self, input_x):
dim = len(input_x)
-validator.check_int_range(self.axis, -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
+validator.check_int_range(self.axis[0], -dim, dim, Rel.INC_LEFT, 'axis value', self.name)
return input_x
def infer_dtype(self, input_x):
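
The user-visible effect of the two __init__ changes: axis may now be an int, a list, or a tuple, but only a single axis is accepted either way. A short usage sketch against the 1.x operations API:

import mindspore.ops.operations as P

P.L2Normalize(axis=0)        # an int still works; it is wrapped to [0]
P.L2Normalize(axis=(1,))     # a one-element tuple or list is now accepted
# P.L2Normalize(axis=[0, 1]) # raises TypeError: only one axis is supported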

View File

@@ -54,7 +54,7 @@ void TestL2NormalizeInfo::SetUp() {
g_device_manager = std::make_shared<DeviceManager>();
g_device_manager->Init(dev_list, local_dev, stage_map, "hccl");
-ValuePtr axis = MakeValue(static_cast<int64_t>(1));
+ValuePtr axis = MakeValue(std::vector<int64_t>{1});
std::unordered_map<std::string, ValuePtr> attr = {{AXIS, axis}};
Shapes inputs_shape = {{32, 64, 96}};