tbe json creator refactor

hwjiaorui 2021-07-20 18:58:03 +08:00
parent c9af7643d7
commit bb9b27cf86
19 changed files with 2015 additions and 62 deletions

View File

@@ -168,6 +168,11 @@ void KernelBuildInfo::KernelBuildInfoBuilder::SetFusionType(FusionType fusion_ty
kernel_build_info_->fusion_type_ = fusion_type;
}
void KernelBuildInfo::KernelBuildInfoBuilder::SetOutputDataDesc(const std::vector<nlohmann::json> &data_desc) {
MS_EXCEPTION_IF_NULL(kernel_build_info_);
kernel_build_info_->output_data_desc_ = data_desc;
}
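// Illustrative usage of the new setter (sketch, not part of this commit;
// assumes the builder's default constructor allocates kernel_build_info_):
//   KernelBuildInfo::KernelBuildInfoBuilder builder;
//   std::vector<nlohmann::json> descs = {nlohmann::json{{"shape", {16, 16}}}};
//   builder.SetOutputDataDesc(descs);  // save the TBE prebuild result
//   // read back later through KernelBuildInfo::output_data_desc()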
void KernelBuildInfo::KernelBuildInfoBuilder::SetProcessor(Processor processor) {
MS_EXCEPTION_IF_NULL(kernel_build_info_);
kernel_build_info_->processor_ = processor;

View File

@@ -43,6 +43,7 @@ class KernelBuildInfo {
outputs_format_ = {};
inputs_device_type_ = {};
outputs_device_type_ = {};
output_data_desc_ = {};
}
~KernelBuildInfo() = default;
@@ -81,6 +82,8 @@ class KernelBuildInfo {
OpPattern op_pattern() const { return op_pattern_; }
std::vector<nlohmann::json> output_data_desc() const { return output_data_desc_; }
FusionType fusion_type() const { return fusion_type_; }
Processor processor() const { return processor_; }
@@ -110,6 +113,7 @@ class KernelBuildInfo {
std::vector<std::string> output_reshape_type_;
std::vector<TypeId> inputs_device_type_;
std::vector<TypeId> outputs_device_type_;
std::vector<nlohmann::json> output_data_desc_;
FusionType fusion_type_;
Processor processor_;
};
@@ -156,6 +160,8 @@ class KernelBuildInfo::KernelBuildInfoBuilder {
void SetOutputsReshapeType(const std::vector<std::string> &output_reshape_type);
void SetFusionType(FusionType fusion_type);
// save prebuild result
void SetOutputDataDesc(const std::vector<nlohmann::json> &data_desc);
void SetProcessor(Processor processor);

View File

@@ -21,32 +21,58 @@
#include <string>
#include <memory>
#include <vector>
#include <set>
#include <algorithm>
#include <unordered_map>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/oplib/opinfo.h"
#include "frontend/parallel/ops_info/ops_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
#include "utils/json_operation_utils.h"
namespace mindspore {
namespace kernel {
namespace tbe {
constexpr size_t INPUT0 = 0;
constexpr size_t INPUT1 = 1;
constexpr size_t INPUT2 = 2;
constexpr size_t INPUT3 = 3;
constexpr size_t INPUT4 = 4;
constexpr size_t INPUT5 = 5;
constexpr size_t INPUT6 = 6;
constexpr size_t INPUT7 = 7;
constexpr size_t INPUT8 = 8;
namespace {
std::unordered_set<std::string> input_order_adjusted_ops = {
"Conv2DBackpropInput", "Conv2DBackpropFilter", "LogSoftmaxGrad", "LayerNormGrad", "LayerNormXBackprop",
"LayerNormBetaGammaBackprop", "MinimumGrad", "MaximumGrad", "ApplyCenteredRMSProp"};
constexpr int kInvalid = -1;
constexpr int kFloat = 0;
constexpr int kFloat16 = 1;
constexpr int kInt8 = 2;
constexpr int kInt32 = 3;
constexpr int kUint8 = 4;
constexpr int kUint64 = 10;
constexpr int kBool = 12;
int TypeStrToDstType(const std::string &type_str) {
std::unordered_map<std::string, int> type_name_type_id_map = {
{"Float", kFloat}, {"Float32", kFloat}, {"Float16", kFloat16}, {"Int8", kInt8},
{"Int32", kInt32}, {"UInt8", kUint8}, {"UInt64", kUint64}, {"Bool", kBool}};
auto iter = type_name_type_id_map.find(type_str);
if (iter != type_name_type_id_map.end()) {
return iter->second;
} else {
MS_LOG(INFO) << "Error type str is invailed: " << type_str;
}
return kInvalid;
}
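// Sketch of the table-driven lookup above (constants as defined in this file):
//   TypeStrToDstType("Float")   -> kFloat   (0)
//   TypeStrToDstType("Float16") -> kFloat16 (1)
//   TypeStrToDstType("Int16")   -> kInvalid (-1), after logging an INFO message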
} // namespace
std::unordered_set<std::string> TbeAdapter::input_order_adjusted_ops_ = {
kConv2DBackpropInputOpName, kConv2DBackpropFilterOpName, kLogSoftmaxGradOpName,
kLayerNormGradOpName, kLayerNormXBackpropOpName, kLayerNormBetaGammaBackpropOpName,
kMinimumGradOpName, kMaximumGradOpName, kApplyCenteredRMSPropOpName};
std::map<std::string, FAttrsPass> TbeAdapter::build_json_attr_pass_map_ = {
// TODO(xxx): tbeadapter max and min
// {"MaximumGrad", TbeAdapter::MaximumGradAttrJsonPass},
// {"MinimumGrad", TbeAdapter::MinimumGradAttrJsonPass},
{"Cast", TbeAdapter::CastAttrJsonPass}};
void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector<std::vector<nlohmann::json>> const &inputs_list,
nlohmann::json *inputs_json) {
MS_EXCEPTION_IF_NULL(inputs_json);
if (input_order_adjusted_ops.find(op_name) == input_order_adjusted_ops.end()) {
if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
(void)std::copy(inputs_list.begin(), inputs_list.end(), std::back_inserter((*inputs_json)));
} else {
if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
@@ -81,7 +107,7 @@ void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector<std::vec
void TbeAdapter::FusionInputOrderPass(const std::string &op_name, const std::vector<nlohmann::json> &inputs_list,
std::vector<nlohmann::json> *inputs_json) {
MS_EXCEPTION_IF_NULL(inputs_json);
if (input_order_adjusted_ops.find(op_name) == input_order_adjusted_ops.end()) {
if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
(void)std::copy(inputs_list.begin(), inputs_list.end(), std::back_inserter((*inputs_json)));
} else {
if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
@@ -104,7 +130,7 @@ void TbeAdapter::FusionInputOrderPass(const std::string &op_name, const std::vec
void TbeAdapter::FusionDataOrderPass(const std::string &op_name, const std::vector<AnfNodePtr> &data_layer,
std::vector<AnfNodePtr> *reorder_data_layer) {
MS_EXCEPTION_IF_NULL(reorder_data_layer);
if (input_order_adjusted_ops.find(op_name) == input_order_adjusted_ops.end()) {
if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
(void)std::copy(data_layer.begin(), data_layer.end(), std::back_inserter((*reorder_data_layer)));
} else {
if (op_name == "MinimumGrad" || op_name == "MaximumGrad") {
@@ -124,13 +150,7 @@ void TbeAdapter::FusionDataOrderPass(const std::string &op_name, const std::vect
}
}
std::map<std::string, FAttrsPass> TbeAdapter::build_json_attr_pass_map_ = {
{"MaximumGrad", TbeAdapter::MaxiOrMinimumGradAttrJsonPass},
{"MinimumGrad", TbeAdapter::MaxiOrMinimumGradAttrJsonPass},
{"Cast", TbeAdapter::CastAttrJsonPass}};
bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node,
const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node, const std::vector<OpAttrPtr> &op_info_attrs,
nlohmann::json *attrs_json) {
MS_EXCEPTION_IF_NULL(attrs_json);
auto cnode_name = AnfAlgo::GetCNodeName(anf_node);
@@ -168,35 +188,6 @@ void TbeAdapter::MaxiOrMinimumGradAttrJsonPass(const AnfNodePtr &anf_node,
MS_LOG(INFO) << "MaxiOrMinimumGradAttrJsonPass done.";
}
static int TypeStrToDstType(const std::string &type_str) {
constexpr int kInvalid = -1;
constexpr int kFloat = 0;
constexpr int kFloat16 = 1;
constexpr int kInt8 = 2;
constexpr int kInt32 = 3;
constexpr int kUint8 = 4;
constexpr int kUint64 = 10;
constexpr int kBool = 12;
if (type_str == "Float" || type_str == "Float32") {
return kFloat;
} else if (type_str == "Float16") {
return kFloat16;
} else if (type_str == "Int8") {
return kInt8;
} else if (type_str == "Int32") {
return kInt32;
} else if (type_str == "UInt8") {
return kUint8;
} else if (type_str == "UInt64") {
return kUint64;
} else if (type_str == "Bool") {
return kBool;
} else {
MS_LOG(INFO) << "Error type str is invailed: " << type_str;
}
return kInvalid;
}
void TbeAdapter::CastAttrJsonPass(const mindspore::AnfNodePtr &anf_node,
const std::vector<std::shared_ptr<mindspore::kernel::OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json) {
@@ -249,6 +240,192 @@ void TbeAdapter::GenTopKV2IndicesTensorInfo(const std::shared_ptr<mindspore::Anf
input_desc_json["valid"] = true;
input_list->emplace_back(input_desc_json);
}
bool TbeAdapter::IsSpecialFusionComputeNode(const std::vector<mindspore::AnfNodePtr> &compute_nodes) {
auto result = std::find_if(compute_nodes.begin(), compute_nodes.end(), [](const auto &it) {
auto op_name = AnfAlgo::GetCNodeName(it);
return (op_name == kConv2DBackpropInputOpName || op_name == kConv2DOpName);
});
return result != compute_nodes.end();
}
bool TbeAdapter::GetSpecInputLayers(const std::string &op_name, const std::vector<mindspore::AnfNodePtr> &reorder_layer,
std::map<const AnfNodePtr, FusionDataType> *spec_data_input) {
if ((op_name == kReluGradV2OpName || op_name == kAddNOpName || op_name == kTensorAddOpName) &&
reorder_layer.empty()) {
MS_LOG(WARNING) << "Fusion error: node(" << op_name << " )'s input is null. ";
return false;
}
if (op_name == kReluGradV2OpName) {
(*spec_data_input)[reorder_layer[0]] = kFusionReLUGradV2;
} else if (op_name == kAddNOpName) {
for (const auto &it : reorder_layer) {
(*spec_data_input)[it] = kFusionAddN;
}
} else if (op_name == kTensorAddOpName) {
(*spec_data_input)[reorder_layer[0]] = kFusionAdd;
}
return true;
}
void TbeAdapter::FusionDescJsonPass(const AnfNodePtr &node, nlohmann::json *output_desc,
const std::map<const AnfNodePtr, tbe::FusionDataType> &spec_data_input) {
MS_EXCEPTION_IF_NULL(node);
MS_EXCEPTION_IF_NULL(output_desc);
tbe::FusionDataType fusion_data_type =
spec_data_input.find(node) != spec_data_input.end() ? spec_data_input.at(node) : tbe::kFusionNormal;
std::vector<size_t> shape = (*output_desc)["shape"];
if ((fusion_data_type == kFusionAddN || fusion_data_type == kFusionAdd) && shape.size() == 5) {
std::vector<size_t> spec_shape = {};
spec_shape.emplace_back(shape[0]);
spec_shape.emplace_back(shape[1]);
spec_shape.emplace_back(shape[2] * shape[3]);
spec_shape.emplace_back(shape[4]);
(*output_desc)["shape"] = spec_shape;
} else if (fusion_data_type == kFusionReLUGradV2) {
std::vector<size_t> spec_shape = {};
spec_shape.emplace_back(shape[0]);
spec_shape.emplace_back(shape[1]);
spec_shape.emplace_back(shape[2] * shape[3]);
spec_shape.emplace_back(16);
(*output_desc)["shape"] = spec_shape;
(*output_desc)["data_type"] = "bool";
}
}
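// Worked example for the rewrite above (sketch; 5-D shape values assumed):
// given shape {2, 4, 8, 8, 16},
//   kFusionAddN / kFusionAdd -> shape {2, 4, 8 * 8, 16} == {2, 4, 64, 16}
//   kFusionReLUGradV2        -> shape {2, 4, 64, 16}, last dim fixed to 16,
//                               and "data_type" forced to "bool"
//   kFusionNormal            -> output_desc left unchanged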
std::string TbeAdapter::GetRealOpType(const std::string &origin_type) {
static std::map<std::string, std::string> buffer_fusion_op_map = {
{parallel::DEPTHWISE_CONV2D_NATIVE, parallel::DEPTHWISE_CONV2D}};
auto iter = buffer_fusion_op_map.find(origin_type);
return (iter != buffer_fusion_op_map.end()) ? iter->second : origin_type;
}
std::string TbeAdapter::GetNodeFusionType(const mindspore::CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
auto node_type = AnfAlgo::GetCNodeName(cnode);
static std::map<std::string, std::string> fusion_type_map = {{kConv2DOpName, "Convolution"},
{kBNTrainingReduceOpName, "bn_reduce"},
{kBNTrainingUpdateOpName, "bn_update"},
{kReluV2OpName, "ElemWise"},
{kTensorAddOpName, "ElemWise"},
{kConv2DBackpropInputOpName, "Conv2d_backprop_input"},
{kConv2DBackpropFilterOpName, "Conv2d_backprop_filter"},
{kDepthwiseConv2dNativeOpName, "DepthwiseConvolution"},
{kAddNOpName, "ElemWise"},
{kReluGradV2OpName, "ElemWise"},
{kRealDivOpName, "ElemWise"},
{kBiasAddOpName, "BiasAdd"}};
auto find = fusion_type_map.find(node_type);
if (find == fusion_type_map.end()) {
MS_LOG(INFO) << "Fusion warning: get node fusion type failed from lists, origin node type: " << node_type;
auto op_info = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(node_type, cnode);
MS_EXCEPTION_IF_NULL(op_info);
return op_info->fusion_type();
} else {
return find->second;
}
}
std::string TbeAdapter::FormatPass(const std::string &format, const size_t &origin_shape_size) {
if (format == kOpFormat_DEFAULT) {
return origin_shape_size == kNCHWShapeSize ? kOpFormat_NCHW : kOpFormat_ND;
} else if (format == kOpFormat_FRAC_Z) {
return kOpFormat_FRACTAL_Z;
} else {
return format;
}
}
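// Mappings implied by the branches above (sketch; assumes kNCHWShapeSize == 4):
//   FormatPass(kOpFormat_DEFAULT, 4) -> kOpFormat_NCHW
//   FormatPass(kOpFormat_DEFAULT, 2) -> kOpFormat_ND
//   FormatPass(kOpFormat_FRAC_Z, n)  -> kOpFormat_FRACTAL_Z
//   any other format                 -> returned unchanged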
bool TbeAdapter::GetSpecDataInput(const FusionScopeInfo &fusion_scope_info,
std::map<const AnfNodePtr, tbe::FusionDataType> *spec_data_input) {
MS_EXCEPTION_IF_NULL(spec_data_input);
auto input_nodes = fusion_scope_info.input_nodes;
auto compute_nodes = fusion_scope_info.compute_nodes;
for (const auto &compute_node : compute_nodes) {
MS_EXCEPTION_IF_NULL(compute_node);
std::vector<mindspore::AnfNodePtr> layer = {};
std::vector<mindspore::AnfNodePtr> reorder_layer = {};
auto op_name = AnfAlgo::GetCNodeName(compute_node);
auto ccompute_node = compute_node->cast<CNodePtr>();
if (ccompute_node == nullptr) {
MS_LOG(WARNING) << "Fusion error: fusion compute node must be cnode, but the node is "
<< ccompute_node->DebugString();
return false;
}
for (size_t i = 1; i < ccompute_node->inputs().size(); ++i) {
auto input = ccompute_node->input(i);
auto find_iter = std::find(input_nodes.begin(), input_nodes.end(), input);
if (find_iter != input_nodes.end()) {
layer.emplace_back((*find_iter));
}
}
InputOrderPass<AnfNodePtr>(op_name, layer, &reorder_layer);
if (IsSpecialFusionComputeNode(compute_nodes)) {
if (!GetSpecInputLayers(op_name, reorder_layer, spec_data_input)) {
return false;
}
}
}
return true;
}
bool TbeAdapter::IsPlaceHolderInput(const AnfNodePtr &node, const OpIOInfoPtr &input_ptr) {
MS_EXCEPTION_IF_NULL(node);
MS_EXCEPTION_IF_NULL(input_ptr);
static std::set<std::string> node_set = {kDynamicRNNOpName, kDynamicGRUV2OpName};
auto cnode_name = AnfAlgo::GetCNodeName(node);
if (node_set.find(cnode_name) == node_set.end()) {
return false;
}
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
if (AnfAlgo::HasNodeAttr("placeholder_index", cnode)) {
auto none_index = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(node, "placeholder_index");
return find(none_index.begin(), none_index.end(), input_ptr->index()) != none_index.end();
} else {
MS_LOG(EXCEPTION) << "Cnode: " << cnode_name << "doesn't has attribute placeholder_index.";
}
}
void TbeAdapter::CastAttrJsonPrePass(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *op_info_attrs,
nlohmann::json *attrs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(attrs_json);
if (AnfAlgo::GetCNodeName(anf_node) != kCastOpName) {
return;
}
if (op_info_attrs->size() != 1) {
MS_LOG(INFO) << "cast node should has dst_type attr";
return;
}
auto attr_name = (*op_info_attrs)[0]->name();
auto type_ptr = std::make_shared<TensorType>(TypeIdToType(AnfAlgo::GetOutputDeviceDataType(anf_node, 0)));
MS_EXCEPTION_IF_NULL(type_ptr);
auto type_element = type_ptr->element();
MS_EXCEPTION_IF_NULL(type_element);
auto dtype = type_element->ToString();
auto dst_type_value = TypeStrToDstType(dtype);
nlohmann::json attr_obj;
attr_obj["value"] = dst_type_value;
attr_obj["valid"] = true;
attr_obj["name"] = attr_name;
attrs_json->push_back(attr_obj);
op_info_attrs->clear();
}
void TbeAdapter::CastJsonPostPass(const AnfNodePtr &anf_node, nlohmann::json *attrs_json) {
if (AnfAlgo::GetCNodeName(anf_node) != kCastOpName) {
return;
}
std::map<int, std::string> dst_type_map{{0, "float32"}, {1, "float16"}, {2, "int8"}, {3, "int32"},
{4, "uint8"}, {10, "uint64"}, {12, "bool"}};
auto type_id = GetJsonValue<int>(attrs_json->at(0), kJValue);
auto iter = dst_type_map.find(type_id);
if (iter != dst_type_map.end()) {
attrs_json->at(0)[kJValue] = iter->second;
} else {
MS_LOG(EXCEPTION) << "Invalid type:" << type_id;
}
}
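// Example rewrite performed by CastJsonPostPass (sketch; attr name assumed to
// be dst_type, as produced by CastAttrJsonPrePass above):
//   before: [{"name": "dst_type", "valid": true, "value": 1}]
//   after : [{"name": "dst_type", "valid": true, "value": "float16"}]
// A value outside dst_type_map (e.g. 5) triggers MS_LOG(EXCEPTION).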
} // namespace tbe
} // namespace kernel
} // namespace mindspore

View File

@@ -20,42 +20,128 @@
#include <map>
#include <memory>
#include <vector>
#include <algorithm>
#include <unordered_set>
#include "nlohmann/json.hpp"
#include "base/base.h"
#include "backend/kernel_compiler/oplib/opinfo.h"
#include "backend/kernel_compiler/kernel_fusion.h"
// Note: This file is mainly used to adapt the differences between the ME front-end
// operator description and the TBE back-end operator implementation.
namespace mindspore {
namespace kernel {
constexpr size_t INPUT0 = 0;
constexpr size_t INPUT1 = 1;
constexpr size_t INPUT2 = 2;
constexpr size_t INPUT3 = 3;
constexpr size_t INPUT4 = 4;
constexpr size_t INPUT5 = 5;
constexpr size_t INPUT6 = 6;
constexpr size_t INPUT7 = 7;
constexpr size_t INPUT8 = 8;
enum kCreaterType : int { SINGLE_BUILD = 0, OP_SELECT_FORMAT, CHECK_SUPPORTED, OP_PRE_COMPILE };
namespace tbe {
const std::map<std::string, std::string> opTypeAdapter = {{"ReLUV2", "ReluV2"},
{"ReLU6", "Relu6"},
{"ReLU6Grad", "Relu6Grad"},
{"ReLUGrad", "ReluGrad"},
{"ReLU", "Relu"},
{"Gather", "GatherV2"},
{"SparseApplyFtrl", "SparseApplyFtrlD"},
{"Concat", "ConcatD"},
{"DepthwiseConv2dNative", "DepthwiseConv2D"},
{"FastGeLU", "FastGelu"},
{"FastGeLUGrad", "FastGeluGrad"},
{"GeLU", "Gelu"},
{"GeLUGrad", "GeluGrad"},
{"PReLU", "PRelu"},
{"PReLUGrad", "PReluGrad"},
{"SeLU", "Selu"}};
enum FusionDataType { kFusionNormal = 0, kFusionAddN, kFusionReLUGradV2, kFusionAdd };
using FAttrsPass = void (*)(const AnfNodePtr &anf_node, const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json);
using FPreAttrsPass = void (*)(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *op_info_attrs,
nlohmann::json *attrs_json);
class TbeAdapter {
public:
TbeAdapter() = default;
~TbeAdapter() = default;
template <typename T>
static void InputOrderPass(const std::string &op_name, std::vector<T> const &inputs_list,
std::vector<T> *inputs_json) {
MS_EXCEPTION_IF_NULL(inputs_json);
if (input_order_adjusted_ops_.find(op_name) == input_order_adjusted_ops_.end()) {
(void)std::copy(inputs_list.begin(), inputs_list.end(), std::back_inserter((*inputs_json)));
} else {
if (op_name == kMinimumGradOpName || op_name == kMaximumGradOpName) {
inputs_json->push_back(inputs_list[INPUT2]);
inputs_json->push_back(inputs_list[INPUT0]);
inputs_json->push_back(inputs_list[INPUT1]);
for (size_t i = 3; i < inputs_list.size(); ++i) {
inputs_json->push_back(inputs_list[i]);
}
} else if (op_name == kApplyCenteredRMSPropOpName) {
// The parameter order of ApplyCenteredRMSProp's TBE implementation differs from the
// Python API, so map each TBE parameter to the corresponding Python API parameter
// by the latter's (hard-coded) index.
inputs_json->push_back(inputs_list[INPUT0]);
inputs_json->push_back(inputs_list[INPUT1]);
inputs_json->push_back(inputs_list[INPUT2]);
inputs_json->push_back(inputs_list[INPUT3]);
inputs_json->push_back(inputs_list[INPUT5]);
inputs_json->push_back(inputs_list[INPUT6]);
inputs_json->push_back(inputs_list[INPUT7]);
inputs_json->push_back(inputs_list[INPUT8]);
inputs_json->push_back(inputs_list[INPUT4]);
} else {
inputs_json->push_back(inputs_list[INPUT1]);
inputs_json->push_back(inputs_list[INPUT0]);
for (size_t i = 2; i < inputs_list.size(); ++i) {
inputs_json->push_back(inputs_list[i]);
}
}
}
}
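// Reorderings performed by the template above, by input index (sketch derived
// from the branches):
//   MinimumGrad / MaximumGrad: 0 1 2 3 ... -> 2 0 1 3 ...
//   ApplyCenteredRMSProp     : 0 1 2 3 4 5 6 7 8 -> 0 1 2 3 5 6 7 8 4
//   other adjusted ops       : 0 1 2 ... -> 1 0 2 ...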
static void InputOrderPass(const std::string &op_name, std::vector<std::vector<nlohmann::json>> const &inputs_list,
nlohmann::json *inputs_json);
static bool RunAttrPass(const AnfNodePtr &anf_node, const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json);
static void GenTopKV2IndicesTensorInfo(const std::shared_ptr<AnfNode> &anf_node, size_t real_input_index,
std::vector<nlohmann::json> *input_list, kCreaterType creater_type);
// TODO(xxx): delete
// FusionInputOrderPass/InputOrderPass/FusionDataOrderPass/GenTopKV2IndicesTensorInfo/GetNodeFusionType
static void FusionInputOrderPass(const std::string &op_name, const std::vector<nlohmann::json> &inputs_list,
std::vector<nlohmann::json> *inputs_json);
static void InputOrderPass(const std::string &op_name, std::vector<std::vector<nlohmann::json>> const &inputs_list,
nlohmann::json *inputs_json);
static void FusionDataOrderPass(const std::string &op_name, const std::vector<AnfNodePtr> &data_layer,
std::vector<AnfNodePtr> *reorder_data_layer);
static void GenTopKV2IndicesTensorInfo(const std::shared_ptr<AnfNode> &anf_node, size_t real_input_index,
std::vector<nlohmann::json> *input_list, kCreaterType creater_type);
static std::string GetNodeFusionType(const mindspore::CNodePtr &cnode);
static bool RunAttrPass(const AnfNodePtr &anf_node, const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json);
static void FusionDescJsonPass(const AnfNodePtr &node, nlohmann::json *output_desc,
const std::map<const AnfNodePtr, tbe::FusionDataType> &spec_data_input);
static std::string GetRealOpType(const std::string &origin_type);
static std::string FormatPass(const std::string &format, const size_t &origin_shape_size);
static bool GetSpecDataInput(const FusionScopeInfo &fusion_scope_info,
std::map<const AnfNodePtr, tbe::FusionDataType> *spec_data_input);
static bool IsPlaceHolderInput(const AnfNodePtr &node, const OpIOInfoPtr &input_ptr);
static void CastAttrJsonPrePass(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *op_info_attrs,
nlohmann::json *attrs_json);
static void CastJsonPostPass(const AnfNodePtr &anf_node, nlohmann::json *attrs_json);
private:
// TODO(xxx): delete MaxiOrMinimumGradAttrJsonPass
static void MaxiOrMinimumGradAttrJsonPass(const AnfNodePtr &anf_node,
const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json);
static void CastAttrJsonPass(const AnfNodePtr &anf_node, const std::vector<std::shared_ptr<OpAttr>> &op_info_attrs,
nlohmann::json *attrs_json);
static bool IsSpecialFusionComputeNode(const std::vector<mindspore::AnfNodePtr> &compute_nodes);
static bool GetSpecInputLayers(const std::string &op_name, const std::vector<mindspore::AnfNodePtr> &reorder_layer,
std::map<const AnfNodePtr, FusionDataType> *spec_data_input);
static std::map<std::string, FAttrsPass> build_json_attr_pass_map_;
static std::unordered_set<std::string> input_order_adjusted_ops_;
};
} // namespace tbe
} // namespace kernel

View File

@@ -0,0 +1,306 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/tbe/tbe_json/fusion_tbe_json_creator.h"
#include <memory>
#include <list>
#include <algorithm>
#include <string>
#include <vector>
#include "base/core_ops.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/tbe/tbe_adapter.h"
#include "backend/kernel_compiler/tbe/tbe_convert_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "backend/kernel_compiler/tbe/tbe_utils.h"
#include "runtime/dev.h"
#include "utils/json_operation_utils.h"
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
namespace mindspore::kernel {
using mindspore::kernel::tbe::TbeAdapter;
bool FusionBuildTbeJsonCreator::GenJson(const FusionScopeInfo &fusion_scope_info, nlohmann::json *fusion_json) {
MS_EXCEPTION_IF_NULL(fusion_json);
MS_LOG(DEBUG) << "Start Generate Fusion Json, Fusion Node: " << fusion_scope_info.full_name;
nlohmann::json soc_info_json;
kernel::tbe::TbeUtils::GenSocInfo(&soc_info_json);
(*fusion_json)[kJSocInfo] = soc_info_json;
std::vector<nlohmann::json> op_list_json;
if (!GenOpListJson(fusion_scope_info, &op_list_json)) {
MS_LOG(ERROR) << "Generate fusion json failed.";
return false;
}
(*fusion_json)[kJOpList] = op_list_json;
GenFusionOpName(fusion_json, kJFusionKernelNamePrefix);
AddOpNameForComputeNode(fusion_json);
(*fusion_json)[kJL1Size] = -1;
(*fusion_json)[kJGraphName] = "";
(*fusion_json)[kJScopeID] = fusion_scope_info.scope_id;
(*fusion_json)[kJFullName] = fusion_scope_info.full_name;
MS_LOG(DEBUG) << "Json name is : " << GetJsonName() << ", fusion json:" << fusion_json->dump();
return true;
}
bool FusionBuildTbeJsonCreator::GenOpListJson(const FusionScopeInfo &fusion_scope_info,
std::vector<nlohmann::json> *fusion_json) {
MS_EXCEPTION_IF_NULL(fusion_json);
MS_LOG(DEBUG) << "Start";
if (!CheckInput(fusion_scope_info)) {
return false;
}
optional_index_ = 0;
auto compute_nodes = fusion_scope_info.compute_nodes;
std::vector<nlohmann::json> compute_list;
for (const auto &compute_node : compute_nodes) {
nlohmann::json compute_json;
if (!GenComputeJson(compute_node, &compute_json)) {
MS_LOG(ERROR) << "Fusion Error: gen fusion compute json failed. node full name: "
<< compute_node->fullname_with_scope();
return false;
}
compute_json[kJOriName] = {fusion_scope_info.full_name};
compute_list.push_back(compute_json);
}
// FusionDataType fusion_data_type: special processing of the json desc output shape for [kFusionAddN, kFusionReLUGradV2]
ANodeFusionDataTypeMap spec_data_input;
if (!TbeAdapter::GetSpecDataInput(fusion_scope_info, &spec_data_input)) {
return false;
}
GenDataJson(compute_nodes, compute_list, fusion_json, spec_data_input);
(*fusion_json).insert((*fusion_json).end(), compute_list.begin(), compute_list.end());
MS_LOG(DEBUG) << "End";
return true;
}
bool FusionBuildTbeJsonCreator::CheckInput(const FusionScopeInfo &fusion_scope_info) {
MS_LOG(DEBUG) << "Start";
auto input_nodes = fusion_scope_info.input_nodes;
auto compute_nodes = fusion_scope_info.compute_nodes;
size_t input_size = 0;
for (const auto &compute_node : compute_nodes) {
MS_EXCEPTION_IF_NULL(compute_node);
auto ccompute_node = compute_node->cast<CNodePtr>();
if (ccompute_node == nullptr) {
MS_LOG(ERROR) << "Fusion error: fusion compute node must be cnode, but the node is "
<< ccompute_node->DebugString();
return false;
}
for (size_t i = 1; i < ccompute_node->inputs().size(); ++i) {
auto input = ccompute_node->input(i);
auto find_iter = std::find(input_nodes.begin(), input_nodes.end(), input);
if (find_iter != input_nodes.end()) {
input_size++;
}
}
}
if (input_nodes.size() != input_size) {
MS_LOG(ERROR) << "Fusion error: fusion scope error, compute node input size:" << input_size
<< ", input nodes num:" << input_nodes.size();
return false;
}
MS_LOG(DEBUG) << "End";
return true;
}
bool FusionBuildTbeJsonCreator::GenDataJson(const std::vector<AnfNodePtr> &compute_nodes,
const std::vector<nlohmann::json> &compute_json,
std::vector<nlohmann::json> *op_list_json,
const ANodeFusionDataTypeMap &spec_data_input) {
MS_EXCEPTION_IF_NULL(op_list_json);
MS_LOG(DEBUG) << "Start.";
std::vector<std::string> compute_nodes_fullname;
std::transform(compute_nodes.begin(), compute_nodes.end(), back_inserter(compute_nodes_fullname),
[](const AnfNodePtr &node) { return node->fullname_with_scope(); });
for (size_t i = 0; i < compute_nodes.size(); i++) {
auto inputs_desc = GetJsonValue<std::vector<nlohmann::json>>(compute_json.at(i), kJInputDesc);
for (const auto &input_desc : inputs_desc) {
if (std::find(compute_nodes_fullname.begin(), compute_nodes_fullname.end(),
GetJsonValue<std::string>(input_desc, kJName)) != compute_nodes_fullname.end()) {
continue;
}
nlohmann::json data_json;
nlohmann::json output_desc = input_desc;
std::vector<nlohmann::json> output_desc_list;
if (input_desc.find(kJOriShape) != input_desc.end()) {
auto input_node = GetInputCNode(compute_nodes[i], output_desc);
TbeAdapter::FusionDescJsonPass(input_node, &output_desc, spec_data_input);
}
output_desc_list.push_back(output_desc);
data_json[kJName] = GetJsonValue<std::string>(input_desc, kJName);
data_json[kJType] = kJData;
data_json[kJOutputDesc] = output_desc_list;
(*op_list_json).push_back(data_json);
}
}
MS_LOG(DEBUG) << "End.";
return true;
}
AnfNodePtr FusionBuildTbeJsonCreator::GetInputCNode(const AnfNodePtr &node, const nlohmann::json &input_desc) {
auto input_name = GetJsonValue<std::string>(input_desc, kJName);
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
for (size_t i = 1; i < cnode->inputs().size(); i++) {
auto kernel_idx = AnfAlgo::VisitKernel(cnode->input(i), 0);
auto full_name = kernel_idx.first->fullname_with_scope();
std::string desc_name = kernel_idx.second > 0 ? (full_name + "_" + std::to_string(kernel_idx.second)) : full_name;
if (input_name == desc_name) {
return cnode->input(i);
}
}
MS_LOG(EXCEPTION) << "Can not find node:[" << node->fullname_with_scope() << "]'s input [" << input_name << "]";
}
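// Naming convention matched above (sketch; the node name is hypothetical):
// for a producer "Default/Conv2D-op1", output 0 keeps the full name
// "Default/Conv2D-op1", while output 2 becomes "Default/Conv2D-op1_2"
// (fullname_with_scope() + "_" + output index whenever the index is > 0).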
bool FusionBuildTbeJsonCreator::GenInputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
std::vector<nlohmann::json> input_desc_list_tmp = {};
auto op_name = AnfAlgo::GetCNodeName(anf_node);
auto cnode = anf_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
bool is_dynamic_input = AnfAlgo::HasNodeAttr(kAttrDynInputSizes, cnode);
if (is_dynamic_input) {
MS_LOG(INFO) << op_name << " has dynamic input.";
if (!CheckDynamicInput(cnode)) {
return false;
}
}
size_t input_index = 0;
for (size_t i = 1; i < cnode->inputs().size(); ++i) {
auto input = cnode->input(i);
if (HasAbstractMonad(input)) {
continue;
}
auto kernel_idx = AnfAlgo::VisitKernel(input, 0);
nlohmann::json input_desc;
GenDescJson(kernel_idx.first, kernel_idx.second, kernel_idx.second, &input_desc);
if (is_dynamic_input) {
input_desc[kJDynIndex] = (i - 1);
}
input_desc_list_tmp.emplace_back(input_desc);
input_index++;
}
std::vector<size_t> inputs_tensor_num;
auto op_info = tbe::TbeDynamicShapeUtil::FindOp(op_name, anf_node);
TbeJsonUtils::GetInputsRealNum(anf_node, op_info->inputs_ptr(), &inputs_tensor_num);
size_t need_input_num = std::accumulate(inputs_tensor_num.begin(), inputs_tensor_num.end(), static_cast<size_t>(0));
for (size_t i = input_index; i < need_input_num; ++i) {
nlohmann::json optional_input_desc;
optional_input_desc[kJName] = std::string(kJOptional) + std::to_string(optional_index_);
optional_input_desc[kJShape] = kJNull;
optional_input_desc[kJDataType] = 0;
optional_index_++;
input_desc_list_tmp.emplace_back(optional_input_desc);
}
std::vector<nlohmann::json> input_desc_list;
TbeAdapter::InputOrderPass<nlohmann::json>(op_name, input_desc_list_tmp, &input_desc_list);
(*compute_json)[kJInputDesc] = input_desc_list;
return true;
}
bool FusionBuildTbeJsonCreator::CheckDynamicInput(const CNodePtr &cnode) {
MS_EXCEPTION_IF_NULL(cnode);
if (!AnfAlgo::HasNodeAttr(kAttrDynInputSizes, cnode)) {
MS_LOG(ERROR) << "Fusion error: cnode [ " << AnfAlgo::GetCNodeName(cnode) << "] has not attr dyn_input_sizes.";
return false;
}
// For a dynamic number of inputs, dyn_input_sizes records the dynamic input num of each input.
auto dyn_input_sizes = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(cnode, kAttrDynInputSizes);
if (dyn_input_sizes.size() != 1) {
MS_LOG(ERROR) << "Fusion error: fusion build not support dynamic input size > 1";
return false;
}
auto real_input_size = cnode->inputs().size() - 1;
if (LongToSize(dyn_input_sizes[0]) != real_input_size) {
MS_LOG(ERROR) << "Fusion error: dyn_input_size" << dyn_input_sizes[0] << "not equal real_input_size"
<< real_input_size;
return false;
}
return true;
}
bool FusionBuildTbeJsonCreator::GenOutputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
auto output_size = AnfAlgo::GetOutputTensorNum(anf_node);
auto cnode = anf_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
std::vector<nlohmann::json> output_desc_list;
if (AnfAlgo::HasNodeAttr(kAttrOutputUsedNum, cnode)) {
auto output_used_nums = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(anf_node, kAttrOutputUsedNum);
if (output_used_nums.size() != output_size) {
MS_LOG(ERROR) << "Fusion error: [" << AnfAlgo::GetCNodeName(anf_node) << " ]'s output tenor num(" << output_size
<< ")"
<< " is not match output used num(" << output_used_nums.size() << ")";
return false;
}
auto desc_output_index = GetDescOutputIndex(output_used_nums);
for (size_t i = 0; i < output_size; ++i) {
MS_LOG(DEBUG) << "Fusion index: " << i << ", desc_output_index: " << desc_output_index[i];
nlohmann::json output_desc;
GenDescJson(anf_node, i, desc_output_index[i], &output_desc);
output_desc_list.emplace_back(output_desc);
}
for (size_t j = output_size; j < desc_output_index.size(); ++j) {
MS_LOG(DEBUG) << "Fusion index: " << j << ", desc_output_index: " << desc_output_index[j];
nlohmann::json output_desc;
GenReusedOutputDesc(anf_node, j, desc_output_index[j], &output_desc, output_size);
output_desc_list.emplace_back(output_desc);
}
} else {
for (size_t i = 0; i < output_size; ++i) {
nlohmann::json output_desc;
GenDescJson(anf_node, i, i, &output_desc);
output_desc_list.emplace_back(output_desc);
}
}
(*compute_json)[kJOutputDesc] = output_desc_list;
return true;
}
void FusionBuildTbeJsonCreator::GenReusedOutputDesc(const AnfNodePtr &anf_node, size_t index, size_t output_index,
nlohmann::json *output_desc, size_t out_size) {
GenDesCommonJson(output_desc);
std::string output_desc_name = anf_node->fullname_with_scope() + "_" + std::to_string(index);
(*output_desc)[kJName] = output_desc_name;
(*output_desc)[kJOutputIndex] = output_index;
std::vector<size_t> shape;
(*output_desc)[kJShape] = shape;
(*output_desc)[kJDataType] = tbe::TypeIdToString(AnfAlgo::GetOutputDeviceDataType(anf_node, out_size - 1));
}
std::vector<size_t> FusionBuildTbeJsonCreator::GetDescOutputIndex(const std::vector<int64_t> &output_used_nums) {
std::vector<size_t> desc_output_index = {};
for (size_t idx = 0; idx < output_used_nums.size(); ++idx) {
desc_output_index.emplace_back(idx);
if (output_used_nums[idx] > 1) {
desc_output_index.emplace_back(idx);
}
}
return desc_output_index;
}
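// Worked example (sketch): output_used_nums {1, 2, 3} yields desc_output_index
// {0, 1, 1, 2, 2}; an output used more than once appears twice, so
// GenOutputsJson also emits a reused-output desc for it.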
bool FusionBuildTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) {
tbe::TbeAdapter::CastJsonPostPass(anf_node, attrs_json);
return true;
}
} // namespace mindspore::kernel

View File

@@ -0,0 +1,49 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_FUSION_TBE_JSON_CREATOR_H
#define MINDSPORE_FUSION_TBE_JSON_CREATOR_H
#include <map>
#include <vector>
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_creator.h"
namespace mindspore::kernel {
using ANodeFusionDataTypeMap = std::map<const AnfNodePtr, tbe::FusionDataType>;
class FusionBuildTbeJsonCreator : public TbeJsonCreator {
public:
FusionBuildTbeJsonCreator() : optional_index_(0) {}
~FusionBuildTbeJsonCreator() override = default;
bool GenJson(const FusionScopeInfo &fusion_scope_info, nlohmann::json *fusion_json) override;
protected:
bool GenOpListJson(const FusionScopeInfo &fusion_scope_info, std::vector<nlohmann::json> *fusion_json);
bool GenInputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) override;
bool GenOutputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) override;
std::vector<size_t> GetDescOutputIndex(const std::vector<int64_t> &output_used_nums);
void GenReusedOutputDesc(const AnfNodePtr &anf_node, size_t index, size_t output_index, nlohmann::json *output_desc,
size_t out_size);
bool GenDataJson(const std::vector<AnfNodePtr> &compute_nodes, const std::vector<nlohmann::json> &compute_json,
std::vector<nlohmann::json> *op_list_json, const ANodeFusionDataTypeMap &spec_data_input);
bool AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) override;
private:
AnfNodePtr GetInputCNode(const AnfNodePtr &node, const nlohmann::json &input_desc);
bool CheckDynamicInput(const CNodePtr &cnode);
bool CheckInput(const FusionScopeInfo &fusion_scope_info);
size_t optional_index_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_FUSION_TBE_JSON_CREATOR_H

View File

@@ -0,0 +1,439 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/tbe/tbe_json/single_tbe_json_creator.h"
#include <memory>
#include <list>
#include <string>
#include <algorithm>
#include "frontend/parallel/ops_info/ops_utils.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/tbe/tbe_adapter.h"
#include "backend/kernel_compiler/tbe/tbe_convert_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "backend/kernel_compiler/tbe/tbe_utils.h"
#include "runtime/dev.h"
#include "utils/ms_utils.h"
#include "utils/json_operation_utils.h"
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
namespace mindspore::kernel {
using mindspore::kernel::tbe::TbeAdapter;
bool SingleTbeJsonCreator::GenJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(kernel_json);
auto op_name = AnfAlgo::GetCNodeName(anf_node);
MS_LOG(DEBUG) << "Start, node [ " << op_name << " ].";
nlohmann::json soc_info_json;
std::vector<nlohmann::json> op_list;
kernel::tbe::TbeUtils::GenSocInfo(&soc_info_json);
if (!GenOpListJson(anf_node, &op_list)) {
MS_LOG(ERROR) << "Anf Node [" << op_name << "] generate op_list json failed";
return false;
}
(*kernel_json)[kJSocInfo] = soc_info_json;
(*kernel_json)[kJOpList] = op_list;
GenFusionOpName(kernel_json);
AddOpNameForComputeNode(kernel_json);
(*kernel_json)[kJFullName] = anf_node->fullname_with_scope();
(*kernel_json)[kJGraphName] = "";
(*kernel_json)[kJScopeID] = -1;
(*kernel_json)[kJL1Size] = -1;
MS_LOG(DEBUG) << "Json info name is : " << GetJsonName() << ", kernel json:" << kernel_json->dump();
return true;
}
bool SingleTbeJsonCreator::GenOpListJson(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(op_list_json);
MS_LOG(DEBUG) << "Start.";
nlohmann::json compute_json;
if (!GenComputeJson(anf_node, &compute_json)) {
MS_LOG(ERROR) << "Anf Node [" << AnfAlgo::GetCNodeName(anf_node) << "] generate compute json failed";
return false;
}
GenDataJson(anf_node, compute_json, op_list_json);
(*op_list_json).push_back(compute_json);
MS_LOG(DEBUG) << "End.";
return true;
}
void SingleTbeJsonCreator::GenDataJson(const AnfNodePtr &anf_node, const nlohmann::json &compute_json,
std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(op_list_json);
MS_LOG(DEBUG) << "Start";
auto op_name = AnfAlgo::GetCNodeName(anf_node);
auto op_info_ptr = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(op_name, anf_node);
auto inputs_ptr = op_info_ptr->inputs_ptr();
auto inputs_json = GetJsonValue<std::vector<nlohmann::json>>(compute_json, kJInputDesc);
for (size_t i = 0; i < inputs_ptr.size(); i++) {
auto input_json = inputs_json.at(i);
auto input_ptr = inputs_ptr[i];
MS_EXCEPTION_IF_NULL(input_ptr);
nlohmann::json data_json;
std::vector<nlohmann::json> output_desc;
if (input_json.is_array()) {
data_json[kJName] = input_ptr->name() + "_dynamic";
auto tmp_inputs = input_json.get<std::vector<nlohmann::json>>();
std::copy(tmp_inputs.begin(), tmp_inputs.end(), std::back_inserter(output_desc));
} else {
data_json[kJName] = GetJsonValue<std::string>(input_json, kJName);
output_desc.push_back(input_json);
}
data_json[kJOutputDesc] = output_desc;
data_json[kJType] = kJData;
(*op_list_json).push_back(data_json);
}
MS_LOG(DEBUG) << "End";
}
bool SingleTbeJsonCreator::GenInputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
MS_LOG(DEBUG) << "Start.";
auto op_name = AnfAlgo::GetCNodeName(anf_node);
auto op_info_ptr = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(op_name, anf_node);
MS_EXCEPTION_IF_NULL(op_info_ptr);
std::vector<OpIOInfoPtr> inputs_ptr = op_info_ptr->inputs_ptr();
std::vector<nlohmann::json> inputs_json;
if (inputs_ptr.empty()) {
MS_LOG(WARNING) << op_name << " registration info has no input info.";
(*compute_json)[kJInputDesc] = inputs_json;
return true;
}
std::vector<size_t> inputs_tensor_num;
if (!TbeJsonUtils::GetInputsRealNum(anf_node, inputs_ptr, &inputs_tensor_num)) {
return false;
}
std::vector<nlohmann::json> inputs_desc;
size_t real_input_num = AnfAlgo::GetInputTensorNum(anf_node);
for (size_t i = 0; i < real_input_num; i++) {
nlohmann::json input_desc;
GenInputDescJson(anf_node, i, &input_desc);
inputs_desc.emplace_back(input_desc);
}
size_t need_input_num = std::accumulate(inputs_tensor_num.begin(), inputs_tensor_num.end(), static_cast<size_t>(0));
// gen optional desc
for (size_t i = AnfAlgo::GetInputTensorNum(anf_node); i < need_input_num; i++) {
nlohmann::json input_desc;
input_desc[kJValid] = false;
input_desc[kJShape] = kJNull;
input_desc[kJDataType] = 0;
inputs_desc.emplace_back(input_desc);
}
std::vector<nlohmann::json> inputs_list;
if (!AssignInputsJson(anf_node, inputs_desc, inputs_tensor_num, inputs_ptr, &inputs_list)) {
return false;
}
TbeAdapter::InputOrderPass<nlohmann::json>(op_name, inputs_list, &inputs_json);
(*compute_json)[kJInputDesc] = inputs_json;
MS_LOG(DEBUG) << "End.";
return true;
}
void SingleTbeJsonCreator::GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index,
nlohmann::json *input_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(input_desc);
GenDesCommonJson(input_desc);
auto shape = AnfAlgo::GetInputDeviceShape(anf_node, real_input_index);
auto ori_shape = AnfAlgo::GetPrevNodeOutputInferShape(anf_node, real_input_index);
if (shape.empty()) {
shape.emplace_back(1);
}
if (ori_shape.empty()) {
ori_shape.emplace_back(1);
}
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
auto format = AnfAlgo::GetInputFormat(anf_node, real_input_index);
format = TbeAdapter::FormatPass(format, ori_shape.size());
format =
(def_format == kOpFormat_NCDHW && k3DFormatSet.find(format) == k3DFormatSet.end()) ? kOpFormat_NCDHW : format;
(*input_desc)[kJDtype] = tbe::TypeIdToString(AnfAlgo::GetInputDeviceDataType(anf_node, real_input_index));
(*input_desc)[kJDataType] = GetJsonValue<std::string>(*input_desc, kJDtype);
(*input_desc)[kJOriShape] = ori_shape;
(*input_desc)[kJOriFormat] = def_format;
(*input_desc)[kJShape] = shape;
(*input_desc)[kJFormat] = format;
(*input_desc)[kJValid] = true;
(*input_desc)[kJRange] = tbe::TbeDynamicShapeUtil::GetInputDynamicRange(anf_node, real_input_index, format);
}
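// Rough shape of one resulting input desc (sketch; all field values and the
// kJ* key spellings are assumed for illustration):
//   {"dtype": "float16", "data_type": "float16",
//    "ori_shape": [2, 3, 16, 16], "ori_format": "NCHW",
//    "shape": [2, 1, 16, 16, 16], "format": "NC1HWC0",
//    "valid": true, "range": [[2, 2], [1, 1], [16, 16], [16, 16], [16, 16]]}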
void SingleTbeJsonCreator::GenOutputDescJson(const AnfNodePtr &anf_node, size_t node_out_idx,
nlohmann::json *output_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDescJson(anf_node, node_out_idx, node_out_idx, output_desc);
output_desc->erase(kJOutputIndex);
(*output_desc)[kJValid] = true;
(*output_desc)[kJRange] =
tbe::TbeDynamicShapeUtil::GetOutputDynamicRange(anf_node, node_out_idx, (*output_desc)[kJFormat]);
}
bool SingleTbeJsonCreator::AssignInputsJson(const AnfNodePtr &anf_node, const std::vector<nlohmann::json> &inputs_desc,
const std::vector<size_t> &inputs_tensor_num,
const std::vector<OpIOInfoPtr> &inputs_ptr,
std::vector<nlohmann::json> *inputs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(inputs_json);
MS_LOG(DEBUG) << "Start.";
size_t inputs_desc_index = 0;
for (size_t i = 0; i < inputs_tensor_num.size(); i++) {
auto input_ptr = inputs_ptr[i];
MS_EXCEPTION_IF_NULL(input_ptr);
auto param_type = input_ptr->param_type();
if (param_type == kJParamOptional) {
nlohmann::json current_input_desc;
// DynamicRNN and DynamicGRUV2 have placeholder inputs
if (TbeAdapter::IsPlaceHolderInput(anf_node, input_ptr)) {
current_input_desc[kJValid] = false;
current_input_desc[kJShape] = kJNull;
current_input_desc[kJDataType] = 0;
} else {
current_input_desc = inputs_desc.at(inputs_desc_index);
}
current_input_desc[kJName] = input_ptr->name() + "_optional_";
current_input_desc[kJParamType] = input_ptr->param_type();
(*inputs_json).emplace_back(current_input_desc);
inputs_desc_index++;
} else if (param_type == kJParamDynamic) {
std::vector<nlohmann::json> dynamic_inputs_desc;
for (size_t j = 0; j < inputs_tensor_num[i]; j++) {
auto current_input_desc = inputs_desc.at(inputs_desc_index);
current_input_desc[kJName] = input_ptr->name() + "_dynamic_" + std::to_string(j);
current_input_desc[kJParamType] = input_ptr->param_type();
dynamic_inputs_desc.emplace_back(current_input_desc);
inputs_desc_index++;
}
(*inputs_json).emplace_back(dynamic_inputs_desc);
} else if (param_type == kJParamRequred) {
auto current_input_desc = inputs_desc.at(inputs_desc_index);
current_input_desc[kJName] = input_ptr->name() + "_0";
current_input_desc[kJParamType] = input_ptr->param_type();
(*inputs_json).emplace_back(current_input_desc);
inputs_desc_index++;
} else {
MS_LOG(ERROR) << "Unsupported input param type:[" << param_type
<< "], supported list: {optional, dynamic, required}.";
return false;
}
}
MS_LOG(DEBUG) << "End.";
return true;
}
bool SingleTbeJsonCreator::GenOutputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
MS_LOG(DEBUG) << "Start.";
auto op_name = AnfAlgo::GetCNodeName(anf_node);
auto op_info_ptr = mindspore::kernel::tbe::TbeDynamicShapeUtil::FindOp(op_name, anf_node);
MS_EXCEPTION_IF_NULL(op_info_ptr);
std::vector<size_t> outputs_tensor_num;
auto outputs_ptr = op_info_ptr->outputs_ptr();
if (!TbeJsonUtils::GetOutputsRealNum(anf_node, outputs_ptr, &outputs_tensor_num)) {
return false;
}
size_t sum_outputs_num =
std::accumulate(outputs_tensor_num.begin(), outputs_tensor_num.end(), static_cast<size_t>(0));
size_t real_output_num = AnfAlgo::GetOutputTensorNum(anf_node);
std::vector<nlohmann::json> outputs_desc;
for (size_t i = 0; i < real_output_num; i++) {
nlohmann::json output_desc;
GenOutputDescJson(anf_node, i, &output_desc);
outputs_desc.emplace_back(output_desc);
}
for (size_t i = real_output_num; i < sum_outputs_num; i++) {
nlohmann::json output_desc;
output_desc[kJValid] = false;
output_desc[kJShape] = kJNull;
outputs_desc.emplace_back(output_desc);
}
std::vector<nlohmann::json> outputs_json;
if (!AssignOutputsJson(anf_node, outputs_desc, outputs_tensor_num, outputs_ptr, &outputs_json)) {
return false;
}
(*compute_json)[kJOutputDesc] = outputs_json;
MS_LOG(DEBUG) << "End.";
return true;
}
bool SingleTbeJsonCreator::AssignOutputsJson(const AnfNodePtr &anf_node,
const std::vector<nlohmann::json> &outputs_desc,
const std::vector<size_t> &outputs_tensor_num,
const std::vector<OpIOInfoPtr> &outputs_ptr,
std::vector<nlohmann::json> *outputs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_LOG(DEBUG) << "Start.";
size_t outputs_desc_index = 0;
for (size_t i = 0; i < outputs_tensor_num.size(); i++) {
auto output_ptr = outputs_ptr[i];
MS_EXCEPTION_IF_NULL(output_ptr);
auto param_type = output_ptr->param_type();
if (param_type == kJParamDynamic) {
std::vector<nlohmann::json> dynamic_outputs_desc;
for (size_t j = 0; j < outputs_tensor_num[i]; j++) {
auto current_input_desc = outputs_desc.at(outputs_desc_index);
current_input_desc[kJName] = output_ptr->name();
current_input_desc[kJParamType] = output_ptr->param_type();
dynamic_outputs_desc.emplace_back(current_input_desc);
outputs_desc_index++;
}
(*outputs_json).emplace_back(dynamic_outputs_desc);
} else if (param_type == kJParamRequred || param_type == kJParamOptional) {
auto current_input_desc = outputs_desc.at(outputs_desc_index);
current_input_desc[kJName] = output_ptr->name();
current_input_desc[kJParamType] = output_ptr->param_type();
(*outputs_json).emplace_back(current_input_desc);
outputs_desc_index++;
} else {
MS_LOG(ERROR) << "Unsupported output param type:[" << param_type
<< "], supported list: {required, dynamic, optional}.";
return false;
}
}
MS_LOG(DEBUG) << "End.";
return true;
}
void SingleTbeJsonCreator::GenOtherJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
MS_LOG(DEBUG) << "Start.";
(*compute_json)[kJOriName] = {anf_node->fullname_with_scope()};
(*compute_json)[kJBuildType] = kAccuratelyBuild;
(*compute_json)[kJMissSupportInfo] = "";
(*compute_json)[kJMaxKernelID] = 10;
(*compute_json)[kJUnknowShape] = tbe::TbeDynamicShapeUtil::GetDynamicShapeAttr(anf_node);
MS_LOG(DEBUG) << "End.";
}
void SelectTbeJsonCreator::GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDesCommonJson(output_desc);
std::vector<int64_t> shape;
std::vector<int64_t> ori_shape;
AnfAlgo::GetRealDynamicShape(AnfAlgo::GetOutputInferShape(anf_node, node_out_idx), NOT_NULL(&ori_shape));
if (ori_shape.empty()) {
ori_shape.emplace_back(1);
}
shape = ori_shape;
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
auto format = def_format;
(*output_desc)[kJDataType] = tbe::TypeIdToString(AnfAlgo::GetOutputInferDataType(anf_node, node_out_idx));
(*output_desc)[kJDtype] = GetJsonValue<std::string>(*output_desc, kJDataType);
(*output_desc)[kJFormat] = format;
(*output_desc)[kJOriFormat] = def_format;
(*output_desc)[kJOriShape] = ori_shape;
(*output_desc)[kJShape] = shape;
(*output_desc)[kJOutputIndex] = desc_output_idx;
}
void SelectTbeJsonCreator::GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index,
nlohmann::json *input_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDesCommonJson(input_desc);
auto shape = AnfAlgo::GetPrevNodeOutputInferShape(anf_node, real_input_index);
if (shape.empty()) {
shape.emplace_back(1);
}
auto ori_shape = shape;
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
auto format = def_format;
(*input_desc)[kJDtype] = tbe::TypeIdToString(AnfAlgo::GetPrevNodeOutputInferDataType(anf_node, real_input_index));
(*input_desc)[kJDataType] = GetJsonValue<std::string>(*input_desc, kJDtype);
(*input_desc)[kJOriShape] = ori_shape;
(*input_desc)[kJOriFormat] = def_format;
(*input_desc)[kJShape] = shape;
(*input_desc)[kJFormat] = format;
(*input_desc)[kJValid] = true;
(*input_desc)[kJRange] = tbe::TbeDynamicShapeUtil::GetInputDynamicRange(anf_node, real_input_index, format);
}
bool SelectTbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(op_info_ptr);
MS_EXCEPTION_IF_NULL(attrs_json);
auto node_name = AnfAlgo::GetCNodeName(anf_node);
if (node_name == parallel::LAYER_NORM) {
for (auto json_item = attrs_json->begin(); json_item < attrs_json->end(); json_item++) {
if (GetJsonValue<std::string>(*json_item, kJName) == kAttrEpsilon) {
json_item = attrs_json->erase(json_item);
}
}
}
return true;
}
void CheckTbeJsonCreator::GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDesCommonJson(output_desc);
std::vector<int64_t> shape;
std::vector<int64_t> ori_shape;
AnfAlgo::GetRealDynamicShape(AnfAlgo::GetOutputInferShape(anf_node, node_out_idx), NOT_NULL(&ori_shape));
if (ori_shape.empty()) {
ori_shape.emplace_back(1);
}
shape = ori_shape;
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
auto format = AnfAlgo::GetOutputFormat(anf_node, node_out_idx);
format = TbeAdapter::FormatPass(format, ori_shape.size());
(*output_desc)[kJDataType] = tbe::TypeIdToString(AnfAlgo::GetOutputDeviceDataType(anf_node, node_out_idx));
(*output_desc)[kJDtype] = GetJsonValue<std::string>(*output_desc, kJDataType);
(*output_desc)[kJFormat] = format;
(*output_desc)[kJOriFormat] = def_format;
(*output_desc)[kJOriShape] = ori_shape;
(*output_desc)[kJShape] = shape;
(*output_desc)[kJOutputIndex] = desc_output_idx;
}
void CheckTbeJsonCreator::GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index,
nlohmann::json *input_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDesCommonJson(input_desc);
auto shape = AnfAlgo::GetPrevNodeOutputInferShape(anf_node, real_input_index);
if (shape.empty()) {
shape.emplace_back(1);
}
auto ori_shape = shape;
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
auto format = AnfAlgo::GetInputFormat(anf_node, real_input_index);
format = TbeAdapter::FormatPass(format, ori_shape.size());
(*input_desc)[kJDtype] = tbe::TypeIdToString(AnfAlgo::GetInputDeviceDataType(anf_node, real_input_index));
(*input_desc)[kJDataType] = GetJsonValue<std::string>(*input_desc, kJDtype);
(*input_desc)[kJOriShape] = ori_shape;
(*input_desc)[kJOriFormat] = def_format;
(*input_desc)[kJShape] = shape;
(*input_desc)[kJFormat] = format;
(*input_desc)[kJValid] = true;
(*input_desc)[kJRange] = tbe::TbeDynamicShapeUtil::GetInputDynamicRange(anf_node, real_input_index, format);
}
} // namespace mindspore::kernel

View File

@ -0,0 +1,73 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_SINGLE_TBE_JSON_CREATOR_H
#define MINDSPORE_SINGLE_TBE_JSON_CREATOR_H
#include <vector>
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_creator.h"
namespace mindspore::kernel {
class SingleTbeJsonCreator : public TbeJsonCreator {
public:
SingleTbeJsonCreator() = default;
virtual ~SingleTbeJsonCreator() = default;
bool GenJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json) override;
protected:
bool GenOpListJson(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json);
void GenDataJson(const AnfNodePtr &anf_node, const nlohmann::json &compute_json,
std::vector<nlohmann::json> *op_list_json);
bool GenInputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) override;
virtual void GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index, nlohmann::json *input_desc);
bool AssignInputsJson(const AnfNodePtr &anf_node, const std::vector<nlohmann::json> &inputs_desc,
const std::vector<size_t> &inputs_tensor_num, const std::vector<OpIOInfoPtr> &inputs_ptr,
std::vector<nlohmann::json> *inputs_json);
void GenOutputDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, nlohmann::json *output_desc);
bool GenOutputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) override;
bool AssignOutputsJson(const AnfNodePtr &anf_node, const std::vector<nlohmann::json> &outputs_desc,
const std::vector<size_t> &outputs_tensor_num, const std::vector<OpIOInfoPtr> &outputs_ptr,
std::vector<nlohmann::json> *outputs_json);
void GenOtherJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) override;
};
class CheckTbeJsonCreator : public SingleTbeJsonCreator {
public:
CheckTbeJsonCreator() = default;
~CheckTbeJsonCreator() override = default;
protected:
void GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc) override;
void GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index, nlohmann::json *input_desc) override;
};
class SelectTbeJsonCreator : public SingleTbeJsonCreator {
public:
SelectTbeJsonCreator() = default;
~SelectTbeJsonCreator() override = default;
protected:
bool AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) override;
void GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc) override;
void GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index, nlohmann::json *input_desc) override;
};
class BuildTbeJsonCreator : public SingleTbeJsonCreator {
public:
BuildTbeJsonCreator() = default;
~BuildTbeJsonCreator() override = default;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_SINGLE_TBE_JSON_CREATOR_H

View File

@@ -0,0 +1,420 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_creator.h"
#include <memory>
#include <map>
#include <utility>
#include <algorithm>
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/common_utils.h"
#include "backend/kernel_compiler/tbe/tbe_adapter.h"
#include "backend/kernel_compiler/tbe/tbe_convert_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "utils/ms_context.h"
#include "runtime/dev.h"
#include "utils/ms_utils.h"
#include "utils/json_operation_utils.h"
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
namespace mindspore::kernel {
namespace {
std::unordered_map<std::string, TypeID> type_id_map = {{kVTypeInt, TypeID::kIntID},
{kVTypeInt64, TypeID::kInt64ID},
{kVTypeStr, TypeID::kStrID},
{kVTypeBool, TypeID::kBoolID},
{kVTypeFloat, TypeID::kFloatID},
{kVTypeListInt, TypeID::kListIntID},
{kVTypeListFloat, TypeID::kListFloatID},
{kVTypeListUInt64, TypeID::kListUInt64ID},
{kVTypeListListInt, TypeID::kListListIntID}};
bool ParseListIntValue(const mindspore::ValuePtr &value, std::vector<int64_t> *attr_value) {
auto value_type = value->type();
if (value_type == nullptr) {
MS_LOG(ERROR) << "Value's type is null.";
return false;
}
if (value_type->ToString() == kVTypeInt64) {
attr_value->push_back(GetValue<int64_t>(value));
} else {
auto vec = value->isa<ValueTuple>() ? value->cast<ValueTuplePtr>()->value() : value->cast<ValueListPtr>()->value();
if (!vec.empty()) {
if (vec[0]->isa<Int32Imm>()) {
std::vector<int32_t> attr_value_me = GetValue<std::vector<int32_t>>(value);
(void)std::transform(attr_value_me.begin(), attr_value_me.end(), std::back_inserter(*attr_value),
[](const int &value) { return static_cast<int64_t>(value); });
} else {
*attr_value = GetValue<std::vector<int64_t>>(value);
}
}
}
return true;
}
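// Example (illustrative): a scalar Int64 value 2 becomes {2}; a ValueTuple of
// Int32 values (1, 2, 3) is widened element-wise into std::vector<int64_t>{1, 2, 3}.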
bool ParseAttrValue(const std::string &type, const mindspore::ValuePtr &value, nlohmann::json *attr_obj) {
MS_EXCEPTION_IF_NULL(attr_obj);
if (value == nullptr) {
MS_LOG(ERROR) << "Node's attr value is null.";
return false;
}
auto result = type_id_map.find(type);
if (result == type_id_map.end()) {
MS_LOG(ERROR) << "Type: " << type << "not support";
return false;
}
switch (result->second) {
case TypeID::kIntID:
(*attr_obj)[kJValue] = value->isa<Int32Imm>() ? GetValue<int>(value) : GetValue<int64_t>(value);
break;
case TypeID::kInt64ID:
(*attr_obj)[kJValue] = GetValue<int64_t>(value);
break;
case TypeID::kStrID: {
auto attr_value = GetValue<std::string>(value);
(*attr_obj)[kJValue] = attr_value == kOpFormat_FRAC_Z ? kJOpFormat_FRACTAL_Z : attr_value;
break;
}
case TypeID::kBoolID:
(*attr_obj)[kJValue] = GetValue<bool>(value);
break;
case TypeID::kFloatID:
(*attr_obj)[kJValue] = GetValue<float>(value);
break;
case TypeID::kListIntID: {
std::vector<int64_t> attr_value;
if (!ParseListIntValue(value, &attr_value)) {
MS_LOG(ERROR) << "Parse list_value failed, maybe the input is a nullptr.";
return false;
}
(*attr_obj)[kJValue] = attr_value;
break;
}
case TypeID::kListFloatID: {
auto value_type = value->type();
if (value_type == nullptr) {
MS_LOG(ERROR) << "Value's type is null.";
return false;
}
(*attr_obj)[kJValue] = value_type->ToString() == kVTypeFloat ? std::vector<float>{GetValue<float>(value)}
: GetValue<std::vector<float>>(value);
break;
}
case TypeID::kListUInt64ID:
(*attr_obj)[kJValue] = GetValue<std::vector<size_t>>(value);
break;
case TypeID::kListListIntID:
(*attr_obj)[kJValue] = GetValue<std::vector<std::vector<int64_t>>>(value);
break;
}
return true;
}
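// Example (illustrative): for an attr of type "str" whose value is "FracZ",
// ParseAttrValue writes (*attr_obj)[kJValue] = "FRACTAL_Z", since FracZ is
// normalized to the TBE format name FRACTAL_Z.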
bool ParseAttrDefaultValue(const std::string &type, const std::string &value, nlohmann::json *attr_obj) {
MS_EXCEPTION_IF_NULL(attr_obj);
auto result = type_id_map.find(type);
if (result == type_id_map.end()) {
MS_LOG(ERROR) << "Type: " << type << "not support";
return false;
}
switch (result->second) {
case TypeID::kIntID:
(*attr_obj)[kJValue] = std::stoi(value);
break;
case TypeID::kInt64ID:
(*attr_obj)[kJValue] = std::stoll(value);
break;
case TypeID::kStrID:
(*attr_obj)[kJValue] = value;
break;
case TypeID::kBoolID: {
bool attr_value = false;
std::istringstream(value) >> std::boolalpha >> attr_value;
(*attr_obj)[kJValue] = attr_value;
break;
}
case TypeID::kFloatID:
(*attr_obj)[kJValue] = std::stof(value);
break;
case TypeID::kListIntID: {
std::stringstream string_value(value);
std::string list_elem;
std::vector<int64_t> attrs_value;
while (std::getline(string_value, list_elem, ',')) {
attrs_value.push_back(std::stoll(list_elem));
}
(*attr_obj)[kJValue] = attrs_value;
break;
}
default:
MS_LOG(ERROR) << "Type: " << type << "not support";
return false;
}
return true;
}
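// Example (illustrative): for type "listInt" with default value "1,2,3", the
// comma-split loop above yields (*attr_obj)[kJValue] = [1, 2, 3]; for type
// "bool" with value "true", std::boolalpha parsing yields true.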
} // namespace
bool TbeJsonCreator::GenComputeJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
MS_LOG(DEBUG) << "Start.";
if (!GenInputsJson(anf_node, compute_json)) {
MS_LOG(ERROR) << "generate inputs json failed, node full name:" << anf_node->fullname_with_scope();
return false;
}
if (!GenOutputsJson(anf_node, compute_json)) {
MS_LOG(ERROR) << "generate outputs json failed, node full name:" << anf_node->fullname_with_scope();
return false;
}
GenOutputDataDescJson(anf_node, compute_json);
GenAttrsDescJson(anf_node, compute_json);
GenComputeCommonJson(anf_node, compute_json);
GenOtherJson(anf_node, compute_json);
MS_LOG(DEBUG) << "End.";
return true;
}
void TbeJsonCreator::GenFusionOpName(nlohmann::json *kernel_json, std::string prefix) {
json_name_.clear();
size_t hash_id = GenJsonHash((*kernel_json));
auto context_ptr = MsContext::GetInstance();
MS_EXCEPTION_IF_NULL(context_ptr);
json_name_ = std::move(prefix);
auto device_id = context_ptr->get_param<uint32_t>(MS_CTX_DEVICE_ID);
for (const auto &node_json : (*kernel_json)[kJOpList]) {
if (GetJsonValue<std::string>(node_json, kJType) != kJData) {
json_name_.append(node_json[kJFuncName]);
json_name_.append("_");
}
}
json_name_ = json_name_ + std::to_string(hash_id) + "_" + std::to_string(device_id);
MS_LOG(DEBUG) << "Generate Json name: " << json_name_;
(*kernel_json)[kJFusionOpName] = json_name_;
}
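// Example (illustrative): with prefix "te_fusion_", compute nodes whose func
// names are "relu" and "add", hash 123 and device id 0, json_name_ becomes
// "te_fusion_relu_add_123_0".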
void TbeJsonCreator::DeleteDescName(nlohmann::json *desc_jsons) {
for (auto &desc_json : (*desc_jsons)) {
if (desc_json.is_array()) {
for (auto &desc_item : desc_json) {
desc_item.erase(kJName);
}
} else {
desc_json.erase(kJName);
}
}
}
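// Note: names, original names and patterns are erased before hashing so that
// two graphs that differ only in node naming map to the same kernel hash.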
size_t TbeJsonCreator::GenJsonHash(nlohmann::json tbe_json) {
auto &op_lists = tbe_json.at(kJOpList);
for (auto &op : op_lists) {
op.erase(kJName);
op.erase(kJOriName);
op.erase(kJPattern);
DeleteDescName(&op.at(kJOutputDesc));
if (op[kJType] != kJData) {
DeleteDescName(&op.at(kJInputDesc));
}
}
return std::hash<std::string>()(tbe_json.dump());
}
void TbeJsonCreator::AddOpNameForComputeNode(nlohmann::json *kernel_json) {
auto op_name = GetJsonValue<std::string>((*kernel_json), kJFusionOpName);
for (auto &node_json : (*kernel_json).at(kJOpList)) {
// compute node
if (GetJsonValue<std::string>(node_json, kJType) != kJData) {
node_json[kJOpName] = op_name;
}
}
}
bool TbeJsonCreator::GenAttrsJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info, nlohmann::json *attrs_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(op_info);
MS_EXCEPTION_IF_NULL(attrs_json);
auto attrs_ptr = op_info->attrs_ptr();
if (!AttrsJsonPreProcessing(anf_node, &attrs_ptr, attrs_json)) {
MS_LOG(EXCEPTION) << "PreProcessing node attr error, node: " << anf_node->fullname_with_scope();
}
std::string op_name = AnfAlgo::GetCNodeName(anf_node);
auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
MS_EXCEPTION_IF_NULL(primitive);
for (const auto &attr_ptr : attrs_ptr) {
std::string attr_name = attr_ptr->name();
nlohmann::json attr_obj;
attr_obj[kJName] = attr_name;
if (primitive->GetAttr(attr_name) != nullptr) {
if (!ParseAttrValue(attr_ptr->type(), primitive->GetAttr(attr_name), &attr_obj)) {
MS_LOG(EXCEPTION) << "op [ " << op_info->op_name() << " ]'s attr [ " << attr_name << " ] generates failed";
}
attr_obj[kJValid] = true;
} else {
auto default_value = attr_ptr->default_value();
if (!default_value.empty()) {
if (!ParseAttrDefaultValue(attr_ptr->type(), default_value, &attr_obj)) {
MS_LOG(EXCEPTION) << "op [ " << op_info->op_name() << " ]'s default attr [ " << attr_name
<< " ] generates failed";
}
attr_obj[kJValid] = true;
} else {
MS_LOG(INFO) << "op " << op_name << "'s attr \"" << attr_name << "\" should have a default value.";
if (!op_info->impl_path().empty() && attr_ptr->param_type() == kJParamRequred) {
MS_LOG(EXCEPTION) << "Op name: " << op_info->op_name() << " attr: " << attr_name
<< " is required, but not set.";
} else {
attr_obj[kJValid] = false;
}
}
}
(*attrs_json).push_back(attr_obj);
}
if (!AttrsJsonPostProcessing(anf_node, op_info, attrs_json)) {
MS_LOG(EXCEPTION) << "PostProcessing node attr error, node: " << anf_node->fullname_with_scope();
}
return true;
}
bool TbeJsonCreator::GenAttrsDescJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
auto cnode = anf_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
auto op_name = AnfAlgo::GetCNodeName(cnode);
auto op_info_ptr = tbe::TbeDynamicShapeUtil::FindOp(op_name, cnode);
nlohmann::json attrs_json;
GenAttrsJson(cnode, op_info_ptr, &attrs_json);
nlohmann::json attrs_desc;
for (const auto &attr : attrs_json) {
if (GetJsonValue<std::string>(attr, kJName) != kJIsRef && GetJsonValue<bool>(attr, kJValid)) {
attrs_desc.push_back(attr.at(kJValue));
}
}
if (!attrs_desc.empty()) {
(*compute_json)[kJAttrDesc] = attrs_desc;
}
return true;
}
void TbeJsonCreator::GenComputeCommonJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
auto cnode = anf_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
auto op_name = AnfAlgo::GetCNodeName(cnode);
auto op_info_ptr = tbe::TbeDynamicShapeUtil::FindOp(op_name, cnode);
auto func_name = op_info_ptr->kernel_name();
(*compute_json)[kJFuncName] = func_name;
auto python_module_path = op_info_ptr->impl_path();
if (python_module_path.empty()) {
python_module_path = kPyPath;
}
auto iter = tbe::opTypeAdapter.find(op_name);
(*compute_json)[kJType] = (iter != tbe::opTypeAdapter.end()) ? iter->second : op_name;
(*compute_json)[kJPyModulePath] = python_module_path;
(*compute_json)[kJDynamicCompileStatic] = false;
(*compute_json)[kJInt64Mode] = false;
(*compute_json)[kJName] = cnode->fullname_with_scope();
(*compute_json)[kJPattern] = kernel::GetFusionNameByType(AnfAlgo::GetFusionType(cnode));
(*compute_json)[kJModuleName] = kJModuleNamePrefix + func_name;
}
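// Example (illustrative): for a ReLU node the fields above resolve to
// type "Relu" (via opTypeAdapter), module_name "impl." + kernel_name, and
// py_module_path falling back to the built-in TBE op path when impl_path is empty.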
// node_out_idx: node output index
// desc_output_idx: the output index written into the generated json
void TbeJsonCreator::GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc) {
MS_EXCEPTION_IF_NULL(anf_node);
GenDesCommonJson(output_desc);
std::vector<int64_t> shape;
std::vector<int64_t> ori_shape;
AnfAlgo::GetRealDynamicShape(AnfAlgo::GetOutputDeviceShape(anf_node, node_out_idx), NOT_NULL(&shape));
AnfAlgo::GetRealDynamicShape(AnfAlgo::GetOutputInferShape(anf_node, node_out_idx), NOT_NULL(&ori_shape));
if (shape.empty()) {
shape.emplace_back(1);
}
if (ori_shape.empty()) {
ori_shape.emplace_back(1);
}
auto full_name = anf_node->fullname_with_scope();
auto output_desc_name = node_out_idx > 0 ? (full_name + "_" + std::to_string(node_out_idx)) : full_name;
// !! Note: format: only a data node's output uses it
auto format = AnfAlgo::GetOutputFormat(anf_node, node_out_idx);
format = tbe::TbeAdapter::FormatPass(format, ori_shape.size());
auto def_format = TbeJsonUtils::IsNeedChangeDefaultFormat(anf_node) ? kOpFormat_NCDHW : kOpFormat_NCHW;
format =
(def_format == kOpFormat_NCDHW && k3DFormatSet.find(format) == k3DFormatSet.end()) ? kOpFormat_NCDHW : format;
(*output_desc)[kJDataType] = tbe::TypeIdToString(AnfAlgo::GetOutputDeviceDataType(anf_node, node_out_idx));
(*output_desc)[kJDtype] = GetJsonValue<std::string>(*output_desc, kJDataType);
(*output_desc)[kJFormat] = format;
(*output_desc)[kJOriFormat] = def_format;
(*output_desc)[kJOriShape] = ori_shape;
(*output_desc)[kJShape] = shape;
(*output_desc)[kJName] = output_desc_name;
// !! Note: output_index: only a node's output uses it
(*output_desc)[kJOutputIndex] = desc_output_idx;
}
void TbeJsonCreator::GenDesCommonJson(nlohmann::json *output_desc) {
MS_EXCEPTION_IF_NULL(output_desc);
(*output_desc)[kJL1AddrOffset] = 0;
(*output_desc)[kJL1FusionType] = -1;
(*output_desc)[kJL1WorkspaceSize] = -1;
(*output_desc)[kJAddrType] = 0;
(*output_desc)[kJSliceOffset] = nlohmann::json::array();
(*output_desc)[kJSplitIndex] = 0;
(*output_desc)[kJTotalShape] = nlohmann::json::array();
(*output_desc)[kJValidShape] = nlohmann::json::array();
}
bool TbeJsonCreator::AttrsJsonPreProcessing(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *attrs_ptr,
nlohmann::json *attrs_json) {
tbe::TbeAdapter::CastAttrJsonPrePass(anf_node, attrs_ptr, attrs_json);
return true;
}
bool TbeJsonCreator::GenOutputDataDescJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(compute_json);
auto op_desc = AnfAlgo::GetOutputDataDesc(anf_node);
if (!op_desc.empty() && op_desc.at(0).find(kJListArgs) != op_desc.at(0).end()) {
(*compute_json)[kJOutputDataDesc] = GetJsonValue<nlohmann::json>(op_desc.at(0), kJListArgs);
} else {
auto outputs_desc = GetJsonValue<std::vector<nlohmann::json>>(*compute_json, kJOutputDesc);
std::vector<nlohmann::json> outputs_data_desc;
for (auto output_desc : outputs_desc) {
if (output_desc.find(kJOriShape) != output_desc.end()) {
output_desc.erase(kJName);
outputs_data_desc.push_back(output_desc);
}
}
(*compute_json)[kJOutputDataDesc] = outputs_data_desc;
}
return true;
}
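// Note: when the first output desc carries a "list_args" entry, that list is
// forwarded as output_data_desc unchanged; otherwise the node's own output
// descs (with "name" stripped) are used.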
bool TbeJsonCreator::AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json) {
return true;
}
} // namespace mindspore::kernel

View File

@ -0,0 +1,78 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_TBE_JSON_CREATOR_H_
#define MINDSPORE_TBE_JSON_CREATOR_H_
#include <string>
#include <unordered_map>
#include <memory>
#include <map>
#include <utility>
#include <vector>
#include <nlohmann/json.hpp>
#include "ir/dtype.h"
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "backend/kernel_compiler/oplib/oplib.h"
#include "backend/kernel_compiler/tbe/tbe_adapter.h"
namespace mindspore::kernel {
enum class TypeID {
kIntID = 0,
kInt64ID,
kStrID,
kBoolID,
kFloatID,
kListIntID,
kListFloatID,
kListUInt64ID,
kListListIntID
};
class TbeJsonCreator {
public:
TbeJsonCreator() = default;
virtual ~TbeJsonCreator() = default;
virtual bool GenJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json) { return false; }
virtual bool GenJson(const FusionScopeInfo &fusion_scope_info, nlohmann::json *fusion_json) { return false; }
std::string GetJsonName() { return json_name_; }
protected:
bool GenComputeJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json);
virtual bool GenInputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) { return false; }
virtual bool GenOutputsJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) { return false; }
bool GenOutputDataDescJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json);
void GenComputeCommonJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json);
virtual void GenOtherJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json) {}
bool GenAttrsDescJson(const AnfNodePtr &anf_node, nlohmann::json *compute_json);
bool GenAttrsJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr, nlohmann::json *attrs_json);
bool AttrsJsonPreProcessing(const AnfNodePtr &anf_node, std::vector<OpAttrPtr> *attrs_ptr,
nlohmann::json *attrs_json);
virtual bool AttrsJsonPostProcessing(const AnfNodePtr &anf_node, const OpInfoPtr &op_info_ptr,
nlohmann::json *attrs_json);
virtual void GenDescJson(const AnfNodePtr &anf_node, size_t node_out_idx, size_t desc_output_idx,
nlohmann::json *output_desc);
void GenDesCommonJson(nlohmann::json *output_desc);
size_t GenJsonHash(nlohmann::json tbe_json);
void DeleteDescName(nlohmann::json *desc_json);
void AddOpNameForComputeNode(nlohmann::json *kernel_json);
void GenFusionOpName(nlohmann::json *kernel_json, std::string prefix = "");
private:
std::string json_name_;
};
} // namespace mindspore::kernel
#endif // MINDSPORE_TBE_JSON_CREATOR_H_

View File

@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/tbe/tbe_json/tbe_json_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "base/core_ops.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/tbe/tbe_convert_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "runtime/dev.h"
#include "utils/json_operation_utils.h"
namespace mindspore::kernel {
bool TbeJsonUtils::GetInputsRealNum(const AnfNodePtr &anf_node, const std::vector<OpIOInfoPtr> &inputs_ptr,
std::vector<size_t> *inputs_num) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(inputs_num);
auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
MS_EXCEPTION_IF_NULL(primitive);
// for dynamic input number, dyn_input_sizes has the info of dynamic input num for each input.
auto dyn_input_sizes_ptr = primitive->GetAttr(kAttrDynInputSizes);
std::vector<int64_t> dyn_input_sizes = (dyn_input_sizes_ptr != nullptr)
? GetValue<const std::vector<int64_t>>(dyn_input_sizes_ptr)
: std::vector<int64_t>{};
size_t dyn_input_index = 0;
for (const auto &input_ptr : inputs_ptr) {
if (input_ptr->param_type() == kJParamDynamic) {
if (dyn_input_index >= dyn_input_sizes.size()) {
MS_LOG(ERROR) << "Dyn input index" << dyn_input_index << "is over dyn input num" << dyn_input_sizes.size();
return false;
} else {
(*inputs_num).emplace_back(LongToSize(dyn_input_sizes[dyn_input_index]));
dyn_input_index++;
}
} else {
(*inputs_num).emplace_back(1);
}
}
return true;
}
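// Example (illustrative): for a node whose op info declares inputs
// (dynamic, required) and whose dyn_input_sizes attr is {3}, inputs_num
// becomes {3, 1}.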
bool TbeJsonUtils::GetOutputsRealNum(const AnfNodePtr &anf_node, const std::vector<OpIOInfoPtr> &outputs_ptr,
std::vector<size_t> *outputs_num) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(outputs_num);
size_t real_output_num = AnfAlgo::GetOutputTensorNum(anf_node);
for (const auto &output_ptr : outputs_ptr) {
if (output_ptr->param_type() == kJParamDynamic) {
if (outputs_ptr.size() > 1) {
MS_LOG(ERROR) << "Dynamic output is unsupported multi output, node [ " << AnfAlgo::GetCNodeName(anf_node)
<< " ] has " << outputs_ptr.size() << "outputs, however one of the outputs param_type is "
<< output_ptr->param_type();
return false;
}
outputs_num->emplace_back(real_output_num);
} else {
outputs_num->emplace_back(1);
}
}
return true;
}
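// Example (illustrative): a node with a single dynamic output and 3 real
// output tensors yields outputs_num = {3}.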
bool TbeJsonUtils::IsNeedChangeDefaultFormat(const AnfNodePtr &anf_node) {
MS_EXCEPTION_IF_NULL(anf_node);
return anf_node->isa<CNode>() && AnfAlgo::HasNodeAttr(kAttrFormat, anf_node->cast<CNodePtr>()) &&
AnfAlgo::GetNodeAttr<std::string>(anf_node, kAttrFormat) == kOpFormat_NCDHW;
}
} // namespace mindspore::kernel

View File

@ -0,0 +1,113 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_TBE_JSON_UTILS_H
#define MINDSPORE_TBE_JSON_UTILS_H
#include <memory>
#include <map>
#include <list>
#include <algorithm>
#include <vector>
#include <string>
#include "base/core_ops.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/kernel_compiler/tbe/tbe_convert_utils.h"
#include "backend/kernel_compiler/tbe/tbe_dynaminc_shape_util.h"
#include "runtime/dev.h"
#include "utils/ms_utils.h"
namespace mindspore::kernel {
constexpr auto kJFusionOpList = "op_list";
constexpr auto kJFusionKernelNamePrefix = "te_fusion_";
constexpr auto kJOptional = "optional_";
constexpr auto kJOpFormat_FRACTAL_Z = "FRACTAL_Z";
constexpr auto kJFullName = "full_name";
constexpr auto kJDtype = "dtype";
constexpr auto kJName = "name";
constexpr auto kJOriShape = "ori_shape";
constexpr auto kJOriFormat = "ori_format";
constexpr auto kJShape = "shape";
constexpr auto kJFormat = "format";
constexpr auto kJValid = "valid";
constexpr auto kJParamType = "param_type";
constexpr auto kJParamDynamic = "dynamic";
constexpr auto kJParamRequred = "required";
constexpr auto kJParamOptional = "optional";
constexpr auto kJDataType = "data_type";
constexpr auto kJOutputIndex = "output_index";
constexpr auto kJOutputDataDesc = "output_data_desc";
constexpr auto kJOutputDesc = "output_desc";
constexpr auto kJInputDesc = "input_desc";
constexpr auto kJRange = "range";
constexpr auto kVTypeInt = "int";
constexpr auto kVTypeStr = "str";
constexpr auto kVTypeBool = "bool";
constexpr auto kVTypeFloat = "float";
constexpr auto kVTypeListInt = "listInt";
constexpr auto kVTypeInt32 = "Int32";
constexpr auto kVTypeInt64 = "Int64";
constexpr auto kVTypeListUInt64 = "listUInt64";
constexpr auto kVTypeListFloat = "listFloat";
constexpr auto kVTypeListListInt = "listListInt";
constexpr auto kJValue = "value";
constexpr auto kJDynIndex = "dyn_index";
constexpr auto kJFuncName = "func_name";
constexpr auto kJL1AddrOffset = "L1_addr_offset";
constexpr auto kJL1FusionType = "L1_fusion_type";
constexpr auto kJL1WorkspaceSize = "L1_workspace_size";
constexpr auto kJAddrType = "addr_type";
constexpr auto kJSliceOffset = "slice_offset";
constexpr auto kJSplitIndex = "split_index";
constexpr auto kJTotalShape = "total_shape";
constexpr auto kJDynamicCompileStatic = "dynamic_compile_static";
constexpr auto kJInt64Mode = "int64mode";
constexpr auto kJValidShape = "valid_shape";
constexpr auto kJModuleName = "module_name";
constexpr auto kJModuleNamePrefix = "impl.";
constexpr auto kJPattern = "pattern";
constexpr auto kJPyModulePath = "py_module_path";
constexpr auto kJAttrDesc = "attr_desc";
constexpr auto kJSocInfo = "SocInfo";
constexpr auto kJFusionOpName = "fusion_op_name";
constexpr auto kJGraphID = "graph_id";
constexpr auto kJType = "type";
constexpr auto kJIsRef = "isRef";
constexpr auto kJL1Size = "l1_size";
constexpr auto kJScopeID = "scope_id";
constexpr auto kJGraphName = "graph_name";
constexpr auto kJOpList = "op_list";
constexpr auto kJNull = "NULL";
constexpr auto kJData = "Data";
constexpr auto kJOriName = "ori_name";
constexpr auto kJBuildType = "build_type";
constexpr auto kJMissSupportInfo = "miss_support_info";
constexpr auto kJMaxKernelID = "max_kernel_id";
constexpr auto kJOpName = "op_name";
constexpr auto kJUnknowShape = "unknown_shape";
constexpr auto kJListArgs = "list_args";
constexpr auto kAccuratelyBuild = "accurately_build";
constexpr auto kPyPath = "/usr/local/Ascend/opp/op_impl/built-in/ai_core/tbe";
class TbeJsonUtils {
public:
static bool GetInputsRealNum(const AnfNodePtr &anf_node, const std::vector<OpIOInfoPtr> &inputs_ptr,
std::vector<size_t> *inputs_num);
static bool GetOutputsRealNum(const AnfNodePtr &anf_node, const std::vector<OpIOInfoPtr> &outputs_ptr,
std::vector<size_t> *outputs_num);
static bool IsNeedChangeDefaultFormat(const AnfNodePtr &anf_node);
};
} // namespace mindspore::kernel
#endif // MINDSPORE_TBE_JSON_UTILS_H

View File

@ -33,8 +33,6 @@ namespace kernel {
namespace tbe {
using std::string;
using std::vector;
const std::map<std::string, std::string> opTypeAdapter = {
{"ReLUV2", "ReluV2"}, {"ReLU6", "Relu6"}, {"ReLU6Grad", "Relu6Grad"}, {"ReLUGrad", "ReluGrad"}, {"ReLU", "Relu"}};
class TbeUtils {
public:

View File

@ -1246,6 +1246,37 @@ KernelType AnfRuntimeAlgorithm::GetKernelType(const AnfNodePtr &node) {
return build_info->kernel_type();
}
void AnfRuntimeAlgorithm::SetFusionType(const AnfNodePtr &node, const kernel::FusionType &type) {
MS_EXCEPTION_IF_NULL(node);
auto builder =
std::make_shared<kernel::KernelBuildInfo::KernelBuildInfoBuilder>(AnfAlgo::GetSelectKernelBuildInfo(node));
MS_EXCEPTION_IF_NULL(builder);
builder->SetFusionType(type);
AnfAlgo::SetSelectKernelBuildInfo(builder->Build(), node.get());
}
void AnfRuntimeAlgorithm::SetOutputDataDesc(const AnfNodePtr &node, const std::vector<nlohmann::json> &desc) {
MS_EXCEPTION_IF_NULL(node);
auto builder =
std::make_shared<kernel::KernelBuildInfo::KernelBuildInfoBuilder>(AnfAlgo::GetSelectKernelBuildInfo(node));
MS_EXCEPTION_IF_NULL(builder);
builder->SetOutputDataDesc(desc);
AnfAlgo::SetSelectKernelBuildInfo(builder->Build(), node.get());
}
std::vector<nlohmann::json> AnfRuntimeAlgorithm::GetOutputDataDesc(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto kernel_info = static_cast<device::KernelInfo *>(node->kernel_info());
if (kernel_info == nullptr) {
return {};
}
auto build_info = kernel_info->select_kernel_build_info();
if (build_info == nullptr) {
return {};
}
return build_info->output_data_desc();
}
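// Note: the desc saved by SetOutputDataDesc is read back by
// TbeJsonCreator::GenOutputDataDescJson, which checks the first element for a
// "list_args" entry.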
kernel::Processor AnfRuntimeAlgorithm::GetProcessor(const AnfNodePtr &node) {
MS_EXCEPTION_IF_NULL(node);
auto kernel_info = static_cast<device::KernelInfo *>(node->kernel_info());

View File

@ -206,6 +206,9 @@ class AnfRuntimeAlgorithm {
static kernel::Processor GetProcessor(const AnfNodePtr &node);
// get fusion type:AICORE,AICPU...
static kernel::FusionType GetFusionType(const AnfNodePtr &node);
static void SetFusionType(const AnfNodePtr &node, const kernel::FusionType &type);
static void SetOutputDataDesc(const AnfNodePtr &node, const std::vector<nlohmann::json> &desc);
static std::vector<nlohmann::json> GetOutputDataDesc(const AnfNodePtr &node);
// set select kernel_build_info
static void SetSelectKernelBuildInfo(const kernel::KernelBuildInfoPtr &select_kernel_build_info, AnfNode *node);
// get select kernel_build_info

View File

@ -0,0 +1,33 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "utils/json_operation_utils.h"
#include <string>
#include <vector>
#include <nlohmann/json.hpp>
#include "ir/dtype.h"
namespace mindspore {
bool ParseJson(const std::string &str, nlohmann::json *des_json) {
try {
(*des_json) = nlohmann::json::parse(str);
return true;
} catch (nlohmann::json::parse_error &e) {
MS_LOG(ERROR) << "Json Parse Error, string info: " << str;
MS_LOG(ERROR) << "Json Parse Error, error info: " << e.what();
return false;
}
}
} // namespace mindspore
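A quick usage sketch (illustrative): ParseJson wraps nlohmann's throwing parser so callers branch on a bool instead of catching exceptions.
nlohmann::json json;
if (!ParseJson(R"({"key": 1})", &json)) {
// malformed input: the error and the offending string were already logged
}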

View File

@ -0,0 +1,46 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_JSON_OPERATION_UTILS_H
#define MINDSPORE_JSON_OPERATION_UTILS_H
#include <string>
#include <vector>
#include <nlohmann/json.hpp>
#include "ir/dtype.h"
namespace mindspore {
template <typename T>
T GetJsonValue(const nlohmann::json &json, const std::string &key) {
auto obj_json = json.find(key);
if (obj_json != json.end()) {
try {
T value = obj_json.value();
return value;
} catch (std::exception &e) {
MS_LOG(ERROR) << "Get Json Value Error, error info: " << e.what();
MS_LOG(EXCEPTION) << "Get Json Value Error, target type: " << typeid(T).name() << ", key: [" << key << "]"
<< ", json dump: " << json.dump();
}
} else {
MS_LOG(EXCEPTION) << "Get Json Value Error, can not find key [" << key << "], json dump: " << json.dump();
}
}
bool ParseJson(const std::string &str, nlohmann::json *des_json);
} // namespace mindspore
#endif // MINDSPORE_JSON_OPERATION_UTILS_H
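For reference (illustrative, mirroring the call sites in this commit): GetJsonValue either returns the converted value or raises via MS_LOG(EXCEPTION) when the key is missing or the conversion fails, so callers never see a default-constructed result.
auto type = GetJsonValue<std::string>(node_json, kJType);
auto outputs_desc = GetJsonValue<std::vector<nlohmann::json>>(compute_json, kJOutputDesc);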

View File

@ -297,6 +297,10 @@ constexpr auto kSparseSoftmaxCrossEntropyWithLogitsOpName = "SparseSoftmaxCrossE
constexpr auto kOneHotOpName = "OneHot";
constexpr auto kSoftmaxCrossEntropyWithLogitsOpName = "SoftmaxCrossEntropyWithLogits";
constexpr auto kUniformCandidateSamplerOpName = "UniformCandidateSampler";
constexpr auto kLogSoftmaxGradOpName = "LogSoftmaxGrad";
constexpr auto kLayerNormGradOpName = "LayerNormGrad";
constexpr auto kMinimumGradOpName = "MinimumGrad";
constexpr auto kMaximumGradOpName = "MaximumGrad";
// Communication world group
constexpr auto kNcclWorldGroup = "nccl_world_group";
@ -531,7 +535,9 @@ constexpr auto kOpFormat_NCHW = "NCHW";
constexpr auto kOpFormat_NHWC = "NHWC";
constexpr auto kOpFormat_HWCN = "HWCN";
constexpr auto kOpFormat_NC1HWC0 = "NC1HWC0";
constexpr auto kNCHWShapeSize = 4;
constexpr auto kOpFormat_FRAC_Z = "FracZ";
constexpr auto kOpFormat_FRACTAL_Z = "FRACTAL_Z";
constexpr auto kOpFormat_FRAC_NZ = "FRACTAL_NZ";
constexpr auto kOpFormat_C1HWNCoC0 = "C1HWNCoC0";
constexpr auto kOpFormat_NC1HWC0_C04 = "NC1HWC0_C04";

View File

@ -7526,7 +7526,8 @@ class DynamicRNN(PrimitiveWithInfer):
validator.check("h_shape[1]", h_shape[1], "batch_size", batch_size, Rel.EQ, self.name)
validator.check("h_shape[2]", h_shape[2], "hidden_size", hidden_size, Rel.EQ, self.name)
validator.check("c_shape", c_shape, "h_shape", h_shape, Rel.EQ, self.name)
self.placeholder_index = [3]
self.add_prim_attr("placeholder_index", self.placeholder_index)
y_shape = (num_step, batch_size, hidden_size)
return y_shape, y_shape, y_shape, y_shape, y_shape, y_shape, y_shape, y_shape