forked from mindspore-Ecosystem/mindspore
!7565 Decouple GraphKernel ops from akg op register
Merge pull request !7565 from DeshiChen/1012_gen_json_from_anf
This commit is contained in:
commit
93c03ca4df
2
akg
2
akg
|
@ -1 +1 @@
|
|||
Subproject commit 68fa41b7b029e28fe523e0e25a6a1fad7a8ccbcd
|
||||
Subproject commit 03ef896b90a34ebdb7eeb3fa77d7d4252d021011
|
|
@ -15,14 +15,16 @@
|
|||
*/
|
||||
|
||||
#include "backend/kernel_compiler/akg/akg_kernel_json_generator.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
#include <tuple>
|
||||
#include "backend/kernel_compiler/akg/akg_kernel_attrs_process.h"
|
||||
#include "backend/kernel_compiler/common_utils.h"
|
||||
#include "backend/kernel_compiler/oplib/oplib.h"
|
||||
#include "backend/kernel_compiler/akg/akg_kernel_attrs_process.h"
|
||||
#include "backend/session/anf_runtime_algorithm.h"
|
||||
|
||||
namespace mindspore {
|
||||
|
@ -37,6 +39,93 @@ std::vector<int> GetDynInputSize(const AnfNodePtr &anf_node) {
|
|||
}
|
||||
return dyn_input_sizes;
|
||||
}
|
||||
|
||||
class OpInfoExtractor {
|
||||
public:
|
||||
OpInfoExtractor() = default;
|
||||
~OpInfoExtractor() = default;
|
||||
OpInfoPtr Run(const AnfNodePtr &anf_node) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
cnode_ = anf_node->cast<CNodePtr>();
|
||||
MS_EXCEPTION_IF_NULL(cnode_);
|
||||
auto op_info = std::make_shared<OpInfo>();
|
||||
op_info->set_op_name(AnfAlgo::GetCNodeName(cnode_));
|
||||
op_info->set_imply_type(OpImplyType::kAKG);
|
||||
ExtractInputs(op_info);
|
||||
ExtractOutputs(op_info);
|
||||
ExtractAttrs(op_info);
|
||||
return op_info;
|
||||
}
|
||||
|
||||
private:
|
||||
void ExtractInputs(const OpInfoPtr &op_info) {
|
||||
auto dyn_input_sizes = GetDynInputSize(cnode_);
|
||||
if (dyn_input_sizes.empty()) {
|
||||
for (size_t i = 1; i < cnode_->size(); i++) {
|
||||
auto io_info = std::make_shared<OpIOInfo>();
|
||||
io_info->set_name("input_" + std::to_string(i - 1));
|
||||
op_info->add_inputs_ptr(io_info);
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < dyn_input_sizes.size(); i++) {
|
||||
auto io_info = std::make_shared<OpIOInfo>();
|
||||
io_info->set_name("input_" + std::to_string(i));
|
||||
io_info->set_param_type("dynamic");
|
||||
op_info->add_inputs_ptr(io_info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ExtractOutputs(const OpInfoPtr &op_info) {
|
||||
// only support single output in op desc.
|
||||
auto io_info = std::make_shared<OpIOInfo>();
|
||||
io_info->set_name("output");
|
||||
op_info->add_outputs_ptr(io_info);
|
||||
}
|
||||
|
||||
bool ExcludeAttr(const std::string &name) {
|
||||
const std::set<std::string> black_list = {"IsFeatureMapInputList", "IsFeatureMapOutput", kAttrOutputNames,
|
||||
kAttrInputNames};
|
||||
return black_list.count(name) != 0;
|
||||
}
|
||||
|
||||
void ExtractAttrs(const OpInfoPtr &op_info) {
|
||||
auto prim = GetCNodePrimitive(cnode_);
|
||||
if (prim == nullptr) return;
|
||||
for (const auto &[name, v] : prim->attrs()) {
|
||||
if (ExcludeAttr(name)) continue;
|
||||
auto op_attr = std::make_shared<OpAttr>();
|
||||
op_attr->set_name(name);
|
||||
op_attr->set_param_type("required");
|
||||
// Only support the following types in op json.
|
||||
if (v->isa<Int32Imm>() || v->isa<Int64Imm>()) {
|
||||
op_attr->set_type("int");
|
||||
} else if (v->isa<FP32Imm>() || v->isa<FP64Imm>()) {
|
||||
op_attr->set_type("float");
|
||||
} else if (v->isa<BoolImm>()) {
|
||||
op_attr->set_type("bool");
|
||||
} else if (v->isa<StringImm>()) {
|
||||
op_attr->set_type("str");
|
||||
} else if (v->isa<ValueList>() || v->isa<ValueTuple>()) {
|
||||
auto vec = v->isa<ValueList>() ? v->cast<ValueListPtr>()->value() : v->cast<ValueTuplePtr>()->value();
|
||||
if (vec.empty()) {
|
||||
op_attr->set_type("listInt");
|
||||
} else if (vec[0]->isa<Int32Imm>() || vec[0]->isa<Int64Imm>()) {
|
||||
op_attr->set_type("listInt");
|
||||
} else if (vec[0]->isa<StringImm>()) {
|
||||
op_attr->set_type("listStr");
|
||||
}
|
||||
}
|
||||
if (op_attr->type().empty()) {
|
||||
MS_LOG(DEBUG) << "Unknow type, ignore attr " << name;
|
||||
continue;
|
||||
}
|
||||
op_info->add_attrs_ptr(op_attr);
|
||||
}
|
||||
}
|
||||
|
||||
CNodePtr cnode_;
|
||||
};
|
||||
} // namespace
|
||||
|
||||
int AkgKernelJsonGenerator::op_cnt_ = 0;
|
||||
|
@ -77,10 +166,10 @@ inline std::string AkgKernelJsonGenerator::GetOutputFormat(const AnfNodePtr &anf
|
|||
return dump_option_.is_before_select_kernel ? kOpFormat_DEFAULT : AnfAlgo::GetOutputFormat(anf_node, index);
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const inputs_json) {
|
||||
bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info,
|
||||
nlohmann::json *inputs_json) {
|
||||
// for dynamic input number, dyn_input_sizes has the info of dynamic input num for each input.
|
||||
std::vector<std::shared_ptr<OpIOInfo>> inputs_ptr = op_info->inputs_ptr();
|
||||
auto inputs_ptr = op_info->inputs_ptr();
|
||||
if (inputs_ptr.empty()) {
|
||||
MS_LOG(ERROR) << "Kernel [" << anf_node->fullname_with_scope() << "] regist info has no input info";
|
||||
return false;
|
||||
|
@ -90,7 +179,7 @@ bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, con
|
|||
auto dyn_input_sizes = GetDynInputSize(anf_node);
|
||||
size_t real_input_index = 0;
|
||||
for (size_t i = 0; i < inputs_ptr.size(); i++) {
|
||||
std::shared_ptr<OpIOInfo> input_ptr = inputs_ptr[i];
|
||||
auto input_ptr = inputs_ptr[i];
|
||||
if (input_ptr == nullptr) {
|
||||
MS_LOG(ERROR) << "Kernel [" << anf_node->fullname_with_scope() << "] regist input[" << i << "] is nullptr";
|
||||
return false;
|
||||
|
@ -112,9 +201,7 @@ bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, con
|
|||
input_desc_json[kJsonKeyName] = input_ptr->name();
|
||||
input_desc_json[kJsonKeyTensorName] = "input_" + std::to_string(GetInputTensorIdxInc(anf_node, real_input_index));
|
||||
auto input_shape = this->GetInputShape(anf_node, real_input_index);
|
||||
bool fold_const =
|
||||
anf_node->func_graph() != nullptr && anf_node->func_graph()->has_attr(FUNC_GRAPH_ATTR_GRAPH_KERNEL);
|
||||
if (fold_const && GetInputTensorValue(anf_node, real_input_index, &input_desc_json)) {
|
||||
if (AnfAlgo::IsNodeInGraphKernel(anf_node) && GetInputTensorValue(anf_node, real_input_index, &input_desc_json)) {
|
||||
MS_LOG(DEBUG) << "Take input[" << real_input_index << "] of [" << anf_node->DebugString(2)
|
||||
<< "] as const tensor, shape: [" << Vector2Str(input_shape)
|
||||
<< "], value: " << input_desc_json[kJsonKeyValue];
|
||||
|
@ -132,8 +219,8 @@ bool AkgKernelJsonGenerator::CreateInputDescJson(const AnfNodePtr &anf_node, con
|
|||
return true;
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::CreateOutputDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const outputs_json) {
|
||||
bool AkgKernelJsonGenerator::CreateOutputDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info,
|
||||
nlohmann::json *outputs_json) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(op_info);
|
||||
MS_EXCEPTION_IF_NULL(outputs_json);
|
||||
|
@ -160,9 +247,9 @@ bool AkgKernelJsonGenerator::CreateOutputDescJson(const AnfNodePtr &anf_node, co
|
|||
return true;
|
||||
}
|
||||
|
||||
void AkgKernelJsonGenerator::GetJson(const AnfNodePtr &anf_node, const std::vector<int> &dyn_input_sizes,
|
||||
const std::shared_ptr<OpAttr> &op_attr, nlohmann::json *const attr_json,
|
||||
const ValuePtr &attr_value) {
|
||||
void AkgKernelJsonGenerator::GetAttrJson(const AnfNodePtr &anf_node, const std::vector<int> &dyn_input_sizes,
|
||||
const OpAttrPtr &op_attr, nlohmann::json *attr_json,
|
||||
const ValuePtr &attr_value) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(op_attr);
|
||||
MS_EXCEPTION_IF_NULL(attr_json);
|
||||
|
@ -195,9 +282,9 @@ void AkgKernelJsonGenerator::GetJson(const AnfNodePtr &anf_node, const std::vect
|
|||
}
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::CreateAttrDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const attrs_json) {
|
||||
std::vector<std::shared_ptr<OpAttr>> attrs = op_info->attrs_ptr();
|
||||
bool AkgKernelJsonGenerator::CreateAttrDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info,
|
||||
nlohmann::json *attrs_json) {
|
||||
auto attrs = op_info->attrs_ptr();
|
||||
if (attrs.empty()) {
|
||||
MS_LOG(DEBUG) << "Apply kernel [" << anf_node->fullname_with_scope() << "] op info attrs is empty";
|
||||
return true;
|
||||
|
@ -206,7 +293,7 @@ bool AkgKernelJsonGenerator::CreateAttrDescJson(const AnfNodePtr &anf_node, cons
|
|||
auto primitive = AnfAlgo::GetCNodePrimitive(anf_node);
|
||||
|
||||
// create input name list for "x_shape" in attr with "x" in primitive.
|
||||
std::vector<std::shared_ptr<OpIOInfo>> inputs = op_info->inputs_ptr();
|
||||
auto inputs = op_info->inputs_ptr();
|
||||
std::map<std::string, size_t> op_info_shape_name;
|
||||
for (size_t i = 0; i < inputs.size(); i++) {
|
||||
op_info_shape_name[inputs[i]->name() + "_shape"] = i;
|
||||
|
@ -243,7 +330,7 @@ bool AkgKernelJsonGenerator::CreateAttrDescJson(const AnfNodePtr &anf_node, cons
|
|||
return false;
|
||||
}
|
||||
} else {
|
||||
GetJson(anf_node, dyn_input_sizes, op_attr, &attr_json, attr_value);
|
||||
GetAttrJson(anf_node, dyn_input_sizes, op_attr, &attr_json, attr_value);
|
||||
attr_json[kJsonKeyName] = op_attr->name();
|
||||
attrs_json->push_back(attr_json);
|
||||
}
|
||||
|
@ -306,7 +393,7 @@ std::string AkgKernelJsonGenerator::GetTensorName(const nlohmann::json &node_jso
|
|||
}
|
||||
|
||||
void AkgKernelJsonGenerator::SetTensorName(const std::string &tag, const std::string &new_name,
|
||||
const std::pair<size_t, size_t> &position, nlohmann::json *const node_json) {
|
||||
const std::pair<size_t, size_t> &position, nlohmann::json *node_json) {
|
||||
MS_EXCEPTION_IF_NULL(node_json);
|
||||
if (node_json->count(tag) == 0) {
|
||||
MS_LOG(ERROR) << "Node [" << node_json->dump() << "] has no key [" << tag << "].";
|
||||
|
@ -337,16 +424,7 @@ void AkgKernelJsonGenerator::SetTensorName(const std::string &tag, const std::st
|
|||
return;
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::GenerateSingleKernelJson(const AnfNodePtr &anf_node, nlohmann::json *const node_json) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(node_json);
|
||||
auto op_name = AnfAlgo::GetCNodeName(anf_node);
|
||||
auto op_info = mindspore::kernel::OpLib::FindOp(op_name, OpImplyType::kAKG);
|
||||
MS_EXCEPTION_IF_NULL(op_info);
|
||||
|
||||
// get basic params from currentNodeOpDesc
|
||||
(*node_json)[kJsonKeyName] = op_name;
|
||||
(*node_json)[kJsonKeyImplPath] = op_info->impl_path();
|
||||
void AkgKernelJsonGenerator::SaveNodeAddress(const AnfNodePtr &anf_node, nlohmann::json *node_json) {
|
||||
if (dump_option_.save_ptr_address) {
|
||||
std::ostringstream get_the_address;
|
||||
get_the_address << anf_node.get();
|
||||
|
@ -354,6 +432,26 @@ bool AkgKernelJsonGenerator::GenerateSingleKernelJson(const AnfNodePtr &anf_node
|
|||
(*node_json)[kJsonKeyPtrAddress] = address;
|
||||
address_node_map_[address] = anf_node;
|
||||
}
|
||||
}
|
||||
|
||||
OpInfoPtr AkgKernelJsonGenerator::ExtractOpInfo(const AnfNodePtr &anf_node) {
|
||||
if (dump_option_.extract_opinfo_from_anfnode) {
|
||||
return OpInfoExtractor().Run(anf_node);
|
||||
} else {
|
||||
return mindspore::kernel::OpLib::FindOp(AnfAlgo::GetCNodeName(anf_node), OpImplyType::kAKG);
|
||||
}
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::GenerateSingleKernelJson(const AnfNodePtr &anf_node, nlohmann::json *node_json) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(node_json);
|
||||
OpInfoPtr op_info = ExtractOpInfo(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(op_info);
|
||||
|
||||
// get basic params from currentNodeOpDesc
|
||||
(*node_json)[kJsonKeyName] = op_info->op_name();
|
||||
(*node_json)[kJsonKeyImplPath] = op_info->impl_path();
|
||||
SaveNodeAddress(anf_node, node_json);
|
||||
|
||||
// input desc
|
||||
nlohmann::json inputs_json;
|
||||
|
@ -383,8 +481,8 @@ bool AkgKernelJsonGenerator::GenerateSingleKernelJson(const AnfNodePtr &anf_node
|
|||
return true;
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::GetIOSize(const nlohmann::json &node_json, std::vector<size_t> *const input_size,
|
||||
std::vector<size_t> *const output_size) {
|
||||
bool AkgKernelJsonGenerator::GetIOSize(const nlohmann::json &node_json, std::vector<size_t> *input_size,
|
||||
std::vector<size_t> *output_size) {
|
||||
if (input_size == nullptr || output_size == nullptr) {
|
||||
MS_LOG(ERROR) << "input size or output size is nullptr";
|
||||
return false;
|
||||
|
@ -415,12 +513,13 @@ bool AkgKernelJsonGenerator::GetIOSize(const nlohmann::json &node_json, std::vec
|
|||
return true;
|
||||
}
|
||||
|
||||
bool AkgKernelJsonGenerator::CollectJson(const AnfNodePtr &anf_node, nlohmann::json *const kernel_json) {
|
||||
bool AkgKernelJsonGenerator::CollectJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json) {
|
||||
MS_EXCEPTION_IF_NULL(anf_node);
|
||||
MS_EXCEPTION_IF_NULL(kernel_json);
|
||||
std::string op_name = AnfAlgo::GetCNodeName(anf_node);
|
||||
MS_LOG(INFO) << "Akg start generate kernel json desc, full scope name is : " << anf_node->fullname_with_scope();
|
||||
SetAkgKernelAttrs(anf_node);
|
||||
dump_option_.extract_opinfo_from_anfnode = false;
|
||||
if (!GenerateSingleKernelJson(anf_node, kernel_json)) {
|
||||
MS_LOG(ERROR) << "Op[" << anf_node->fullname_with_scope() << "] create single kernel json failed.";
|
||||
return false;
|
||||
|
@ -447,8 +546,7 @@ bool AkgKernelJsonGenerator::CollectJson(const AnfNodePtr &anf_node, nlohmann::j
|
|||
|
||||
bool AkgKernelJsonGenerator::CollectFusedJson(const std::vector<AnfNodePtr> &anf_nodes,
|
||||
const std::vector<AnfNodePtr> &input_list,
|
||||
const std::vector<AnfNodePtr> &output_list,
|
||||
nlohmann::json *const kernel_json) {
|
||||
const std::vector<AnfNodePtr> &output_list, nlohmann::json *kernel_json) {
|
||||
if (anf_nodes.empty() || input_list.empty()) {
|
||||
MS_LOG(ERROR) << "Invalid input size, anf_nodes [" << anf_nodes.size() << "], input_list [" << input_list.size()
|
||||
<< "].";
|
||||
|
@ -457,6 +555,7 @@ bool AkgKernelJsonGenerator::CollectFusedJson(const std::vector<AnfNodePtr> &anf
|
|||
MS_LOG(INFO) << "Fusion nodes: [" << output_list.size() << "], input_list: [" << anf_nodes.size()
|
||||
<< "], output_list: [" << input_list.size() << "].";
|
||||
std::map<AnfNodePtr, nlohmann::json> node_json_map;
|
||||
dump_option_.extract_opinfo_from_anfnode = true;
|
||||
if (!GenSingleJsons(anf_nodes, &node_json_map)) return false;
|
||||
|
||||
UpdateTensorName(anf_nodes, &node_json_map);
|
||||
|
|
|
@ -16,14 +16,14 @@
|
|||
|
||||
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_AKG_AKG_KERNEL_JSON_GENERATOR_H_
|
||||
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_AKG_AKG_KERNEL_JSON_GENERATOR_H_
|
||||
#include <unordered_map>
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include <nlohmann/json.hpp>
|
||||
#include "backend/kernel_compiler/oplib/oplib.h"
|
||||
#include "nlohmann/json.hpp"
|
||||
|
||||
namespace mindspore {
|
||||
namespace kernel {
|
||||
|
@ -55,6 +55,7 @@ constexpr auto kAttrInputNames = "input_names";
|
|||
struct DumpOption {
|
||||
bool is_before_select_kernel = false;
|
||||
bool save_ptr_address = false;
|
||||
bool extract_opinfo_from_anfnode = false;
|
||||
};
|
||||
|
||||
class AkgKernelJsonGenerator {
|
||||
|
@ -63,13 +64,13 @@ class AkgKernelJsonGenerator {
|
|||
explicit AkgKernelJsonGenerator(DumpOption dump_option) : dump_option_(dump_option) { Clear(); }
|
||||
~AkgKernelJsonGenerator() = default;
|
||||
|
||||
bool CollectJson(const AnfNodePtr &anf_node, nlohmann::json *const kernel_json);
|
||||
bool CollectJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json);
|
||||
bool CollectFusedJson(const std::vector<AnfNodePtr> &anf_nodes, const std::vector<AnfNodePtr> &input_list,
|
||||
const std::vector<AnfNodePtr> &output_list, nlohmann::json *const kernel_json);
|
||||
const std::vector<AnfNodePtr> &output_list, nlohmann::json *kernel_json);
|
||||
bool CollectJson(const AnfNodePtr &anf_node);
|
||||
bool CollectFusedJson(const std::vector<AnfNodePtr> &anf_nodes, const std::vector<AnfNodePtr> &input_list,
|
||||
const std::vector<AnfNodePtr> &output_list);
|
||||
bool GenerateSingleKernelJson(const AnfNodePtr &anf_node, nlohmann::json *const node_json);
|
||||
bool GenerateSingleKernelJson(const AnfNodePtr &anf_node, nlohmann::json *node_json);
|
||||
std::string kernel_name() const { return kernel_name_; }
|
||||
nlohmann::json kernel_json() const { return kernel_json_; }
|
||||
std::string kernel_json_str() const { return kernel_json_.dump(); }
|
||||
|
@ -84,16 +85,12 @@ class AkgKernelJsonGenerator {
|
|||
std::map<std::string, AnfNodePtr> address_node_map() { return address_node_map_; }
|
||||
|
||||
private:
|
||||
bool CreateInputDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const inputs_json);
|
||||
bool CreateOutputDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const outputs_json);
|
||||
void GetJson(const AnfNodePtr &anf_node, const std::vector<int> &dyn_input_sizes,
|
||||
const std::shared_ptr<OpAttr> &op_attr, nlohmann::json *const attr_json, const ValuePtr &attr_value);
|
||||
bool CreateAttrDescJson(const AnfNodePtr &anf_node, const std::shared_ptr<OpInfo> &op_info,
|
||||
nlohmann::json *const attrs_json);
|
||||
bool GetIOSize(const nlohmann::json &node_json, std::vector<size_t> *const input_size,
|
||||
std::vector<size_t> *const output_size);
|
||||
bool CreateInputDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info, nlohmann::json *inputs_json);
|
||||
bool CreateOutputDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info, nlohmann::json *outputs_json);
|
||||
void GetAttrJson(const AnfNodePtr &anf_node, const std::vector<int> &dyn_input_sizes, const OpAttrPtr &op_attr,
|
||||
nlohmann::json *attr_json, const ValuePtr &attr_value);
|
||||
bool CreateAttrDescJson(const AnfNodePtr &anf_node, const OpInfoPtr &op_info, nlohmann::json *attrs_json);
|
||||
bool GetIOSize(const nlohmann::json &node_json, std::vector<size_t> *input_size, std::vector<size_t> *output_size);
|
||||
bool GenSingleJsons(const std::vector<AnfNodePtr> &anf_nodes, std::map<AnfNodePtr, nlohmann::json> *node_json_map);
|
||||
void UpdateTensorName(const std::vector<AnfNodePtr> &anf_nodes, std::map<AnfNodePtr, nlohmann::json> *node_json_map);
|
||||
nlohmann::json CreateInputsJson(const std::vector<AnfNodePtr> &anf_nodes, const std::vector<AnfNodePtr> &input_list,
|
||||
|
@ -106,7 +103,7 @@ class AkgKernelJsonGenerator {
|
|||
size_t GetInputTensorIdxInc(const AnfNodePtr &anf_node, size_t input_idx);
|
||||
size_t GetOutputTensorIdxInc();
|
||||
void SetTensorName(const std::string &tag, const std::string &new_name, const std::pair<size_t, size_t> &position,
|
||||
nlohmann::json *const node_json);
|
||||
nlohmann::json *node_json);
|
||||
std::string GetTensorName(const nlohmann::json &node_json, const std::string &tag,
|
||||
const std::pair<size_t, size_t> &position);
|
||||
TypeId GetInputDataType(const AnfNodePtr &anf_node, size_t real_index);
|
||||
|
@ -115,6 +112,8 @@ class AkgKernelJsonGenerator {
|
|||
TypeId GetOutputDataType(const AnfNodePtr &anf_node, size_t index);
|
||||
std::vector<size_t> GetOutputShape(const AnfNodePtr &anf_node, size_t index);
|
||||
std::string GetOutputFormat(const AnfNodePtr &anf_node, size_t index);
|
||||
void SaveNodeAddress(const AnfNodePtr &anf_node, nlohmann::json *node_json);
|
||||
OpInfoPtr ExtractOpInfo(const AnfNodePtr &anf_node);
|
||||
|
||||
DumpOption dump_option_;
|
||||
static int op_cnt_;
|
||||
|
|
|
@ -175,6 +175,10 @@ class OpInfo {
|
|||
std::vector<std::shared_ptr<OpIOInfo>> outputs_ptr_;
|
||||
std::unordered_map<size_t, size_t> ref_infos_;
|
||||
};
|
||||
|
||||
using OpAttrPtr = std::shared_ptr<OpAttr>;
|
||||
using OpIOInfoPtr = std::shared_ptr<OpIOInfo>;
|
||||
using OpInfoPtr = std::shared_ptr<OpInfo>;
|
||||
} // namespace kernel
|
||||
} // namespace mindspore
|
||||
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_OPLIB_OPINFO_H_
|
||||
|
|
|
@ -931,6 +931,11 @@ bool AnfRuntimeAlgorithm::IsGraphKernel(const AnfNodePtr &node) {
|
|||
return func_graph->has_attr(FUNC_GRAPH_ATTR_GRAPH_KERNEL);
|
||||
}
|
||||
|
||||
bool AnfRuntimeAlgorithm::IsNodeInGraphKernel(const AnfNodePtr &node) {
|
||||
MS_EXCEPTION_IF_NULL(node);
|
||||
return node->func_graph() != nullptr && node->func_graph()->has_attr(FUNC_GRAPH_ATTR_GRAPH_KERNEL);
|
||||
}
|
||||
|
||||
bool AnfRuntimeAlgorithm::IsParameterWeight(const ParameterPtr &node) {
|
||||
MS_EXCEPTION_IF_NULL(node);
|
||||
return node->has_default();
|
||||
|
|
|
@ -184,6 +184,8 @@ class AnfRuntimeAlgorithm {
|
|||
static bool IsRealCNodeKernel(const AnfNodePtr &node);
|
||||
// checkout whether the anf node is a graph kernel.
|
||||
static bool IsGraphKernel(const AnfNodePtr &node);
|
||||
// checkout whether the anf node is an inner node of graph kernel.
|
||||
static bool IsNodeInGraphKernel(const AnfNodePtr &node);
|
||||
// check parameter is weight or data
|
||||
static bool IsParameterWeight(const ParameterPtr &node);
|
||||
// set stream id of kernel,which will be set in stream assign and be used in stream generate
|
||||
|
|
|
@ -103,6 +103,13 @@ bool SelectAkgKernel(const CNodePtr &kernel_node, const std::shared_ptr<KernelBu
|
|||
MS_EXCEPTION_IF_NULL(kernel_node);
|
||||
MS_EXCEPTION_IF_NULL(selected_kernel_info);
|
||||
std::vector<std::shared_ptr<KernelBuildInfo>> kernel_info_list;
|
||||
|
||||
if (AnfAlgo::IsNodeInGraphKernel(kernel_node)) {
|
||||
// The op_info in OpLib is only used for basic ops,
|
||||
// we don't care it in GraphKernel.
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string op_name = AnfAlgo::GetCNodeName(kernel_node);
|
||||
|
||||
auto op_info_ptr = mindspore::kernel::OpLib::FindOp(op_name, kernel::OpImplyType::kAKG);
|
||||
|
|
|
@ -13,47 +13,25 @@
|
|||
# limitations under the License.
|
||||
|
||||
"""__init__"""
|
||||
from .abs import _abs_akg
|
||||
from .add import _add_akg
|
||||
from .add_n import _addn_akg
|
||||
from .cast import _cast_akg
|
||||
from .equal import _equal_akg
|
||||
from .exp import _exp_akg
|
||||
from .expand_dims import _expand_dims_akg
|
||||
from .greater_equal import _greater_equal_akg
|
||||
from .hsigmoid import _hsigmoid_akg
|
||||
from .hsigmoid_grad import _hsigmoid_grad_akg
|
||||
from .hswish import _hswish_akg
|
||||
from .hswish_grad import _hswish_grad_akg
|
||||
from .lessequal import _lessequal_akg
|
||||
from .log import _log_akg
|
||||
from .logical_and import _logical_and_akg
|
||||
from .logical_not import _logical_not_akg
|
||||
from .logical_or import _logical_or_akg
|
||||
from .maximum import _maximum_akg
|
||||
from .mean import _simple_mean_akg
|
||||
from .mean_grad import _simple_mean_grad_akg
|
||||
from .minimum import _minimum_akg
|
||||
from .mul import _mul_akg
|
||||
from .neg import _neg_akg
|
||||
from .notequal import _notequal_akg
|
||||
from .pow import _pow_akg
|
||||
from .real_div import _real_div_akg
|
||||
from .reciprocal import _reciprocal_akg
|
||||
from .reduce_max import _reduce_max_akg
|
||||
from .reduce_sum import _reduce_sum_akg
|
||||
from .relu6 import _relu6_akg
|
||||
from .relu6_grad import _relu6_grad_akg
|
||||
from .reshape import _reshape_akg
|
||||
from .round import _round_akg
|
||||
from .rsqrt import _rsqrt_akg
|
||||
from .sqrt import _sqrt_akg
|
||||
from .squeeze import _squeeze_akg
|
||||
from .squeeze_grad import _squeeze_grad_akg
|
||||
from .sub import _sub_akg
|
||||
from .tile import _tile_akg
|
||||
from .select import _select_akg
|
||||
from .greater import _greater_akg
|
||||
from .assign import _assign_akg
|
||||
|
||||
# Please insert op register in lexicographical order of the filename.
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Abs op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
op_info = AkgGpuRegOp("Abs") \
|
||||
.fusion_type("ELEMWISE") \
|
||||
.input(0, "x") \
|
||||
.output(0, "output") \
|
||||
.dtype_format(DT.F16_Default, DT.F16_Default) \
|
||||
.dtype_format(DT.F32_Default, DT.F32_Default) \
|
||||
.dtype_format(DT.I32_Default, DT.I32_Default) \
|
||||
.get_op_info()
|
||||
|
||||
|
||||
@op_info_register(op_info)
|
||||
def _abs_akg():
|
||||
"""Abs Akg register"""
|
||||
return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""TensorAdd op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
op_info = AkgGpuRegOp("TensorAdd") \
|
||||
.fusion_type("ELEMWISE") \
|
||||
.input(0, "x") \
|
||||
.input(1, "y") \
|
||||
.output(0, "output") \
|
||||
.dtype_format(DT.F16_Default, DT.F16_Default, DT.F16_Default) \
|
||||
.dtype_format(DT.F32_Default, DT.F32_Default, DT.F32_Default) \
|
||||
.dtype_format(DT.I32_Default, DT.I32_Default, DT.I32_Default) \
|
||||
.get_op_info()
|
||||
|
||||
|
||||
@op_info_register(op_info)
|
||||
def _add_akg():
|
||||
"""TensorAdd Akg register"""
|
||||
return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""AddN op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
op_info = AkgGpuRegOp("AddN") \
|
||||
.fusion_type("ELEMWISE") \
|
||||
.input(0, "inputs", "dynamic") \
|
||||
.output(0, "output") \
|
||||
.dtype_format(DT.F16_Default, DT.F16_Default) \
|
||||
.dtype_format(DT.F32_Default, DT.F32_Default) \
|
||||
.get_op_info()
|
||||
|
||||
|
||||
@op_info_register(op_info)
|
||||
def _addn_akg():
|
||||
"""AddN Akg register"""
|
||||
return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Assign op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
op_info = AkgGpuRegOp("Assign") \
|
||||
.fusion_type("ELEMWISE") \
|
||||
.input(0, "x") \
|
||||
.input(1, "y") \
|
||||
.output(0, "output") \
|
||||
.dtype_format(DT.F16_Default, DT.F16_Default, DT.F16_Default) \
|
||||
.dtype_format(DT.F32_Default, DT.F32_Default, DT.F32_Default) \
|
||||
.dtype_format(DT.I32_Default, DT.I32_Default, DT.I32_Default) \
|
||||
.get_op_info()
|
||||
|
||||
|
||||
@op_info_register(op_info)
|
||||
def _assign_akg():
|
||||
"""Assign Akg register"""
|
||||
return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Exp op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
op_info = AkgGpuRegOp("Exp") \
|
||||
.fusion_type("ELEMWISE") \
|
||||
.input(0, "x") \
|
||||
.output(0, "output") \
|
||||
.dtype_format(DT.F16_Default, DT.F16_Default) \
|
||||
.dtype_format(DT.F32_Default, DT.F32_Default) \
|
||||
.get_op_info()
|
||||
|
||||
|
||||
@op_info_register(op_info)
|
||||
def _exp_akg():
|
||||
"""Exp Akg register"""
|
||||
return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""ExpandDims op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the ExpandDims operator.
_expand_dims_reg = AkgGpuRegOp("ExpandDims")
_expand_dims_reg.fusion_type("OPAQUE")
_expand_dims_reg.input(0, "x")
_expand_dims_reg.output(0, "output")
# The axis to insert the new dimension at is a required attribute.
_expand_dims_reg.attr("axis", "required", "int")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default)):
    _expand_dims_reg.dtype_format(*_fmt)
op_info = _expand_dims_reg.get_op_info()


@op_info_register(op_info)
def _expand_dims_akg():
    """Register the ExpandDims op for the AKG GPU backend."""
    return
|
|
@ -1,34 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Greater op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Greater operator.
_greater_reg = AkgGpuRegOp("Greater")
_greater_reg.fusion_type("ELEMWISE")
_greater_reg.input(0, "x")
_greater_reg.input(1, "y")
_greater_reg.output(0, "output")
# Supported (x, y, output) dtype/format combinations; the comparison
# always yields a bool output, in both default and 5HD formats.
for _fmt in ((DT.F16_Default, DT.F16_Default, DT.BOOL_Default),
             (DT.F32_Default, DT.F32_Default, DT.BOOL_Default),
             (DT.F16_5HD, DT.F16_5HD, DT.BOOL_5HD),
             (DT.F32_5HD, DT.F32_5HD, DT.BOOL_5HD)):
    _greater_reg.dtype_format(*_fmt)
op_info = _greater_reg.get_op_info()


@op_info_register(op_info)
def _greater_akg():
    """Register the Greater op for the AKG GPU backend."""
    return
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Log op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Log operator.
_log_reg = AkgGpuRegOp("Log")
_log_reg.fusion_type("ELEMWISE")
_log_reg.input(0, "x")
_log_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default)):
    _log_reg.dtype_format(*_fmt)
op_info = _log_reg.get_op_info()


@op_info_register(op_info)
def _log_akg():
    """Register the Log op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Maximum op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Maximum operator.
_maximum_reg = AkgGpuRegOp("Maximum")
_maximum_reg.fusion_type("COMMREDUCE")
_maximum_reg.input(0, "x")
_maximum_reg.input(1, "y")
_maximum_reg.output(0, "output")
# Supported (x, y, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default, DT.I32_Default)):
    _maximum_reg.dtype_format(*_fmt)
op_info = _maximum_reg.get_op_info()


@op_info_register(op_info)
def _maximum_akg():
    """Register the Maximum op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Minimum op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Minimum operator.
_minimum_reg = AkgGpuRegOp("Minimum")
_minimum_reg.fusion_type("COMMREDUCE")
_minimum_reg.input(0, "x")
_minimum_reg.input(1, "y")
_minimum_reg.output(0, "output")
# Supported (x, y, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default, DT.I32_Default)):
    _minimum_reg.dtype_format(*_fmt)
op_info = _minimum_reg.get_op_info()


@op_info_register(op_info)
def _minimum_akg():
    """Register the Minimum op for the AKG GPU backend."""
    return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Neg op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Neg operator.
_neg_reg = AkgGpuRegOp("Neg")
_neg_reg.fusion_type("ELEMWISE")
_neg_reg.input(0, "x")
_neg_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _neg_reg.dtype_format(*_fmt)
op_info = _neg_reg.get_op_info()


@op_info_register(op_info)
def _neg_akg():
    """Register the Neg op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Pow op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Pow operator.
_pow_reg = AkgGpuRegOp("Pow")
_pow_reg.fusion_type("ELEMWISE")
_pow_reg.input(0, "x")
_pow_reg.input(1, "y")
_pow_reg.output(0, "output")
# Supported (x, y, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default, DT.I32_Default)):
    _pow_reg.dtype_format(*_fmt)
op_info = _pow_reg.get_op_info()


@op_info_register(op_info)
def _pow_akg():
    """Register the Pow op for the AKG GPU backend."""
    return
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""RealDiv op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the RealDiv operator.
_real_div_reg = AkgGpuRegOp("RealDiv")
_real_div_reg.fusion_type("ELEMWISE")
_real_div_reg.input(0, "x")
_real_div_reg.input(1, "y")
_real_div_reg.output(0, "output")
# Supported (x, y, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default, DT.F32_Default)):
    _real_div_reg.dtype_format(*_fmt)
op_info = _real_div_reg.get_op_info()


@op_info_register(op_info)
def _real_div_akg():
    """Register the RealDiv op for the AKG GPU backend."""
    return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Reciprocal op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Reciprocal operator.
_reciprocal_reg = AkgGpuRegOp("Reciprocal")
_reciprocal_reg.fusion_type("ELEMWISE")
_reciprocal_reg.input(0, "x")
_reciprocal_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _reciprocal_reg.dtype_format(*_fmt)
op_info = _reciprocal_reg.get_op_info()


@op_info_register(op_info)
def _reciprocal_akg():
    """Register the Reciprocal op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""ReduceMax op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the ReduceMax operator.
_reduce_max_reg = AkgGpuRegOp("ReduceMax")
_reduce_max_reg.fusion_type("COMMREDUCE")
_reduce_max_reg.input(0, "x")
_reduce_max_reg.output(0, "output")
# Reduction axes and whether reduced dims are kept are required attributes.
_reduce_max_reg.attr("axis", "required", "listInt")
_reduce_max_reg.attr("keep_dims", "required", "bool")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _reduce_max_reg.dtype_format(*_fmt)
op_info = _reduce_max_reg.get_op_info()


@op_info_register(op_info)
def _reduce_max_akg():
    """Register the ReduceMax op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""ReduceMin op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the ReduceMin operator.
_reduce_min_reg = AkgGpuRegOp("ReduceMin")
_reduce_min_reg.fusion_type("COMMREDUCE")
_reduce_min_reg.input(0, "x")
_reduce_min_reg.output(0, "output")
# Reduction axes and whether reduced dims are kept are required attributes.
_reduce_min_reg.attr("axis", "required", "listInt")
_reduce_min_reg.attr("keep_dims", "required", "bool")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _reduce_min_reg.dtype_format(*_fmt)
op_info = _reduce_min_reg.get_op_info()


@op_info_register(op_info)
def _reduce_min_akg():
    """Register the ReduceMin op for the AKG GPU backend."""
    return
|
|
@ -1,33 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""ReduceSum op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the ReduceSum operator.
_reduce_sum_reg = AkgGpuRegOp("ReduceSum")
_reduce_sum_reg.fusion_type("COMMREDUCE")
_reduce_sum_reg.input(0, "x")
_reduce_sum_reg.output(0, "output")
# Reduction axes and whether reduced dims are kept are required attributes.
_reduce_sum_reg.attr("axis", "required", "listInt")
_reduce_sum_reg.attr("keep_dims", "required", "bool")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _reduce_sum_reg.dtype_format(*_fmt)
op_info = _reduce_sum_reg.get_op_info()


@op_info_register(op_info)
def _reduce_sum_akg():
    """Register the ReduceSum op for the AKG GPU backend."""
    return
|
|
@ -1,41 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Reshape op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Reshape operator.
_reshape_reg = AkgGpuRegOp("Reshape")
_reshape_reg.fusion_type("OPAQUE")
_reshape_reg.input(0, "x")
_reshape_reg.output(0, "y")
# The target shape is a required attribute.
_reshape_reg.attr("shape", "required", "listInt")
# Reshape is dtype-agnostic: every supported dtype maps to itself,
# in the registration order below.
for _dt in (DT.BOOL_Default, DT.I8_Default, DT.I16_Default, DT.I32_Default,
            DT.I64_Default, DT.U8_Default, DT.U16_Default, DT.U32_Default,
            DT.U64_Default, DT.F16_Default, DT.F32_Default, DT.F64_Default):
    _reshape_reg.dtype_format(_dt, _dt)
op_info = _reshape_reg.get_op_info()


@op_info_register(op_info)
def _reshape_akg():
    """Register the Reshape op for the AKG GPU backend."""
    return
|
|
@ -1,32 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Round op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Round operator.
_round_reg = AkgGpuRegOp("Round")
_round_reg.fusion_type("OPAQUE")
_round_reg.input(0, "x")
_round_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default),
             (DT.I32_Default, DT.I32_Default)):
    _round_reg.dtype_format(*_fmt)
op_info = _round_reg.get_op_info()


@op_info_register(op_info)
def _round_akg():
    """Register the Round op for the AKG GPU backend."""
    return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Rsqrt op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Rsqrt operator.
_rsqrt_reg = AkgGpuRegOp("Rsqrt")
_rsqrt_reg.fusion_type("OPAQUE")
_rsqrt_reg.input(0, "x")
_rsqrt_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _rsqrt_reg.dtype_format(*_fmt)
op_info = _rsqrt_reg.get_op_info()


@op_info_register(op_info)
def _rsqrt_akg():
    """Register the Rsqrt op for the AKG GPU backend."""
    return
|
|
@ -1,37 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Select op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Select operator.
_select_reg = AkgGpuRegOp("Select")
_select_reg.fusion_type("ELEMWISE")
_select_reg.input(0, "condition")
_select_reg.input(1, "x")
_select_reg.input(2, "y")
_select_reg.output(0, "output")
# Supported (condition, x, y, output) dtype/format combinations: a bool
# condition selects between same-typed x/y, in both default and 5HD formats.
for _fmt in ((DT.BOOL_Default, DT.F16_Default, DT.F16_Default, DT.F16_Default),
             (DT.BOOL_Default, DT.F32_Default, DT.F32_Default, DT.F32_Default),
             (DT.BOOL_Default, DT.I32_Default, DT.I32_Default, DT.I32_Default),
             (DT.BOOL_5HD, DT.F16_5HD, DT.F16_5HD, DT.F16_5HD),
             (DT.BOOL_5HD, DT.F32_5HD, DT.F32_5HD, DT.F32_5HD),
             (DT.BOOL_5HD, DT.I32_5HD, DT.I32_5HD, DT.I32_5HD)):
    _select_reg.dtype_format(*_fmt)
op_info = _select_reg.get_op_info()


@op_info_register(op_info)
def _select_akg():
    """Register the Select op for the AKG GPU backend."""
    return
|
|
@ -1,31 +0,0 @@
|
|||
# Copyright 2020 Huawei Technologies Co., Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ============================================================================
|
||||
|
||||
"""Sqrt op"""
|
||||
from mindspore.ops.op_info_register import op_info_register, AkgGpuRegOp, DataType as DT
|
||||
|
||||
# Build the AKG GPU register information for the Sqrt operator.
_sqrt_reg = AkgGpuRegOp("Sqrt")
_sqrt_reg.fusion_type("ELEMWISE")
_sqrt_reg.input(0, "x")
_sqrt_reg.output(0, "output")
# Supported (input, output) dtype/format combinations, in registration order.
for _fmt in ((DT.F16_Default, DT.F16_Default),
             (DT.F32_Default, DT.F32_Default)):
    _sqrt_reg.dtype_format(*_fmt)
op_info = _sqrt_reg.get_op_info()


@op_info_register(op_info)
def _sqrt_akg():
    """Register the Sqrt op for the AKG GPU backend."""
    return
|
Loading…
Reference in New Issue