Add v2 overflow check on Ascend

suxin 2023-03-01 12:46:35 +08:00
parent 34ba528c42
commit 9fe50205bd
24 changed files with 669 additions and 58 deletions
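In brief: this adds the NPUClearFloatStatusV2/NPUGetFloatStatusV2 TBE ops, which exchange an (8,) int32 status tensor instead of the v1 float status buffer; allocates a single global 512-byte overflow workspace shared by all graphs; appends its address to overflow-aware TBE and HCCL kernels; and switches TrainOneStepWithLossScaleCell to the v2 check. A minimal sketch of the resulting user-facing pattern, condensed from the V2 docstrings added below (the class name and input values are illustrative only):

import mindspore as ms
import numpy as np
from mindspore import Tensor, nn, ops

ms.set_context(device_target="Ascend")  # the V2 status ops are Ascend-only

class OverflowProbe(nn.Cell):
    def __init__(self):
        super().__init__()
        self.clear_status = ops.NPUClearFloatStatusV2()
        self.get_status = ops.NPUGetFloatStatusV2()
        self.equal = ops.Equal()
        self.reduce_all = ops.ReduceAll(keep_dims=False)
        self.base = Tensor([0], dtype=ms.int32)

    def construct(self, x):
        init = Tensor([0] * 8, dtype=ms.int32)       # (8,) int32 status tensor
        x = ops.depend(x, self.clear_status(init))   # clear the register before computing
        res = x + x                                  # the computation under detection
        init = ops.depend(init, res)                 # read the register only after computing
        flag = self.equal(self.base, self.get_status(init))
        return not self.reduce_all(flag)             # True means some flag is non-zero: overflow

print(OverflowProbe()(Tensor(np.full((2, 3), 65504, np.float16))))  # float16 max doubled -> True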

View File

@@ -282,6 +282,8 @@ constexpr auto kNonZeroOpName = "NonZero";
constexpr auto kNPUAllocFloatStatusOpName = "NPUAllocFloatStatus";
constexpr auto kNPUClearFloatStatusOpName = "NPUClearFloatStatus";
constexpr auto kNPUGetFloatStatusOpName = "NPUGetFloatStatus";
constexpr auto kNPUClearFloatStatusV2OpName = "NPUClearFloatStatusV2";
constexpr auto kNPUGetFloatStatusV2OpName = "NPUGetFloatStatusV2";
constexpr auto kOneHotOpName = "OneHot";
constexpr auto kPadAndShiftOpName = "PadAndShift";
constexpr auto kPaddingOpName = "Padding";

View File

@@ -24,6 +24,17 @@
namespace mindspore {
namespace kernel {
constexpr size_t kJsonSuffixLength = 5;
constexpr char kBinFileName[] = "binFileName";
constexpr char kBinFileSuffix[] = "binFileSuffix";
constexpr char kBlockDim[] = "blockDim";
constexpr char kKernelName[] = "kernelName";
constexpr char kMagic[] = "magic";
constexpr char kOpParaSize[] = "opParaSize";
constexpr char kSize[] = "size";
constexpr char kType[] = "type";
constexpr char kParameters[] = "parameters";
constexpr char kWorkspace[] = "workspace";
constexpr char kGlobalWorkspaceSpecWorkspace[] = "globalworkspace_spec_workspace";
namespace {
bool CheckHash(const std::string &json_file, const std::string &bin_file, const nlohmann::json &js) {
if (js.find("sha256") == js.end()) {
@@ -138,7 +149,7 @@ bool KernelPack::ReadFromJsonFile(const std::string &json_f, const std::string &
return true;
}
std::string binfile_suffix = js["binFileSuffix"];
std::string binfile_suffix = js[kBinFileSuffix];
std::string bin_f = json_f.substr(0, json_f.length() - kJsonSuffixLength) + binfile_suffix;
if (binfile_suffix == ".so") {
// change "xx/xx.so" -> "xx/libxx.so"
@@ -176,31 +187,57 @@ bool KernelPack::ReadFromJsonFile(const std::string &json_f, const std::string &
return true;
}
void KernelPack::ParseKernelJson(const nlohmann::json &js) {
kernel_json_info_.bin_file_name = js["binFileName"];
kernel_json_info_.bin_file_suffix = js["binFileSuffix"];
kernel_json_info_.block_dim = js["blockDim"];
kernel_json_info_.kernel_name = js["kernelName"];
kernel_json_info_.magic = js["magic"];
if (js.contains("opParaSize")) {
kernel_json_info_.op_para_size = js["opParaSize"];
void KernelPack::ParseGlobalWorkSpace(const std::string &key, const nlohmann::json &js,
KernelJsonInfo *kernel_json_info) {
MS_EXCEPTION_IF_NULL(kernel_json_info);
if (js.find(key) == js.end()) {
return;
}
if (js.find("parameters") != js.end()) {
if (!js.at("parameters").is_array()) {
try {
auto global_workspace = js.at(key);
auto iter_size = global_workspace.find(kSize);
if (iter_size != global_workspace.end()) {
kernel_json_info->global_workspace.size = global_workspace.at(kSize);
kernel_json_info->global_workspace.is_overflow = true;
}
auto iter_type = global_workspace.find(kType);
if (iter_type != global_workspace.end()) {
kernel_json_info->global_workspace.type = global_workspace.at(kType);
kernel_json_info->global_workspace.is_overflow = true;
}
} catch (std::exception &e) {
MS_LOG(EXCEPTION) << "Parse json value failed, json is: " + js.dump() + ", error info: " << e.what();
}
}
void KernelPack::ParseKernelJson(const nlohmann::json &js) {
kernel_json_info_.bin_file_name = js[kBinFileName];
kernel_json_info_.bin_file_suffix = js[kBinFileSuffix];
kernel_json_info_.block_dim = js[kBlockDim];
kernel_json_info_.kernel_name = js[kKernelName];
kernel_json_info_.magic = js[kMagic];
if (js.contains(kOpParaSize)) {
kernel_json_info_.op_para_size = js[kOpParaSize];
}
if (js.find(kParameters) != js.end()) {
if (!js.at(kParameters).is_array()) {
MS_LOG(DEBUG) << "Format error! parameters should be an array.";
}
std::vector<size_t> sizes = js.at("parameters");
std::vector<size_t> sizes = js.at(kParameters);
for (auto size : sizes) {
kernel_json_info_.parameters.push_back(size);
}
}
if (js.find("workspace") != js.end()) {
auto workspace = js.at("workspace");
std::vector<size_t> sizes = workspace.at("size");
if (js.find(kWorkspace) != js.end()) {
auto workspace = js.at(kWorkspace);
std::vector<size_t> sizes = workspace.at(kSize);
for (auto size : sizes) {
kernel_json_info_.workspaces.push_back(size);
}
}
if (js.find(kGlobalWorkspaceSpecWorkspace) != js.end()) {
ParseGlobalWorkSpace(kGlobalWorkspaceSpecWorkspace, js, &kernel_json_info_);
}
kernel_json_info_.sha256 = js["sha256"];
kernel_json_info_.has_kernel_list = js.find("kernelList") != js.end();
}
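For orientation, a hypothetical compiled-kernel JSON carrying the fields that ParseKernelJson and ParseGlobalWorkSpace read; the key names follow the constants above, while every value here is invented for illustration:

kernel_json = {
    "binFileName": "te_add_0a1b2c",
    "binFileSuffix": ".o",
    "blockDim": 8,
    "kernelName": "te_add_0a1b2c__kernel0",
    "magic": "RT_DEV_BINARY_MAGIC_ELF",
    "opParaSize": 0,
    "parameters": [32, 32],
    "workspace": {"size": [1024]},
    # optional; when present, size/type are copied into GlobalWorkspace
    # and is_overflow is set to true, marking the kernel overflow-aware
    "globalworkspace_spec_workspace": {"size": 512, "type": 1},
    "sha256": "<hash of the json/bin pair>",
}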

View File

@@ -121,6 +121,12 @@ struct FlexArray {
char contents[];
};
struct GlobalWorkspace {
size_t size;
size_t type;
bool is_overflow = false;
};
struct KernelJsonInfo {
std::string bin_file_name;
std::string bin_file_suffix;
@@ -130,6 +136,7 @@ struct KernelJsonInfo {
std::vector<size_t> parameters;
std::string sha256;
std::vector<size_t> workspaces;
GlobalWorkspace global_workspace;
bool has_kernel_list = false;
uint32_t op_para_size;
KernelJsonInfo() : block_dim(0), op_para_size(0) {}
@@ -159,6 +166,7 @@ class BACKEND_EXPORT KernelPack {
private:
bool ReadFromJsonFileHelper(std::ifstream &kernel_bin);
void ParseKernelJson(const nlohmann::json &js);
static void ParseGlobalWorkSpace(const std::string &key, const nlohmann::json &js, KernelJsonInfo *kernel_json_info);
KernelJsonInfo kernel_json_info_;
FlexArray *json_;
FlexArray *kernel_;

View File

@@ -19,6 +19,7 @@
#include <algorithm>
#include "ir/func_graph.h"
#include "runtime/mem.h"
#include "acl/acl_rt.h"
#include "utils/ms_context.h"
#include "utils/convert_utils_base.h"
#include "graphengine/inc/external/runtime/rt_error_codes.h"
@@ -35,6 +36,7 @@ constexpr double kHalfRatio = 0.5;
// The Ascend max available device memory is 32GB.
constexpr float kAscendMaxDeviceMemory = 32;
constexpr uint64_t kOverflowAddrSize = 512;
constexpr char kGlobalOverflowWorkspace[] = "GLOBAL_OVERFLOW_WORKSPACE";
size_t AscendMemAdapter::GetRoundDownAlignSize(size_t input_size) {
return (input_size / kAscendMemAlignSize) * kAscendMemAlignSize;
@@ -176,16 +178,16 @@ uint8_t *AscendMemAdapter::MallocDynamicDevMem(size_t size, const std::string &t
return memory_block_ptr;
}
uint8_t *AscendMemAdapter::MallocOverflowMem(const CNodePtr &kernel) {
uint8_t *AscendMemAdapter::MallocOverflowMem() {
std::lock_guard<std::mutex> locker(overflow_mutex_);
auto funcGraph = kernel->func_graph();
MS_EXCEPTION_IF_NULL(funcGraph);
if (overflow_memory_info_map_.find(funcGraph->ToString()) != overflow_memory_info_map_.cend()) {
return overflow_memory_info_map_.find(funcGraph->ToString())->second;
auto iter = overflow_memory_info_map_.find(kGlobalOverflowWorkspace);
if (iter != overflow_memory_info_map_.cend()) {
return iter->second;
} else {
auto overflow_memory_ptr = MallocStaticDevMem(kOverflowAddrSize, "overflow memory ptr");
auto overflow_memory_ptr = MallocStaticDevMem(kOverflowAddrSize, "global overflow memory ptr");
MS_EXCEPTION_IF_NULL(overflow_memory_ptr);
(void)overflow_memory_info_map_.emplace(funcGraph->ToString(), overflow_memory_ptr);
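// zero the freshly allocated 512-byte global overflow workspace (aclrtMemset args: dst, destMax, value, count)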
(void)aclrtMemset(overflow_memory_ptr, kOverflowAddrSize, 0, kOverflowAddrSize);
(void)overflow_memory_info_map_.emplace(kGlobalOverflowWorkspace, overflow_memory_ptr);
return overflow_memory_ptr;
}
}

View File

@@ -39,7 +39,7 @@ class AscendMemAdapter {
uint8_t *MallocStaticDevMem(size_t size, const std::string &tag = "");
uint8_t *MallocDynamicDevMem(size_t size, const std::string &tag = "");
uint8_t *MallocOverflowMem(const CNodePtr &kernel);
uint8_t *MallocOverflowMem();
bool FreeStaticDevMem(void *) const { return true; }
void ResetDynamicMemory();

View File

@@ -297,10 +297,11 @@ std::vector<TaskInfoPtr> HcclKernel::GenTask(const std::vector<AddressPtr> &inpu
}
std::vector<void *> global_workspace_addr;
auto overflow_memory_ptr =
device::ascend::AscendMemAdapter::GetInstance().MallocOverflowMem(anf_node_.lock()->cast<CNodePtr>());
auto overflow_memory_ptr = device::ascend::AscendMemAdapter::GetInstance().MallocOverflowMem();
MS_EXCEPTION_IF_NULL(overflow_memory_ptr);
global_workspace_addr.push_back(reinterpret_cast<void *>(overflow_memory_ptr));
MS_LOG(DEBUG) << "Assign overflow memory for node " << anf_node->fullname_with_scope() << ", addr is "
<< overflow_memory_ptr;
HcclTaskInfoPtr hcclTaskInfo =
std::make_shared<HcclTaskInfo>(unique_name_, stream_id, hccl::HcclAdapter::GetHcclType(anf_node), input_data_addr,

View File

@@ -278,6 +278,13 @@ bool DynamicTbeKernelMod::Launch(const std::vector<AddressPtr> &inputs, const st
runtimeargs.push_back(tiling_data_ptr_);
}
AddressPtr overflow_address_ptr = GetOverflowAddress();
if (overflow_address_ptr != nullptr) {
runtimeargs.emplace_back(overflow_address_ptr->addr);
MS_LOG(DEBUG) << "Assign overflow memory for node " << node->fullname_with_scope() << ", addr is "
<< overflow_address_ptr->addr;
}
rtL2Ctrl_t *l2ctrl = nullptr;
auto args_size = static_cast<uint32_t>(UlongToUint(sizeof(void *)) * runtimeargs.size());
auto node_info = cnode->fullname_with_scope();

View File

@@ -30,6 +30,7 @@
#include "plugin/device/ascend/kernel/tbe/tbe_json/tbe_json_utils.h"
namespace mindspore::kernel {
constexpr size_t kNpuV2OpListJsonSize = 2;
using mindspore::kernel::tbe::TbeAdapter;
bool SingleTbeJsonCreator::GenJson(const AnfNodePtr &anf_node, nlohmann::json *kernel_json) {
MS_EXCEPTION_IF_NULL(anf_node);
@@ -56,6 +57,46 @@ bool SingleTbeJsonCreator::GenJson(const AnfNodePtr &anf_node, nlohmann::json *k
return true;
}
void NpuClearV2PostProcessing(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(op_list_json);
if (op_list_json->size() != kNpuV2OpListJsonSize) {
MS_LOG(ERROR) << "Op list json's size is not equal to 2, abort post processing.";
return;
}
auto compute_json = (*op_list_json)[1];
std::vector<nlohmann::json> empty_vector_json;
compute_json[kJInputDesc] = empty_vector_json;
compute_json[kJOutputDataDesc] = empty_vector_json;
compute_json[kJOutputDesc] = empty_vector_json;
op_list_json->clear();
(*op_list_json).emplace_back(compute_json);
MS_LOG(DEBUG) << "Op list json after post processing:" << compute_json.dump();
}
void NpuGetV2PostProcessing(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(op_list_json);
if (op_list_json->size() != kNpuV2OpListJsonSize) {
MS_LOG(ERROR) << "Op list json's size is not equal to 2, abort post processing.";
return;
}
auto compute_json = (*op_list_json)[1];
std::vector<nlohmann::json> empty_vector_json;
compute_json[kJInputDesc] = empty_vector_json;
op_list_json->clear();
(*op_list_json).emplace_back(compute_json);
MS_LOG(DEBUG) << "Op list json after post processing:" << compute_json.dump();
}
void SingleTbeJsonCreator::OpListPostProcessing(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(op_list_json);
auto kernel_name = common::AnfAlgo::GetCNodeName(anf_node);
if (kernel_name == kNPUClearFloatStatusV2OpName) {
NpuClearV2PostProcessing(anf_node, op_list_json);
} else if (kernel_name == kNPUGetFloatStatusV2OpName) {
NpuGetV2PostProcessing(anf_node, op_list_json);
}
}
bool SingleTbeJsonCreator::GenOpListJson(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json) {
MS_EXCEPTION_IF_NULL(anf_node);
MS_EXCEPTION_IF_NULL(op_list_json);
@@ -67,6 +108,7 @@ bool SingleTbeJsonCreator::GenOpListJson(const AnfNodePtr &anf_node, std::vector
}
GenDataJson(anf_node, compute_json, op_list_json);
(*op_list_json).push_back(compute_json);
OpListPostProcessing(anf_node, op_list_json);
MS_LOG(DEBUG) << "End.";
return true;
}

View File

@@ -29,6 +29,7 @@ class SingleTbeJsonCreator : public TbeJsonCreator {
protected:
bool GenOpListJson(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json);
void OpListPostProcessing(const AnfNodePtr &anf_node, std::vector<nlohmann::json> *op_list_json);
void GenDataJson(const AnfNodePtr &anf_node, const nlohmann::json &compute_json,
std::vector<nlohmann::json> *op_list_json) const;
virtual void GenInputDescJson(const AnfNodePtr &anf_node, size_t real_input_index, nlohmann::json *input_desc);

View File

@@ -330,7 +330,7 @@ size_t TbeJsonCreator::GenJsonHash(nlohmann::json tbe_json) const {
DeleteDescName(&op.at(kJInputDesc));
}
}
return std::hash<std::string>()(op_lists.dump());
return std::hash<std::string>()(op_lists.dump() + tbe_json.at(kJSocInfo).dump());
}
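// Note: hashing SocInfo too means every kernel JSON hash changes whenever soc_info changes (e.g. the
// new "status_check" field below); the expected hash constants in the TBE JSON creator unit tests at
// the end of this diff are updated accordingly.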
void TbeJsonCreator::AddOpNameForComputeNode(nlohmann::json *kernel_json) const {

View File

@@ -107,6 +107,7 @@ constexpr auto kPyPath = "/usr/local/Ascend/latest/opp/built-in/op_impl/ai_core/
constexpr auto kJMaxKernelIDValue = 10;
constexpr auto kJConstValue = "const_value";
constexpr auto kJConstValueDtype = "const_value_dtype";
constexpr auto kJStatusCheck = "status_check";
class TbeJsonUtils {
public:

View File

@@ -515,6 +515,8 @@ void TbeKernelCompileManager::GenKernelMod(const std::vector<CNodePtr> &node_lis
if (AnfAlgo::GetKernelMod(node) != nullptr) {
continue; // kernel mod already exist, continue;
}
auto op_name = common::AnfAlgo::GetCNodeName(node);
auto full_name = node->fullname_with_scope();
auto json_name = full_name_to_json_name_[full_name];
auto kernel_pack = tbe::TbeUtils::SearchCache(json_name, false);
@@ -543,6 +545,12 @@
kernel_mod_ptr->SetInputSizeList(iter->second.input_size_list);
kernel_mod_ptr->SetOutputSizeList(iter->second.output_size_list);
kernel_mod_ptr->SetWorkspaceSizeList(kernel_info_json.workspaces);
if (op_name == kNPUClearFloatStatusV2OpName || op_name == kNPUGetFloatStatusV2OpName) {
constexpr size_t io_byte_size = 32;
const std::vector<size_t> size_list = {io_byte_size};
kernel_mod_ptr->SetInputSizeList(size_list);
kernel_mod_ptr->SetOutputSizeList(size_list);
}
AnfAlgo::SetKernelMod(kernel_mod_ptr, node.get());
}
ClearOldTask();

View File

@@ -21,6 +21,7 @@
#include "utils/ms_context.h"
#include "plugin/device/ascend/hal/device/ge_runtime/task_info.h"
#include "runtime/device/kernel_runtime.h"
#include "plugin/device/ascend/hal/device/ascend_memory_adapter.h"
namespace mindspore {
namespace kernel {
@@ -59,14 +60,23 @@ bool TbeKernelMod::Launch(const std::vector<mindspore::kernel::AddressPtr> &inpu
return false;
}
auto node = anf_node_.lock();
MS_EXCEPTION_IF_NULL(node);
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
std::vector<mindspore::kernel::AddressPtr> real_inputs;
std::vector<mindspore::kernel::AddressPtr> real_outputs;
GetRealIOAddress(cnode, inputs, outputs, &real_inputs, &real_outputs);
// pack all addresses into a vector.
std::vector<void *> runtimeargs;
(void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(runtimeargs),
(void)std::transform(std::begin(real_inputs), std::end(real_inputs), std::back_inserter(runtimeargs),
[](const AddressPtr &input) -> void * {
MS_EXCEPTION_IF_NULL(input);
return input->addr;
});
(void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(runtimeargs),
(void)std::transform(std::begin(real_outputs), std::end(real_outputs), std::back_inserter(runtimeargs),
[](const AddressPtr &output) -> void * {
MS_EXCEPTION_IF_NULL(output);
return output->addr;
@@ -78,6 +88,13 @@ bool TbeKernelMod::Launch(const std::vector<mindspore::kernel::AddressPtr> &inpu
return addr->addr;
});
}
AddressPtr overflow_address_ptr = GetOverflowAddress();
if (overflow_address_ptr != nullptr) {
runtimeargs.emplace_back(overflow_address_ptr->addr);
MS_LOG(DEBUG) << "Assign overflow memory for node " << cnode->fullname_with_scope() << ", addr is "
<< overflow_address_ptr->addr;
}
rtL2Ctrl_t *l2ctrl = nullptr;
const void *stubFunc = reinterpret_cast<void *>(func_stub);
auto argsSize = static_cast<uint32_t>(UlongToUint(sizeof(void *)) * runtimeargs.size());
@@ -105,13 +122,22 @@ std::vector<TaskInfoPtr> TbeKernelMod::GenTask(const std::vector<AddressPtr> &in
std::vector<void *> output_data_addrs;
std::vector<void *> workspace_addrs;
auto node = anf_node_.lock();
MS_EXCEPTION_IF_NULL(node);
auto cnode = node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
std::vector<mindspore::kernel::AddressPtr> real_inputs;
std::vector<mindspore::kernel::AddressPtr> real_outputs;
GetRealIOAddress(cnode, inputs, outputs, &real_inputs, &real_outputs);
// pack all addresses into a vector.
(void)std::transform(std::begin(inputs), std::end(inputs), std::back_inserter(input_data_addrs),
(void)std::transform(std::begin(real_inputs), std::end(real_inputs), std::back_inserter(input_data_addrs),
[](const AddressPtr &input) -> void * {
MS_EXCEPTION_IF_NULL(input);
return input->addr;
});
(void)std::transform(std::begin(outputs), std::end(outputs), std::back_inserter(output_data_addrs),
(void)std::transform(std::begin(real_outputs), std::end(real_outputs), std::back_inserter(output_data_addrs),
[](const AddressPtr &output) -> void * {
MS_EXCEPTION_IF_NULL(output);
return output->addr;
@@ -124,6 +150,13 @@ std::vector<TaskInfoPtr> TbeKernelMod::GenTask(const std::vector<AddressPtr> &in
});
}
AddressPtr overflow_address_ptr = GetOverflowAddress();
if (overflow_address_ptr != nullptr) {
workspace_addrs.emplace_back(overflow_address_ptr->addr);
MS_LOG(DEBUG) << "Assign overflow memory for node " << cnode->fullname_with_scope() << ", addr is "
<< overflow_address_ptr->addr;
}
stream_id_ = stream_id;
auto funcstub = KernelManager::GenFuncStub(*kernel_pack_, false, &block_dim_);
if (funcstub == 0) {
@@ -145,5 +178,46 @@ vector<size_t> TbeKernelMod::GenParameters() {
auto kernel_json_info = kernel_pack_->kernel_json_info();
return kernel_json_info.parameters;
}
bool TbeKernelMod::GetKernelOverflowStatus() {
MS_EXCEPTION_IF_NULL(kernel_pack_);
auto kernel_json_info = kernel_pack_->kernel_json_info();
return kernel_json_info.global_workspace.is_overflow;
}
AddressPtr TbeKernelMod::GetOverflowAddress() {
AddressPtr overflow_address_ptr = nullptr;
auto is_overflow = GetKernelOverflowStatus();
if (is_overflow) {
constexpr size_t size = 32;
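// the V2 status ops exchange an (8,) int32 tensor: 8 * sizeof(int32_t) = 32 bytes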
auto overflow_memory_ptr = device::ascend::AscendMemAdapter::GetInstance().MallocOverflowMem();
MS_EXCEPTION_IF_NULL(overflow_memory_ptr);
overflow_address_ptr = std::make_shared<kernel::Address>();
overflow_address_ptr->addr = overflow_memory_ptr;
overflow_address_ptr->size = size;
}
return overflow_address_ptr;
}
void TbeKernelMod::GetRealIOAddress(const AnfNodePtr &cnode, const vector<AddressPtr> &inputs,
const vector<AddressPtr> &outputs,
vector<mindspore::kernel::AddressPtr> *real_inputs,
vector<mindspore::kernel::AddressPtr> *real_outputs) const {
auto op_name = common::AnfAlgo::GetCNodeName(cnode);
MS_EXCEPTION_IF_NULL(real_inputs);
MS_EXCEPTION_IF_NULL(real_outputs);
*real_inputs = inputs;
*real_outputs = outputs;
if (op_name == kNPUClearFloatStatusV2OpName) {
// NPUClearFloatStatusV2 has no input output.
real_inputs->clear();
real_outputs->clear();
MS_LOG(INFO) << "Clear Node " << cnode->fullname_with_scope() << "'s inputs and outputs";
} else if (op_name == kNPUGetFloatStatusV2OpName) {
// NPUGetFloatStatusV2 has no input
real_inputs->clear();
MS_LOG(INFO) << "Clear Node " << cnode->fullname_with_scope() << "'s inputs";
}
}
} // namespace kernel
} // namespace mindspore

View File

@@ -42,6 +42,11 @@ class TbeKernelMod : public AscendKernelMod {
std::vector<TaskInfoPtr> GenTask(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspaces,
const std::vector<AddressPtr> &outputs, uint32_t stream_id) override;
std::vector<size_t> GenParameters() override;
bool GetKernelOverflowStatus();
AddressPtr GetOverflowAddress();
void GetRealIOAddress(const AnfNodePtr &cnode, const std::vector<AddressPtr> &inputs,
const std::vector<AddressPtr> &outputs, std::vector<AddressPtr> *real_inputs,
std::vector<AddressPtr> *real_outputs) const;
protected:
KernelPackPtr kernel_pack_;

View File

@@ -164,6 +164,8 @@ nlohmann::json TbeUtils::GenSocInfo() {
soc_info_json["op_debug_level"] = GetOpDebugLevel();
soc_info_json["autoTilingMode"] = context_ptr->get_param<std::string>(MS_CTX_TUNE_MODE);
soc_info_json["deviceId"] = std::to_string(context_ptr->get_param<uint32_t>(MS_CTX_DEVICE_ID));
soc_info_json["status_check"] = "true";
std::string config_path;
if (!Common::CommonFuncForConfigPath("", common::GetEnv("OP_BANK_PATH"), &config_path)) {
MS_LOG(EXCEPTION) << "Invalid environment variable 'OP_BANK_PATH', the path is " << common::GetEnv("OP_BANK_PATH")

View File

@@ -293,7 +293,7 @@ def get_options_info(job_content):
options["autoTilingMode"] = job_content["SocInfo"]["autoTilingMode"]
options["op_impl_mode_list"] = job_content["SocInfo"]["op_impl_mode_list"]
options["kernel_meta_temp_dir"] = job_content["SocInfo"]["kernel_meta_temp_dir"]
options["status_check"] = "false"
options["status_check"] = job_content["SocInfo"]["status_check"]
return options

View File

@@ -308,8 +308,11 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
super(TrainOneStepWithLossScaleCell, self).__init__(network, optimizer, sens=None)
self.hyper_map = C.HyperMap()
self.base = Tensor(1, mstype.float32)
self.base0 = Tensor(0, mstype.int32)
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.reduce_all = P.ReduceAll(keep_dims=False)
self.less_equal = P.LessEqual()
self.equal = P.Equal()
self.allreduce = P.AllReduce()
self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
self.gpu_target = (context.get_context("device_target") == "GPU")
@@ -389,13 +392,11 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
The second value is the same as the input of `compute_input`, but contains some information about the
execution order.
"""
status = False
status = Tensor([0, 0, 0, 0, 0, 0, 0, 0], mstype.int32)
if not self.gpu_target:
# init overflow buffer
status = P.NPUAllocFloatStatus()()
status = F.depend(status, pre_cond)
# clear overflow buffer
clear_status = P.NPUClearFloatStatus()(status)
clear_status = P.NPUClearFloatStatusV2()(status)
compute_input = F.depend(compute_input, clear_status)
return status, compute_input
@@ -418,22 +419,36 @@
"""
if not self.gpu_target:
status = F.depend(status, compute_output)
get_status = P.NPUGetFloatStatus()(status)
status = F.depend(status, get_status)
# sum overflow buffer elements, 0:not overflow , >0:overflow
flag_sum = self.reduce_sum(status, (0,))
get_status = P.NPUGetFloatStatusV2()(status)
if self.is_distributed:
# sum overflow flag over devices
flag_reduce = self.allreduce(get_status)
# flag_reduce not equal to [0]*8 means overflow
flag = self.equal(self.base0, flag_reduce)
status = F.depend(status, flag)
clear_status = P.NPUClearFloatStatusV2()(status)
flag = F.depend(flag, clear_status)
overall_finite = self.reduce_all(flag)
else:
status = F.depend(status, get_status)
clear_status = P.NPUClearFloatStatusV2()(status)
get_status = F.depend(get_status, clear_status)
flag = self.equal(self.base0, get_status)
overall_finite = self.reduce_all(flag)
overflow = not overall_finite
else:
flag_sum = self.hyper_map(F.partial(_grad_overflow), compute_output)
flag_sum = P.AddN()(flag_sum)
# convert flag_sum to scalar
flag_sum = P.Reshape()(flag_sum, (()))
if self.is_distributed:
# sum overflow flag over devices
flag_reduce = self.allreduce(flag_sum)
overflow = self.less_equal(self.base, flag_reduce)
else:
overflow = self.less_equal(self.base, flag_sum)
return overflow
def process_loss_scale(self, overflow):
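As a small illustration of the distributed branch above, the aggregation can be simulated with numpy (values invented; AllReduce modeled as an element-wise sum of the per-device status vectors):

import numpy as np

# two devices: device 0 is finite, device 1 saw an overflow in one status slot
device_status = [np.zeros(8, np.int32), np.array([0, 0, 1, 0, 0, 0, 0, 0], np.int32)]
flag_reduce = np.sum(device_status, axis=0)  # self.allreduce(get_status)
flag = flag_reduce == 0                      # self.equal(self.base0, flag_reduce)
overall_finite = flag.all()                  # self.reduce_all(flag)
print(not overall_finite)                    # True -> at least one device overflowed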

View File

@@ -624,3 +624,5 @@ from .hard_swish_ds import _hard_swish_ds_tbe
from .hard_swish_grad_ds import _hard_swish_grad_ds_tbe
from .arg_min_with_value_ds import _arg_min_with_value_ds_tbe
from .im2col import _im2col_tbe
from .npu_clear_float_status_v2 import _npu_clear_float_status_v2_tbe
from .npu_get_float_status_v2 import _npu_get_float_status_v2_tbe

View File

@@ -0,0 +1,35 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""NPUClearFloatStatusV2 op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
npu_clear_float_status_v2_op_info = TBERegOp("NPUClearFloatStatusV2") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("n_p_u_clear_float_status_v2.so") \
.compute_cost(10) \
.kernel_name("n_p_u_clear_float_status_v2") \
.partial_flag(True) \
.input(0, "addr", False, "required", "all") \
.output(0, "data", False, "required", "all") \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.get_op_info()
@op_info_register(npu_clear_float_status_v2_op_info)
def _npu_clear_float_status_v2_tbe():
"""NPUClearFloatStatusV2 TBE register"""
return

View File

@@ -0,0 +1,35 @@
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""NPUGetFloatStatusV2 op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
npu_get_float_status_v2_op_info = TBERegOp("NPUGetFloatStatusV2") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("n_p_u_get_float_status_v2.so") \
.compute_cost(10) \
.kernel_name("n_p_u_get_float_status_v2") \
.partial_flag(True) \
.input(0, "addr", False, "required", "all") \
.output(0, "data", False, "required", "all") \
.dtype_format(DataType.I32_Default, DataType.I32_Default) \
.get_op_info()
@op_info_register(npu_get_float_status_v2_op_info)
def _npu_get_float_status_v2_tbe():
"""NPUGetFloatStatusV2 TBE register"""
return

View File

@@ -66,8 +66,8 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A
Ceil, Acosh, Greater, GreaterEqual, Lerp, Less, LessEqual, Log, Log1p, LogicalAnd, Mod,
LogicalNot, LogicalOr, LogicalXor, LpNorm, MatMul, Maximum, MulNoNan,
MatrixDeterminant, LogMatrixDeterminant, Minimum, Mul, Neg, NMSWithMask, NotEqual,
NPUAllocFloatStatus, NPUClearFloatStatus, LinSpace, Einsum, Renorm,
NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus,
NPUAllocFloatStatus, NPUClearFloatStatus, NPUClearFloatStatusV2, LinSpace, Einsum, Renorm,
NPUGetFloatStatus, NPUGetFloatStatusV2, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus,
Reciprocal, CumSum, HistogramFixedWidth, SquaredDifference, Xdivy, Xlogy,
Sin, Sqrt, Rsqrt, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, Addcdiv,
Addcmul, Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps,
@@ -302,6 +302,8 @@ __all__ = [
'NPUAllocFloatStatus',
'NPUGetFloatStatus',
'NPUClearFloatStatus',
'NPUGetFloatStatusV2',
'NPUClearFloatStatusV2',
'IsNan',
'IsFinite',
'IsInf',

View File

@@ -4287,6 +4287,189 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
return mstype.float32
class NPUClearFloatStatusV2(PrimitiveWithInfer):
"""
Clear the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the flag value is zero, no overflow has occurred; otherwise, an overflow has occurred.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, an additional input and output are added to
this operator. The input and output are defined as Tensors with shape :math:`(8,)` and data type
`mindspore.dtype.int32`; their values are meaningless.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
data type is `mindspore.dtype.int32`, and has no actual meaning.
Outputs:
Tensor, shape and data type are the same as input, meaningless.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not int32.
ValueError: If shape of `x` is not equal to :math:`(8,)`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = ops.NPUClearFloatStatusV2()
... self.get_status = ops.NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = not overall_finite
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_attr_register
def __init__(self):
"""Initialize NPUGetFloatStatus"""
def infer_shape(self, x_shape):
cls_name = self.name
validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
return [8]
def infer_dtype(self, x_dtype):
validator.check_tensor_dtype_valid('x', x_dtype, [mstype.int32], self.name)
return mstype.int32
class NPUGetFloatStatusV2(PrimitiveWithInfer):
"""
Get the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the flag value is zero, no overflow has occurred; otherwise, an overflow has occurred.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, an additional input is added to
this operator. The input is defined as a Tensor with shape :math:`(8,)` and data type
`mindspore.dtype.int32`; its value is meaningless.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
data type is `mindspore.dtype.int32`, and has no actual meaning.
Usually use the output of `NPUClearFloatStatusV2`.
Outputs:
Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not int32.
ValueError: If shape of `x` is not equal to :math:`(8,)`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = ops.NPUClearFloatStatusV2()
... self.get_status = ops.NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = not overall_finite
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_attr_register
def __init__(self):
"""Initialize NPUGetFloatStatus"""
def infer_shape(self, x_shape):
cls_name = self.name
validator.check_equal_int(len(x_shape), 1, "len(x_shape)", cls_name)
validator.check_equal_int(x_shape[0], 8, "x_shape[0]", cls_name)
return [8]
def infer_dtype(self, x_dtype):
validator.check_tensor_dtype_valid('x', x_dtype, [mstype.int32], self.name)
return mstype.int32
class Cos(Primitive):
r"""
Computes cosine of input element-wise.

View File

@@ -0,0 +1,148 @@
import pytest
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn, ops
from mindspore import dtype as mstype
ms.set_context(device_target="Ascend")
class OverflowCheckNet(nn.Cell):
def __init__(self):
super(OverflowCheckNet, self).__init__()
self.base1 = Tensor(1, mstype.float32)
self.base2 = Tensor(0, mstype.int32)
self.reduce_sum = ops.ReduceSum(keep_dims=False)
self.less_equal = ops.LessEqual()
self.reduce_all = ops.ReduceAll(keep_dims=False)
self.equal = ops.Equal()
def start_overflow_check_v1(self, pre_cond, compute_input):
status = False
# init overflow buffer
status = ops.NPUAllocFloatStatus()()
status = ops.depend(status, pre_cond)
# clear overflow buffer
clear_status = ops.NPUClearFloatStatus()(status)
compute_input = ops.depend(compute_input, clear_status)
return status, compute_input
def get_overflow_status_v1(self, status, compute_output):
status = ops.depend(status, compute_output)
get_status = ops.NPUGetFloatStatus()(status)
status = ops.depend(status, get_status)
# sum overflow buffer elements, 0:not overflow , >0:overflow
flag_sum = self.reduce_sum(status, (0,))
overflow = self.less_equal(self.base1, flag_sum)
return overflow
def start_overflow_check_v2(self, pre_cond, compute_input):
status = Tensor([0] * 8, mstype.int32)
status = ops.depend(status, pre_cond)
# clear overflow buffer
clear_status = ops.NPUClearFloatStatusV2()(status)
compute_input = ops.depend(compute_input, clear_status)
return status, compute_input
def get_overflow_status_v2(self, status, compute_output):
status = ops.depend(status, compute_output)
get_status = ops.NPUGetFloatStatusV2()(status)
status = ops.depend(status, get_status)
clear_status = ops.NPUClearFloatStatusV2()(status)
get_status = ops.depend(get_status, clear_status)
flag = self.equal(self.base2, get_status)
overall_finite = self.reduce_all(flag)
return not overall_finite
class OverFlowNetV2GetStatusAfterClear(OverflowCheckNet):
def __init__(self):
super(OverFlowNetV2GetStatusAfterClear, self).__init__()
self.mul = ops.Mul()
self.sub = ops.Sub()
def construct(self, x1, x2):
y1 = self.mul(x1, x1)
status, compute_input = self.start_overflow_check_v2(y1, x2)
y2 = self.sub(y1, compute_input)
cond = self.get_overflow_status_v2(status, y2)
return cond
class OverFlowNetV2GetStatus(OverflowCheckNet):
def __init__(self):
super(OverFlowNetV2GetStatus, self).__init__()
self.add = ops.Add()
self.mul = ops.Mul()
def construct(self, x1, x2):
y1 = self.add(x1, x1)
status, compute_input = self.start_overflow_check_v2(y1, x2)
y2 = self.mul(y1, compute_input)
cond = self.get_overflow_status_v2(status, y2)
return cond
class OverflowCheckV1vsV2(OverflowCheckNet):
def __init__(self):
super(OverflowCheckV1vsV2, self).__init__()
self.add = ops.Add()
self.atan2 = ops.Atan2()
def construct(self, x1, x2, version):
y1 = self.add(x1, x1)
if version == 1:
status, compute_input = self.start_overflow_check_v1(y1, x2)
y2 = self.atan2(y1, compute_input)
cond = self.get_overflow_status_v1(status, y2)
else:
status, compute_input = self.start_overflow_check_v2(y1, x2)
y2 = self.atan2(y1, compute_input)
cond = self.get_overflow_status_v2(status, y2)
return cond
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_v2_overflow_get_after_clear(mode):
"""
Feature: overflow check v2
Description: Verify the result of get_status after clear
Expectation: success
"""
ms.set_context(mode=mode)
net = OverFlowNetV2GetStatusAfterClear()
output = net(Tensor(65504, mstype.float16), Tensor(1, mstype.float16))
assert not output
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_v2_clear_overflow_get(mode):
"""
Feature: overflow check v2
Description: Verify the result of get_status when overflow
Expectation: success
"""
ms.set_context(mode=mode)
net = OverFlowNetV2GetStatus()
output = net(Tensor(1, mstype.float16), Tensor(65504, mstype.float16))
assert output
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_v1_vs_v2_overflow_check(mode):
"""
Feature: overflow check v1 vs v2
Description: Verify the result of atan2 when inputs include 0
Expectation: success
"""
ms.set_context(mode=mode)
input1 = np.random.random((2, 4)).astype(np.float32)
input2 = np.random.random((2, 4)).astype(np.float32)
input1[0] = 0
input2[1] = 0
net = OverflowCheckV1vsV2()
overflow_v1 = net(Tensor(input1), Tensor(input2), 1)
overflow_v2 = net(Tensor(input1), Tensor(input2), 2)
assert overflow_v1
assert not overflow_v2

View File

@@ -76,10 +76,10 @@ TEST_F(TestHWTBEJsonCreator, test_tbe_single_common) {
auto tbe_json_creator_build = std::make_shared<BuildTbeJsonCreator>();
nlohmann::json kernel_json;
EXPECT_TRUE(tbe_json_creator_select->GenJson(relu1, &kernel_json));
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 9567971019919923944U)
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 625899251304433355U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_common_select.json";
EXPECT_TRUE(tbe_json_creator_build->GenJson(relu1, &kernel_json));
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 10629156561275712246U)
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 15309694836809643355U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_common_build.json";
}
@@ -118,12 +118,13 @@ TEST_F(TestHWTBEJsonCreator, test_tbe_single_conv2d_backprop_filter) {
auto tbe_json_creator_build = std::make_shared<BuildTbeJsonCreator>();
nlohmann::json kernel_json;
EXPECT_TRUE(tbe_json_creator_select->GenJson(conv2d_backprop_filter, &kernel_json));
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 11540209983217608112U)
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 18275171084022100496U)
<< "Error json is:" << kernel_json
<< ", for expected json, see file: tbe_single_conv2d_backprop_filter_select.json";
EXPECT_TRUE(tbe_json_creator_build->GenJson(conv2d_backprop_filter, &kernel_json));
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 17147032375801630428U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_conv2d_backprop_filter_build.json";
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 12361929799718293308U)
<< "Error json is:" << kernel_json
<< ", for expected json, see file: tbe_single_conv2d_backprop_filter_build.json";
}
TEST_F(TestHWTBEJsonCreator, test_tbe_single_dynamic_rnn) {
@@ -176,10 +177,10 @@ TEST_F(TestHWTBEJsonCreator, test_tbe_single_dynamic_rnn) {
auto tbe_json_creator_build = std::make_shared<BuildTbeJsonCreator>();
nlohmann::json kernel_json;
EXPECT_TRUE(tbe_json_creator_select->GenJson(dynamic_rnn, &kernel_json));
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 7416506495715211266U)
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 5451839939298424872U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_dynamic_rnn_select.json";
EXPECT_TRUE(tbe_json_creator_build->GenJson(dynamic_rnn, &kernel_json));
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 11313869240174356202U)
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 10947818070940734489U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_dynamic_rnn_build.json";
}
@@ -229,10 +230,10 @@ TEST_F(TestHWTBEJsonCreator, test_tbe_single_layer_norm) {
auto tbe_json_creator_build = std::make_shared<BuildTbeJsonCreator>();
nlohmann::json kernel_json;
EXPECT_TRUE(tbe_json_creator_select->GenJson(layer_norm, &kernel_json));
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 3528443918959131090U)
EXPECT_EQ(tbe_json_creator_select->GetJsonHash(), 14732182622642040787U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_layer_norm_select.json";
EXPECT_TRUE(tbe_json_creator_build->GenJson(layer_norm, &kernel_json));
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 15344551887829075086U)
EXPECT_EQ(tbe_json_creator_build->GetJsonHash(), 3685000753742854829U)
<< "Error json is:" << kernel_json << ", for expected json, see file: tbe_single_layer_norm_build.json";
}
@@ -305,7 +306,7 @@ TEST_F(TestHWTBEJsonCreator, test_tbe_fusion_common) {
nlohmann::json fusion_json;
auto tbe_json_creator = std::make_shared<FusionBuildTbeJsonCreator>();
EXPECT_TRUE(tbe_json_creator->GenJson(fusion_scope_info, &fusion_json));
EXPECT_EQ(tbe_json_creator->GetJsonHash(), 11263748967143619025U)
EXPECT_EQ(tbe_json_creator->GetJsonHash(), 9312533568432116240U)
<< "Error json is:" << fusion_json << ", for expected json, see file: tbe_fusion_common.json";
}
@@ -366,7 +367,7 @@ TEST_F(TestHWTBEJsonCreator, test_fusion_add_conv2d) {
nlohmann::json fusion_json;
auto tbe_json_creator = std::make_shared<FusionBuildTbeJsonCreator>();
EXPECT_TRUE(tbe_json_creator->GenJson(fusion_scope_info, &fusion_json));
EXPECT_EQ(tbe_json_creator->GetJsonHash(), 17118025395077309742U)
EXPECT_EQ(tbe_json_creator->GetJsonHash(), 6177172257612258689U)
<< "Error json is:" << fusion_json << ", for expected json, see file: test_fusion_add_conv2d.json";
}