Remove the unit tests' dependency on libgraph.so and liberror_manager.so

This commit is contained in:
qujianwei 2022-02-28 19:26:55 +08:00
parent cf5248276e
commit 7e1aebc2c9
16 changed files with 411 additions and 1569 deletions

View File

@ -26,29 +26,7 @@ function(ge_protobuf_generate c_var h_var)
set(${h_var} ${${h_var}} PARENT_SCOPE)
endfunction()
if(ENABLE_TESTCASES)
set(_ge_tmp_CMAKE_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
set(_ge_tmp_ENABLE_GITEE ${ENABLE_GITEE})
set(_ge_tmp_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(ENABLE_GITEE ON)
set(CMAKE_INSTALL_PREFIX ${BUILD_PATH}/graphengine)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__FILE__='\"$(subst $(realpath ${CMAKE_SOURCE_DIR})/,,$(abspath $<))\"' \
-Wno-builtin-macro-redefined")
if(ENABLE_TESTCASES)
# use slog, error manager, mmpa in non ascend mode, e.g. tests
set(GE_PREBUILD_PATH ${GE_SOURCE_DIR}/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR})
set(ENABLE_MS_TESTCASES TRUE)
find_submodule_lib(slog libalog.so ${GE_PREBUILD_PATH})
find_submodule_lib(static_mmpa libmmpa.a ${GE_PREBUILD_PATH})
endif()
string(REPLACE " -Werror" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
add_subdirectory(${GE_SOURCE_DIR})
set(CMAKE_INSTALL_PREFIX ${_ge_tmp_CMAKE_INSTALL_PREFIX})
set(ENABLE_GITEE ${_ge_tmp_ENABLE_GITEE})
set(CMAKE_CXX_FLAGS ${_ge_tmp_CMAKE_CXX_FLAGS})
elseif(MODE_ASCEND_ALL OR MODE_ASCEND_ACL)
if(ENABLE_TESTCASES OR MODE_ASCEND_ALL OR MODE_ASCEND_ACL)
if(NOT(BUILD_LITE))
file(GLOB_RECURSE GE_PROTO_FILE RELATIVE ${CMAKE_SOURCE_DIR} "graphengine/metadef/proto/*.proto")
else()

View File

@ -252,14 +252,6 @@ if(ENABLE_D OR ENABLE_ACL)
COMPONENT mindspore
)
endif()
elseif(ENABLE_TESTCASES)
install(
FILES
${CMAKE_BINARY_DIR}/graphengine/metadef/graph/libgraph.so
${BUILD_PATH}/graphengine/c_sec/lib/libc_sec.so
DESTINATION ${INSTALL_LIB_DIR}
COMPONENT mindspore
)
endif()
if(MS_BUILD_GRPC)

View File

@ -30,6 +30,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/stub/runtime/)
include_directories(${CMAKE_BINARY_DIR})
include_directories(${CMAKE_BINARY_DIR}/proto/graphengine_protos)
include_directories(${CMAKE_BINARY_DIR}/proto/metadef_protos)
include_directories(${CMAKE_BINARY_DIR}/proto/ge)
include_directories(${CUDA_INCLUDE_DIRS})
include_directories(${CMAKE_SOURCE_DIR}/mindspore/ccsrc/plugin/device/cpu/kernel)
MESSAGE("check ut_test ${CMAKE_BINARY_DIR}")
@ -175,8 +176,6 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"../../../mindspore/ccsrc/backend/common/session/executor_manager.cc"
"../../../mindspore/ccsrc/backend/common/session/session_factory.cc"
"../../../mindspore/ccsrc/backend/common/session/kernel_build_client.cc"
"../../../mindspore/ccsrc/transform/graph_ir/*.cc"
"../../../mindspore/ccsrc/transform/graph_ir/op_declare/*.cc"
"../../../mindspore/ccsrc/ps/*.cc"
"../../../mindspore/ccsrc/fl/*.cc"
"../../../mindspore/ccsrc/distributed/cluster/actor_route_table_service.cc"
@ -188,9 +187,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
"../../../mindspore/ccsrc/kernel/kernel.cc"
"../../../mindspore/ccsrc/kernel/ascend_kernel_mod.cc"
"../../../mindspore/ccsrc/backend/common/optimizer/helper.cc"
"../../../mindspore/ccsrc/plugin/device/ascend/hal/device/executor/tiling/op_tiling_adapter.cc"
"../../../mindspore/ccsrc/plugin/device/ascend/hal/device/executor/aicpu_ext_info_handle.cc"
"../../../mindspore/ccsrc/plugin/device/ascend/hal/device/ge_types_convert.cc"
"../../../mindspore/ccsrc/plugin/device/ascend/kernel/aicpu/aicpu_util.cc"
)
@ -227,6 +224,7 @@ list(REMOVE_ITEM MINDSPORE_SRC_LIST
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/plugin/device/ascend/kernel/tbe/tbe_kernel_compile.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/plugin/device/cpu/kernel/akg/akg_cpu_kernel_mod.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/plugin/device/cpu/kernel/akg/akg_cpu_kernel_build.cc")
if(ENABLE_SECURITY)
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/profiler/device/profiling.cc")
list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/profiler/device/ascend/memory_profiling.cc")
@ -238,8 +236,7 @@ list(REMOVE_ITEM MINDSPORE_SRC_LIST "../../../mindspore/ccsrc/profiler/device/as
add_library(_ut_mindspore_obj OBJECT ${MINDSPORE_SRC_LIST})
add_library(_ut_ut_obj OBJECT ${UT_SRCS})
add_dependencies(_ut_mindspore_obj graph)
add_dependencies(_ut_ut_obj engine-cache-server graph)
add_dependencies(_ut_ut_obj engine-cache-server)
set(ut_objects $<TARGET_OBJECTS:_ut_ut_obj> $<TARGET_OBJECTS:_ut_mindspore_obj>
$<TARGET_OBJECTS:core_obj> $<TARGET_OBJECTS:core_proto_obj> $<TARGET_OBJECTS:mindrt_mid>
$<TARGET_OBJECTS:mindspore_shared_lib_obj> $<TARGET_OBJECTS:_mindspore_utils_obj>
@ -267,11 +264,6 @@ if(MINDSPORE_PROTO_LIST)
set_target_properties(proto_input_ut PROPERTIES COMPILE_FLAGS "-Wno-unused-variable")
endif()
if(ENABLE_D)
target_link_libraries(ut_tests PRIVATE graph ge_runner ge_client)
target_link_libraries(mindspore PRIVATE tsdclient)
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
target_link_libraries(ut_tests PRIVATE mindspore::gtest mindspore::event mindspore::event_pthreads
mindspore::event_openssl ${PYTHON_LIBRARIES} pthread util dl)
@ -288,4 +280,4 @@ if(USE_GLOG)
target_link_libraries(ut_tests PRIVATE mindspore::glog)
endif()
target_link_libraries(ut_tests PRIVATE mindspore securec graph error_manager)
target_link_libraries(ut_tests PRIVATE mindspore securec)

View File

@ -44,6 +44,11 @@ class MockOpsKernelInfoStore : public ge::OpsKernelInfoStore {
ge::Status Finalize() override { return ge::SUCCESS; }
void GetAllOpsKernelInfo(std::map<string, ge::OpInfo> &infos) const override {}
bool CheckSupported(const ge::OpDescPtr &opDescPtr, std::string &un_supported_reason) const override { return true; }
bool CheckSupported(const ge::NodePtr &node, std::string &un_supported_reason) const { return true; }
bool CheckAccuracySupported(const ge::OpDescPtr &opDescPtr, std::string &un_supported_reason,
const bool realQuery = false) const override { return true; }
bool CheckAccuracySupported(const ge::NodePtr &node, std::string &un_supported_reason,
const bool realQuery = false) const { return true; }
ge::Status LoadTask(ge::GETaskInfo &task) override { return ge::SUCCESS; }
};

View File

@ -0,0 +1,210 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/util/error_manager/error_manager.h"
#include <fstream>
#include <iostream>
#include <algorithm>
#include <mutex>
#include <nlohmann/json.hpp>
#include <sstream>
#include <cstdarg>
#include <securec.h>
#include "mmpa/mmpa_api.h"
#include "toolchain/slog.h"
#define GE_MODULE_NAME static_cast<int32_t>(GE)
const std::string kParamCheckErrorSuffix = "8888";
namespace {
#ifdef __GNUC__
const error_message::char_t *const kErrorCodePath = "../conf/error_manager/error_code.json";
const error_message::char_t *const kSeparator = "/";
#else
const error_message::char_t *const kErrorCodePath = "..\\conf\\error_manager\\error_code.json";
const error_message::char_t *const kSeparator = "\\";
#endif
const error_message::char_t *const kErrorList = "error_info_list";
const error_message::char_t *const kErrCode = "ErrCode";
const error_message::char_t *const kErrMessage = "ErrMessage";
const error_message::char_t *const kArgList = "Arglist";
const uint64_t kLength = 2UL;
} // namespace
///
/// @brief Obtain ErrorManager instance
/// @return ErrorManager instance
///
ErrorManager &ErrorManager::GetInstance() {
// Meyers singleton: the function-local static is initialized exactly once,
// and that initialization is thread-safe under C++11 "magic statics".
static ErrorManager instance;
return instance;
}
///
/// @brief init
/// @param [in] path: current so path
/// @return int 0(success) -1(fail)
///
// UT stub: the error-code JSON at `path` is not loaded; always succeeds.
int32_t ErrorManager::Init(const std::string path) {
return 0;
}
///
/// @brief init
/// @return int 0(success) -1(fail)
///
// UT stub: no initialization performed; always succeeds.
int32_t ErrorManager::Init() {
return 0;
}
// UT stub: the message is discarded; always reports success.
int32_t ErrorManager::ReportInterErrMessage(const std::string error_code, const std::string &error_msg) {
return 0;
}
///
/// @brief report error message
/// @param [in] error_code: error code
/// @param [in] args_map: parameter map
/// @return int 0(success) -1(fail)
///
// UT stub: the message and its arguments are discarded; always reports success.
int32_t ErrorManager::ReportErrMessage(const std::string error_code,
const std::map<std::string, std::string> &args_map) {
return 0;
}
// UT stub: no errors are ever recorded, so the message is always empty.
std::string ErrorManager::GetErrorMessage() {
return "";
}
// UT stub: no warnings are ever recorded, so the message is always empty.
std::string ErrorManager::GetWarningMessage() {
return "";
}
///
/// @brief output error message
/// @param [in] handle: print handle
/// @return int 0(success) -1(fail)
///
// UT stub: nothing is written to `handle`; always succeeds.
int32_t ErrorManager::OutputErrMessage(int32_t handle) {
return 0;
}
///
/// @brief output message
/// @param [in] handle: print handle
/// @return int 0(success) -1(fail)
///
// UT stub: nothing is written to `handle`; always succeeds.
int32_t ErrorManager::OutputMessage(int32_t handle) {
return 0;
}
///
/// @brief parse json file
/// @param [in] path: json path
/// @return int 0(success) -1(fail)
///
// UT stub: the JSON file is never opened or parsed; always succeeds.
int32_t ErrorManager::ParseJsonFile(const std::string path) {
return 0;
}
///
/// @brief read json file
/// @param [in] file_path: json path
/// @param [in] handle: print handle
/// @return int 0(success) -1(fail)
///
// UT stub: `file_path` and `handle` are ignored; always succeeds.
int32_t ErrorManager::ReadJsonFile(const std::string &file_path, void *const handle) {
return 0;
}
///
/// @brief report error message
/// @param [in] error_code: error code
/// @param [in] key: vector of parameter keys
/// @param [in] value: vector of parameter values (parallel to key)
///
// UT stub: the message is discarded.
void ErrorManager::ATCReportErrMessage(const std::string error_code, const std::vector<std::string> &key,
const std::vector<std::string> &value) {}
///
/// @brief classify graph compile failed messages (error code and op_name) in the mstune case
/// @param [in] msg: failed message map, key is error code, value is op_name
/// @param [out] classified_msg: classified message map, key is error code, value is op_name vector
///
// UT stub: `classified_msg` is left untouched.
void ErrorManager::ClassifyCompileFailedMsg(const std::map<std::string, std::string> &msg,
std::map<std::string,
std::vector<std::string>> &classified_msg) {}
///
/// @brief report graph compile failed messages (error code and op_name) in the mstune case
/// @param [in] root_graph_name: root graph name
/// @param [in] msg: failed message map, key is error code, value is op_name
/// @return int 0(success) -1(fail)
///
// UT stub: nothing is recorded; always succeeds.
int32_t ErrorManager::ReportMstuneCompileFailedMsg(const std::string &root_graph_name,
const std::map<std::string, std::string> &msg) {
return 0;
}
///
/// @brief get graph compile failed messages in the mstune case
/// @param [in] graph_name: graph name
/// @param [out] msg_map: failed message map, key is error code, value is op_name list
/// @return int 0(success) -1(fail)
///
// UT stub: `msg_map` is left untouched; always succeeds.
int32_t ErrorManager::GetMstuneCompileFailedMsg(const std::string &graph_name, std::map<std::string,
std::vector<std::string>> &msg_map) {
return 0;
}
// Return the error-item container for `work_id`, creating an empty one on
// first use.
// Fix: the previous version called find() a second time after emplace();
// emplace already returns an iterator to the (existing or newly inserted)
// entry, so one lookup suffices.
std::vector<ErrorManager::ErrorItem> &ErrorManager::GetErrorMsgContainerByWorkId(uint64_t work_id) {
  auto iter = error_message_per_work_id_.find(work_id);
  if (iter == error_message_per_work_id_.end()) {
    iter = error_message_per_work_id_.emplace(work_id, std::vector<ErrorItem>()).first;
  }
  return iter->second;
}
// Return the warning-item container for `work_id`, creating an empty one on
// first use.
// Fix: the previous version called find() a second time after emplace();
// emplace already returns an iterator to the (existing or newly inserted)
// entry, so one lookup suffices.
std::vector<ErrorManager::ErrorItem> &ErrorManager::GetWarningMsgContainerByWorkId(uint64_t work_id) {
  auto iter = warning_messages_per_work_id_.find(work_id);
  if (iter == warning_messages_per_work_id_.end()) {
    iter = warning_messages_per_work_id_.emplace(work_id, std::vector<ErrorItem>()).first;
  }
  return iter->second;
}
// UT stubs: work-stream bookkeeping, error context, and stage tracking are
// all no-ops in the test build.
void ErrorManager::GenWorkStreamIdDefault() {}
void ErrorManager::GenWorkStreamIdBySessionGraph(const uint64_t session_id, const uint64_t graph_id) {}
void ErrorManager::ClearErrorMsgContainerByWorkId(const uint64_t work_stream_id) {}
void ErrorManager::ClearWarningMsgContainerByWorkId(const uint64_t work_stream_id) {}
void ErrorManager::SetErrorContext(error_message::Context error_context) {}
void ErrorManager::SetStage(const std::string &first_stage, const std::string &second_stage) {}
void ErrorManager::SetStage(const error_message::char_t *first_stage, const size_t first_len,
const error_message::char_t *second_stage, const size_t second_len) {}
// UT stub: treats every code as an inner error code unconditionally.
bool ErrorManager::IsInnerErrorCode(const std::string &error_code) const { return true; }

View File

@ -0,0 +1,38 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "graph/compute_graph.h"
#include "graph/detail/attributes_holder.h"
#include "graph/attr_store.h"
// UT stub constructor: the graph name is ignored; no state is set up here.
ge::ComputeGraph::ComputeGraph(const std::string &name) {}
// UT stub destructor: nothing to release.
ge::ComputeGraph::~ComputeGraph() {}
// UT stub: hand out a writable attribute map.
// Fix: the previous version returned *attrs from a function-local
// shared_ptr, which released its referent when the function returned —
// every caller received a dangling reference (undefined behavior). Making
// the owner static keeps the referent alive for the program's lifetime.
ge::ProtoAttrMap &ge::ComputeGraph::MutableAttrMap() {
  static std::shared_ptr<ge::ProtoAttrMap> attrs = std::make_shared<ge::ProtoAttrMap>();
  return *attrs;
}
// UT stub: hand out a read-only attribute map.
// Fix: same dangling-reference defect as MutableAttrMap — the local
// shared_ptr destroyed its referent on return, so callers held a reference
// to freed memory. A static owner keeps the referent valid program-wide.
ge::ConstProtoAttrMap &ge::ComputeGraph::GetAttrMap() const {
  static std::shared_ptr<ge::ConstProtoAttrMap> attrs = std::make_shared<ge::ConstProtoAttrMap>();
  return *attrs;
}
// UT stub: `op` is ignored and an empty (null) node pointer is returned.
ge::NodePtr ge::ComputeGraph::AddNode(ge::OpDescPtr op) {
  return ge::NodePtr();
}

View File

@ -0,0 +1,107 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "plugin/device/ascend/hal/device/executor/tiling/op_tiling_adapter.h"
#include "plugin/device/ascend/kernel/tbe/tbe_kernel_build.h"
#include "plugin/device/ascend/kernel/tbe/tbe_dynaminc_shape_util.h"
#include "backend/common/session/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "plugin/device/ascend/hal/device/ge_types_convert.h"
#include "include/common/utils/utils.h"
#include "external/graph/tensor.h"
#include "external/register/op_tiling_registry.h"
#include "graph/utils/graph_utils.h"
#include "common/ge_inner_error_codes.h"
#include "graph/utils/op_desc_utils.h"
namespace mindspore {
namespace device {
namespace tiling {
constexpr auto COMPILE_INFO_KEY = "compile_info_key";
constexpr auto COMPILE_INFO_JSON = "compile_info_json";
constexpr auto ATOMIC_COMPILE_INFO_KEY = "_atomic_compile_info_key";
constexpr auto ATOMIC_COMPILE_INFO_JSON = "_atomic_compile_info_json";
constexpr auto ATTR_NAME_OP_INFER_DEPENDS = "_op_infer_depends";
constexpr auto CONSTANTOP = "Constant";
constexpr auto ATTR_NAME_WEIGHTS = "value";
constexpr auto PARAM_DYNAMIC = "dynamic";
// Translate a MindSpore op type to the op name registered on the tiling
// side; op types without a dedicated mapping pass through unchanged.
std::string OpTilingCalculateAdapter::GetRealOpType(const std::string &op_type) {
  static const std::map<std::string, std::string> kOpTypeMap = {
    {"SparseApplyFtrl", "SparseApplyFtrlD"},
    {"SparseApplyProximalAdagrad", "SparseApplyProximalAdagradD"},
    {"SparseGatherV2", "Gather"},
    {"Pad", "PadD"},
    {"Split", "SplitD"},
    {"Concat", "ConcatD"},
    {"Softmax", "SoftmaxV2"},
    {"DropoutDoMask", "DropOutDoMask"},
    {"IOU", "Iou"},
    {"DynamicBroadcastTo", "BroadcastTo"},
    {"DynamicResizeNearestNeighbor", "ResizeNearestNeighborV2"},
    {"ParallelResizeBilinear", "SyncResizeBilinearV2"},
    {"ParallelResizeBilinearGrad", "SyncResizeBilinearV2Grad"},
  };
  const auto it = kOpTypeMap.find(op_type);
  return (it == kOpTypeMap.end()) ? op_type : it->second;
}
// UT stub: output-name lookup is not performed; always empty.
std::string OpTilingCalculateAdapter::GetOutputName(const CNodePtr &node, size_t index) {
return "";
}
// UT stub: input-name lookup is not performed; always empty.
std::string OpTilingCalculateAdapter::GetInputName(const CNodePtr &node, size_t index) {
return "";
}
// UT stubs: shape/type/compile-info conversion leaves `op_desc` untouched.
void OpTilingCalculateAdapter::ConvertInputShapeAndType(const CNodePtr &node, ge::OpDescPtr *op_desc) {}
void OpTilingCalculateAdapter::ConvertOutputShapeAndType(const CNodePtr &node, ge::OpDescPtr *op_desc) {}
void OpTilingCalculateAdapter::ConvertCompileInfo(const CNodePtr &node, ge::OpDescPtr *op_desc) {}
// UT stub: no constant node is created; a null NodePtr is returned.
ge::NodePtr OpTilingCalculateAdapter::NewConstantOp(const CNodePtr &node, const std::string &name,
const tensor::TensorPtr &tensor_data, ge::ComputeGraphPtr *ge_graph,
size_t index) {
ge::NodePtr constand_op;
return constand_op;
}
// UT stub: depend tensors are not converted; always an empty vector.
std::vector<std::tuple<std::size_t, ge::NodePtr>> OpTilingCalculateAdapter::ConvertDepends(
const CNodePtr &node, const std::map<uint32_t, tensor::TensorPtr> &depend_tensor_map, ge::OpDescPtr *op_desc,
ge::ComputeGraphPtr *ge_graph) {
std::vector<std::tuple<std::size_t, ge::NodePtr>> constant_ops;
return constant_ops;
}
// UT stubs: edge wiring and IO-name initialization are no-ops.
void OpTilingCalculateAdapter::AddEdge(const ge::NodePtr &ge_node,
const std::vector<std::tuple<std::size_t, ge::NodePtr>> &constant_ops) {}
void OpTilingCalculateAdapter::InitOpIoName(const CNodePtr &node) {}
// UT stub: returns a default-constructed ge::Operator without conversion.
ge::Operator OpTilingCalculateAdapter::AnfNodeToGeNodeAdapter(
const CNodePtr &node, ge::ComputeGraphPtr *ge_graph, const std::map<uint32_t, tensor::TensorPtr> &depend_tensor_map,
const std::string &op_compile_info) {
ge::Operator op;
return op;
}
} // namespace tiling
} // namespace device
} // namespace mindspore

View File

@ -0,0 +1,46 @@
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "include/transform/graph_ir/util.h"
#include <utility>
#include <map>
#include "securec/include/securec.h"
#include "include/common/utils/convert_utils.h"
#include "include/common/utils/utils.h"
namespace mindspore {
namespace transform {
const size_t kErrorSize = 0;
static std::map<MeDataType, size_t> datatype_size_map = {
{MeDataType::kNumberTypeFloat16, sizeof(float) / 2}, {MeDataType::kNumberTypeFloat32, sizeof(float)}, // 1/2 of float
{MeDataType::kNumberTypeFloat64, sizeof(double)}, {MeDataType::kNumberTypeInt8, sizeof(int8_t)},
{MeDataType::kNumberTypeInt16, sizeof(int16_t)}, {MeDataType::kNumberTypeInt32, sizeof(int32_t)},
{MeDataType::kNumberTypeInt64, sizeof(int64_t)}, {MeDataType::kNumberTypeUInt8, sizeof(uint8_t)},
{MeDataType::kNumberTypeUInt16, sizeof(uint16_t)}, {MeDataType::kNumberTypeUInt32, sizeof(uint32_t)},
{MeDataType::kNumberTypeUInt64, sizeof(uint64_t)}, {MeDataType::kNumberTypeBool, sizeof(bool)}};
// Return the size in bytes of one element of `type`, or kErrorSize (0) for
// a type not present in datatype_size_map.
// Fix: the previous version did find() followed by operator[], searching the
// map twice; the iterator from the single find() is reused instead.
size_t TransformUtil::GetDataTypeSize(const MeDataType &type) {
  const auto it = datatype_size_map.find(type);
  if (it == datatype_size_map.end()) {
    MS_LOG(ERROR) << "Illegal tensor data type!";
    return kErrorSize;
  }
  return it->second;
}
} // namespace transform
} // namespace mindspore

View File

@ -1,865 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <unordered_map>
#include "pybind11/pybind11.h"
#include "transform/transform_base_test.h"
#include "common/py_func_graph_fetcher.h"
#include "pipeline/jit/parse/parse.h"
#include "debug/draw.h"
#include "debug/anf_ir_dump.h"
#include "pipeline/jit/static_analysis/prim.h"
#include "frontend/operator/ops.h"
#include "common/common_test.h"
#define private public
#include "include/transform/graph_ir/types.h"
#include "include/transform/graph_ir/convert.h"
#include "securec/include/securec.h"
#include "include/common/utils/utils.h"
using std::cout;
using std::endl;
using std::string;
using std::unordered_map;
namespace mindspore {
namespace transform {
using AbstractScalar = abstract::AbstractScalar;
using mindspore::parse::ResolveAll;
class TestConvert : public UT::Common {
public:
TestConvert() {}
virtual void SetUp();
virtual void TearDown();
static const std::shared_ptr<Float> kF32;
};
void TestConvert::SetUp() { UT::InitPythonPath(); }
void TestConvert::TearDown() {}
const std::shared_ptr<Float> TestConvert::kF32 = std::make_shared<Float>(32);
AnfGraphPtr createAnfGraph() { return std::make_shared<AnfGraph>(); }
TEST_F(TestConvert, TestConstruct) {
AnfGraphPtr func_graph = std::make_shared<AnfGraph>();
DfGraphConvertor converter(func_graph);
converter.ConvertAllNode().GetComputeGraph();
ASSERT_NE(converter.ErrCode(), SUCCESS);
}
namespace {
bool MakeDfGraph(PrimitivePtr prim, unsigned int nparam) {
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, nparam);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
if (converter.ErrCode() != 0) {
MS_LOG(ERROR) << "DfGraphConvertor convert " << prim->name() << " error, error code is: " << converter.ErrCode();
return false;
}
if (df_graph == nullptr) {
MS_LOG(ERROR) << "DfGraphConvertor get " << prim->name() << " compute func_graph failed";
return false;
}
return true;
}
} // namespace
TEST_F(TestConvert, TestConvertConv2d) {
PrimitivePtr conv2d = prim::kPrimConv2D;
conv2d->AddAttr("stride", MakeValue(static_cast<int64_t>(2)));
conv2d->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(0)));
FuncGraphPtr anf_graph = MakeFuncGraph(conv2d, 2);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertMaxpooling) {
auto prim = std::make_shared<Primitive>("MaxPool");
FuncGraphPtr anf_graph = MakeFuncGraph(prim, 5); // ary, ksize, stride, padding, data_format
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestReluOps) {
auto prim = prim::kPrimRelu;
prim->AddAttr("T", MakeValue(static_cast<int64_t>(0)));
auto func_graph = MakeFuncGraph(prim, 1);
ASSERT_TRUE(nullptr != func_graph);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anfGraph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anfGraph);
converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
}
TEST_F(TestConvert, TestConvertBatchNorm) {
PrimitivePtr batch_norm = prim::kPrimBatchNorm;
batch_norm->AddAttr("epsilon", MakeValue(0.001f));
batch_norm->AddAttr("momentum", MakeValue(0.1f));
FuncGraphPtr anf_graph = std::make_shared<FuncGraph>();
std::vector<AnfNodePtr> inputs;
inputs.push_back(NewValueNode(batch_norm));
for (unsigned int i = 0; i < 5; i++) {
inputs.push_back(anf_graph->add_parameter());
}
CNodePtr cnode_prim = anf_graph->NewCNode(inputs);
inputs.clear();
inputs.push_back(NewValueNode(prim::kPrimTupleGetItem));
inputs.push_back(cnode_prim);
inputs.push_back(NewValueNode(static_cast<int64_t>(2)));
CNodePtr cnode_getitem = anf_graph->NewCNode(inputs);
inputs.clear();
inputs.push_back(NewValueNode(prim::kPrimRelu));
inputs.push_back(cnode_getitem);
CNodePtr cnode_relu = anf_graph->NewCNode(inputs);
inputs.clear();
inputs.push_back(NewValueNode(std::make_shared<Primitive>("Return")));
inputs.push_back(cnode_relu);
CNodePtr cnode_return = anf_graph->NewCNode(inputs);
anf_graph->set_return(cnode_return);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertConvBackpropInput) {
auto prim = prim::kPrimConv2DBackpropInput;
const std::vector<int64_t> list{1,1};
prim->AddAttr("stride", MakeValue(list));
prim->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
prim->AddAttr("pad_mode", MakeValue(std::string("pad")));
prim->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("group", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
auto func_graph = MakeFuncGraph(prim, 3);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertConvBackpropFilter) {
auto prim = prim::kPrimConv2DBackpropFilter;
const std::vector<int64_t> list{1,1};
prim->AddAttr("stride", MakeValue(list));
prim->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
prim->AddAttr("pad_mode", MakeValue(std::string("pad")));
prim->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("group", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
auto func_graph = MakeFuncGraph(prim, 3);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertReluGrad) {
auto prim = prim::kPrimReluGrad;
prim->AddAttr("alpha", MakeValue(0.1f));
prim->AddAttr("beta", MakeValue(0.1f));
prim->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
auto func_graph = MakeFuncGraph(prim, 2);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertBiasAdd) {
auto prim = std::make_shared<Primitive>("BiasAdd");
prim->AddAttr("alpha", MakeValue(0.0f));
prim->AddAttr("beta", MakeValue(1.0f));
auto func_graph = MakeFuncGraph(prim, 2);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertBiasAddGrad) {
auto prim = prim::kPrimBiasAddGrad;
prim->AddAttr("alpha", MakeValue(0.0f));
prim->AddAttr("beta", MakeValue(1.0f));
auto func_graph = MakeFuncGraph(prim, 2);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConvertMaxPoolGradWithArgmax) {
auto prim = std::make_shared<Primitive>("MaxPoolGradWithArgmax");
prim->AddAttr("alpha", MakeValue(0.0f));
prim->AddAttr("beta", MakeValue(1.0f));
prim->AddAttr("window", MakeValue(static_cast<int64_t>(2)));
prim->AddAttr("stride", MakeValue(static_cast<int64_t>(1)));
prim->AddAttr("ceil_mode", MakeValue(static_cast<int64_t>(0)));
prim->AddAttr("data_mode", MakeValue(static_cast<int64_t>(0)));
prim->AddAttr("alpha", MakeValue(0.1f));
prim->AddAttr("beta", MakeValue(1.0f));
auto func_graph = MakeFuncGraph(prim, 2);
ASSERT_NE(func_graph, nullptr);
// save the func_graph to manager
std::shared_ptr<FuncGraphManager> manager = Manage(func_graph);
// call resolve
bool ret_ = ResolveAll(manager);
ASSERT_TRUE(ret_);
// draw graph
auto anf_graph = *(manager->func_graphs().begin());
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestConcat) {
auto prim = prim::kPrimConcat;
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, 2);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestGatherV2) {
auto prim = prim::kPrimGather;
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, 3);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
TEST_F(TestConvert, TestCast) {
auto prim = prim::kPrimCast;
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, 2);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
// Conversion smoke test: a freshly constructed "Exp" primitive, one input.
TEST_F(TestConvert, TestExp) {
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(std::make_shared<Primitive>("Exp"), 1);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Conversion smoke test: "Floor", one input.
TEST_F(TestConvert, TestFloor) {
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(std::make_shared<Primitive>("Floor"), 1);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Conversion smoke test: "GreaterEqual", two inputs.
TEST_F(TestConvert, TestGreaterEqual) {
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(std::make_shared<Primitive>("GreaterEqual"), 2);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Conversion smoke test: "Less" with an explicit "T" dtype attribute.
TEST_F(TestConvert, TestLess) {
  auto less_prim = std::make_shared<Primitive>("Less");
  less_prim->AddAttr("T", MakeValue(kFloat32));
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(less_prim, 2);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Conversion smoke test: "LessEqual", two inputs.
TEST_F(TestConvert, TestLessEqual) {
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(std::make_shared<Primitive>("LessEqual"), 2);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Conversion smoke test: "LogicalNot", one input.
TEST_F(TestConvert, TestLogicalNot) {
  std::shared_ptr<FuncGraph> func_graph = MakeFuncGraph(std::make_shared<Primitive>("LogicalNot"), 1);
  std::shared_ptr<FuncGraphManager> manager = MakeManager({func_graph});
  DfGraphConvertor convertor(func_graph);
  auto compute_graph = convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
  ASSERT_NE(compute_graph, nullptr);
}
// Converts AssignAdd with the use_locking attribute set, then checks the
// conversion reported no error and produced a compute graph.
// NOTE(review): AddAttr mutates the globally shared kPrimAssignAdd instance,
// so the attribute persists for other tests in this process — confirm intended.
TEST_F(TestConvert, TestAssignAdd) {
auto prim = prim::kPrimAssignAdd;
prim->AddAttr("use_locking", MakeValue(true));
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, 2);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
// Converts LogSoftmax with axis = 0 on a single-input graph.
// NOTE(review): same shared-primitive mutation caveat as TestAssignAdd above.
TEST_F(TestConvert, LogSoftmax) {
auto prim = prim::kPrimLogSoftmax;
prim->AddAttr("axis", MakeValue(static_cast<int64_t>(0)));
std::shared_ptr<FuncGraph> anf_graph = MakeFuncGraph(prim, 1);
std::shared_ptr<FuncGraphManager> graph_manager = MakeManager({anf_graph});
DfGraphConvertor converter(anf_graph);
auto df_graph = converter.ConvertAllNode().BuildGraph().GetComputeGraph();
ASSERT_EQ(converter.ErrCode(), 0);
ASSERT_NE(df_graph, nullptr);
}
// The cases below only check that the MakeDfGraph helper succeeds for each
// primitive; the helper builds the graph and performs the conversion.
TEST_F(TestConvert, TestMaximumOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimMaximum, 2)); }
TEST_F(TestConvert, TestReduceMeanOps) {
  auto reduce_mean = prim::kPrimReduceMean;
  reduce_mean->AddAttr("keepdims", MakeValue(true));
  ASSERT_TRUE(MakeDfGraph(reduce_mean, 2));
}
TEST_F(TestConvert, TestMinimumOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimMinimum, 2)); }
TEST_F(TestConvert, TestFusedMinOrMaxGradOps) {
  // Add infer step to this test case
  ASSERT_TRUE(true);
}
TEST_F(TestConvert, TestSqueezeOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimSqueeze, 2)); }
TEST_F(TestConvert, TestMulOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimMul, 2)); }
TEST_F(TestConvert, TestNegOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimNeg, 1)); }
TEST_F(TestConvert, TestOneHotOps) {
  auto one_hot = prim::kPrimOneHot;
  one_hot->AddAttr("axis", MakeValue(static_cast<int64_t>(0)));
  ASSERT_TRUE(MakeDfGraph(one_hot, 4));
}
TEST_F(TestConvert, TestPowOps) { ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Pow"), 2)); }
TEST_F(TestConvert, TestReciprocalOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Reciprocal"), 1));
}
TEST_F(TestConvert, TestSelectOps) { ASSERT_TRUE(MakeDfGraph(prim::kPrimSelect, 3)); }
TEST_F(TestConvert, TestSqrtOps) { ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Sqrt"), 1)); }
TEST_F(TestConvert, TestSquareOps) { ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Square"), 1)); }
#ifndef ENABLE_SECURITY
// Summary operators are compiled out in security-hardened builds, hence the guard.
TEST_F(TestConvert, TestScalarSummaryOps) {
  // should have only 1 input.
  ASSERT_TRUE(MakeDfGraph(prim::kPrimScalarSummary, 2));
}
TEST_F(TestConvert, TestTensorSummaryOps) {
  ASSERT_TRUE(MakeDfGraph(prim::kPrimTensorSummary, 2));
}
TEST_F(TestConvert, TestHistogramSummaryOps) {
  ASSERT_TRUE(MakeDfGraph(prim::kPrimHistogramSummary, 2));
}
#endif
// More MakeDfGraph smoke tests; each only asserts the conversion succeeded.
TEST_F(TestConvert, TestGreaterOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Greater"), 2));
}
TEST_F(TestConvert, TestEqualOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Equal"), 2));
}
TEST_F(TestConvert, TestArgMaxiOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("Argmax"), 2));
}
TEST_F(TestConvert, TestResizeNearestNeighborOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("ResizeNearestNeighbor"), 1));
}
TEST_F(TestConvert, TestApplyMomentumOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("ApplyMomentum"), 5));
}
TEST_F(TestConvert, TestNPUGetFloatStatusOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("NPUGetFloatStatus"), 1));
}
TEST_F(TestConvert, TestNPUAllocFloatStatusOps) {
  // NPUAllocFloatStatus takes no inputs.
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("NPUAllocFloatStatus"), 0));
}
TEST_F(TestConvert, TestNPUClearFloatStatusOps) {
  ASSERT_TRUE(MakeDfGraph(std::make_shared<Primitive>("NPUClearFloatStatus"), 1));
}
// Builds a two-input "Add" graph, resolves it through a manager, then converts
// the resolved graph and checks the convertor reports no error.
TEST_F(TestConvert, TestAddOps) {
  auto add_prim = std::make_shared<Primitive>("Add");
  auto graph = MakeFuncGraph(add_prim, 2);
  ASSERT_NE(graph, nullptr);
  // Register with a manager and resolve all symbols before converting.
  std::shared_ptr<FuncGraphManager> manager = Manage(graph);
  ASSERT_TRUE(ResolveAll(manager));
  auto resolved = *(manager->func_graphs().begin());
  DfGraphConvertor convertor(resolved);
  convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
}
// Converts an ME tensor (float32, shape 2x2x3) to a GE tensor and verifies
// that format, data type, dims and the data payload survive the conversion.
TEST_F(TestConvert, TestConvertTensor) {
  float data[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  // Create a tensor with wanted data type and shape
  std::vector<int64_t> dims{2, 2, 3};
  std::vector<int64_t> ge_dims{2, 2, 3};
  auto type_id = kNumberTypeFloat32;
  MeTensor me_tensor(type_id, dims);
  // Get the writable data pointer of the tensor and fill it from the buffer.
  uint8_t* me_data_ptr = reinterpret_cast<uint8_t*>(me_tensor.data_c());
  memcpy_s(me_data_ptr, me_tensor.data().nbytes(), data, 12 * sizeof(float));
  auto me_tensor_ptr = std::make_shared<MeTensor>(me_tensor);
  auto ge_tensor_ptr = TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW);
  ASSERT_EQ(ge_tensor_ptr->GetTensorDesc().GetFormat(), GeFormat::FORMAT_NCHW);
  ASSERT_EQ(ge_tensor_ptr->GetTensorDesc().GetDataType(), GeDataType::DT_FLOAT);
  // Compare dims and payload element by element. The counters use size_t /
  // int64_t to match size() and GetShapeSize(); the original `int i` caused a
  // signed/unsigned comparison.
  for (size_t i = 0; i < ge_dims.size(); i++) {
    ASSERT_EQ(ge_dims[i], ge_tensor_ptr->GetTensorDesc().GetShape().GetDims()[i]);
  }
  for (int64_t i = 0; i < ge_tensor_ptr->GetTensorDesc().GetShape().GetShapeSize(); i++) {
    ASSERT_EQ(data[i], (reinterpret_cast<float*>(ge_tensor_ptr->GetData()))[i]);
  }
}
// A rank-0 (empty dims) shape is a legal input to ConvertTensor.
TEST_F(TestConvert, TestConvertTensor0Dims) {
  std::vector<int64_t> empty_dims;
  auto tensor = std::make_shared<MeTensor>(kNumberTypeFloat32, empty_dims);
  ASSERT_NE(TransformUtil::ConvertTensor(tensor, kOpFormat_NCHW), nullptr);
}
// An unknown format string still converts: unrecognised names map to
// FORMAT_ND (see TestUtilsConvertFormat), so no null is returned.
TEST_F(TestConvert, TestConvertTensorError) {
  std::vector<int64_t> dims{2, 3, 4};
  auto tensor = std::make_shared<MeTensor>(kNumberTypeFloat32, dims);
  ASSERT_NE(TransformUtil::ConvertTensor(tensor, "xyz"), nullptr);
}
// Table-driven check of the ME -> GE element-type mapping.
TEST_F(TestConvert, TestUtilsConvertDataType) {
  const std::vector<std::pair<MeDataType, GeDataType>> expected_map = {
      {MeDataType::kNumberTypeFloat16, GeDataType::DT_FLOAT16},
      {MeDataType::kNumberTypeFloat32, GeDataType::DT_FLOAT},
      {MeDataType::kNumberTypeFloat64, GeDataType::DT_DOUBLE},
      {MeDataType::kNumberTypeInt8, GeDataType::DT_INT8},
      {MeDataType::kNumberTypeInt16, GeDataType::DT_INT16},
      {MeDataType::kNumberTypeInt32, GeDataType::DT_INT32},
      {MeDataType::kNumberTypeInt64, GeDataType::DT_INT64},
      {MeDataType::kNumberTypeUInt32, GeDataType::DT_UINT32},
      {MeDataType::kNumberTypeBool, GeDataType::DT_BOOL},
  };
  for (const auto &entry : expected_map) {
    ASSERT_EQ(TransformUtil::ConvertDataType(entry.first), entry.second);
  }
}
// Table-driven check of the ME -> GE format mapping; unknown names fall back
// to FORMAT_ND.
TEST_F(TestConvert, TestUtilsConvertFormat) {
  const std::vector<std::pair<std::string, GeFormat>> expected_map = {
      {kOpFormat_NCHW, GeFormat::FORMAT_NCHW},
      {kOpFormat_NC1HWC0, GeFormat::FORMAT_NC1HWC0},
      {kOpFormat_NHWC, GeFormat::FORMAT_NHWC},
      {"xyz", GeFormat::FORMAT_ND},
  };
  for (const auto &entry : expected_map) {
    ASSERT_EQ(TransformUtil::ConvertFormat(entry.first), entry.second);
  }
}
// Byte widths reported for each supported element type, grouped by size.
TEST_F(TestConvert, TestUtilsDataSize) {
  // 1-byte types
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeInt8), 1);
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeBool), 1);
  // 2-byte types
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeFloat16), 2);
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeInt16), 2);
  // 4-byte types
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeFloat32), 4);
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeInt32), 4);
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeUInt32), 4);
  // 8-byte types
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeFloat64), 8);
  ASSERT_EQ(TransformUtil::GetDataTypeSize(MeDataType::kNumberTypeInt64), 8);
}
// Wraps a float buffer in a GE tensor, converts it to an ME tensor, and
// verifies the byte payload is identical after the round trip.
TEST_F(TestConvert, TestConvertGeTensor) {
#define DTYPE float
  ge::DataType dt = ge::DataType::DT_FLOAT;
  std::vector<float> data1 = {1.1, 2.2, 3.3, 4.4, 6.6, 7.7, 8.8, 9.9};
  // (the original also declared an unused `data2` vector; removed)
  auto data = data1;
  ge::Shape shape({2, 2, 2});
  ge::Format format = ge::Format::FORMAT_NCHW;
  ge::TensorDesc desc(shape, format, dt);
  GeTensorPtr ge_tensor_ptr =
      std::make_shared<GeTensor>(desc, reinterpret_cast<uint8_t*>(data.data()), data.size() * sizeof(DTYPE));
  GeTensor& ge_tensor = *ge_tensor_ptr;
  const DTYPE* ge_data = reinterpret_cast<DTYPE*>(ge_tensor.GetData());
  // make sure GetData()'s return is stable across calls. Use a gtest
  // assertion instead of assert(), which is compiled out under NDEBUG.
  ASSERT_EQ(ge_data, reinterpret_cast<DTYPE*>(ge_tensor.GetData()));
  cout << "ge data size is: " << std::dec << ge_tensor.GetSize() << " bytes" << endl;
  // size_t counter: GetSize()/sizeof yields an unsigned value; the original
  // `int i` caused a signed/unsigned comparison.
  for (size_t i = 0; i < ge_tensor.GetSize() / sizeof(DTYPE); i++) {
    cout << "ge data is: " << static_cast<DTYPE>(*(ge_data + i)) << endl;
  }
  MeTensorPtr me_tensor_ptr = TransformUtil::ConvertGeTensor(ge_tensor_ptr);
  MeTensor& me_tensor = *me_tensor_ptr;
  cout << "after convert ge tensor to me tensor" << endl;
  DTYPE* me_data = reinterpret_cast<DTYPE*>(me_tensor.data_c());
  PrintMeTensor(&me_tensor);
  // Same byte count and identical bytes after the round trip; again gtest
  // assertions replace the original assert() calls.
  ASSERT_TRUE(ge_tensor.GetSize() == me_tensor.data().nbytes());
  ASSERT_EQ(memcmp(ge_data, me_data, ge_tensor.GetSize()), 0);
}
// Converts a graph whose only operation is MakeTuple over three parameters
// named x0..x2, followed by Return.
TEST_F(TestConvert, TestConvertMakeTuple) {
  FuncGraphPtr graph = std::make_shared<FuncGraph>();
  std::vector<AnfNodePtr> tuple_inputs{NewValueNode(std::make_shared<Primitive>("MakeTuple"))};
  for (int i = 0; i < 3; i++) {
    auto param = graph->add_parameter();
    param->set_name("x" + std::to_string(i));
    tuple_inputs.push_back(param);
  }
  CNodePtr make_tuple = graph->NewCNode(tuple_inputs);
  std::vector<AnfNodePtr> return_inputs{NewValueNode(std::make_shared<Primitive>("Return")), make_tuple};
  CNodePtr return_node = graph->NewCNode(return_inputs);
  graph->set_return(return_node);
  // Resolve through a manager, then convert the resolved graph.
  std::shared_ptr<FuncGraphManager> manager = Manage(graph);
  ASSERT_TRUE(ResolveAll(manager));
  auto resolved = *(manager->func_graphs().begin());
  DfGraphConvertor convertor(resolved);
  convertor.ConvertAllNode().BuildGraph().GetComputeGraph();
  ASSERT_EQ(convertor.ErrCode(), 0);
}
// Converts a batch of ME tensors to GE tensors and verifies that byte size,
// payload and shape are preserved for every element.
TEST_F(TestConvert, TestConvertInputTensors) {
#define DTYPE float
  std::initializer_list<int64_t> list0 = {1, 1, 4, 4};
  std::initializer_list<int64_t> list1 = {2, 3, 4, 5};
  std::initializer_list<int64_t> list2 = {9, 9, 1, 1};
  MeTensorPtr input_ptr1 = MakeTensor(kF32, list0);
  MeTensorPtr input_ptr2 = MakeTensor(kF32, list1);
  MeTensorPtr input_ptr3 = MakeTensor(kF32, list2);
  std::vector<MeTensorPtr> me_inputs;
  me_inputs.emplace_back(input_ptr1);
  me_inputs.emplace_back(input_ptr2);
  me_inputs.emplace_back(input_ptr3);
  std::vector<GeTensorPtr> ge_tensors = TransformUtil::ConvertInputTensors(me_inputs, kOpFormat_NCHW);
  // size_t counter matches size(); the original `int i` caused a
  // signed/unsigned comparison.
  for (size_t i = 0; i < ge_tensors.size(); i++) {
    DTYPE* me_data = reinterpret_cast<DTYPE*>(me_inputs[i]->data_c());
    const DTYPE* ge_data = reinterpret_cast<DTYPE*>(ge_tensors[i]->GetData());
    ASSERT_TRUE(ge_tensors[i]->GetSize() == me_inputs[i]->data().nbytes());
    ASSERT_EQ(memcmp(ge_data, me_data, ge_tensors[i]->GetSize()), 0);
    ASSERT_TRUE(ge_tensors[i]->GetTensorDesc().GetShape().GetDims() ==
                TransformUtil::ConvertMeShape(me_inputs[i]->shape_c()).GetDims());
  }
}
// Converts GE tensors back to ME tensors with explicit requested dims and
// verifies byte size, payload and the reshaped dims for each element.
TEST_F(TestConvert, TestConvertGeTensors) {
#define DTYPE float
  ge::DataType dt = ge::DataType::DT_FLOAT;
  std::vector<float> data1(16);
  std::vector<float> data2(120);
  std::vector<float> data3(81);
  ge::Shape shape1({1, 1, 4, 4});
  ge::Shape shape2({2, 3, 4, 5});
  ge::Shape shape3({9, 9, 1, 1});
  ge::Format format = ge::Format::FORMAT_NCHW;
  ge::TensorDesc desc1(shape1, format, dt);
  ge::TensorDesc desc2(shape2, format, dt);
  ge::TensorDesc desc3(shape3, format, dt);
  GeTensorPtr ge_tensor_ptr1 =
      std::make_shared<GeTensor>(desc1, reinterpret_cast<uint8_t*>(data1.data()), data1.size() * sizeof(DTYPE));
  GeTensorPtr ge_tensor_ptr2 =
      std::make_shared<GeTensor>(desc2, reinterpret_cast<uint8_t*>(data2.data()), data2.size() * sizeof(DTYPE));
  GeTensorPtr ge_tensor_ptr3 =
      std::make_shared<GeTensor>(desc3, reinterpret_cast<uint8_t*>(data3.data()), data3.size() * sizeof(DTYPE));
  std::vector<GeTensorPtr> ge_tensors;
  ge_tensors.emplace_back(ge_tensor_ptr1);
  ge_tensors.emplace_back(ge_tensor_ptr2);
  ge_tensors.emplace_back(ge_tensor_ptr3);
  std::vector<std::vector<int64_t>> request_dims;
  std::vector<int64_t> dims1 = {1, 1, 4, 4};
  std::vector<int64_t> dims2 = {2, 3, 4, 5};
  std::vector<int64_t> dims3 = {9, 9, 1, 1};
  request_dims.emplace_back(dims1);
  request_dims.emplace_back(dims2);
  request_dims.emplace_back(dims3);
  std::vector<MeTensorPtr> me_outputs = TransformUtil::ConvertGeTensors(ge_tensors, request_dims);
  // size_t counter matches size(); the original `int i` caused a
  // signed/unsigned comparison.
  for (size_t i = 0; i < ge_tensors.size(); i++) {
    DTYPE* me_data = reinterpret_cast<DTYPE*>(me_outputs[i]->data_c());
    const DTYPE* ge_data = reinterpret_cast<DTYPE*>(ge_tensors[i]->GetData());
    ASSERT_TRUE(ge_tensors[i]->GetSize() == me_outputs[i]->data().nbytes());
    ASSERT_EQ(memcmp(ge_data, me_data, ge_tensors[i]->GetSize()), 0);
    ASSERT_TRUE(request_dims[i] == me_outputs[i]->shape_c());
  }
}
// Cases 1-4: the GE shape's element count matches the requested dims, so the
// requested dims are returned verbatim.
TEST_F(TestConvert, TestConvertGeShape1) {
  GeShape shape({10, 1, 1, 1});
  std::vector<int64_t> requested{10};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == requested);
}
TEST_F(TestConvert, TestConvertGeShape2) {
  GeShape shape({10, 15, 1, 1});
  std::vector<int64_t> requested{10, 15};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == requested);
}
TEST_F(TestConvert, TestConvertGeShape3) {
  GeShape shape({10, 13, 18, 1});
  std::vector<int64_t> requested{10, 13, 18};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == requested);
}
TEST_F(TestConvert, TestConvertGeShape4) {
  GeShape shape({1, 10, 1, 1});
  std::vector<int64_t> requested{10};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == requested);
}
// Cases 5-7: the request cannot be satisfied, so the result equals the plain
// (request-free) conversion of the GE shape.
TEST_F(TestConvert, TestConvertGeShape5) {
  GeShape shape({10, 1, 1, 2});
  std::vector<int64_t> requested{10};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == TransformUtil::ConvertGeShape(shape));
}
TEST_F(TestConvert, TestConvertGeShape6) {
  GeShape shape({5, 2, 1, 1});
  std::vector<int64_t> requested{10};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == TransformUtil::ConvertGeShape(shape));
}
TEST_F(TestConvert, TestConvertGeShape7) {
  GeShape shape({10});
  std::vector<int64_t> requested{10, 1};
  ASSERT_TRUE(TransformUtil::ConvertGeShape(shape, requested) == TransformUtil::ConvertGeShape(shape));
}
} // namespace transform
} // namespace mindspore

View File

@ -1,43 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "graph/tensor.h"
#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif
#include "graph/operator.h"
#include "graph/operator_reg.h"
namespace mindspore {
namespace transform {
// Fixture for the GE API stub test; no per-test state is required.
class TestGEStub : public UT::Common {
public:
TestGEStub() {}
};
TEST_F(TestGEStub, TestAPI) {
// only test for ge header compiling: the assertion is a tautology, the value
// is that this translation unit compiles against the GE headers included above
ASSERT_TRUE(true);
}
} // namespace transform
} // namespace mindspore

View File

@ -1,54 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "common/common_test.h"
#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif
#define private public
#include "include/transform/graph_ir/graph_builder.h"
#include "include/transform/graph_ir/df_graph_manager.h"
using UT::Common;
namespace mindspore {
namespace transform {
class TestDfGraphBuilder : public UT::Common {
public:
TestDfGraphBuilder() {}
void SetUp();
void TearDown();
};
void TestDfGraphBuilder::SetUp() {}
void TestDfGraphBuilder::TearDown() {}
TEST_F(TestDfGraphBuilder, TestBuildDatasetGraph) {
DatasetGraphParam param4("queue_name", 1, 32, {0, 3}, {{32, 224, 224, 3}, {32}}, {});
ASSERT_EQ(transform::SUCCESS, BuildDatasetGraph(param4));
DfGraphManager::GetInstance().ClearGraph();
}
} // namespace transform
} // namespace mindspore

View File

@ -1,68 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "common/common_test.h"
#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif
#define private public
#include "include/transform/graph_ir/df_graph_manager.h"
using UT::Common;
namespace mindspore {
namespace transform {
// Fixture for DfGraphManager API tests; the manager itself is a process-wide
// singleton obtained via DfGraphManager::GetInstance() inside the test.
class TestDfGraphManager : public UT::Common {
public:
TestDfGraphManager() {}
};
// Exercises the public DfGraphManager surface: add, enumerate, look up by
// name, clear, and id generation.
TEST_F(TestDfGraphManager, TestAPI) {
  // The singleton is expected to start with no graphs registered.
  DfGraphManager& graph_manager = DfGraphManager::GetInstance();
  ASSERT_EQ(0, graph_manager.GetAllGraphs().size());
  // Adding a null graph must be rejected; adding a real one must be visible.
  std::shared_ptr<ge::Graph> ge_graph = std::make_shared<ge::Graph>();
  ASSERT_TRUE(graph_manager.AddGraph("test_graph", nullptr) != Status::SUCCESS);
  graph_manager.AddGraph("test_graph", ge_graph);
  ASSERT_EQ(1, graph_manager.GetAllGraphs().size());
  std::vector<DfGraphWrapperPtr> wrappers = graph_manager.GetAllGraphs();
  ASSERT_EQ("test_graph", wrappers.back()->name_);
  ASSERT_EQ(ge_graph, wrappers.back()->graph_ptr_);
  // Lookup by name returns the same underlying graph.
  DfGraphWrapperPtr wrappers2 = graph_manager.GetGraphByName("test_graph");
  ASSERT_EQ(ge_graph, wrappers2->graph_ptr_);
  // ClearGraph empties the registry again.
  graph_manager.ClearGraph();
  ASSERT_EQ(0, graph_manager.GetAllGraphs().size());
  // Generated ids are positive. This uses a gtest assertion instead of the
  // original assert(), which is compiled out (checks nothing) under NDEBUG.
  int id = graph_manager.GenerateId();
  ASSERT_GT(id, 0);
}
} // namespace transform
} // namespace mindspore

View File

@ -1,247 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "ir/dtype.h"
#include "pybind_api/ir/tensor_py.h"
#include "transform/transform_base_test.h"
#include "common/py_func_graph_fetcher.h"
#include "pipeline/jit/static_analysis/static_analysis.h"
#include "frontend/operator/ops.h"
#include "include/transform/graph_ir/df_graph_manager.h"
#include "include/transform/graph_ir/convert.h"
#include "include/common/utils/utils.h"
#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif
#define private public
#include "include/transform/graph_ir/graph_runner.h"
using mindspore::tensor::TensorPy;
namespace mindspore {
namespace transform {
// Fixture for GraphRunner tests; exposes shared Float type descriptors used
// when building test tensors.
class TestGraphRunner : public UT::Common {
public:
TestGraphRunner() {}
void SetUp();
// 64-bit / 32-bit float type descriptors shared by all tests in this file.
static const std::shared_ptr<Float> kF64;
static const std::shared_ptr<Float> kF32;
private:
};
// SetUp calls UT::InitPythonPath() — presumably required by the TensorPy /
// pybind helpers used in the tests below; confirm against the UT harness.
void TestGraphRunner::SetUp() { UT::InitPythonPath(); }
const std::shared_ptr<Float> TestGraphRunner::kF64 = std::make_shared<Float>(64);
const std::shared_ptr<Float> TestGraphRunner::kF32 = std::make_shared<Float>(32);
// Builds a two-input Conv2D func graph carrying the attributes the convertor
// reads, and returns a fresh DfGraphConvertor wrapping it.
// NOTE(review): AddAttr mutates the globally shared kPrimConv2D primitive, so
// these attributes persist across tests — confirm intended.
std::shared_ptr<DfGraphConvertor> MakeGeGraph() {
  PrimitivePtr conv2d = prim::kPrimConv2D;
  conv2d->AddAttr("stride", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("pad", MakeValue(static_cast<int64_t>(0)));
  conv2d->AddAttr("pad_mode", MakeValue(std::string("pad")));
  // "dilation" was set twice with the same value in the original; the
  // duplicate call has been removed.
  conv2d->AddAttr("dilation", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("group", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("mode", MakeValue(static_cast<int64_t>(1)));
  conv2d->AddAttr("out_channel", MakeValue(static_cast<int64_t>(2)));
  conv2d->AddAttr("kernel_size", MakeValue(std::vector<int64_t>({2, 2})));
  conv2d->AddAttr("data_format", MakeValue(kOpFormat_NCHW));
  FuncGraphPtr anf_graph = MakeFuncGraph(conv2d, 2);
  // The manager only lives for the duration of this call; kept because
  // MakeManager attaches the graph to it as in the original code.
  std::shared_ptr<FuncGraphManager> ir_graph_manager = MakeManager({anf_graph});
  return std::make_shared<DfGraphConvertor>(anf_graph);
}
namespace {
// Runs the graph registered as "fp_bp_subgraph" through a GraphRunner with the
// given ME inputs, and converts the GE outputs back to ME tensors using the
// hard-coded requested shapes below. Returns nullptr if the run fails.
// NOTE(review): the requested output dims {1,1,4,4}/{2,3,4,5}/{9,9} are tied
// to the specific test graph — confirm against the fixture that registers it.
std::shared_ptr<std::vector<MeTensorPtr>> DoExecGraph(const std::vector<MeTensorPtr> &inputs) {
std::vector<GeTensorPtr> ge_tensor_ptrs = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW);
std::vector<GeTensorPtr> ge_outputs;
transform::GraphRunnerOptions options;
transform::GraphRunner graph_runner(options);
transform::RunOptions run_options;
run_options.name = "fp_bp_subgraph";
MS_LOG(INFO) << "Run func_graph begin, inputs size is: " << inputs.size();
Status ret = graph_runner.RunGraph(run_options, ge_tensor_ptrs, &ge_outputs);
MS_LOG(INFO) << "Run func_graph finish, outputs size is: " << ge_outputs.size();
// A failed run yields nullptr so callers can assert on it.
if (ret != Status::SUCCESS) {
return nullptr;
}
std::vector<std::vector<int64_t>> request_dims;
std::vector<int64_t> dims1 = {1, 1, 4, 4};
std::vector<int64_t> dims2 = {2, 3, 4, 5};
std::vector<int64_t> dims3 = {9, 9};
request_dims.emplace_back(dims1);
request_dims.emplace_back(dims2);
request_dims.emplace_back(dims3);
std::vector<MeTensorPtr> me_outputs = TransformUtil::ConvertGeTensors(ge_outputs, request_dims);
return std::make_shared<std::vector<MeTensorPtr>>(me_outputs);
}
} // namespace
// Fills an ME tensor from a raw float buffer, builds the same values from a
// nested python tuple, and checks both numpy views expose identical bytes.
TEST_F(TestGraphRunner, TestGeTensorConstructor) {
  // Init a data buffer
  float ge_tensor_data[] = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
  // Create a Tensor with wanted data type and shape
  MeTensor tensor = MeTensor(TypeId::kNumberTypeFloat32, std::vector<int64_t>({1, 2, 3}));
  // Get the writable data pointer from the tensor
  float *me_tensor_data = reinterpret_cast<float *>(tensor.data_c());
  // Copy data from buffer to tensor's data
  memcpy_s(me_tensor_data, static_cast<size_t>(tensor.data().nbytes()), ge_tensor_data, sizeof(ge_tensor_data));
  PrintMeTensor(&tensor);
  std::cout << "----------------------------------" << std::endl;
  py::tuple py_tuple =
      py::make_tuple(py::make_tuple(py::make_tuple(1.1f, 2.2f, 3.3f), py::make_tuple(4.4f, 5.5f, 6.6f)));
  py::array my_arry = py::array(py_tuple).attr("astype").cast<py::function>()("float32").cast<py::array>();
  auto tensor_tuple = TensorPy::MakeTensor(my_arry, kFloat32);
  PrintMeTensor(tensor_tuple.get());
  py::array tensor_array = TensorPy::AsNumpy(tensor);
  py::array tensor_tuple_array = TensorPy::AsNumpy(*tensor_tuple);
  // Use gtest assertions instead of assert(): assert() is a no-op when the
  // test binary is built with NDEBUG, so these checks would silently vanish.
  ASSERT_EQ(memcmp(ge_tensor_data, tensor_array.data(), sizeof(ge_tensor_data)), 0);
  ASSERT_EQ(memcmp(ge_tensor_data, tensor_tuple_array.data(), sizeof(ge_tensor_data)), 0);
}
// Error-path coverage for GraphRunner::RunGraph: running with no graph name,
// with a registered name, with a second runner, and after the global manager
// has been cleared. The ordering of these steps is load-bearing.
TEST_F(TestGraphRunner, TestRunGraphException) {
DfGraphManager &graph_manager = DfGraphManager::GetInstance();
graph_manager.ClearGraph();
// Build the conv graph with an init value for parameter "x1" and register it.
std::map<string, MeTensorPtr> dict;
std::initializer_list<int64_t> list0{2, 1, 2, 2};
MeTensorPtr init_tensor_ptr = MakeTensor(kF32, list0);
dict["x1"] = init_tensor_ptr;
std::shared_ptr<DfGraphConvertor> converter = MakeGeGraph();
(*converter).ConvertAllNode().InitParam(dict).BuildGraph();
auto df_graph = (*converter).GetComputeGraph();
graph_manager.AddGraph("test_graph", df_graph);
std::initializer_list<int64_t> list1{1, 1, 2, 3};
MeTensorPtr me_tensor_ptr = MakeTensor(kF32, list1);
std::initializer_list<int64_t> list2{1, 1, 4, 4};
MeTensorPtr input_ptr = MakeTensor(kF32, list2);
std::vector<MeTensorPtr> me_inputs;
me_inputs.emplace_back(input_ptr);
std::vector<MeTensorPtr> me_outputs;
GraphRunnerOptions options;
GraphRunner graph_runner(options);
RunOptions run_options;
// run_options.name is still empty here, so the run must fail ...
ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
// ... and succeed once it names the registered graph.
run_options.name = "test_graph";
ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
// A second runner instance can run the same registered graph.
GraphRunner graph_runner2(options);
ASSERT_TRUE(graph_runner2.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
// when the GraphManager is empty
graph_manager.ClearGraph();
GraphRunner graph_runner3(options);
ASSERT_TRUE(graph_runner3.RunGraph(run_options, me_inputs, &me_outputs) != Status::SUCCESS);
}
// Registers the conv graph under "test_graph", runs it with a python-built
// float32 input, and prints the resulting ME tensors.
TEST_F(TestGraphRunner, TestRunGraph) {
  DfGraphManager &graph_manager = DfGraphManager::GetInstance();
  graph_manager.ClearGraph();
  std::shared_ptr<DfGraphConvertor> converter = MakeGeGraph();
  std::map<std::string, MeTensorPtr> dict;
  std::initializer_list<int64_t> list0{2, 1, 2, 2};
  dict.emplace("x1", MakeTensor(kF32, list0));
  (*converter).ConvertAllNode().InitParam(dict).BuildGraph();
  graph_manager.AddGraph("test_graph", (*converter).GetComputeGraph());
  TypePtr type_id = kFloat32;
  // Nested tuple literal: 2 outer elements, each 1 x 2 rows of 4 values.
  py::tuple tuple = py::make_tuple(
      py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))),
      py::make_tuple(py::make_tuple(py::make_tuple(1.0, 2.0, 3.0, 4.0), py::make_tuple(4.0, 5.0, 6.0, 7.0))));
  py::array array = py::array(tuple);
  MeTensorPtr me_tensor_ptr = TensorPy::MakeTensor(array, type_id);
  MS_LOG(INFO) << "inputs me tensor data is: ";
  PrintMeTensor(&(*me_tensor_ptr));
  std::vector<MeTensorPtr> me_inputs;
  me_inputs.emplace_back(me_tensor_ptr);
  std::vector<MeTensorPtr> me_outputs;
  GraphRunnerOptions options;
  GraphRunner graph_runner(options);
  RunOptions run_options;
  run_options.name = "test_graph";
  ASSERT_TRUE(graph_runner.RunGraph(run_options, me_inputs, &me_outputs) == Status::SUCCESS);
  MS_LOG(INFO) << "outputs me tensor data is: ";
  // size_t counter: the original `auto i = 0` deduced int and was compared
  // against size(), a signed/unsigned mismatch.
  for (size_t i = 0; i < me_outputs.size(); i++) {
    PrintMeTensor(&(*me_outputs[i]));
  }
}
// End-to-end: register the conv graph as "fp_bp_subgraph", execute it through
// DoExecGraph with three inputs, and dump the resulting ME tensors.
TEST_F(TestGraphRunner, TestAPI) {
  DfGraphManager &manager = DfGraphManager::GetInstance();
  manager.ClearGraph();
  std::shared_ptr<DfGraphConvertor> convertor = MakeGeGraph();
  std::map<std::string, MeTensorPtr> init_dict;
  std::initializer_list<int64_t> init_shape{2, 1, 2, 2};
  init_dict.emplace("x1", MakeTensor(kF32, init_shape));
  convertor->ConvertAllNode().InitParam(init_dict).BuildGraph();
  manager.AddGraph("fp_bp_subgraph", convertor->GetComputeGraph());
  std::initializer_list<int64_t> shape_a{1, 1, 4, 4};
  std::initializer_list<int64_t> shape_b{2, 3, 4, 5};
  std::initializer_list<int64_t> shape_c{9, 9, 1, 1};
  std::vector<MeTensorPtr> inputs;
  inputs.emplace_back(MakeTensor(kF32, shape_a));
  inputs.emplace_back(MakeTensor(kF32, shape_b));
  inputs.emplace_back(MakeTensor(kF32, shape_c));
  auto outputs_ptr = DoExecGraph(inputs);
  ASSERT_NE(outputs_ptr, nullptr);
  MS_LOG(INFO) << "outputs me tensor data is: ";
  for (const auto &tensor : *outputs_ptr) {
    PrintMeTensor(&(*tensor));
  }
}
} // namespace transform
} // namespace mindspore

View File

@ -1,97 +0,0 @@
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <string>
#include "common/common_test.h"
#include "transform/graph_ir/op_adapter.h"
#include "transform/graph_ir/op_declare/array_ops_declare.h"
#include "frontend/operator/ops.h"
using std::cout;
using std::endl;
using std::string;
namespace mindspore {
namespace transform {
// Fixture for OpAdapter specialization tests; inherits common UT setup/teardown.
class TestOpAdapter : public UT::Common {
 public:
  // Explicitly defaulted instead of an empty user-provided body.
  TestOpAdapter() = default;
};
#if 0
// fix conv2d ut
// NOTE(review): compiled out with `#if 0` pending a Conv2D adapter fix (see
// comment above). When re-enabled it checks that OpAdapter<Conv2D> accepts
// inputs at indices 1 and 2, reports NOT_FOUND for index 3, and accepts the
// "group" and "mode" attributes.
TEST_F(TestOpAdapter, TestSpecilization_Conv2D) {
BaseOpAdapter *adpt = new OpAdapter<Conv2D>();
auto input = std::make_shared<ge::Operator>();
auto conv = std::make_shared<Conv2D>();
ASSERT_EQ(adpt->setInput(conv, 1, input), 0);
ASSERT_EQ(adpt->setInput(conv, 2, input), 0);
ASSERT_EQ(adpt->setInput(conv, 3, input), NOT_FOUND);
ASSERT_EQ(0, adpt->setAttr(conv, "group", 1));
ASSERT_EQ(0, adpt->setAttr(conv, "mode", 1));
delete adpt;
}
#endif
// A Const operator declares no inputs, so wiring any input index must report
// NOT_FOUND through the adapter.
TEST_F(TestOpAdapter, TestSpecilization_Const) {
  // unique_ptr instead of raw new/delete: a failing ASSERT_EQ returns from the
  // test body immediately, which in the original skipped `delete adpt` and
  // leaked the adapter.
  std::unique_ptr<BaseOpAdapter> adpt(new OpAdapter<Const>());
  auto valuenode = std::make_shared<Const>();
  auto input = std::make_shared<Const>();
  ASSERT_EQ(adpt->setInput(valuenode, 1, input), NOT_FOUND);
}
#if 0
// fix conv2d ut
// NOTE(review): compiled out with `#if 0` pending a Conv2D adapter fix. When
// re-enabled it checks attribute mapping on OpAdapter<Conv2D> (known attrs
// succeed, unknown ones report NOT_FOUND) and that attributes carried on the
// kPrimConv2D primitive can be forwarded to the operator via setAttr(prim).
TEST_F(TestOpAdapter, TestSetAttr_Conv2d_Primitive) {
BaseOpAdapter *adpt = new OpAdapter<Conv2D>();
auto conv = std::make_shared<Conv2D>();
ASSERT_EQ(adpt->setAttr(conv, "padding", 1), NOT_FOUND);
ASSERT_EQ(adpt->setAttr(conv, "pad", 1), 0);
ASSERT_EQ(adpt->setAttr(conv, "pad_mode", string("same")), 0);
ASSERT_EQ(adpt->setAttr(conv, "nothing", "test"), NOT_FOUND);
// Attribute table built here but unused below — kept as-is in disabled code.
const mindspore::HashMap<std::string, ValuePtr> attrs = {
{"padding", MakeValue(2)},
{"padding_mode", MakeValue(string("normal"))},
{"stride", MakeValue(8)}
};
auto prim = prim::kPrimConv2D;
prim->SetAttrs({
{"strides", MakeValue(3)},
{"padding", MakeValue(1)},
});
ASSERT_EQ(prim->name(), prim::kPrimConv2D->name());
Int64Imm strides(3);
Int64Imm padding(1);
ASSERT_EQ(*(prim->GetAttr("strides")), strides);
ASSERT_EQ(*(prim->GetAttr("padding")), padding);
ASSERT_EQ(adpt->setAttr(conv, prim), 0);
delete adpt;
}
#endif
} // namespace transform
} // namespace mindspore

View File

@ -1,107 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "common/common_test.h"
#include "transform/transform_base_test.h"
#include "pybind_api/ir/tensor_py.h"
#include "pipeline/jit/parse/resolve.h"
using mindspore::tensor::TensorPy;
namespace mindspore {
namespace transform {
using mindspore::parse::ParsePythonCode;
namespace python_adapter = mindspore::python_adapter;
using mindspore::parse::ResolveAll;
// Parse the python function `function` from `package` into a FuncGraph,
// resolve all symbols through a manager, and return every func graph the
// manager holds afterwards. Returns an empty vector on parse or resolve
// failure.
std::vector<FuncGraphPtr> getAnfGraph(string package, string function) {
  std::vector<FuncGraphPtr> graphs;
  py::function py_fn = python_adapter::GetPyFn(package, function);
  FuncGraphPtr parsed = ParsePythonCode(py_fn);
  if (parsed == nullptr) {
    return graphs;
  }
  // Register the parsed graph with a manager so resolution can run over it.
  std::shared_ptr<FuncGraphManager> manager = Manage(parsed);
  if (!ResolveAll(manager)) {
    return graphs;
  }
  // Collect every graph the manager now tracks (resolution may add more).
  for (const auto &fg : manager->func_graphs()) {
    graphs.push_back(fg);
  }
  return graphs;
}
// Dump a MeTensor's raw in-memory payload element by element, then its numpy
// string representation and dtype, to stdout.
// NOTE(review): interprets the buffer as float — assumes callers only pass
// float32 tensors (all callers in this file use kF32); confirm before reuse.
void PrintMeTensor(MeTensor* tensor) {
  // Type alias instead of the original `#define DTYPE float`, which had no
  // matching #undef and leaked the macro into the rest of the translation unit.
  using DTYPE = float;
  DTYPE* me_data = reinterpret_cast<DTYPE*>(tensor->data_c());
  size_t elements = tensor->ElementsNum();
  std::cout << "the in memory block data size is: " << std::dec << tensor->data().nbytes() << " bytes" << std::endl;
  std::cout << "the in memory block data is: " << std::endl;
  // size_t index: the original `int i` compared signed against unsigned
  // `elements`, a compiler warning and an overflow hazard for huge tensors.
  for (size_t i = 0; i < elements; i++) {
    std::cout << static_cast<DTYPE>(*(me_data + i)) << std::endl;
  }
  std::cout << "the py::str() data is: " << std::endl;
  py::array tensor_data = TensorPy::AsNumpy(*tensor);
  std::cout << std::string(py::str(tensor_data)) << std::endl;
  std::cout << "tensor dtype is: " << py::str(tensor_data.dtype()) << std::endl;
}
// Manually assemble a FuncGraph that applies `prim` to `nparam` inputs and
// returns the result, e.g. MakeFuncGraph(std::make_shared<Primitive>("scalar_add"), 2)
// builds the graph for:
/* python source code:
 * @mindspore
 * def f(x, y):
 *   return x + y
 */
// For the Summary primitives the first input is the constant tag string
// "testSummary" instead of a graph parameter.
FuncGraphPtr MakeFuncGraph(const PrimitivePtr prim, unsigned int nparam) {
  auto graph = std::make_shared<FuncGraph>();
  const std::string &prim_name = prim->name();
  bool summary_prim = (prim_name == "ScalarSummary" || prim_name == "TensorSummary" ||
                       prim_name == "ImageSummary" || prim_name == "HistogramSummary");
  // Build the application node: [prim, arg0, arg1, ...].
  std::vector<AnfNodePtr> call_inputs;
  call_inputs.push_back(NewValueNode(prim));
  for (unsigned int i = 0; i < nparam; i++) {
    if (summary_prim && i == 0) {
      call_inputs.push_back(NewValueNode("testSummary"));
    } else {
      auto param = graph->add_parameter();
      param->set_name("x" + std::to_string(i));
      call_inputs.push_back(param);
    }
  }
  CNodePtr call_node = graph->NewCNode(call_inputs);
  // Wrap the call in a Return node and make it the graph output.
  std::vector<AnfNodePtr> return_inputs;
  return_inputs.push_back(NewValueNode(std::make_shared<Primitive>("Return")));
  return_inputs.push_back(call_node);
  CNodePtr return_node = graph->NewCNode(return_inputs);
  graph->set_return(return_node);
  return graph;
}
// Build an uninitialized MeTensor of element type `t` with shape `shp`.
MeTensorPtr MakeTensor(const TypePtr& t, std::initializer_list<int64_t> shp) {
  std::vector<int64_t> dims(shp);
  return std::make_shared<tensor::Tensor>(t->type_id(), dims);
}
} // namespace transform
} // namespace mindspore

View File

@ -1,45 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TESTS_UT_TRANSFORM_UT_TRANSFORM_BASE_H_
#define TESTS_UT_TRANSFORM_UT_TRANSFORM_BASE_H_
#include <iostream>
#include <string>
#include <memory>
#include <vector>
#include "include/transform/graph_ir/util.h"
#include "ir/tensor.h"
#include "common/common_test.h"
#include "pipeline/jit/parse/parse.h"
#include "graph/tensor.h"
#ifdef OPEN_SOURCE
#include "ge/client/ge_api.h"
#else
#include "external/ge/ge_api.h"
#endif
namespace mindspore {
namespace transform {
std::vector<FuncGraphPtr> getAnfGraph(std::string package, std::string function);
void PrintMeTensor(MeTensor* tensor);
FuncGraphPtr MakeFuncGraph(const PrimitivePtr prim, unsigned int nparam);
MeTensorPtr MakeTensor(const TypePtr& t, std::initializer_list<int64_t> shp);
} // namespace transform
} // namespace mindspore
#endif // TESTS_UT_TRANSFORM_UT_TRANSFORM_BASE_H_