Activate CI tests of CxxApiLite
parent 885bd46760
commit 25ab4b7666
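At a high level, the commit makes the CI unit-test step run the CxxApiLite suites whenever the training library was built, and stages the files those tests need next to the lite-test binary. A minimal sketch of that flow, assuming the binary and its test data have already been packaged as the hunks below arrange (the BUILD_DIR default here is illustrative, not taken from the repo):

```sh
#!/bin/sh
# Sketch only: mirrors the guard and gtest filters added in the run script below.
# BUILD_DIR is an assumed variable; the real CI scripts derive it from the build tree.
BUILD_DIR=${BUILD_DIR:-mindspore/lite/build}

if [ -f "$BUILD_DIR/src/libmindspore-lite-train.so" ]; then
  echo 'run cxx_api ut tests'
  ./lite-test --gtest_filter="TestCxxApiLiteModel*"
  ./lite-test --gtest_filter="TestCxxApiLiteSerialization*"
fi
```

The actual gating and filters appear verbatim in the run-script hunk further down.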
@@ -7,6 +7,7 @@ set(CONVERTER_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/converter)
 set(OBFUSCATOR_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/obfuscator)
 set(CROPPER_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/cropper)
 set(TEST_CASE_DIR ${TOP_DIR}/mindspore/lite/test/build)
+set(TEST_DIR ${TOP_DIR}/mindspore/lite/test)

 set(RUNTIME_DIR ${RUNTIME_PKG_NAME}/runtime)
 set(RUNTIME_INC_DIR ${RUNTIME_PKG_NAME}/runtime/include)
@@ -22,6 +23,7 @@ set(BENCHMARK_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark)
 set(MINDSPORE_LITE_TRAIN_LIB_NAME libmindspore-lite-train)
 set(BENCHMARK_TRAIN_NAME benchmark_train)
 set(BENCHMARK_TRAIN_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark_train)
+file(GLOB JPEGTURBO_LIB_LIST ${jpeg_turbo_LIBPATH}/*.so)

 # full mode will also package the files of lite_cv mode.
 if(BUILD_MINDDATA STREQUAL "full")
@@ -38,7 +40,6 @@ if(BUILD_MINDDATA STREQUAL "full")
 DESTINATION ${MIND_DATA_INC_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})

 if(PLATFORM_ARM64)
-file(GLOB JPEGTURBO_LIB_LIST ${jpeg_turbo_LIBPATH}/*.so)
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.so DESTINATION
 ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.a DESTINATION
@@ -47,7 +48,6 @@ if(BUILD_MINDDATA STREQUAL "full")
 install(FILES ${TOP_DIR}/mindspore/lite/build/securec/src/libsecurec.a
 DESTINATION ${SECUREC_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
 elseif(PLATFORM_ARM32)
-file(GLOB JPEGTURBO_LIB_LIST ${jpeg_turbo_LIBPATH}/*.so)
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.so DESTINATION
 ${RUNTIME_LIB_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.a DESTINATION
@@ -77,12 +77,10 @@ if(BUILD_MINDDATA STREQUAL "wrapper")
 install(DIRECTORY ${TOP_DIR}/mindspore/ccsrc/minddata/dataset/include/ DESTINATION ${MIND_DATA_INC_DIR}
 COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "vision.h" EXCLUDE)
 if(PLATFORM_ARM64)
-file(GLOB JPEGTURBO_LIB_LIST ${jpeg_turbo_LIBPATH}/*.so)
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.so DESTINATION ${RUNTIME_LIB_DIR}
 COMPONENT ${RUNTIME_COMPONENT_NAME})
 install(FILES ${JPEGTURBO_LIB_LIST} DESTINATION ${TURBO_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
 elseif(PLATFORM_ARM32)
-file(GLOB JPEGTURBO_LIB_LIST ${jpeg_turbo_LIBPATH}/*.so)
 install(FILES ${TOP_DIR}/mindspore/lite/build/minddata/libminddata-lite.so DESTINATION ${RUNTIME_LIB_DIR}
 COMPONENT ${RUNTIME_COMPONENT_NAME})
 install(FILES ${JPEGTURBO_LIB_LIST} DESTINATION ${TURBO_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
@@ -229,6 +227,9 @@ if(PLATFORM_ARM64)
 if(MSLITE_ENABLE_TESTCASES)
 install(FILES ${TOP_DIR}/mindspore/lite/build/test/lite-test DESTINATION ${TEST_CASE_DIR}
 COMPONENT ${RUNTIME_COMPONENT_NAME})
+install(DIRECTORY ${TOP_DIR}/mindspore/lite/build/minddata/ DESTINATION ${TEST_CASE_DIR}
+COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.so")
+install(FILES ${JPEGTURBO_LIB_LIST} DESTINATION ${TEST_CASE_DIR})
 endif()
 elseif(PLATFORM_ARM32)
 if(SUPPORT_NPU)
@@ -76,10 +76,21 @@ add_definitions(-DENABLE_V0)

 file(GLOB_RECURSE OPS_SRC ${LITE_DIR}/src/ops/*.cc)
 file(GLOB CXX_SRC
 ${LITE_DIR}/src/cxx_api/*.cc
 ${LITE_DIR}/src/cxx_api/graph/*.cc
 ${LITE_DIR}/src/cxx_api/model/*.cc
-${LITE_DIR}/src/cxx_api/tensor/*.cc)
+${LITE_DIR}/src/cxx_api/tensor/*.cc
+)

+file(GLOB CXX_API_TRAIN_SRCS
+${LITE_DIR}/src/cxx_api/train/*.cc
+${LITE_DIR}/src/cxx_api/metrics/*.cc
+${LITE_DIR}/src/cxx_api/callback/*.cc
+)
+file(GLOB TRAIN_SRCS
+${LITE_DIR}/src/train/*.cc
+)

 if(MSLITE_ENABLE_CONVERTER)
 set(OPS_SRC ${OPS_SRC})
 endif()
@@ -120,6 +131,8 @@ set(TEST_LITE_SRC
 ${LITE_DIR}/src/errorcode.cc
 ${LITE_DIR}/src/cpu_info.cc
 ${LITE_DIR}/tools/common/flag_parser.cc
+${LITE_DIR}/src/train/train_populate_parameter.cc
+${LITE_DIR}/src/train/train_populate_parameter_v0.cc
 )

 file(GLOB KERNEL_REG_SRC ${LITE_DIR}/src/registry/*.cc)
@@ -279,20 +292,10 @@ endif()
 if(SUPPORT_TRAIN)
 set(TEST_LITE_SRC
 ${TEST_LITE_SRC}
-${LITE_DIR}/src/train/train_populate_parameter.cc
-${LITE_DIR}/src/train/train_populate_parameter_v0.cc
-${LITE_DIR}/src/train/train_session.cc
-${LITE_DIR}/src/train/train_export.cc
-${LITE_DIR}/src/train/train_utils.cc
-${LITE_DIR}/src/train/transfer_session.cc
+${CXX_API_TRAIN_SRCS}
+${TRAIN_SRCS}
 ${LITE_DIR}/tools/common/storage.cc
 )
-else()
-set(TEST_LITE_SRC
-${TEST_LITE_SRC}
-${LITE_DIR}/src/train/train_populate_parameter.cc
-${LITE_DIR}/src/train/train_populate_parameter_v0.cc
-)
 endif()
 ### test src
 file(GLOB_RECURSE TEST_CASE_KERNEL_SRC
@@ -305,6 +308,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC

 file(GLOB_RECURSE TEST_CASE_KERNEL_TRAIN_SRC
 ${TEST_DIR}/ut/src/runtime/kernel/arm/fp32_grad/*.cc
+${TEST_DIR}/ut/src/runtime/kernel/arm/cxx_api/*.cc
 )

 set(TEST_SRC
@@ -346,12 +350,6 @@ if(SUPPORT_TRAIN)
 set(TEST_SRC
 ${TEST_SRC}
 ${TEST_CASE_KERNEL_TRAIN_SRC}
-${TEST_DIR}/ut/src/infer_test.cc # temporary
-)
-else()
-set(TEST_SRC
-${TEST_SRC}
-${TEST_DIR}/ut/src/infer_test.cc
 )
 endif()

@@ -391,6 +389,7 @@ add_dependencies(lite-test fbs_src)

 if(SUPPORT_TRAIN)
 add_dependencies(lite-test fbs_inner_src)
+target_link_libraries(lite-test minddata-lite)
 endif()

 target_link_libraries(lite-test
@@ -57,20 +57,19 @@ echo 'run common ut tests'
 ./lite-test --gtest_filter="ModelParserRegistryTest.TestRegistry"
 ./lite-test --gtest_filter="PassRegistryTest.TestRegistry"

-# test cases specific for train
+if [ -f "$BUILD_DIR/src/libmindspore-lite-train.so" ]; then
+echo 'run cxx_api ut tests'
+./lite-test --gtest_filter="TestCxxApiLiteModel*"
+./lite-test --gtest_filter="TestCxxApiLiteSerialization*"

 echo 'run train ut tests'
-# ./lite-test --gtest_filter="TestConvolutionGradFp32*"
-# ./lite-test --gtest_filter="TestActGradFp32*"
-# ./lite-test --gtest_filter="TestSoftmaxGradFp32*"
-# ./lite-test --gtest_filter="TestSoftmaxCrossEntropyFp32*"
-# ./lite-test --gtest_filter="TestDeConvolutionGradFp32*"
-# ./lite-test --gtest_filter="TestBiasGradFp32*"
-# test cases specific for CXX_API
-
-# ./lite-test --gtest_filter="TestCxxApiLiteModel*"
-# ./lite-test --gtest_filter="TestCxxApiLiteSerialization*"
+./lite-test --gtest_filter="TestActGradFp32*"
+./lite-test --gtest_filter="TestSoftmaxGradFp32*"
+./lite-test --gtest_filter="TestSoftmaxCrossEntropyFp32*"
+./lite-test --gtest_filter="TestBiasGradFp32*"
+#./lite-test --gtest_filter="TestConvolutionGradFp32*"
+#./lite-test --gtest_filter="TestDeConvolutionGradFp32*"
+fi

 echo 'run inference ut tests'
 ./lite-test --gtest_filter="ControlFlowTest.TestMergeWhileModel"
@@ -31,8 +31,8 @@ ut_gpu_config=${test_dir}/config/ut_gpu.cfg

 function Run_gpu_ut() {
 cp -a ${test_dir}/build/lite-test ${ut_test_path}/lite-test || exit 1
+cp -a ${test_dir}/build/*.so ${ut_test_path}/
 cp -r ${test_dir}/ut/src/runtime/kernel/opencl/test_data ${ut_test_path} || exit 1

 # adb push all needed files to the phone
 adb -s ${device_id} push ${ut_test_path} /data/local/tmp/ > adb_push_log.txt

@@ -67,7 +67,8 @@ Run_gpu_ut_status=$?

 cat ${run_ut_result_file}
 if [[ $Run_gpu_ut_status == 1 ]]; then
+cat adb_push_log.txt
 cat ${run_gpu_ut_log_file}
 exit 1
 fi
 exit 0
@@ -30,7 +30,7 @@ TEST_F(TestCxxApiLiteModel, test_build_context_uninitialized_FAILED) {
 Model model;
 Graph graph;

-ASSERT_TRUE(Serialization::Load("./test_data/nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 auto status = model.Build(GraphCell(graph), nullptr, nullptr);
 ASSERT_TRUE(status != kSuccess);
 auto err_mst = status.GetErrDescription();
@@ -53,7 +53,7 @@ TEST_F(TestCxxApiLiteModel, test_build_SUCCES) {
 auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
 context->MutableDeviceInfo().push_back(cpu_context);

-ASSERT_TRUE(Serialization::Load("./test_data/nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 ASSERT_TRUE(model.Build(GraphCell(graph), context, nullptr) == kSuccess);
 }

@@ -69,7 +69,7 @@ TEST_F(TestCxxApiLiteModel, test_train_mode_SUCCES) {
 auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
 context->MutableDeviceInfo().push_back(cpu_context);

-ASSERT_TRUE(Serialization::Load("./test_data/nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 ASSERT_TRUE(model.Build(GraphCell(graph), context, nullptr) == kSuccess);
 ASSERT_TRUE(model.SetTrainMode(true) == kSuccess);
 ASSERT_TRUE(model.GetTrainMode() == true);
@@ -88,7 +88,7 @@ TEST_F(TestCxxApiLiteModel, test_outputs_SUCCESS) {
 auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
 context->MutableDeviceInfo().push_back(cpu_context);

-ASSERT_TRUE(Serialization::Load("./test_data/nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 ASSERT_TRUE(model.Build(GraphCell(graph), context, nullptr) == kSuccess);
 auto outputs = model.GetOutputs();
 ASSERT_GT(outputs.size(), 0);
@@ -109,7 +109,7 @@ TEST_F(TestCxxApiLiteModel, test_metrics_SUCCESS) {
 auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
 context->MutableDeviceInfo().push_back(cpu_context);

-ASSERT_TRUE(Serialization::Load("./test_data/nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/conv_train_model.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 ASSERT_TRUE(model.Build(GraphCell(graph), context, nullptr) == kSuccess);
 AccuracyMetrics ac;
 ASSERT_TRUE(model.InitMetrics({&ac}) == kSuccess);
@@ -25,25 +25,25 @@ class TestCxxApiLiteSerialization : public mindspore::CommonTest {

 TEST_F(TestCxxApiLiteSerialization, test_load_no_encrpty_mindir_SUCCESS) {
 Graph graph;
-ASSERT_TRUE(Serialization::Load("./test_data/nets/retinaface1.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
+ASSERT_TRUE(Serialization::Load("./nets/retinaface1.ms", ModelType::kFlatBuffer, &graph) == kSuccess);
 }

 TEST_F(TestCxxApiLiteSerialization, test_load_file_not_exist_FAILED) {
 Graph graph;
-auto status = Serialization::Load("./test_data/nets/file_not_exist.mindir", ModelType::kMindIR, &graph);
+auto status = Serialization::Load("./nets/file_not_exist.mindir", ModelType::kMindIR, &graph);
 ASSERT_TRUE(status != kSuccess);
 }

 TEST_F(TestCxxApiLiteSerialization, test_load_file_not_exist_x2_FAILED) {
 std::vector<Graph> graphs;
-auto status = Serialization::Load(std::vector<std::string>(2, "./data/mindir/file_not_exist.mindir"),
-ModelType::kMindIR, &graphs);
+auto status =
+Serialization::Load(std::vector<std::string>(2, "./nets/file_not_exist.mindir"), ModelType::kMindIR, &graphs);
 ASSERT_TRUE(status != kSuccess);
 }

 TEST_F(TestCxxApiLiteSerialization, test_export_uninitialized_FAILED) {
 Model model;
-ASSERT_TRUE(Serialization::ExportModel(model, ModelType::kFlatBuffer, "./test_data/nets/export.ms") != kSuccess);
+ASSERT_TRUE(Serialization::ExportModel(model, ModelType::kFlatBuffer, "./nets/export.ms") != kSuccess);
 }

 } // namespace mindspore
@@ -33,17 +33,17 @@ class TestActGradFp16 : public mindspore::CommonTest {
 TEST_F(TestActGradFp16, ReluGradFp16) {
 size_t output_data_size = 50;
 size_t input_size;
-std::string input_path = "./test_data/activationGrad/relu_y_50.bin";
+std::string input_path = "./activationGrad/relu_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));

-std::string yt_path = "./test_data/activationGrad/relu_yt_50.bin";
+std::string yt_path = "./activationGrad/relu_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));

-std::string output_path = "./test_data/activationGrad/relu_out_50.bin";
+std::string output_path = "./activationGrad/relu_out_50.bin";
 auto ref_data = reinterpret_cast<const float *>(mindspore::lite::ReadFile(output_path.c_str(), &input_size));
 ASSERT_NE(ref_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -90,15 +90,15 @@ TEST_F(TestActGradFp16, ReluGradFp16) {
 TEST_F(TestActGradFp16, SigmoidGradFp16) {
 size_t output_data_size = 50;
 size_t input_size;
-std::string input_path = "./test_data/activationGrad/sigmoid_y_50.bin";
+std::string input_path = "./activationGrad/sigmoid_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/sigmoid_yt_50.bin";
+std::string yt_path = "./activationGrad/sigmoid_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

-std::string output_path = "./test_data/activationGrad/sigmoid_out_50.bin";
+std::string output_path = "./activationGrad/sigmoid_out_50.bin";
 auto ref_data = reinterpret_cast<const float *>(mindspore::lite::ReadFile(output_path.c_str(), &input_size));
 ASSERT_NE(ref_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -33,15 +33,15 @@ class TestArithmeticSelfGradFp16 : public mindspore::CommonTest {
 TEST_F(TestArithmeticSelfGradFp16, LogGradFp16) {
 size_t output_data_size = 50;
 size_t input_size;
-std::string input_path = "./test_data/activationGrad/log_x_50.bin";
+std::string input_path = "./activationGrad/log_x_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/log_yt_50.bin";
+std::string yt_path = "./activationGrad/log_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

-std::string output_path = "./test_data/activationGrad/log_out_50.bin";
+std::string output_path = "./activationGrad/log_out_50.bin";
 auto ref_data = reinterpret_cast<const float *>(mindspore::lite::ReadFile(output_path.c_str(), &input_size));
 ASSERT_NE(ref_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -40,12 +40,12 @@ TEST_F(TestActGradFp32, ReluGradFp32) {
 size_t output_data_size = 50;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/relu_y_50.bin";
+std::string input_path = "./activationGrad/relu_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));

-std::string yt_path = "./test_data/activationGrad/relu_yt_50.bin";
+std::string yt_path = "./activationGrad/relu_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -74,7 +74,7 @@ TEST_F(TestActGradFp32, ReluGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/relu_out_50.bin";
+std::string output_path = "./activationGrad/relu_out_50.bin";

 int res = CompareRelativeOutput(output_data, output_path);

@@ -94,11 +94,11 @@ TEST_F(TestActGradFp32, Relu6GradFp32) {
 size_t output_data_size = 50;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/relu6_y_50.bin";
+std::string input_path = "./activationGrad/relu6_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/relu6_yt_50.bin";
+std::string yt_path = "./activationGrad/relu6_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

@@ -126,7 +126,7 @@ TEST_F(TestActGradFp32, Relu6GradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/relu6_out_50.bin";
+std::string output_path = "./activationGrad/relu6_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -145,11 +145,11 @@ TEST_F(TestActGradFp32, LReluGradFp32) {
 size_t output_data_size = 50;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/lrelu_y_50.bin";
+std::string input_path = "./activationGrad/lrelu_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/lrelu_yt_50.bin";
+std::string yt_path = "./activationGrad/lrelu_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

@@ -177,7 +177,7 @@ TEST_F(TestActGradFp32, LReluGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/lrelu_out_50.bin";
+std::string output_path = "./activationGrad/lrelu_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -196,11 +196,11 @@ TEST_F(TestActGradFp32, SigmoidGradFp32) {
 size_t output_data_size = 50;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/sigmoid_y_50.bin";
+std::string input_path = "./activationGrad/sigmoid_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/sigmoid_yt_50.bin";
+std::string yt_path = "./activationGrad/sigmoid_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

@@ -228,7 +228,7 @@ TEST_F(TestActGradFp32, SigmoidGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/sigmoid_out_50.bin";
+std::string output_path = "./activationGrad/sigmoid_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -248,11 +248,11 @@ TEST_F(TestActGradFp32, tanhGradFp32) {
 size_t output_data_size = 50;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/tanh_y_50.bin";
+std::string input_path = "./activationGrad/tanh_y_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

-std::string yt_path = "./test_data/activationGrad/tanh_yt_50.bin";
+std::string yt_path = "./activationGrad/tanh_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);

@@ -280,7 +280,7 @@ TEST_F(TestActGradFp32, tanhGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/tanh_out_50.bin";
+std::string output_path = "./activationGrad/tanh_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -298,12 +298,12 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
 const size_t output_data_size = 10;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/hswish_x_50.bin";
+std::string input_path = "./activationGrad/hswish_x_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));

-std::string yt_path = "./test_data/activationGrad/hswish_yt_50.bin";
+std::string yt_path = "./activationGrad/hswish_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -333,7 +333,7 @@ TEST_F(TestActGradFp32, hswishGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/hswish_out_50.bin";
+std::string output_path = "./activationGrad/hswish_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -351,12 +351,12 @@ TEST_F(TestActGradFp32, hsigmoidGradFp32) {
 const size_t output_data_size = 10;

 size_t input_size;
-std::string input_path = "./test_data/activationGrad/hsig_x_50.bin";
+std::string input_path = "./activationGrad/hsig_x_50.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));

-std::string yt_path = "./test_data/activationGrad/hsig_yt_50.bin";
+std::string yt_path = "./activationGrad/hsig_yt_50.bin";
 auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
 ASSERT_NE(yt_data, nullptr);
 EXPECT_EQ(input_size, output_data_size * sizeof(float));
@@ -386,7 +386,7 @@ TEST_F(TestActGradFp32, hsigmoidGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/activationGrad/hsig_out_50.bin";
+std::string output_path = "./activationGrad/hsig_out_50.bin";
 int res = CompareRelativeOutput(output_data, output_path);

 EXPECT_EQ(res, 0);
@@ -60,24 +60,24 @@ std::vector<lite::Tensor *> GenerateTensorsForTest(const char *test, int test_id
 std::vector<int> small_dim({6});
 int large_size = (4 * 6);
 int small_size = (1 * 6);
-char *dx1_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_1_x1_4_6.bin");
-char *dx2_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_1_x2_1_6.bin");
+char *dx1_file = const_cast<char *>("./operators/arithmetic_fp32_1_x1_4_6.bin");
+char *dx2_file = const_cast<char *>("./operators/arithmetic_fp32_1_x2_1_6.bin");

 if (test_id == 7) {
 large_dim = std::vector<int>({4, 5, 6});
 small_dim = std::vector<int>({6});
 large_size = (4 * 5 * 6);
 small_size = (6);
-dx1_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_7_x1_4_5_6.bin");
-dx2_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_7_x2_1_1_6.bin");
+dx1_file = const_cast<char *>("./operators/arithmetic_fp32_7_x1_4_5_6.bin");
+dx2_file = const_cast<char *>("./operators/arithmetic_fp32_7_x2_1_1_6.bin");
 }
 if (test_id >= 8) {
 large_dim = std::vector<int>({5, 4, 6});
 small_dim = std::vector<int>({5, 1, 6});
 large_size = (4 * 5 * 6);
 small_size = (5 * 6);
-dx1_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_8_x1_5_4_6.bin");
-dx2_file = const_cast<char *>("./test_data/operators/arithmetic_fp32_8_x2_5_1_6.bin");
+dx1_file = const_cast<char *>("./operators/arithmetic_fp32_8_x1_5_4_6.bin");
+dx2_file = const_cast<char *>("./operators/arithmetic_fp32_8_x2_5_1_6.bin");
 }

 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(test, &input_size));
@@ -195,8 +195,7 @@ std::vector<lite::Tensor *> GenerateTensorsForTest(const char *test, int test_id
 }

 TEST_F(TestArithmeticGradFp32, TestAddGradFp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_1_dy_4_6.bin", 1);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_1_dy_4_6.bin", 1);
 ASSERT_NE(all_tensors.size(), 0);
 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
 std::vector<lite::Tensor *> outputs = {all_tensors[3], all_tensors[4]};
@@ -224,10 +223,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_1_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_1_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_1_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_1_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -239,8 +238,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGradFp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_1_dy_4_6.bin", 1);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_1_dy_4_6.bin", 1);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[2], all_tensors[1]};
@@ -269,10 +267,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_1_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_1_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_1_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_1_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -286,8 +284,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_8_dy_5_4_6.bin", 8);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_8_dy_5_4_6.bin", 8);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -316,10 +313,10 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_8_dx2_5_1_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_8_dx2_5_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_8_dx1_5_4_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_8_dx1_5_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

 for (auto tensor : all_tensors) {
@@ -334,8 +331,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestSubGradFp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_2_dy_4_6.bin", 2);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_2_dy_4_6.bin", 2);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -364,10 +360,10 @@ TEST_F(TestArithmeticGradFp32, TestSubGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_2_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_2_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_2_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_2_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

 for (auto tensor : all_tensors) {
@@ -382,8 +378,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGradFp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_3_dy_4_6.bin", 3);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_3_dy_4_6.bin", 3);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[2], all_tensors[1]};
@@ -412,10 +407,10 @@ TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_3_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_3_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_3_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_3_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));

 for (auto tensor : all_tensors) {
@@ -428,8 +423,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestMulGradFp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_4_dy_4_6.bin", 4);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_4_dy_4_6.bin", 4);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -467,10 +461,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_4_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_4_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_4_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_4_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -483,8 +477,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGradFp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_4_dy_4_6.bin", 4);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_4_dy_4_6.bin", 4);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[2], all_tensors[1]};
@@ -513,10 +506,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_4_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_4_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_4_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_4_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -530,8 +523,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_9_dy_5_4_6.bin", 9);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_9_dy_5_4_6.bin", 9);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -560,10 +552,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_9_dx1_5_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_9_dx1_5_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_9_dx2_5_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_9_dx2_5_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -577,8 +569,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_9_dy_5_4_6.bin", 9);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_9_dy_5_4_6.bin", 9);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[2], all_tensors[1]};
@@ -607,10 +598,10 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_9_dx1_5_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_9_dx1_5_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_9_dx2_5_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_9_dx2_5_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -624,8 +615,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestDivGradFp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_5_dy_4_6.bin", 5);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_5_dy_4_6.bin", 5);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -654,10 +644,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGradFp32) {
 }
 std::cout << std::endl;

-std::string output_path = "./test_data/operators/arithmetic_fp32_5_dx1_4_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_5_dx1_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), output_path));

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_5_dx2_1_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_5_dx2_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, dx2_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -671,8 +661,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGradFp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_6_dy_4_6.bin", 6);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_6_dy_4_6.bin", 6);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[2], all_tensors[1]};
@@ -701,10 +690,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) {
 }
 std::cout << std::endl;

-std::string dx2_path = "./test_data/operators/arithmetic_fp32_6_dx2_4_6.bin";
+std::string dx2_path = "./operators/arithmetic_fp32_6_dx2_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[1]->MutableData()), dx2_path));

-std::string output_path = "./test_data/operators/arithmetic_fp32_6_dx1_1_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_6_dx1_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));

 for (auto tensor : all_tensors) {
@@ -719,8 +708,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) {
-std::vector<lite::Tensor *> all_tensors =
-GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_10_dy_5_4_6.bin", 10);
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_10_dy_5_4_6.bin", 10);
 ASSERT_NE(all_tensors.size(), 0);

 std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
@@ -749,10 +737,10 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) {
 }
 std::cout << std::endl;

-std::string dx1_path = "./test_data/operators/arithmetic_fp32_10_dx1_5_4_6.bin";
+std::string dx1_path = "./operators/arithmetic_fp32_10_dx1_5_4_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));

-std::string output_path = "./test_data/operators/arithmetic_fp32_10_dx2_5_1_6.bin";
+std::string output_path = "./operators/arithmetic_fp32_10_dx2_5_1_6.bin";
 EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
 for (auto tensor : all_tensors) {
 delete[] reinterpret_cast<float *>(tensor->MutableData());
@@ -766,8 +754,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) {
 }

 TEST_F(TestArithmeticGradFp32, Test3DDivGrad2Fp32) {
-std::vector<lite::Tensor *> all_tensors =
+std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_7_dy_4_5_6.bin", 7);
std::vector<lite::Tensor *> all_tensors = GenerateTensorsForTest("./operators/arithmetic_fp32_7_dy_4_5_6.bin", 7);
|
||||||
GenerateTensorsForTest("./test_data/operators/arithmetic_fp32_7_dy_4_5_6.bin", 7);
|
|
||||||
ASSERT_NE(all_tensors.size(), 0);
|
ASSERT_NE(all_tensors.size(), 0);
|
||||||
|
|
||||||
std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
|
std::vector<lite::Tensor *> inputs = {all_tensors[0], all_tensors[1], all_tensors[2]};
|
||||||
|
@ -796,10 +783,10 @@ TEST_F(TestArithmeticGradFp32, Test3DDivGrad2Fp32) {
|
||||||
}
|
}
|
||||||
std::cout << std::endl;
|
std::cout << std::endl;
|
||||||
|
|
||||||
std::string dx1_path = "./test_data/operators/arithmetic_fp32_7_dx1_4_5_6.bin";
|
std::string dx1_path = "./operators/arithmetic_fp32_7_dx1_4_5_6.bin";
|
||||||
EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
|
EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
|
||||||
|
|
||||||
std::string output_path = "./test_data/operators/arithmetic_fp32_7_dx2_1_1_6.bin";
|
std::string output_path = "./operators/arithmetic_fp32_7_dx2_1_1_6.bin";
|
||||||
EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
|
EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
|
||||||
for (auto tensor : all_tensors) {
|
for (auto tensor : all_tensors) {
|
||||||
delete[] reinterpret_cast<float *>(tensor->MutableData());
|
delete[] reinterpret_cast<float *>(tensor->MutableData());
|
||||||
|
@ -819,10 +806,10 @@ TEST_F(TestArithmeticGradFp32, TestMaximumGradBroadcastFp32) {
|
||||||
int large_size = (2 * 3);
|
int large_size = (2 * 3);
|
||||||
int small_size = 3;
|
int small_size = 3;
|
||||||
size_t input_size;
|
size_t input_size;
|
||||||
char *dx1_file = const_cast<char *>("./test_data/operators/x1_maximum.bin");
|
char *dx1_file = const_cast<char *>("./operators/x1_maximum.bin");
|
||||||
char *dx2_file = const_cast<char *>("./test_data/operators/x2_maximum.bin");
|
char *dx2_file = const_cast<char *>("./operators/x2_maximum.bin");
|
||||||
|
|
||||||
std::string yt_path = "./test_data/operators/yt_maximum.bin";
|
std::string yt_path = "./operators/yt_maximum.bin";
|
||||||
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
|
auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
|
||||||
ASSERT_NE(dy_data, nullptr);
|
ASSERT_NE(dy_data, nullptr);
|
||||||
EXPECT_EQ(input_size, large_size * sizeof(float));
|
EXPECT_EQ(input_size, large_size * sizeof(float));
|
||||||
|
@ -881,10 +868,10 @@ TEST_F(TestArithmeticGradFp32, TestMaximumGradBroadcastFp32) {
|
||||||
}
|
}
|
||||||
std::cout << std::endl;
|
std::cout << std::endl;
|
||||||
|
|
||||||
std::string dx1_path = "./test_data/operators/x1_grad_maximum.bin";
|
std::string dx1_path = "./operators/x1_grad_maximum.bin";
|
||||||
EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
|
EXPECT_EQ(0, CompareRelativeOutput(reinterpret_cast<float *>(outputs[0]->MutableData()), dx1_path));
|
||||||
|
|
||||||
std::string output_path = "./test_data/operators/x2_grad_maximum.bin";
|
std::string output_path = "./operators/x2_grad_maximum.bin";
|
||||||
EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
|
EXPECT_EQ(0, CompareRelativeOutput(output_ptr, output_path));
|
||||||
for (auto tensor : inputs) {
|
for (auto tensor : inputs) {
|
||||||
delete[] reinterpret_cast<float *>(tensor->MutableData());
|
delete[] reinterpret_cast<float *>(tensor->MutableData());
|
||||||
|
|
|
@@ -34,7 +34,7 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
 ASSERT_NE(bias_param, nullptr);

 size_t input_size;
-std::string input_path = "./test_data/operators/biasgradfp32_1_dy_10_28_28_7.bin";
+std::string input_path = "./operators/biasgradfp32_1_dy_10_28_28_7.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);

@@ -69,7 +69,7 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
 std::cout << output_data[i] << " ,";
 }
 std::cout << std::endl;
-std::string output_path = "./test_data/operators/biasgradfp32_1_db_7.bin";
+std::string output_path = "./operators/biasgradfp32_1_db_7.bin";
 auto res = CompareRelativeOutput(output_data, output_path);
 EXPECT_EQ(res, 0);

@@ -88,7 +88,7 @@ TEST_F(TestBiasGradFp32, BiasGrad2DFp32) {
 ASSERT_NE(bias_param, nullptr);

 size_t input_size;
-std::string input_path = "./test_data/operators/fc_yt.f32";
+std::string input_path = "./operators/fc_yt.f32";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 std::vector<int> dim_dy({2, 20});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
@@ -121,7 +121,7 @@ TEST_F(TestBiasGradFp32, BiasGrad2DFp32) {
 std::cout << output_data[i] << " ,";
 }
 std::cout << std::endl;
-std::string output_path = "./test_data/operators/fc_b_grad.f32";
+std::string output_path = "./operators/fc_b_grad.f32";
 auto res = CompareRelativeOutput(output_data, output_path);
 EXPECT_EQ(res, 0);

@@ -55,15 +55,15 @@ TEST_F(TestBNGradFp32, BNGradFp32) {
 const int height = 4;
 const int width = 5;

-auto dy_tensor = CreateInTensor("./test_data/bngrad/dy_2_4_5_3.bin", {batch, height, width, channels});
+auto dy_tensor = CreateInTensor("./bngrad/dy_2_4_5_3.bin", {batch, height, width, channels});
 ASSERT_NE(dy_tensor, nullptr);
-auto x_tensor = CreateInTensor("./test_data/bngrad/input_x_2_4_5_3.bin", {batch, height, width, channels});
+auto x_tensor = CreateInTensor("./bngrad/input_x_2_4_5_3.bin", {batch, height, width, channels});
 ASSERT_NE(x_tensor, nullptr);
-auto scale_tensor = CreateInTensor("./test_data/bngrad/scale_3.bin", {1, 1, 1, channels});
+auto scale_tensor = CreateInTensor("./bngrad/scale_3.bin", {1, 1, 1, channels});
 ASSERT_NE(scale_tensor, nullptr);
-auto mean_tensor = CreateInTensor("./test_data/bngrad/save_mean_3.bin", {1, 1, 1, channels});
+auto mean_tensor = CreateInTensor("./bngrad/save_mean_3.bin", {1, 1, 1, channels});
 ASSERT_NE(mean_tensor, nullptr);
-auto var_tensor = CreateInTensor("././test_data/bngrad/save_var_3.bin", {1, 1, 1, channels});
+auto var_tensor = CreateInTensor("././bngrad/save_var_3.bin", {1, 1, 1, channels});
 ASSERT_NE(var_tensor, nullptr);

 // prepare output tensors
@@ -95,19 +95,19 @@ TEST_F(TestBNGradFp32, BNGradFp32) {
 auto dx = reinterpret_cast<float *>(outputs[0]->MutableData());
 for (int i = 0; i < 7; i++) std::cout << dx[i] << " ";
 std::cout << "\n";
-auto res = CompareRelativeOutput(dx, "./test_data/bngrad/output_dx_2_4_5_3.bin");
+auto res = CompareRelativeOutput(dx, "./bngrad/output_dx_2_4_5_3.bin");
 EXPECT_EQ(res, 0);
 std::cout << "\n=======dscale=======\n";
 auto dscale = reinterpret_cast<float *>(outputs[1]->MutableData());
 for (int i = 0; i < channels; i++) std::cout << dscale[i] << " ";
 std::cout << "\n";
-res = CompareRelativeOutput(dscale, "./test_data/bngrad/output_dscale_3.bin");
+res = CompareRelativeOutput(dscale, "./bngrad/output_dscale_3.bin");
 EXPECT_EQ(res, 0);
 std::cout << "==========dbias==========\n";
 auto dbias = reinterpret_cast<float *>(outputs[2]->MutableData());
 for (int i = 0; i < 3; i++) std::cout << dbias[i] << " ";
 std::cout << "\n";
-res = CompareRelativeOutput(dbias, "./test_data/bngrad/output_dbias_3.bin");
+res = CompareRelativeOutput(dbias, "./bngrad/output_dbias_3.bin");
 for (auto v : inputs) {
 delete[] reinterpret_cast<float *>(v->MutableData());
 v->set_data(nullptr);
@@ -128,7 +128,7 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) {
 const int height = 4;
 const int width = 5;
 bn_param->channel_ = channels;
-auto x_tensor = CreateInTensor("./test_data/bngrad/input_x_2_4_5_3.bin", {batch, height, width, channels});
+auto x_tensor = CreateInTensor("./bngrad/input_x_2_4_5_3.bin", {batch, height, width, channels});

 lite::Tensor scale_tensor(TypeId::kNumberTypeFloat32, {1, 1, 1, channels});
 ASSERT_EQ(scale_tensor.MallocData(), 0);
@@ -204,9 +204,9 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) {
 for (int i = 0; i < channels; i++) std::cout << curr_var[i] << " ";
 std::cout << "\n";
 delete[] reinterpret_cast<float *>(x_tensor->MutableData());
-auto res = CompareRelativeOutput(curr_mean, "./test_data/bngrad/running_mean_3.bin");
+auto res = CompareRelativeOutput(curr_mean, "./bngrad/running_mean_3.bin");
 EXPECT_EQ(res, 0);
-res = CompareRelativeOutput(curr_var, "./test_data/bngrad/running_var_3.bin");
+res = CompareRelativeOutput(curr_var, "./bngrad/running_var_3.bin");
 EXPECT_EQ(res, 0);

 x_tensor->set_data(nullptr);
@@ -81,7 +81,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {

 InitConvParamGroup1FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_1_28_28_32.bin";
+std::string dy_path = "./conv/convfp32_dy_1_28_28_32.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({1, 28, 28, 32});
@@ -95,7 +95,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/conv/convfp32_x_1_28_28_3.bin";
+std::string input_path = "./conv/convfp32_x_1_28_28_3.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({1, 28, 28, 3});
@@ -137,7 +137,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dw_32_3_3_3.bin";
+std::string output_path = "./conv/convfp32_dw_32_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -160,14 +160,14 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) {

 InitConvParamGroup1FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_1_28_28_32.bin";
+std::string dy_path = "./conv/convfp32_dy_1_28_28_32.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({1, 28, 28, 32});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
 dy_tensor.set_data(dy_data);

 size_t w_size;
-std::string w_path = "./test_data/conv/convfp32_w_32_3_3_3.bin";
+std::string w_path = "./conv/convfp32_w_32_3_3_3.bin";
 auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
 std::vector<int> dim_dw({32, 3, 3, 3});
 lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw);
@@ -215,7 +215,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dx_1_28_28_3.bin";
+std::string output_path = "./conv/convfp32_dx_1_28_28_3.bin";
 auto res = CompareRelativeOutput(dx_data, output_path);
 EXPECT_EQ(res, 0);
 delete[] dx_data;
@@ -237,7 +237,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {

 InitConvParamGroup3FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_g3_1_28_28_18.bin";
+std::string dy_path = "./conv/convfp32_dy_g3_1_28_28_18.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({1, 28, 28, 18});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
@@ -250,7 +250,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
 conv_param->input_channel_ / conv_param->group_;

 size_t input_size;
-std::string input_path = "./test_data/conv/convfp32_x_g3_1_28_28_3.bin";
+std::string input_path = "./conv/convfp32_x_g3_1_28_28_3.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 std::vector<int> dim_x({1, 28, 28, 3});
 lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
@@ -288,7 +288,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dw_g3_18_3_3_3.bin";
+std::string output_path = "./conv/convfp32_dw_g3_18_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);
 EXPECT_EQ(res, 0);

@@ -310,14 +310,14 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) {

 InitConvParamGroup3FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_g3_1_28_28_18.bin";
+std::string dy_path = "./conv/convfp32_dy_g3_1_28_28_18.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({1, 28, 28, 18});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
 dy_tensor.set_data(dy_data);

 size_t w_size;
-std::string w_path = "./test_data/conv/convfp32_w_g3_18_3_3_3.bin";
+std::string w_path = "./conv/convfp32_w_g3_18_3_3_3.bin";
 auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
 std::vector<int> dim_dw({18, 3, 3, 1});
 lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_dw);
@@ -365,7 +365,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dx_g3_1_28_28_3.bin";
+std::string output_path = "./conv/convfp32_dx_g3_1_28_28_3.bin";
 auto res = CompareRelativeOutput(dx_data, output_path);
 EXPECT_EQ(res, 0);
 delete[] dx_data;
@@ -387,7 +387,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {

 InitConvParamGroup3Dilation2FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_g3_d2_1_26_26_18.bin";
+std::string dy_path = "./conv/convfp32_dy_g3_d2_1_26_26_18.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({1, 26, 26, 18});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
@@ -400,7 +400,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
 conv_param->input_channel_ / conv_param->group_;

 size_t input_size;
-std::string input_path = "./test_data/conv/convfp32_x_g3_d2_1_28_28_3.bin";
+std::string input_path = "./conv/convfp32_x_g3_d2_1_28_28_3.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 std::vector<int> dim_x({1, 28, 28, 3});
 lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
@@ -441,7 +441,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dw_g3_d2_18_3_3_3.bin";
+std::string output_path = "./conv/convfp32_dw_g3_d2_18_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);
 EXPECT_EQ(res, 0);
 delete[] input_data;
@@ -462,14 +462,14 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) {

 InitConvParamGroup3Dilation2FP32(conv_param);
 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_g3_d2_1_26_26_18.bin";
+std::string dy_path = "./conv/convfp32_dy_g3_d2_1_26_26_18.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({1, 26, 26, 18});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
 dy_tensor.set_data(dy_data);

 size_t w_size;
-std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin";
+std::string w_path = "./conv/convfp32_w_g3_d2_18_3_3_3.bin";
 auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
 std::vector<int> dim_w({18, 3, 3, 1});
 lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w);
@@ -512,7 +512,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dx_g3_d2_1_28_28_3.bin";
+std::string output_path = "./conv/convfp32_dx_g3_d2_1_28_28_3.bin";
 auto res = CompareRelativeOutput(dx_data, output_path);
 EXPECT_EQ(res, 0);
 delete[] dx_data;
@@ -533,14 +533,14 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) {

 InitConvParamGroup3Dilation2FP32(conv_param);
 size_t x_size;
-std::string x_path = "./test_data/conv/convfp32_x_g3_d2_1_28_28_3.bin";
+std::string x_path = "./conv/convfp32_x_g3_d2_1_28_28_3.bin";
 auto x_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(x_path.c_str(), &x_size));
 std::vector<int> dim_x({1, 28, 28, 3});
 lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
 x_tensor.set_data(x_data);

 size_t w_size;
-std::string w_path = "./test_data/conv/convfp32_w_g3_d2_18_3_3_3.bin";
+std::string w_path = "./conv/convfp32_w_g3_d2_18_3_3_3.bin";
 auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
 std::vector<int> dim_w({18, 3, 3, 1});
 lite::Tensor w_tensor(TypeId::kNumberTypeFloat32, dim_w);
@@ -589,7 +589,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_y_g3_d2_1_26_26_18.bin";
+std::string output_path = "./conv/convfp32_y_g3_d2_1_26_26_18.bin";
 auto res = CompareRelativeOutput(y_data, output_path);
 EXPECT_EQ(res, 0);

@@ -638,7 +638,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_d2_g2_s2_2_12_15_15.bin";
+std::string dy_path = "./conv/convfp32_dy_d2_g2_s2_2_12_15_15.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 std::vector<int> dim_dy({2, 15, 15, 12});
 lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
@@ -651,7 +651,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/conv/convfp32_input0_d2_g2_s2_2_4_32_32.bin";
+std::string input_path = "./conv/convfp32_input0_d2_g2_s2_2_4_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 4});
@@ -693,7 +693,7 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_dw_d2_g2_s2_12_2_3_3.bin";
+std::string output_path = "./conv/convfp32_dw_d2_g2_s2_12_2_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -743,7 +743,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/conv/convfp32_dy_d2_g2_s2_2_12_15_15.bin";
+std::string dy_path = "./conv/convfp32_dy_d2_g2_s2_2_12_15_15.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 15, 15, 12});
@@ -751,7 +751,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
 dy_tensor.set_data(dy_data);

 size_t w_size;
-std::string w_path = "./test_data/conv/convfp32_w_d2_g2_s2_12_2_3_3.bin";
+std::string w_path = "./conv/convfp32_w_d2_g2_s2_12_2_3_3.bin";
 auto w_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(w_path.c_str(), &w_size));
 ASSERT_NE(w_data, nullptr);
 std::vector<int> dim_w({12, 3, 3, 2});
@@ -801,7 +801,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/conv/convfp32_inputdx_d2_g2_s2_2_4_32_32.bin";
+std::string output_path = "./conv/convfp32_inputdx_d2_g2_s2_2_4_32_32.bin";
 auto res = CompareRelativeOutput(dx_data, output_path);
 EXPECT_EQ(res, 0);
 delete[] dx_data;
@@ -63,7 +63,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_2_9_63_63.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_2_9_63_63.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 63, 63, 9});
@@ -74,7 +74,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_2_3_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_2_3_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 3});
@@ -120,7 +120,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_9_3_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_9_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -171,7 +171,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_d2_2_9_65_65.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_d2_2_9_65_65.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 65, 65, 9});
@@ -182,7 +182,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_d2_2_3_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_d2_2_3_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 3});
@@ -223,7 +223,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_9_3_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_d2_9_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -273,7 +273,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_d2_g3_2_9_65_65.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_d2_g3_2_9_65_65.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 65, 65, 9});
@@ -287,7 +287,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_d2_g3_2_3_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_d2_g3_2_3_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 3});
@@ -330,7 +330,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g3_3_3_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_d2_g3_3_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -380,7 +380,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_d2_g3_s1_2_9_34_34.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_d2_g3_s1_2_9_34_34.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 34, 34, 9});
@@ -391,7 +391,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_d2_g3_s1_2_3_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_d2_g3_s1_2_3_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 3});
@@ -437,7 +437,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g3_s1_3_3_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_d2_g3_s1_3_3_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -487,7 +487,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_d2_g2_s2_2_12_65_65.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_d2_g2_s2_2_12_65_65.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 65, 65, 12});
@@ -498,7 +498,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_d2_g2_s2_2_4_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_d2_g2_s2_2_4_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 4});
@@ -544,7 +544,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g2_s2_6_4_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_d2_g2_s2_6_4_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -594,7 +594,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
 conv_param->thread_num_ = 1;

 size_t dy_size;
-std::string dy_path = "./test_data/deconv/deconvfp32_dy_d2_g12_s2_2_12_65_65.bin";
+std::string dy_path = "./deconv/deconvfp32_dy_d2_g12_s2_2_12_65_65.bin";
 auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &dy_size));
 ASSERT_NE(dy_data, nullptr);
 std::vector<int> dim_dy({2, 65, 65, 12});
@@ -608,7 +608,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
 conv_param->output_channel_ * conv_param->kernel_h_ * conv_param->kernel_w_ * conv_param->input_channel_;

 size_t input_size;
-std::string input_path = "./test_data/deconv/deconvfp32_input0_d2_g12_s2_2_12_32_32.bin";
+std::string input_path = "./deconv/deconvfp32_input0_d2_g12_s2_2_12_32_32.bin";
 auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
 ASSERT_NE(input_data, nullptr);
 std::vector<int> dim_x({2, 32, 32, 12});
@@ -651,7 +651,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) {
 time_avg = cost / loop_count;
 printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-std::string output_path = "./test_data/deconv/deconvfp32_dw_d2_g12_s2_12_1_3_3.bin";
+std::string output_path = "./deconv/deconvfp32_dw_d2_g12_s2_12_1_3_3.bin";
 auto res = CompareRelativeOutput(dw_data, output_path);

 EXPECT_EQ(res, 0);
@@ -102,12 +102,12 @@ TEST_F(NetworkTest, efficient_net) {
 context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = lite::NO_BIND;
 context->thread_num_ = 1;

-std::string net = "./test_data/nets/effnetb0_fwd_nofuse.ms";
+std::string net = "./nets/effnetb0_fwd_nofuse.ms";
 auto session = session::TrainSession::CreateTrainSession(net, context, false);
 ASSERT_NE(session, nullptr);

-std::string in = "./test_data/nets/effNet_input_x_1_3_224_224.bin";
-std::string out = "./test_data/nets/effNet_output_y_1_1000.bin";
+std::string in = "./nets/effNet_input_x_1_3_224_224.bin";
+std::string out = "./nets/effNet_output_y_1_1000.bin";
 auto res = runNet(session, in, out, "650");
 delete session;
 delete context;
@@ -118,7 +118,7 @@ TEST_F(NetworkTest, mobileface_net) {
 char *buf = nullptr;
 size_t net_size = 0;

-std::string net = "./test_data/nets/mobilefacenet0924.ms";
+std::string net = "./nets/mobilefacenet0924.ms";
 ReadFile(net.c_str(), &net_size, &buf);
 auto model = lite::Model::Import(buf, net_size);
 delete[] buf;
@@ -133,8 +133,8 @@ TEST_F(NetworkTest, mobileface_net) {
 ASSERT_EQ(lite::RET_OK, ret);
 // session->Eval();

-std::string in = "./test_data/nets/facenet_input.f32";
-std::string out = "./test_data/nets/facenet_output.f32";
+std::string in = "./nets/facenet_input.f32";
+std::string out = "./nets/facenet_output.f32";
 auto res = runNet(session, in, out, "354", true);

 ASSERT_EQ(res, 0);
@@ -144,7 +144,7 @@ TEST_F(NetworkTest, mobileface_net) {
 }

 TEST_F(NetworkTest, noname) {
-std::string net = "./test_data/nets/lenet_train.ms";
+std::string net = "./nets/lenet_train.ms";
 lite::Context context;
 context.device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = lite::NO_BIND;
 context.thread_num_ = 1;
@@ -163,7 +163,7 @@ TEST_F(NetworkTest, noname) {
 }

 TEST_F(NetworkTest, setname) {
-std::string net = "./test_data/nets/lenet_train.ms";
+std::string net = "./nets/lenet_train.ms";
 lite::Context context;
 context.device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = lite::NO_BIND;
 context.thread_num_ = 1;
@ -72,7 +72,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingGradFp32) {
|
||||||
pooling_param->output_batch_ * pooling_param->output_channel_ * pooling_param->output_h_ * pooling_param->output_w_;
|
pooling_param->output_batch_ * pooling_param->output_channel_ * pooling_param->output_h_ * pooling_param->output_w_;
|
||||||
|
|
||||||
size_t input_size;
|
size_t input_size;
|
||||||
std::string input_path = "./test_data/pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin";
|
std::string input_path = "./pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin";
|
||||||
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
|
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
|
||||||
ASSERT_NE(input_data, nullptr);
|
ASSERT_NE(input_data, nullptr);
|
||||||
|
|
||||||
|
@ -101,7 +101,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingGradFp32) {
|
||||||
std::cout << output_data[i] << " ,";
|
std::cout << output_data[i] << " ,";
|
||||||
}
|
}
|
||||||
std::cout << std::endl;
|
std::cout << std::endl;
|
||||||
std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
|
std::string output_path = "./pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
|
||||||
auto res = CompareOutput(output_data, output_data_size, output_path);
|
auto res = CompareOutput(output_data, output_data_size, output_path);
|
||||||
EXPECT_EQ(res, 0);
|
EXPECT_EQ(res, 0);
|
||||||
|
|
||||||
|
@ -127,14 +127,14 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
|
||||||
pooling_param->output_batch_ * pooling_param->output_channel_ * pooling_param->output_h_ * pooling_param->output_w_;
|
pooling_param->output_batch_ * pooling_param->output_channel_ * pooling_param->output_h_ * pooling_param->output_w_;
|
||||||
|
|
||||||
size_t input_size;
|
size_t input_size;
|
||||||
std::string input_path = "./test_data/pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin";
|
std::string input_path = "./pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin";
|
||||||
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
|
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
|
||||||
ASSERT_NE(input_data, nullptr);
|
ASSERT_NE(input_data, nullptr);
|
||||||
std::vector<int> dim_dy({1, 28, 28, 3});
|
std::vector<int> dim_dy({1, 28, 28, 3});
|
||||||
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
|
lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
|
||||||
dy_tensor.set_data(input_data);
|
dy_tensor.set_data(input_data);
|
||||||
|
|
||||||
std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_1_28_28_3.bin";
|
std::string input1_path = "./pooling/avgpoolgradfp32_1_x_1_28_28_3.bin";
|
||||||
auto input1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input1_path.c_str(), &input_size));
|
auto input1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input1_path.c_str(), &input_size));
|
||||||
ASSERT_NE(input1_data, nullptr);
|
ASSERT_NE(input1_data, nullptr);
|
||||||
std::vector<int> dim_x({1, 28, 28, 3});
|
std::vector<int> dim_x({1, 28, 28, 3});
|
||||||
|
@ -170,7 +170,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
|
||||||
std::cout << output_data[i] << " ,";
|
std::cout << output_data[i] << " ,";
|
||||||
}
|
}
|
||||||
std::cout << std::endl;
|
std::cout << std::endl;
|
||||||
std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
|
std::string output_path = "./pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
|
||||||
auto res = CompareOutput(output_data, output_data_size, output_path);
|
auto res = CompareOutput(output_data, output_data_size, output_path);
|
||||||
EXPECT_EQ(res, 0);
|
EXPECT_EQ(res, 0);
|
||||||
|
|
||||||
|
@@ -200,14 +200,14 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {
   printf("Calculating runtime cost...\n");
   // uint64_t time_avg = 0;
   size_t input_size;
-  std::string input_path = "./test_data/pooling/avgpoolgradfp32_1_dy_3_28_28_3.bin";
+  std::string input_path = "./pooling/avgpoolgradfp32_1_dy_3_28_28_3.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);
   std::vector<int> dim_dy({3, 28, 28, 3});
   lite::Tensor dy_tensor(TypeId::kNumberTypeFloat32, dim_dy);
   dy_tensor.set_data(input_data);

-  std::string input1_path = "./test_data/pooling/avgpoolgradfp32_1_x_3_28_28_3.bin";
+  std::string input1_path = "./pooling/avgpoolgradfp32_1_x_3_28_28_3.bin";
   auto input1_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input1_path.c_str(), &input_size));
   ASSERT_NE(input1_data, nullptr);
   std::vector<int> dim_x({3, 28, 28, 3});
@@ -242,7 +242,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {
     std::cout << output_data[i] << " ,";
   }
   std::cout << std::endl;
-  std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_3_28_28_3.bin";
+  std::string output_path = "./pooling/avgpoolgradfp32_1_dx_3_28_28_3.bin";
   size_t output_data_size = dx_tensor.ElementsNum();
   auto res = CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);
@@ -274,15 +274,15 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) {

   size_t input_size;

-  auto x_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
+  auto x_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/avgpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
   ASSERT_NE(x_data, nullptr);
   std::vector<int> dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_});
   lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
   x_tensor.set_data(x_data);

-  auto yt_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
+  auto yt_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/avgpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
   ASSERT_NE(yt_data, nullptr);
   std::vector<int> dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_});
   lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
@@ -308,14 +308,12 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) {
   ret = kernel->Run();
   EXPECT_EQ(0, ret);

-  std::string output_path = "./test_data/pooling/avgpoolgradfp32_s2_dx_3_28_28_3.bin";
+  std::string output_path = "./pooling/avgpoolgradfp32_s2_dx_3_28_28_3.bin";
   auto res = CompareRelativeOutput(out_data, output_path);
   EXPECT_EQ(res, 0);

   delete[] x_data;
   delete[] yt_data;
-  // delete[] out_data;
-  // delete conv_param;
   x_tensor.set_data(nullptr);
   yt_tensor.set_data(nullptr);
   delete kernel;
@@ -340,15 +338,15 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) {

   size_t input_size;

-  auto x_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
+  auto x_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/avgpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
   ASSERT_NE(x_data, nullptr);
   std::vector<int> dim_x({pool->output_batch_, pool->input_h_, pool->input_w_, pool->input_channel_});
   lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
   x_tensor.set_data(x_data);

-  auto yt_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/avgpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
+  auto yt_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/avgpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
   ASSERT_NE(yt_data, nullptr);
   std::vector<int> dim_y({pool->output_batch_, pool->output_h_, pool->output_w_, pool->output_channel_});
   lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
@@ -376,15 +374,13 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) {
   ret = kernel->Run();
   EXPECT_EQ(0, ret);

-  std::string output_path = "./test_data/pooling/avgpoolgradfp32_s3_dx_3_28_28_3.bin";
+  std::string output_path = "./pooling/avgpoolgradfp32_s3_dx_3_28_28_3.bin";
   auto res = CompareRelativeOutput(out_data, output_path);

   EXPECT_EQ(res, 0);

   delete[] x_data;
   delete[] yt_data;
-  // delete[] out_data;
-  // delete conv_param;
   x_tensor.set_data(nullptr);
   yt_tensor.set_data(nullptr);
   delete kernel;
@@ -406,15 +402,15 @@ TEST_F(TestPoolingGradFp32, MaxPoolingGradFp32) {
     pooling_param->output_batch_ * pooling_param->output_channel_ * pooling_param->output_h_ * pooling_param->output_w_;

   size_t input_size;
-  std::string i_path = "./test_data/pooling/maxpoolgradfp32_1_x_1_28_28_3.bin";
+  std::string i_path = "./pooling/maxpoolgradfp32_1_x_1_28_28_3.bin";
   auto in_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(i_path.c_str(), &input_size));
   ASSERT_NE(in_data, nullptr);

-  std::string dy_path = "./test_data/pooling/maxpoolgradfp32_1_dy_1_28_28_3.bin";
+  std::string dy_path = "./pooling/maxpoolgradfp32_1_dy_1_28_28_3.bin";
   auto dy_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dy_path.c_str(), &input_size));
   ASSERT_NE(dy_data, nullptr);

-  std::string dx_path = "./test_data/pooling/maxpoolgradfp32_1_dx_1_28_28_3.bin";
+  std::string dx_path = "./pooling/maxpoolgradfp32_1_dx_1_28_28_3.bin";
   auto dx_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(dx_path.c_str(), &input_size));
   ASSERT_NE(dx_data, nullptr);
   int in_batch_size =
@@ -443,7 +439,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolingGradFp32) {
     std::cout << output_data[i] << " ,";
   }
   std::cout << std::endl;
-  std::string output_path = "./test_data/pooling/maxpoolgradfp32_1_xgrad_1_28_28_3.bin";
+  std::string output_path = "./pooling/maxpoolgradfp32_1_xgrad_1_28_28_3.bin";
   auto res = CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);

@@ -469,22 +465,22 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {

   size_t input_size;

-  auto x_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_x_3_28_28_3.bin", &input_size));
+  auto x_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_1_x_3_28_28_3.bin", &input_size));
   ASSERT_NE(x_data, nullptr);
   std::vector<int> dim_x({3, 28, 28, 3});
   lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
   x_tensor.set_data(x_data);

-  auto y_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dx_3_28_28_3.bin", &input_size));
+  auto y_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_1_dx_3_28_28_3.bin", &input_size));
   ASSERT_NE(y_data, nullptr);
   std::vector<int> dim_y({3, 28, 28, 3});
   lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
   y_tensor.set_data(y_data);

-  auto yt_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_1_dy_3_28_28_3.bin", &input_size));
+  auto yt_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_1_dy_3_28_28_3.bin", &input_size));
   ASSERT_NE(yt_data, nullptr);
   lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
   yt_tensor.set_data(yt_data);
@@ -511,7 +507,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {
   ret = kernel->Run();
   EXPECT_EQ(0, ret);

-  std::string output_path = "./test_data/pooling/maxpoolgradfp32_1_xgrad_3_28_28_3.bin";
+  std::string output_path = "./pooling/maxpoolgradfp32_1_xgrad_3_28_28_3.bin";
   auto res = CompareRelativeOutput(out_data, output_path);

   EXPECT_EQ(res, 0);
@@ -519,8 +515,6 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) {
   delete[] x_data;
   delete[] y_data;
   delete[] yt_data;
-  // delete[] out_data;
-  // delete conv_param;
   x_tensor.set_data(nullptr);
   y_tensor.set_data(nullptr);
   yt_tensor.set_data(nullptr);
@@ -547,22 +541,22 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {

   size_t input_size;

-  auto x_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
+  auto x_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s2_x_3_28_28_3.bin", &input_size));
   ASSERT_NE(x_data, nullptr);
   std::vector<int> dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_});
   lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
   x_tensor.set_data(x_data);

-  auto y_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dx_3_28_28_3.bin", &input_size));
+  auto y_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s2_dx_3_28_28_3.bin", &input_size));
   ASSERT_NE(y_data, nullptr);
   std::vector<int> dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_});
   lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
   y_tensor.set_data(y_data);

-  auto yt_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
+  auto yt_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s2_dy_3_28_28_3.bin", &input_size));
   ASSERT_NE(yt_data, nullptr);
   lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
   yt_tensor.set_data(yt_data);
@@ -590,7 +584,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {
   ret = kernel->Run();
   EXPECT_EQ(0, ret);

-  std::string output_path = "./test_data/pooling/maxpoolgradfp32_s2_xgrad_3_28_28_3.bin";
+  std::string output_path = "./pooling/maxpoolgradfp32_s2_xgrad_3_28_28_3.bin";
   auto res = CompareRelativeOutput(out_data, output_path);

   EXPECT_EQ(res, 0);
@@ -598,8 +592,6 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) {
   delete[] x_data;
   delete[] y_data;
   delete[] yt_data;
-  // delete[] out_data;
-  // delete conv_param;
   x_tensor.set_data(nullptr);
   y_tensor.set_data(nullptr);
   yt_tensor.set_data(nullptr);
@@ -626,22 +618,22 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {

   size_t input_size;

-  auto x_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
+  auto x_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s3_x_3_28_28_3.bin", &input_size));
   ASSERT_NE(x_data, nullptr);
   std::vector<int> dim_x({maxpool->output_batch_, maxpool->input_h_, maxpool->input_w_, maxpool->input_channel_});
   lite::Tensor x_tensor(TypeId::kNumberTypeFloat32, dim_x);
   x_tensor.set_data(x_data);

-  auto y_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dx_3_28_28_3.bin", &input_size));
+  auto y_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s3_dx_3_28_28_3.bin", &input_size));
   ASSERT_NE(y_data, nullptr);
   std::vector<int> dim_y({maxpool->output_batch_, maxpool->output_h_, maxpool->output_w_, maxpool->output_channel_});
   lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
   y_tensor.set_data(y_data);

-  auto yt_data = reinterpret_cast<float *>(
-    mindspore::lite::ReadFile("./test_data/pooling/maxpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
+  auto yt_data =
+    reinterpret_cast<float *>(mindspore::lite::ReadFile("./pooling/maxpoolgradfp32_s3_dy_3_28_28_3.bin", &input_size));
   ASSERT_NE(yt_data, nullptr);
   lite::Tensor yt_tensor(TypeId::kNumberTypeFloat32, dim_y);
   yt_tensor.set_data(yt_data);
@@ -669,7 +661,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {
   ret = kernel->Run();
   EXPECT_EQ(0, ret);

-  std::string output_path = "./test_data/pooling/maxpoolgradfp32_s3_xgrad_3_28_28_3.bin";
+  std::string output_path = "./pooling/maxpoolgradfp32_s3_xgrad_3_28_28_3.bin";
   auto res = CompareRelativeOutput(out_data, output_path);

   EXPECT_EQ(res, 0);
@@ -677,8 +669,6 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) {
   delete[] x_data;
   delete[] y_data;
   delete[] yt_data;
-  // delete[] out_data;
-  // delete conv_param;
   x_tensor.set_data(nullptr);
   y_tensor.set_data(nullptr);
   yt_tensor.set_data(nullptr);
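Every hunk above follows the same fixture pattern: a raw float32 blob is read with mindspore::lite::ReadFile, wrapped in a lite::Tensor, the grad kernel is run, and the result is checked with CompareOutput or CompareRelativeOutput against a reference blob. The only change in this commit is that the .bin fixtures are now resolved relative to the test working directory rather than a test_data/ prefix. The helpers below are a minimal standalone sketch of what such read-and-compare utilities might look like; the names ReadBinFile and CompareRelative, the tolerance, and the main driver are illustrative assumptions, not the project's actual implementations.

// Hypothetical stand-ins for ReadFile/CompareRelativeOutput-style test helpers,
// written against the standard library only so the sketch compiles on its own.
#include <cmath>
#include <cstdio>
#include <fstream>
#include <string>
#include <vector>

// Read a whole binary file into a float vector; *size receives the byte count.
static std::vector<float> ReadBinFile(const std::string &path, size_t *size) {
  std::ifstream ifs(path, std::ios::binary | std::ios::ate);
  if (!ifs) {
    *size = 0;
    return {};
  }
  *size = static_cast<size_t>(ifs.tellg());
  std::vector<float> data(*size / sizeof(float));
  ifs.seekg(0);
  ifs.read(reinterpret_cast<char *>(data.data()), static_cast<std::streamsize>(*size));
  return data;
}

// Element-wise relative comparison against a reference blob; returns 0 on success,
// mirroring how the tests use EXPECT_EQ(res, 0).
static int CompareRelative(const float *out, size_t count, const std::string &ref_path, float tol = 1e-4f) {
  size_t ref_bytes = 0;
  std::vector<float> ref = ReadBinFile(ref_path, &ref_bytes);
  if (ref.size() != count) return -1;
  for (size_t i = 0; i < count; ++i) {
    float denom = std::fabs(ref[i]) > 1e-12f ? std::fabs(ref[i]) : 1.0f;
    if (std::fabs(out[i] - ref[i]) / denom > tol) {
      std::printf("mismatch at %zu: got %f, expected %f\n", i, out[i], ref[i]);
      return 1;
    }
  }
  return 0;
}

int main() {
  // With the paths shortened in this commit, the fixture is expected to sit next to
  // the working directory of the test binary (e.g. ./pooling/...).
  size_t input_size = 0;
  std::vector<float> dy = ReadBinFile("./pooling/avgpoolgradfp32_1_dy_1_28_28_3.bin", &input_size);
  std::printf("read %zu bytes (%zu floats)\n", input_size, dy.size());
  // A real test would now run the kernel and call CompareRelative on its output.
  return 0;
}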
@@ -34,14 +34,14 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
   ASSERT_NE(sce_param, nullptr);
   size_t input_size;

-  std::string input_path = "./test_data/operators/sce_fp32_1_y_6_4.bin";
+  std::string input_path = "./operators/sce_fp32_1_y_6_4.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);
   std::vector<int> dim_y({6, 4});
   lite::Tensor y_tensor(TypeId::kNumberTypeFloat32, dim_y);
   y_tensor.set_data(input_data);

-  std::string label_path = "./test_data/operators/sce_fp32_1_l_6.bin";
+  std::string label_path = "./operators/sce_fp32_1_l_6.bin";
   auto ll_labels = reinterpret_cast<int64_t *>(mindspore::lite::ReadFile(label_path.c_str(), &input_size));
   ASSERT_NE(ll_labels, nullptr);
   auto labels = new float[6 * 4];
@@ -86,7 +86,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {

   printf("==================Testing Grad===============\n");

-  std::string output_path = "./test_data/operators/sce_fp32_1_loss_1.bin";
+  std::string output_path = "./operators/sce_fp32_1_loss_1.bin";
   CompareOutput(loss, 1, output_path);

   ((mindspore::kernel::SparseSoftmaxCrossEntropyWithLogitsCPUKernel *)kernel_obj)->Train();
@@ -100,7 +100,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
     std::cout << grad[i] << " ,";
   }
   std::cout << std::endl;
-  std::string grad_path = "./test_data/operators/sce_fp32_1_dy_6_4.bin";
+  std::string grad_path = "./operators/sce_fp32_1_dy_6_4.bin";
   auto res = CompareRelativeOutput(grad, grad_path);
   EXPECT_EQ(res, 0);

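The softmax-cross-entropy hunk above reads the logits as float32 and the labels as int64 indices, then allocates a float[6 * 4] buffer, presumably to expand the sparse labels into one-hot form before the kernel runs. The assertions come from googletest: ASSERT_NE aborts the current test when a fixture cannot be loaded, while EXPECT_EQ records a failure and keeps going. Below is a minimal sketch of that pattern, assuming a trivial fixture and a made-up one-hot helper (link against gtest_main); none of these names are from the MindSpore tree.

// Minimal googletest sketch of the fixture/assert pattern used by these tests.
// The fixture, test, and helper names are illustrative only.
#include <gtest/gtest.h>

#include <cstdint>
#include <vector>

class TestSceSketch : public ::testing::Test {
 protected:
  // Shared setup could load fixture files once per test here.
  void SetUp() override {}
};

// Convert sparse int64 class indices into one-hot float labels, the kind of
// preprocessing a softmax-cross-entropy test needs before feeding the kernel.
static std::vector<float> OneHot(const std::vector<int64_t> &idx, int num_classes) {
  std::vector<float> labels(idx.size() * num_classes, 0.0f);
  for (size_t i = 0; i < idx.size(); ++i) {
    labels[i * num_classes + static_cast<size_t>(idx[i])] = 1.0f;
  }
  return labels;
}

TEST_F(TestSceSketch, OneHotLabels) {
  std::vector<int64_t> sparse = {0, 3, 1};
  auto labels = OneHot(sparse, 4);
  ASSERT_NE(labels.size(), 0u);        // abort immediately if setup failed
  EXPECT_EQ(labels[0 * 4 + 0], 1.0f);  // non-fatal checks keep running
  EXPECT_EQ(labels[1 * 4 + 3], 1.0f);
  EXPECT_EQ(labels[2 * 4 + 1], 1.0f);
}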
@@ -69,10 +69,10 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis0) {
   ASSERT_NE(sum_mul, nullptr);
   std::vector<int> shape = {1, 9, 11, 12};
   size_t input_size;
-  std::string input_path = "./test_data/softmax/softmaxgrad_yinput.bin";
+  std::string input_path = "./softmax/softmaxgrad_yinput.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);
-  std::string yt_path = "./test_data/softmax/softmaxgrad_yt_input.bin";
+  std::string yt_path = "./softmax/softmaxgrad_yt_input.bin";
   auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
   ASSERT_NE(yt_data, nullptr);
   // runtime part
@@ -96,7 +96,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis0) {
   time_avg = cost / loop_count;
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-  std::string output_path = "./test_data/softmax/softmaxgrad_out.bin";
+  std::string output_path = "./softmax/softmaxgrad_out.bin";

   auto res = CompareRelativeOutput(out_data, output_path);
   EXPECT_EQ(res, 0);
@@ -130,11 +130,11 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis1) {

   std::vector<int> shape = {1, 9, 11, 12};
   size_t input_size;
-  std::string input_path = "./test_data/softmax/softmaxgrad_1_yinput.bin";
+  std::string input_path = "./softmax/softmaxgrad_1_yinput.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);

-  std::string yt_path = "./test_data/softmax/softmaxgrad_1_yt_input.bin";
+  std::string yt_path = "./softmax/softmaxgrad_1_yt_input.bin";
   auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
   ASSERT_NE(yt_data, nullptr);

@@ -160,7 +160,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis1) {
   time_avg = cost / loop_count;
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-  std::string output_path = "./test_data/softmax/softmaxgrad_1_out.bin";
+  std::string output_path = "./softmax/softmaxgrad_1_out.bin";
   // auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

   auto res = CompareRelativeOutput(out_data, output_path);
@@ -195,11 +195,11 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis2) {

   std::vector<int> shape = {1, 9, 11, 12};
   size_t input_size;
-  std::string input_path = "./test_data/softmax/softmaxgrad_2_yinput.bin";
+  std::string input_path = "./softmax/softmaxgrad_2_yinput.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);

-  std::string yt_path = "./test_data/softmax/softmaxgrad_2_yt_input.bin";
+  std::string yt_path = "./softmax/softmaxgrad_2_yt_input.bin";
   auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
   ASSERT_NE(yt_data, nullptr);

@@ -225,7 +225,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis2) {
   time_avg = cost / loop_count;
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-  std::string output_path = "./test_data/softmax/softmaxgrad_2_out.bin";
+  std::string output_path = "./softmax/softmaxgrad_2_out.bin";
   // auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

   auto res = CompareRelativeOutput(out_data, output_path);
@@ -260,10 +260,10 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis3) {

   std::vector<int> shape = {1, 9, 11, 12};
   size_t input_size;
-  std::string input_path = "./test_data/softmax/softmaxgrad_3_yinput.bin";
+  std::string input_path = "./softmax/softmaxgrad_3_yinput.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);
-  std::string yt_path = "./test_data/softmax/softmaxgrad_3_yt_input.bin";
+  std::string yt_path = "./softmax/softmaxgrad_3_yt_input.bin";
   auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
   ASSERT_NE(yt_data, nullptr);

@@ -289,7 +289,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxis3) {
   time_avg = cost / loop_count;
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-  std::string output_path = "./test_data/softmax/softmaxgrad_3_out.bin";
+  std::string output_path = "./softmax/softmaxgrad_3_out.bin";
   // auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

   auto res = CompareRelativeOutput(out_data, output_path);
@@ -325,11 +325,11 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxisMinus1) {

   std::vector<int> shape = {1, 9, 11, 12};
   size_t input_size;
-  std::string input_path = "./test_data/softmax/softmaxgrad_-1_yinput.bin";
+  std::string input_path = "./softmax/softmaxgrad_-1_yinput.bin";
   auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
   ASSERT_NE(input_data, nullptr);

-  std::string yt_path = "./test_data/softmax/softmaxgrad_-1_yt_input.bin";
+  std::string yt_path = "./softmax/softmaxgrad_-1_yt_input.bin";
   auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size));
   ASSERT_NE(yt_data, nullptr);
   // runtime part
@@ -354,7 +354,7 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxisMinus1) {
   time_avg = cost / loop_count;
   printf("single thread running time : %f ms\n", time_avg / 1000.0f);

-  std::string output_path = "./test_data/softmax/softmaxgrad_-1_out.bin";
+  std::string output_path = "./softmax/softmaxgrad_-1_out.bin";
   // auto output_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));

   auto res = CompareRelativeOutput(out_data, output_path);
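Several of the softmax-grad hunks end with the same micro-benchmark: the kernel is run loop_count times, the elapsed microseconds are accumulated into cost, and time_avg = cost / loop_count is printed as milliseconds. Below is a standalone sketch of that timing loop with std::chrono; DummyKernelRun and the loop count are placeholders for the real kernel->Run() call, not part of the test sources.

// Standalone sketch of the timing loop behind the printed line
// "single thread running time : %f ms". DummyKernelRun stands in for kernel->Run().
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdio>

static volatile float sink = 0.0f;

static void DummyKernelRun() {
  float acc = 0.0f;
  for (int i = 0; i < (1 << 16); ++i) acc += std::sqrt(static_cast<float>(i));
  sink = acc;  // keep the work from being optimized away
}

int main() {
  const int loop_count = 100;  // assumed; each test picks its own loop count
  uint64_t cost = 0;           // accumulated microseconds
  for (int i = 0; i < loop_count; ++i) {
    auto start = std::chrono::steady_clock::now();
    DummyKernelRun();
    auto end = std::chrono::steady_clock::now();
    cost += std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
  }
  double time_avg = static_cast<double>(cost) / loop_count;
  std::printf("single thread running time : %f ms\n", time_avg / 1000.0f);
  return 0;
}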
Binary file not shown.