adapt ms_cpu ci for graphkernel

zengzitao 2021-12-03 17:04:54 +08:00
parent 9fb2f887e5
commit 6f4aebbe11
6 changed files with 133 additions and 3 deletions


@@ -28,6 +28,7 @@ option(MODE_ASCEND_ACL "supports ascend acl mode only" OFF)
option(ENABLE_SYM_FILE "enable sym file" OFF)
option(BUILD_DEV_MODE "MindSpore build nightly dev mode" OFF)
option(ENABLE_FAST_HASH_TABLE "Enable use fast hash table instead of std ones" ON)
option(USE_LLVM "use llvm" OFF)
if(NOT ENABLE_D AND NOT ENABLE_TESTCASES AND NOT ENABLE_ACL AND NOT ENABLE_GE)
    set(ENABLE_GLIBCXX ON)

@@ -157,3 +158,7 @@ endif()
if(ENABLE_AKG AND CMAKE_SYSTEM_NAME MATCHES "Linux")
    add_compile_definitions(ENABLE_AKG)
endif()
if(USE_LLVM)
    add_compile_definitions(USE_LLVM)
endif()


@@ -187,10 +187,19 @@ void GraphKernelFlags::Refresh() {
      MS_LOG(WARNING) << "GraphKernel only support GRAPH_MODE";
      opt_level = OptLevel_0;
    }
#ifndef USE_LLVM
    auto is_cpu = (context->get_param<std::string>(MS_CTX_DEVICE_TARGET) == kCPUDevice);
    if (is_cpu) {
      MS_LOG(WARNING) << "GraphKernel is not usable without LLVM on cpu platform";
      opt_level = OptLevel_0;
    }
#endif
  }
#endif
  // Dump flags so that people can check the setting.
  MS_LOG(INFO) << "graph_kernel_flags = \"" << flags_cache_ << "\", all flags: " << DumpAllFlags();
  // If GraphKernel is enabled, dump flags so that people can check the setting.
  if (IsEnableGraphKernel()) {
    MS_LOG(INFO) << "graph_kernel_flags = \"" << flags_cache_ << "\", all flags: " << DumpAllFlags();
  }
}
void GraphKernelFlags::RegisterFlags(std::map<std::string, std::string> *flag_map) {

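A minimal user-side sketch of what the new USE_LLVM guard means in practice, assuming a CPU-only MindSpore build compiled without LLVM; the two `set_context` calls mirror the test added below, the rest is illustration and not part of this commit:

# Sketch only: on a CPU build without LLVM, enabling graph_kernel logs the
# WARNING added above and the effective optimization level falls back to OptLevel_0.
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
context.set_context(enable_graph_kernel=True)
# Networks still run, just without GraphKernel fusion; no user-side API change is required.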
scripts/build/akg_find_llvm.sh (new executable file)

@@ -0,0 +1,42 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Find a suitable LLVM version for AKG.
#
# This script generates a temporary cmake script file
# and executes it with `cmake -P` (cmake script mode).
#
# If no suitable LLVM is found, `find_package` runs normally
# and the `cmake` command exits with status `0`.
#
# If a suitable LLVM is found, `find_package` hits the error
# "add_library command is not scriptable" in the `LLVMExports.cmake` of the LLVM library,
# because `add_library` cannot be used when `cmake` runs in script mode.
# In that case the `cmake` command exits with status `1`.
echo "find_package(LLVM 12 QUIET)" > akg_llvm_tmp.cmake
cmake -P akg_llvm_tmp.cmake > /dev/null 2>&1
result=$?
rm akg_llvm_tmp.cmake
if [ ${result} -eq 0 ]; then
  echo "off"
else
  echo "on"
fi

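For illustration, a hypothetical Python equivalent of the same probe (the temporary-file handling and names are assumptions, not part of the commit); it relies only on the behavior described in the header comment: `cmake -P` exits with `0` when LLVM 12 is absent and with a non-zero status when `find_package` reaches the non-scriptable `add_library` command:

# Hypothetical re-implementation of akg_find_llvm.sh, for illustration only.
import os
import subprocess
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".cmake", delete=False) as tmp:
    tmp.write("find_package(LLVM 12 QUIET)\n")
    script_path = tmp.name
try:
    # Exit status 0 means LLVM 12 was NOT found; non-zero means it was.
    result = subprocess.run(["cmake", "-P", script_path],
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    print("on" if result.returncode != 0 else "off")
finally:
    os.remove(script_path)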

@@ -81,7 +81,7 @@ build_mindspore()
    fi
    if [[ "X$ENABLE_AKG" = "Xon" ]]; then
        CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_AKG=ON"
        if [[ "X$ENABLE_CPU" = "Xon" && "X$ENABLE_D" != "Xon" && "X$ENABLE_GPU" != "Xon" ]]; then
        if [[ "X$USE_LLVM" = "Xon" ]]; then
            CMAKE_ARGS="${CMAKE_ARGS} -DUSE_LLVM=ON"
        fi
    fi


@@ -63,6 +63,7 @@ parse_device()
        fi
    elif [[ "X$DEVICE" == "Xcpu" ]]; then
        export ENABLE_CPU="on"
        export ENABLE_AKG="on"
    elif [[ "X$DEVICE" == "X" ]]; then
        :
    else
@@ -70,4 +71,9 @@ parse_device()
        usage
        exit 1
    fi
    if [[ "X$ENABLE_AKG" == "Xon" && "X$ENABLE_D" != "Xon" ]]; then
        # check llvm version for akg
        export USE_LLVM=`bash ${BASEPATH}/scripts/build/akg_find_llvm.sh`
    fi
}


@@ -0,0 +1,68 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.ops.operations as P


class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.sqrt = P.Sqrt()
        self.add = P.Add()
        self.neg = P.Neg()
        self.mul = P.Mul()

    def construct(self, x0, x1):
        sqrt_res = self.sqrt(x0)
        neg_res = self.neg(sqrt_res)
        add_res = self.add(x1, sqrt_res)
        real_res = self.mul(add_res, add_res)
        return neg_res, real_res


def easy_fuse():
    def get_output(i0, i1, enable_graph_kernel=False):
        context.set_context(enable_graph_kernel=enable_graph_kernel)
        net_obj = Net()
        output = net_obj(i0, i1)
        return output

    i0 = Tensor(np.random.uniform(1, 2, [1, 1024]).astype(np.float32))
    i1 = Tensor(np.random.uniform(1, 2, [1024, 1024]).astype(np.float32))
    expect = get_output(i0, i1, False)
    output = get_output(i0, i1, True)
    expect0_np = expect[0].asnumpy().copy()
    expect1_np = expect[1].asnumpy().copy()
    output0_np = output[0].asnumpy().copy()
    output1_np = output[1].asnumpy().copy()
    assert np.allclose(expect0_np, output0_np, rtol=1.e-4, atol=1.e-4, equal_nan=True)
    assert np.allclose(expect1_np, output1_np, rtol=1.e-4, atol=1.e-4, equal_nan=True)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_easy_fuse_cpu():
    """
    Feature: easy test case for graph_kernel on CPU.
    Description: CPU test case that uses graph_kernel to execute the ops.
    Expectation: the results with graph_kernel enabled match the results with graph_kernel disabled.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    easy_fuse()