forked from mindspore-Ecosystem/mindspore
update auto install scripts
This commit is contained in:
parent 221246711a
commit 9b9d688c4c

@@ -1,5 +1,4 @@
#!/bin/bash

sudo apt-get --purge remove nvidia-*
sudo apt-get --purge remove cuda-*
sudo apt-get --purge remove cudnn-*
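# Optional sanity check (a minimal sketch; assumes the dpkg-based setup above):
# verify that no NVIDIA/CUDA/cuDNN packages remain before reinstalling.
# dpkg -l | grep -Ei "nvidia|cuda|cudnn"    # prints nothing once the purge is complete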

@@ -2,6 +2,7 @@
set -ex

MINDSPORE_VERSION=${MINDSPORE_VERSION:-1.5.0}
PYTHON_VERSION=${PYTHON_VERSION:-3.7.5}
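# The ${VAR:-default} expansions above make both versions overridable from the
# caller's environment; a hypothetical invocation (script name assumed) could be:
# MINDSPORE_VERSION=1.5.0 PYTHON_VERSION=3.7.5 bash ./mindspore-cpu-conda-install.sh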
#use huaweicloud mirror in China
sudo sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list

@@ -14,14 +15,14 @@ sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.7 100

cd /tmp
curl -O https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py37_4.10.3-Linux-x86_64.sh
bash Miniconda3-py37_4.10.3-Linux-x86_64.sh
bash Miniconda3-py37_4.10.3-Linux-x86_64.sh -b

# add conda to PATH
echo -e 'export PATH=~/miniconda3/bin/:$PATH' >> ~/.bash_profile
echo -e '. ~/miniconda3/etc/profile.d/conda.sh' >> ~/.bash_profile
source ~/.bash_profile
conda init bash
# setting up conda mirror
# setting up conda mirror with Tsinghua (TUNA) source
cat >~/.condarc <<END
channels:
- defaults

@@ -42,6 +43,25 @@ END

#initialize conda env and install mindspore-cpu

conda create -n py37 python=3.7.5 -y
conda activate py37
conda create -n ms_${PYTHON_VERSION} python=${PYTHON_VERSION} -y
conda activate ms_${PYTHON_VERSION}
conda install mindspore-cpu=${MINDSPORE_VERSION} -c mindspore -c conda-forge -y
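# In a new login shell the same environment can be re-entered before running any
# MindSpore code (a minimal sketch; env name follows the ms_${PYTHON_VERSION} pattern above):
# source ~/.bash_profile
# conda activate ms_3.7.5
# python -c "import mindspore; print(mindspore.__version__)"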

# check if it is the right mindspore version
python -c "import mindspore;mindspore.run_check()"

# check if it can be run with CPU
cat > example.py <<END
import numpy as np
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.context as context

context.set_context(device_target="CPU")
x = Tensor(np.ones([1,3,3,4]).astype(np.float32))
y = Tensor(np.ones([1,3,3,4]).astype(np.float32))
print(ops.add(x, y))
END

python example.py

@@ -4,29 +4,20 @@ set -ex
MINDSPORE_VERSION=${MINDSPORE_VERSION:-1.5.0}
PYTHON_VERSION=${PYTHON_VERSION:-3.7.5}
MINDSPORE_VERSION=${MINDSPORE_VERSION:-1.5.0}
CUDA_VERSION=${CUDA_VERSION:-8.0.5.39-1+cuda11.1}
CUDA_VERSION=${CUDA_VERSION:-11.1.1-1}
LIB_CUDA_VERSION=${LIB_CUDA_VERSION:-8.0.5.39-1+cuda11.1}
DISTRIBUTED=${DISTRIBUTED:-false}
CUDA_INSTALL_PATH=${CUDA_INSTALL_PATH:-cuda-11}
CUDATOOLKIT_VERSION=${CUDATOOLKIT_VERSION:-11.1}
CUDNN_VERSION=${CUDNN_VERSION:-8.0.5}

#use huaweicloud mirror in China
sudo sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list
sudo sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list
sudo apt-get update

# install python 3.7 and make it default
sudo apt-get install gcc-7 libgmp-dev curl python3.7 -y
sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.7 100

cd /tmp
curl -O https://mirrors.tuna.tsinghua.edu.cn/anaconda/miniconda/Miniconda3-py37_4.10.3-Linux-x86_64.sh
bash Miniconda3-py37_4.10.3-Linux-x86_64.sh

# add conda to PATH
echo -e 'export PATH=~/miniconda3/bin/:$PATH' >> ~/.bash_profile
echo -e '. ~/miniconda3/etc/profile.d/conda.sh' >> ~/.bash_profile
source ~/.bash_profile
echo -e 'export PATH=~/miniconda3/bin/:$PATH' >> ~/.bash_profile
echo -e '. ~/miniconda3/etc/profile.d/conda.sh' >> ~/.bash_profile
source ~/.bash_profile
conda init bash
# setting up conda mirror
cat >~/.condarc <<END

@@ -49,68 +40,39 @@ END

#initialize conda env and install mindspore-cpu

conda create -n py37 python=3.7.5 -y
conda activate py37
conda create -n ms_${PYTHON_VERSION} python=${PYTHON_VERSION} -y
conda activate ms_${PYTHON_VERSION}

# install gmp 6.1.2, downloading gmp is slow
echo "install gmp start"

sudo apt-get install m4 -y
cd /tmp
curl -O https://gmplib.org/download/gmp/gmp-6.1.2.tar.xz
xz -d gmp-6.1.2.tar.xz
tar xvzf gmp-6.1.2.tar && cd gmp-6.1.2
./configure --prefix=/usr/local/gmp-6.1.2
make
sudo make install
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/gmp-6.1.2/lib' >> ~/.bash_profile

echo "install gmp success"

# install cuda with apt-get
echo "install cuda start"

wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget -c https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
sudo apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
sudo apt-get update
sudo apt-get -y install cuda

# add cuda to path
cat >> ~/.bash_profile <<END
export PATH=/usr/local/cuda/bin:\$PATH
export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/cuda/lib64
END
source ~/.bash_profile

echo "cuda install success."
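# Quick check that the toolkit is usable (a minimal sketch; assumes the exports
# above have been sourced and an NVIDIA driver is already installed):
# nvcc --version     # should report CUDA 11.1
# nvidia-smi         # should list the GPU and a compatible driver version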

# install cudnn
cd /tmp
wget https://developer.download.nvidia.cn/compute/cuda/repos/ubuntu1804/x86_64/libcudnn8_${CUDA_VERSION}_amd64.deb
wget https://developer.download.nvidia.cn/compute/cuda/repos/ubuntu1804/x86_64/libcudnn8-dev_${CUDA_VERSION}_amd64.deb
sudo dpkg -i libcudnn8_${CUDA_VERSION}_amd64.deb libcudnn8-dev_${CUDA_VERSION}_amd64.deb

# Install CuDNN 8 and NCCL 2
wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
sudo dpkg -i nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
sudo apt update
sudo apt install -y libcudnn8=${CUDA_VERSION} libcudnn8-dev=${CUDA_VERSION} libnccl2=2.7.8-1+cuda11.1 libnccl-dev=2.7.8-1+cuda11.1
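# Optional verification (a minimal sketch; package names as installed above):
# dpkg -l | grep -E "libcudnn8|libnccl2"   # both should show the pinned 8.0.5 / 2.7.8 versions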

# optional (tensorrt for serving, openmpi for distributed training)
if [[ "$DISTRIBUTED" == "false" ]]; then
|
||||
cd /tmp
|
||||
curl -O https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz
|
||||
tar xvzf openmpi-4.0.3.tar.gz
|
||||
cd openmpi-4.0.3
|
||||
./configure --prefix=/usr/local/openmpi
|
||||
make
|
||||
sudo make install
|
||||
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/openmpi/lib' >> ~/.bash_profile
|
||||
echo 'export PATH=$PATH:/usr/local/openmpi/bin' >> ~/.bash_profile
|
||||
source ~/.bash_profile
|
||||
fi
|
||||
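# Once OpenMPI is on PATH, a multi-process run is typically launched through mpirun
# (a minimal sketch; train.py is a hypothetical training script, not part of this repo):
# mpirun -n 2 python train.py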
# echo "install gmp start"
|
||||
# sudo apt-get install m4 -y
|
||||
# cd /tmp
|
||||
# curl -O https://gmplib.org/download/gmp/gmp-6.1.2.tar.xz
|
||||
# xz -d gmp-6.1.2.tar.xz
|
||||
# tar xvzf gmp-6.1.2.tar && cd gmp-6.1.2
|
||||
# ./configure --prefix=/usr/local/gmp-6.1.2
|
||||
# make
|
||||
# sudo make install
|
||||
# echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/gmp-6.1.2/lib' >> ~/.bash_profile
|
||||
|
||||
# install mindspore-gpu with conda
|
||||
conda install mindspore-gpu=${MINDSPORE_VERSION} cudatoolkit=${CUDATOOLKIT_VERSION} cudnn=${CUDNN_VERSION} -c mindspore -c conda-forge
|
||||
conda install mindspore-gpu=${MINDSPORE_VERSION} cudatoolkit=${CUDATOOLKIT_VERSION} -c mindspore -c conda-forge -y
|
||||
|
||||
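# Optional check of what conda resolved (a minimal sketch, using the channels above):
# conda list "mindspore-gpu|cudatoolkit"   # versions should match MINDSPORE_VERSION / CUDATOOLKIT_VERSION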

# check if it is the right mindspore version
python -c "import mindspore;mindspore.run_check()"

# check if it can be run with GPU
cat > example.py <<END
import numpy as np
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.context as context

context.set_context(device_target="GPU")
x = Tensor(np.ones([1,3,3,4]).astype(np.float32))
y = Tensor(np.ones([1,3,3,4]).astype(np.float32))
print(ops.add(x, y))
END

python example.py

@@ -5,14 +5,14 @@ PYTHON_VERSION=${PYTHON_VERSION:-3.7.5}
MINDSPORE_VERSION=${MINDSPORE_VERSION:-1.5.0}
CUDA_VERSION=${CUDA_VERSION:-8.0.5.39-1+cuda11.1}
DISTRIBUTED=${DISTRIBUTED:-false}
CUDA_INSTALL_PATH=${CUDA_INSTALL_PATH:-cuda-11}
CUDA_VERSION=${CUDA_VERSION:-11.1.1-1}
CUDA_INSTALL_PATH=${CUDA_INSTALL_PATH:-cuda-11.1}
LIBNCCL2_VERSION=${LIBNCCL2_VERSION:-2.7.8-1+cuda11.1}
ARCH=$(uname -m)

if [[ "${PYTHON_VERSION}" == "3.7.5" ]]; then
VERSION="${MINDSPORE_VERSION}-cp37-cp37m"
else
VERSION="${MINDSPORE_VERSION}-cp39-cp39m"
fi
declare -A version_map=()
version_map["3.7.5"]="${MINDSPORE_VERSION}-cp37-cp37m"
version_map["3.9.0"]="${MINDSPORE_VERSION}-cp39-cp39m"
|
||||
|
||||
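# With the defaults above the map resolves the wheel tag like this
# (a minimal sketch; bash 4+ is assumed for the associative array):
# echo "${version_map[$PYTHON_VERSION]}"   # -> 1.5.0-cp37-cp37m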

#use huaweicloud mirror in China
sudo sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list

@@ -20,7 +20,7 @@ sudo sed -i "s@http://.*security.ubuntu.com@http://repo.huaweicloud.com@g" /etc/
sudo apt-get update

# install python 3.7 and make it default
sudo apt-get install gcc-7 libgmp-dev curl python3.7 openssl ubuntu-drivers-common openssl -y
sudo apt-get install gcc-7 libgmp-dev curl python3.7 openssl ubuntu-drivers-common openssl software-properties-common -y
sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.7 100

cd /tmp

@@ -28,9 +28,7 @@ curl -O https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py

# add pip mirror

mkdir ~/.pip
mkdir -p ~/.pip
cat > ~/.pip/pip.conf <<END
[global]
index-url = https://repo.huaweicloud.com/repository/pypi/simple

@@ -38,28 +36,55 @@ trusted-host = repo.huaweicloud.com
timeout = 120
END
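# The mirror settings can be confirmed before installing anything
# (a minimal sketch; pip still reads the legacy ~/.pip/pip.conf location):
# pip config list   # should show global.index-url pointing at repo.huaweicloud.com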

# install gmp 6.1.2, downloading gmp is slow
# install nvidia driver if not present
# root@ecs-gpu-testing:~# ubuntu-drivers devices
# == /sys/devices/pci0000:20/0000:20:00.0/0000:21:01.0 ==
# modalias : pci:v000010DEd00001EB8sv000010DEsd000012A2bc03sc02i00
# vendor : NVIDIA Corporation
# driver : nvidia-driver-418 - third-party non-free
# driver : nvidia-driver-450 - third-party non-free
# driver : nvidia-driver-460 - third-party non-free
# driver : nvidia-driver-450-server - distro non-free
# driver : nvidia-driver-460-server - distro non-free
# driver : nvidia-driver-440 - third-party non-free
# driver : nvidia-driver-418-server - distro non-free
# driver : nvidia-driver-465 - third-party non-free
# driver : nvidia-driver-470 - third-party non-free recommended #pick the latest one
# driver : nvidia-driver-410 - third-party non-free
# driver : nvidia-driver-470-server - distro non-free
# driver : nvidia-driver-455 - third-party non-free
# driver : xserver-xorg-video-nouveau - distro free builtin
# sudo apt-get install nvidia-driver-470 -y
# nvidia-smi # run this to check the driver is working
#root@ecs-testing:~# nvidia-smi
#Thu Dec 30 21:06:13 2021
#+-----------------------------------------------------------------------------+
#| NVIDIA-SMI 460.73.01 Driver Version: 460.73.01 CUDA Version: 11.2 |
#|-------------------------------+----------------------+----------------------+
#| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
#| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
#| | | MIG M. |
#|===============================+======================+======================|
#| 0 Tesla T4 Off | 00000000:21:01.0 Off | 0 |
#| N/A 61C P0 29W / 70W | 0MiB / 15109MiB | 0% Default |
#| | | N/A |
#+-------------------------------+----------------------+----------------------+
#
#+-----------------------------------------------------------------------------+
#| Processes: |
#| GPU GI CI PID Type Process name GPU Memory |
#| ID ID Usage |
#|=============================================================================|
#| No running processes found |
#+-----------------------------------------------------------------------------+

sudo apt-get install m4 -y
cd /tmp
curl -O https://gmplib.org/download/gmp/gmp-6.1.2.tar.xz
xz -d gmp-6.1.2.tar.xz
tar xvzf gmp-6.1.2.tar && cd gmp-6.1.2
./configure --prefix=/usr/local/gmp-6.1.2
make
sudo make install
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/gmp-6.1.2/lib' >> ~/.bash_profile

# install cuda with linux.run
# another option is to use https://developer.download.nvidia.com/compute/cuda/11.1.0/local_installers/cuda_11.1.0_455.23.05_linux.run

wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
sudo mv cuda-ubuntu1804.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget -c https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1804-11-1-local_11.1.1-455.32.00-1_amd64.deb
sudo apt-key add /var/cuda-repo-ubuntu1804-11-1-local/7fa2af80.pub
# install cuda/cudnn/nccl2 with apt-get
# another option is to use linux.run https://developer.download.nvidia.com/compute/cuda/11.1.0/local_installers/cuda_11.1.0_455.23.05_linux.run
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub
sudo add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /"
sudo add-apt-repository "deb https://developer.download.nvidia.cn/compute/machine-learning/repos/ubuntu1804/x86_64/ /"
sudo apt-get update
sudo apt-get -y install cuda
sudo apt-get -y install cuda=${CUDA_VERSION}
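# The pinned version string can be checked against what the repo actually offers
# (a minimal sketch; 11.1.1-1 matches the CUDA_VERSION default above):
# apt-cache policy cuda   # candidate versions should include 11.1.1-1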

# add cuda to path
cat >> ~/.bash_profile <<END

@@ -67,35 +92,43 @@ export PATH=/usr/local/cuda/bin:\$PATH
export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/cuda/lib64
END
source ~/.bash_profile

echo "cuda install success."

# install cudnn
cd /tmp
wget https://developer.download.nvidia.cn/compute/cuda/repos/ubuntu1804/x86_64/libcudnn8_${CUDA_VERSION}_amd64.deb
wget https://developer.download.nvidia.cn/compute/cuda/repos/ubuntu1804/x86_64/libcudnn8-dev_${CUDA_VERSION}_amd64.deb
sudo dpkg -i libcudnn8_${CUDA_VERSION}_amd64.deb libcudnn8-dev_${CUDA_VERSION}_amd64.deb

# Install CuDNN 8 and NCCL 2
wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
sudo dpkg -i nvidia-machine-learning-repo-ubuntu1804_1.0.0-1_amd64.deb
sudo apt update
sudo apt install -y libcudnn8=${CUDA_VERSION} libcudnn8-dev=${CUDA_VERSION} libnccl2=2.7.8-1+cuda11.1 libnccl2-dev=2.7.8-1+cuda11.1
sudo apt-get install -y libcudnn8=${CUDA_VERSION} libcudnn8-dev=${CUDA_VERSION} libnccl2=${LIBNCCL2_VERSION} libnccl-dev=${LIBNCCL2_VERSION}

# optional (tensorrt for serving, openmpi for distributed training)
if [[ "$DISTRIBUTED" == "false" ]]; then
|
||||
cd /tmp
|
||||
curl -O https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz
|
||||
tar xvzf openmpi-4.0.3.tar.gz
|
||||
cd openmpi-4.0.3
|
||||
./configure --prefix=/usr/local/openmpi
|
||||
make
|
||||
sudo make install
|
||||
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/openmpi/lib' >> ~/.bash_profile
|
||||
echo 'export PATH=$PATH:/usr/local/openmpi/bin' >> ~/.bash_profile
|
||||
source ~/.bash_profile
|
||||
fi
|
||||
# uncomment this to compile openmpi
|
||||
# cd /tmp
|
||||
# curl -O https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz
|
||||
# tar xvzf openmpi-4.0.3.tar.gz
|
||||
# cd openmpi-4.0.3
|
||||
# ./configure --prefix=/usr/local/openmpi
|
||||
# make
|
||||
# sudo make install
|
||||
# echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/openmpi/lib' >> ~/.bash_profile
|
||||
# echo 'export PATH=$PATH:/usr/local/openmpi/bin' >> ~/.bash_profile
|
||||
# source ~/.bash_profile
|
||||
# reference this to install tensorrt https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#downloading
|
||||
|
||||

# install mindspore gpu
echo "install mindspore gpu ${MINDSPORE_VERSION}"
pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/${MINDSPORE_VERSION}/MindSpore/gpu/${ARCH}/${CUDA_INSTALL_PATH}/mindspore_gpu-${version_map["$PYTHON_VERSION"]}-linux_${ARCH}.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple

pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/${MINDSPORE_VERSION}/MindSpore/gpu/${CUDA_INSTALL_PATH}/mindspore-${VERSION}-linux_${ARCH}.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
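# With the defaults above (and assuming ARCH resolves to x86_64), the first URL
# expands to roughly:
# https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.5.0/MindSpore/gpu/x86_64/cuda-11.1/mindspore_gpu-1.5.0-cp37-cp37m-linux_x86_64.whl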

# check if it is the right mindspore version
python -c "import mindspore;mindspore.run_check()"

# check if it can be run with GPU
cat > example.py <<END
import numpy as np
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.context as context

context.set_context(device_target="GPU")
x = Tensor(np.ones([1,3,3,4]).astype(np.float32))
y = Tensor(np.ones([1,3,3,4]).astype(np.float32))
print(ops.add(x, y))
END

python example.py

@@ -6,12 +6,8 @@ PYTHON_VERSION=${PYTHON_VERSION:-3.7.5}
MINDSPORE_VERSION=${MINDSPORE_VERSION:-1.5.0}
ARCH=`uname -m`

if [[ "${PYTHON_VERSION}" == "3.7.5" ]]; then
VERSION="${MINDSPORE_VERSION}-cp37-cp37m"
else
VERSION="${MINDSPORE_VERSION}-cp39-cp39"
fi

declare -A version_map=()
version_map["3.7.5"]="${MINDSPORE_VERSION}-cp37-cp37m"

#use huaweicloud mirror in China
sudo sed -i "s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g" /etc/apt/sources.list

@@ -26,4 +22,23 @@ cd /tmp
curl -O https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py

pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/${MINDSPORE_VERSION}/MindSpore/cpu/${ARCH}/mindspore-${VERSION}-linux_${ARCH}.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/${MINDSPORE_VERSION}/MindSpore/cpu/${ARCH}/mindspore-${version_map["$PYTHON_VERSION"]}-linux_${ARCH}.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple
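# With the defaults (and assuming ARCH resolves to x86_64), the CPU wheel URL
# expands to roughly:
# https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.5.0/MindSpore/cpu/x86_64/mindspore-1.5.0-cp37-cp37m-linux_x86_64.whl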

# check if it is the right mindspore version
python -c "import mindspore;mindspore.run_check()"

# check if it can be run with CPU
cat > example.py <<END
import numpy as np
from mindspore import Tensor
import mindspore.ops as ops
import mindspore.context as context

context.set_context(device_target="CPU")
x = Tensor(np.ones([1,3,3,4]).astype(np.float32))
y = Tensor(np.ones([1,3,3,4]).astype(np.float32))
print(ops.add(x, y))
END

python example.py