mirror of https://github.com/tracel-ai/burn.git
Update TORCH_CUDA_VERSION usage (#1284)
parent 88f5a3e88c
commit 8f753b3aae
@@ -50,7 +50,7 @@ Therefore, creating the tape only requires a simple and efficient graph traversal
 
 ## Cuda
 
-To run with CUDA set `TORCH_CUDA_VERSION=cu113`.
+To run with CUDA set `TORCH_CUDA_VERSION=cu121`.
 
 ## Notes
 
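For context, a minimal sketch of the updated workflow, assuming the `tch` backend's build step downloads a libtorch build matching `TORCH_CUDA_VERSION` and that your driver supports CUDA 12.1:

```sh
# Select the CUDA 12.1 build of libtorch for the tch backend
# (assumption: the build script picks the download based on this variable).
export TORCH_CUDA_VERSION=cu121

# Then build as usual; any tch-backed crate in the workspace picks it up.
cargo build --release
```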
@@ -17,7 +17,7 @@ cargo run --example mnist --release --features ndarray # CPU NdArray
 cargo run --example mnist --release --features ndarray-blas-openblas # CPU NdArray Backend - f32 - blas with openblas
 cargo run --example mnist --release --features ndarray-blas-netlib # CPU NdArray Backend - f32 - blas with netlib
 echo "Using tch backend"
-export TORCH_CUDA_VERSION=cu113 # Set the cuda version
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version
 cargo run --example mnist --release --features tch-gpu # GPU Tch Backend - f32
 cargo run --example mnist --release --features tch-cpu # CPU Tch Backend - f32
 echo "Using wgpu backend"
@@ -23,7 +23,7 @@ cargo run --example regression --release --features ndarray # CPU
 cargo run --example regression --release --features ndarray-blas-openblas # CPU NdArray Backend - f32 - blas with openblas
 cargo run --example regression --release --features ndarray-blas-netlib # CPU NdArray Backend - f32 - blas with netlib
 echo "Using tch backend"
-export TORCH_CUDA_VERSION=cu113 # Set the cuda version
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version
 cargo run --example regression --release --features tch-gpu # GPU Tch Backend - f32
 cargo run --example regression --release --features tch-cpu # CPU Tch Backend - f32
 echo "Using wgpu backend"
@@ -29,7 +29,7 @@ cd burn
 # Use the --release flag to really speed up training.
 # Use the f16 feature if your CUDA device supports FP16 (half precision) operations. May not work well on every device.
 
-export TORCH_CUDA_VERSION=cu117 # Set the cuda version (CUDA users)
+export TORCH_CUDA_VERSION=cu121 # Set the cuda version (CUDA users)
 
 # AG News
 cargo run --example ag-news-train --release --features tch-gpu # Train on the ag news dataset
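Where the README mentions the `f16` feature, it can be combined with the backend feature in a single cargo invocation; a hedged sketch (the exact `tch-gpu,f16` combination is an assumption based on the comment in the hunk above):

```sh
export TORCH_CUDA_VERSION=cu121
# Half-precision training on CUDA devices that support FP16;
# `--features tch-gpu,f16` enables both features at once (assumed names).
cargo run --example ag-news-train --release --features tch-gpu,f16
```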
@@ -14,7 +14,7 @@ git clone https://github.com/tracel-ai/burn.git
 cd burn
 
 # Use the --release flag to really speed up training.
-export TORCH_CUDA_VERSION=cu113
+export TORCH_CUDA_VERSION=cu121
 cargo run --example text-generation --release
 ```
 
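To pick the right `cu1xx` value for your machine before exporting the variable, check what your system supports first, for example:

```sh
# Driver-supported CUDA version (shown in the nvidia-smi header)
nvidia-smi
# Installed CUDA toolkit version, if the toolkit is present
nvcc --version
```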