diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f7fc8670b..778d88032 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,7 +45,7 @@ Be sure to follow the coding standards and guidelines used in the rest of the pr ### Step 6: Run the Pre-Pull Request Script -Before you open a pull request, please run the [`run-checks.sh`](/run-before-pr.sh) script. This +Before you open a pull request, please run [`./run-checks.sh all`](/run-checks.sh). This will ensure that your changes are in line with our project's standards and guidelines. You can run this script by opening a terminal, navigating to your local project directory, and typing `./run-checks`. diff --git a/burn-book/src/building-blocks/tensor.md b/burn-book/src/building-blocks/tensor.md index 3fe247a36..e3d10658f 100644 --- a/burn-book/src/building-blocks/tensor.md +++ b/burn-book/src/building-blocks/tensor.md @@ -20,10 +20,13 @@ The actual shape of the tensor is inferred from its initialization. For example, ```rust, ignore let floats = [1.0, 2.0, 3.0, 4.0, 5.0]; -// correct: Tensor is 1-Dimensional with 5 elements -let tensor_1 = Tensor::<B, 1>::from_floats(floats); +// Get the default device +let device = Default::default(); -// incorrect: let tensor_1 = Tensor::<B, 5>::from_floats(floats); +// correct: Tensor is 1-Dimensional with 5 elements +let tensor_1 = Tensor::<B, 1>::from_floats(floats, &device); + +// incorrect: let tensor_1 = Tensor::<B, 5>::from_floats(floats, &device); // this will lead to an error and is for creating a 5-D tensor ``` @@ -38,19 +41,19 @@ a tensor from different inputs.
```rust, ignore // Initialization from a given Backend (Wgpu) -let tensor_1 = Tensor::<Wgpu, 1>::from_data([1.0, 2.0, 3.0]); +let tensor_1 = Tensor::<Wgpu, 1>::from_data([1.0, 2.0, 3.0], &device); // Initialization from a generic Backend -let tensor_2 = Tensor::<B, 1>::from_data(Data::from([1.0, 2.0, 3.0]).convert()); +let tensor_2 = Tensor::<B, 1>::from_data(Data::from([1.0, 2.0, 3.0]).convert(), &device); // Initialization using from_floats (Recommended for f32 ElementType) // Will be converted to Data internally. // `.convert()` not needed as from_floats() defined for fixed ElementType -let tensor_3 = Tensor::<B, 1>::from_floats([1.0, 2.0, 3.0]); +let tensor_3 = Tensor::<B, 1>::from_floats([1.0, 2.0, 3.0], &device); // Initialization of Int Tensor from array slices let arr: [i32; 6] = [1, 2, 3, 4, 5, 6]; -let tensor_4 = Tensor::<B, 1, Int>::from_data(Data::from(&arr[0..3]).convert()); +let tensor_4 = Tensor::<B, 1, Int>::from_data(Data::from(&arr[0..3]).convert(), &device); // Initialization from a custom type @@ -66,7 +69,7 @@ let bmi = BodyMetrics{ weight: 80.0 }; let data = Data::from([bmi.age as f32, bmi.height as f32, bmi.weight]).convert(); -let tensor_5 = Tensor::<B, 1>::from_data(data); +let tensor_5 = Tensor::<B, 1>::from_data(data, &device); ``` @@ -84,7 +87,7 @@ times will necessitate cloning it. Let's look at an example to understand the ow cloning better. Suppose we want to do a simple min-max normalization of an input tensor. ```rust, ignore -let input = Tensor::<Wgpu, 1>::from_floats([1.0, 2.0, 3.0, 4.0]); +let input = Tensor::<Wgpu, 1>::from_floats([1.0, 2.0, 3.0, 4.0], &device); let min = input.min(); let max = input.max(); let input = (input - min).div(max - min); @@ -98,7 +101,7 @@ available for further operations. Burn Tensors like most complex primitives do n doing min-max normalization with cloning.
```rust, ignore -let input = Tensor::<Wgpu, 1>::from_floats([1.0, 2.0, 3.0, 4.0]); +let input = Tensor::<Wgpu, 1>::from_floats([1.0, 2.0, 3.0, 4.0], &device); let min = input.clone().min(); let max = input.clone().max(); let input = (input.clone() - min.clone()).div(max - min);