remove from_devauto occurrences

louisfd 2023-12-21 15:53:04 -05:00
parent 5a5ea06db2
commit 8acf4b223b
121 changed files with 675 additions and 942 deletions
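The pattern is mechanical across all 121 files: every `*_devauto` constructor (`from_data_devauto`, `from_floats_devauto`, `from_ints_devauto`, `from_bool_devauto`), which implicitly placed the result on the default device, is replaced by a plain `from` conversion, and the now-redundant intermediate `Data` bindings are inlined. A minimal before/after sketch of the pattern, assuming `TestAutodiffTensor` is the usual test alias for `Tensor<TestAutodiffBackend, D>` and that `from` keeps the default-device behavior (which the updated tests imply):

// Before: an explicit Data binding fed to a *_devauto constructor.
let data = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data).require_grad();

// After: From builds the tensor directly from the nested array,
// so the intermediate Data binding disappears.
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();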

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_cos() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().cos());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,14 +5,12 @@ mod tests {
#[test]
fn test_cross_entropy_loss_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let data_targets = Data::from([[0.8, 0.2], [0.9, 0.1]]);
let tensor_1 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_1).require_grad();
let tensor_2 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_2).require_grad();
let tensor_1 =
Tensor::<TestAutodiffBackend, 2>::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 =
Tensor::<TestAutodiffBackend, 2>::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_targets =
Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_targets).require_grad();
Tensor::<TestAutodiffBackend, 2>::from([[0.8, 0.2], [0.9, 0.1]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = loss::cross_entropy_with_logits(tensor_3, tensor_targets);

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_div() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([1.0, 7.0]).require_grad();
let tensor_2 = TestAutodiffTensor::from([4.0, 7.0]).require_grad();
let tensor_3 = tensor_1.clone().div(tensor_2.clone());
let grads = tensor_3.backward();
@ -27,9 +24,7 @@ mod tests {
#[test]
fn should_diff_div_scalar() {
let data = Data::from([1.0, 7.0]);
let tensor = TestAutodiffTensor::from_data_devauto(data).require_grad();
let tensor = TestAutodiffTensor::from([1.0, 7.0]).require_grad();
let tensor_out = tensor.clone().div_scalar(4.0);
let grads = tensor_out.backward();
@ -40,13 +35,9 @@ mod tests {
#[test]
fn test_div_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [13.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[2.0, 2.0], [2.0, 2.0]]).require_grad();
let tensor_4 = tensor_1.clone().div(tensor_2.clone());
let tensor_5 = tensor_4.div(tensor_3);
@ -66,11 +57,8 @@ mod tests {
#[test]
fn test_div_complex_2() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.div(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_erf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().erf());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_exp() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().exp());
let grads = tensor_3.backward();

View File

@ -5,13 +5,9 @@ mod tests {
#[test]
fn test_gather_grad() {
let tensor_1 =
TestAutodiffTensor::from_data_devauto(Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]))
.require_grad();
let indices = Tensor::<TestAutodiffBackend, 2, Int>::from_data_devauto(Data::from([
[2, 1, 0, 1, 2],
[1, 0, 2, 1, 0],
]));
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).require_grad();
let indices =
Tensor::<TestAutodiffBackend, 2, Int>::from([[2, 1, 0, 1, 2], [1, 0, 2, 1, 0]]);
let tensor_2 = tensor_1.clone().matmul(tensor_1.clone().transpose());
let tensor_3 = tensor_1.clone().gather(1, indices);
@ -29,16 +25,9 @@ mod tests {
#[test]
fn test_scatter_grad() {
let tensor_1 =
TestAutodiffTensor::from_data_devauto(Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]))
.require_grad();
let values =
TestAutodiffTensor::from_data_devauto(Data::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
.require_grad();
let indices = Tensor::<TestAutodiffBackend, 2, Int>::from_data_devauto(Data::from([
[2, 1, 0],
[2, 0, 1],
]));
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).require_grad();
let values = TestAutodiffTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]).require_grad();
let indices = Tensor::<TestAutodiffBackend, 2, Int>::from([[2, 1, 0], [2, 0, 1]]);
let tensor_2 = tensor_1.clone().matmul(tensor_1.clone().transpose());
let tensor_3 = tensor_1.clone().scatter(1, indices, values.clone());

View File

@ -5,10 +5,8 @@ mod tests {
#[test]
fn should_diff_gelu() {
let tensor_1 =
TestAutodiffTensor::from_floats_devauto([[0.0, 1.0], [-3.0, 4.0]]).require_grad();
let tensor_2 =
TestAutodiffTensor::from_floats_devauto([[6.0, -0.5], [9.0, 10.0]]).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [-3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, -0.5], [9.0, 10.0]]).require_grad();
let x = tensor_1.clone().matmul(activation::gelu(tensor_2.clone()));
let x = tensor_1.clone().matmul(x);

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_log() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().log());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,13 +5,9 @@ mod tests {
#[test]
fn should_diff_mask_fill() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let mask = Data::<bool, 2>::from([[true, false], [false, true]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let mask = Tensor::<TestAutodiffBackend, 2, Bool>::from_bool_devauto(mask);
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let mask = Tensor::<TestAutodiffBackend, 2, Bool>::from([[true, false], [false, true]]);
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.mask_fill(mask, 2.0);
@ -26,16 +22,10 @@ mod tests {
#[test]
fn should_diff_mask_where() {
let tensor_1 =
TestAutodiffTensor::from_data_devauto([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 =
TestAutodiffTensor::from_data_devauto([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 =
TestAutodiffTensor::from_data_devauto([[8.8, 9.8], [10.8, 11.8]]).require_grad();
let mask = Tensor::<TestAutodiffBackend, 2, Bool>::from_data_devauto([
[true, false],
[false, true],
]);
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[8.8, 9.8], [10.8, 11.8]]).require_grad();
let mask = Tensor::<TestAutodiffBackend, 2, Bool>::from([[true, false], [false, true]]);
let tensor_4 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_5 = tensor_4.clone().matmul(tensor_3.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_matmul() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let grads = tensor_3.backward();
@ -27,13 +24,9 @@ mod tests {
#[test]
fn test_matmul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [13.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[2.0, 2.0], [2.0, 2.0]]).require_grad();
let tensor_4 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_5 = tensor_4.matmul(tensor_3);
@ -49,13 +42,9 @@ mod tests {
#[test]
fn test_matmul_complex_2() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [13.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[2.0, 2.0], [2.0, 2.0]]).require_grad();
let tensor_4 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_5 = tensor_4.matmul(tensor_3.clone());

View File

@ -5,10 +5,8 @@ mod tests {
#[test]
fn should_diff_max_dim() {
let tensor_1 =
TestAutodiffTensor::from_floats_devauto([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 =
TestAutodiffTensor::from_floats_devauto([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.max_dim(1).unsqueeze());
@ -27,10 +25,8 @@ mod tests {
#[test]
fn should_diff_min_dim() {
let tensor_1 =
TestAutodiffTensor::from_floats_devauto([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 =
TestAutodiffTensor::from_floats_devauto([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_1.clone().mul(tensor_3.min_dim(1).unsqueeze());

View File

@ -10,11 +10,9 @@ mod tests {
let stride = 1;
let dilation = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
0.9861, 0.5474, 0.4477, 0.0732, 0.3548, 0.8221,
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[1., 1., 0., 0., 0., 1.]]]);
let x = TestAutodiffTensor::from([[[0.9861, 0.5474, 0.4477, 0.0732, 0.3548, 0.8221]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from([[[1., 1., 0., 0., 0., 1.]]]);
let output = max_pool1d(x.clone(), kernel_size, stride, padding, dilation);
let grads = output.backward();
@ -33,13 +31,13 @@ mod tests {
let stride = 1;
let dilation = 2;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
0.5388, 0.0676, 0.7122, 0.8316, 0.0653, 0.9154, 0.1536, 0.9089, 0.8016, 0.7518, 0.2073,
0.0501, 0.8811, 0.5604, 0.5075, 0.4384, 0.9963, 0.9698, 0.4988, 0.2609, 0.3391, 0.2230,
0.4610, 0.5365, 0.6880,
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
0., 0., 1., 0., 0., 3., 0., 1., 2., 1., 0., 0., 2., 0., 0., 0., 4., 4., 0., 0., 0., 0.,
0., 0., 1.,
]]]);
@ -61,13 +59,13 @@ mod tests {
let stride = 1;
let dilation = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
0.5388, 0.0676, 0.7122, 0.8316, 0.0653, 0.9154, 0.1536, 0.9089, 0.8016, 0.7518, 0.2073,
0.0501, 0.8811, 0.5604, 0.5075, 0.4384, 0.9963, 0.9698, 0.4988, 0.2609, 0.3391, 0.2230,
0.4610, 0.5365, 0.6880,
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
0., 0., 0., 2., 0., 4., 0., 2., 1., 0., 0., 0., 4., 0., 0., 0., 4., 1., 1., 0., 0., 0.,
1., 1., 1.,
]]]);
@ -89,13 +87,13 @@ mod tests {
let stride = 1;
let dilation = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
0.5388, 0.0676, 0.7122, 0.8316, 0.0653, 0.9154, 0.1536, 0.9089, 0.8016, 0.7518, 0.2073,
0.0501, 0.8811, 0.5604, 0.5075, 0.4384, 0.9963, 0.9698, 0.4988, 0.2609, 0.3391, 0.2230,
0.4610, 0.5365, 0.6880,
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
1., 0., 1., 2., 0., 4., 0., 2., 1., 0., 0., 0., 4., 0., 0., 0., 4., 1., 1., 0., 0., 0.,
1., 1., 3.,
]]]);

View File

@ -14,14 +14,14 @@ mod tests {
let dilation_1 = 1;
let dilation_2 = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
[0.2479, 0.6386, 0.3166, 0.5742],
[0.7065, 0.1940, 0.6305, 0.8959],
[0.5416, 0.8602, 0.8129, 0.1662],
[0.3358, 0.3059, 0.8293, 0.0990],
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 2.0],
[0.0, 2.0, 0.0, 0.0],
@ -55,14 +55,14 @@ mod tests {
let dilation_1 = 1;
let dilation_2 = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
[0.2479, 0.6386, 0.3166, 0.5742],
[0.7065, 0.1940, 0.6305, 0.8959],
[0.5416, 0.8602, 0.8129, 0.1662],
[0.3358, 0.3059, 0.8293, 0.0990],
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
[1., 3., 0., 2.],
[3., 0., 0., 4.],
[1., 4., 0., 1.],
@ -96,14 +96,14 @@ mod tests {
let dilation_1 = 2;
let dilation_2 = 2;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
[0.2479, 0.6386, 0.3166, 0.5742],
[0.7065, 0.1940, 0.6305, 0.8959],
[0.5416, 0.8602, 0.8129, 0.1662],
[0.3358, 0.3059, 0.8293, 0.0990],
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
[0., 0., 0., 0.],
[1., 1., 1., 2.],
[0., 4., 4., 0.],
@ -137,7 +137,7 @@ mod tests {
let dilation_1 = 1;
let dilation_2 = 1;
let x = TestAutodiffTensor::from_floats_devauto([[[
let x = TestAutodiffTensor::from([[[
[0.5388, 0.0676, 0.7122, 0.8316, 0.0653],
[0.9154, 0.1536, 0.9089, 0.8016, 0.7518],
[0.2073, 0.0501, 0.8811, 0.5604, 0.5075],
@ -145,7 +145,7 @@ mod tests {
[0.3391, 0.2230, 0.4610, 0.5365, 0.6880],
]]])
.require_grad();
let x_grad_expected = TestAutodiffTensor::from_floats_devauto([[[
let x_grad_expected = TestAutodiffTensor::from([[[
[0., 0., 0., 3., 0.],
[4., 0., 2., 1., 0.],
[0., 0., 0., 0., 0.],

View File

@ -8,8 +8,8 @@ mod tests {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2.clone()).require_grad();
let tensor_1 = TestAutodiffTensor::from(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from(data_2.clone()).require_grad();
let tensor_3 = tensor_1.clone().mul(tensor_2.clone());
let grads = tensor_3.backward();
@ -24,9 +24,7 @@ mod tests {
#[test]
fn should_diff_mul_scalar() {
let data = Data::from([2.0, 5.0]);
let tensor = TestAutodiffTensor::from_data_devauto(data).require_grad();
let tensor = TestAutodiffTensor::from([2.0, 5.0]).require_grad();
let tensor_out = tensor.clone().mul_scalar(4.0);
let grads = tensor_out.backward();
@ -38,13 +36,9 @@ mod tests {
#[test]
fn test_mul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [13.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[2.0, 2.0], [2.0, 2.0]]).require_grad();
let tensor_4 = tensor_1.clone().mul(tensor_2.clone());
let tensor_5 = tensor_4.mul(tensor_3);

View File

@ -9,8 +9,8 @@ mod tests {
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let with_move = || {
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2.clone()).require_grad();
let tensor_1 = TestAutodiffTensor::from(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from(data_2.clone()).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.clone().matmul(tensor_2.clone());
@ -51,8 +51,8 @@ mod tests {
(grad_1, grad_2)
};
let without_move = || {
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2.clone()).require_grad();
let tensor_1 = TestAutodiffTensor::from(data_1.clone()).require_grad();
let tensor_2 = TestAutodiffTensor::from(data_2.clone()).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.clone().matmul(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_neg() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().neg());
let tensor_4 = tensor_3.neg();

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_powf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().powf(0.4));
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,9 +5,7 @@ mod tests {
#[test]
fn should_diff_recip() {
let data = Data::from([2.0, 5.0, 0.4]);
let tensor = TestAutodiffTensor::from_data_devauto(data).require_grad();
let tensor = TestAutodiffTensor::from([2.0, 5.0, 0.4]).require_grad();
let tensor_out = tensor.clone().recip();
let grads = tensor_out.backward();

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_relu() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [-2.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, -7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = activation::relu(tensor_3);

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_reshape() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 1> = Data::from([4.0, 7.0, 2.0, 3.0]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([4.0, 7.0, 2.0, 3.0]).require_grad();
let tensor_3 = tensor_2.clone().reshape([2, 2]);
let tensor_4 = tensor_1.clone().matmul(tensor_3);

View File

@ -5,10 +5,8 @@ mod tests {
#[test]
fn test_select_grad() {
let tensor_1 =
TestAutodiffTensor::from_data_devauto(Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]))
.require_grad();
let indices = Tensor::<TestAutodiffBackend, 1, Int>::from_data_devauto(Data::from([1, 0]));
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).require_grad();
let indices = Tensor::<TestAutodiffBackend, 1, Int>::from([1, 0]);
let tensor_2 = tensor_1.clone().matmul(tensor_1.clone().transpose());
let tensor_3 = tensor_1.clone().select(0, indices);
@ -26,13 +24,9 @@ mod tests {
#[test]
fn test_select_assign_grad() {
let tensor_1 =
TestAutodiffTensor::from_data_devauto(Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]))
.require_grad();
let values =
TestAutodiffTensor::from_data_devauto(Data::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
.require_grad();
let indices = Tensor::<TestAutodiffBackend, 1, Int>::from_data_devauto(Data::from([1, 0]));
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).require_grad();
let values = TestAutodiffTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]).require_grad();
let indices = Tensor::<TestAutodiffBackend, 1, Int>::from([1, 0]);
let tensor_2 = tensor_1.clone().matmul(tensor_1.clone().transpose());
let tensor_3 = tensor_1.clone().select_assign(0, indices, values.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_sin() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().sin());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,11 +5,9 @@ mod tests {
#[test]
fn should_diff_matmul_with_slice() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0, 100.0], [2.0, 3.0, 15.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 =
TestAutodiffTensor::from([[4.0, 7.0, 100.0], [2.0, 3.0, 15.0]]).require_grad();
let tensor_3 = tensor_2.clone().slice([0..2, 0..2]);
let tensor_4 = tensor_1.clone().matmul(tensor_3);
@ -27,13 +25,9 @@ mod tests {
#[test]
fn should_diff_matmul_with_slice_assign() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_assigned: Data<f32, 2> = Data::from([[9.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_assigned = TestAutodiffTensor::from_data_devauto(data_assigned).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_assigned = TestAutodiffTensor::from([[9.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = tensor_3.slice_assign([0..1, 0..1], tensor_assigned);
@ -50,13 +44,9 @@ mod tests {
#[test]
fn should_diff_matmul_with_slice_assign_complex() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[9.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[9.0]]).require_grad();
let tensor_4 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_5 = tensor_2.clone().slice([0..1, 0..1]);

View File

@ -5,10 +5,10 @@ mod tests {
#[test]
fn test_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_1).require_grad();
let tensor_2 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_2).require_grad();
let tensor_1 =
Tensor::<TestAutodiffBackend, 2>::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 =
Tensor::<TestAutodiffBackend, 2>::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = activation::softmax(tensor_3, 1).matmul(tensor_2.clone());
@ -27,10 +27,10 @@ mod tests {
#[test]
fn test_log_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_1).require_grad();
let tensor_2 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_2).require_grad();
let tensor_1 =
Tensor::<TestAutodiffBackend, 2>::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 =
Tensor::<TestAutodiffBackend, 2>::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = activation::log_softmax(tensor_3, 1).matmul(tensor_2.clone());
@ -49,11 +49,10 @@ mod tests {
#[test]
fn test_quiet_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_1).require_grad();
let tensor_2 = Tensor::<TestAutodiffBackend, 2>::from_data_devauto(data_2).require_grad();
let tensor_1 =
Tensor::<TestAutodiffBackend, 2>::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 =
Tensor::<TestAutodiffBackend, 2>::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone());
let tensor_4 = activation::softmax(tensor_3, 1).matmul(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_sqrt() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().sqrt());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_sub() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([2.0, 5.0]).require_grad();
let tensor_2 = TestAutodiffTensor::from([4.0, 1.0]).require_grad();
let tensor_3 = tensor_1.clone().sub(tensor_2.clone());
let grads = tensor_3.backward();
@ -24,8 +21,7 @@ mod tests {
#[test]
fn should_diff_sub_scalar() {
let data = Data::from([2.0, 10.0]);
let tensor = TestAutodiffTensor::from_data_devauto(data).require_grad();
let tensor = TestAutodiffTensor::from([2.0, 10.0]).require_grad();
let tensor_out = tensor.clone().sub_scalar(5.0);
let grads = tensor_out.backward();
@ -37,13 +33,9 @@ mod tests {
#[test]
fn test_sub_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_3 = TestAutodiffTensor::from_data_devauto(data_3).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [13.0, -3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = TestAutodiffTensor::from([[2.0, 2.0], [2.0, 2.0]]).require_grad();
let tensor_4 = tensor_1.clone().sub(tensor_2.clone());
let tensor_5 = tensor_4.sub(tensor_3).sub_scalar(5.0);

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_tanh() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().tanh());
let tensor_4 = tensor_3.matmul(tensor_2.clone());

View File

@ -5,11 +5,8 @@ mod tests {
#[test]
fn should_diff_transpose() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
let tensor_1 = TestAutodiffTensor::from([[1.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_2 = TestAutodiffTensor::from([[4.0, 7.0], [2.0, 3.0]]).require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().transpose());
let tensor_4 = tensor_3.transpose();
@ -24,16 +21,12 @@ mod tests {
#[test]
fn should_diff_swap_dims() {
let tensor_1 = TestAutodiffTensor::from_floats_devauto([
[[0.0, 1.0], [3.0, 4.0]],
[[6.0, 7.0], [9.0, 10.0]],
])
.require_grad();
let tensor_2 = TestAutodiffTensor::from_floats_devauto([
[[1.0, 4.0], [2.0, 5.0]],
[[7.0, 10.0], [8.0, 11.0]],
])
.require_grad();
let tensor_1 =
TestAutodiffTensor::from([[[0.0, 1.0], [3.0, 4.0]], [[6.0, 7.0], [9.0, 10.0]]])
.require_grad();
let tensor_2 =
TestAutodiffTensor::from([[[1.0, 4.0], [2.0, 5.0]], [[7.0, 10.0], [8.0, 11.0]]])
.require_grad();
let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().swap_dims(0, 2));
let tensor_4 = tensor_3.matmul(tensor_2.clone().swap_dims(1, 2));

View File

@ -47,7 +47,7 @@ impl<B: Backend> Batcher<MNISTItem, MNISTBatch<B>> for MNISTBatcher<B> {
let images = items
.iter()
.map(|item| Data::<f32, 2>::from(item.image))
.map(|data| Tensor::<B, 2>::from_data_devauto(data.convert()))
.map(|data| Tensor::<B, 2>::from(data.convert()))
.map(|tensor| tensor.reshape([1, 28, 28]))
// Normalize: make between [0,1] and make the mean=0 and std=1
// values mean=0.1307,std=0.3081 are from the PyTorch MNIST example
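The normalization step this comment refers to sits just past the hunk boundary. A hedged sketch of the elided mapping, assuming raw pixel values in 0..=255 (the standalone helper form is hypothetical, not from the commit):

use burn_tensor::{backend::Backend, Tensor};

fn normalize<B: Backend>(image: Tensor<B, 3>) -> Tensor<B, 3> {
    // Scale 0-255 pixels into [0, 1], then standardize with the
    // PyTorch MNIST statistics quoted above (mean 0.1307, std 0.3081).
    ((image / 255) - 0.1307) / 0.3081
}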
@ -57,7 +57,7 @@ impl<B: Backend> Batcher<MNISTItem, MNISTBatch<B>> for MNISTBatcher<B> {
let targets = items
.iter()
.map(|item| Tensor::<B, 1, Int>::from_data_devauto(Data::from([(item.label as i64).elem()])))
.map(|item| Tensor::<B, 1, Int>::from([(item.label as i64).elem()]))
.collect();
let images = Tensor::cat(images, 0).to_device(&self.device);

View File

@ -36,33 +36,32 @@ for the sake of simplicity, we ignore type signatures. For more details, refer t
Those operations are available for all tensor kinds: `Int`, `Float`, and `Bool`.
| Burn | PyTorch Equivalent |
| ---------------------------------------- | ------------------------------------ |
| `Tensor::empty_devauto(shape)` | `torch.empty(shape)` |
| `Tensor::empty(shape, device)` | `torch.empty(shape, device=device)` |
| `tensor.dims()` | `tensor.size()` |
| `tensor.shape()` | `tensor.shape` |
| `tensor.reshape(shape)` | `tensor.view(shape)` |
| `tensor.flatten(start_dim, end_dim)` | `tensor.flatten(start_dim, end_dim)` |
| `tensor.squeeze(dim)` | `tensor.squeeze(dim)` |
| `tensor.unsqueeze()` | `tensor.unsqueeze(0)` |
| `tensor.unsqueeze_dim(dim)` | `tensor.unsqueeze(dim)` |
| `tensor.slice(ranges)` | `tensor[(*ranges,)]` |
| `tensor.slice_assign(ranges, values)` | `tensor[(*ranges,)] = values` |
| `tensor.narrow(dim, start, length)` | `tensor.narrow(dim, start, length)` |
| `tensor.chunk(num_chunks, dim)` | `tensor.chunk(num_chunks, dim)` |
| `tensor.device()` | `tensor.device` |
| `tensor.to_device(device)` | `tensor.to(device)` |
| `tensor.repeat(2, 4)` | `tensor.repeat([1, 1, 4])` |
| `tensor.equal(other)` | `x == y` |
| `Tensor::cat(tensors, dim)` | `torch.cat(tensors, dim)` |
| `tensor.into_data()` | N/A |
| `tensor.to_data()` | N/A |
| `Tensor::from_data_devauto(data)` | N/A |
| `Tensor::from_data(data, device)` | N/A |
| `tensor.into_primitive()` | N/A |
| `Tensor::from_primitive(primitive)` | N/A |
| `Tensor::stack(tensors, dim)` | `torch.stack(tensors, dim)` |
| Burn | PyTorch Equivalent |
| ------------------------------------- | ------------------------------------ |
| `Tensor::empty_devauto(shape)` | `torch.empty(shape)` |
| `Tensor::empty(shape, device)` | `torch.empty(shape, device=device)` |
| `tensor.dims()` | `tensor.size()` |
| `tensor.shape()` | `tensor.shape` |
| `tensor.reshape(shape)` | `tensor.view(shape)` |
| `tensor.flatten(start_dim, end_dim)` | `tensor.flatten(start_dim, end_dim)` |
| `tensor.squeeze(dim)` | `tensor.squeeze(dim)` |
| `tensor.unsqueeze()` | `tensor.unsqueeze(0)` |
| `tensor.unsqueeze_dim(dim)` | `tensor.unsqueeze(dim)` |
| `tensor.slice(ranges)` | `tensor[(*ranges,)]` |
| `tensor.slice_assign(ranges, values)` | `tensor[(*ranges,)] = values` |
| `tensor.narrow(dim, start, length)` | `tensor.narrow(dim, start, length)` |
| `tensor.chunk(num_chunks, dim)` | `tensor.chunk(num_chunks, dim)` |
| `tensor.device()` | `tensor.device` |
| `tensor.to_device(device)` | `tensor.to(device)` |
| `tensor.repeat(2, 4)` | `tensor.repeat([1, 1, 4])` |
| `tensor.equal(other)` | `x == y` |
| `Tensor::cat(tensors, dim)` | `torch.cat(tensors, dim)` |
| `tensor.into_data()` | N/A |
| `tensor.to_data()` | N/A |
| `Tensor::from_data(data, device)` | N/A |
| `tensor.into_primitive()` | N/A |
| `Tensor::from_primitive(primitive)` | N/A |
| `Tensor::stack(tensors, dim)` | `torch.stack(tensors, dim)` |
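To make the table concrete, a short hedged sketch exercising a few of the listed basic operations; the generic `demo` wrapper and the import path are illustrative, not part of the book:

use burn::tensor::{backend::Backend, Tensor};

fn demo<B: Backend>(device: &B::Device) {
    // torch.empty(shape, device=device)
    let tensor = Tensor::<B, 2>::empty([2, 3], device);
    // tensor.size() and tensor.view(shape)
    let dims = tensor.dims(); // [2, 3]
    let reshaped = tensor.reshape([3, 2]);
    // tensor[(*ranges,)]
    let top = reshaped.slice([0..1, 0..2]);
    let _ = (dims, top);
}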
### Numeric Operations
@ -124,49 +123,48 @@ Those operations are available for numeric tensor kinds: `Float` and `Int`.
Those operations are only available for `Float` tensors.
| Burn API | PyTorch Equivalent |
| --------------------------------------------------- | ---------------------------------- |
| `tensor.exp()` | `tensor.exp()` |
| `tensor.log()` | `tensor.log()` |
| `tensor.log1p()` | `tensor.log1p()` |
| `tensor.erf()` | `tensor.erf()` |
| `tensor.powf(value)` | `tensor.pow(value)` |
| `tensor.sqrt()` | `tensor.sqrt()` |
| `tensor.recip()` | `tensor.reciprocal()` |
| `tensor.cos()` | `tensor.cos()` |
| `tensor.sin()` | `tensor.sin()` |
| `tensor.tanh()` | `tensor.tanh()` |
| `tensor.from_floats_devauto(floats)` | N/A |
| `tensor.from_floats(floats, device)` | N/A |
| `tensor.int()` | Similar to `tensor.to(torch.long)` |
| `tensor.zeros_like()` | `torch.zeros_like(tensor)` |
| `tensor.ones_like()` | `torch.ones_like(tensor)` |
| `tensor.random_like(distribution)` | `torch.rand_like()` only uniform |
| `tensor.one_hot(index, num_classes)` | N/A |
| `tensor.transpose()` | `tensor.T` |
| `tensor.swap_dims(dim1, dim2)` | `tensor.transpose(dim1, dim2)` |
| `tensor.matmul(other)` | `tensor.matmul(other)` |
| `tensor.var(dim)` | `tensor.var(dim)` |
| `tensor.var_bias(dim)` | N/A |
| `tensor.var_mean(dim)` | N/A |
| `tensor.var_mean_bias(dim)` | N/A |
| `tensor.random_devauto(shape, distribution)` | N/A |
| `tensor.random(shape, distribution, device)` | N/A |
| `tensor.to_full_precision()` | `tensor.to(torch.float)` |
| `tensor.from_full_precision(tensor)` | N/A |
| Burn API | PyTorch Equivalent |
| -------------------------------------------- | ---------------------------------- |
| `tensor.exp()` | `tensor.exp()` |
| `tensor.log()` | `tensor.log()` |
| `tensor.log1p()` | `tensor.log1p()` |
| `tensor.erf()` | `tensor.erf()` |
| `tensor.powf(value)` | `tensor.pow(value)` |
| `tensor.sqrt()` | `tensor.sqrt()` |
| `tensor.recip()` | `tensor.reciprocal()` |
| `tensor.cos()` | `tensor.cos()` |
| `tensor.sin()` | `tensor.sin()` |
| `tensor.tanh()` | `tensor.tanh()` |
| `tensor.from_floats(floats, device)` | N/A |
| `tensor.int()` | Similar to `tensor.to(torch.long)` |
| `tensor.zeros_like()` | `torch.zeros_like(tensor)` |
| `tensor.ones_like()` | `torch.ones_like(tensor)` |
| `tensor.random_like(distribution)` | `torch.rand_like()` only uniform |
| `tensor.one_hot(index, num_classes)` | N/A |
| `tensor.transpose()` | `tensor.T` |
| `tensor.swap_dims(dim1, dim2)` | `tensor.transpose(dim1, dim2)` |
| `tensor.matmul(other)` | `tensor.matmul(other)` |
| `tensor.var(dim)` | `tensor.var(dim)` |
| `tensor.var_bias(dim)` | N/A |
| `tensor.var_mean(dim)` | N/A |
| `tensor.var_mean_bias(dim)` | N/A |
| `tensor.random_devauto(shape, distribution)` | N/A |
| `tensor.random(shape, distribution, device)` | N/A |
| `tensor.to_full_precision()` | `tensor.to(torch.float)` |
| `tensor.from_full_precision(tensor)` | N/A |
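Similarly, a hedged sketch touching a few float-only rows (same imports as the sketch above; the wrapper is illustrative):

fn float_demo<B: Backend>(device: &B::Device) {
    let tensor = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], device);
    // tensor.exp() followed by tensor.log() recovers the input, up to rounding.
    let roundtrip = tensor.clone().exp().log();
    // tensor.T and tensor.matmul(other)
    let gram = tensor.clone().matmul(tensor.transpose());
    // tensor.var(dim): variance along dimension 1
    let variance = gram.var(1);
    let _ = (roundtrip, variance);
}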
### Int Operations
Those operations are only available for `Int` tensors.
| Burn API | PyTorch Equivalent |
| --------------------------------------------- | ------------------------------------------------------- |
| `tensor.from_ints(ints)` | N/A |
| `tensor.float()` | Similar to `tensor.to(torch.float)` |
| `tensor.arange_devauto(5..10)` | `tensor.arange(start=5, end=10)` |
| `tensor.arange(5..10, device)` | `tensor.arange(start=5, end=10, device=device)` |
| `tensor.arange_step_devauto(5..10, 2)` | `tensor.arange(start=5, end=10, step=2)` |
| `tensor.arange_step(5..10, 2, device)` | `tensor.arange(start=5, end=10, step=2, device=device)` |
| Burn API | PyTorch Equivalent |
| -------------------------------------- | ------------------------------------------------------- |
| `tensor.from_ints(ints)` | N/A |
| `tensor.float()` | Similar to `tensor.to(torch.float)` |
| `tensor.arange_devauto(5..10)` | `tensor.arange(start=5, end=10)` |
| `tensor.arange(5..10, device)` | `tensor.arange(start=5, end=10, device=device)` |
| `tensor.arange_step_devauto(5..10, 2)` | `tensor.arange(start=5, end=10, step=2)` |
| `tensor.arange_step(5..10, 2, device)` | `tensor.arange(start=5, end=10, step=2, device=device)` |
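A hedged sketch of the `arange` family above (same imports as the earlier sketches, plus `Int`; the wrapper is illustrative):

use burn::tensor::Int;

fn int_demo<B: Backend>(device: &B::Device) {
    // torch.arange(start=5, end=10, device=device) -> [5, 6, 7, 8, 9]
    let plain = Tensor::<B, 1, Int>::arange(5..10, device);
    // torch.arange(start=5, end=10, step=2, device=device) -> [5, 7, 9]
    let stepped = Tensor::<B, 1, Int>::arange_step(5..10, 2, device);
    let _ = (plain, stepped);
}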
### Bool Operations
@ -180,16 +178,16 @@ Those operations are only available for `Bool` tensors.
## Activation Functions
| Burn API | PyTorch Equivalent |
| ------------------------------------------ | ----------------------------------------------------- |
| `activation::gelu(tensor)` | Similar to `nn.functional.gelu(tensor)` |
| `activation::log_sigmoid(tensor)` | Similar to `nn.functional.log_sigmoid(tensor)` |
| `activation::log_softmax(tensor, dim)` | Similar to `nn.functional.log_softmax(tensor, dim)` |
| `activation::mish(tensor)` | Similar to `nn.functional.mish(tensor)` |
| `activation::quiet_softmax(tensor, dim)` | Similar to `nn.functional.quiet_softmax(tensor, dim)` |
| `activation::relu(tensor)` | Similar to `nn.functional.relu(tensor)` |
| `activation::sigmoid(tensor)` | Similar to `nn.functional.sigmoid(tensor)` |
| `activation::silu(tensor)` | Similar to `nn.functional.silu(tensor)` |
| `activation::softmax(tensor, dim)` | Similar to `nn.functional.softmax(tensor, dim)` |
| `activation::softplus(tensor, beta)` | Similar to `nn.functional.softplus(tensor, beta)` |
| `activation::tanh(tensor)` | Similar to `nn.functional.tanh(tensor)` |
| Burn API | PyTorch Equivalent |
| ---------------------------------------- | ----------------------------------------------------- |
| `activation::gelu(tensor)` | Similar to `nn.functional.gelu(tensor)` |
| `activation::log_sigmoid(tensor)` | Similar to `nn.functional.log_sigmoid(tensor)` |
| `activation::log_softmax(tensor, dim)` | Similar to `nn.functional.log_softmax(tensor, dim)` |
| `activation::mish(tensor)` | Similar to `nn.functional.mish(tensor)` |
| `activation::quiet_softmax(tensor, dim)` | Similar to `nn.functional.quiet_softmax(tensor, dim)` |
| `activation::relu(tensor)` | Similar to `nn.functional.relu(tensor)` |
| `activation::sigmoid(tensor)` | Similar to `nn.functional.sigmoid(tensor)` |
| `activation::silu(tensor)` | Similar to `nn.functional.silu(tensor)` |
| `activation::softmax(tensor, dim)` | Similar to `nn.functional.softmax(tensor, dim)` |
| `activation::softplus(tensor, beta)` | Similar to `nn.functional.softplus(tensor, beta)` |
| `activation::tanh(tensor)` | Similar to `nn.functional.tanh(tensor)` |
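And a hedged sketch of the activation API (same imports as above, plus the `activation` module; the wrapper is illustrative):

use burn::tensor::activation;

fn activation_demo<B: Backend>(device: &B::Device) {
    let logits = Tensor::<B, 2>::from_floats([[1.0, 2.0, 3.0]], device);
    // Each row of the softmax output sums to 1 along the given dimension.
    let probs = activation::softmax(logits.clone(), 1);
    // relu zeroes out the negative entries.
    let hidden = activation::relu(logits);
    let _ = (probs, hidden);
}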

View File

@ -54,7 +54,7 @@ type Backend = Wgpu;
fn main() {
// Create two tensors: the first with explicit values, the second filled with ones and shaped like the first
let tensor_1 = Tensor::<Backend, 2>::from_data_devauto([[2., 3.], [4., 5.]]);
let tensor_1 = Tensor::<Backend, 2>::from([[2., 3.], [4., 5.]]);
let tensor_2 = Tensor::<Backend, 2>::ones_like(&tensor_1);
// Print the element-wise addition (done with the WGPU backend) of the two tensors.
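The print statement itself falls outside this hunk; a hypothetical completion matching the snippet's names would be:

println!("{}", tensor_1 + tensor_2); // element-wise add, executed by the WGPU backend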
@ -88,7 +88,6 @@ datasets. Therefore, in order to run examples, you will need to install Python.
instructions on the [official website](https://www.python.org/downloads/) to install Python on your
computer.
Many Burn examples are available in the [examples](https://github.com/tracel-ai/burn/tree/main/examples)
directory.
To run one, please refer to the example's README.md for the specific command to
execute.
Many Burn examples are available in the
[examples](https://github.com/tracel-ai/burn/tree/main/examples) directory. To run one, please refer
to the example's README.md for the specific command to execute.

View File

@ -113,7 +113,7 @@ mod tests {
#[test]
fn test_clip_by_value() {
let gradient: Tensor<TestBackend, 2> = Tensor::from_floats_devauto([
let gradient: Tensor<TestBackend, 2> = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
]);
@ -128,7 +128,7 @@ mod tests {
#[test]
fn test_clip_by_norm() {
let gradient: Tensor<TestBackend, 2> = Tensor::from_floats_devauto([
let gradient: Tensor<TestBackend, 2> = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
]);

View File

@ -70,7 +70,7 @@ pub fn generate_padding_mask<B: Backend>(
tensor = tensor.slice_assign(
[index..index + 1, 0..tokens.len()],
Tensor::from_data_devauto(Data::new(
Tensor::from(Data::new(
tokens.into_iter().map(|e| (e as i64).elem()).collect(),
Shape::new([1, seq_length]),
)),

View File

@ -2,6 +2,7 @@ use crate as burn;
use crate::{config::Config, module::Module};
use burn_tensor::activation::sigmoid;
use burn_tensor::Data;
use burn_tensor::{backend::Backend, Int, Tensor};
/// Configuration to create a [Binary Cross-entropy loss](BinaryCrossEntropyLoss).
@ -36,7 +37,7 @@ impl BinaryCrossEntropyLossConfig {
weights: self
.weights
.as_ref()
.map(|e| Tensor::<B, 1>::from_floats_devauto(e.as_slice())),
.map(|e| Tensor::<B, 1>::from(Data::from(e.as_slice()).convert())),
smoothing: self.smoothing,
logits: self.logits,
}
@ -124,7 +125,7 @@ mod tests {
let [batch_size] = [4];
let logits =
Tensor::<TestBackend, 1>::random_devauto([batch_size], Distribution::Normal(0., 1.0));
let targets = Tensor::<TestBackend, 1, Int>::from_data_devauto(Data::from([0, 1, 0, 1]));
let targets = Tensor::<TestBackend, 1, Int>::from([0, 1, 0, 1]);
let loss_1 = BinaryCrossEntropyLossConfig::new()
.init()
@ -141,7 +142,7 @@ mod tests {
let [batch_size] = [4];
let logits =
Tensor::<TestBackend, 1>::random_devauto([batch_size], Distribution::Normal(0., 1.0));
let targets = Tensor::<TestBackend, 1, Int>::from_data_devauto(Data::from([0, 1, 0, 1]));
let targets = Tensor::<TestBackend, 1, Int>::from([0, 1, 0, 1]);
let weights = [3., 7.];
let loss_1 = BinaryCrossEntropyLossConfig::new()
@ -152,7 +153,7 @@ mod tests {
let loss_2 = targets.clone().float() * logits.clone().log()
+ (-targets.float() + 1) * (-logits + 1).log();
let loss_2 = loss_2 * Tensor::from_floats_devauto([3., 7., 3., 7.]);
let loss_2 = loss_2 * Tensor::from([3., 7., 3., 7.]);
let loss_2 = loss_2.neg().sum() / (3. + 3. + 7. + 7.);
loss_1.into_data().assert_approx_eq(&loss_2.into_data(), 3);
}
@ -162,7 +163,7 @@ mod tests {
let [batch_size] = [4];
let logits =
Tensor::<TestBackend, 1>::random_devauto([batch_size], Distribution::Normal(0., 1.0));
let targets = Tensor::<TestBackend, 1, Int>::from_data_devauto(Data::from([0, 1, 0, 1]));
let targets = Tensor::<TestBackend, 1, Int>::from([0, 1, 0, 1]);
let loss_1 = BinaryCrossEntropyLossConfig::new()
.with_smoothing(Some(0.1))

View File

@ -4,6 +4,7 @@ use crate::{config::Config, module::Module};
use alloc::vec;
use alloc::vec::Vec;
use burn_tensor::activation::log_softmax;
use burn_tensor::Data;
use burn_tensor::{backend::Backend, Bool, Int, Tensor};
/// Configuration to create a [Cross-entropy loss](CrossEntropyLoss).
@ -44,7 +45,7 @@ impl CrossEntropyLossConfig {
weights: self
.weights
.as_ref()
.map(|e| Tensor::<B, 1>::from_floats_devauto(e.as_slice())),
.map(|e| Tensor::<B, 1>::from(Data::from(e.as_slice()).convert())),
smoothing: self.smoothing,
logits: self.logits,
}
@ -230,14 +231,13 @@ mod tests {
[batch_size, num_targets],
Distribution::Normal(0., 1.0),
);
let targets =
Tensor::<TestBackend, 1, Int>::from_data_devauto(Data::from([2, 0, 4, 1]));
let targets_logits = Tensor::<TestBackend, 2>::from_data_devauto(Data::from([
let targets = Tensor::<TestBackend, 1, Int>::from([2, 0, 4, 1]);
let targets_logits = Tensor::<TestBackend, 2>::from([
[0.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
]));
]);
(logits, targets, targets_logits)
}};
}
@ -249,15 +249,15 @@ mod tests {
[batch_size, num_targets],
Distribution::Normal(0., 1.0),
);
let targets = Tensor::<TestBackend, 1, Int>::from_data_devauto(
let targets = Tensor::<TestBackend, 1, Int>::from(
Data::<i64, 1>::from([2, 0, 4, pad_index as i64]).convert(),
);
let targets_logits = Tensor::<TestBackend, 2>::from_data_devauto(Data::from([
let targets_logits = Tensor::<TestBackend, 2>::from([
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]));
]);
(logits, targets, targets_logits)
}};
}
@ -273,7 +273,7 @@ mod tests {
let tensor = log_softmax(logits, 1);
let loss_2 = tensor
* targets_logits
* Tensor::<TestBackend, 1>::from_floats_devauto(weights.as_slice())
* Tensor::<TestBackend, 1>::from(weights.as_slice())
.unsqueeze()
.repeat(0, 4);
let loss_2 = loss_2.sum().neg() / (1. + 2. + 3. + 5.);
@ -358,12 +358,12 @@ mod tests {
let (logits, targets, _) = setup!();
let smoothed_targets =
CrossEntropyLoss::compute_smoothed_targets(logits.dims(), targets, 0.05);
let targets_logits = Tensor::<TestBackend, 2>::from_data_devauto(Data::from([
let targets_logits = Tensor::<TestBackend, 2>::from([
[0.01, 0.01, 0.96, 0.01, 0.01],
[0.96, 0.01, 0.01, 0.01, 0.01],
[0.01, 0.01, 0.01, 0.01, 0.96],
[0.01, 0.96, 0.01, 0.01, 0.01],
]));
]);
smoothed_targets
.into_data()
.assert_approx_eq(&targets_logits.into_data(), 3);
@ -376,12 +376,12 @@ mod tests {
.with_smoothing(Some(0.05))
.init()
.forward(logits.clone(), targets);
let targets_logits = Tensor::<TestBackend, 2>::from_data_devauto(Data::from([
let targets_logits = Tensor::<TestBackend, 2>::from([
[0.01, 0.01, 0.96, 0.01, 0.01],
[0.96, 0.01, 0.01, 0.01, 0.01],
[0.01, 0.01, 0.01, 0.01, 0.96],
[0.01, 0.96, 0.01, 0.01, 0.01],
]));
]);
let x = log_softmax(logits, 1);
let loss_2 = (x * targets_logits).sum_dim(1).mean().neg();

View File

@ -60,11 +60,9 @@ mod tests {
#[test]
fn test_mse_loss() {
let logits =
Tensor::<TestBackend, 2>::from_data_devauto(Data::from([[1.0, 2.0], [3.0, 4.0]]));
let logits = Tensor::<TestBackend, 2>::from([[1.0, 2.0], [3.0, 4.0]]);
let targets =
Tensor::<TestBackend, 2>::from_data_devauto(Data::from([[2.0, 1.0], [3.0, 2.0]]));
let targets = Tensor::<TestBackend, 2>::from([[2.0, 1.0], [3.0, 2.0]]);
let mse = MSELoss::new();
let loss_no_reduction = mse.forward_no_reduction(logits.clone(), targets.clone());

View File

@ -142,7 +142,7 @@ mod tests {
assert!(module.gamma.is_none());
assert!(module.beta.is_none());
let input = Tensor::from_data_devauto(Data::from([
let input = Tensor::from([
[
[-0.3034, 0.2726, -0.9659],
[-1.1845, -1.3236, 0.0172],
@ -159,7 +159,7 @@ mod tests {
[-0.9535, 0.1281, 0.4372],
[-0.2845, 0.3488, 0.5641],
],
]));
]);
let output = module.forward(input);
@ -208,7 +208,7 @@ mod tests {
.to_data()
.assert_approx_eq(&Data::zeros([6]), 3);
let input = Tensor::from_data_devauto(Data::from([
let input = Tensor::from(Data::from([
[
[0.3345, 0.4429, 0.6639],
[0.5041, 0.4175, 0.8437],

View File

@ -145,7 +145,7 @@ pub fn generate_sinusoids<B: Backend>(
[length, d_model].into(),
);
Tensor::<B, 2>::from_data_devauto(data.convert())
Tensor::<B, 2>::from(data.convert())
}
#[cfg(test)]
@ -171,7 +171,7 @@ mod tests {
assert_eq!(output.shape().dims, [batch_size, length, d_model]);
let expected = Tensor::<TestBackend, 3>::from_floats_devauto([
let expected = Tensor::<TestBackend, 3>::from([
[
[0.00000, 1.00000, 0.00000, 1.00000, 0.00000, 1.00000],
[0.84147, 0.54030, 0.04640, 0.99892, 0.00215, 1.00000],
@ -192,7 +192,7 @@ mod tests {
let sinusoids = generate_sinusoids::<TestBackend>(12, 6, 10_000);
// The values are taken from the pytorch reference implementation
let expected = Tensor::<TestBackend, 2>::from_floats_devauto([
let expected = Tensor::<TestBackend, 2>::from([
[0.00000, 1.00000, 0.00000, 1.00000, 0.00000, 1.00000],
[0.84147, 0.54030, 0.04640, 0.99892, 0.00215, 1.00000],
[0.90930, -0.41615, 0.09270, 0.99569, 0.00431, 0.99999],

View File

@ -199,12 +199,12 @@ mod tests {
]),
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x_1 = Tensor::from_floats_devauto([
let x_1 = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
])
.require_grad();
let x_2 = Tensor::from_floats_devauto([
let x_2 = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -258,8 +258,8 @@ mod tests {
bias: Data<f32, 1>,
) -> nn::Linear<TestAutodiffBackend> {
let record = nn::LinearRecord {
weight: Param::from(Tensor::from_data_devauto(weight)),
bias: Some(Param::from(Tensor::from_data_devauto(bias))),
weight: Param::from(Tensor::from(weight)),
bias: Some(Param::from(Tensor::from(bias))),
};
nn::LinearConfig::new(6, 6).init_with(record)

View File

@ -232,12 +232,12 @@ mod tests {
]),
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x_1 = Tensor::from_floats_devauto([
let x_1 = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
])
.require_grad();
let x_2 = Tensor::from_floats_devauto([
let x_2 = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -302,7 +302,7 @@ mod tests {
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x = Tensor::from_floats_devauto([
let x = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -332,8 +332,8 @@ mod tests {
bias: Data<f32, 1>,
) -> nn::Linear<TestAutodiffBackend> {
let record = nn::LinearRecord {
weight: Param::from(Tensor::from_data_devauto(weight)),
bias: Some(Param::from(Tensor::from_data_devauto(bias))),
weight: Param::from(Tensor::from(weight)),
bias: Some(Param::from(Tensor::from(bias))),
};
nn::LinearConfig::new(6, 6).init_with(record)

View File

@ -244,12 +244,12 @@ mod tests {
]),
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x_1 = Tensor::from_floats_devauto([
let x_1 = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
])
.require_grad();
let x_2 = Tensor::from_floats_devauto([
let x_2 = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -314,7 +314,7 @@ mod tests {
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x = Tensor::from_floats_devauto([
let x = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -344,8 +344,8 @@ mod tests {
bias: Data<f32, 1>,
) -> nn::Linear<TestAutodiffBackend> {
let record = nn::LinearRecord {
weight: Param::from(Tensor::from_data_devauto(weight)),
bias: Some(Param::from(Tensor::from_data_devauto(bias))),
weight: Param::from(Tensor::from(weight)),
bias: Some(Param::from(Tensor::from(bias))),
};
nn::LinearConfig::new(6, 6).init_with(record)

View File

@ -360,12 +360,12 @@ mod tests {
]),
Data::from([0.5, 0.5, 0.5, 0.5, 0.5, 0.5]),
);
let x_1 = Tensor::from_floats_devauto([
let x_1 = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
])
.require_grad();
let x_2 = Tensor::from_floats_devauto([
let x_2 = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -428,12 +428,12 @@ mod tests {
]),
Data::from([-0.3905, 0.0884, -0.0970, 0.1176, 0.1366, 0.0130]),
);
let x_1 = Tensor::from_floats_devauto([
let x_1 = Tensor::from([
[0.6294, 0.0940, 0.8176, 0.8824, 0.5228, 0.4310],
[0.7152, 0.9559, 0.7893, 0.5684, 0.5939, 0.8883],
])
.require_grad();
let x_2 = Tensor::from_floats_devauto([
let x_2 = Tensor::from([
[0.8491, 0.2108, 0.8939, 0.4433, 0.5527, 0.2528],
[0.3270, 0.0412, 0.5538, 0.9605, 0.3195, 0.9085],
])
@ -497,8 +497,8 @@ mod tests {
bias: Data<f32, 1>,
) -> nn::Linear<TestAutodiffBackend> {
let record = nn::LinearRecord {
weight: Param::from(Tensor::from_data_devauto(weight)),
bias: Some(Param::from(Tensor::from_data_devauto(bias))),
weight: Param::from(Tensor::from(weight)),
bias: Some(Param::from(Tensor::from(bias))),
};
nn::LinearConfig::new(6, 6).init_with(record)

View File

@ -97,7 +97,7 @@ impl<B: Backend, const D: usize> Record for Tensor<B, D> {
}
fn from_item<S: PrecisionSettings>(item: Self::Item<S>) -> Self {
Tensor::from_data_devauto(item.data.convert::<B::FloatElem>())
Tensor::from(item.data.convert::<B::FloatElem>())
}
}
@ -113,7 +113,7 @@ impl<B: Backend, const D: usize> Record for Tensor<B, D, Int> {
}
fn from_item<S: PrecisionSettings>(item: Self::Item<S>) -> Self {
Tensor::from_data_devauto(item.data.convert())
Tensor::from(item.data.convert())
}
}
@ -129,6 +129,6 @@ impl<B: Backend, const D: usize> Record for Tensor<B, D, Bool> {
}
fn from_item<S: PrecisionSettings>(item: Self::Item<S>) -> Self {
Tensor::from_data_devauto(item.data)
Tensor::from(item.data)
}
}

View File

@ -70,7 +70,7 @@ mod tests {
let model: add::Model<Backend> = add::Model::default();
// Run the model
let input = Tensor::<Backend, 4>::from_floats_devauto([[[[1., 2., 3., 4.]]]]);
let input = Tensor::<Backend, 4>::from([[[[1., 2., 3., 4.]]]]);
let scalar = 2f64;
let output = model.forward(input, scalar);
let expected = Data::from([[[[9., 10., 11., 12.]]]]);
@ -84,7 +84,7 @@ mod tests {
let model: add_int::Model<Backend> = add_int::Model::default();
// Run the model
let input = Tensor::<Backend, 4, Int>::from_ints_devauto([[[[1, 2, 3, 4]]]]);
let input = Tensor::<Backend, 4, Int>::from([[[[1, 2, 3, 4]]]]);
let scalar = 2;
let output = model.forward(input, scalar);
let expected = Data::from([[[[9, 11, 13, 15]]]]);
@ -98,7 +98,7 @@ mod tests {
let model: sub::Model<Backend> = sub::Model::default();
// Run the model
let input = Tensor::<Backend, 4>::from_floats_devauto([[[[1., 2., 3., 4.]]]]);
let input = Tensor::<Backend, 4>::from([[[[1., 2., 3., 4.]]]]);
let scalar = 3.0f64;
let output = model.forward(input, scalar);
let expected = Data::from([[[[6., 7., 8., 9.]]]]);
@ -112,7 +112,7 @@ mod tests {
let model: sub_int::Model<Backend> = sub_int::Model::default();
// Run the model
let input = Tensor::<Backend, 4, Int>::from_ints_devauto([[[[1, 2, 3, 4]]]]);
let input = Tensor::<Backend, 4, Int>::from([[[[1, 2, 3, 4]]]]);
let scalar = 3;
let output = model.forward(input, scalar);
let expected = Data::from([[[[6, 6, 6, 6]]]]);
@ -125,7 +125,7 @@ mod tests {
let model: mul::Model<Backend> = mul::Model::default();
// Run the model
let input = Tensor::<Backend, 4>::from_floats_devauto([[[[1., 2., 3., 4.]]]]);
let input = Tensor::<Backend, 4>::from([[[[1., 2., 3., 4.]]]]);
let scalar = 6.0f64;
let output = model.forward(input, scalar);
let expected = Data::from([[[[126., 252., 378., 504.]]]]);
@ -250,10 +250,9 @@ mod tests {
fn erf() {
let model: erf::Model<Backend> = erf::Model::default();
let input = Tensor::<Backend, 4>::from_data_devauto([[[[1.0, 2.0, 3.0, 4.0]]]]);
let input = Tensor::<Backend, 4>::from([[[[1.0, 2.0, 3.0, 4.0]]]]);
let output = model.forward(input);
let expected =
Tensor::<Backend, 4>::from_data_devauto([[[[0.8427, 0.9953, 1.0000, 1.0000]]]]);
let expected = Tensor::<Backend, 4>::from([[[[0.8427, 0.9953, 1.0000, 1.0000]]]]);
output.to_data().assert_approx_eq(&expected.to_data(), 4);
}
@ -264,8 +263,8 @@ mod tests {
let model: gather::Model<Backend> = gather::Model::default();
// Run the model
let input = Tensor::<Backend, 2>::from_floats_devauto([[1., 2.], [3., 4.]]);
let index = Tensor::<Backend, 2, Int>::from_ints_devauto([[0, 0], [1, 0]]);
let input = Tensor::<Backend, 2>::from([[1., 2.], [3., 4.]]);
let index = Tensor::<Backend, 2, Int>::from([[0, 0], [1, 0]]);
let output = model.forward(input, index);
let expected = Data::from([[1., 1.], [4., 3.]]);
@ -550,7 +549,7 @@ mod tests {
let model: equal::Model<Backend> = equal::Model::default();
// Run the model
let input = Tensor::<Backend, 4>::from_floats_devauto([[[[1., 1., 1., 1.]]]]);
let input = Tensor::<Backend, 4>::from([[[[1., 1., 1., 1.]]]]);
let scalar = 2f64;
let (tensor_out, scalar_out) = model.forward(input, scalar);
@ -791,7 +790,7 @@ mod tests {
fn test_model_creation_with_a_default_device() {
let model: neg::Model<Backend> = neg::Model::new_devauto();
let input1 = Tensor::<Backend, 4>::from_floats_devauto([[[[1.0, 4.0, 9.0, 25.0]]]]);
let input1 = Tensor::<Backend, 4>::from([[[[1.0, 4.0, 9.0, 25.0]]]]);
let input2 = 99f64;
let (output1, output2) = model.forward(input1, input2);

View File

@ -81,19 +81,19 @@ macro_rules! batch_norm_serialize {
BatchNormRecord {
gamma: Param::new(
ParamId::new(),
Tensor::from_data_devauto($self.gamma.clone().convert()),
Tensor::from($self.gamma.clone().convert()),
),
beta: Param::new(
ParamId::new(),
Tensor::from_data_devauto($self.beta.clone().convert()),
Tensor::from($self.beta.clone().convert()),
),
running_mean: Param::new(
ParamId::new(),
Tensor::from_data_devauto($self.running_mean.clone().convert()),
Tensor::from($self.running_mean.clone().convert()),
),
running_var: Param::new(
ParamId::new(),
Tensor::from_data_devauto($self.running_var.clone().convert()),
Tensor::from($self.running_var.clone().convert()),
),
epsilon: ConstantRecord::new(),
momentum: ConstantRecord::new(),
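The serialization macro above rebuilds each batch-norm parameter with the same three-step shape. A minimal standalone sketch, assuming `Param` and `ParamId` are reachable at `burn::module` (the helper name `param_from_data` is invented for illustration):

use burn::module::{Param, ParamId};
use burn_tensor::{backend::Backend, Data, Tensor};

// Mirrors each BatchNormRecord field: a fresh parameter id wrapping a
// tensor built with `Tensor::from` instead of the removed
// `from_data_devauto`.
fn param_from_data<B: Backend, const D: usize>(data: Data<f32, D>) -> Param<Tensor<B, D>> {
    Param::new(ParamId::new(), Tensor::from(data.convert::<B::FloatElem>()))
}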

View File

@ -93,14 +93,12 @@ impl<PS: PrecisionSettings> NodeCodegen<PS> for Conv1dNode<PS> {
let record = Conv1dRecord::<SerializationBackend> {
weight: Param::new(
ParamId::new(),
Tensor::from_data_devauto(self.data_weights.clone().convert()),
Tensor::from(self.data_weights.clone().convert()),
),
bias: self.data_bias.as_ref().map(|bias| {
Param::new(
ParamId::new(),
Tensor::from_data_devauto(bias.clone().convert()),
)
}),
bias: self
.data_bias
.as_ref()
.map(|bias| Param::new(ParamId::new(), Tensor::from(bias.clone().convert()))),
stride: ConstantRecord::new(),
kernel_size: ConstantRecord::new(),
dilation: ConstantRecord::new(),

View File

@ -92,14 +92,12 @@ impl<PS: PrecisionSettings> NodeCodegen<PS> for Conv2dNode<PS> {
let record = Conv2dRecord::<SerializationBackend> {
weight: Param::new(
ParamId::new(),
Tensor::from_data_devauto(self.data_weights.clone().convert()),
Tensor::from(self.data_weights.clone().convert()),
),
bias: self.data_bias.as_ref().map(|bias| {
Param::new(
ParamId::new(),
Tensor::from_data_devauto(bias.clone().convert()),
)
}),
bias: self
.data_bias
.as_ref()
.map(|bias| Param::new(ParamId::new(), Tensor::from(bias.clone().convert()))),
stride: [ConstantRecord::new(); 2],
kernel_size: [ConstantRecord::new(); 2],
dilation: [ConstantRecord::new(); 2],

View File

@ -92,14 +92,12 @@ impl<PS: PrecisionSettings> NodeCodegen<PS> for ConvTranspose2dNode<PS> {
let record = ConvTranspose2dRecord::<SerializationBackend> {
weight: Param::new(
ParamId::new(),
Tensor::from_data_devauto(self.data_weights.clone().convert()),
Tensor::from(self.data_weights.clone().convert()),
),
bias: self.data_bias.as_ref().map(|bias| {
Param::new(
ParamId::new(),
Tensor::from_data_devauto(bias.clone().convert()),
)
}),
bias: self
.data_bias
.as_ref()
.map(|bias| Param::new(ParamId::new(), Tensor::from(bias.clone().convert()))),
stride: [ConstantRecord::new(); 2],
kernel_size: [ConstantRecord::new(); 2],
dilation: [ConstantRecord::new(); 2],

View File

@ -85,14 +85,12 @@ impl<PS: PrecisionSettings> NodeCodegen<PS> for LinearNode<PS> {
let record = LinearRecord::<SerializationBackend> {
weight: Param::new(
ParamId::new(),
Tensor::from_data_devauto(self.data_weights.clone().convert()),
Tensor::from(self.data_weights.clone().convert()),
),
bias: self.data_bias.as_ref().map(|bias| {
Param::new(
ParamId::new(),
Tensor::from_data_devauto(bias.clone().convert()),
)
}),
bias: self
.data_bias
.as_ref()
.map(|bias| Param::new(ParamId::new(), Tensor::from(bias.clone().convert()))),
};
let item = Record::into_item::<PS>(record);
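The Conv1d, Conv2d, ConvTranspose2d, and Linear nodes all condense the optional bias from a multi-line closure into a single `map`. A hedged standalone version (the function name and the `f32` element type are assumptions for illustration):

use burn::module::{Param, ParamId};
use burn_tensor::{backend::Backend, Data, Tensor};

// Each optional bias becomes a parameter in one expression.
fn bias_param<B: Backend>(data_bias: Option<&Data<f32, 1>>) -> Option<Param<Tensor<B, 1>>> {
    data_bias
        .map(|bias| Param::new(ParamId::new(), Tensor::from(bias.clone().convert::<B::FloatElem>())))
}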

View File

@ -231,7 +231,7 @@ impl UnaryNode {
// TODO: Implement this after tensor Int is implemented (@antimora 8/2/2023)
// TODO: If the input is scalar and the output type is a tensor,
// we should generate another code block. (@antimora 8/4/2023)
// Tensor::from_data_devauto(Data::from([#input]).convert()).unsqueeze();
// Tensor::from(Data::from([#input]).convert()).unsqueeze();
todo!()
}

View File

@ -282,7 +282,7 @@ mod tests {
#[test]
fn should_not_update_inplace_after_reshape() {
let tensor_1 = Tensor::<LibTorch<f32>, 1>::from_floats_devauto([4.0, 4.0]);
let tensor_1 = Tensor::<LibTorch<f32>, 1>::from([4.0, 4.0]);
let tensor_2 = tensor_1.clone();
let tensor_3 = tensor_2.reshape([1, 2]).add_scalar(2.0);
@ -292,7 +292,7 @@ mod tests {
#[test]
fn should_not_update_inplace_after_slice() {
let tensor_1 = Tensor::<LibTorch<f32>, 1>::from_floats_devauto([4.0, 4.0]);
let tensor_1 = Tensor::<LibTorch<f32>, 1>::from([4.0, 4.0]);
let tensor_2 = tensor_1.clone();
let tensor_3 = tensor_2.slice([0..2]).add_scalar(2.0);

View File

@ -809,7 +809,7 @@ where
/// use burn_tensor::{Tensor, T};
///
/// fn example<B: Backend>() {
/// let tensor = Tensor::<B, 2>::from_floats_devauto([[1.0, 2.0], [3.0, 4.0]]);
/// let tensor = Tensor::<B, 2>::from([[1.0, 2.0], [3.0, 4.0]]);
/// let transposed = tensor^T;
/// }
/// ```

View File

@ -136,7 +136,7 @@ where
/// use burn_tensor::Tensor;
///
/// fn example<B: Backend>() {
/// let float_tensor = Tensor::<B, 1>::from_floats_devauto([1.0, 2.0]);
/// let float_tensor = Tensor::<B, 1>::from([1.0, 2.0]);
/// let int_tensor = float_tensor.int();
/// }
/// ```

View File

@ -508,7 +508,7 @@ where
/// use burn_tensor::{Int, Tensor};
///
/// fn example<B: Backend>() {
/// let tensor = Tensor::<B, 2, Int>::from_ints_devauto([
/// let tensor = Tensor::<B, 2, Int>::from([
/// [1, 2, 3],
/// [4, 5, 6],
/// [7, 8, 9]
@ -535,7 +535,7 @@ where
/// use burn_tensor::{Int, Tensor};
///
/// fn example<B: Backend>() {
/// let tensor = Tensor::<B, 2, Int>::from_ints_devauto([
/// let tensor = Tensor::<B, 2, Int>::from([
/// [1, 2, 3],
/// [4, 5, 6],
/// [7, 8, 9]

View File

@ -45,7 +45,7 @@ mod tests {
}
fn run(&self, args: &Self::Args, inplace: bool) -> Data<f32, 2> {
let lhs = TestTensor::from_data_devauto(args.clone().convert());
let lhs = TestTensor::from(args.clone().convert());
if inplace {
$ops(lhs).into_data().convert()
@ -83,8 +83,8 @@ mod tests {
}
fn run(&self, (lhs_arg, rhs_arg): &Self::Args, inplace: bool) -> Data<f32, 2> {
let lhs = TestTensor::from_data_devauto(lhs_arg.clone().convert());
let rhs = TestTensor::from_data_devauto(rhs_arg.clone().convert());
let lhs = TestTensor::from(lhs_arg.clone().convert());
let rhs = TestTensor::from(rhs_arg.clone().convert());
if inplace {
$ops(lhs, rhs).into_data().convert()
@ -119,7 +119,7 @@ mod tests {
}
fn run(&self, args: &Self::Args, inplace: bool) -> Data<f32, 2> {
let lhs = TestTensorInt::from_data_devauto(args.clone().convert());
let lhs = TestTensorInt::from(args.clone().convert());
if inplace {
$ops(lhs).into_data().convert()
@ -157,8 +157,8 @@ mod tests {
}
fn run(&self, (lhs_arg, rhs_arg): &Self::Args, inplace: bool) -> Data<f32, 2> {
let lhs = TestTensorInt::from_data_devauto(lhs_arg.clone().convert());
let rhs = TestTensorInt::from_data_devauto(rhs_arg.clone().convert());
let lhs = TestTensorInt::from(lhs_arg.clone().convert());
let rhs = TestTensorInt::from(rhs_arg.clone().convert());
if inplace {
$ops(lhs, rhs).into_data().convert()
@ -355,7 +355,7 @@ mod tests {
clone_invariance_test!(
unary: Select,
ops_float: |tensor: TestTensor<2>| {
let indices = TestTensorInt::from_ints_devauto([1, 2, 0, 5]);
let indices = TestTensorInt::from([1, 2, 0, 5]);
tensor.select(0, indices)
}
);
@ -483,7 +483,7 @@ mod tests {
clone_invariance_test!(
binary: SelectAssign,
ops_float: |tensor: TestTensor<2>, values: TestTensor<2>| {
let indices = TestTensorInt::from_ints_devauto([1, 2, 0, 5]);
let indices = TestTensorInt::from([1, 2, 0, 5]);
let values = values.select(0, indices.clone());
tensor.select_assign(0, indices, values)
}
@ -635,7 +635,7 @@ mod tests {
clone_invariance_test!(
unary: Select,
ops_int: |tensor: TestTensorInt<2>| {
let indices = TestTensorInt::from_ints_devauto([1, 2, 0, 5]);
let indices = TestTensorInt::from([1, 2, 0, 5]);
tensor.select(0, indices)
}
);
@ -717,7 +717,7 @@ mod tests {
clone_invariance_test!(
binary: SelectAssign,
ops_int: |tensor: TestTensorInt<2>, values: TestTensorInt<2>| {
let indices = TestTensorInt::from_ints_devauto([1, 2, 0, 5]);
let indices = TestTensorInt::from([1, 2, 0, 5]);
let values = values.select(0, indices.clone());
tensor.select_assign(0, indices, values)
}
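For readers skimming the macro-generated harness above: a clone-invariance test asserts that an op applied to a tensor and to its clone produces identical data, so cloning can never change results even when a backend runs the op in place. A hedged unrolled sketch (the choice of `exp` is arbitrary):

use burn_tensor::{backend::Backend, Tensor};

fn assert_clone_invariant<B: Backend>(tensor: Tensor<B, 2>) {
    // The cloned path and the possibly in-place path must agree.
    let cloned = tensor.clone().exp().into_data();
    let inplace = tensor.exp().into_data();
    assert_eq!(cloned, inplace);
}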

View File

@ -56,7 +56,7 @@ mod tests {
impl AdaptiveAvgPool1dTestCase {
fn assert_output(self, y: TestTensor<3>) {
let shape_x = Shape::new([self.batch_size, self.channels, self.length]);
let x = TestTensor::from_data_devauto(
let x = TestTensor::from(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
.reshape(shape_x)
.into_data()

View File

@ -79,7 +79,7 @@ mod tests {
length: 4,
};
test.assert_output(TestTensor::from_floats_devauto([
test.assert_output(TestTensor::from([
[[171., 294.], [415., 781.], [659., 1268.], [903., 1755.]],
[[495., 726.], [1387., 2185.], [2279., 3644.], [3171., 5103.]],
]));
@ -105,18 +105,18 @@ mod tests {
self.channels_in / self.groups,
self.kernel_size,
]);
let weight = TestTensor::from_data_devauto(
let weight = TestTensor::from(
TestTensorInt::arange_devauto(0..shape_weight.num_elements())
.reshape(shape_weight)
.into_data()
.convert(),
);
let bias = TestTensor::from_data_devauto(
let bias = TestTensor::from(
TestTensorInt::arange_devauto(0..self.channels_out)
.into_data()
.convert(),
);
let x = TestTensor::from_data_devauto(
let x = TestTensor::from(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
.reshape(shape_x)
.into_data()

View File

@ -83,7 +83,7 @@ mod tests {
length: 4,
};
test.assert_output(TestTensor::from_floats_devauto([[
test.assert_output(TestTensor::from([[
[0., 1., 4., 7.],
[32., 59., 71., 59.],
]]));
@ -110,18 +110,18 @@ mod tests {
self.channels_out / self.groups,
self.kernel_size,
]);
let weights = TestTensor::from_data_devauto(
let weights = TestTensor::from(
TestTensorInt::arange_devauto(0..shape_weights.num_elements())
.reshape(shape_weights)
.into_data()
.convert(),
);
let bias = TestTensor::from_data_devauto(
let bias = TestTensor::from(
TestTensorInt::arange_devauto(0..self.channels_out)
.into_data()
.convert(),
);
let x = TestTensor::from_data_devauto(
let x = TestTensor::from(
TestTensorInt::arange_devauto(0..shape_x.num_elements())
.reshape(shape_x)
.into_data()

View File

@ -5,8 +5,8 @@ mod tests {
use burn_tensor::{Bool, Data, Int, Tensor};
#[test]
fn should_support_cat_ops_2d_dim0() {
let tensor_1 = TestTensor::from_data_devauto([[1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from_data_devauto([[4.0, 5.0, 6.0]]);
let tensor_1 = TestTensor::from([[1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from([[4.0, 5.0, 6.0]]);
let data_actual = TestTensor::cat(vec![tensor_1, tensor_2], 0).into_data();
@ -16,8 +16,8 @@ mod tests {
#[test]
fn should_support_cat_ops_int() {
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto([[1, 2, 3]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto([[4, 5, 6]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[1, 2, 3]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[4, 5, 6]]);
let data_actual = Tensor::cat(vec![tensor_1, tensor_2], 0).into_data();
@ -27,8 +27,8 @@ mod tests {
#[test]
fn should_support_cat_ops_bool() {
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from_data_devauto([[false, true, true]]);
let tensor_2 = Tensor::<TestBackend, 2, Bool>::from_data_devauto([[true, true, false]]);
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from([[false, true, true]]);
let tensor_2 = Tensor::<TestBackend, 2, Bool>::from([[true, true, false]]);
let data_actual = Tensor::cat(vec![tensor_1, tensor_2], 0).into_data();
@ -38,8 +38,8 @@ mod tests {
#[test]
fn should_support_cat_ops_2d_dim1() {
let tensor_1 = TestTensor::from_data_devauto([[1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from_data_devauto([[4.0, 5.0, 6.0]]);
let tensor_1 = TestTensor::from([[1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from([[4.0, 5.0, 6.0]]);
let data_actual = TestTensor::cat(vec![tensor_1, tensor_2], 1).into_data();
@ -49,8 +49,8 @@ mod tests {
#[test]
fn should_support_cat_ops_3d() {
let tensor_1 = TestTensor::from_data_devauto([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2 = TestTensor::from_data_devauto([[[4.0, 5.0, 6.0]]]);
let tensor_1 = TestTensor::from([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2 = TestTensor::from([[[4.0, 5.0, 6.0]]]);
let data_actual = TestTensor::cat(vec![tensor_1, tensor_2], 0).into_data();
@ -61,8 +61,8 @@ mod tests {
#[test]
#[should_panic]
fn should_panic_when_dimensions_are_not_the_same() {
let tensor_1 = TestTensor::from_data_devauto([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from_data_devauto([[4.0, 5.0]]);
let tensor_1 = TestTensor::from([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]);
let tensor_2 = TestTensor::from([[4.0, 5.0]]);
TestTensor::cat(vec![tensor_1, tensor_2], 0).into_data();
}
@ -77,8 +77,8 @@ mod tests {
#[test]
#[should_panic]
fn should_panic_when_cat_exceeds_dimension() {
let tensor_1 = TestTensor::from_data_devauto([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2 = TestTensor::from_data_devauto([[[4.0, 5.0, 6.0]]]);
let tensor_1 = TestTensor::from([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2 = TestTensor::from([[[4.0, 5.0, 6.0]]]);
TestTensor::cat(vec![tensor_1, tensor_2], 3).into_data();
}
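A compact reminder of the `cat` semantics these tests pin down, in the same doc-test style (the shapes in the comments follow from the inputs):

use burn_tensor::{backend::Backend, Tensor};

fn cat_example<B: Backend>() {
    let t1 = Tensor::<B, 2>::from([[1.0, 2.0, 3.0]]);
    let t2 = Tensor::<B, 2>::from([[4.0, 5.0, 6.0]]);
    // dim 0 stacks rows: shape [2, 3]; dim 1 extends columns: shape [1, 6].
    let _rows = Tensor::cat(vec![t1.clone(), t2.clone()], 0);
    let _cols = Tensor::cat(vec![t1, t2], 1);
}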

View File

@ -75,7 +75,7 @@ mod tests {
#[test]
fn test_chunk_multi_dimension() {
let tensors: Vec<Tensor<TestBackend, 2, Int>> =
Tensor::from_data_devauto(Data::from([[0, 1, 2, 3]])).chunk(2, 1);
Tensor::from(Data::from([[0, 1, 2, 3]])).chunk(2, 1);
assert_eq!(tensors.len(), 2);
let expected = vec![Data::from([[0, 1]]), Data::from([[2, 3]])];

View File

@ -6,8 +6,7 @@ mod tests {
#[test]
fn clamp_min() {
// test float tensor
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.clamp_min(2.0).into_data();
@ -15,8 +14,7 @@ mod tests {
assert_eq!(data_expected, data_actual);
// test int tensor
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let data_actual = tensor.clamp_min(2).into_data();
let data_expected = Data::from([[2, 2, 2], [3, 4, 5]]);
assert_eq!(data_expected, data_actual);
@ -25,8 +23,7 @@ mod tests {
#[test]
fn clamp_max() {
// test float tensor
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.clamp_max(2.0).into_data();
@ -34,8 +31,7 @@ mod tests {
assert_eq!(data_expected, data_actual);
// test int tensor
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let data_actual = tensor.clamp_max(4).into_data();
let data_expected = Data::from([[0, 1, 2], [3, 4, 4]]);
assert_eq!(data_expected, data_actual);
@ -44,15 +40,13 @@ mod tests {
#[test]
fn clamp_min_max() {
// test float tensor
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.clamp(1.0, 4.0).into_data();
let data_expected = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 4.0]]);
assert_eq!(data_expected, data_actual);
// test int tensor
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let data_actual = tensor.clamp(1, 4).into_data();
let data_expected = Data::from([[1, 1, 2], [3, 4, 4]]);
assert_eq!(data_expected, data_actual);

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_cos_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.cos().into_data();

View File

@ -5,7 +5,7 @@ mod tests {
#[test]
fn should_support_zeros_like() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -20,7 +20,7 @@ mod tests {
#[test]
fn should_support_ones_like() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -35,7 +35,7 @@ mod tests {
#[test]
fn should_support_randoms_like() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);

View File

@ -5,10 +5,8 @@ mod tests {
#[test]
fn should_support_div_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output = tensor_1 / tensor_2;
@ -19,10 +17,8 @@ mod tests {
#[test]
fn test_div_broadcast() {
let data_1 = Data::from([[0.0, 1.0, 2.0]]);
let data_2 = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = (tensor_1 / tensor_2).into_data();
@ -32,9 +28,8 @@ mod tests {
#[test]
fn should_support_div_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output = tensor / scalar;
@ -45,10 +40,8 @@ mod tests {
#[test]
fn should_support_div_ops_int() {
let data_1 = Data::from([[0, 1, 2], [3, 4, 5]]);
let data_2 = Data::from([[1, 1, 2], [1, 1, 2]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[1, 1, 2], [1, 1, 2]]);
let output = tensor_1 / tensor_2;
@ -59,10 +52,8 @@ mod tests {
#[test]
fn test_div_broadcast_int() {
let data_1 = Data::from([[0, 1, 2]]);
let data_2 = Data::from([[1, 1, 2], [3, 4, 5]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[1, 1, 2], [3, 4, 5]]);
let data_actual = (tensor_1 / tensor_2).into_data();
@ -72,9 +63,8 @@ mod tests {
#[test]
fn should_support_div_scalar_ops_int() {
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let scalar = 2;
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let output = tensor / scalar;

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_erf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.erf().into_data();
@ -16,8 +15,7 @@ mod tests {
#[test]
fn should_support_erf_ops_with_negative_number() {
let data = Data::from([[-0.056, -0.043, -0.089], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[-0.056, -0.043, -0.089], [3.0, 4.0, 5.0]]);
let data_actual = tensor.erf().into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_exp_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.exp().into_data();

View File

@ -5,8 +5,8 @@ mod tests {
#[test]
fn should_gather_1d_dim0() {
let tensor = TestTensor::from_floats_devauto([0.0, 1.0, 2.0]);
let indices = TestTensorInt::from_ints_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensor::from([0.0, 1.0, 2.0]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.gather(0, indices);
@ -15,8 +15,8 @@ mod tests {
#[test]
fn should_gather_1d_dim0_int() {
let tensor = TestTensorInt::from_ints_devauto([5, 6, 7]);
let indices = TestTensorInt::from_ints_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensorInt::from([5, 6, 7]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.gather(0, indices);
@ -25,8 +25,8 @@ mod tests {
#[test]
fn should_gather_2d_dim0() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_ints_devauto([[0, 1, 0], [1, 0, 1]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([[0, 1, 0], [1, 0, 1]]);
let output = tensor.gather(0, indices);
@ -38,8 +38,8 @@ mod tests {
#[test]
fn should_gather_2d_dim1() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_ints_devauto([[2, 1, 0, 0], [2, 0, 1, 2]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([[2, 1, 0, 0], [2, 0, 1, 2]]);
let output = tensor.gather(1, indices);
@ -51,12 +51,11 @@ mod tests {
#[test]
fn should_gather_3d_dim1() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let indices =
TestTensorInt::from_ints_devauto([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1], [0, 1, 1]]]);
let indices = TestTensorInt::from([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1], [0, 1, 1]]]);
let output = tensor.gather(1, indices);
@ -71,8 +70,8 @@ mod tests {
#[test]
fn should_gather_2d_only_1dim() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_ints_devauto([[1, 2]]).reshape([2, 1]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([[1, 2]]).reshape([2, 1]);
let output = tensor.gather(1, indices);
@ -81,9 +80,9 @@ mod tests {
#[test]
fn should_scatter_1d() {
let tensor = TestTensor::from_floats_devauto([0.0, 0.0, 0.0]);
let values = TestTensor::from_floats_devauto([5.0, 4.0, 3.0]);
let indices = TestTensorInt::from_ints_devauto([1, 0, 2]);
let tensor = TestTensor::from([0.0, 0.0, 0.0]);
let values = TestTensor::from([5.0, 4.0, 3.0]);
let indices = TestTensorInt::from([1, 0, 2]);
let output = tensor.scatter(0, indices, values);
@ -92,9 +91,9 @@ mod tests {
#[test]
fn should_scatter_1d_int() {
let tensor = TestTensorInt::from_ints_devauto([0, 0, 0]);
let values = TestTensorInt::from_ints_devauto([5, 4, 3]);
let indices = TestTensorInt::from_ints_devauto([1, 0, 2]);
let tensor = TestTensorInt::from([0, 0, 0]);
let values = TestTensorInt::from([5, 4, 3]);
let indices = TestTensorInt::from([1, 0, 2]);
let output = tensor.scatter(0, indices, values);
@ -103,9 +102,9 @@ mod tests {
#[test]
fn should_scatter_2d_dim0() {
let tensor = TestTensor::from_floats_devauto([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from_floats_devauto([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from_ints_devauto([[1, 0, 1], [1, 1, 0]]);
let tensor = TestTensor::from([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from([[1, 0, 1], [1, 1, 0]]);
let output = tensor.scatter(0, indices, values);
@ -117,9 +116,9 @@ mod tests {
#[test]
fn should_scatter_2d_dim1() {
let tensor = TestTensor::from_floats_devauto([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from_floats_devauto([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from_ints_devauto([[1, 0, 2], [1, 2, 0]]);
let tensor = TestTensor::from([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from([[1, 0, 2], [1, 2, 0]]);
let output = tensor.scatter(1, indices, values);
@ -131,16 +130,15 @@ mod tests {
#[test]
fn should_scatter_3d_dim1() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let values = TestTensor::from_floats_devauto([
let values = TestTensor::from([
[[12.0, 13.0, 14.0], [15.0, 16.0, 17.0]],
[[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],
]);
let indices =
TestTensorInt::from_ints_devauto([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1], [0, 1, 1]]]);
let indices = TestTensorInt::from([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1], [0, 1, 1]]]);
let output = tensor.scatter(1, indices, values);
@ -155,9 +153,9 @@ mod tests {
#[test]
fn should_scatter_2d_dim1_diff_shape() {
let tensor = TestTensor::from_floats_devauto([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from_floats_devauto([[1.0], [4.0]]);
let indices = TestTensorInt::from_ints_devauto([[1], [2]]);
let tensor = TestTensor::from([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);
let values = TestTensor::from([[1.0], [4.0]]);
let indices = TestTensorInt::from([[1], [2]]);
let output = tensor.scatter(1, indices, values);
@ -170,9 +168,9 @@ mod tests {
#[test]
#[should_panic]
fn scatter_should_panic_on_mismatch_of_shapes() {
let tensor = TestTensor::from_floats_devauto([0.0, 0.0, 0.0]);
let values = TestTensor::from_floats_devauto([5.0, 4.0]);
let indices = TestTensorInt::from_ints_devauto([1, 0, 2]);
let tensor = TestTensor::from([0.0, 0.0, 0.0]);
let values = TestTensor::from([5.0, 4.0]);
let indices = TestTensorInt::from([1, 0, 2]);
tensor.scatter(0, indices, values);
}
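The gather/scatter pair above is easiest to read with the index rule spelled out. A hedged 1-d sketch; the expected values in the comments follow from that rule, and `scatter` accumulates rather than overwrites, as far as these tests imply:

use burn_tensor::{backend::Backend, Int, Tensor};

fn gather_scatter_example<B: Backend>() {
    // gather: output[i] = tensor[indices[i]] => [1.0, 1.0, 0.0, 1.0, 2.0]
    let tensor = Tensor::<B, 1>::from([0.0, 1.0, 2.0]);
    let indices = Tensor::<B, 1, Int>::from([1, 1, 0, 1, 2]);
    let _gathered = tensor.gather(0, indices);

    // scatter: output[indices[i]] += values[i] => [4.0, 5.0, 3.0]
    let zeros = Tensor::<B, 1>::from([0.0, 0.0, 0.0]);
    let values = Tensor::<B, 1>::from([5.0, 4.0, 3.0]);
    let idx = Tensor::<B, 1, Int>::from([1, 0, 2]);
    let _scattered = zeros.scatter(0, idx, values);
}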

View File

@ -5,10 +5,9 @@ mod test {
#[test]
fn test_1d_iter_last_item() {
let data = [1, 2, 3, 4];
let tensor = Tensor::<TestBackend, 1, Int>::from_ints_devauto(data);
let tensor = Tensor::<TestBackend, 1, Int>::from([1, 2, 3, 4]);
assert_eq!(
Tensor::<TestBackend, 1, Int>::from_ints_devauto([4]).into_data(),
Tensor::<TestBackend, 1, Int>::from([4]).into_data(),
tensor.iter_dim(0).last().unwrap().into_data()
)
}
@ -21,24 +20,24 @@ mod test {
#[test]
fn test_transposed() {
let data = [
let tensor = Tensor::<TestBackend, 2>::from([
[1., 2., 3., 1., 2.],
[4., 5., 6., 1., 2.],
[7., 8., 9., 1., 2.],
];
let tensor = Tensor::<TestBackend, 2>::from_floats_devauto(data);
]);
let lhs = tensor.clone().slice([1..2, 0..5]);
let rhs = tensor.transpose().iter_dim(1).nth(1).unwrap();
assert_eq!(lhs.into_data().value, rhs.into_data().value);
}
fn test_iteration_over_low_dim() {
let data = [[
[1., 2., 3., 1., 2.],
[4., 5., 6., 1., 2.],
[7., 8., 9., 1., 2.],
]; 5];
let tensor = Tensor::<TestBackend, 3>::from_floats_devauto(data);
let tensor = Tensor::<TestBackend, 3>::from(
[[
[1., 2., 3., 1., 2.],
[4., 5., 6., 1., 2.],
[7., 8., 9., 1., 2.],
]; 5],
);
let lhs = tensor.iter_dim(2).nth(1).unwrap();
let rhs = Data::from([2., 5., 8.]);
assert_eq!(lhs.into_data().value, rhs.value);

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_log_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.log().into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_exp_log1p() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.log1p().into_data();

View File

@ -115,8 +115,8 @@ mod tests {
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let data_2 = Data::<f32, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from(data_2);
let data_actual_cloned = tensor_1.clone().equal(tensor_2.clone());
let data_actual_inplace = tensor_1.equal(tensor_2);
@ -132,7 +132,7 @@ mod tests {
E: Element,
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 2.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let data_actual_cloned = tensor_1.clone().equal_elem(2);
let data_actual_inplace = tensor_1.equal_elem(2);
@ -148,7 +148,7 @@ mod tests {
E: Element,
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let data_actual_cloned = tensor_1.clone().greater_elem(4);
let data_actual_inplace = tensor_1.greater_elem(4);
@ -164,7 +164,7 @@ mod tests {
E: Element,
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let data_actual_cloned = tensor_1.clone().greater_equal_elem(4.0);
let data_actual_inplace = tensor_1.greater_equal_elem(4.0);
@ -181,8 +181,8 @@ mod tests {
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let data_2 = Data::<f32, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from(data_2);
let data_actual_cloned = tensor_1.clone().greater(tensor_2.clone());
let data_actual_inplace = tensor_1.greater(tensor_2);
@ -199,8 +199,8 @@ mod tests {
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let data_2 = Data::<f32, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from(data_2);
let data_actual_cloned = tensor_1.clone().greater_equal(tensor_2.clone());
let data_actual_inplace = tensor_1.greater_equal(tensor_2);
@ -216,7 +216,7 @@ mod tests {
E: Element,
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let data_actual_cloned = tensor_1.clone().lower_elem(4.0);
let data_actual_inplace = tensor_1.lower_elem(4.0);
@ -232,7 +232,7 @@ mod tests {
E: Element,
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let data_actual_cloned = tensor_1.clone().lower_equal_elem(4.0);
let data_actual_inplace = tensor_1.lower_equal_elem(4.0);
@ -249,8 +249,8 @@ mod tests {
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let data_2 = Data::<f32, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from(data_2);
let data_actual_cloned = tensor_1.clone().lower(tensor_2.clone());
let data_actual_inplace = tensor_1.lower(tensor_2);
@ -267,8 +267,8 @@ mod tests {
{
let data_1 = Data::<f32, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]).convert();
let data_2 = Data::<f32, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]).convert();
let tensor_1 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, K>::from(data_1);
let tensor_2 = Tensor::<TestBackend, 2, K>::from(data_2);
let data_actual_cloned = tensor_1.clone().lower_equal(tensor_2.clone());
let data_actual_inplace = tensor_1.lower_equal(tensor_2);
@ -280,10 +280,10 @@ mod tests {
#[test]
fn should_support_bool_equal() {
let data_1 = Data::from([[false, true, true], [true, false, true]]);
let data_2 = Data::from([[false, false, true], [false, true, true]]);
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Bool>::from_data_devauto(data_2);
let tensor_1 =
Tensor::<TestBackend, 2, Bool>::from([[false, true, true], [true, false, true]]);
let tensor_2 =
Tensor::<TestBackend, 2, Bool>::from([[false, false, true], [false, true, true]]);
let data_actual_cloned = tensor_1.clone().equal(tensor_2.clone());
let data_actual_inplace = tensor_1.equal(tensor_2);
@ -296,7 +296,7 @@ mod tests {
#[test]
fn should_support_bool_not() {
let data_1 = Data::from([[false, true, true], [true, true, false]]);
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from_data_devauto(data_1);
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from(data_1);
let data_actual_cloned = tensor_1.clone().bool_not();
let data_actual_inplace = tensor_1.bool_not();
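A quick key to the comparison APIs covered above: the `*_elem` forms compare against a scalar, the plain forms compare two tensors elementwise, and all of them return boolean tensors:

use burn_tensor::{backend::Backend, Tensor};

fn comparison_example<B: Backend>() {
    let a = Tensor::<B, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
    let b = Tensor::<B, 2>::from([[1.0, 1.0, 1.0], [4.0, 3.0, 5.0]]);
    // [[false, true, false], [false, false, true]]
    let _eq = a.clone().equal(b);
    // [[false, false, false], [false, false, true]]
    let _gt = a.greater_elem(4.0);
}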

View File

@ -5,13 +5,9 @@ mod tests {
#[test]
fn should_support_mask_where_ops() {
let tensor = TestTensor::from_data_devauto([[1.0, 7.0], [2.0, 3.0]]);
let mask = Tensor::<TestBackend, 2, Bool>::from_bool_devauto(Data::from([
[true, false],
[false, true],
]));
let value =
Tensor::<TestBackend, 2>::from_data_devauto(Data::from([[1.8, 2.8], [3.8, 4.8]]));
let tensor = TestTensor::from([[1.0, 7.0], [2.0, 3.0]]);
let mask = Tensor::<TestBackend, 2, Bool>::from([[true, false], [false, true]]);
let value = Tensor::<TestBackend, 2>::from([[1.8, 2.8], [3.8, 4.8]]);
let data_actual = tensor.mask_where(mask, value).into_data();
@ -21,11 +17,8 @@ mod tests {
#[test]
fn should_support_mask_fill_ops() {
let tensor = TestTensor::from_data_devauto([[1.0, 7.0], [2.0, 3.0]]);
let mask = Tensor::<TestBackend, 2, Bool>::from_bool_devauto(Data::from([
[true, false],
[false, true],
]));
let tensor = TestTensor::from([[1.0, 7.0], [2.0, 3.0]]);
let mask = Tensor::<TestBackend, 2, Bool>::from([[true, false], [false, true]]);
let data_actual = tensor.mask_fill(mask, 2.0).to_data();
@ -35,13 +28,9 @@ mod tests {
#[test]
fn should_support_int_mask_where_ops() {
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto([[1, 7], [2, 3]]);
let mask = Tensor::<TestBackend, 2, Bool>::from_bool_devauto(Data::from([
[true, false],
[false, true],
]));
let value =
Tensor::<TestBackend, 2, Int>::from_data_devauto(Data::from([[8, 9], [10, 11]]));
let tensor = Tensor::<TestBackend, 2, Int>::from([[1, 7], [2, 3]]);
let mask = Tensor::<TestBackend, 2, Bool>::from([[true, false], [false, true]]);
let value = Tensor::<TestBackend, 2, Int>::from([[8, 9], [10, 11]]);
let data_actual = tensor.mask_where(mask, value).into_data();
@ -51,11 +40,8 @@ mod tests {
#[test]
fn should_support_int_mask_fill_ops() {
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto([[1, 7], [2, 3]]);
let mask = Tensor::<TestBackend, 2, Bool>::from_bool_devauto(Data::from([
[true, false],
[false, true],
]));
let tensor = Tensor::<TestBackend, 2, Int>::from([[1, 7], [2, 3]]);
let mask = Tensor::<TestBackend, 2, Bool>::from([[true, false], [false, true]]);
let data_actual = tensor.mask_fill(mask, 9).to_data();
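Both mask ops act where the mask is true: `mask_where` pulls the corresponding elements from `value`, while `mask_fill` writes a scalar. A hedged sketch reusing the data from the float test above:

use burn_tensor::{backend::Backend, Bool, Tensor};

fn mask_example<B: Backend>() {
    let tensor = Tensor::<B, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
    let mask = Tensor::<B, 2, Bool>::from([[true, false], [false, true]]);
    let value = Tensor::<B, 2>::from([[1.8, 2.8], [3.8, 4.8]]);
    // [[1.8, 7.0], [2.0, 4.8]]
    let _where = tensor.clone().mask_where(mask.clone(), value);
    // [[2.0, 7.0], [2.0, 2.0]]
    let _fill = tensor.mask_fill(mask, 2.0);
}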

View File

@ -5,8 +5,8 @@ mod tests {
#[test]
fn test_matmul_d2() {
let tensor_1 = TestTensor::from_floats_devauto([[1.0, 7.0], [2.0, 3.0], [1.0, 5.0]]);
let tensor_2 = TestTensor::from_floats_devauto([[4.0, 7.0, 5.0], [2.0, 3.0, 5.0]]);
let tensor_1 = TestTensor::from([[1.0, 7.0], [2.0, 3.0], [1.0, 5.0]]);
let tensor_2 = TestTensor::from([[4.0, 7.0, 5.0], [2.0, 3.0, 5.0]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -18,8 +18,8 @@ mod tests {
#[test]
fn test_matmul_d3() {
let tensor_1 = TestTensor::from_floats_devauto([[[1.0, 7.0], [2.0, 3.0]]]);
let tensor_2 = TestTensor::from_floats_devauto([[[4.0, 7.0], [2.0, 3.0]]]);
let tensor_1 = TestTensor::from([[[1.0, 7.0], [2.0, 3.0]]]);
let tensor_2 = TestTensor::from([[[4.0, 7.0], [2.0, 3.0]]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -31,9 +31,8 @@ mod tests {
#[test]
fn test_matmul_broadcast_1() {
let tensor_1 = TestTensor::from_floats_devauto([[[1.0, 7.0], [2.0, 3.0]]]);
let tensor_2 =
TestTensor::from_floats_devauto([[[4.0, 7.0], [2.0, 3.0]], [[2.0, 5.0], [6.0, 3.0]]]);
let tensor_1 = TestTensor::from([[[1.0, 7.0], [2.0, 3.0]]]);
let tensor_2 = TestTensor::from([[[4.0, 7.0], [2.0, 3.0]], [[2.0, 5.0], [6.0, 3.0]]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -45,8 +44,8 @@ mod tests {
#[test]
fn test_matmul_simple_1() {
let tensor_1 = TestTensor::from_floats_devauto([[5.0, 14.0], [14.0, 50.0]]);
let tensor_2 = TestTensor::from_floats_devauto([[3.0, 4.0, 5.0], [0.0, 1.0, 2.0]]);
let tensor_1 = TestTensor::from([[5.0, 14.0], [14.0, 50.0]]);
let tensor_2 = TestTensor::from([[3.0, 4.0, 5.0], [0.0, 1.0, 2.0]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -58,8 +57,8 @@ mod tests {
#[test]
fn test_matmul_simple_2() {
let tensor_1 = TestTensor::from_floats_devauto([[1.0, 2.0, 3.0, 4.0]]);
let tensor_2 = TestTensor::from_floats_devauto([[3.0], [4.0], [5.0], [6.0]]);
let tensor_1 = TestTensor::from([[1.0, 2.0, 3.0, 4.0]]);
let tensor_2 = TestTensor::from([[3.0], [4.0], [5.0], [6.0]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -68,14 +67,8 @@ mod tests {
#[test]
fn test_matmul_simple_3() {
let tensor_1 = TestTensor::from_floats_devauto([
[3., 3., 3.],
[4., 4., 4.],
[5., 5., 5.],
[6., 6., 6.],
]);
let tensor_2 =
TestTensor::from_floats_devauto([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]);
let tensor_1 = TestTensor::from([[3., 3., 3.], [4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]);
let tensor_2 = TestTensor::from([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]);
let tensor_3 = tensor_1.matmul(tensor_2);
@ -93,9 +86,8 @@ mod tests {
#[test]
#[should_panic]
fn should_panic_when_inner_dimensions_are_not_equal() {
let tensor_1 = TestTensor::from_floats_devauto([[3., 3.], [4., 4.], [5., 5.], [6., 6.]]);
let tensor_2 =
TestTensor::from_floats_devauto([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]);
let tensor_1 = TestTensor::from([[3., 3.], [4., 4.], [5., 5.], [6., 6.]]);
let tensor_2 = TestTensor::from([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]);
let tensor_3 = tensor_1.matmul(tensor_2);
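The broadcast case is the one non-obvious matmul above: a batch of one on the left is repeated against a batch of two on the right. A hedged sketch with the shapes annotated:

use burn_tensor::{backend::Backend, Tensor};

fn matmul_broadcast_example<B: Backend>() {
    let lhs = Tensor::<B, 3>::from([[[1.0, 7.0], [2.0, 3.0]]]); // [1, 2, 2]
    let rhs = Tensor::<B, 3>::from([[[4.0, 7.0], [2.0, 3.0]], [[2.0, 5.0], [6.0, 3.0]]]); // [2, 2, 2]
    // The leading batch dim broadcasts, so the result has shape [2, 2, 2].
    let _out = lhs.matmul(rhs);
}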

View File

@ -5,7 +5,7 @@ mod tests {
#[test]
fn test_max_dim_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.max_dim(1);
@ -15,7 +15,7 @@ mod tests {
#[test]
fn test_max_dim_with_indices_2d_with_dim_0th() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let (output_actual, index_actual) = tensor.max_dim_with_indices(0);
@ -28,7 +28,7 @@ mod tests {
#[test]
fn test_max_dim_with_indices_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let (output_actual, index_actual) = tensor.max_dim_with_indices(1);
@ -41,7 +41,7 @@ mod tests {
#[test]
fn test_min_dim_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.min_dim(1);
@ -51,7 +51,7 @@ mod tests {
#[test]
fn test_min_dim_with_indices_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let (output_actual, index_actual) = tensor.min_dim_with_indices(1);
@ -64,7 +64,7 @@ mod tests {
#[test]
fn test_sum_dim_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.clone().sum_dim(1);
@ -79,7 +79,7 @@ mod tests {
#[test]
fn test_mean_dim_2d() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.clone().mean_dim(1);
@ -94,7 +94,7 @@ mod tests {
#[test]
fn test_min_dim_2d_with_0th_dim() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.min_dim(0);
let output_expected = Data::from([[0., 1., 2.]]);
@ -103,7 +103,7 @@ mod tests {
#[test]
fn test_max_dim_2d_with_0th_dim() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output_actual = tensor.max_dim(0);
@ -113,7 +113,7 @@ mod tests {
#[test]
fn test_min_dim_with_indices_2d_with_0th_dim() {
let tensor = TestTensor::from_floats_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let (output_actual, index_actual) = tensor.min_dim_with_indices(0);
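These reductions keep the reduced dimension with size 1, which is why `min_dim(0)` on a `[2, 3]` tensor yields `[[0., 1., 2.]]` above. A short sketch:

use burn_tensor::{backend::Backend, Tensor};

fn reduce_dim_example<B: Backend>() {
    let tensor = Tensor::<B, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
    // The reduced dim stays with size 1: [2, 3] -> [2, 1].
    let _max = tensor.clone().max_dim(1); // [[2.0], [5.0]]
    // Reducing dim 0 instead: [2, 3] -> [1, 3].
    let _min = tensor.min_dim(0); // [[0.0, 1.0, 2.0]]
}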

View File

@ -5,10 +5,8 @@ mod tests {
#[test]
fn should_support_mul_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output = tensor_1 * tensor_2;
@ -19,10 +17,8 @@ mod tests {
#[test]
fn test_mul_broadcast() {
let data_1 = Data::from([[0.0, 1.0, 2.0]]);
let data_2 = Data::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);
let data_actual = (tensor_1 * tensor_2).into_data();
@ -32,10 +28,8 @@ mod tests {
#[test]
fn test_mul_broadcast_2_dims() {
let tensor_1: Tensor<TestBackend, 2> =
Tensor::from_data_devauto([0.0, 1.0, 2.0]).reshape([3, 1]);
let tensor_2: Tensor<TestBackend, 2> =
Tensor::from_data_devauto([3.0, 4.0, 5.0]).reshape([1, 3]);
let tensor_1: Tensor<TestBackend, 2> = Tensor::from([0.0, 1.0, 2.0]).reshape([3, 1]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from([3.0, 4.0, 5.0]).reshape([1, 3]);
let data_actual = (tensor_1 * tensor_2).into_data();
@ -45,9 +39,8 @@ mod tests {
#[test]
fn should_support_mul_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output = tensor * scalar;
@ -58,10 +51,8 @@ mod tests {
#[test]
fn should_support_mul_ops_int() {
let data_1 = Data::from([[0, 1, 2], [3, 4, 5]]);
let data_2 = Data::from([[0, 1, 2], [3, 4, 5]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let output = tensor_1 * tensor_2;
@ -72,10 +63,8 @@ mod tests {
#[test]
fn test_mul_broadcast_int() {
let data_1 = Data::from([[0, 1, 2]]);
let data_2 = Data::from([[3, 4, 5], [6, 7, 8]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[3, 4, 5], [6, 7, 8]]);
let data_actual = (tensor_1 * tensor_2).into_data();
@ -85,9 +74,8 @@ mod tests {
#[test]
fn should_support_mul_scalar_ops_int() {
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let scalar = 2;
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let output = tensor * scalar;

View File

@ -6,7 +6,7 @@ mod tests {
#[test]
fn test_narrow() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]));
Tensor::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]);
let output = tensor.clone().narrow(0, 0, 2);
assert_eq!(output.shape(), Shape::from([2, 3]));
@ -25,7 +25,7 @@ mod tests {
#[should_panic]
fn test_narrow_invalid_dim() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]));
Tensor::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]);
let output = tensor.narrow(2, 0, 2);
}
@ -34,7 +34,7 @@ mod tests {
#[should_panic]
fn test_narrow_invalid_start() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]));
Tensor::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]);
let output = tensor.narrow(0, 3, 2);
}
@ -43,7 +43,7 @@ mod tests {
#[should_panic]
fn test_narrow_invalid_zero_length() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]));
Tensor::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]);
let output = tensor.narrow(0, 1, 0);
}
@ -52,7 +52,7 @@ mod tests {
#[should_panic]
fn test_narrow_invalid_length() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]));
Tensor::from([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]);
let output = tensor.narrow(0, 0, 4);
}

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_neg_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.neg().into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_powf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.powf(0.71).into_data();
@ -16,8 +15,7 @@ mod tests {
#[test]
fn should_support_neg_power() {
let data = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.powf(-0.33).into_data();
@ -27,8 +25,7 @@ mod tests {
#[test]
fn should_support_neg_values_with_even_power() {
let data = Data::from([[0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
let data_actual = tensor.powf(4.0).into_data();
@ -38,8 +35,7 @@ mod tests {
#[test]
fn should_support_neg_values_with_odd_power() {
let data = Data::from([[0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
let data_actual = tensor.powf(3.0).into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_recip_ops() {
let data = Data::from([[0.5, 1.0, 2.0], [3.0, -4.0, -5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.5, 1.0, 2.0], [3.0, -4.0, -5.0]]);
let data_actual = tensor.recip().into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_repeat_ops() {
let data = Data::from([[0.0, 1.0, 2.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0]]);
let data_actual = tensor.repeat(0, 4).into_data();
@ -21,8 +20,7 @@ mod tests {
#[test]
fn should_support_bool_repeat_ops() {
let data = Data::from([[true, false, false]]);
let tensor = Tensor::<TestBackend, 2, Bool>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Bool>::from([[true, false, false]]);
let data_actual = tensor.repeat(0, 4).into_data();
@ -37,8 +35,7 @@ mod tests {
#[test]
fn should_support_int_repeat_ops() {
let data = Data::from([[0, 1, 2]]);
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2]]);
let data_actual = tensor.repeat(0, 4).into_data();

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_reshape_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1>::from([0.0, 1.0, 2.0]);
let data_actual = tensor.clone().reshape([1, 3]).into_data();
let data_expected = Data::from([[0.0, 1.0, 2.0]]);
@ -15,8 +14,7 @@ mod tests {
#[test]
fn should_support_reshape_int() {
let data = Data::from([0, 1, 2]);
let tensor = Tensor::<TestBackend, 1, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1, Int>::from([0, 1, 2]);
let data_actual = tensor.clone().reshape([1, 3]).into_data();
let data_expected = Data::from([[0, 1, 2]]);
@ -25,8 +23,7 @@ mod tests {
#[test]
fn should_support_reshape_bool() {
let data = Data::from([false, true, false]);
let tensor = Tensor::<TestBackend, 1, Bool>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1, Bool>::from([false, true, false]);
let data_actual = tensor.clone().reshape([1, 3]).into_data();
let data_expected = Data::from([[false, true, false]]);
@ -35,8 +32,7 @@ mod tests {
#[test]
fn should_support_reshape_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.clone().reshape([6]).into_data();
let data_expected = Data::from([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]);
@ -45,13 +41,12 @@ mod tests {
#[test]
fn should_support_dim_infererence() {
let data = Data::from([
let tensor = Tensor::<TestBackend, 2>::from([
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0],
]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
// Infer the dimension via -1
let reshaped = tensor.clone().reshape([2, -1]);
@ -73,16 +68,14 @@ mod tests {
#[test]
#[should_panic]
fn multiple_neg_ones() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1>::from([0.0, 1.0, 2.0]);
let data_actual = tensor.reshape([-1, -1]).into_data();
}
#[test]
#[should_panic]
fn neg_value() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1>::from([0.0, 1.0, 2.0]);
let data_actual = tensor.reshape([-2, -1]).into_data();
}
}

View File

@ -5,8 +5,8 @@ mod tests {
#[test]
fn should_select_1d() {
let tensor = TestTensor::from_data_devauto([0.0, 1.0, 2.0]);
let indices = TestTensorInt::from_data_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensor::from([0.0, 1.0, 2.0]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.select(0, indices);
@ -15,8 +15,8 @@ mod tests {
#[test]
fn should_select_1d_int() {
let tensor = TestTensorInt::from_data_devauto([5, 6, 7]);
let indices = TestTensorInt::from_data_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensorInt::from([5, 6, 7]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.select(0, indices);
@ -25,8 +25,8 @@ mod tests {
#[test]
fn should_select_2d_dim0_same_num_dim() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_data_devauto(([1, 0]));
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([1, 0]);
let output = tensor.select(0, indices);
@ -38,8 +38,8 @@ mod tests {
#[test]
fn should_select_2d_dim0_more_num_dim() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_data_devauto([1, 0, 1, 1]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([1, 0, 1, 1]);
let output = tensor.select(0, indices);
@ -56,8 +56,8 @@ mod tests {
#[test]
fn should_select_2d_dim1() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_data_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.select(1, indices);
@ -69,9 +69,9 @@ mod tests {
#[test]
fn should_select_assign_1d() {
let tensor = TestTensor::from_data_devauto([0.0, 1.0, 2.0]);
let values = TestTensor::from_data_devauto([5.0, 4.0, 3.0, 2.0, 1.0]);
let indices = TestTensorInt::from_data_devauto(Data::from([1, 1, 0, 1, 2]));
let tensor = TestTensor::from([0.0, 1.0, 2.0]);
let values = TestTensor::from([5.0, 4.0, 3.0, 2.0, 1.0]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.select_assign(0, indices, values);
@ -80,9 +80,9 @@ mod tests {
#[test]
fn should_select_assign_1d_int() {
let tensor = TestTensorInt::from_data_devauto([7, 8, 9]);
let values = TestTensorInt::from_data_devauto([5, 4, 3, 2, 1]);
let indices = TestTensorInt::from_data_devauto(Data::from([1, 1, 0, 1, 2]));
let tensor = TestTensorInt::from([7, 8, 9]);
let values = TestTensorInt::from([5, 4, 3, 2, 1]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
let output = tensor.select_assign(0, indices, values);
@ -91,9 +91,9 @@ mod tests {
#[test]
fn should_select_assign_2d_dim0() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let values = TestTensor::from_data_devauto([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from_data_devauto(Data::from([1, 0]));
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let values = TestTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from([1, 0]);
let output = tensor.select_assign(0, indices, values);
@ -105,9 +105,9 @@ mod tests {
#[test]
fn should_select_assign_2d_dim1() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let values = TestTensor::from_data_devauto([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from_data_devauto(Data::from([1, 0, 2]));
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let values = TestTensor::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
let indices = TestTensorInt::from([1, 0, 2]);
let output = tensor.select_assign(1, indices, values);
@ -120,8 +120,8 @@ mod tests {
#[test]
#[should_panic]
fn should_select_panic_invalid_dimension() {
let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from_data_devauto([1, 1, 0, 1, 2]);
let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indices = TestTensorInt::from([1, 1, 0, 1, 2]);
tensor.select(10, indices);
}

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_sin_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.sin().into_data();

View File

@ -6,7 +6,7 @@ mod tests {
#[test]
fn should_support_full_slicing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 1>::from(data.clone());
let data_actual = tensor.slice([0..3]).into_data();
@ -15,8 +15,7 @@ mod tests {
#[test]
fn should_support_partial_slicing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 1>::from([0.0, 1.0, 2.0]);
let data_actual = tensor.slice([1..3]).into_data();
@ -27,7 +26,7 @@ mod tests {
#[test]
fn should_support_full_slicing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 2>::from(data.clone());
let data_actual_1 = tensor.clone().slice([0..2]).into_data();
let data_actual_2 = tensor.slice([0..2, 0..3]).into_data();
@ -38,8 +37,7 @@ mod tests {
#[test]
fn should_support_partial_slicing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.slice([0..2, 0..2]).into_data();
@ -49,7 +47,7 @@ mod tests {
#[test]
fn should_support_partial_slicing_3d() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -62,7 +60,7 @@ mod tests {
#[test]
fn should_support_partial_slicing_3d_non_contiguous() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -75,11 +73,8 @@ mod tests {
#[test]
fn should_support_slice_assign_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let data_assigned = Data::from([10.0, 5.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
let tensor_assigned = Tensor::<TestBackend, 1>::from_data_devauto(data_assigned);
let tensor = Tensor::<TestBackend, 1>::from([0.0, 1.0, 2.0]);
let tensor_assigned = Tensor::<TestBackend, 1>::from([10.0, 5.0]);
let data_actual = tensor.slice_assign([0..2], tensor_assigned).into_data();
@ -89,11 +84,8 @@ mod tests {
#[test]
fn should_support_slice_assign_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_assigned = Data::from([[10.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor_assigned = Tensor::<TestBackend, 2>::from_data_devauto(data_assigned);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_assigned = Tensor::<TestBackend, 2>::from([[10.0, 5.0]]);
let data_actual = tensor
.slice_assign([1..2, 0..2], tensor_assigned)
@ -105,7 +97,7 @@ mod tests {
#[test]
fn slice_should_not_corrupt_potentially_inplace_operations() {
let tensor = Tensor::<TestBackend, 1, Int>::from_data_devauto([1, 2, 3, 4, 5]);
let tensor = Tensor::<TestBackend, 1, Int>::from([1, 2, 3, 4, 5]);
let tensor = tensor.clone().slice([0..3]) + tensor.clone().slice([2..5]);
assert_eq!(tensor.into_data(), Data::from([4, 6, 8]));
@ -113,8 +105,8 @@ mod tests {
#[test]
fn slice_assign_should_not_corrupt_potentially_inplace_operations() {
let tensor = Tensor::<TestBackend, 1, Int>::from_data_devauto([1, 2, 3, 4, 5]);
let values = Tensor::<TestBackend, 1, Int>::from_data_devauto([10, 20, 30]);
let tensor = Tensor::<TestBackend, 1, Int>::from([1, 2, 3, 4, 5]);
let values = Tensor::<TestBackend, 1, Int>::from([10, 20, 30]);
let tensor_1 = tensor.clone().slice_assign([0..3], values);
let tensor_2 = tensor + 2;
@ -126,7 +118,7 @@ mod tests {
#[should_panic]
fn should_panic_when_slice_exceeds_dimension() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 1>::from(data.clone());
let data_actual = tensor.slice([0..4]).into_data();
@ -137,7 +129,7 @@ mod tests {
#[should_panic]
fn should_panic_when_slice_with_too_many_dimensions() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 1>::from(data.clone());
let data_actual = tensor.slice([0..1, 0..1]).into_data();
@ -148,7 +140,7 @@ mod tests {
#[should_panic]
fn should_panic_when_slice_is_desc() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 1>::from(data.clone());
#[allow(clippy::reversed_empty_ranges)]
let data_actual = tensor.slice([2..1]).into_data();
@ -160,7 +152,7 @@ mod tests {
#[should_panic]
fn should_panic_when_slice_is_equal() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data.clone());
let tensor = Tensor::<TestBackend, 1>::from(data.clone());
let data_actual = tensor.slice([1..1]).into_data();

View File

@ -6,8 +6,7 @@ mod tests {
#[test]
fn should_support_sqrt_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.sqrt().into_data();

View File

@ -6,8 +6,8 @@ mod tests {
#[test]
fn should_support_stack_ops_2d_dim0() {
let tensor_1: Tensor<TestBackend, 2> = Tensor::from_data_devauto([[1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from_data_devauto([[4.0, 5.0, 6.0]]);
let tensor_1: Tensor<TestBackend, 2> = Tensor::from([[1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from([[4.0, 5.0, 6.0]]);
let output = Tensor::stack(vec![tensor_1, tensor_2], 0);
@ -17,8 +17,8 @@ mod tests {
#[test]
fn should_support_stack_ops_int() {
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto([[1, 2, 3]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto([[4, 5, 6]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[1, 2, 3]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[4, 5, 6]]);
let output = Tensor::stack(vec![tensor_1, tensor_2], 0);
@ -28,8 +28,8 @@ mod tests {
#[test]
fn should_support_stack_ops_bool() {
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from_data_devauto([[false, true, true]]);
let tensor_2 = Tensor::<TestBackend, 2, Bool>::from_data_devauto([[true, true, false]]);
let tensor_1 = Tensor::<TestBackend, 2, Bool>::from([[false, true, true]]);
let tensor_2 = Tensor::<TestBackend, 2, Bool>::from([[true, true, false]]);
let output = Tensor::stack(vec![tensor_1, tensor_2], 0);
@ -39,8 +39,8 @@ mod tests {
#[test]
fn should_support_stack_ops_2d_dim1() {
let tensor_1: Tensor<TestBackend, 2> = Tensor::from_data_devauto([[1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from_data_devauto([[4.0, 5.0, 6.0]]);
let tensor_1: Tensor<TestBackend, 2> = Tensor::from([[1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from([[4.0, 5.0, 6.0]]);
let output = Tensor::stack(vec![tensor_1, tensor_2], 1);
@ -51,9 +51,9 @@ mod tests {
#[test]
fn should_support_stack_ops_3d() {
let tensor_1: Tensor<TestBackend, 3> =
TestTensor::from_data_devauto([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
TestTensor::from([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2: Tensor<TestBackend, 3> =
TestTensor::from_data_devauto([[[4.0, 5.0, 6.0]], [[4.1, 5.1, 6.1]]]);
TestTensor::from([[[4.0, 5.0, 6.0]], [[4.1, 5.1, 6.1]]]);
let output = Tensor::stack(vec![tensor_1, tensor_2], 0);
@ -67,9 +67,8 @@ mod tests {
#[test]
#[should_panic]
fn should_panic_when_dimensions_are_not_the_same() {
let tensor_1: Tensor<TestBackend, 2> =
Tensor::from_data_devauto([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from_data_devauto([[4.0, 5.0]]);
let tensor_1: Tensor<TestBackend, 2> = Tensor::from([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]);
let tensor_2: Tensor<TestBackend, 2> = Tensor::from([[4.0, 5.0]]);
let output: Tensor<TestBackend, 3> = Tensor::stack(vec![tensor_1, tensor_2], 0);
}
@ -84,9 +83,8 @@ mod tests {
#[test]
#[should_panic]
fn should_panic_when_stack_exceeds_dimension() {
let tensor_1: Tensor<TestBackend, 3> =
Tensor::from_data_devauto([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2: Tensor<TestBackend, 3> = Tensor::from_data_devauto([[[4.0, 5.0, 6.0]]]);
let tensor_1: Tensor<TestBackend, 3> = Tensor::from([[[1.0, 2.0, 3.0]], [[1.1, 2.1, 3.1]]]);
let tensor_2: Tensor<TestBackend, 3> = Tensor::from([[[4.0, 5.0, 6.0]]]);
let output: Tensor<TestBackend, 4> = Tensor::stack(vec![tensor_1, tensor_2], 3);
}

View File

@ -5,23 +5,19 @@ mod tests {
#[test]
fn should_support_sub_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let data_expected = Data::from([[-6.0, -6.0, -6.0], [-6.0, -6.0, -6.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let data_actual = (tensor_1 - tensor_2).into_data();
let data_expected = Data::from([[-6.0, -6.0, -6.0], [-6.0, -6.0, -6.0]]);
assert_eq!(data_expected, data_actual);
}
#[test]
fn test_sub_broadcast() {
let data_1 = Data::from([[0.0, 1.0, 2.0]]);
let data_2 = Data::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0]]);
let tensor_2 = Tensor::<TestBackend, 2>::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);
let data_actual = (tensor_1 - tensor_2).into_data();
@ -31,9 +27,8 @@ mod tests {
#[test]
fn should_support_sub_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let output = tensor - scalar;
@ -44,23 +39,19 @@ mod tests {
#[test]
fn should_support_sub_ops_int() {
let data_1 = Data::from([[0, 1, 2], [3, 4, 5]]);
let data_2 = Data::from([[6, 7, 8], [9, 10, 11]]);
let data_expected = Data::from([[-6, -6, -6], [-6, -6, -6]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[6, 7, 8], [9, 10, 11]]);
let data_actual = (tensor_1 - tensor_2).into_data();
let data_expected = Data::from([[-6, -6, -6], [-6, -6, -6]]);
assert_eq!(data_expected, data_actual);
}
#[test]
fn test_sub_broadcast_int() {
let data_1 = Data::from([[0, 1, 2]]);
let data_2 = Data::from([[3, 4, 5], [6, 7, 8]]);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
let tensor_1 = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2]]);
let tensor_2 = Tensor::<TestBackend, 2, Int>::from([[3, 4, 5], [6, 7, 8]]);
let data_actual = (tensor_1 - tensor_2).into_data();
@ -70,9 +61,8 @@ mod tests {
#[test]
fn should_support_sub_scalar_ops_int() {
let data = Data::from([[0, 1, 2], [3, 4, 5]]);
let scalar = 2;
let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2, Int>::from([[0, 1, 2], [3, 4, 5]]);
let output = tensor - scalar;

View File

@ -5,8 +5,7 @@ mod tests {
#[test]
fn should_support_tanh_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_actual = tensor.tanh().into_data();

View File

@ -5,7 +5,7 @@ mod tests {
#[test]
fn should_support_transpose_ops() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -21,7 +21,7 @@ mod tests {
#[test]
fn should_support_swap_dims() {
let tensor = TestTensor::from_floats_devauto([
let tensor = TestTensor::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
@ -38,10 +38,8 @@ mod tests {
#[test]
fn should_support_transpose_ops_int() {
let tensor = Tensor::<TestBackend, 3, Int>::from_data_devauto([
[[0, 1, 2], [3, 4, 5]],
[[6, 7, 8], [9, 10, 11]],
]);
let tensor =
Tensor::<TestBackend, 3, Int>::from([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]);
let data_actual = tensor.transpose().into_data();
@ -51,10 +49,8 @@ mod tests {
#[test]
fn should_support_swap_dims_int() {
let tensor = Tensor::<TestBackend, 3, Int>::from_data_devauto([
[[0, 1, 2], [3, 4, 5]],
[[6, 7, 8], [9, 10, 11]],
]);
let tensor =
Tensor::<TestBackend, 3, Int>::from([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]);
let data_actual = tensor.swap_dims(0, 2).into_data();
@ -64,7 +60,7 @@ mod tests {
#[test]
fn should_support_transpose_bool() {
let tensor = Tensor::<TestBackend, 3, Bool>::from_data_devauto([
let tensor = Tensor::<TestBackend, 3, Bool>::from([
[[false, true, false], [false, false, false]],
[[false, false, true], [false, false, true]],
]);
@ -80,7 +76,7 @@ mod tests {
#[test]
fn should_support_swap_dims_bool() {
let tensor = Tensor::<TestBackend, 3, Bool>::from_data_devauto([
let tensor = Tensor::<TestBackend, 3, Bool>::from([
[[false, true, false], [false, false, false]],
[[false, false, true], [false, false, true]],
]);

View File

@ -6,7 +6,7 @@ mod tests {
#[test]
fn test_triu() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]));
Tensor::from([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]);
let output = tensor.triu(0);
assert_eq!(
output.to_data(),
@ -16,8 +16,7 @@ mod tests {
#[test]
fn test_triu_positive_diagonal() {
let tensor: Tensor<TestBackend, 2, Int> =
Tensor::from_data_devauto(Data::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]));
let tensor: Tensor<TestBackend, 2, Int> = Tensor::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]);
let output = tensor.triu(1);
assert_eq!(
@ -28,8 +27,7 @@ mod tests {
#[test]
fn test_triu_negative_diagonal() {
let tensor: Tensor<TestBackend, 2, Int> =
Tensor::from_data_devauto(Data::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]));
let tensor: Tensor<TestBackend, 2, Int> = Tensor::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]);
let output = tensor.triu(-1);
assert_eq!(
@ -40,10 +38,10 @@ mod tests {
#[test]
fn test_triu_batch_tensors() {
let tensor: Tensor<TestBackend, 4, Int> = Tensor::from_data_devauto(Data::from([
let tensor: Tensor<TestBackend, 4, Int> = Tensor::from([
[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]],
[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]],
]));
]);
let output = tensor.triu(1);
assert_eq!(
output.to_data(),
@ -57,14 +55,14 @@ mod tests {
#[test]
#[should_panic]
fn test_triu_too_few_dims() {
let tensor: Tensor<TestBackend, 1, Int> = Tensor::from_data_devauto(Data::from([1, 2, 3]));
let tensor: Tensor<TestBackend, 1, Int> = Tensor::from([1, 2, 3]);
let output = tensor.triu(0);
}
#[test]
fn test_tril() {
let tensor: Tensor<TestBackend, 2> =
Tensor::from_data_devauto(Data::from([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]));
Tensor::from([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]);
let output = tensor.tril(0);
assert_eq!(
output.to_data(),
@ -75,7 +73,7 @@ mod tests {
#[test]
fn test_tril_positive_diagonal() {
let tensor: Tensor<TestBackend, 2, Int> =
Tensor::from_data_devauto(Data::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]));
Tensor::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]);
let output = tensor.tril(1);
assert_eq!(
@ -87,7 +85,7 @@ mod tests {
#[test]
fn test_tril_negative_diagonal() {
let tensor: Tensor<TestBackend, 2, Int> =
Tensor::from_data_devauto(Data::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]));
Tensor::from([[1, 1, 1], [1, 1, 1], [1, 1, 1]]);
let output = tensor.tril(-1);
assert_eq!(
@ -98,10 +96,10 @@ mod tests {
#[test]
fn test_tril_batch_tensors() {
let tensor: Tensor<TestBackend, 4, Int> = Tensor::from_data_devauto(Data::from([
let tensor: Tensor<TestBackend, 4, Int> = Tensor::from([
[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]],
[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]],
]));
]);
let output = tensor.tril(1);
assert_eq!(
output.to_data(),
@ -115,7 +113,7 @@ mod tests {
#[test]
#[should_panic]
fn test_tril_too_few_dims() {
let tensor: Tensor<TestBackend, 1, Int> = Tensor::from_data_devauto(Data::from([1, 2, 3]));
let tensor: Tensor<TestBackend, 1, Int> = Tensor::from([1, 2, 3]);
let output = tensor.tril(0);
}
}

View File

@ -9,8 +9,7 @@ mod tests {
#[test]
fn test_cov_1() {
let data = Data::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let data_actual = tensor.cov(1, 1).into_data();
@ -20,8 +19,7 @@ mod tests {
#[test]
fn test_cov_4() {
let data = Data::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor = Tensor::<TestBackend, 2>::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let data_actual = tensor.cov(1, 0).into_data();
@ -31,8 +29,8 @@ mod tests {
#[test]
fn test_cov_2() {
let data = Data::from([[0.5, 1.8], [0.2, -2.0], [3.0, -4.0], [5.0, 0.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
let tensor =
Tensor::<TestBackend, 2>::from([[0.5, 1.8], [0.2, -2.0], [3.0, -4.0], [5.0, 0.0]]);
let data_actual = tensor.cov(1, 1).into_data();
@ -47,13 +45,12 @@ mod tests {
#[test]
fn test_cov_3() {
let data = Data::from([
let tensor = Tensor::<TestBackend, 3>::from([
[[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]],
[[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]],
[[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]],
[[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]],
]);
let tensor = Tensor::<TestBackend, 3>::from_data_devauto(data);
let data_actual = tensor.cov(0, 1).into_data();
let data_expected = Tensor::<TestBackend, 3>::zeros_devauto([4, 4, 4]).to_data();
data_expected.assert_approx_eq(&data_actual, 3);

View File

@ -9,9 +9,8 @@ mod tests {
#[test]
fn test_display_2d_int_tensor() {
let int_data = Data::from([[1, 2, 3], [4, 5, 6], [7, 8, 9]]);
let tensor_int: burn_tensor::Tensor<TestBackend, 2, burn_tensor::Int> =
Tensor::from_data_devauto(int_data);
Tensor::from([[1, 2, 3], [4, 5, 6], [7, 8, 9]]);
let output = format!("{}", tensor_int);
let expected = format!(
@ -35,9 +34,8 @@ mod tests {
#[test]
fn test_display_2d_float_tensor() {
let float_data = Data::from([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]]);
let tensor_float: burn_tensor::Tensor<TestBackend, 2, burn_tensor::Float> =
Tensor::from_data_devauto(float_data);
Tensor::from([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]]);
let output = format!("{}", tensor_float);
let expected = format!(
@ -61,13 +59,11 @@ mod tests {
#[test]
fn test_display_2d_bool_tensor() {
let bool_data = Data::from([
let tensor_bool: burn_tensor::Tensor<TestBackend, 2, burn_tensor::Bool> = Tensor::from([
[true, false, true],
[false, true, false],
[false, true, true],
]);
let tensor_bool: burn_tensor::Tensor<TestBackend, 2, burn_tensor::Bool> =
Tensor::from_data_devauto(bool_data);
let output = format!("{}", tensor_bool);
let expected = format!(
@ -90,12 +86,10 @@ mod tests {
#[test]
fn test_display_3d_tensor() {
let data = Data::from([
let tensor: burn_tensor::Tensor<TestBackend, 3, burn_tensor::Int> = Tensor::from([
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]);
let tensor: burn_tensor::Tensor<TestBackend, 3, burn_tensor::Int> =
Tensor::from_data_devauto(data);
let output = format!("{}", tensor);
let expected = format!(
@ -122,14 +116,11 @@ mod tests {
#[test]
fn test_display_4d_tensor() {
let data = Data::from([
let tensor: burn_tensor::Tensor<TestBackend, 4, burn_tensor::Int> = Tensor::from([
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
[[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]],
]);
let tensor: burn_tensor::Tensor<TestBackend, 4, burn_tensor::Int> =
Tensor::from_data_devauto(data);
let output = format!("{}", tensor);
let expected = format!(
r#"Tensor {{

View File

@ -9,7 +9,7 @@ mod tests {
#[test]
fn test_var() {
let tensor = TestTensor::from_data_devauto([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = TestTensor::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let data_actual = tensor.var(1).into_data();
@ -19,7 +19,7 @@ mod tests {
#[test]
fn test_var_mean() {
let tensor = TestTensor::from_data_devauto([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = TestTensor::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let (var, mean) = tensor.var_mean(1);
@ -32,7 +32,7 @@ mod tests {
#[test]
fn test_var_bias() {
let tensor = TestTensor::from_data_devauto([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = TestTensor::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let data_actual = tensor.var_bias(1).into_data();
@ -42,7 +42,7 @@ mod tests {
#[test]
fn test_var_mean_bias() {
let tensor = TestTensor::from_data_devauto([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = TestTensor::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let (var, mean) = tensor.var_mean_bias(1);

View File

@ -94,13 +94,13 @@ mod tests {
fn test_accuracy_without_padding() {
let mut metric = AccuracyMetric::<TestBackend>::new();
let input = AccuracyInput::new(
Tensor::from_data_devauto([
Tensor::from([
[0.0, 0.2, 0.8], // 2
[1.0, 2.0, 0.5], // 1
[0.4, 0.1, 0.2], // 0
[0.6, 0.7, 0.2], // 1
]),
Tensor::from_data_devauto([2, 2, 1, 1]),
Tensor::from([2, 2, 1, 1]),
);
let _entry = metric.update(&input, &MetricMetadata::fake());
@ -111,7 +111,7 @@ mod tests {
fn test_accuracy_with_padding() {
let mut metric = AccuracyMetric::<TestBackend>::new().with_pad_token(3);
let input = AccuracyInput::new(
Tensor::from_data_devauto([
Tensor::from([
[0.0, 0.2, 0.8, 0.0], // 2
[1.0, 2.0, 0.5, 0.0], // 1
[0.4, 0.1, 0.2, 0.0], // 0
@ -120,7 +120,7 @@ mod tests {
[0.0, 0.1, 0.2, 0.0], // Error on padding should not count
[0.6, 0.0, 0.2, 0.0], // Error on padding should not count
]),
Tensor::from_data_devauto([2, 2, 1, 1, 3, 3, 3]),
Tensor::from([2, 2, 1, 1, 3, 3, 3]),
);
let _entry = metric.update(&input, &MetricMetadata::fake());

View File

@ -20,7 +20,7 @@ pub(crate) mod test_utils {
impl<B: Backend> Adaptor<LossInput<B>> for f64 {
fn adapt(&self) -> LossInput<B> {
LossInput::new(Tensor::from_data_devauto([self.elem()]))
LossInput::new(Tensor::from([self.elem()]))
}
}

View File

@ -209,8 +209,8 @@ mod tests {
data_2: Data<f32, 2>,
variant: ImplementationDetails,
) -> Data<f32, 2> {
let tensor_1 = Tensor::<B, 2>::from_data_devauto(data_1.convert());
let tensor_2 = Tensor::<B, 2>::from_data_devauto(data_2.convert());
let tensor_1 = Tensor::<B, 2>::from(data_1.convert());
let tensor_2 = Tensor::<B, 2>::from(data_2.convert());
let tensor_3 = tensor_1.clone() + tensor_2;
let tensor_4 = tensor_3.clone() - tensor_1;
let mut tensor_5 = tensor_4.clone() + 5.0;

View File

@ -81,7 +81,7 @@ mod tests {
.collect::<Vec<_>>();
let tensors_ref = tensors
.iter()
.map(|tensor| Tensor::<ReferenceBackend, 2>::from_data_devauto(tensor.to_data()))
.map(|tensor| Tensor::<ReferenceBackend, 2>::from(tensor.to_data()))
.collect::<Vec<_>>();
let tensor = Tensor::<TestBackend, 2>::cat(tensors, dim);

View File

@ -32,7 +32,7 @@ mod tests {
#[test]
fn clamp_should_match_reference() {
let input = Tensor::<TestBackend, 4>::random_devauto([1, 5, 32, 32], Distribution::Default);
let input_ref = Tensor::<ReferenceBackend, 4>::from_data_devauto(input.to_data());
let input_ref = Tensor::<ReferenceBackend, 4>::from(input.to_data());
let output = input.clamp(0.3, 0.7);

View File

@ -94,9 +94,9 @@ mod tests {
Tensor::<TestBackend, 4>::random_devauto([6, 16, 32, 32], Distribution::Default);
let weight = Tensor::<TestBackend, 4>::random_devauto([12, 8, 3, 3], Distribution::Default);
let bias = Tensor::<TestBackend, 1>::random_devauto([12], Distribution::Default);
let input_ref = Tensor::<ReferenceBackend, 4>::from_data_devauto(input.to_data());
let weight_ref = Tensor::<ReferenceBackend, 4>::from_data_devauto(weight.to_data());
let bias_ref = Tensor::<ReferenceBackend, 1>::from_data_devauto(bias.to_data());
let input_ref = Tensor::<ReferenceBackend, 4>::from(input.to_data());
let weight_ref = Tensor::<ReferenceBackend, 4>::from(weight.to_data());
let bias_ref = Tensor::<ReferenceBackend, 1>::from(bias.to_data());
let options = burn_tensor::ops::ConvOptions::new([2, 3], [2, 3], [2, 3], 2);
let output = module::conv2d(input, weight, Some(bias), options.clone());

View File

@ -106,9 +106,9 @@ mod tests {
Distribution::Default,
);
let bias = Tensor::<TestBackend, 1>::random_devauto([out_channels], Distribution::Default);
let input_ref = Tensor::<ReferenceBackend, 4>::from_data_devauto(input.to_data());
let weight_ref = Tensor::<ReferenceBackend, 4>::from_data_devauto(weight.to_data());
let bias_ref = Tensor::<ReferenceBackend, 1>::from_data_devauto(bias.to_data());
let input_ref = Tensor::<ReferenceBackend, 4>::from(input.to_data());
let weight_ref = Tensor::<ReferenceBackend, 4>::from(weight.to_data());
let bias_ref = Tensor::<ReferenceBackend, 1>::from(bias.to_data());
let output = module::conv_transpose2d(input, weight, Some(bias), options.clone());
let output_ref = module::conv_transpose2d(input_ref, weight_ref, Some(bias_ref), options);

Some files were not shown because too many files have changed in this diff