mirror of https://github.com/tracel-ai/burn.git
parent 1fd07fcb4a
commit d82e6b157b

@@ -5,11 +5,8 @@ mod tests {

     #[test]
     fn should_diff_log1p() {
-        let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
-        let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
-
-        let tensor_1 = TestAutodiffTensor::from_data_devauto(data_1).require_grad();
-        let tensor_2 = TestAutodiffTensor::from_data_devauto(data_2).require_grad();
+        let tensor_1 = TestAutodiffTensor::from([[0.0, 1.0], [3.0, 4.0]]).require_grad();
+        let tensor_2 = TestAutodiffTensor::from([[6.0, 7.0], [9.0, 10.0]]).require_grad();

         let tensor_3 = tensor_1.clone().matmul(tensor_2.clone().log1p());
         let tensor_4 = tensor_3.matmul(tensor_2.clone());

@@ -50,7 +50,7 @@ impl From<candle_core::Device> for CandleDevice {
         match device.location() {
             DeviceLocation::Cpu => CandleDevice::Cpu,
             DeviceLocation::Cuda { gpu_id } => CandleDevice::Cuda(gpu_id),
-            DeviceLocation::Metal => panic!("Metal unsupported"),
+            _ => panic!("Device unsupported: {device:?}"),
         }
     }
 }

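For context, a minimal usage sketch of the conversion above; the helper function and the `burn_candle::CandleDevice` import path are illustrative assumptions, not part of the commit:

// Hypothetical sketch: route a candle_core::Device through the From impl shown above.
use burn_candle::CandleDevice; // assumed re-export of the device type

fn to_burn_device(device: candle_core::Device) -> CandleDevice {
    // CPU and CUDA locations map directly; any other location now hits the wildcard panic arm.
    device.into()
}
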
@@ -24,6 +24,8 @@ mod tests {
     pub type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
     pub type ReferenceTensor<const D: usize> = burn_tensor::Tensor<ReferenceBackend, D>;
     pub type TestTensorInt<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Int>;
+    pub type TestTensorBool<const D: usize> =
+        burn_tensor::Tensor<TestBackend, D, burn_tensor::Bool>;

     type TestAutodiffBackend = burn_autodiff::Autodiff<TestBackend>;
     type TestAutodiffTensor<const D: usize> = burn_tensor::Tensor<TestAutodiffBackend, D>;

@@ -32,6 +32,8 @@ mod tests {
     type TestBackend = crate::NdArray<f32>;
     type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
+    type TestTensorInt<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Int>;
+    type TestTensorBool<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Bool>;

     use alloc::format;
     use alloc::vec;

@@ -19,6 +19,7 @@ mod tests {
     type TestBackend = crate::LibTorch<f32>;
     type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
     type TestTensorInt<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Int>;
+    type TestTensorBool<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Bool>;

     burn_tensor::testgen_all!();
     burn_autodiff::testgen_all!();

@@ -27,6 +27,17 @@ where
     pub(crate) primitive: K::Primitive<D>,
 }

+impl<B, const D: usize, K, T> From<T> for Tensor<B, D, K>
+where
+    B: Backend,
+    K: BasicOps<B>,
+    T: Into<Data<K::Elem, D>>,
+{
+    fn from(value: T) -> Self {
+        Tensor::from_data(value.into(), &Default::default())
+    }
+}
+
 impl<B, const D: usize, K> Tensor<B, D, K>
 where
     B: Backend,

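A minimal sketch of what this blanket impl enables in the test suites updated below; the `NdArray` backend choice and the standalone `main` are illustrative assumptions, not part of the diff:

// Hypothetical sketch: nested array literals convert through Into<Data<_, D>>
// and build a tensor on the backend's default device.
use burn_ndarray::NdArray; // assumed backend for illustration
use burn_tensor::{Int, Tensor};

fn main() {
    // Float tensor straight from a literal, replacing from_data_devauto(Data::from(...)).
    let _floats = Tensor::<NdArray<f32>, 2>::from([[1.0, 2.0], [3.0, 4.0]]);
    // The Int kind works the same way.
    let _ints = Tensor::<NdArray<f32>, 2, Int>::from([[1, 2], [3, 4]]);
}
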
@@ -5,14 +5,10 @@ mod tests {

     #[test]
     fn test_gelu() {
-        let data = Data::from([[
+        let tensor = TestTensor::from([[
             0.5447, 0.9809, 0.4114, 0.1398, 0.8045, 0.4103, 0.2388, 0.5262, 0.6677, 0.6737,
         ]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data)
-            .clone()
-            .clone();

-        let data_actual = activation::gelu(tensor).to_data();
+        let data_actual = activation::gelu(tensor).into_data();

         let data_expected = Data::from([[
             0.3851, 0.8207, 0.2714, 0.0777, 0.6351, 0.2704, 0.1419, 0.3687, 0.4993, 0.5051,

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_mish() {
-        let data = Data::from([[-0.4240, -0.9574, -0.2215], [-0.5767, 0.7218, -0.1620]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data(data);
+        let tensor = TestTensor::from([[-0.4240, -0.9574, -0.2215], [-0.5767, 0.7218, -0.1620]]);

-        let data_actual = activation::mish(tensor).to_data();
+        let data_actual = activation::mish(tensor).into_data();

         let data_expected = Data::from([[-0.1971, -0.3006, -0.1172], [-0.2413, 0.5823, -0.0888]]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_quiet_softmax_d2() {
-        let data = Data::from([[1.0, 7.0], [13.0, -3.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data(data);
+        let tensor = TestTensor::from([[1.0, 7.0], [13.0, -3.0]]);

-        let data_actual = activation::quiet_softmax(tensor, 1).to_data();
+        let data_actual = activation::quiet_softmax(tensor, 1).into_data();

         let data_expected = Data::from([[2.47e-03, 9.975e-01], [1.0, 1.1254e-07]]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_relu_d2() {
-        let data = Data::from([[0.0, -1.0, 2.0], [3.0, -4.0, 5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[0.0, -1.0, 2.0], [3.0, -4.0, 5.0]]);

-        let data_actual = activation::relu(tensor).to_data();
+        let data_actual = activation::relu(tensor).into_data();

         let data_expected = Data::from([[0.0, 0.0, 2.0], [3.0, 0.0, 5.0]]);
         assert_eq!(data_expected, data_actual);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_sigmoid() {
-        let data = Data::from([[1.0, 7.0], [13.0, -3.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[1.0, 7.0], [13.0, -3.0]]);

-        let data_actual = activation::sigmoid(tensor).to_data();
+        let data_actual = activation::sigmoid(tensor).into_data();

         let data_expected = Data::from([[0.7311, 0.9991], [1.0, 0.0474]]);
         data_actual.assert_approx_eq(&data_expected, 4);
@@ -16,10 +15,9 @@ mod tests {

     #[test]
     fn test_sigmoid_overflow() {
-        let data = Data::from([f32::MAX, f32::MIN]);
-        let tensor = Tensor::<TestBackend, 1>::from_data_devauto(data);
+        let tensor = TestTensor::from([f32::MAX, f32::MIN]);

-        let data_actual = activation::sigmoid(tensor).to_data();
+        let data_actual = activation::sigmoid(tensor).into_data();

         let data_expected = Data::from([1.0, 0.0]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_silu() {
-        let data = Data::from([[1.0, 2.0], [3.0, 4.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[1.0, 2.0], [3.0, 4.0]]);

-        let data_actual = activation::silu(tensor).to_data();
+        let data_actual = activation::silu(tensor).into_data();

         let data_expected = Data::from([[0.7311, 1.7616], [2.8577, 3.9281]]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_softmax_d2() {
-        let data = Data::from([[1.0, 7.0], [13.0, -3.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[1.0, 7.0], [13.0, -3.0]]);

-        let data_actual = activation::softmax(tensor, 1).to_data();
+        let data_actual = activation::softmax(tensor, 1).into_data();

         let data_expected = Data::from([[2.47e-03, 9.975e-01], [1.0, 1.1254e-07]]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -5,10 +5,12 @@ mod tests {

     #[test]
     fn test_softplus_d2() {
-        let data = Data::from([[-0.4240, -0.9574, -0.2215], [-0.5767, 0.7218, -0.1620]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data(data);
+        let tensor = Tensor::<TestBackend, 2>::from([
+            [-0.4240, -0.9574, -0.2215],
+            [-0.5767, 0.7218, -0.1620],
+        ]);

-        let data_actual_beta1 = activation::softplus(tensor.clone(), 1.0).to_data();
+        let data_actual_beta1 = activation::softplus(tensor.clone(), 1.0).into_data();
         let data_expected_beta1 = Data::from([[0.5034, 0.3249, 0.5885], [0.4458, 1.1178, 0.6154]]);
         data_actual_beta1.assert_approx_eq(&data_expected_beta1, 4);

@@ -5,10 +5,9 @@ mod tests {

     #[test]
     fn test_tanh() {
-        let data = Data::from([[1., 2.], [3., 4.]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[1., 2.], [3., 4.]]);

-        let data_actual = activation::tanh(tensor).to_data();
+        let data_actual = activation::tanh(tensor).into_data();

         let data_expected = Data::from([[0.7616, 0.9640], [0.9951, 0.9993]]);
         data_actual.assert_approx_eq(&data_expected, 4);

@@ -13,7 +13,7 @@ mod tests {
             length_out: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [0.5, 2.5, 4.5, 6.5],
             [8.5, 10.5, 12.5, 14.5],
         ]]));
@@ -28,10 +28,7 @@ mod tests {
             length_out: 3,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
-            [1.0, 3.0, 5.0],
-            [8.0, 10.0, 12.0],
-        ]]));
+        test.assert_output(TestTensor::from([[[1.0, 3.0, 5.0], [8.0, 10.0, 12.0]]]));
     }

     #[test]
@@ -43,7 +40,7 @@ mod tests {
             length_out: 8,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
             [4.0, 4.0, 5.0, 5.0, 6.0, 6.0, 7.0, 7.0],
         ]]));

@@ -15,7 +15,7 @@ mod tests {
             width_out: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [3.5000, 4.5000, 6.5000, 7.5000],
                 [15.5000, 16.5000, 18.5000, 19.5000],
@@ -42,7 +42,7 @@ mod tests {
             width_out: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [[5.0000, 8.0000], [15.5000, 18.5000], [26.0000, 29.0000]],
             [[40.0000, 43.0000], [50.5000, 53.5000], [61.0000, 64.0000]],
         ]]));
@@ -59,7 +59,7 @@ mod tests {
             width_out: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [0.0000, 0.5000, 1.5000, 2.0000],
                 [1.5000, 2.0000, 3.0000, 3.5000],
@@ -89,7 +89,7 @@ mod tests {
     impl AdaptiveAvgPool2dTestCase {
         fn assert_output(self, y: TestTensor<4>) {
             let shape_x = Shape::new([self.batch_size, self.channels, self.height, self.width]);
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -16,7 +16,7 @@ mod tests {
             count_include_pad: true,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[[1., 2., 3., 4.]]]));
+        test.assert_output(TestTensor::from([[[1., 2., 3., 4.]]]));
     }

     #[test]
@@ -31,7 +31,7 @@ mod tests {
             count_include_pad: true,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [0.3333, 2.0000, 4.0000],
             [4.3333, 8.0000, 10.0000],
         ]]));
@@ -49,7 +49,7 @@ mod tests {
             count_include_pad: false,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [0.5000, 2.0000, 4.0000],
             [6.5000, 8.0000, 10.0000],
         ]]));
@@ -68,7 +68,7 @@ mod tests {
     impl AvgPool1dTestCase {
         fn assert_output(self, y: TestTensor<3>) {
             let shape_x = Shape::new([self.batch_size, self.channels, self.length]);
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -20,7 +20,7 @@ mod tests {
             count_include_pad: true,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[[
+        test.assert_output(TestTensor::from([[[
             [7., 8., 9., 10.],
             [13., 14., 15., 16.],
             [19., 20., 21., 22.],
@@ -44,7 +44,7 @@ mod tests {
             count_include_pad: true,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[[
+        test.assert_output(TestTensor::from([[[
             [1.1667, 3.0000, 4.3333, 2.5000],
             [3.2500, 7.5000, 9.5000, 5.2500],
             [6.2500, 13.5000, 15.5000, 8.2500],
@@ -68,7 +68,7 @@ mod tests {
             count_include_pad: false,
        };

-        test.assert_output(TestTensor::from_floats_devauto([[[
+        test.assert_output(TestTensor::from([[[
             [3.5000, 4.5000, 6.5000, 7.5000],
             [6.5000, 7.5000, 9.5000, 10.5000],
             [12.5000, 13.5000, 15.5000, 16.5000],
@@ -93,7 +93,7 @@ mod tests {
     impl AvgPool2dTestCase {
         fn assert_output(self, y: TestTensor<4>) {
             let shape_x = Shape::new([self.batch_size, self.channels, self.height, self.width]);
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -19,7 +19,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([
+        test.assert_output(TestTensor::from([
             [[43., 67., 82., 49.], [104., 176., 227., 158.]],
             [[139., 187., 202., 113.], [392., 584., 635., 414.]],
         ]));
@@ -39,7 +39,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([
+        test.assert_output(TestTensor::from([
             [[62., 38.], [159., 111.]],
             [[158., 102.], [447., 367.]],
         ]));
@@ -59,7 +59,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([
+        test.assert_output(TestTensor::from([
             [[2., 5., 8., 3.], [42., 63., 75., 47.]],
             [[26., 29., 32., 11.], [114., 159., 171., 103.]],
         ]));

@@ -24,7 +24,7 @@ mod tests {
             width: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [1196., 1796., 1916., 1264.],
                 [1881., 2793., 2946., 1923.],
@@ -59,7 +59,7 @@ mod tests {
             width: 5,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [[312., 348., 384.], [492., 528., 564.], [672., 708., 744.]],
             [
                 [3724., 3841., 3958.],
@@ -88,7 +88,7 @@ mod tests {
             width: 5,
         };

-        test.assert_output(TestTensor::from_floats_devauto([
+        test.assert_output(TestTensor::from([
             [
                 [[1845., 3789., 1926.], [3210., 6465., 3228.]],
                 [[4276., 9082., 4789.], [8071., 16834., 8737.]],
@@ -130,18 +130,18 @@ mod tests {
                 self.kernel_size_1,
                 self.kernel_size_2,
             ]);
-            let weight = TestTensor::from_data_devauto(
+            let weight = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_weight.num_elements())
                     .reshape(shape_weight)
                     .into_data()
                     .convert(),
             );
-            let bias = TestTensor::from_data_devauto(
+            let bias = TestTensor::from(
                 TestTensorInt::arange_devauto(0..self.channels_out)
                     .into_data()
                     .convert(),
             );
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -20,7 +20,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [270., 453., 516., 387.],
             [352., 589., 679., 505.],
         ]]));
@@ -41,7 +41,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [28., 62., 36., 78., 44., 94., 52., 62.],
             [41., 93., 55., 121., 69., 149., 83., 93.],
         ]]));
@@ -62,7 +62,7 @@ mod tests {
             length: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [30., 64., 78., 76., 94., 52.],
             [49., 101., 127., 113., 143., 77.],
         ]]));

@@ -26,10 +26,7 @@ mod tests {
             width: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[[
-            [5.0, 11.0],
-            [23.0, 29.0],
-        ]]]));
+        test.assert_output(TestTensor::from([[[[5.0, 11.0], [23.0, 29.0]]]]));
     }
     #[test]
     fn test_conv_transpose2d_simple_2() {
@@ -52,7 +49,7 @@ mod tests {
             width: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [9855., 15207., 15738., 10797.],
                 [16290., 25119., 25956., 17793.],
@@ -95,7 +92,7 @@ mod tests {
             width: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[[
+        test.assert_output(TestTensor::from([[[
             [0.0, 0.0, 0.0, 1.0],
             [0.0, 0.0, 2.0, 3.0],
             [0.0, 2.0, 0.0, 3.0],
@@ -124,7 +121,7 @@ mod tests {
             width: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [126., 116., 136., 124., 146.],
                 [108., 88., 114., 92., 120.],
@@ -163,7 +160,7 @@ mod tests {
             width: 4,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [352., 728., 378., 780., 404., 832., 430., 452.],
                 [784., 1616., 836., 1720., 888., 1824., 940., 992.],
@@ -208,7 +205,7 @@ mod tests {
             width: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [[5., 11.], [23., 29.]],
             [[236., 258.], [302., 324.]],
         ]]));
@@ -235,7 +232,7 @@ mod tests {
             width: 2,
         };

-        test.assert_output(TestTensor::from_floats_devauto([[
+        test.assert_output(TestTensor::from([[
             [
                 [0.0000e+00, 0.0000e+00, 1.0000e+00, 2.0000e+00],
                 [0.0000e+00, 5.0000e+00, 1.1000e+01, 1.1000e+01],
@@ -303,18 +300,18 @@ mod tests {
                 self.kernel_size_1,
                 self.kernel_size_2,
             ]);
-            let weights = TestTensor::from_data_devauto(
+            let weights = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_weights.num_elements())
                     .reshape(shape_weights)
                     .into_data()
                     .convert(),
             );
-            let bias = TestTensor::from_data_devauto(
+            let bias = TestTensor::from(
                 TestTensorInt::arange_devauto(0..self.channels_out)
                     .into_data()
                     .convert(),
             );
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -7,8 +7,8 @@ mod tests {
     fn test_embedding_forward() {
         let weights = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
         let indices = Data::from([[0, 1], [1, 1]]);
-        let weights = Tensor::<TestBackend, 2>::from_data_devauto(weights);
-        let indices = Tensor::<TestBackend, 2, Int>::from_data_devauto(indices);
+        let weights = Tensor::<TestBackend, 2>::from(weights);
+        let indices = Tensor::<TestBackend, 2, Int>::from(indices);

         let output = embedding(weights, indices);
         let expected = Data::from([

@@ -13,11 +13,11 @@ mod tests {
         let stride = 1;
         let dilation = 1;

-        let x = TestTensor::from_floats_devauto([[
+        let x = TestTensor::from([[
             [0.9861, 0.5474, 0.4477, 0.0732, 0.3548, 0.8221],
             [0.8148, 0.5474, 0.9490, 0.7890, 0.5537, 0.5689],
         ]]);
-        let y = TestTensor::from_floats_devauto([[
+        let y = TestTensor::from([[
             [0.9861, 0.5474, 0.4477, 0.8221],
             [0.949, 0.949, 0.949, 0.789],
         ]]);
@@ -34,8 +34,8 @@ mod tests {
         let stride = 2;
         let dilation = 1;

-        let x = TestTensor::from_floats_devauto([[[0.6309, 0.6112, 0.6998, 0.4708]]]);
-        let y = TestTensor::from_floats_devauto([[[0.6309, 0.6998]]]);
+        let x = TestTensor::from([[[0.6309, 0.6112, 0.6998, 0.4708]]]);
+        let y = TestTensor::from([[[0.6309, 0.6998]]]);

         let output = max_pool1d(x, kernel_size, stride, padding, dilation);

@@ -49,8 +49,8 @@ mod tests {
         let stride = 1;
         let dilation = 1;

-        let x = TestTensor::from_floats_devauto([[[-0.6309, -0.6112, -0.6998, -0.4708]]]);
-        let y = TestTensor::from_floats_devauto([[[-0.6112, -0.6112, -0.4708, -0.4708]]]);
+        let x = TestTensor::from([[[-0.6309, -0.6112, -0.6998, -0.4708]]]);
+        let y = TestTensor::from([[[-0.6112, -0.6112, -0.4708, -0.4708]]]);

         let output = max_pool1d(x, kernel_size, stride, padding, dilation);

@@ -64,11 +64,11 @@ mod tests {
         let stride = 1;
         let dilation = 2;

-        let x = TestTensor::from_floats_devauto([[
+        let x = TestTensor::from([[
             [0.9861, 0.5474, 0.4477, 0.0732, 0.3548, 0.8221],
             [0.8148, 0.5474, 0.9490, 0.7890, 0.5537, 0.5689],
         ]]);
-        let y = TestTensor::from_floats_devauto([[
+        let y = TestTensor::from([[
             [0.5474, 0.9861, 0.5474, 0.4477, 0.8221, 0.3548],
             [0.5474, 0.9490, 0.7890, 0.9490, 0.7890, 0.5537],
         ]]);
@@ -85,9 +85,9 @@ mod tests {
         let stride = 1;
         let dilation = 1;

-        let x = TestTensor::from_floats_devauto([[[0.2479, 0.6386, 0.3166, 0.5742]]]);
+        let x = TestTensor::from([[[0.2479, 0.6386, 0.3166, 0.5742]]]);
         let indices = Data::<IntElem, 3>::from([[[1, 1, 3]]]);
-        let y = TestTensor::from_floats_devauto([[[0.6386, 0.6386, 0.5742]]]);
+        let y = TestTensor::from([[[0.6386, 0.6386, 0.5742]]]);

         let (output, output_indices) =
             max_pool1d_with_indices(x, kernel_size, stride, padding, dilation);
@@ -103,10 +103,9 @@ mod tests {
         let stride = 1;
         let dilation = 1;

-        let x = TestTensor::from_floats_devauto([[[0.5388, 0.0676, 0.7122, 0.8316, 0.0653]]]);
+        let x = TestTensor::from([[[0.5388, 0.0676, 0.7122, 0.8316, 0.0653]]]);
         let indices = Data::<IntElem, 3>::from([[[0, 2, 3, 3, 3, 3]]]);
-        let y =
-            TestTensor::from_floats_devauto([[[0.5388, 0.7122, 0.8316, 0.8316, 0.8316, 0.8316]]]);
+        let y = TestTensor::from([[[0.5388, 0.7122, 0.8316, 0.8316, 0.8316, 0.8316]]]);

         let (output, output_indices) =
             max_pool1d_with_indices(x, kernel_size, stride, padding, dilation);

@@ -19,7 +19,7 @@ mod tests {
         let dilation_1 = 1;
         let dilation_2 = 1;

-        let x = TestTensor::from_floats_devauto([
+        let x = TestTensor::from([
             [
                 [
                     [0.9861, 0.5474, 0.4477, 0.0732, 0.3548, 0.8221],
@@ -57,7 +57,7 @@ mod tests {
                 ],
             ],
         ]);
-        let y = TestTensor::from_floats_devauto([
+        let y = TestTensor::from([
             [
                 [
                     [0.9861, 0.9861, 0.9490, 0.9490, 0.8221, 0.8221],
@@ -120,7 +120,7 @@ mod tests {
         let dilation_1 = 1;
        let dilation_2 = 1;

-        let x = TestTensor::from_floats_devauto([[[
+        let x = TestTensor::from([[[
             [0.6309, 0.6112, 0.6998],
             [0.4708, 0.9161, 0.5402],
             [0.4577, 0.7397, 0.9870],
@@ -128,7 +128,7 @@ mod tests {
             [0.6277, 0.5139, 0.4525],
             [0.9333, 0.9846, 0.5006],
         ]]]);
-        let y = TestTensor::from_floats_devauto([[[
+        let y = TestTensor::from([[[
             [0.6309, 0.6998],
             [0.6309, 0.9870],
             [0.6380, 0.9870],
@@ -161,7 +161,7 @@ mod tests {
         let dilation_1 = 1;
         let dilation_2 = 1;

-        let x = TestTensor::from_floats_devauto([[[
+        let x = TestTensor::from([[[
             [0.6309, 0.6112, 0.6998],
             [0.4708, 0.9161, 0.5402],
             [0.4577, 0.7397, 0.9870],
@@ -170,7 +170,7 @@ mod tests {
             [0.6277, 0.5139, 0.4525],
             [0.9333, 0.9846, 0.5006],
         ]]])
         .neg();
-        let y = TestTensor::from_floats_devauto([[[
+        let y = TestTensor::from([[[
             [-0.4708, -0.4708, -0.5402],
             [-0.4577, -0.4577, -0.5402],
             [-0.4352, -0.4352, -0.4352],
@@ -203,7 +203,7 @@ mod tests {
         let dilation_1 = 2;
         let dilation_2 = 2;

-        let x = TestTensor::from_floats_devauto([[[
+        let x = TestTensor::from([[[
             [0.9861, 0.9861, 0.9490, 0.9490, 0.8221, 0.8221],
             [0.9861, 0.9861, 0.9490, 0.9490, 0.8221, 0.8221],
             [0.9540, 0.9540, 0.9540, 0.9490, 0.7890, 0.7111],
@@ -211,7 +211,7 @@ mod tests {
             [0.9540, 0.9540, 0.9540, 0.9432, 0.8855, 0.8855],
             [0.5063, 0.9432, 0.9432, 0.9432, 0.8855, 0.8855],
         ]]]);
-        let y = TestTensor::from_floats_devauto([[[
+        let y = TestTensor::from([[[
             [0.9861, 0.9861, 0.9540, 0.9490],
             [0.9861, 0.9861, 0.9540, 0.9490],
             [0.9540, 0.9540, 0.9540, 0.9490],
@@ -241,7 +241,7 @@ mod tests {
         let dilation_1 = 1;
         let dilation_2 = 1;

-        let x = TestTensor::from_floats_devauto([[[
+        let x = TestTensor::from([[[
             [0.2479, 0.6386, 0.3166, 0.5742],
             [0.7065, 0.1940, 0.6305, 0.8959],
             [0.5416, 0.8602, 0.8129, 0.1662],
@@ -254,7 +254,7 @@ mod tests {
             [8, 9, 9, 14, 11],
             [12, 12, 14, 14, 15],
         ]]]);
-        let y = TestTensor::from_floats_devauto([[[
+        let y = TestTensor::from([[[
             [0.2479, 0.6386, 0.6386, 0.5742, 0.5742],
             [0.7065, 0.7065, 0.6386, 0.8959, 0.8959],
             [0.7065, 0.8602, 0.8602, 0.8959, 0.8959],
@@ -287,7 +287,7 @@ mod tests {
         let dilation_1 = 1;
         let dilation_2 = 1;

-        let x = TestTensor::from_floats_devauto([[[
+        let x = TestTensor::from([[[
             [0.5388, 0.0676, 0.7122, 0.8316, 0.0653],
             [0.9154, 0.1536, 0.9089, 0.8016, 0.7518],
             [0.2073, 0.0501, 0.8811, 0.5604, 0.5075],
@@ -302,7 +302,7 @@ mod tests {
             [15, 16, 24],
             [15, 16, 24],
         ]]]);
-        let y = TestTensor::from_floats_devauto([[[
+        let y = TestTensor::from([[[
             [0.9154, 0.9089, 0.8316],
             [0.9154, 0.9089, 0.8316],
             [0.9154, 0.9963, 0.8316],

@@ -34,7 +34,7 @@ mod tests {
             width: 4,
         };

-        test.assert_output(TestTensor::from_data_devauto([[
+        test.assert_output(TestTensor::from([[
             [0., 1., 2., 4., 5., 6., 8., 9., 10.],
             [1., 2., 3., 5., 6., 7., 9., 10., 11.],
             [4., 5., 6., 8., 9., 10., 12., 13., 14.],
@@ -59,7 +59,7 @@ mod tests {
             width: 4,
         };

-        test.assert_output(TestTensor::from_data_devauto([[
+        test.assert_output(TestTensor::from([[
             [0., 0.],
             [1., 5.],
             [3., 7.],
@@ -89,7 +89,7 @@ mod tests {
     impl Unfold4dTestCase {
         fn assert_shape(self, expected_shape: [usize; 3]) {
             let shape_x = Shape::new([self.batch_size, self.channels_in, self.height, self.width]);
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()
@@ -111,7 +111,7 @@ mod tests {

         fn assert_output(self, expected: TestTensor<3>) {
             let shape_x = Shape::new([self.batch_size, self.channels_in, self.height, self.width]);
-            let x = TestTensor::from_data_devauto(
+            let x = TestTensor::from(
                 TestTensorInt::arange_devauto(0..shape_x.num_elements())
                     .reshape(shape_x)
                     .into_data()

@@ -1,12 +1,11 @@
 #[burn_tensor_testgen::testgen(abs)]
 mod tests {
     use super::*;
-    use burn_tensor::{Data, Int, Tensor};
+    use burn_tensor::{Data, Tensor};

     #[test]
     fn should_support_abs_ops_float() {
-        let data = Data::from([[0.0, -1.0, 2.0], [3.0, 4.0, -5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[0.0, -1.0, 2.0], [3.0, 4.0, -5.0]]);

         let data_actual = tensor.abs().into_data();

@@ -16,8 +15,7 @@ mod tests {

     #[test]
     fn should_support_abs_ops_int() {
-        let data = Data::from([[0, -1, 2], [3, 4, -5]]);
-        let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
+        let tensor = TestTensorInt::from([[0, -1, 2], [3, 4, -5]]);

         let data_actual = tensor.abs().into_data();

@@ -2,14 +2,12 @@
 mod tests {
     use super::*;
     use burn_tensor::backend::Backend;
-    use burn_tensor::{Data, Int, Tensor};
+    use burn_tensor::{Data, Tensor};

     #[test]
     fn test_add_d2() {
-        let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
-        let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
-        let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
-        let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
+        let tensor_1 = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
+        let tensor_2 = TestTensor::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);

         let data_actual = (tensor_1 + tensor_2).into_data();

@@ -19,10 +17,8 @@ mod tests {

     #[test]
     fn test_add_broadcast() {
-        let data_1 = Data::from([[0.0, 1.0, 2.0]]);
-        let data_2 = Data::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);
-        let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1);
-        let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2);
+        let tensor_1 = TestTensor::from([[0.0, 1.0, 2.0]]);
+        let tensor_2 = TestTensor::from([[3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]);

         let data_actual = (tensor_1 + tensor_2).into_data();

@@ -32,13 +28,10 @@ mod tests {

     #[test]
     fn test_add_different_strides_rhs() {
-        let data_1 = Data::from([[0.0, 1.0], [2.0, 3.0]]);
-        let data_2 = Data::from([[4.0, 5.0], [6.0, 7.0]]);
-
         // We need to execute an operation after `from data` to trigger inplace in some backends.
         // Which is the operation that might be problematic in this case.
-        let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1) * 1;
-        let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2) * 1;
+        let tensor_1 = TestTensor::from([[0.0, 1.0], [2.0, 3.0]]) * 1;
+        let tensor_2 = TestTensor::from([[4.0, 5.0], [6.0, 7.0]]) * 1;

         let data_actual = (tensor_1 + tensor_2.transpose()).into_data();

@@ -48,13 +41,10 @@ mod tests {

     #[test]
     fn test_add_different_strides_lhs() {
-        let data_1 = Data::from([[0.0, 1.0], [2.0, 3.0]]);
-        let data_2 = Data::from([[4.0, 5.0], [6.0, 7.0]]);
-
         // We need to execute an operation after `from data` to trigger inplace in some backends.
         // Which is the operation that might be problematic in this case.
-        let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1) * 1;
-        let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2) * 1;
+        let tensor_1 = TestTensor::from([[0.0, 1.0], [2.0, 3.0]]) * 1;
+        let tensor_2 = TestTensor::from([[4.0, 5.0], [6.0, 7.0]]) * 1;

         let data_actual = (tensor_1.transpose() + tensor_2).into_data();

@@ -64,13 +54,10 @@ mod tests {

     #[test]
     fn test_add_different_strides_broadcast() {
-        let data_1 = Data::from([[0.0, 1.0], [2.0, 3.0]]);
-        let data_2 = Data::from([[4.0, 5.0]]);
-
         // We need to execute an operation after `from data` to trigger inplace in some backends.
         // Which is the operation that might be problematic in this case.
-        let tensor_1 = Tensor::<TestBackend, 2>::from_data_devauto(data_1) * 1;
-        let tensor_2 = Tensor::<TestBackend, 2>::from_data_devauto(data_2) * 1;
+        let tensor_1 = TestTensor::from([[0.0, 1.0], [2.0, 3.0]]) * 1;
+        let tensor_2 = TestTensor::from([[4.0, 5.0]]) * 1;

         let data_actual = (tensor_1.transpose() + tensor_2).into_data();

@@ -80,9 +67,8 @@ mod tests {

     #[test]
     fn should_support_add_scalar_ops() {
-        let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
         let scalar = 2.0;
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);

         let output = tensor + scalar;

@@ -93,10 +79,8 @@ mod tests {

     #[test]
     fn test_add_d2_int() {
-        let data_1 = Data::from([[0, 1, 2], [3, 4, 5]]);
-        let data_2 = Data::from([[6, 7, 8], [9, 10, 11]]);
-        let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
-        let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
+        let tensor_1 = TestTensorInt::from([[0, 1, 2], [3, 4, 5]]);
+        let tensor_2 = TestTensorInt::from([[6, 7, 8], [9, 10, 11]]);

         let data_actual = (tensor_1 + tensor_2).into_data();

@@ -106,10 +90,8 @@ mod tests {

     #[test]
     fn test_add_broadcast_int() {
-        let data_1 = Data::from([[0, 1, 2]]);
-        let data_2 = Data::from([[3, 4, 5], [6, 7, 8]]);
-        let tensor_1 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_1);
-        let tensor_2 = Tensor::<TestBackend, 2, Int>::from_data_devauto(data_2);
+        let tensor_1 = TestTensorInt::from([[0, 1, 2]]);
+        let tensor_2 = TestTensorInt::from([[3, 4, 5], [6, 7, 8]]);

         let data_actual = (tensor_1 + tensor_2).into_data();

@@ -119,9 +101,8 @@ mod tests {

     #[test]
     fn should_support_add_scalar_ops_int() {
-        let data = Data::from([[0, 1, 2], [3, 4, 5]]);
         let scalar = 2;
-        let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
+        let tensor = TestTensorInt::from([[0, 1, 2], [3, 4, 5]]);

         let output = tensor + scalar;

@@ -5,7 +5,7 @@ mod tests {

     #[test]
     fn test_should_mean() {
-        let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
+        let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.mean().to_data();

@@ -14,7 +14,7 @@ mod tests {

     #[test]
     fn test_should_mean_int() {
-        let tensor = TestTensorInt::from_data_devauto([[2, 2, 2], [3, 4, 5]]);
+        let tensor = TestTensorInt::from([[2, 2, 2], [3, 4, 5]]);

         let data_actual = tensor.mean().to_data();

@@ -23,7 +23,7 @@ mod tests {

     #[test]
     fn test_should_sum() {
-        let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
+        let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.sum().to_data();

@@ -32,7 +32,7 @@ mod tests {

     #[test]
     fn test_should_sum_int() {
-        let tensor = TestTensorInt::from_data_devauto([[0, 1, 2], [3, 4, 5]]);
+        let tensor = TestTensorInt::from([[0, 1, 2], [3, 4, 5]]);

         let data_actual = tensor.sum().to_data();

@@ -41,7 +41,7 @@ mod tests {

     #[test]
     fn test_should_mean_last_dim() {
-        let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
+        let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.mean_dim(1).to_data();

@@ -50,7 +50,7 @@ mod tests {

     #[test]
     fn test_should_sum_last_dim() {
-        let tensor = TestTensor::from_data_devauto([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
+        let tensor = TestTensor::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.sum_dim(1).to_data();

@@ -59,7 +59,7 @@ mod tests {

     #[test]
     fn test_should_mean_last_dim_int() {
-        let tensor = TestTensorInt::from_data_devauto([[0, 1, 2], [3, 4, 5]]);
+        let tensor = TestTensorInt::from([[0, 1, 2], [3, 4, 5]]);

         let data_actual = tensor.mean_dim(1).to_data();

@@ -68,7 +68,7 @@ mod tests {

     #[test]
     fn test_should_sum_last_dim_int() {
-        let tensor = TestTensorInt::from_data_devauto([[0, 1, 2], [3, 4, 5]]);
+        let tensor = TestTensorInt::from([[0, 1, 2], [3, 4, 5]]);

         let data_actual = tensor.sum_dim(1).to_data();

@@ -77,7 +77,7 @@ mod tests {

     #[test]
     fn test_should_sum_first_dim() {
-        let tensor = TestTensor::from_data_devauto([[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]]);
+        let tensor = TestTensor::from([[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]]);

         let data_actual = tensor.sum_dim(0).to_data();

@@ -86,7 +86,7 @@ mod tests {

     #[test]
     fn test_should_mean_first_dim() {
-        let tensor = TestTensor::from_data_devauto([[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]]);
+        let tensor = TestTensor::from([[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]]);

         let data_actual = tensor.mean_dim(0).to_data();

@@ -95,7 +95,7 @@ mod tests {

     #[test]
     fn test_should_sum_mid_dim_3d_non_contiguous_1() {
-        let tensor = TestTensor::from_data_devauto([
+        let tensor = TestTensor::from([
             [[2.0, 4.0, 1.0], [7.0, -5.0, 3.0]],
             [[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]],
         ]);
@@ -110,7 +110,7 @@ mod tests {

     #[test]
     fn test_should_sum_mid_dim_3d_non_contiguous_2() {
-        let tensor = TestTensor::from_data_devauto([
+        let tensor = TestTensor::from([
             [[2.0, 4.0, 1.0], [7.0, -5.0, 3.0]],
             [[3.0, 1.0, 2.0], [4.0, 2.0, 3.0]],
         ]);

@@ -1,71 +1,65 @@
 #[burn_tensor_testgen::testgen(arg)]
 mod tests {
     use super::*;
-    use burn_tensor::{Data, Int, Tensor};
+    use burn_tensor::{Data, Tensor};

     #[test]
     fn test_argmax_2d_dim0() {
-        let data = Data::from([[10.0, 11.0, 2.0], [3.0, 4.0, 5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[10.0, 11.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.argmax(0);

         let data_expected = Data::from([[0, 0, 1]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }

     #[test]
     fn test_argmin_2d_dim0() {
-        let data = Data::from([[10.0, 11.0, 2.0], [30.0, 4.0, 5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[10.0, 11.0, 2.0], [30.0, 4.0, 5.0]]);

         let data_actual = tensor.argmin(0);

         let data_expected = Data::from([[0, 1, 0]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }

     #[test]
     fn test_argmax_2d_dim0_int() {
-        let data = Data::from([[10, 11, 2], [3, 4, 5]]);
-        let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
+        let tensor = TestTensorInt::from([[10, 11, 2], [3, 4, 5]]);

         let data_actual = tensor.argmax(0);

         let data_expected = Data::from([[0, 0, 1]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }

     #[test]
     fn test_argmin_2d_dim0_int() {
-        let data = Data::from([[10, 11, 2], [30, 4, 5]]);
-        let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto(data);
+        let tensor = TestTensorInt::from([[10, 11, 2], [30, 4, 5]]);

         let data_actual = tensor.argmin(0);

         let data_expected = Data::from([[0, 1, 0]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }

     #[test]
     fn test_argmax_2d_dim1() {
-        let data = Data::from([[10.0, 11.0, 2.0], [3.0, 4.0, 5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[10.0, 11.0, 2.0], [3.0, 4.0, 5.0]]);

         let data_actual = tensor.argmax(1);

         let data_expected = Data::from([[1], [2]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }

     #[test]
     fn test_argmin_2d_dim1() {
-        let data = Data::from([[10.0, 11.0, 2.0], [30.0, 4.0, 5.0]]);
-        let tensor = Tensor::<TestBackend, 2>::from_data_devauto(data);
+        let tensor = TestTensor::from([[10.0, 11.0, 2.0], [30.0, 4.0, 5.0]]);

         let data_actual = tensor.argmin(1);

         let data_expected = Data::from([[2], [1]]);
-        assert_eq!(data_expected, data_actual.to_data());
+        assert_eq!(data_expected, data_actual.into_data());
     }
 }

@@ -1,12 +1,11 @@
 #[burn_tensor_testgen::testgen(cast)]
 mod tests {
     use super::*;
-    use burn_tensor::{Bool, Data, Int, Tensor};
+    use burn_tensor::{Bool, Data, Tensor};

     #[test]
     fn cast_float_to_int() {
-        let tensor =
-            Tensor::<TestBackend, 2>::from_data_devauto([[1.0, 2.0, 3.0], [4.4, 5.5, 6.6]]);
+        let tensor = TestTensor::from([[1.0, 2.0, 3.0], [4.4, 5.5, 6.6]]);

         let actual = tensor.int().into_data();
         let expected = Data::from([[1, 2, 3], [4, 5, 6]]);
@@ -15,7 +14,7 @@ mod tests {

     #[test]
     fn cast_int_to_float_tensor() {
-        let tensor = Tensor::<TestBackend, 2, Int>::from_data_devauto([[1, 2, 3], [4, 5, 6]]);
+        let tensor = TestTensorInt::from([[1, 2, 3], [4, 5, 6]]);

         let actual = tensor.float().into_data();
         let expected = Data::from([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
@@ -24,10 +23,7 @@ mod tests {

     #[test]
     fn cast_bool_to_int_tensor() {
-        let tensor = Tensor::<TestBackend, 2, Bool>::from_data_devauto([
-            [true, false, true],
-            [false, false, true],
-        ]);
+        let tensor = TestTensorBool::from([[true, false, true], [false, false, true]]);

         let actual = tensor.int().into_data();
         let expected = Data::from([[1, 0, 1], [0, 0, 1]]);
@@ -36,10 +32,8 @@ mod tests {

     #[test]
     fn cast_bool_to_float_tensor() {
-        let tensor = Tensor::<TestBackend, 2, Bool>::from_data_devauto([
-            [true, false, true],
-            [false, false, true],
-        ]);
+        let tensor =
+            Tensor::<TestBackend, 2, Bool>::from([[true, false, true], [false, false, true]]);

         let actual = tensor.float().into_data();
         let expected = Data::from([[1., 0., 1.], [0., 0., 1.]]);

@@ -128,6 +128,8 @@ mod tests {
     pub type TestBackend = Fusion<Wgpu>;
     pub type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
     pub type TestTensorInt<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Int>;
+    pub type TestTensorBool<const D: usize> =
+        burn_tensor::Tensor<TestBackend, D, burn_tensor::Bool>;

     burn_tensor::testgen_all!();
     burn_autodiff::testgen_all!();

@@ -40,8 +40,11 @@ mod tests {
     pub type ReferenceBackend = burn_ndarray::NdArray<f32>;

     pub type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
+    pub type ReferenceTensor<const D: usize> = burn_tensor::Tensor<ReferenceBackend, D>;
+    pub type TestTensorInt<const D: usize> = burn_tensor::Tensor<TestBackend, D, burn_tensor::Int>;
+    pub type TestTensorBool<const D: usize> =
+        burn_tensor::Tensor<TestBackend, D, burn_tensor::Bool>;

-    pub type ReferenceTensor<const D: usize> = burn_tensor::Tensor<ReferenceBackend, D>;

     burn_tensor::testgen_all!();
     burn_autodiff::testgen_all!();