refactor: burn tensor testgen (#104)

This commit is contained in:
Nathaniel Simard 2022-11-16 21:02:32 -05:00 committed by GitHub
parent ab51c22a55
commit 713f078602
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
55 changed files with 1423 additions and 1320 deletions

View File

@ -31,7 +31,7 @@ jobs:
- name: check tests backend ndarray
run: |
cd burn-tensor
cargo test --no-default-features --features ndarray export_tests --tests
cargo test --no-default-features --features ndarray --tests
- name: check clippy backend ndarray
run: |

View File

@ -3,6 +3,7 @@ members = [
"burn",
"burn-derive",
"burn-tensor",
"burn-tensor-testgen",
"burn-dataset",
"burn-tch",
"examples/*",

View File

@ -13,5 +13,5 @@ pub use tensor_ops::*;
mod tests {
type TestBackend = crate::TchBackend<f32>;
burn_tensor::test_all!();
burn_tensor::testgen_all!();
}

View File

@ -0,0 +1,18 @@
[package]
name = "burn-tensor-testgen"
version = "0.2.3"
authors = ["nathanielsimard <nathaniel.simard.42@gmail.com>"]
description = "Burn tensor test gen crate."
repository = "https://github.com/burn-rs/burn/tree/main/burn-tensor-testgen"
readme = "README.md"
license = "MIT/Apache-2.0"
edition = "2021"
[lib]
proc-macro = true
[dependencies]
syn = "1.0"
quote = "1.0"
proc-macro2 = "1.0"

View File

@ -0,0 +1 @@
../LICENSE-APACHE

View File

@ -0,0 +1 @@
../LICENSE-MIT

View File

View File

@ -0,0 +1,46 @@
use proc_macro::TokenStream;
use quote::{format_ident, quote};
/// Attribute macro applied to a test module: `#[burn_tensor_testgen::testgen(foo)]`.
///
/// It produces two things from the annotated module:
/// 1. an exported `testgen_foo!` macro that re-emits the module's tests, so a
///    backend crate can instantiate the shared test suite against its own
///    `TestBackend` (the invoking scope must define `TestBackend`);
/// 2. a `#[cfg(test)]` copy of the module's items compiled directly in the
///    defining crate, so `cargo test` on burn-tensor itself still runs them.
#[proc_macro_attribute]
pub fn testgen(attr: TokenStream, item: TokenStream) -> TokenStream {
    // Convert the compiler-provided streams into proc_macro2 streams so they
    // can be interpolated into `quote!` templates below.
    let item: proc_macro2::TokenStream = proc_macro2::TokenStream::from(item);
    let attr: proc_macro2::TokenStream = proc_macro2::TokenStream::from(attr);
    // The attribute argument names the generated macro: `testgen(foo)` yields
    // `testgen_foo!`. NOTE(review): `attr.to_string()` assumes the argument is
    // a single bare identifier — anything else would produce an invalid ident.
    let macro_ident = format_ident!("testgen_{}", attr.to_string());
    // Expansion 1: the exported macro. `#attr` is reused as the inner module
    // name; `use super::*` pulls the caller's `TestBackend` into that module,
    // and the AD aliases are defined on top of it.
    let macro_gen = quote! {
        #[macro_export]
        macro_rules! #macro_ident {
            () => {
                mod #attr {
                    use super::*;
                    type TestADBackend = burn_tensor::backend::ADBackendDecorator<TestBackend>;
                    type TestADTensor<const D: usize> = burn_tensor::Tensor<TestADBackend, D>;
                    #item
                }
            };
        }
    };
    // Expansion 2: the same items compiled in-crate under #[cfg(test)], with
    // `crate` aliased to `burn_tensor` so the test bodies can use the same
    // paths in both expansion contexts.
    let test_gen = quote! {
        #[cfg(test)]
        use crate::tests::TestBackend;
        #[cfg(test)]
        use crate as burn_tensor;
        #[cfg(test)]
        type TestADBackend = burn_tensor::backend::ADBackendDecorator<TestBackend>;
        #[cfg(test)]
        type TestTensor<const D: usize> = burn_tensor::Tensor<TestBackend, D>;
        #[cfg(test)]
        type TestADTensor<const D: usize> = burn_tensor::Tensor<TestADBackend, D>;
        #[cfg(test)]
        #item
    };
    // Emit both expansions back to back and hand the result to the compiler.
    quote! {
        #test_gen
        #macro_gen
    }
    .into()
}

View File

@ -22,9 +22,10 @@ no-default-features = true
[features]
default = ["ndarray"]
ndarray = ["dep:ndarray", "dep:libm"]
export_tests = []
export_tests = ["dep:burn-tensor-testgen"]
[dependencies]
burn-tensor-testgen = { path = "../burn-tensor-testgen", optional = true }
num-traits = "0.2"
derive-new = "0.5"
rand = "0.8"
@ -39,3 +40,6 @@ nanoid = "0.4"
# Serialization
serde = { version = "1.0", features = ["derive"] }
[dev-dependencies]
burn-tensor-testgen = { path = "../burn-tensor-testgen" }

View File

@ -8,6 +8,8 @@ mod tensor;
#[cfg(feature = "export_tests")]
mod tests;
#[cfg(all(test, not(feature = "export_tests")))]
mod tests;
pub use half::f16;
pub use tensor::*;

View File

@ -1,19 +1,20 @@
#[macro_export]
macro_rules! test_gelu {
() => {
#[test]
fn test_gelu() {
let data = Data::from([[
0.5447, 0.9809, 0.4114, 0.1398, 0.8045, 0.4103, 0.2388, 0.5262, 0.6677, 0.6737,
]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(gelu)]
mod tests {
use super::*;
use burn_tensor::{activation, Data, Tensor};
let data_actual = activation::gelu(&tensor).to_data();
#[test]
fn test_gelu() {
let data = Data::from([[
0.5447, 0.9809, 0.4114, 0.1398, 0.8045, 0.4103, 0.2388, 0.5262, 0.6677, 0.6737,
]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data).clone().clone();
let data_expected = Data::from([[
0.3851, 0.8207, 0.2714, 0.0777, 0.6351, 0.2704, 0.1419, 0.3687, 0.4993, 0.5051,
]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = activation::gelu(&tensor).to_data();
let data_expected = Data::from([[
0.3851, 0.8207, 0.2714, 0.0777, 0.6351, 0.2704, 0.1419, 0.3687, 0.4993, 0.5051,
]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,3 +1,3 @@
mod gelu;
mod relu;
mod softmax;
pub(crate) mod gelu;
pub(crate) mod relu;
pub(crate) mod softmax;

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_relu {
() => {
#[test]
fn test_relu_d2() {
let data = Data::from([[0.0, -1.0, 2.0], [3.0, -4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(relu)]
mod tests {
use super::*;
use burn_tensor::{activation, Data, Tensor};
let data_actual = activation::relu(&tensor).to_data();
#[test]
fn test_relu_d2() {
let data = Data::from([[0.0, -1.0, 2.0], [3.0, -4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[0.0, 0.0, 2.0], [3.0, 0.0, 5.0]]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = activation::relu(&tensor).to_data();
let data_expected = Data::from([[0.0, 0.0, 2.0], [3.0, 0.0, 5.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_softmax {
() => {
#[test]
fn test_softmax_d2() {
let data = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(softmax)]
mod tests {
use super::*;
use burn_tensor::{activation, Data, Tensor};
let data_actual = activation::softmax(&tensor, 1).to_data();
#[test]
fn test_softmax_d2() {
let data = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[2.47e-03, 9.975e-01], [1.0, 1.1254e-07]]);
data_actual.assert_approx_eq(&data_expected, 4);
}
};
let data_actual = activation::softmax(&tensor, 1).to_data();
let data_expected = Data::from([[2.47e-03, 9.975e-01], [1.0, 1.1254e-07]]);
data_actual.assert_approx_eq(&data_expected, 4);
}
}

View File

@ -1,64 +1,65 @@
#[macro_export]
macro_rules! test_ad_add {
() => {
#[test]
fn should_diff_add() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
#[burn_tensor_testgen::testgen(ad_add)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let tensor_1 = Tensor::<TestADBackend, 1>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 1>::from_data(data_2);
#[test]
fn should_diff_add() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
let tensor_3 = tensor_1.clone() + tensor_2.clone();
let grads = tensor_3.backward();
let tensor_1 = Tensor::<TestADBackend, 1>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 1>::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.clone() + tensor_2.clone();
let grads = tensor_3.backward();
assert_eq!(grad_1.to_data(), Data::from([1.0, 1.0]));
assert_eq!(grad_2.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_3.into_data(), Data::from([6.0, 6.0]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_add_scalar() {
let data = Data::from([2.0, 10.0]);
assert_eq!(grad_1.to_data(), Data::from([1.0, 1.0]));
assert_eq!(grad_2.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_3.into_data(), Data::from([6.0, 6.0]));
}
let tensor = Tensor::<TestADBackend, 1>::from_data(data);
let tensor_out = tensor.add_scalar(5.0);
let grads = tensor_out.backward();
#[test]
fn should_diff_add_scalar() {
let data = Data::from([2.0, 10.0]);
let grad = tensor.grad(&grads).unwrap();
let tensor = Tensor::<TestADBackend, 1>::from_data(data);
let tensor_out = tensor.add_scalar(5.0);
let grads = tensor_out.backward();
assert_eq!(grad.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_out.into_data(), Data::from([7.0, 15.0]));
}
let grad = tensor.grad(&grads).unwrap();
#[test]
fn test_add_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(grad.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_out.into_data(), Data::from([7.0, 15.0]));
}
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let tensor_3 = Tensor::<TestADBackend, 2>::from_data(data_3);
#[test]
fn test_add_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.add(&tensor_2);
let tensor_5 = tensor_4
.add(&tensor_3)
.add_scalar(5.0)
.add(&tensor_1)
.add(&tensor_2);
let tensor_6 = tensor_1.add(&tensor_5);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let tensor_3 = Tensor::<TestADBackend, 2>::from_data(data_3);
let grads = tensor_6.backward();
let tensor_4 = tensor_1.add(&tensor_2);
let tensor_5 = tensor_4
.add(&tensor_3)
.add_scalar(5.0)
.add(&tensor_1)
.add(&tensor_2);
let tensor_6 = tensor_1.add(&tensor_5);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_6.backward();
assert_eq!(grad_1.to_data(), Data::from([[3.0, 3.0], [3.0, 3.0]]));
assert_eq!(grad_2.to_data(), Data::from([[2.0, 2.0], [2.0, 2.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[3.0, 3.0], [3.0, 3.0]]));
assert_eq!(grad_2.to_data(), Data::from([[2.0, 2.0], [2.0, 2.0]]));
}
}

View File

@ -1,120 +1,121 @@
#[macro_export]
macro_rules! test_ad_aggregation {
() => {
#[test]
fn should_diff_mean() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_aggregation)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_mean() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.mean().unsqueeze());
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.mean().unsqueeze());
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[3.5, 9.5], [3.5, 9.5]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-0.75, -0.75], [3.0, 3.0]]), 5);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_sum_1() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[3.5, 9.5], [3.5, 9.5]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-0.75, -0.75], [3.0, 3.0]]), 5);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_sum_1() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.sum().unsqueeze());
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.sum().unsqueeze());
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[14.0, 38.0], [14.0, 38.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-3.0, -3.0], [12.0, 12.0]]), 5);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_sum_2() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[14.0, 38.0], [14.0, 38.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-3.0, -3.0], [12.0, 12.0]]), 5);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_sum_2() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.sum_dim(1);
let tensor_5 = tensor_4.mul(&tensor_3);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grads = tensor_5.sum().backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.sum_dim(1);
let tensor_5 = tensor_4.mul(&tensor_3);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[494.0, 722.0], [2990.0, 4370.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[690.0, 690.0], [958.0, 958.0]]), 3);
}
let grads = tensor_5.sum().backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_mean_dim() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[494.0, 722.0], [2990.0, 4370.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[690.0, 690.0], [958.0, 958.0]]), 3);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_mean_dim() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.mean_dim(1).unsqueeze());
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.mean_dim(1).unsqueeze());
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[4.0, 36.0], [3.0, -17.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[9.0, 9.0], [35.5, 35.5]]), 5);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_sum_dim() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[4.0, 36.0], [3.0, -17.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[9.0, 9.0], [35.5, 35.5]]), 5);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_sum_dim() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.sum_dim(1).unsqueeze());
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_1.mul(&tensor_3.sum_dim(1).unsqueeze());
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[8.0, 72.0], [6.0, -34.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[18.0, 18.0], [71.0, 71.0]]), 5);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[8.0, 72.0], [6.0, -34.0]]), 5);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[18.0, 18.0], [71.0, 71.0]]), 5);
}
}

View File

@ -1,68 +1,69 @@
#[macro_export]
macro_rules! test_ad_cat {
() => {
#[test]
fn should_diff_cat() {
let data_1 = Data::<_, 2>::from([[2.0, -1.0], [5.0, 2.0]]);
let data_2 = Data::<_, 2>::from([[5.0, 4.0], [-1.0, 4.0]]);
#[burn_tensor_testgen::testgen(ad_cat)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_cat() {
let data_1 = Data::<_, 2>::from([[2.0, -1.0], [5.0, 2.0]]);
let data_2 = Data::<_, 2>::from([[5.0, 4.0], [-1.0, 4.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let grads = tensor_3.backward();
let mut tensor_1_list = Vec::new();
let mut tensor_2_list = Vec::new();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
for i in 0..2 {
tensor_1_list.push(TestADTensor::from_data(
tensor_1.index([i..i + 1]).to_data(),
));
tensor_2_list.push(TestADTensor::from_data(
tensor_2.index([i..i + 1]).to_data(),
));
}
let mut tensor_1_list = Vec::new();
let mut tensor_2_list = Vec::new();
let tensor_1_cat = TestADTensor::cat(tensor_1_list.clone(), 0);
let tensor_2_cat = TestADTensor::cat(tensor_2_list.clone(), 0);
let tensor_3_cat = tensor_1_cat.matmul(&tensor_2_cat);
let grads_cat = tensor_3_cat.backward();
let grad_1_cat = tensor_1_cat.grad(&grads_cat).unwrap();
let grad_2_cat = tensor_2_cat.grad(&grads_cat).unwrap();
let grad_1_list_1 = tensor_1_list.get(0).unwrap().grad(&grads_cat).unwrap();
let grad_1_list_2 = tensor_1_list.get(1).unwrap().grad(&grads_cat).unwrap();
let grad_2_list_1 = tensor_2_list.get(0).unwrap().grad(&grads_cat).unwrap();
let grad_2_list_2 = tensor_2_list.get(1).unwrap().grad(&grads_cat).unwrap();
grad_1.to_data().assert_approx_eq(&grad_1_cat.to_data(), 3);
grad_2.to_data().assert_approx_eq(&grad_2_cat.to_data(), 3);
grad_1
.index([0..1])
.to_data()
.assert_approx_eq(&grad_1_list_1.to_data(), 3);
grad_1
.index([1..2])
.to_data()
.assert_approx_eq(&grad_1_list_2.to_data(), 3);
grad_2
.index([0..1])
.to_data()
.assert_approx_eq(&grad_2_list_1.to_data(), 3);
grad_2
.index([1..2])
.to_data()
.assert_approx_eq(&grad_2_list_2.to_data(), 3);
for i in 0..2 {
tensor_1_list.push(TestADTensor::from_data(
tensor_1.index([i..i + 1]).to_data(),
));
tensor_2_list.push(TestADTensor::from_data(
tensor_2.index([i..i + 1]).to_data(),
));
}
};
let tensor_1_cat = TestADTensor::cat(tensor_1_list.clone(), 0);
let tensor_2_cat = TestADTensor::cat(tensor_2_list.clone(), 0);
let tensor_3_cat = tensor_1_cat.matmul(&tensor_2_cat);
let grads_cat = tensor_3_cat.backward();
let grad_1_cat = tensor_1_cat.grad(&grads_cat).unwrap();
let grad_2_cat = tensor_2_cat.grad(&grads_cat).unwrap();
let grad_1_list_1 = tensor_1_list.get(0).unwrap().grad(&grads_cat).unwrap();
let grad_1_list_2 = tensor_1_list.get(1).unwrap().grad(&grads_cat).unwrap();
let grad_2_list_1 = tensor_2_list.get(0).unwrap().grad(&grads_cat).unwrap();
let grad_2_list_2 = tensor_2_list.get(1).unwrap().grad(&grads_cat).unwrap();
grad_1.to_data().assert_approx_eq(&grad_1_cat.to_data(), 3);
grad_2.to_data().assert_approx_eq(&grad_2_cat.to_data(), 3);
grad_1
.index([0..1])
.to_data()
.assert_approx_eq(&grad_1_list_1.to_data(), 3);
grad_1
.index([1..2])
.to_data()
.assert_approx_eq(&grad_1_list_2.to_data(), 3);
grad_2
.index([0..1])
.to_data()
.assert_approx_eq(&grad_2_list_1.to_data(), 3);
grad_2
.index([1..2])
.to_data()
.assert_approx_eq(&grad_2_list_2.to_data(), 3);
}
}

View File

@ -1,29 +1,30 @@
#[macro_export]
macro_rules! test_ad_cross_entropy_loss {
() => {
#[test]
fn test_cross_entropy_loss_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let data_targets = Data::from([[0.8, 0.2], [0.9, 0.1]]);
#[burn_tensor_testgen::testgen(ad_cross_entropy_loss)]
mod tests {
use super::*;
use burn_tensor::{loss, Data, Tensor};
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let tensor_targets = Tensor::<TestADBackend, 2>::from_data(data_targets);
#[test]
fn test_cross_entropy_loss_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let data_targets = Data::from([[0.8, 0.2], [0.9, 0.1]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = loss::cross_entropy_with_logits(&tensor_3, &tensor_targets);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let tensor_targets = Tensor::<TestADBackend, 2>::from_data(data_targets);
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = loss::cross_entropy_with_logits(&tensor_3, &tensor_targets);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[0.2655, 0.2655], [0.4496, 0.4496]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-1.3486, 1.3486], [-2.0637, 2.0637]]), 3);
}
};
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[0.2655, 0.2655], [0.4496, 0.4496]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-1.3486, 1.3486], [-2.0637, 2.0637]]), 3);
}
}

View File

@ -1,88 +1,89 @@
#[macro_export]
macro_rules! test_ad_div {
() => {
#[test]
fn should_diff_div() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
#[burn_tensor_testgen::testgen(ad_div)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_div() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
let tensor_3 = tensor_1.div(&tensor_2);
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.div(&tensor_2);
let grads = tensor_3.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([0.25, 0.1429]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([-0.0625, -0.1429]), 3);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_div_scalar() {
let data = Data::from([1.0, 7.0]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([0.25, 0.1429]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([-0.0625, -0.1429]), 3);
}
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.div_scalar(4.0);
#[test]
fn should_diff_div_scalar() {
let data = Data::from([1.0, 7.0]);
let grads = tensor_out.backward();
let grad = tensor.grad(&grads).unwrap();
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.div_scalar(4.0);
assert_eq!(grad.to_data(), Data::from([0.25, 0.25]));
}
let grads = tensor_out.backward();
let grad = tensor.grad(&grads).unwrap();
#[test]
fn test_div_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(grad.to_data(), Data::from([0.25, 0.25]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn test_div_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.div(&tensor_2);
let tensor_5 = tensor_4.div(&tensor_3);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_5.backward();
let tensor_4 = tensor_1.div(&tensor_2);
let tensor_5 = tensor_4.div(&tensor_3);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_5.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[0.1250, 0.0714], [0.25, 0.1667]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-0.0312, -0.0714], [-1.6250, 0.1667]]), 3);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn test_div_complex_2() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[0.1250, 0.0714], [0.25, 0.1667]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[-0.0312, -0.0714], [-1.6250, 0.1667]]), 3);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn test_div_complex_2() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.div(&tensor_2);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grads = tensor_4.sum().backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.div(&tensor_2);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[2.00, 2.9286], [1.3667, 2.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[0.0833, 0.0959], [-0.0556, -0.0671]]), 3);
}
};
let grads = tensor_4.sum().backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[2.00, 2.9286], [1.3667, 2.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[0.0833, 0.0959], [-0.0556, -0.0671]]), 3);
}
}

View File

@ -1,27 +1,28 @@
#[macro_export]
macro_rules! test_ad_erf {
() => {
#[test]
fn should_diff_erf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
#[burn_tensor_testgen::testgen(ad_erf)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_erf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.erf());
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.erf());
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[32.0, 32.0], [32.0, 32.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[8.0, 8.0], [8.0, 8.0]]), 3);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[32.0, 32.0], [32.0, 32.0]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[8.0, 8.0], [8.0, 8.0]]), 3);
}
}

View File

@ -1,27 +1,28 @@
#[macro_export]
macro_rules! test_ad_exp {
() => {
#[test]
fn should_diff_exp() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_exp)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_exp() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.exp());
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.exp());
let grads = tensor_3.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[54.5991, 27.4746], [54.5991, 27.4746]]), 3);
grad_2.to_data().assert_approx_eq(
&Data::from([[-5.4598e+01, -9.1188e-04], [2.9556e+01, 8.0342e+01]]),
3,
);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[54.5991, 27.4746], [54.5991, 27.4746]]), 3);
grad_2.to_data().assert_approx_eq(
&Data::from([[-5.4598e+01, -9.1188e-04], [2.9556e+01, 8.0342e+01]]),
3,
);
}
}

View File

@ -1,76 +1,77 @@
#[macro_export]
macro_rules! test_ad_index {
() => {
#[test]
fn should_diff_matmul_with_index() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0, 100.0], [2.0, 3.0, 15.0]]);
#[burn_tensor_testgen::testgen(ad_index)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_matmul_with_index() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0, 100.0], [2.0, 3.0, 15.0]]);
let tensor_3 = tensor_2.index([0..2, 0..2]);
let tensor_4 = &tensor_1.matmul(&tensor_3);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_2.index([0..2, 0..2]);
let tensor_4 = &tensor_1.matmul(&tensor_3);
let grads = tensor_4.backward();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(
grad_2.to_data(),
Data::from([[3.0, 3.0, 0.0], [10.0, 10.0, 0.0]])
);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_matmul_with_index_assign() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_assigned: Data<f32, 2> = Data::from([[9.0]]);
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(
grad_2.to_data(),
Data::from([[3.0, 3.0, 0.0], [10.0, 10.0, 0.0]])
);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_assigned = TestADTensor::from_data(data_assigned);
#[test]
fn should_diff_matmul_with_index_assign() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_assigned: Data<f32, 2> = Data::from([[9.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.index_assign([0..1, 0..1], &tensor_assigned);
let tensor_5 = &tensor_4.matmul(&tensor_1);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_assigned = TestADTensor::from_data(data_assigned);
let grads = tensor_5.backward();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.index_assign([0..1, 0..1], &tensor_assigned);
let tensor_5 = &tensor_4.matmul(&tensor_1);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_5.backward();
assert_eq!(grad_1.to_data(), Data::from([[58.0, 38.0], [118.0, 82.0]]));
assert_eq!(grad_2.to_data(), Data::from([[16.0, 15.0], [24.0, 50.0]]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_matmul_with_index_assign_complex() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[9.0]]);
assert_eq!(grad_1.to_data(), Data::from([[58.0, 38.0], [118.0, 82.0]]));
assert_eq!(grad_2.to_data(), Data::from([[16.0, 15.0], [24.0, 50.0]]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn should_diff_matmul_with_index_assign_complex() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[9.0]]);
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_2.index([0..1, 0..1]);
let tensor_6 = tensor_5.mul(&tensor_3);
let tensor_7 = tensor_4.index_assign([0..1, 0..1], &tensor_6);
let tensor_8 = &tensor_7.matmul(&tensor_1);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_8.backward();
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_2.index([0..1, 0..1]);
let tensor_6 = tensor_5.mul(&tensor_3);
let tensor_7 = tensor_4.index_assign([0..1, 0..1], &tensor_6);
let tensor_8 = &tensor_7.matmul(&tensor_1);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grad_3 = tensor_3.grad(&grads).unwrap();
let grads = tensor_8.backward();
assert_eq!(grad_3.to_data(), Data::from([[32.0]]));
assert_eq!(grad_1.to_data(), Data::from([[85.0, 65.0], [118.0, 82.0]]));
assert_eq!(grad_2.to_data(), Data::from([[88.0, 15.0], [24.0, 50.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grad_3 = tensor_3.grad(&grads).unwrap();
assert_eq!(grad_3.to_data(), Data::from([[32.0]]));
assert_eq!(grad_1.to_data(), Data::from([[85.0, 65.0], [118.0, 82.0]]));
assert_eq!(grad_2.to_data(), Data::from([[88.0, 15.0], [24.0, 50.0]]));
}
}

View File

@ -1,27 +1,28 @@
#[macro_export]
macro_rules! test_ad_log {
() => {
#[test]
fn should_diff_log() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
#[burn_tensor_testgen::testgen(ad_log)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_log() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.log());
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.log());
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[60.2652, 72.3130], [60.2652, 72.3130]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[22.8614, 24.5043], [24.5729, 26.8507]]), 3);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[60.2652, 72.3130], [60.2652, 72.3130]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[22.8614, 24.5043], [24.5729, 26.8507]]), 3);
}
}

View File

@ -1,25 +1,26 @@
#[macro_export]
macro_rules! test_ad_mask {
() => {
#[test]
fn should_diff_mask() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let mask = Data::<bool, 2>::from([[true, false], [false, true]]);
#[burn_tensor_testgen::testgen(ad_mask)]
mod tests {
use super::*;
use burn_tensor::{BoolTensor, Data};
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let mask = BoolTensor::from_data(mask);
#[test]
fn should_diff_mask() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let mask = Data::<bool, 2>::from([[true, false], [false, true]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.mask_fill(&mask, 2.0);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let mask = BoolTensor::from_data(mask);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.mask_fill(&mask, 2.0);
let grads = tensor_4.backward();
assert_eq!(grad_1.to_data(), Data::from([[7.0, 3.0], [4.0, 2.0]]));
assert_eq!(grad_2.to_data(), Data::from([[2.0, 1.0], [3.0, 7.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[7.0, 3.0], [4.0, 2.0]]));
assert_eq!(grad_2.to_data(), Data::from([[2.0, 1.0], [3.0, 7.0]]));
}
}

View File

@ -1,77 +1,78 @@
#[macro_export]
macro_rules! test_ad_matmul {
() => {
#[test]
fn should_diff_matmul() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_matmul)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_matmul() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_3 = &tensor_1.matmul(&tensor_2);
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = &tensor_1.matmul(&tensor_2);
let grads = tensor_3.backward();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 3.0], [10.0, 10.0]]));
assert_eq!(
tensor_3.clone().into_data(),
Data::from([[18.0, 28.0], [14.0, 23.0]])
);
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn test_matmul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 3.0], [10.0, 10.0]]));
assert_eq!(
tensor_3.clone().into_data(),
Data::from([[18.0, 28.0], [14.0, 23.0]])
);
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn test_matmul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_5.backward();
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_5.backward();
assert_eq!(grad_1.to_data(), Data::from([[44.0, 20.0], [44.0, 20.0]]));
assert_eq!(grad_2.to_data(), Data::from([[56.0, 56.0], [16.0, 16.0]]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn test_matmul_complex_2() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(grad_1.to_data(), Data::from([[44.0, 20.0], [44.0, 20.0]]));
assert_eq!(grad_2.to_data(), Data::from([[56.0, 56.0], [16.0, 16.0]]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn test_matmul_complex_2() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);
let tensor_6 = tensor_1.matmul(&tensor_5);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_6.backward();
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);
let tensor_6 = tensor_1.matmul(&tensor_5);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_6.backward();
assert_eq!(
grad_1.to_data(),
Data::from([[800.0, 792.0], [360.0, 592.0]])
);
assert_eq!(
grad_2.to_data(),
Data::from([[264., 264.0], [344.0, 344.0]])
);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(
grad_1.to_data(),
Data::from([[800.0, 792.0], [360.0, 592.0]])
);
assert_eq!(
grad_2.to_data(),
Data::from([[264., 264.0], [344.0, 344.0]])
);
}
}

View File

@ -1,63 +1,64 @@
#[macro_export]
macro_rules! test_ad_mul {
() => {
#[test]
fn should_diff_mul() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
#[burn_tensor_testgen::testgen(ad_mul)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1.clone());
let tensor_2 = TestADTensor::from_data(data_2.clone());
#[test]
fn should_diff_mul() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
let tensor_3 = tensor_1.mul(&tensor_2);
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1.clone());
let tensor_2 = TestADTensor::from_data(data_2.clone());
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.mul(&tensor_2);
let grads = tensor_3.backward();
assert_eq!(grad_1.to_data(), data_2);
assert_eq!(grad_2.to_data(), data_1);
assert_eq!(tensor_3.into_data(), Data::from([4.0, 49.0]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_mul_scalar() {
let data = Data::from([2.0, 5.0]);
assert_eq!(grad_1.to_data(), data_2);
assert_eq!(grad_2.to_data(), data_1);
assert_eq!(tensor_3.into_data(), Data::from([4.0, 49.0]));
}
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.mul_scalar(4.0);
#[test]
fn should_diff_mul_scalar() {
let data = Data::from([2.0, 5.0]);
let grads = tensor_out.backward();
let grad = tensor.grad(&grads).unwrap();
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.mul_scalar(4.0);
assert_eq!(tensor_out.into_data(), Data::from([8.0, 20.0]));
assert_eq!(grad.to_data(), Data::from([4.0, 4.0]));
}
let grads = tensor_out.backward();
let grad = tensor.grad(&grads).unwrap();
#[test]
fn test_mul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(tensor_out.into_data(), Data::from([8.0, 20.0]));
assert_eq!(grad.to_data(), Data::from([4.0, 4.0]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn test_mul_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.mul(&tensor_2);
let tensor_5 = tensor_4.mul(&tensor_3);
let tensor_6 = tensor_1.mul(&tensor_5);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_6.backward();
let tensor_4 = tensor_1.mul(&tensor_2);
let tensor_5 = tensor_4.mul(&tensor_3);
let tensor_6 = tensor_1.mul(&tensor_5);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_6.backward();
assert_eq!(
grad_1.to_data(),
Data::from([[16.0, 196.0], [104.0, -36.0]])
);
assert_eq!(grad_2.to_data(), Data::from([[2.0, 98.0], [338.0, 18.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(
grad_1.to_data(),
Data::from([[16.0, 196.0], [104.0, -36.0]])
);
assert_eq!(grad_2.to_data(), Data::from([[2.0, 98.0], [338.0, 18.0]]));
}
}

View File

@ -1,23 +1,24 @@
#[macro_export]
macro_rules! test_ad_neg {
() => {
#[test]
fn should_diff_neg() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_neg)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_neg() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.neg());
let tensor_4 = tensor_3.neg();
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.neg());
let tensor_4 = tensor_3.neg();
let grads = tensor_4.backward();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 3.0], [10.0, 10.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 3.0], [10.0, 10.0]]));
}
}

View File

@ -1,27 +1,28 @@
#[macro_export]
macro_rules! test_ad_powf {
() => {
#[test]
fn should_diff_powf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
#[burn_tensor_testgen::testgen(ad_powf)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_powf() {
let data_1 = Data::<f32, 2>::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::<f32, 2>::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.powf(0.4));
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.powf(0.4));
let tensor_4 = tensor_3.matmul(&tensor_2);
let grads = tensor_4.backward();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[68.0, 79.0328], [68.0, 79.0328]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[23.5081, 25.2779], [26.0502, 28.6383]]), 3);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[68.0, 79.0328], [68.0, 79.0328]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[23.5081, 25.2779], [26.0502, 28.6383]]), 3);
}
}

View File

@ -1,24 +1,25 @@
#[macro_export]
macro_rules! test_ad_relu {
() => {
#[test]
fn should_diff_relu() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_relu)]
mod tests {
use super::*;
use burn_tensor::{activation, Data};
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_relu() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [-2.0, -3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, -7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::relu(&tensor_3);
let tensor_5 = tensor_4.matmul(&tensor_2);
let grads = tensor_5.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::relu(&tensor_3);
let tensor_5 = tensor_4.matmul(&tensor_2);
let grads = tensor_5.backward();
assert_eq!(grad_1.to_data(), Data::from([[-47.0, 9.0], [-35.0, 15.0]]));
assert_eq!(grad_2.to_data(), Data::from([[15.0, 13.0], [-2.0, 39.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[-47.0, 9.0], [-35.0, 15.0]]));
assert_eq!(grad_2.to_data(), Data::from([[15.0, 13.0], [-2.0, 39.0]]));
}
}

View File

@ -1,23 +1,24 @@
#[macro_export]
macro_rules! test_ad_reshape {
() => {
#[test]
fn should_diff_reshape() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 1> = Data::from([4.0, 7.0, 2.0, 3.0]);
#[burn_tensor_testgen::testgen(ad_reshape)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_reshape() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f32, 1> = Data::from([4.0, 7.0, 2.0, 3.0]);
let tensor_3 = tensor_2.reshape([2, 2]);
let tensor_4 = &tensor_1.matmul(&tensor_3);
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_2.reshape([2, 2]);
let tensor_4 = &tensor_1.matmul(&tensor_3);
let grads = tensor_4.backward();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([3.0, 3.0, 10.0, 10.0]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[11.0, 5.0], [11.0, 5.0]]));
assert_eq!(grad_2.to_data(), Data::from([3.0, 3.0, 10.0, 10.0]));
}
}

View File

@ -1,48 +1,49 @@
#[macro_export]
macro_rules! test_ad_softmax {
() => {
#[test]
fn test_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(ad_softmax)]
mod tests {
use super::*;
use burn_tensor::{activation, Data, Tensor};
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::softmax(&tensor_3, 1).matmul(&tensor_2);
#[test]
fn test_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::softmax(&tensor_3, 1).matmul(&tensor_2);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[1.1797, 1.1797], [0.0055, 0.0055]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[0.2534, 0.2862], [0.5286, 2.9317]]), 3);
}
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn test_log_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[1.1797, 1.1797], [0.0055, 0.0055]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[0.2534, 0.2862], [0.5286, 2.9317]]), 3);
}
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::log_softmax(&tensor_3, 1).matmul(&tensor_2);
#[test]
fn test_log_softmax_grad() {
let data_1 = Data::from([[0.0, 1.0], [3.0, 4.0]]);
let data_2 = Data::from([[6.0, 7.0], [9.0, 10.0]]);
let tensor_1 = Tensor::<TestADBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestADBackend, 2>::from_data(data_2);
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = activation::log_softmax(&tensor_3, 1).matmul(&tensor_2);
grad_1
.to_data()
.assert_approx_eq(&Data::from([[-4.3939, -4.3939], [-12.9709, -12.9709]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[30.5984, -47.2267], [55.9631, -56.5914]]), 3);
}
};
let grads = tensor_4.backward();
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
grad_1
.to_data()
.assert_approx_eq(&Data::from([[-4.3939, -4.3939], [-12.9709, -12.9709]]), 3);
grad_2
.to_data()
.assert_approx_eq(&Data::from([[30.5984, -47.2267], [55.9631, -56.5914]]), 3);
}
}

View File

@ -1,59 +1,60 @@
#[macro_export]
macro_rules! test_ad_sub {
() => {
#[test]
fn should_diff_sub() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
#[burn_tensor_testgen::testgen(ad_sub)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_sub() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
let tensor_3 = tensor_1.sub(&tensor_2);
let grads = tensor_3.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.sub(&tensor_2);
let grads = tensor_3.backward();
assert_eq!(grad_1.to_data(), Data::from([1.0, 1.0]));
assert_eq!(grad_2.to_data(), Data::from([-1.0, -1.0]));
assert_eq!(tensor_3.into_data(), Data::from([-2.0, 4.0]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_sub_scalar() {
let data = Data::from([2.0, 10.0]);
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.sub_scalar(5.0);
let grads = tensor_out.backward();
assert_eq!(grad_1.to_data(), Data::from([1.0, 1.0]));
assert_eq!(grad_2.to_data(), Data::from([-1.0, -1.0]));
assert_eq!(tensor_3.into_data(), Data::from([-2.0, 4.0]));
}
let grad = tensor.grad(&grads).unwrap();
#[test]
fn should_diff_sub_scalar() {
let data = Data::from([2.0, 10.0]);
let tensor = TestADTensor::from_data(data);
let tensor_out = tensor.sub_scalar(5.0);
let grads = tensor_out.backward();
assert_eq!(grad.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_out.into_data(), Data::from([-3.0, 5.0]));
}
let grad = tensor.grad(&grads).unwrap();
#[test]
fn test_sub_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
assert_eq!(grad.to_data(), Data::from([1.0, 1.0]));
assert_eq!(tensor_out.into_data(), Data::from([-3.0, 5.0]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
#[test]
fn test_sub_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f32, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
let tensor_4 = tensor_1.sub(&tensor_2);
let tensor_5 = tensor_4.sub(&tensor_3).sub_scalar(5.0);
let tensor_6 = tensor_1.sub(&tensor_5);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = TestADTensor::from_data(data_3);
let grads = tensor_6.backward();
let tensor_4 = tensor_1.sub(&tensor_2);
let tensor_5 = tensor_4.sub(&tensor_3).sub_scalar(5.0);
let tensor_6 = tensor_1.sub(&tensor_5);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let grads = tensor_6.backward();
assert_eq!(grad_1.to_data(), Data::from([[0.0, 0.0], [0.0, 0.0]]));
assert_eq!(grad_2.to_data(), Data::from([[1.0, 1.0], [1.0, 1.0]]));
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(grad_1.to_data(), Data::from([[0.0, 0.0], [0.0, 0.0]]));
assert_eq!(grad_2.to_data(), Data::from([[1.0, 1.0], [1.0, 1.0]]));
}
}

View File

@ -1,50 +1,49 @@
#[macro_export]
macro_rules! test_ad_transpose {
() => {
#[test]
fn should_diff_transpose() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
#[burn_tensor_testgen::testgen(ad_transpose)]
mod tests {
use super::*;
use burn_tensor::Data;
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_transpose() {
let data_1 = Data::<f32, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f32, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_3 = tensor_1.matmul(&tensor_2.transpose());
let tensor_4 = tensor_3.transpose();
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.transpose());
let tensor_4 = tensor_3.transpose();
let grads = tensor_4.backward();
assert_eq!(grad_1.to_data(), Data::from([[6.0, 10.0], [6.0, 10.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 10.0], [3.0, 10.0]]));
}
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
#[test]
fn should_diff_swap_dims() {
let data_1 =
Data::<f32, 3>::from([[[0.0, 1.0], [3.0, 4.0]], [[6.0, 7.0], [9.0, 10.0]]]);
let data_2 =
Data::<f32, 3>::from([[[1.0, 4.0], [2.0, 5.0]], [[7.0, 10.0], [8.0, 11.0]]]);
assert_eq!(grad_1.to_data(), Data::from([[6.0, 10.0], [6.0, 10.0]]));
assert_eq!(grad_2.to_data(), Data::from([[3.0, 10.0], [3.0, 10.0]]));
}
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
#[test]
fn should_diff_swap_dims() {
let data_1 = Data::<f32, 3>::from([[[0.0, 1.0], [3.0, 4.0]], [[6.0, 7.0], [9.0, 10.0]]]);
let data_2 = Data::<f32, 3>::from([[[1.0, 4.0], [2.0, 5.0]], [[7.0, 10.0], [8.0, 11.0]]]);
let tensor_3 = tensor_1.matmul(&tensor_2.swap_dims(0, 2));
let tensor_4 = tensor_3.matmul(&tensor_2.swap_dims(1, 2));
let grads = tensor_4.backward();
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
let tensor_3 = tensor_1.matmul(&tensor_2.swap_dims(0, 2));
let tensor_4 = tensor_3.matmul(&tensor_2.swap_dims(1, 2));
let grads = tensor_4.backward();
assert_eq!(
grad_1.to_data(),
Data::from([[[66., 78.], [66., 78.]], [[270., 306.], [270., 306.]]])
);
assert_eq!(
grad_2.to_data(),
Data::from([[[22., 286.], [28., 316.]], [[172., 652.], [190., 694.]]])
);
}
};
let grad_1 = tensor_1.grad(&grads).unwrap();
let grad_2 = tensor_2.grad(&grads).unwrap();
assert_eq!(
grad_1.to_data(),
Data::from([[[66., 78.], [66., 78.]], [[270., 306.], [270., 306.]]])
);
assert_eq!(
grad_2.to_data(),
Data::from([[[22., 286.], [28., 316.]], [[172., 652.], [190., 694.]]])
);
}
}

View File

@ -4,67 +4,62 @@ mod module;
mod ops;
mod stats;
#[cfg(test)]
type TestBackend = crate::backend::NdArrayBackend<f32>;
#[macro_export]
macro_rules! test_all {
macro_rules! testgen_all {
() => {
use burn_tensor::activation::*;
use burn_tensor::backend::Backend;
use burn_tensor::module::*;
use burn_tensor::*;
type TestADTensor<const D: usize> = burn_tensor::Tensor<TestADBackend, D>;
type TestADBackend = burn_tensor::backend::ADBackendDecorator<TestBackend>;
// test activation
burn_tensor::test_gelu!();
burn_tensor::test_relu!();
burn_tensor::test_softmax!();
burn_tensor::testgen_gelu!();
burn_tensor::testgen_relu!();
burn_tensor::testgen_softmax!();
// test ad
burn_tensor::test_ad_add!();
burn_tensor::test_ad_aggregation!();
burn_tensor::test_ad_cat!();
burn_tensor::test_ad_cross_entropy_loss!();
burn_tensor::test_ad_div!();
burn_tensor::test_ad_erf!();
burn_tensor::test_ad_exp!();
burn_tensor::test_ad_index!();
burn_tensor::test_ad_log!();
burn_tensor::test_ad_mask!();
burn_tensor::test_ad_matmul!();
burn_tensor::test_ad_mul!();
burn_tensor::test_ad_neg!();
burn_tensor::test_ad_powf!();
burn_tensor::test_ad_relu!();
burn_tensor::test_ad_reshape!();
burn_tensor::test_ad_softmax!();
burn_tensor::test_ad_sub!();
burn_tensor::test_ad_transpose!();
burn_tensor::testgen_ad_add!();
burn_tensor::testgen_ad_aggregation!();
burn_tensor::testgen_ad_cat!();
burn_tensor::testgen_ad_cross_entropy_loss!();
burn_tensor::testgen_ad_div!();
burn_tensor::testgen_ad_erf!();
burn_tensor::testgen_ad_exp!();
burn_tensor::testgen_ad_index!();
burn_tensor::testgen_ad_log!();
burn_tensor::testgen_ad_mask!();
burn_tensor::testgen_ad_matmul!();
burn_tensor::testgen_ad_mul!();
burn_tensor::testgen_ad_neg!();
burn_tensor::testgen_ad_powf!();
burn_tensor::testgen_ad_relu!();
burn_tensor::testgen_ad_reshape!();
burn_tensor::testgen_ad_softmax!();
burn_tensor::testgen_ad_sub!();
burn_tensor::testgen_ad_transpose!();
// test module
burn_tensor::test_module_backward!();
burn_tensor::test_module_forward!();
burn_tensor::testgen_module_backward!();
burn_tensor::testgen_module_forward!();
// test ops
burn_tensor::test_add!();
burn_tensor::test_aggregation!();
burn_tensor::test_arg!();
burn_tensor::test_div!();
burn_tensor::test_erf!();
burn_tensor::test_exp!();
burn_tensor::test_index!();
burn_tensor::test_map_comparison!();
burn_tensor::test_mask!();
burn_tensor::test_matmul!();
burn_tensor::test_mul!();
burn_tensor::test_neg!();
burn_tensor::test_powf!();
burn_tensor::test_repeat!();
burn_tensor::test_reshape!();
burn_tensor::test_sub!();
burn_tensor::test_transpose!();
burn_tensor::testgen_add!();
burn_tensor::testgen_aggregation!();
burn_tensor::testgen_arg!();
burn_tensor::testgen_div!();
burn_tensor::testgen_erf!();
burn_tensor::testgen_exp!();
burn_tensor::testgen_index!();
burn_tensor::testgen_map_comparison!();
burn_tensor::testgen_mask!();
burn_tensor::testgen_matmul!();
burn_tensor::testgen_mul!();
burn_tensor::testgen_neg!();
burn_tensor::testgen_powf!();
burn_tensor::testgen_repeat!();
burn_tensor::testgen_reshape!();
burn_tensor::testgen_sub!();
burn_tensor::testgen_transpose!();
// test stats
burn_tensor::test_stats!();
burn_tensor::testgen_stats!();
};
}

View File

@ -1,27 +1,27 @@
#[macro_export]
macro_rules! test_module_backward {
() => {
#[test]
fn test_embedding_backward() {
let weights = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indexes = Data::from([[0, 1], [1, 1]]);
let x = Data::from([
[[1.0, 2.0], [4.0, 5.0], [3.0, 4.0]],
[[4.0, 5.0], [8.0, 5.0], [1.0, 9.0]],
]);
let weights = Tensor::<TestADBackend, 2>::from_data(weights);
let indexes =
Tensor::<<TestADBackend as Backend>::IntegerBackend, 2>::from_data(indexes);
let x = Tensor::<TestADBackend, 3>::from_data(x);
#[burn_tensor_testgen::testgen(module_backward)]
mod tests {
use super::*;
use burn_tensor::{backend::Backend, module::embedding, Data, Tensor};
let output = embedding(&weights, &indexes);
let output = output.matmul(&x);
let grads = output.backward();
#[test]
fn test_embedding_backward() {
let weights = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indexes = Data::from([[0, 1], [1, 1]]);
let x = Data::from([
[[1.0, 2.0], [4.0, 5.0], [3.0, 4.0]],
[[4.0, 5.0], [8.0, 5.0], [1.0, 9.0]],
]);
let weights = Tensor::<TestADBackend, 2>::from_data(weights);
let indexes = Tensor::<<TestADBackend as Backend>::IntegerBackend, 2>::from_data(indexes);
let x = Tensor::<TestADBackend, 3>::from_data(x);
let grad = weights.grad(&grads).unwrap();
let expected =
Data::<<TestADBackend as Backend>::Elem, 2>::from([[3., 9., 7.], [21., 35., 27.]]);
assert_eq!(grad.to_data(), expected);
}
};
let output = embedding(&weights, &indexes);
let output = output.matmul(&x);
let grads = output.backward();
let grad = weights.grad(&grads).unwrap();
let expected =
Data::<<TestADBackend as Backend>::Elem, 2>::from([[3., 9., 7.], [21., 35., 27.]]);
assert_eq!(grad.to_data(), expected);
}
}

View File

@ -1,19 +1,20 @@
#[macro_export]
macro_rules! test_module_forward {
() => {
#[test]
fn test_embedding_forward() {
let weights = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indexes = Data::from([[0, 1], [1, 1]]);
let weights = Tensor::<TestBackend, 2>::from_data(weights);
let indexes = Tensor::<<TestBackend as Backend>::IntegerBackend, 2>::from_data(indexes);
#[burn_tensor_testgen::testgen(module_forward)]
mod tests {
use super::*;
use burn_tensor::{backend::Backend, module::embedding, Data, Tensor};
let output = embedding(&weights, &indexes);
let expected = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
]);
assert_eq!(output.to_data(), expected);
}
};
#[test]
fn test_embedding_forward() {
let weights = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let indexes = Data::from([[0, 1], [1, 1]]);
let weights = Tensor::<TestBackend, 2>::from_data(weights);
let indexes = Tensor::<<TestBackend as Backend>::IntegerBackend, 2>::from_data(indexes);
let output = embedding(&weights, &indexes);
let expected = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[3.0, 4.0, 5.0], [3.0, 4.0, 5.0]],
]);
assert_eq!(output.to_data(), expected);
}
}

View File

@ -1,17 +1,18 @@
#[macro_export]
macro_rules! test_add {
() => {
#[test]
fn test_add_d2() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(add)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = (tensor_1 + tensor_2).into_data();
#[test]
fn test_add_d2() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[6.0, 8.0, 10.0], [12.0, 14.0, 16.0]]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = (tensor_1 + tensor_2).into_data();
let data_expected = Data::from([[6.0, 8.0, 10.0], [12.0, 14.0, 16.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,44 +1,45 @@
#[macro_export]
macro_rules! test_aggregation {
() => {
#[test]
fn test_should_mean() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(aggregation)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.mean().to_data();
#[test]
fn test_should_mean() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([15.0 / 6.0]));
}
let data_actual = tensor.mean().to_data();
#[test]
fn test_should_sum() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([15.0 / 6.0]));
}
let data_actual = tensor.sum().to_data();
#[test]
fn test_should_sum() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([15.0]));
}
let data_actual = tensor.sum().to_data();
#[test]
fn test_should_mean_dim() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([15.0]));
}
let data_actual = tensor.mean_dim(1).to_data();
#[test]
fn test_should_mean_dim() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([[3.0 / 3.0], [12.0 / 3.0]]));
}
let data_actual = tensor.mean_dim(1).to_data();
#[test]
fn test_should_sum_dim() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([[3.0 / 3.0], [12.0 / 3.0]]));
}
let data_actual = tensor.sum_dim(1).to_data();
#[test]
fn test_should_sum_dim() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data_actual, Data::from([[3.0], [12.0]]));
}
};
let data_actual = tensor.sum_dim(1).to_data();
assert_eq!(data_actual, Data::from([[3.0], [12.0]]));
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_arg {
() => {
#[test]
fn test_argmax_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(arg)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.argmax(1);
#[test]
fn test_argmax_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[2], [2]]);
assert_eq!(data_expected, data_actual.to_data());
}
};
let data_actual = tensor.argmax(1);
let data_expected = Data::from([[2], [2]]);
assert_eq!(data_expected, data_actual.to_data());
}
}

View File

@ -1,31 +1,32 @@
#[macro_export]
macro_rules! test_div {
() => {
#[test]
fn should_support_div_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(div)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let output = tensor_1 / tensor_2;
#[test]
fn should_support_div_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 1.0, 1.0], [1.0, 1.0, 1.0]]);
assert_eq!(data_expected, data_actual);
}
let output = tensor_1 / tensor_2;
#[test]
fn should_support_div_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 1.0, 1.0], [1.0, 1.0, 1.0]]);
assert_eq!(data_expected, data_actual);
}
let output = tensor / scalar;
#[test]
fn should_support_div_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]);
assert_eq!(data_expected, data_actual);
}
};
let output = tensor / scalar;
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_erf {
() => {
#[test]
fn should_support_erf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(erf)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.erf().into_data();
#[test]
fn should_support_erf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[0.0000, 0.8427, 0.9953], [1.0000, 1.0000, 1.0000]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = tensor.erf().into_data();
let data_expected = Data::from([[0.0000, 0.8427, 0.9953], [1.0000, 1.0000, 1.0000]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_exp {
() => {
#[test]
fn should_support_exp_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(exp)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.exp().into_data();
#[test]
fn should_support_exp_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[1.0, 2.71830, 7.3891], [20.0855, 54.5981, 148.4132]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = tensor.exp().into_data();
let data_expected = Data::from([[1.0, 2.71830, 7.3891], [20.0855, 54.5981, 148.4132]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,78 +1,79 @@
#[macro_export]
macro_rules! test_index {
() => {
#[test]
fn should_support_full_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data.clone());
#[burn_tensor_testgen::testgen(index)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.index([0..3]).into_data();
#[test]
fn should_support_full_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data.clone());
assert_eq!(data, data_actual);
}
let data_actual = tensor.index([0..3]).into_data();
#[test]
fn should_support_partial_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data);
assert_eq!(data, data_actual);
}
let data_actual = tensor.index([1..3]).into_data();
#[test]
fn should_support_partial_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data);
let data_expected = Data::from([1.0, 2.0]);
assert_eq!(data_expected, data_actual);
}
let data_actual = tensor.index([1..3]).into_data();
#[test]
fn should_support_full_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data.clone());
let data_expected = Data::from([1.0, 2.0]);
assert_eq!(data_expected, data_actual);
}
let data_actual_1 = tensor.index([0..2]).into_data();
let data_actual_2 = tensor.index([0..2, 0..3]).into_data();
#[test]
fn should_support_full_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data.clone());
assert_eq!(data, data_actual_1);
assert_eq!(data, data_actual_2);
}
let data_actual_1 = tensor.index([0..2]).into_data();
let data_actual_2 = tensor.index([0..2, 0..3]).into_data();
#[test]
fn should_support_partial_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
assert_eq!(data, data_actual_1);
assert_eq!(data, data_actual_2);
}
let data_actual = tensor.index([0..2, 0..2]).into_data();
#[test]
fn should_support_partial_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[0.0, 1.0], [3.0, 4.0]]);
assert_eq!(data_expected, data_actual);
}
let data_actual = tensor.index([0..2, 0..2]).into_data();
#[test]
fn should_support_indexe_assign_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let data_assigned = Data::from([10.0, 5.0]);
let data_expected = Data::from([[0.0, 1.0], [3.0, 4.0]]);
assert_eq!(data_expected, data_actual);
}
let tensor = Tensor::<TestBackend, 1>::from_data(data);
let tensor_assigned = Tensor::<TestBackend, 1>::from_data(data_assigned);
#[test]
fn should_support_indexe_assign_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let data_assigned = Data::from([10.0, 5.0]);
let data_actual = tensor.index_assign([0..2], &tensor_assigned).into_data();
let tensor = Tensor::<TestBackend, 1>::from_data(data);
let tensor_assigned = Tensor::<TestBackend, 1>::from_data(data_assigned);
let data_expected = Data::from([10.0, 5.0, 2.0]);
assert_eq!(data_expected, data_actual);
}
let data_actual = tensor.index_assign([0..2], &tensor_assigned).into_data();
#[test]
fn should_support_indexe_assign_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_assigned = Data::from([[10.0, 5.0]]);
let data_expected = Data::from([10.0, 5.0, 2.0]);
assert_eq!(data_expected, data_actual);
}
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let tensor_assigned = Tensor::<TestBackend, 2>::from_data(data_assigned);
#[test]
fn should_support_indexe_assign_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_assigned = Data::from([[10.0, 5.0]]);
let data_actual = tensor
.index_assign([1..2, 0..2], &tensor_assigned)
.into_data();
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let tensor_assigned = Tensor::<TestBackend, 2>::from_data(data_assigned);
let data_expected = Data::from([[0.0, 1.0, 2.0], [10.0, 5.0, 5.0]]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = tensor
.index_assign([1..2, 0..2], &tensor_assigned)
.into_data();
let data_expected = Data::from([[0.0, 1.0, 2.0], [10.0, 5.0, 5.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,100 +1,101 @@
#[macro_export]
macro_rules! test_map_comparison {
() => {
#[test]
fn test_greater_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
#[burn_tensor_testgen::testgen(map_comparison)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor_1.greater_scalar(4.0);
#[test]
fn test_greater_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[false, false, false], [false, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater_scalar(4.0);
#[test]
fn test_greater_equal_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[false, false, false], [false, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater_equal_scalar(4.0);
#[test]
fn test_greater_equal_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[false, false, false], [false, true, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater_equal_scalar(4.0);
#[test]
fn test_greater() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[false, false, false], [false, true, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater(&tensor_2);
#[test]
fn test_greater() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[false, false, true], [false, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater(&tensor_2);
#[test]
fn test_greater_equal() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[false, false, true], [false, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater_equal(&tensor_2);
#[test]
fn test_greater_equal() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[false, true, true], [false, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.greater_equal(&tensor_2);
#[test]
fn test_lower_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[false, true, true], [false, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower_scalar(4.0);
#[test]
fn test_lower_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[true, true, true], [true, false, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower_scalar(4.0);
#[test]
fn test_lower_equal_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[true, true, true], [true, false, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower_equal_scalar(4.0);
#[test]
fn test_lower_equal_scalar() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let data_expected = Data::from([[true, true, true], [true, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower_equal_scalar(4.0);
#[test]
fn test_lower() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[true, true, true], [true, true, false]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower(&tensor_2);
#[test]
fn test_lower() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[true, false, false], [true, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower(&tensor_2);
#[test]
fn test_lower_equal() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[true, false, false], [true, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
let data_actual = tensor_1.lower_equal(&tensor_2);
#[test]
fn test_lower_equal() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[1.0, 1.0, 1.0], [4.0, 3.0, 50.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_expected = Data::from([[true, true, false], [true, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
};
let data_actual = tensor_1.lower_equal(&tensor_2);
let data_expected = Data::from([[true, true, false], [true, false, true]]);
assert_eq!(data_expected, data_actual.to_data());
}
}

View File

@ -1,16 +1,17 @@
#[macro_export]
macro_rules! test_mask {
() => {
#[test]
fn should_support_mask_ops() {
let tensor = Tensor::<TestBackend, 2>::from_data(Data::from([[1.0, 7.0], [2.0, 3.0]]));
let mask =
BoolTensor::<TestBackend, 2>::from_data(Data::from([[true, false], [false, true]]));
#[burn_tensor_testgen::testgen(mask)]
mod tests {
use super::*;
use burn_tensor::{BoolTensor, Data, Tensor};
let data_actual = tensor.mask_fill(&mask, 2.0).to_data();
#[test]
fn should_support_mask_ops() {
let tensor = Tensor::<TestBackend, 2>::from_data(Data::from([[1.0, 7.0], [2.0, 3.0]]));
let mask =
BoolTensor::<TestBackend, 2>::from_data(Data::from([[true, false], [false, true]]));
let data_expected = Data::from([[2.0, 7.0], [2.0, 2.0]]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = tensor.mask_fill(&mask, 2.0).to_data();
let data_expected = Data::from([[2.0, 7.0], [2.0, 2.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,34 +1,35 @@
#[macro_export]
macro_rules! test_matmul {
() => {
#[test]
fn test_matmul_d2() {
let data_1 = Data::from([[1.0, 7.0], [2.0, 3.0], [1.0, 5.0]]);
let data_2 = Data::from([[4.0, 7.0, 5.0], [2.0, 3.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(matmul)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let tensor_3 = tensor_1.matmul(&tensor_2);
#[test]
fn test_matmul_d2() {
let data_1 = Data::from([[1.0, 7.0], [2.0, 3.0], [1.0, 5.0]]);
let data_2 = Data::from([[4.0, 7.0, 5.0], [2.0, 3.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
assert_eq!(
tensor_3.into_data(),
Data::from([[18.0, 28.0, 40.0], [14.0, 23.0, 25.0], [14.0, 22.0, 30.0]])
);
}
let tensor_3 = tensor_1.matmul(&tensor_2);
#[test]
fn test_matmul_d3() {
let data_1 = Data::from([[[1.0, 7.0], [2.0, 3.0]]]);
let data_2 = Data::from([[[4.0, 7.0], [2.0, 3.0]]]);
let tensor_1 = Tensor::<TestBackend, 3>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 3>::from_data(data_2);
assert_eq!(
tensor_3.into_data(),
Data::from([[18.0, 28.0, 40.0], [14.0, 23.0, 25.0], [14.0, 22.0, 30.0]])
);
}
let tensor_3 = tensor_1.matmul(&tensor_2);
#[test]
fn test_matmul_d3() {
let data_1 = Data::from([[[1.0, 7.0], [2.0, 3.0]]]);
let data_2 = Data::from([[[4.0, 7.0], [2.0, 3.0]]]);
let tensor_1 = Tensor::<TestBackend, 3>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 3>::from_data(data_2);
assert_eq!(
tensor_3.into_data(),
Data::from([[[18.0, 28.0], [14.0, 23.0]]])
);
}
};
let tensor_3 = tensor_1.matmul(&tensor_2);
assert_eq!(
tensor_3.into_data(),
Data::from([[[18.0, 28.0], [14.0, 23.0]]])
);
}
}

View File

@ -1,31 +1,32 @@
#[macro_export]
macro_rules! test_mul {
() => {
#[test]
fn should_support_mul_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(mul)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let output = tensor_1 * tensor_2;
#[test]
fn should_support_mul_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 1.0, 4.0], [9.0, 16.0, 25.0]]);
assert_eq!(data_expected, data_actual);
}
let output = tensor_1 * tensor_2;
#[test]
fn should_support_mul_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 1.0, 4.0], [9.0, 16.0, 25.0]]);
assert_eq!(data_expected, data_actual);
}
let output = tensor * scalar;
#[test]
fn should_support_mul_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 2.0, 4.0], [6.0, 8.0, 10.0]]);
assert_eq!(data_expected, data_actual);
}
};
let output = tensor * scalar;
let data_actual = output.into_data();
let data_expected = Data::from([[0.0, 2.0, 4.0], [6.0, 8.0, 10.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_neg {
() => {
#[test]
fn should_support_neg_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(neg)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.neg().into_data();
#[test]
fn should_support_neg_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[-0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = tensor.neg().into_data();
let data_expected = Data::from([[-0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_powf {
() => {
#[test]
fn should_support_powf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(powf)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.powf(0.71).into_data();
#[test]
fn should_support_powf_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[0.0, 1.0, 1.6358], [2.182, 2.6759, 3.1352]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = tensor.powf(0.71).into_data();
let data_expected = Data::from([[0.0, 1.0, 1.6358], [2.182, 2.6759, 3.1352]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,20 +1,21 @@
#[macro_export]
macro_rules! test_repeat {
() => {
#[test]
fn should_support_repeat_ops() {
let data = Data::from([[0.0, 1.0, 2.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(repeat)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.repeat(0, 4).into_data();
#[test]
fn should_support_repeat_ops() {
let data = Data::from([[0.0, 1.0, 2.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = tensor.repeat(0, 4).into_data();
let data_expected = Data::from([
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
[0.0, 1.0, 2.0],
]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,26 +1,27 @@
#[macro_export]
macro_rules! test_reshape {
() => {
#[test]
fn should_support_reshape_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data);
#[burn_tensor_testgen::testgen(reshape)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.reshape(Shape::new([1, 3])).into_data();
#[test]
fn should_support_reshape_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = Tensor::<TestBackend, 1>::from_data(data);
let data_expected = Data::from([[0.0, 1.0, 2.0]]);
assert_eq!(data_expected, data_actual);
}
let data_actual = tensor.reshape([1, 3]).into_data();
#[test]
fn should_support_reshape_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[0.0, 1.0, 2.0]]);
assert_eq!(data_expected, data_actual);
}
let data_actual = tensor.reshape(Shape::new([6])).into_data();
#[test]
fn should_support_reshape_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = tensor.reshape([6]).into_data();
let data_expected = Data::from([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,17 +1,18 @@
#[macro_export]
macro_rules! test_sub {
() => {
#[test]
fn should_support_sub_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let data_expected = Data::from([[-6.0, -6.0, -6.0], [-6.0, -6.0, -6.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
#[burn_tensor_testgen::testgen(sub)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = (tensor_1 - tensor_2).into_data();
#[test]
fn should_support_sub_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let data_expected = Data::from([[-6.0, -6.0, -6.0], [-6.0, -6.0, -6.0]]);
let tensor_1 = Tensor::<TestBackend, 2>::from_data(data_1);
let tensor_2 = Tensor::<TestBackend, 2>::from_data(data_2);
assert_eq!(data_expected, data_actual);
}
};
let data_actual = (tensor_1 - tensor_2).into_data();
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,39 +1,40 @@
#[macro_export]
macro_rules! test_transpose {
() => {
#[test]
fn should_support_transpose_ops() {
let data = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let tensor = Tensor::<TestBackend, 3>::from_data(data);
#[burn_tensor_testgen::testgen(transpose)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.transpose().into_data();
#[test]
fn should_support_transpose_ops() {
let data = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let tensor = Tensor::<TestBackend, 3>::from_data(data);
let data_expected = Data::from([
[[0.0, 3.0], [1.0, 4.0], [2.0, 5.0]],
[[6.0, 9.0], [7.0, 10.0], [8.0, 11.0]],
]);
data_expected.assert_approx_eq(&data_actual, 3);
}
let data_actual = tensor.transpose().into_data();
#[test]
fn should_support_swap_dims() {
let data = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let tensor = Tensor::<TestBackend, 3>::from_data(data);
let data_expected = Data::from([
[[0.0, 3.0], [1.0, 4.0], [2.0, 5.0]],
[[6.0, 9.0], [7.0, 10.0], [8.0, 11.0]],
]);
data_expected.assert_approx_eq(&data_actual, 3);
}
let data_actual = tensor.swap_dims(0, 2).into_data();
#[test]
fn should_support_swap_dims() {
let data = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]],
]);
let tensor = Tensor::<TestBackend, 3>::from_data(data);
let data_expected = Data::from([
[[0.0, 6.0], [3.0, 9.0]],
[[1.0, 7.0], [4.0, 10.0]],
[[2.0, 8.0], [5.0, 11.0]],
]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = tensor.swap_dims(0, 2).into_data();
let data_expected = Data::from([
[[0.0, 6.0], [3.0, 9.0]],
[[1.0, 7.0], [4.0, 10.0]],
[[2.0, 8.0], [5.0, 11.0]],
]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,15 +1,16 @@
#[macro_export]
macro_rules! test_stats {
() => {
#[test]
fn test_var() {
let data = Data::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
#[burn_tensor_testgen::testgen(stats)]
mod tests {
use super::*;
use burn_tensor::{Data, Tensor};
let data_actual = tensor.var(1).into_data();
#[test]
fn test_var() {
let data = Data::from([[0.5, 1.8, 0.2, -2.0], [3.0, -4.0, 5.0, 0.0]]);
let tensor = Tensor::<TestBackend, 2>::from_data(data);
let data_expected = Data::from([[2.4892], [15.3333]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
};
let data_actual = tensor.var(1).into_data();
let data_expected = Data::from([[2.4892], [15.3333]]);
data_expected.assert_approx_eq(&data_actual, 3);
}
}

View File

@ -1,4 +0,0 @@
pub type TestBackend = burn_tensor::backend::NdArrayBackend<f32>;
#[cfg(feature = "export_tests")]
burn_tensor::test_all!();