Refactor serialization of backend-comparison benchmarks (#1131)

* Refactor serialization of benchmarks

* flatten benchmark data to make it easier to save documents to a database and
query them
* split some information, such as backend and device, into their own fields
* add new serialized info:
  - computed values (mean, median, variance, min, max)
  - number of samples
  - operation name
  - tensor shapes if any
* serialize to separate files, one file per benchmark run
* simplify the persistence module to a single save method

* Update bench save file format to use name and uuid

* Compute serialized fields count automatically via a macro

* Rework naming of benchmarks and shapes, and add an options field

Remove operations field
Correctly create one file per benchmark run

* Serialize benchmark num_repeats

* Fix expect message to follow the 'should' convention

* Cargo fmt :-)

* Make Clippy happy

* Save files in the burn subdirectory

* Change name of custom_gelu bench to just gelu

* Remove num_repeats from backend-comparison benchmarks

* Fix wrong variable name to compute the median

* Remove false positive possibility in test_mean_duration
Sylvain Benner 2024-01-12 11:15:00 -05:00 committed by GitHub
parent 6983cede31
commit 9bd2d7b7d4
9 changed files with 322 additions and 163 deletions

View File

@ -1,10 +1,9 @@
use backend_comparison::persistence::Persistence;
use backend_comparison::persistence::save;
use burn::tensor::{backend::Backend, Distribution, Shape, Tensor};
use burn_common::benchmark::{run_benchmark, Benchmark};
pub struct BinaryBenchmark<B: Backend, const D: usize> {
shape: Shape<D>,
num_repeats: usize,
device: B::Device,
}
@ -12,14 +11,16 @@ impl<B: Backend, const D: usize> Benchmark for BinaryBenchmark<B, D> {
type Args = (Tensor<B, D>, Tensor<B, D>);
fn name(&self) -> String {
"Binary Ops".into()
"binary".into()
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape.dims.into()]
}
fn execute(&self, (lhs, rhs): Self::Args) {
for _ in 0..self.num_repeats {
// Choice of add is arbitrary
B::add(lhs.clone().into_primitive(), rhs.clone().into_primitive());
}
// Choice of add is arbitrary
B::add(lhs.clone().into_primitive(), rhs.clone().into_primitive());
}
fn prepare(&self) -> Self::Args {
@ -38,11 +39,10 @@ impl<B: Backend, const D: usize> Benchmark for BinaryBenchmark<B, D> {
fn bench<B: Backend>(device: &B::Device) {
let benchmark = BinaryBenchmark::<B, 3> {
shape: [32, 512, 1024].into(),
num_repeats: 10,
device: device.clone(),
};
Persistence::persist::<B>(vec![run_benchmark(benchmark)], device)
save::<B>(vec![run_benchmark(benchmark)], device).unwrap();
}
fn main() {

View File

@ -1,4 +1,4 @@
use backend_comparison::persistence::Persistence;
use backend_comparison::persistence::save;
use burn::tensor::{backend::Backend, Distribution, Shape, Tensor};
use burn_common::benchmark::{run_benchmark, Benchmark};
use core::f64::consts::SQRT_2;
@ -24,7 +24,15 @@ impl<B: Backend, const D: usize> Benchmark for CustomGeluBenchmark<B, D> {
type Args = Tensor<B, D>;
fn name(&self) -> String {
format!("Gelu {:?}", self.kind)
"gelu".into()
}
fn options(&self) -> Option<String> {
Some(format!("{:?}", self.kind))
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape.dims.into()]
}
fn execute(&self, args: Self::Args) {
@ -96,7 +104,7 @@ fn bench<B: Backend>(device: &B::Device) {
let custom_erf_gelu =
CustomGeluBenchmark::<B, D>::new(shape, device.clone(), GeluKind::WithCustomErf);
Persistence::persist::<B>(
save::<B>(
vec![
run_benchmark(reference_gelu),
run_benchmark(reference_erf_gelu),
@ -104,6 +112,7 @@ fn bench<B: Backend>(device: &B::Device) {
],
device,
)
.unwrap();
}
fn main() {

View File

@ -1,4 +1,4 @@
use backend_comparison::persistence::Persistence;
use backend_comparison::persistence::save;
use burn::tensor::{backend::Backend, Data, Distribution, Shape, Tensor};
use burn_common::benchmark::{run_benchmark, Benchmark};
use derive_new::new;
@ -6,7 +6,6 @@ use derive_new::new;
#[derive(new)]
struct ToDataBenchmark<B: Backend, const D: usize> {
shape: Shape<D>,
num_repeats: usize,
device: B::Device,
}
@ -14,13 +13,15 @@ impl<B: Backend, const D: usize> Benchmark for ToDataBenchmark<B, D> {
type Args = Tensor<B, D>;
fn name(&self) -> String {
format!("to-data-{:?}-{}", self.shape.dims, self.num_repeats)
"to_data".into()
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape.dims.into()]
}
fn execute(&self, args: Self::Args) {
for _ in 0..self.num_repeats {
let _data = args.to_data();
}
let _data = args.to_data();
}
fn prepare(&self) -> Self::Args {
@ -35,7 +36,6 @@ impl<B: Backend, const D: usize> Benchmark for ToDataBenchmark<B, D> {
#[derive(new)]
struct FromDataBenchmark<B: Backend, const D: usize> {
shape: Shape<D>,
num_repeats: usize,
device: B::Device,
}
@ -43,13 +43,15 @@ impl<B: Backend, const D: usize> Benchmark for FromDataBenchmark<B, D> {
type Args = (Data<B::FloatElem, D>, B::Device);
fn name(&self) -> String {
format!("from-data-{:?}-{}", self.shape.dims, self.num_repeats)
"from_data".into()
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape.dims.into()]
}
fn execute(&self, (data, device): Self::Args) {
for _ in 0..self.num_repeats {
let _data = Tensor::<B, D>::from_data(data.clone(), &device);
}
let _data = Tensor::<B, D>::from_data(data.clone(), &device);
}
fn prepare(&self) -> Self::Args {
@ -72,15 +74,15 @@ impl<B: Backend, const D: usize> Benchmark for FromDataBenchmark<B, D> {
fn bench<B: Backend>(device: &B::Device) {
const D: usize = 3;
let shape: Shape<D> = [32, 512, 1024].into();
let num_repeats = 10;
let to_benchmark = ToDataBenchmark::<B, D>::new(shape.clone(), num_repeats, device.clone());
let from_benchmark = FromDataBenchmark::<B, D>::new(shape, num_repeats, device.clone());
let to_benchmark = ToDataBenchmark::<B, D>::new(shape.clone(), device.clone());
let from_benchmark = FromDataBenchmark::<B, D>::new(shape, device.clone());
Persistence::persist::<B>(
save::<B>(
vec![run_benchmark(to_benchmark), run_benchmark(from_benchmark)],
device,
)
.unwrap();
}
fn main() {

View File

@ -1,4 +1,4 @@
use backend_comparison::persistence::Persistence;
use backend_comparison::persistence::save;
use burn::tensor::{backend::Backend, Distribution, Shape, Tensor};
use burn_common::benchmark::{run_benchmark, Benchmark};
use derive_new::new;
@ -7,7 +7,6 @@ use derive_new::new;
struct MatmulBenchmark<B: Backend, const D: usize> {
shape_lhs: Shape<D>,
shape_rhs: Shape<D>,
num_repeats: usize,
device: B::Device,
}
@ -15,10 +14,11 @@ impl<B: Backend, const D: usize> Benchmark for MatmulBenchmark<B, D> {
type Args = (Tensor<B, D>, Tensor<B, D>);
fn name(&self) -> String {
format!(
"Matmul {:?} x {:?}",
self.shape_lhs.dims, self.shape_rhs.dims
)
"matmul".into()
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape_lhs.dims.into(), self.shape_rhs.dims.into()]
}
fn num_samples(&self) -> usize {
@ -26,9 +26,7 @@ impl<B: Backend, const D: usize> Benchmark for MatmulBenchmark<B, D> {
}
fn execute(&self, (lhs, rhs): Self::Args) {
for _ in 0..self.num_repeats {
lhs.clone().matmul(rhs.clone());
}
lhs.clone().matmul(rhs.clone());
}
fn prepare(&self) -> Self::Args {
@ -46,7 +44,6 @@ impl<B: Backend, const D: usize> Benchmark for MatmulBenchmark<B, D> {
#[allow(dead_code)]
fn bench<B: Backend>(device: &B::Device) {
const D: usize = 3;
let num_repeats = 3;
let batch_size = 3;
let m = 1024;
let k = 2048;
@ -54,8 +51,9 @@ fn bench<B: Backend>(device: &B::Device) {
let shape_lhs = [batch_size, m, k].into();
let shape_rhs = [batch_size, k, n].into();
let benchmark = MatmulBenchmark::<B, D>::new(shape_lhs, shape_rhs, num_repeats, device.clone());
Persistence::persist::<B>(vec![run_benchmark(benchmark)], device)
let benchmark = MatmulBenchmark::<B, D>::new(shape_lhs, shape_rhs, device.clone());
save::<B>(vec![run_benchmark(benchmark)], device).unwrap();
}
fn main() {

View File

@ -1,4 +1,4 @@
use backend_comparison::persistence::Persistence;
use backend_comparison::persistence::save;
use burn::tensor::{backend::Backend, Distribution, Shape, Tensor};
use burn_common::benchmark::{run_benchmark, Benchmark};
use derive_new::new;
@ -6,7 +6,6 @@ use derive_new::new;
#[derive(new)]
struct UnaryBenchmark<B: Backend, const D: usize> {
shape: Shape<D>,
num_repeats: usize,
device: B::Device,
}
@ -14,14 +13,16 @@ impl<B: Backend, const D: usize> Benchmark for UnaryBenchmark<B, D> {
type Args = Tensor<B, D>;
fn name(&self) -> String {
"Unary Ops".into()
"unary".into()
}
fn shapes(&self) -> Vec<Vec<usize>> {
vec![self.shape.dims.into()]
}
fn execute(&self, args: Self::Args) {
for _ in 0..self.num_repeats {
// Choice of tanh is arbitrary
B::tanh(args.clone().into_primitive());
}
// Choice of tanh is arbitrary
B::tanh(args.clone().into_primitive());
}
fn prepare(&self) -> Self::Args {
@ -37,11 +38,10 @@ impl<B: Backend, const D: usize> Benchmark for UnaryBenchmark<B, D> {
fn bench<B: Backend>(device: &B::Device) {
const D: usize = 3;
let shape: Shape<D> = [32, 512, 1024].into();
let num_repeats = 10;
let benchmark = UnaryBenchmark::<B, D>::new(shape, num_repeats, device.clone());
let benchmark = UnaryBenchmark::<B, D>::new(shape, device.clone());
Persistence::persist::<B>(vec![run_benchmark(benchmark)], device)
save::<B>(vec![run_benchmark(benchmark)], device).unwrap();
}
fn main() {

View File

@ -1,100 +1,120 @@
use std::{
collections::HashMap,
fs::{create_dir_all, File},
path::PathBuf,
time::Duration,
};
use std::fs;
use burn::tensor::backend::Backend;
use burn::{
serde::{ser::SerializeStruct, Serialize, Serializer},
tensor::backend::Backend,
};
use burn_common::benchmark::BenchmarkResult;
use dirs;
use serde_json;
type BenchmarkCommitResults = HashMap<String, BenchmarkOpResults>;
type BenchmarkOpResults = HashMap<String, BenchmarkBackendResults>;
type BenchmarkBackendResults = HashMap<String, StampedBenchmarks>;
type StampedBenchmarks = HashMap<u128, Vec<Duration>>;
#[derive(Default)]
pub struct Persistence {
results: BenchmarkCommitResults,
#[derive(Default, Clone)]
pub struct BenchmarkRecord {
backend: String,
device: String,
results: BenchmarkResult,
}
impl Persistence {
/// Updates the cached backend comparison json file with new benchmarks results.
///
/// The file has the following structure:
///
/// {
/// "GIT_COMMIT_HASH":
/// {
/// "BENCHMARK_NAME (OP + SHAPE)": {
/// "BACKEND_NAME-DEVICE": {
/// "TIMESTAMP": \[
/// DURATIONS
/// \]
/// }
/// }
/// }
/// }
pub fn persist<B: Backend>(benches: Vec<BenchmarkResult>, device: &B::Device) {
for bench in benches.iter() {
println!("{}", bench);
}
let cache_file = dirs::home_dir()
.expect("Could not get home directory")
.join(".cache")
.join("backend-comparison")
.join("db.json");
/// Save the benchmark results on disk.
///
/// The structure is flat so that it can be easily queried from a database
/// like MongoDB.
///
/// ```txt
/// [
/// {
/// "backend": "backend name",
/// "device": "device name",
/// "git_hash": "hash",
/// "name": "benchmark name",
/// "operation": "operation name",
/// "shapes": ["shape dimension", "shape dimension", ...],
/// "timestamp": "timestamp",
/// "numSamples": "number of samples",
/// "min": "duration in seconds",
/// "max": "duration in seconds",
/// "median": "duration in seconds",
/// "mean": "duration in seconds",
/// "variance": "duration in seconds"
/// "rawDurations": ["duration 1", "duration 2", ...],
/// },
/// { ... }
/// ]
/// ```
pub fn save<B: Backend>(
benches: Vec<BenchmarkResult>,
device: &B::Device,
) -> Result<Vec<BenchmarkRecord>, std::io::Error> {
let cache_dir = dirs::home_dir()
.expect("Home directory should exist")
.join(".cache")
.join("burn")
.join("backend-comparison");
let mut cache = Self::load(&cache_file);
cache.update::<B>(device, benches);
cache.save(&cache_file);
println!("Persisting to {:?}", cache_file);
if !cache_dir.exists() {
fs::create_dir_all(&cache_dir)?;
}
/// Load the cache from disk.
fn load(path: &PathBuf) -> Self {
let results = match File::open(path) {
Ok(file) => serde_json::from_reader(file)
.expect("Should have parsed to BenchmarkCommitResults struct"),
Err(_) => HashMap::default(),
};
let records: Vec<BenchmarkRecord> = benches
.into_iter()
.map(|bench| BenchmarkRecord {
backend: B::name().to_string(),
device: format!("{:?}", device),
results: bench,
})
.collect();
Self { results }
for record in records.clone() {
let file_name = format!(
"bench_{}_{}.json",
record.results.name, record.results.timestamp
);
let file_path = cache_dir.join(file_name);
let file = fs::File::create(file_path).expect("Benchmark file should exist or be created");
serde_json::to_writer_pretty(file, &record)
.expect("Benchmark file should be updated with benchmark results");
}
/// Save the cache on disk.
fn save(&self, path: &PathBuf) {
if let Some(parent) = path.parent() {
create_dir_all(parent).expect("Unable to create directory");
}
let file = File::create(path).expect("Unable to create backend comparison file");
Ok(records)
}
serde_json::to_writer_pretty(file, &self.results)
.expect("Unable to write to backend comparison file");
}
/// Macro to easily serialize each field in a flattened manner.
/// This macro automatically computes the number of fields to serialize
/// and allows specifying a custom serialization key for each field.
macro_rules! serialize_fields {
($serializer:expr, $record:expr, $(($key:expr, $field:expr)),*) => {{
// Hacky way to get the fields count
let fields_count = [ $(stringify!($key),)+ ].len();
let mut state = $serializer.serialize_struct("BenchmarkRecord", fields_count)?;
$(
state.serialize_field($key, $field)?;
)*
state.end()
}};
}
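
The `fields_count` expression above is the "hacky" part the inline comment refers to. The standalone sketch below is not from this commit; it only isolates the same trick so it can be run on its own: each key token is stringified into an array, and the array length is the count handed to `serialize_struct`.

```rust
// Standalone illustration of the field-count trick (illustrative, not part of the PR).
fn main() {
    // stringify! turns each key token into a &'static str at compile time;
    // collecting them into an array and taking its length yields the field count.
    let fields_count = [stringify!("backend"), stringify!("device")].len();
    assert_eq!(fields_count, 2);
    println!("serialize_struct would be told to expect {fields_count} fields");
}
```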
/// Update the cache with the given [benchmark results](BenchmarkResult).
///
/// Assumes only that benches share the same backend and device.
/// It could run faster if we assumed they have the same git hash
fn update<B: Backend>(&mut self, device: &B::Device, benches: Vec<BenchmarkResult>) {
let backend_key = format!("{}-{:?}", B::name(), device);
for bench in benches {
let mut benchmark_op_results = self.results.remove(&bench.git_hash).unwrap_or_default();
let mut benchmark_backend_results =
benchmark_op_results.remove(&bench.name).unwrap_or_default();
let mut stamped_benchmarks = benchmark_backend_results
.remove(&backend_key)
.unwrap_or_default();
stamped_benchmarks.insert(bench.timestamp, bench.durations.durations);
benchmark_backend_results.insert(backend_key.clone(), stamped_benchmarks);
benchmark_op_results.insert(bench.name, benchmark_backend_results);
self.results.insert(bench.git_hash, benchmark_op_results);
}
impl Serialize for BenchmarkRecord {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serialize_fields!(
serializer,
self,
("backend", &self.backend),
("device", &self.device),
("gitHash", &self.results.git_hash),
("max", &self.results.computed.max.as_micros()),
("mean", &self.results.computed.mean.as_micros()),
("median", &self.results.computed.median.as_micros()),
("min", &self.results.computed.min.as_micros()),
("name", &self.results.name),
("numSamples", &self.results.raw.durations.len()),
("options", &self.results.options),
("rawDurations", &self.results.raw.durations),
("shapes", &self.results.shapes),
("timestamp", &self.results.timestamp),
("variance", &self.results.computed.variance.as_micros())
)
}
}
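
As a rough sketch of how these flat records can be consumed, a record can be read back as untyped JSON and queried field by field. This is not part of the commit: it assumes the `dirs` and `serde_json` crates, the `bench_<name>_<timestamp>.json` naming used by `save` above, and a made-up file name.

```rust
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical file name following the bench_{name}_{timestamp}.json pattern.
    let path = dirs::home_dir()
        .expect("Home directory should exist")
        .join(".cache")
        .join("burn")
        .join("backend-comparison")
        .join("bench_matmul_1705075200000.json");

    // Because the record is flat, untyped JSON access is enough to query it,
    // which is also what makes it easy to push into a document database.
    let record: serde_json::Value = serde_json::from_str(&fs::read_to_string(path)?)?;
    println!("backend = {}", record["backend"]);
    println!("median  = {} µs", record["median"]);
    Ok(())
}
```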

View File

@ -1,5 +1,6 @@
use alloc::format;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use core::fmt::Display;
use core::time::Duration;
@ -12,27 +13,30 @@ use std::time::Instant;
use web_time::Instant;
/// Results of a benchmark run.
#[derive(new, Debug, Clone, Serialize, Deserialize)]
#[derive(new, Debug, Default, Clone, Serialize, Deserialize)]
pub struct BenchmarkDurations {
/// All durations of the run, in the order they were benchmarked
pub durations: Vec<Duration>,
}
impl BenchmarkDurations {
/// Returns the median duration among all durations
pub fn median_duration(&self) -> Duration {
/// Returns a tuple of durations: (min, max, median)
fn min_max_median_durations(&self) -> (Duration, Duration, Duration) {
let mut sorted = self.durations.clone();
sorted.sort();
*sorted.get(sorted.len() / 2).unwrap()
let min = *sorted.first().unwrap();
let max = *sorted.last().unwrap();
let median = *sorted.get(sorted.len() / 2).unwrap();
(min, max, median)
}
/// Returns the mean duration among all durations
pub(crate) fn mean_duration(&self) -> Duration {
self.durations.iter().sum::<Duration>() / self.durations.len() as u32
}
}
impl Display for BenchmarkDurations {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mean = self.mean_duration();
/// Returns the variance of the durations for a given mean
pub(crate) fn variance_duration(&self, mean: Duration) -> Duration {
let var = self
.durations
.iter()
@ -42,13 +46,20 @@ impl Display for BenchmarkDurations {
})
.sum::<Duration>()
/ self.durations.len() as u32;
var
}
}
let mut sorted = self.durations.clone();
sorted.sort();
let min = sorted.first().unwrap();
let max = sorted.last().unwrap();
let median = sorted.get(sorted.len() / 2).unwrap();
impl Display for BenchmarkDurations {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let computed = BenchmarkComputations::new(self);
let BenchmarkComputations {
mean,
median,
variance,
min,
max,
} = computed;
let num_sample = self.durations.len();
f.write_str(
@ -57,7 +68,7 @@ impl Display for BenchmarkDurations {
Result
Samples {num_sample}
Mean {mean:.3?}
Variance {var:.3?}
Variance {variance:.3?}
Median {median:.3?}
Min {min:.3?}
Max {max:.3?}
@ -68,6 +79,36 @@ impl Display for BenchmarkDurations {
}
}
/// Computed values from benchmark durations.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BenchmarkComputations {
/// Mean of all the durations.
pub mean: Duration,
/// Median of all the durations.
pub median: Duration,
/// Variance of all the durations.
pub variance: Duration,
/// Minimum duration amongst all durations.
pub min: Duration,
/// Maximum duration amongst all durations.
pub max: Duration,
}
impl BenchmarkComputations {
/// Compute duration values and return a BenchmarkComputations struct
pub fn new(durations: &BenchmarkDurations) -> Self {
let mean = durations.mean_duration();
let (min, max, median) = durations.min_max_median_durations();
Self {
mean,
median,
min,
max,
variance: durations.variance_duration(mean),
}
}
}
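
A minimal usage sketch of the new struct (not part of the diff, assuming `BenchmarkDurations` and `BenchmarkComputations` are in scope from `burn_common::benchmark`); the sample values mirror the unit tests further down:

```rust
use core::time::Duration;

fn main() {
    // Five synthetic samples: mean 30 s, median 30 s, variance 200 s (see the tests below).
    let durations = BenchmarkDurations {
        durations: vec![
            Duration::from_secs(10),
            Duration::from_secs(20),
            Duration::from_secs(30),
            Duration::from_secs(40),
            Duration::from_secs(50),
        ],
    };
    let computed = BenchmarkComputations::new(&durations);
    assert_eq!(computed.mean, Duration::from_secs(30));
    assert_eq!(computed.median, Duration::from_secs(30));
    assert_eq!(computed.min, Duration::from_secs(10));
    assert_eq!(computed.max, Duration::from_secs(50));
    assert_eq!(computed.variance, Duration::from_secs(200));
}
```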
/// Benchmark trait.
pub trait Benchmark {
/// Benchmark arguments.
@ -83,13 +124,22 @@ pub trait Benchmark {
fn prepare(&self) -> Self::Args;
/// Execute the benchmark with the given arguments.
fn execute(&self, args: Self::Args);
/// Number of samples required to have a statistical significance.
/// Number of samples per run required to have statistical significance.
fn num_samples(&self) -> usize {
10
}
/// Name of the benchmark.
/// Name of the benchmark. It should be short and match the name
/// defined in the crate's Cargo.toml.
fn name(&self) -> String;
/// Wait for computations to be over
/// The options passed to the benchmark.
fn options(&self) -> Option<String> {
None
}
/// Shape dimensions, if any
fn shapes(&self) -> Vec<Vec<usize>> {
vec![]
}
/// Wait for computations to be over
fn sync(&self);
/// Run the benchmark a number of times.
fn run(&self) -> BenchmarkDurations {
@ -125,15 +175,22 @@ pub trait Benchmark {
}
/// Result of a benchmark run, with metadata
#[derive(Default, Clone)]
pub struct BenchmarkResult {
/// Individual results of the run
pub durations: BenchmarkDurations,
/// Time just before the run
pub timestamp: u128,
/// Individual raw results of the run
pub raw: BenchmarkDurations,
/// Computed values for the run
pub computed: BenchmarkComputations,
/// Git commit hash of the commit in which the run occurred
pub git_hash: String,
/// Name of the benchmark, normally with operation name and shapes
/// Name of the benchmark
pub name: String,
/// Options passed to the benchmark
pub options: Option<String>,
/// Shape dimensions
pub shapes: Vec<Vec<usize>>,
/// Time just before the run
pub timestamp: u128,
}
impl Display for BenchmarkResult {
@ -145,7 +202,7 @@ impl Display for BenchmarkResult {
Git Hash: {}
Benchmarking - {}{}
",
self.timestamp, self.git_hash, self.name, self.durations
self.timestamp, self.git_hash, self.name, self.raw
)
.as_str(),
)
@ -167,10 +224,83 @@ where
.output()
.unwrap();
let git_hash = String::from_utf8(output.stdout).unwrap().trim().to_string();
let durations = benchmark.run();
BenchmarkResult {
timestamp,
raw: durations.clone(),
computed: BenchmarkComputations::new(&durations),
git_hash,
name: benchmark.name(),
durations: benchmark.run(),
options: benchmark.options(),
shapes: benchmark.shapes(),
timestamp,
}
}
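
To show how the reworked hooks end up in a `BenchmarkResult`, here is a hedged sketch of a do-nothing benchmark run through `run_benchmark`. It is not part of the commit: the type and its values are made up, and it assumes a std build inside a git checkout, since `run_benchmark` shells out to git for the commit hash.

```rust
use burn_common::benchmark::{run_benchmark, Benchmark};

// Hypothetical no-op benchmark used only to exercise the new hooks.
struct NoopBenchmark;

impl Benchmark for NoopBenchmark {
    type Args = ();

    fn name(&self) -> String {
        // Short name, as the reworked naming convention asks for.
        "noop".into()
    }

    fn options(&self) -> Option<String> {
        Some("baseline".into())
    }

    fn shapes(&self) -> Vec<Vec<usize>> {
        vec![vec![1]]
    }

    fn prepare(&self) -> Self::Args {}

    fn execute(&self, _args: Self::Args) {}

    fn sync(&self) {}
}

fn main() {
    // The returned BenchmarkResult carries the raw durations, the computed
    // statistics, the git hash, and the name, options and shapes declared above.
    let result = run_benchmark(NoopBenchmark);
    println!("{result}");
}
```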
#[cfg(test)]
mod tests {
use super::*;
use alloc::vec;
#[test]
fn test_min_max_median_durations_odd_number_of_samples() {
let durations = BenchmarkDurations {
durations: vec![
Duration::new(10, 0),
Duration::new(20, 0),
Duration::new(30, 0),
Duration::new(40, 0),
Duration::new(50, 0),
],
};
let (min, max, median) = durations.min_max_median_durations();
assert_eq!(min, Duration::from_secs(10));
assert_eq!(max, Duration::from_secs(50));
assert_eq!(median, Duration::from_secs(30));
}
#[test]
fn test_min_max_median_durations_even_number_of_samples() {
let durations = BenchmarkDurations {
durations: vec![
Duration::new(18, 5),
Duration::new(20, 0),
Duration::new(30, 0),
Duration::new(40, 0),
],
};
let (min, max, median) = durations.min_max_median_durations();
assert_eq!(min, Duration::from_nanos(18000000005_u64));
assert_eq!(max, Duration::from_secs(40));
assert_eq!(median, Duration::from_secs(30));
}
#[test]
fn test_mean_duration() {
let durations = BenchmarkDurations {
durations: vec![
Duration::new(10, 0),
Duration::new(20, 0),
Duration::new(30, 0),
Duration::new(40, 0),
],
};
let mean = durations.mean_duration();
assert_eq!(mean, Duration::from_secs(25));
}
#[test]
fn test_variance_duration() {
let durations = BenchmarkDurations {
durations: vec![
Duration::new(10, 0),
Duration::new(20, 0),
Duration::new(30, 0),
Duration::new(40, 0),
Duration::new(50, 0),
],
};
let mean = durations.mean_duration();
let variance = durations.variance_duration(mean);
assert_eq!(variance, Duration::from_secs(200));
}
}

View File

@ -31,7 +31,7 @@ impl<S: ComputeServer, C: ComputeChannel<S>> Benchmark for TuneBenchmark<S, C> {
}
fn name(&self) -> String {
"Autotune".to_string()
"autotune".to_string()
}
fn sync(&self) {

View File

@ -8,7 +8,7 @@ use core::time::Duration;
use alloc::boxed::Box;
use alloc::string::ToString;
use alloc::vec::Vec;
use burn_common::benchmark::{Benchmark, BenchmarkDurations};
use burn_common::benchmark::{Benchmark, BenchmarkComputations, BenchmarkDurations};
use crate::channel::ComputeChannel;
use crate::client::ComputeClient;
@ -95,10 +95,10 @@ impl<S: ComputeServer, C: ComputeChannel<S>> Tuner<S, C> {
let mut fastest_tunable = None;
for (i, result) in results.into_iter().enumerate() {
let duration = result.median_duration();
let computed = BenchmarkComputations::new(&result);
if duration < smallest_duration {
smallest_duration = duration;
if computed.median < smallest_duration {
smallest_duration = computed.median;
fastest_tunable = Some(i);
}
}