!26700 [MD][Profiler][Autotune] Add test, logs, clean state

Merge pull request !26700 from harshvardhangupta/add_test_at_prof
This commit is contained in:
i-robot 2021-11-25 19:38:33 +00:00 committed by Gitee
commit d8fbc26419
6 changed files with 277 additions and 90 deletions

View File

@@ -64,8 +64,11 @@ Status IteratorConsumer::RegisterProfilingManager() {
   // This should never happen
   CHECK_FAIL_RETURN_UNEXPECTED(profiler_state != ProfilingManager::kEnabledTreeRegistered,
                                "Something went wrong. Current tree is already registered with the MD Profiler");
-  if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered) {
-    MS_LOG(WARNING) << "MD Profiler is already enabled for a different tree.";
+  if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered && profiling_manager_->IsProfiling()) {
+    MS_LOG(WARNING) << "Dataset Profiling is already enabled for a different data pipeline.";
+  } else if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered &&
+             profiling_manager_->IsAutotuning()) {
+    MS_LOG(WARNING) << "AutoTune for dataset is already enabled for a different data pipeline.";
   } else if (profiler_state == ProfilingRegistrationState::kEnabledTreeNotRegistered) {
     // Profiling infrastructures need to be initialized before Op launching
     // Setup profiling manager
@@ -73,10 +76,11 @@ Status IteratorConsumer::RegisterProfilingManager() {
     // dataset_iterator node is used for graph mode
     std::shared_ptr<Tracing> iterator_tracing = std::make_shared<DatasetIteratorTracing>();
     RETURN_IF_NOT_OK(profiling_manager_->RegisterTracingNode(iterator_tracing));
+    RETURN_IF_NOT_OK(tree_adapter_->SetProfilingManagerPtr(profiling_manager_, iterator_tracing));
     // Launch Monitor Thread
     RETURN_IF_NOT_OK(profiling_manager_->LaunchMonitor());
   } else {
     MS_LOG(INFO) << "Unable to register this tree with ProfilingManager.";
   }
   return Status::OK();
 }
@@ -86,42 +90,39 @@ Status ToDevice::RegisterProfilingManager() {
   // This should never happen
   CHECK_FAIL_RETURN_UNEXPECTED(profiler_state != ProfilingManager::kEnabledTreeRegistered,
                                "Something went wrong. Current tree is already registered with the MD Profiler");
-  if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered) {
-    MS_LOG(WARNING) << "MD Profiler is already enabled for a different tree.";
+  if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered && profiling_manager_->IsProfiling()) {
+    MS_LOG(WARNING) << "Dataset Profiling is already enabled for a different data pipeline.";
+  } else if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered &&
+             profiling_manager_->IsAutotuning()) {
+    MS_LOG(WARNING) << "AutoTune for dataset is already enabled for a different data pipeline.";
   } else if (profiler_state == ProfilingRegistrationState::kEnabledTreeNotRegistered) {
     // Profiling infrastructures need to be initialized before Op launching
     // Setup profiling manager
     RETURN_IF_NOT_OK(profiling_manager_->RegisterTree(this->tree_adapter_.get()));
     // device_queue node is used for graph mode
     std::shared_ptr<Tracing> device_queue_tracing = std::make_shared<DeviceQueueTracing>();
     RETURN_IF_NOT_OK(profiling_manager_->RegisterTracingNode(device_queue_tracing));
     RETURN_IF_NOT_OK(tree_adapter_->SetProfilingManagerPtr(profiling_manager_));
     // Launch Monitor Thread
     RETURN_IF_NOT_OK(profiling_manager_->LaunchMonitor());
   } else {
     MS_LOG(INFO) << "Unable to register this tree with ProfilingManager.";
   }
   return Status::OK();
 }

 Status TreeConsumer::RegisterProfilingManager() {
-  auto profiler_state = profiling_manager_->GetProfilerTreeState(tree_adapter_->tree_.get());
-  if (profiler_state == ProfilingRegistrationState::kEnabledTreeNotRegistered) {
-    return {StatusCode::kMDUnexpectedError, "Profiling is not supported for this consumer."};
+  if (profiling_manager_->IsProfiling()) {
+    return {StatusCode::kMDUnexpectedError, "Dataset Profiling is not supported for this kind of dataset."};
   }
   return Status::OK();
 }

 Status TreeConsumer::InitAutoTune() {
-  if (profiling_manager_->IsAutotuning()) {
-    // future improvement to show tree UUID in log
-    MS_LOG(WARNING) << "MD Auto-tune is already running for another tree";
-    return Status::OK();
-  }
   auto profiler_state = profiling_manager_->GetProfilerTreeState(tree_adapter_->tree_.get());
   if (profiler_state == ProfilingRegistrationState::kNotEnabled) {
     // Init ProfilingManager to `Enable` it.
-    RETURN_IF_NOT_OK(profiling_manager_->Init());
+    RETURN_IF_NOT_OK(profiling_manager_->Init(true));
     // Register this tree
     RETURN_IF_NOT_OK(RegisterProfilingManager());
     // Start Profiler
@@ -129,12 +130,19 @@ Status TreeConsumer::InitAutoTune() {
     // AutoTune object and thread init
     autotune_ = std::make_unique<AutoTune>(this->tree_adapter_.get(), GetProfilingManager());
     RETURN_IF_NOT_OK(autotune_->LaunchThread());
-    // Set flag to distinguish between MD Profiler and MD Autotuning
-    // to generate appropriate logs
-    profiling_manager_->EnableAutotuneFlag();
+  } else if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered && profiling_manager_->IsProfiling()) {
+    MS_LOG(WARNING) << "Cannot enable AutoTune for the current data pipeline as Dataset Profiling is enabled for "
+                       "another data pipeline.";
+  } else if (profiler_state == ProfilingManager::kEnabledDifferentTreeRegistered &&
+             profiling_manager_->IsAutotuning()) {
+    MS_LOG(WARNING)
+      << "Cannot enable AutoTune for the current data pipeline as it is already enabled for another data pipeline.";
+  } else if (profiler_state == ProfilingManager::kEnabledTreeRegistered && profiling_manager_->IsProfiling()) {
+    MS_LOG(WARNING)
+      << "Cannot enable AutoTune for the current data pipeline as Dataset Profiling is already enabled for the "
+         "current data pipeline.";
   } else {
-    MS_LOG(WARNING) << "Unable to start MD Auto-tune as MD Profiler is already enabled. Disable MD Profiler to "
-                       "continue with auto-tune.";
+    MS_LOG(WARNING) << "Cannot enable AutoTune for the current data pipeline.";
   }
   return Status::OK();
 }
@@ -147,10 +155,12 @@ Status IteratorConsumer::Init(std::shared_ptr<DatasetNode> d) {
   RETURN_IF_NOT_OK(tree_adapter_->Compile(std::move(d), num_epochs_));
 #ifndef ENABLE_SECURITY
   profiling_manager_ = GlobalContext::profiling_manager();
+  if (profiling_manager_->IsProfiling()) {
+    // Init has been called already
+    RETURN_IF_NOT_OK(RegisterProfilingManager());
+  }
   if (GlobalContext::config_manager()->enable_autotune()) {
     RETURN_IF_NOT_OK(InitAutoTune());
-  } else {
-    RETURN_IF_NOT_OK(RegisterProfilingManager());
   }
 #endif
   return Status::OK();
@@ -261,10 +271,12 @@ Status ToDevice::Init(std::shared_ptr<DatasetNode> d) {
   RETURN_IF_NOT_OK(tree_adapter_->Compile(std::move(d), num_epochs_));
 #ifndef ENABLE_SECURITY
   profiling_manager_ = GlobalContext::profiling_manager();
+  if (profiling_manager_->IsProfiling()) {
+    // Init has been called already
+    RETURN_IF_NOT_OK(RegisterProfilingManager());
+  }
   if (GlobalContext::config_manager()->enable_autotune()) {
     RETURN_IF_NOT_OK(InitAutoTune());
-  } else {
-    RETURN_IF_NOT_OK(RegisterProfilingManager());
   }
 #endif
   return Status::OK();
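
Taken together, the two Init() changes above invert the old flow: a consumer now registers with the ProfilingManager only when profiling is already running (IsProfiling()), and otherwise initializes AutoTune when it is enabled in the config; the unconditional else-branch registration is gone. A minimal Python sketch of how this surfaces to a user, modeled on the usage in the updated test file at the end of this commit (the cde.GlobalContext.profiling_manager() binding and the config APIs are taken from those tests; the pipeline itself is illustrative):

import numpy as np
import mindspore.dataset as ds
import mindspore._c_dataengine as cde

data = ds.GeneratorDataset([(np.array([i]),) for i in range(16)], ["data"]).batch(4)

# Case 1: dataset profiling - start the MD profiler before building the iterator.
md_profiler = cde.GlobalContext.profiling_manager()
md_profiler.init()   # Init(for_autotune=False): manager enters the "profiling" state
md_profiler.start()
itr1 = data.create_dict_iterator(num_epochs=1)  # Init() sees IsProfiling() and registers this tree
md_profiler.stop()

# Case 2: autotune - enable it in the config; iterator creation calls InitAutoTune() instead.
ds.config.set_enable_autotune(True)
itr2 = data.create_dict_iterator(num_epochs=1)
ds.config.set_enable_autotune(False)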

View File

@@ -226,6 +226,10 @@ Status ProcessCpuInfo::Sample(uint64_t total_time_elapsed) {
 }

 Status ThreadCpuInfo::Sample(uint64_t total_time_elapsed) {
+  if (last_sampling_failed_) {
+    // thread is probably terminated
+    return Status::OK();
+  }
   std::ifstream file("/proc/" + std::to_string(pid_) + "/task/" + std::to_string(tid_) + "/stat");
   if (!file.is_open()) {
     MS_LOG(INFO) << "Failed to open /proc/" << pid_ << "/task/" << tid_ << "/stat file";

View File

@@ -209,7 +209,7 @@ Status Tracing::GetEmptyQueueFrequency(int32_t start_step, int32_t end_step, float_t *empty_queue_freq) {
   std::vector<int32_t> sizes;
   RETURN_IF_NOT_OK(GetConnectorSize(start_step, end_step, &sizes));
   int32_t total = end_step - start_step + 1;
-  CHECK_FAIL_RETURN_UNEXPECTED(total <= 0, "Start step is greater than end step.");
+  CHECK_FAIL_RETURN_UNEXPECTED(total > 0, "Start step is greater than end step.");
   uint32_t count = std::count(sizes.begin(), sizes.end(), 0);
   *empty_queue_freq = static_cast<float_t>(count) / static_cast<float_t>(total);
   return Status::OK();
@@ -222,7 +222,7 @@ Status Tracing::Init() {
 // Constructor
 ProfilingManager::ProfilingManager()
-    : profiling_state_(ProfilingState::kProfilingStateUnBegun), enabled_(false), tree_(nullptr), autotuning_(false) {}
+    : profiling_state_(ProfilingState::kProfilingStateUnBegun), tree_(nullptr), autotuning_(false), profiling_(false) {}

 bool ProfilingManager::IsProfilingEnable(const ExecutionTree *tree) const {
   auto external_state = GetProfilerTreeState(tree);
@@ -231,8 +231,10 @@ bool ProfilingManager::IsProfilingEnable(const ExecutionTree *tree) const {
 Status ProfilingManager::RegisterTree(TreeAdapter *tree_adapter) {
   CHECK_FAIL_RETURN_UNEXPECTED(tree_ == nullptr, "Another tree is already registered.");
-  CHECK_FAIL_RETURN_UNEXPECTED(enabled_ == true, "MD Profiler is disabled. Cannot register a tree.");
+  CHECK_FAIL_RETURN_UNEXPECTED((autotuning_ || profiling_) == true,
+                               "MD Profiler is disabled. Cannot register the tree.");
   tree_ = tree_adapter->tree_.get();
+  MS_LOG(INFO) << "Registering tree: " + tree_->GetUniqueId();
   perf_monitor_ = std::make_unique<Monitor>(this);
   // Register all sampling nodes here.
   // Tracing node registration is the responsibility of the Consumer
@@ -639,21 +641,27 @@ Status ProfilingManager::Reset() {
   tree_ = nullptr;
   profiling_state_ = ProfilingState::kProfilingStateUnBegun;
   autotuning_ = false;
+  profiling_ = false;
   return Status::OK();
 }

-Status ProfilingManager::Init() {
+Status ProfilingManager::Init(const bool for_autotune) {
   // Reinitialization should only be done in case of UT with sequential pipelines and should not be used externally.
   // Reinitialization with parallel data pipelines can have unexpected consequences.
   CHECK_FAIL_RETURN_UNEXPECTED(!autotuning_, "Stop MD Autotune before initializing the MD Profiler.");
+  CHECK_FAIL_RETURN_UNEXPECTED(!profiling_, "Stop MD Profiler before initializing it.");
   CHECK_FAIL_RETURN_UNEXPECTED(profiling_state_ != ProfilingState::kProfilingStateRunning,
                                "Stop MD Profiler before reinitializing it.");
   Reset();
   CHECK_FAIL_RETURN_UNEXPECTED(profiling_state_ == ProfilingState::kProfilingStateUnBegun,
                                "MD Profiler is in an unexpected state.");
-  // Enable profiling
-  enabled_ = true;
-  MS_LOG(INFO) << "MD profiler is initialized successfully.";
+  if (for_autotune) {
+    autotuning_ = true;
+    MS_LOG(INFO) << "MD profiler is initialized successfully for autotuning.";
+  } else {
+    profiling_ = true;
+    MS_LOG(INFO) << "MD profiler is initialized successfully for profiling.";
+  }
   return Status::OK();
 }
@@ -690,8 +698,14 @@ Status ProfilingManager::Stop() {
     RETURN_IF_NOT_OK(node.second->Stop());
   }
   profiling_state_ = ProfilingState::kProfilingStateFinished;
-  enabled_ = false;
-  MS_LOG(INFO) << "MD profiler is stopped.";
+  if (autotuning_) {
+    autotuning_ = false;
+    MS_LOG(INFO) << "MD Autotune is stopped.";
+  }
+  if (profiling_) {
+    profiling_ = false;
+    MS_LOG(INFO) << "MD Profiler is stopped.";
+  }
   return Status::OK();
 }
@@ -737,9 +751,10 @@ Status ProfilingManager::Save(const std::string &profile_data_path) {
 }

 ProfilingManager::ProfilingRegistrationState ProfilingManager::GetProfilerTreeState(const ExecutionTree *tree) const {
-  if (!enabled_) return kNotEnabled;
+  auto enabled = (profiling_ || autotuning_);
+  if (!enabled) return kNotEnabled;
   if (tree_ == nullptr) {
-    return enabled_ ? kEnabledTreeNotRegistered : kNotEnabled;
+    return kEnabledTreeNotRegistered;
   } else {
     return tree_ == tree ? kEnabledTreeRegistered : kEnabledDifferentTreeRegistered;
   }
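
Net effect of the profiling.cc changes: the single enabled_ bit is replaced by two mutually exclusive flags, profiling_ and autotuning_, selected at Init(for_autotune) time and cleared by Stop() and Reset(). A minimal Python sketch of that state machine, assuming only what the diff shows (this models the C++ logic; it is not an API):

class ProfilerStateModel:
    def __init__(self):
        self.profiling = False
        self.autotuning = False
        self.tree = None

    def init(self, for_autotune=False):
        # mirrors the CHECK_FAIL guards in ProfilingManager::Init()
        assert not self.autotuning, "Stop MD Autotune before initializing the MD Profiler."
        assert not self.profiling, "Stop MD Profiler before initializing it."
        self.reset()
        if for_autotune:
            self.autotuning = True
        else:
            self.profiling = True

    def stop(self):
        self.profiling = False
        self.autotuning = False

    def reset(self):
        self.profiling = False
        self.autotuning = False
        self.tree = None

    def tree_state(self, tree):
        # mirrors GetProfilerTreeState()
        if not (self.profiling or self.autotuning):
            return "kNotEnabled"
        if self.tree is None:
            return "kEnabledTreeNotRegistered"
        return "kEnabledTreeRegistered" if self.tree is tree else "kEnabledDifferentTreeRegistered"

This is why RegisterTree() now checks (autotuning_ || profiling_) instead of enabled_, and why a second init fails fast until the previous session is stopped.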

View File

@@ -417,8 +417,9 @@ class ProfilingManager {
   Status RegisterTracingNode(std::shared_ptr<Tracing> node);

   /// \brief API to initialize profiling manager
+  /// \param for_autotune flag to indicate if Profiler is initialized for autotuning or profiling purposes
   /// \return Status object with the error code
-  Status Init();
+  Status Init(bool for_autotune = false);

   /// \brief API to signal the profiling nodes to start collecting data
   /// \return Status object with the error code
@@ -436,12 +437,13 @@ class ProfilingManager {
   /// \return number of epochs
   int32_t GetNumOfProfiledEpochs() { return epoch_end_step_.size() - 1; }

-  /// Determine if the Profiler is being used for autotuning_
+  /// Determine if the Profiler is being used for autotuning.
   /// \return boolean
   bool IsAutotuning() { return autotuning_; }

-  /// \brief Setter for autotuning_ bool flag
-  void EnableAutotuneFlag() { autotuning_ = true; }
+  /// Determine if the Profiler is being used for profiling.
+  /// \return boolean
+  bool IsProfiling() { return profiling_; }

   // Registration state for the profiler
   enum ProfilingRegistrationState {
@@ -466,13 +468,13 @@ class ProfilingManager {
     kProfilingStateFinished,
   };
   ProfilingState profiling_state_;  // show current state of ProfilingManager (running, or paused)
-  std::atomic<bool> enabled_;
   std::unordered_map<std::string, std::shared_ptr<Tracing>> tracing_nodes_;
   std::unordered_map<std::string, std::shared_ptr<Sampling>> sampling_nodes_;
   ExecutionTree *tree_;                   // ExecutionTree pointer
   std::vector<uint64_t> epoch_end_ts_;    // End of epoch timestamp
   std::vector<uint32_t> epoch_end_step_;  // End of epoch step number
-  bool autotuning_;  // flag to indicate if Profiler is being used for autotuning_
+  std::atomic<bool> autotuning_;  // flag to indicate if ProfilingManager is being used for auto-tuning the pipeline
+  std::atomic<bool> profiling_;   // flag to indicate if ProfilingManager is being used for profiling the pipeline

   // Register profile node to tree
   // @param node - Profiling node
View File

@@ -446,16 +446,16 @@ def get_enable_autotune():

 def set_autotune_interval(interval):
     """
-    Set the default interval (in milliseconds) for data pipeline auto-tuning.
+    Set the default interval (in milliseconds) for data pipeline autotuning.

     Args:
-        interval (int): Interval (in milliseconds) to be used for data pipeline auto-tuning.
+        interval (int): Interval (in milliseconds) to be used for data pipeline autotuning.

     Raises:
         ValueError: If interval is invalid when interval <= 0 or interval > MAX_INT_32.

     Examples:
-        >>> # Set a new global configuration value for the auto-tuning interval.
+        >>> # Set a new global configuration value for the autotuning interval.
         >>> ds.config.set_autotune_interval(100)
     """
     if not isinstance(interval, int):
@@ -467,13 +467,13 @@ def set_autotune_interval(interval):

 def get_autotune_interval():
     """
-    Get the global configuration of sampling interval of pipeline auto-tuning.
+    Get the global configuration of sampling interval of pipeline autotuning.

     Returns:
-        int, interval (in milliseconds) for data pipeline auto-tuning.
+        int, interval (in milliseconds) for data pipeline autotuning.

     Examples:
-        >>> # Get the global configuration of the auto-tuning interval.
+        >>> # Get the global configuration of the autotuning interval.
         >>> # If set_autotune_interval() is never called before, the default value(100) will be returned.
         >>> autotune_interval = ds.config.get_autotune_interval()
     """

View File

@@ -17,60 +17,214 @@ Testing Autotune support in DE
 """
 import numpy as np
 import pytest
+import mindspore._c_dataengine as cde
 import mindspore.dataset as ds

-def test_autotune_simple_pipeline():
-    """
-    Feature: Auto-tuning
-    Description: test simple pipeline of autotune - Generator -> Shuffle -> Batch
-    Expectation: pipeline runs successfully
-    """
-    ds.config.set_enable_autotune(True)
+# pylint: disable=unused-variable
+@pytest.mark.forked
+class TestAutotuneWithProfiler:
+    def test_autotune_after_profiler_with_1_pipeline(self, capfd):
+        """
+        Feature: Autotuning with Profiler
+        Description: Test Autotune enabled together with MD Profiler with a single pipeline
+        Expectation: Enable MD Profiler and print appropriate warning logs when trying to enable Autotune
+        """
+        md_profiler = cde.GlobalContext.profiling_manager()
+        md_profiler.init()
+        md_profiler.start()
+        ds.config.set_enable_autotune(True)
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
-    source = [(np.array([x]),) for x in range(1024)]
-    data1 = ds.GeneratorDataset(source, ["data"])
-    data1 = data1.shuffle(64)
-    data1 = data1.batch(32)
+        _, err = capfd.readouterr()
+        assert "Cannot enable AutoTune for the current data pipeline as Dataset Profiling is already enabled for the " \
+               "current data pipeline." in err
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
-    itr = data1.create_dict_iterator(num_epochs=5)
-    for _ in range(5):
-        for _ in itr:
-            pass
+        md_profiler.stop()
+        ds.config.set_enable_autotune(False)
-    ds.config.set_enable_autotune(False)
+    def test_autotune_after_profiler_with_2_pipeline(self, capfd):
+        """
+        Feature: Autotuning with Profiler
+        Description: Test Autotune enabled together with MD Profiler with two pipelines
+        Expectation: Enable MD Profiler for first tree and print appropriate warning log when trying to
+            enable Autotune for the first tree. Print appropriate warning logs when trying to enable both MD Profiler
+            and Autotune for second tree.
+        """
+        md_profiler = cde.GlobalContext.profiling_manager()
+        md_profiler.init()
+        md_profiler.start()
+        ds.config.set_enable_autotune(True)
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
+        _, err = capfd.readouterr()
+        assert "Cannot enable AutoTune for the current data pipeline as Dataset Profiling is already enabled for the " \
+               "current data pipeline." in err
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
-def test_autotune_config():
-    """
-    Feature: Auto-tuning
-    Description: test basic config of autotune
-    Expectation: config can be set successfully
-    """
-    autotune_state = ds.config.get_enable_autotune()
-    assert autotune_state is False
+        itr2 = data1.create_dict_iterator(num_epochs=5)
-    ds.config.set_enable_autotune(False)
-    autotune_state = ds.config.get_enable_autotune()
-    assert autotune_state is False
+        _, err = capfd.readouterr()
+        assert "Dataset Profiling is already enabled for a different data pipeline." in err
+        assert "Cannot enable AutoTune for the current data pipeline as Dataset Profiling is enabled for another data" \
+               " pipeline." in err
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
-    with pytest.raises(TypeError):
-        ds.config.set_enable_autotune(1)
+        md_profiler.stop()
+        ds.config.set_enable_autotune(True)
-    autotune_interval = ds.config.get_autotune_interval()
-    assert autotune_interval == 100
+    def test_autotune_with_2_pipeline(self, capfd):
+        """
+        Feature: Autotuning
+        Description: Test Autotune two pipelines
+        Expectation: Enable MD Profiler and print appropriate warning logs
+            when trying to enable Autotune for second tree.
+        """
+        ds.config.set_enable_autotune(True)
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
+        itr2 = data1.create_dict_iterator(num_epochs=5)
-    ds.config.set_autotune_interval(200)
-    autotune_interval = ds.config.get_autotune_interval()
-    assert autotune_interval == 200
+        _, err = capfd.readouterr()
+        assert "Cannot enable AutoTune for the current data pipeline as it is already enabled for another data " \
+               "pipeline." in err
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
-    with pytest.raises(TypeError):
-        ds.config.set_autotune_interval(20.012)
+        ds.config.set_enable_autotune(False)
-    with pytest.raises(ValueError):
-        ds.config.set_autotune_interval(-999)
+    def test_delayed_autotune_with_2_pipeline(self, capfd):
+        """
+        Feature: Autotuning
+        Description: Test delayed Autotune with two pipelines
+        Expectation: Enable MD Profiler for second tree and no warnings logs should be printed.
+        """
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
+        ds.config.set_enable_autotune(True)
+        itr2 = data1.create_dict_iterator(num_epochs=5)
+        ds.config.set_enable_autotune(False)
-if __name__ == "__main__":
-    test_autotune_simple_pipeline()
-    test_autotune_config()
+        _, err = capfd.readouterr()
+        assert err == ''
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
+    def test_delayed_start_autotune_with_3_pipeline(self, capfd):
+        """
+        Feature: Autotuning
+        Description: Test delayed Autotune and early stop with three pipelines
+        Expectation: Enable MD Profiler for second tree and no warnings logs should be printed.
+        """
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
+        ds.config.set_enable_autotune(True)
+        itr2 = data1.create_dict_iterator(num_epochs=5)
+        ds.config.set_enable_autotune(False)
+        itr3 = data1.create_dict_iterator(num_epochs=5)
+        _, err = capfd.readouterr()
+        assert err == ''
+        # Uncomment the following two lines to see complete stdout and stderr output in pytest summary output
+        # sys.stdout.write(_)
+        # sys.stderr.write(err)
+    def test_autotune_before_profiler(self):
+        """
+        Feature: Autotuning with Profiler
+        Description: Test Autotune with Profiler when Profiler is Initialized after autotune
+        Expectation: Initialization of Profiler should throw an error.
+        """
+        # enable AT for 1st tree
+        # profiler init should throw an error
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        ds.config.set_enable_autotune(True)
+        itr1 = data1.create_dict_iterator(num_epochs=5)
+        ds.config.set_enable_autotune(False)
+        md_profiler = cde.GlobalContext.profiling_manager()
+        with pytest.raises(RuntimeError) as excinfo:
+            md_profiler.init()
+        assert "Unexpected error. Stop MD Autotune before initializing the MD Profiler." in str(excinfo.value)
+    def test_autotune_simple_pipeline(self):
+        """
+        Feature: Autotuning
+        Description: test simple pipeline of autotune - Generator -> Shuffle -> Batch
+        Expectation: pipeline runs successfully
+        """
+        ds.config.set_enable_autotune(True)
+        source = [(np.array([x]),) for x in range(1024)]
+        data1 = ds.GeneratorDataset(source, ["data"])
+        data1 = data1.shuffle(64)
+        data1 = data1.batch(32)
+        itr = data1.create_dict_iterator(num_epochs=5)
+        for _ in range(5):
+            for _ in itr:
+                pass
+        ds.config.set_enable_autotune(False)
+    def test_autotune_config(self):
+        """
+        Feature: Autotuning
+        Description: test basic config of autotune
+        Expectation: config can be set successfully
+        """
+        autotune_state = ds.config.get_enable_autotune()
+        assert autotune_state is False
+        ds.config.set_enable_autotune(False)
+        autotune_state = ds.config.get_enable_autotune()
+        assert autotune_state is False
+        with pytest.raises(TypeError):
+            ds.config.set_enable_autotune(1)
+        autotune_interval = ds.config.get_autotune_interval()
+        assert autotune_interval == 100
+        ds.config.set_autotune_interval(200)
+        autotune_interval = ds.config.get_autotune_interval()
+        assert autotune_interval == 200
+        with pytest.raises(TypeError):
+            ds.config.set_autotune_interval(20.012)
+        with pytest.raises(ValueError):
+            ds.config.set_autotune_interval(-999)
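
Note that TestAutotuneWithProfiler is decorated with @pytest.mark.forked, so each test runs in a forked subprocess and the process-global profiler/autotune state cannot leak between tests; this requires the pytest-forked plugin. A sketch of invoking the suite programmatically (assuming the file is saved under its usual name, test_autotune.py):

import pytest

# Runs the forked tests in verbose mode; pytest-forked must be installed.
pytest.main(["-v", "test_autotune.py"])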