* flaml v0.3

* low cost partial config
Chi Wang 2021-04-06 11:37:52 -07:00 committed by GitHub
parent 37d7518a4c
commit b7a91e0385
19 changed files with 161 additions and 68 deletions

View File

@@ -38,7 +38,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install -e .[test]
-    - name: If linux or max, install ray
+    - name: If linux or mac, install ray
       if: matrix.os == 'macOS-latest' || matrix.os == 'ubuntu-latest'
       run: |
         pip install -e .[ray]

View File

@@ -40,7 +40,7 @@ automl.fit(X_train, y_train, task="classification", estimator_list=["lgbm"])
 * You can also run generic ray-tune style hyperparameter tuning for a custom function.
 ```python
 from flaml import tune
-tune.run(train_with_config, config={…}, init_config={…}, time_budget_s=3600)
+tune.run(train_with_config, config={…}, low_cost_partial_config={…}, time_budget_s=3600)
 ```
 ## Installation
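
For context, a fleshed-out version of that README snippet under the renamed v0.3 API might look as follows. This is an illustrative sketch, not part of the commit; `train_with_config`, its objective, and the hyperparameter bounds are assumptions.

```python
from flaml import tune

def train_with_config(config):
    # stand-in objective: pretend larger models cost more and score differently
    loss = (config['n_estimators'] - 100) ** 2 + (config['max_leaves'] - 20) ** 2
    tune.report(val_loss=loss)

tune.run(
    train_with_config,
    config={
        'n_estimators': tune.qloguniform(lower=4, upper=1000, q=1),
        'max_leaves': tune.qloguniform(lower=4, upper=1000, q=1),
    },
    # start the search from cheap values of the cost-related dimensions
    low_cost_partial_config={'n_estimators': 4, 'max_leaves': 4},
    metric='val_loss', mode='min',
    time_budget_s=3600,
)
```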

View File

@@ -51,6 +51,7 @@ class SearchState:
         self.init_eci = learner_class.cost_relative2lgbm()
         self._search_space_domain = {}
         self.init_config = {}
+        self.low_cost_partial_config = {}
         self.cat_hp_cost = {}
         self.data_size = data_size
         search_space = learner_class.search_space(
@@ -60,6 +61,9 @@ class SearchState:
             self._search_space_domain[name] = space['domain']
             if 'init_value' in space:
                 self.init_config[name] = space['init_value']
+            if 'low_cost_init_value' in space:
+                self.low_cost_partial_config[name] = space[
+                    'low_cost_init_value']
             if 'cat_hp_cost' in space:
                 self.cat_hp_cost[name] = space['cat_hp_cost']
         self._hp_names = list(self._search_space_domain.keys())
@@ -1017,12 +1021,15 @@ class AutoML:
                     keys[1]: x2,
                 })
             self._max_iter_per_learner = len(points_to_evaluate)
+            low_cost_partial_config = None
         else:
             points_to_evaluate = [search_state.init_config]
+            low_cost_partial_config = search_state.low_cost_partial_config
         if self._hpo_method in ('bs', 'cfo', 'grid'):
             algo = SearchAlgo(metric='val_loss', mode='min',
                               space=search_space,
                               points_to_evaluate=points_to_evaluate,
+                              low_cost_partial_config=low_cost_partial_config,
                               cat_hp_cost=search_state.cat_hp_cost,
                               prune_attr=prune_attr,
                               min_resource=min_resource,
@@ -1048,7 +1055,6 @@
         start_run_time = time.time()
         # warnings.filterwarnings("ignore")
         analysis = tune.run(search_state.training_function,
-                            init_config=None,
                             search_alg=search_state.search_alg,
                             time_budget_s=budget_left,
                             verbose=max(self.verbose - 1, 0),  # local_dir='logs/tune_results',
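
The plumbing above is driven by the estimator search spaces: any dimension that declares a `low_cost_init_value` is collected into `SearchState.low_cost_partial_config` and forwarded to the searcher. A minimal sketch of a custom learner using the new field (the `MyLGBM` class and its bounds are illustrative, not part of the commit):

```python
from flaml import tune
from flaml.model import LGBMEstimator

class MyLGBM(LGBMEstimator):
    @classmethod
    def search_space(cls, data_size, task):
        upper = min(32768, int(data_size))
        return {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
                'low_cost_init_value': 4,  # collected into low_cost_partial_config
            },
        }
```

Registered via `automl.add_learner(learner_name='my_lgbm', learner_class=MyLGBM)`, this dimension would then start its search from the cheap value 4.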

View File

@@ -182,10 +182,12 @@ class LGBMEstimator(BaseEstimator):
             'n_estimators': {
                 'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                 'init_value': 4,
+                'low_cost_init_value': 4,
             },
             'max_leaves': {
                 'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                 'init_value': 4,
+                'low_cost_init_value': 4,
             },
             'min_data_in_leaf': {
                 'domain': tune.qloguniform(lower=2, upper=2**7, q=1),
@@ -304,10 +306,12 @@ class XGBoostEstimator(SKLearnEstimator):
             'n_estimators': {
                 'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                 'init_value': 4,
+                'low_cost_init_value': 4,
             },
             'max_leaves': {
                 'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                 'init_value': 4,
+                'low_cost_init_value': 4,
             },
             'min_child_weight': {
                 'domain': tune.loguniform(lower=0.001, upper=128),
@@ -463,6 +467,7 @@ class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
             'n_estimators': {
                 'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                 'init_value': 4,
+                'low_cost_init_value': 4,
             },
             'max_features': {
                 'domain': tune.loguniform(lower=0.1, upper=1.0),
@@ -596,6 +601,7 @@ class CatBoostEstimator(BaseEstimator):
             'early_stopping_rounds': {
                 'domain': tune.qloguniform(lower=10, upper=upper, q=1),
                 'init_value': 10,
+                'low_cost_init_value': 10,
             },
             'learning_rate': {
                 'domain': tune.loguniform(lower=.005, upper=.2),
@@ -707,6 +713,7 @@ class KNeighborsEstimator(BaseEstimator):
             'n_neighbors': {
                 'domain': tune.qloguniform(lower=1, upper=upper, q=1),
                 'init_value': 5,
+                'low_cost_init_value': 1,
             },
         }

View File

@@ -32,7 +32,8 @@ class BlendSearch(Searcher):
                  metric: Optional[str] = None,
                  mode: Optional[str] = None,
                  space: Optional[dict] = None,
-                 points_to_evaluate: Optional[List[Dict]] = None,
+                 points_to_evaluate: Optional[List[dict]] = None,
+                 low_cost_partial_config: Optional[dict] = None,
                  cat_hp_cost: Optional[dict] = None,
                  prune_attr: Optional[str] = None,
                  min_resource: Optional[float] = None,
@@ -50,14 +51,14 @@ class BlendSearch(Searcher):
             mode: A string in ['min', 'max'] to specify the objective as
             space: A dictionary to specify the search space.
             points_to_evaluate: Initial parameter suggestions to be run first.
-                The first element needs to be a dictionary from a subset of
+            low_cost_partial_config: A dictionary from a subset of
                 controlled dimensions to the initial low-cost values.
                 e.g.,

-                .. code-block:: python
-
-                    [{'epochs': 1}]
+                .. code-block:: python
+
+                    {'n_estimators': 4, 'max_leaves': 4}

             cat_hp_cost: A dictionary from a subset of categorical dimensions
                 to the relative cost of each choice.
                 e.g.,
@@ -92,9 +93,8 @@ class BlendSearch(Searcher):
             seed: An integer of the random seed.
         '''
         self._metric, self._mode = metric, mode
-        if points_to_evaluate: init_config = points_to_evaluate[0]
-        else: init_config = {}
-        self._points_to_evaluate = points_to_evaluate
+        init_config = low_cost_partial_config or {}
+        self._points_to_evaluate = points_to_evaluate or []
         if global_search_alg is not None:
             self._gs = global_search_alg
         elif getattr(self, '__name__', None) != 'CFO':
@@ -301,10 +301,10 @@ class BlendSearch(Searcher):
             # logger.debug(f"random config {config}")
             skip = self._should_skip(choice, trial_id, config)
             if skip: return None
-            # if not choice: print(config)
             if choice or self._valid(config):
                 # LS or valid or no backup choice
                 self._trial_proposed_by[trial_id] = choice
+                if not choice: print(config)
             else:  # invalid config proposed by GS
                 # if not self._use_rs:
                 #     self._search_thread_pool[choice].on_trial_complete(
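
Before this commit, the low-cost point rode along as `points_to_evaluate[0]`; v0.3 gives it a dedicated argument, so the two roles no longer overload one parameter. A migration sketch assuming the v0.3 signature above (the search space, warm-start point, and bounds are invented for illustration):

```python
# require: pip install flaml[blendsearch]
from flaml import BlendSearch, tune

search_space = {
    'n_estimators': tune.qloguniform(lower=4, upper=1000, q=1),
    'max_leaves': tune.qloguniform(lower=4, upper=1000, q=1),
}
# v0.2.x packed the low-cost point into points_to_evaluate:
#   BlendSearch(..., points_to_evaluate=[{'n_estimators': 4, 'max_leaves': 4}])
# v0.3 separates the two roles:
algo = BlendSearch(
    metric='val_loss', mode='min',
    space=search_space,
    low_cost_partial_config={'n_estimators': 4, 'max_leaves': 4},
    points_to_evaluate=[{'n_estimators': 100, 'max_leaves': 32}],  # optional warm starts
)
```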

View File

@@ -42,8 +42,10 @@ class FLOW2(Searcher):
         '''Constructor

         Args:
-            init_config: a dictionary from a subset of controlled dimensions
-                to the initial low-cost values. e.g. {'epochs':1}
+            init_config: a dictionary of a partial or full initial config,
+                e.g. from a subset of controlled dimensions
+                to the initial low-cost values.
+                e.g. {'epochs':1}
             metric: A string of the metric name to optimize for.
                 minimization or maximization.
             mode: A string in ['min', 'max'] to specify the objective as

View File

@@ -27,7 +27,7 @@ analysis = tune.run(
         'x': tune.qloguniform(lower=1, upper=100000, q=1),
         'y': tune.randint(lower=1, upper=100000)
     },  # the search space
-    init_config={'x': 1},  # an initial (partial) config with low cost
+    low_cost_partial_config={'x': 1},  # an initial (partial) config with low cost
     metric='metric',  # the name of the metric used for optimization
     mode='min',  # the optimization mode, 'min' or 'max'
     num_samples=-1,  # the maximal number of configs to try, -1 means infinite
@@ -71,7 +71,7 @@ analysis = raytune.run(
     num_samples=-1,  # the maximal number of configs to try, -1 means infinite
     time_budget_s=60,  # the time budget in seconds
     local_dir='logs/',  # the local directory to store logs
-    search_alg=CFO(points_to_evaluate=[{'x': 1}])  # or BlendSearch
+    search_alg=CFO(low_cost_partial_config={'x': 1})  # or BlendSearch
 )
 print(analysis.best_trial.last_result)  # the best trial's result
@@ -124,7 +124,7 @@ Example:
 ```python
 from flaml import CFO
 tune.run(...
-    search_alg = CFO(points_to_evaluate=[init_config]),
+    search_alg = CFO(low_cost_partial_config=low_cost_partial_config),
 )
 ```
@@ -157,7 +157,7 @@ Example:
 # require: pip install flaml[blendsearch]
 from flaml import BlendSearch
 tune.run(...
-    search_alg = BlendSearch(points_to_evaluate=[init_config]),
+    search_alg = BlendSearch(low_cost_partial_config=low_cost_partial_config),
 )
 ```

View File

@@ -3,7 +3,7 @@
 * Licensed under the MIT License. See LICENSE file in the
 * project root for license information.
 '''
-from typing import Optional, Union
+from typing import Optional, Union, List
 import datetime, time
 try:
     from ray.tune.analysis import ExperimentAnalysis as EA
@@ -53,7 +53,6 @@ def report(_metric=None, **kwargs):
     analysis = tune.run(
         compute_with_config,
-        init_config={},
         config={
             'x': tune.qloguniform(lower=1, upper=1000000, q=1),
             'y': tune.randint(lower=1, upper=1000000)
@@ -100,8 +99,9 @@ def report(_metric=None, **kwargs):
 def run(training_function,
-        init_config: dict,
         config: Optional[dict] = None,
+        points_to_evaluate: Optional[List[dict]] = None,
+        low_cost_partial_config: Optional[dict] = None,
         cat_hp_cost: Optional[dict] = None,
         metric: Optional[str] = None,
         mode: Optional[str] = None,
@@ -136,7 +136,6 @@ def run(training_function,
     analysis = tune.run(
         compute_with_config,
-        init_config={},
         config={
             'x': tune.qloguniform(lower=1, upper=1000000, q=1),
             'y': tune.randint(lower=1, upper=1000000)
@@ -148,15 +147,17 @@ def run(training_function,
     Args:
         training_function: A user-defined training function.
-        init_config: A dictionary from a subset of controlled dimensions
-            to the initial low-cost values. e.g.,
+        config: A dictionary to specify the search space.
+        points_to_evaluate: A list of initial hyperparameter
+            configurations to run first.
+        low_cost_partial_config: A dictionary from a subset of
+            controlled dimensions to the initial low-cost values.
+            e.g.,

         .. code-block:: python

-            {'epochs': 1}
+            {'n_estimators': 4, 'max_leaves': 4}

-        If no such dimension, pass an empty dict {}.
-        config: A dictionary to specify the search space.
         cat_hp_cost: A dictionary from a subset of categorical dimensions
             to the relative cost of each choice.
             e.g.,
@@ -195,9 +196,9 @@ def run(training_function,
     from flaml import BlendSearch
     algo = BlendSearch(metric='val_loss', mode='min',
                        space=search_space,
-                       points_to_evaluate=points_to_evaluate)
+                       low_cost_partial_config=low_cost_partial_config)
     for i in range(10):
-        analysis = tune.run(compute_with_config, init_config=None,
+        analysis = tune.run(compute_with_config,
                             search_alg=algo, use_ray=False)
         print(analysis.trials[-1].last_result)
@@ -242,8 +243,9 @@ def run(training_function,
     if search_alg is None:
         from ..searcher.blendsearch import BlendSearch
-        search_alg = BlendSearch(points_to_evaluate=[init_config],
-                                 metric=metric, mode=mode,
+        search_alg = BlendSearch(metric=metric, mode=mode,
+                                 points_to_evaluate=points_to_evaluate,
+                                 low_cost_partial_config=low_cost_partial_config,
                                  cat_hp_cost=cat_hp_cost,
                                  space=config, prune_attr=prune_attr,
                                  min_resource=min_resource,
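
After this change, `tune.run` accepts both arguments with distinct meanings: `points_to_evaluate` lists whole configurations to evaluate first, while `low_cost_partial_config` only marks which dimensions start at cheap values. A self-contained sketch (the objective function and the warm-start values are invented for illustration):

```python
from flaml import tune

def compute_with_config(config):
    # toy objective where larger 'x' is more expensive to evaluate
    tune.report(metric=(config['x'] - 85000) ** 2 / config['x'] + config['y'])

analysis = tune.run(
    compute_with_config,
    config={
        'x': tune.qloguniform(lower=1, upper=1000000, q=1),
        'y': tune.randint(lower=1, upper=1000000),
    },
    points_to_evaluate=[{'x': 100, 'y': 100}],  # full configs, evaluated first
    low_cost_partial_config={'x': 1},           # 'x' is the cost-related dimension
    metric='metric', mode='min',
    num_samples=-1, time_budget_s=10,
)
print(analysis.best_trial.last_result)
```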

View File

@@ -1 +1 @@
-__version__ = "0.2.10"
+__version__ = "0.3.0"

View File

@@ -1139,7 +1139,7 @@
     "analysis = flaml.tune.run(\n",
     "    train_distilbert,\n",
     "    config=search_space,\n",
-    "    init_config={\n",
+    "    low_cost_partial_config={\n",
     "        \"num_train_epochs\": 1,\n",
     "    },\n",
     "    metric=HP_METRIC,\n",

View File

@@ -61,7 +61,7 @@ setuptools.setup(
         ],
         "nni": [
             "nni",
-        ]
+        ],
     },
     classifiers=[
         "Programming Language :: Python :: 3",

View File

@@ -148,16 +148,16 @@ def _test_deberta(method='BlendSearch'):
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'BlendSearch' == method:
         from flaml import BlendSearch
-        algo = BlendSearch(points_to_evaluate=[{
+        algo = BlendSearch(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'Dragonfly' == method:
         from ray.tune.suggest.dragonfly import DragonflySearch
         algo = DragonflySearch()

View File

@@ -119,14 +119,14 @@ def _test_distillbert(method='BlendSearch'):
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "num_train_epochs": 1,
-        }])
+        })
     elif 'BlendSearch' == method:
         from flaml import BlendSearch
-        algo = BlendSearch(points_to_evaluate=[{
+        algo = BlendSearch(low_cost_partial_config={
             "num_train_epochs": 1,
-        }])
+        })
     elif 'Dragonfly' == method:
         from ray.tune.suggest.dragonfly import DragonflySearch
         algo = DragonflySearch()

View File

@@ -148,16 +148,16 @@ def _test_electra(method='BlendSearch'):
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'BlendSearch' == method:
         from flaml import BlendSearch
-        algo = BlendSearch(points_to_evaluate=[{
+        algo = BlendSearch(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'Dragonfly' == method:
         from ray.tune.suggest.dragonfly import DragonflySearch
         algo = DragonflySearch()

View File

@@ -148,16 +148,16 @@ def _test_roberta(method='BlendSearch'):
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'BlendSearch' == method:
         from flaml import BlendSearch
-        algo = BlendSearch(points_to_evaluate=[{
+        algo = BlendSearch(low_cost_partial_config={
             "num_train_epochs": 1,
             "per_device_train_batch_size": 128,
-        }])
+        })
     elif 'Dragonfly' == method:
         from ray.tune.suggest.dragonfly import DragonflySearch
         algo = DragonflySearch()

View File

@@ -3,5 +3,5 @@ from flaml.searcher.blendsearch import BlendSearchTuner as BST
 class BlendSearchTuner(BST):
     # for best performance pass low cost initial parameters here
-    def __init__(self, points_to_evaluate=[{"hidden_size": 128}]):
-        super.__init__(self, points_to_evaluate=points_to_evaluate)
+    def __init__(self, low_cost_partial_config={"hidden_size": 128}):
+        super().__init__(low_cost_partial_config=low_cost_partial_config)

View File

@@ -230,7 +230,8 @@ def cifar10_main(method='BlendSearch', num_samples=10, max_num_epochs=100,
     if method == 'BlendSearch':
         result = tune.run(
             ray.tune.with_parameters(train_cifar, data_dir=data_dir),
-            init_config={
+            config=config,
+            low_cost_partial_config={
                 "l1": 2,
                 "l2": 2,
                 "num_epochs": 1,
@@ -242,7 +243,6 @@
             min_resource=1,
             report_intermediate_result=True,
             resources_per_trial={"cpu": 2, "gpu": gpus_per_trial},
-            config=config,
             local_dir='logs/',
             num_samples=num_samples,
             time_budget_s=time_budget_s,
@@ -260,12 +260,12 @@
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "l1": 2,
             "l2": 2,
             "num_epochs": 1,
             "batch_size": 4,
-        }])
+        })
     elif 'Nevergrad' == method:
         from ray.tune.suggest.nevergrad import NevergradSearch
         import nevergrad as ng

View File

@@ -64,9 +64,9 @@ def _test_xgboost(method='BlendSearch'):
     if method == 'BlendSearch':
         analysis = tune.run(
             train_breast_cancer,
-            init_config={
+            config=search_space,
+            low_cost_partial_config={
                 "max_depth": 1,
-                "min_child_weight": 3,
             },
             cat_hp_cost={
                 "min_child_weight": [6, 3, 2],
@@ -78,7 +78,6 @@ def _test_xgboost(method='BlendSearch'):
             report_intermediate_result=True,
             # You can add "gpu": 0.1 to allocate GPUs
             resources_per_trial={"cpu": 1},
-            config=search_space,
             local_dir='logs/',
             num_samples=num_samples * n_cpu,
             time_budget_s=time_budget_s,
@@ -96,10 +95,9 @@ def _test_xgboost(method='BlendSearch'):
         algo = OptunaSearch()
     elif 'CFO' == method:
         from flaml import CFO
-        algo = CFO(points_to_evaluate=[{
+        algo = CFO(low_cost_partial_config={
             "max_depth": 1,
-            "min_child_weight": 3,
-        }], cat_hp_cost={
+        }, cat_hp_cost={
             "min_child_weight": [6, 3, 2],
         })
     elif 'Dragonfly' == method:
@@ -169,12 +167,12 @@ def test_nested():
     analysis = tune.run(
         simple_func,
-        init_config={
+        config=search_space,
+        low_cost_partial_config={
             "cost_related": {"a": 1}
         },
         metric="metric",
         mode="min",
-        config=search_space,
         local_dir='logs/',
         num_samples=-1,
         time_budget_s=1)
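
As `test_nested` shows, `low_cost_partial_config` may mirror a nested search space. A self-contained sketch of that pattern (the objective and the extra dimension `"b"` are invented for illustration):

```python
from flaml import tune

def simple_func(config):
    # toy objective over a nested config; names mirror test_nested above
    tune.report(metric=(config["cost_related"]["a"] - 4) ** 2
                + (config["b"] - 4) ** 2)

analysis = tune.run(
    simple_func,
    config={
        "cost_related": {"a": tune.randint(lower=1, upper=8)},
        "b": tune.randint(lower=1, upper=8),
    },
    # the low-cost point matches the nested structure of the config
    low_cost_partial_config={"cost_related": {"a": 1}},
    metric="metric",
    mode="min",
    local_dir='logs/',
    num_samples=-1,
    time_budget_s=1,
)
print(analysis.best_trial.last_result)
```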

View File

@@ -0,0 +1,78 @@
+import unittest
+from sklearn.datasets import fetch_openml
+from sklearn.model_selection import train_test_split
+import numpy as np
+from flaml.automl import AutoML
+from flaml.model import XGBoostSklearnEstimator
+from flaml import tune
+
+dataset = "credit-g"
+
+
+class XGBoost2D(XGBoostSklearnEstimator):
+
+    @classmethod
+    def search_space(cls, data_size, task):
+        upper = min(32768, int(data_size))
+        return {
+            'n_estimators': {
+                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
+                'init_value': 4,
+            },
+            'max_leaves': {
+                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
+                'init_value': 4,
+            },
+        }
+
+
+def _test_simple(method=None, size_ratio=1.0):
+    automl = AutoML()
+    automl.add_learner(learner_name='XGBoost2D',
+                       learner_class=XGBoost2D)
+    try:
+        X, y = fetch_openml(name=dataset, return_X_y=True)
+    except:
+        from sklearn.datasets import load_wine
+        X, y = load_wine(return_X_y=True)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,
+                                                        random_state=42)
+    final_size = int(len(y_train) * size_ratio)
+    X_train = X_train[:final_size]
+    y_train = y_train[:final_size]
+    automl_settings = {
+        "estimator_list": ['XGBoost2D'],
+        # "metric": 'accuracy',
+        "task": 'classification',
+        "log_file_name": f"test/xgboost2d_{dataset}_{method}_{final_size}.log",
+        # "model_history": True,
+        # "log_training_metric": True,
+        # "split_type": split_type,
+        "n_jobs": 1,
+        "hpo_method": method,
+        "log_type": "all",
+        "time_budget": 3600,
+    }
+    automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
+
+
+def _test_grid_1():
+    _test_simple(method="grid", size_ratio=1.0 / 3.0)
+
+
+def _test_grid_2():
+    _test_simple(method="grid", size_ratio=2.0 / 3.0)
+
+
+def _test_grid_4():
+    _test_simple(method="grid", size_ratio=0.5)
+
+
+def _test_grid_3():
+    _test_simple(method="grid", size_ratio=1.0)
+
+
+if __name__ == "__main__":
+    unittest.main()