From a229a6112a551ce84728f6c75b8c26320930cc15 Mon Sep 17 00:00:00 2001 From: Qingyun Wu Date: Mon, 23 Aug 2021 19:36:51 -0400 Subject: [PATCH] Support parallel and add random search (#167) * non hashable value out of signature * parallel trials * add random in _search_parallel * fix bug in retraining * check memory constraint before training * retrain_full * log custom metric * retraining budget check * sample size check before retrain * remove 'time2eval' from result * report 'total_search_time' in result * rename total_search_time to wall_clock_time * rename train_loss boolean to log_training_metric * set default train_loss to None * exclude oom result * log retrained model * no subsample * doc str * notebook * predicted value is NaN for sarimax * version Co-authored-by: Chi Wang Co-authored-by: Qingyun Wu --- flaml/automl.py | 3466 +++++++++++---------- flaml/data.py | 4 +- flaml/ml.py | 48 +- flaml/model.py | 34 +- flaml/nlp/autotransformers.py | 1 + flaml/searcher/flow2.py | 25 +- flaml/training_log.py | 9 +- flaml/tune/space.py | 1 + flaml/version.py | 2 +- notebook/automl_in_sklearn_pipeline.ipynb | 1054 ++----- notebook/flaml_automl.ipynb | 806 +++-- notebook/flaml_azureml.ipynb | 360 +-- notebook/flaml_forecast.ipynb | 1571 ++++++---- notebook/flaml_lightgbm.ipynb | 1326 ++++---- notebook/flaml_xgboost.ipynb | 1008 +++--- test/test_automl.py | 80 +- test/test_notebook_example.py | 1 + test/test_python_log.py | 8 +- test/test_xgboost2d.py | 9 +- test/tune/example.py | 5 +- test/tune/test_tune.py | 1 + 21 files changed, 5142 insertions(+), 4677 deletions(-) diff --git a/flaml/automl.py b/flaml/automl.py index 8d91c1b618..4b2f7b0dcc 100644 --- a/flaml/automl.py +++ b/flaml/automl.py @@ -1,1657 +1,1809 @@ -'''! - * Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. - * Licensed under the MIT License. See LICENSE file in the - * project root for license information. -''' -import time -from typing import Callable, Optional -import warnings -from functools import partial -import numpy as np -from scipy.sparse import issparse -from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, \ - RepeatedKFold, GroupKFold, TimeSeriesSplit -from sklearn.utils import shuffle -import pandas as pd - -from .ml import compute_estimator, train_estimator, get_estimator_class, \ - get_classification_objective -from .config import ( - MIN_SAMPLE_TRAIN, MEM_THRES, RANDOM_SEED, - SMALL_LARGE_THRES, CV_HOLDOUT_THRESHOLD, SPLIT_RATIO, N_SPLITS, - SAMPLE_MULTIPLY_FACTOR) -from .data import concat -from . 
import tune -from .training_log import training_log_reader, training_log_writer - -import logging - -logger = logging.getLogger(__name__) -logger_formatter = logging.Formatter( - '[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s', - '%m-%d %H:%M:%S') - -try: - import mlflow -except ImportError: - mlflow = None - - -class SearchState: - - @property - def search_space(self): - return self._search_space_domain - - @property - def estimated_cost4improvement(self): - return max(self.time_best_found - self.time_best_found_old, - self.total_time_used - self.time_best_found) - - def __init__(self, learner_class, data_size, task, starting_point=None): - self.init_eci = learner_class.cost_relative2lgbm() - self._search_space_domain = {} - self.init_config = {} - self.low_cost_partial_config = {} - self.cat_hp_cost = {} - self.data_size = data_size - self.ls_ever_converged = False - search_space = learner_class.search_space( - data_size=data_size, task=task) - for name, space in search_space.items(): - assert 'domain' in space - self._search_space_domain[name] = space['domain'] - if 'init_value' in space: - self.init_config[name] = space['init_value'] - if 'low_cost_init_value' in space: - self.low_cost_partial_config[name] = space[ - 'low_cost_init_value'] - if 'cat_hp_cost' in space: - self.cat_hp_cost[name] = space['cat_hp_cost'] - # if a starting point is provided, set the init config to be - # the starting point provided - if starting_point is not None and starting_point.get(name) is not None: - self.init_config[name] = starting_point[name] - self._hp_names = list(self._search_space_domain.keys()) - self.search_alg = None - self.best_config = None - self.best_loss = self.best_loss_old = np.inf - self.total_time_used = 0 - self.total_iter = 0 - self.base_eci = None - self.time_best_found = 0 - self.time2eval_best = 0 - self.time2eval_best_old = 0 - self.trained_estimator = None - self.sample_size = None - self.trial_time = 0 - - def update(self, analysis, time_used, save_model_history=False): - if not analysis.trials: - return - result = analysis.trials[-1].last_result - if result: - config = result['config'] - if config and 'FLAML_sample_size' in config: - self.sample_size = config['FLAML_sample_size'] - else: - self.sample_size = self.data_size - obj = result['val_loss'] - train_loss = result['train_loss'] - time2eval = result['time2eval'] - trained_estimator = result[ - 'trained_estimator'] - del result['trained_estimator'] # free up RAM - else: - obj, time2eval, trained_estimator = np.inf, 0.0, None - train_loss = config = None - self.trial_time = time2eval - self.total_time_used += time_used - self.total_iter += 1 - - if self.base_eci is None: - self.base_eci = time_used - if (obj is not None) and (self.best_loss is None or obj < self.best_loss): - self.best_loss_old = self.best_loss if self.best_loss < np.inf \ - else 2 * obj - self.best_loss = obj - self.time_best_found_old = self.time_best_found - self.time_best_found = self.total_time_used - self.iter_best_found = self.total_iter - self.best_config = config - self.best_config_sample_size = self.sample_size - self.best_config_train_time = time_used - if time2eval: - self.time2eval_best_old = self.time2eval_best - self.time2eval_best = time2eval - if self.trained_estimator and trained_estimator and \ - self.trained_estimator != trained_estimator and \ - not save_model_history: - self.trained_estimator.cleanup() - if trained_estimator: - self.trained_estimator = trained_estimator - self.train_loss, self.val_loss, self.config 
= train_loss, obj, config - - def get_hist_config_sig(self, sample_size, config): - config_values = tuple([config[k] for k in self._hp_names]) - config_sig = str(sample_size) + '_' + str(config_values) - return config_sig - - def est_retrain_time(self, retrain_sample_size): - assert self.best_config_sample_size is not None, \ - 'need to first get best_config_sample_size' - return (self.time2eval_best * retrain_sample_size - / self.best_config_sample_size) - - -class AutoMLState: - - def _prepare_sample_train_data(self, sample_size): - full_size = len(self.y_train) - sampled_weight = None - if sample_size <= full_size: - if isinstance(self.X_train, pd.DataFrame): - sampled_X_train = self.X_train.iloc[:sample_size] - else: - sampled_X_train = self.X_train[:sample_size] - sampled_y_train = self.y_train[:sample_size] - weight = self.fit_kwargs.get('sample_weight') - if weight is not None: - sampled_weight = weight[:sample_size] - else: - sampled_X_train = concat(self.X_train, self.X_val) - sampled_y_train = np.concatenate([self.y_train, self.y_val]) - weight = self.fit_kwargs.get('sample_weight') - if weight is not None: - sampled_weight = np.concatenate([weight, self.weight_val]) - return sampled_X_train, sampled_y_train, sampled_weight - - def _compute_with_config_base(self, - estimator, - config_w_resource): - if 'FLAML_sample_size' in config_w_resource: - sample_size = int(config_w_resource['FLAML_sample_size']) - else: - sample_size = self.data_size - sampled_X_train, sampled_y_train, sampled_weight = \ - self._prepare_sample_train_data(sample_size) - if sampled_weight is not None: - weight = self.fit_kwargs['sample_weight'] - self.fit_kwargs['sample_weight'] = sampled_weight - else: - weight = None - config = config_w_resource.copy() - if 'FLAML_sample_size' in config: - del config['FLAML_sample_size'] - time_left = self.time_budget - self.time_from_start - budget = time_left if sample_size == self.data_size else \ - time_left / 2 * sample_size / self.data_size - - trained_estimator, val_loss, train_loss, time2eval, pred_time = \ - compute_estimator( - sampled_X_train, - sampled_y_train, - self.X_val, - self.y_val, - self.weight_val, - min(budget, self.train_time_limit), - self.kf, - config, - self.task, - estimator, - self.eval_method, - self.metric, - self.best_loss, - self.n_jobs, - self.learner_classes.get(estimator), - self.log_training_metric, - self.fit_kwargs) - result = { - 'pred_time': pred_time, - 'time2eval': time2eval, - 'train_loss': train_loss, - 'val_loss': val_loss, - 'trained_estimator': trained_estimator - } - if sampled_weight is not None: - self.fit_kwargs['sample_weight'] = weight - # tune.report(**result) - return result - - def _train_with_config( - self, estimator, config_w_resource, sample_size=None - ): - config = config_w_resource.copy() - if 'FLAML_sample_size' in config: - if not sample_size: - sample_size = config['FLAML_sample_size'] - del config['FLAML_sample_size'] - assert sample_size is not None - sampled_X_train, sampled_y_train, sampled_weight = \ - self._prepare_sample_train_data(sample_size) - if sampled_weight is not None: - weight = self.fit_kwargs['sample_weight'] - self.fit_kwargs['sample_weight'] = sampled_weight - else: - weight = None - budget = None if self.time_budget is None else ( - self.time_budget - self.time_from_start) - estimator, train_time = train_estimator( - sampled_X_train, - sampled_y_train, - config, - self.task, - estimator, - self.n_jobs, - self.learner_classes.get(estimator), - budget, - self.fit_kwargs) - if 
sampled_weight is not None: - self.fit_kwargs['sample_weight'] = weight - return estimator, train_time - - -class AutoML: - '''The AutoML class - - Example: - - .. code-block:: python - - automl = AutoML() - automl_settings = { - "time_budget": 60, - "metric": 'accuracy', - "task": 'classification', - "log_file_name": 'test/mylog.log', - } - automl.fit(X_train = X_train, y_train = y_train, - **automl_settings) - - ''' - - from .version import __version__ - - def __init__(self): - self._track_iter = 0 - self._state = AutoMLState() - self._state.learner_classes = {} - - @property - def model_history(self): - '''A dictionary of iter->model, storing the models when - the best model is updated each time. - ''' - return self._model_history - - @property - def config_history(self): - '''A dictionary of iter->(estimator, config, time), - storing the best estimator, config, and the time when the best - model is updated each time. - ''' - return self._config_history - - @property - def model(self): - '''An object with `predict()` and `predict_proba()` methods (for - classification), storing the best trained model. - ''' - if self._trained_estimator: - return self._trained_estimator - else: - return None - - def best_model_for_estimator(self, estimator_name): - '''Return the best model found for a particular estimator - - Args: - estimator_name: a str of the estimator's name - - Returns: - An object with `predict()` and `predict_proba()` methods (for - classification), storing the best trained model for estimator_name. - ''' - if estimator_name in self._search_states: - state = self._search_states[estimator_name] - if hasattr(state, 'trained_estimator'): - return state.trained_estimator - return None - - @property - def best_estimator(self): - '''A string indicating the best estimator found.''' - return self._best_estimator - - @property - def best_iteration(self): - '''An integer of the iteration number where the best - config is found.''' - return self._best_iteration - - @property - def best_config(self): - '''A dictionary of the best configuration.''' - return self._search_states[self._best_estimator].best_config - - @property - def best_config_per_estimator(self): - '''A dictionary of all estimators' best configuration.''' - return {e: e_search_state.best_config for e, e_search_state in - self._search_states.items()} - - @property - def best_loss(self): - '''A float of the best loss found - ''' - return self._state.best_loss - - @property - def best_config_train_time(self): - '''A float of the seconds taken by training the - best config.''' - return self._search_states[self._best_estimator].best_config_train_time - - @property - def classes_(self): - '''A list of n_classes elements for class labels.''' - if self._label_transformer: - return self._label_transformer.classes_.tolist() - if self._trained_estimator: - return self._trained_estimator.classes_.tolist() - return None - - def predict(self, X_test, freq=None): - '''Predict label from features. - - Args: - X_test: A numpy array of featurized instances, shape n * m, - or a pandas dataframe with one column with timestamp values - for 'forecasting' task. - freq: str or pandas offset, default=None | The frequency of the - time-series. - - Returns: - A numpy array of shape n * 1 - - each element is a predicted class - label for an instance. - ''' - if self._trained_estimator is None: - warnings.warn( - "No estimator is trained. 
Please run fit with enough budget.") - return None - X_test = self._preprocess(X_test) - if self._state.task == 'forecast': - X_test_df = pd.DataFrame(X_test) - X_test_col = list(X_test.columns)[0] - X_test_df = X_test_df.rename(columns={X_test_col: 'ds'}) - y_pred = self._trained_estimator.predict(X_test_df, freq=freq) - else: - y_pred = self._trained_estimator.predict(X_test) - if y_pred.ndim > 1 and isinstance(y_pred, np.ndarray): - y_pred = y_pred.flatten() - if self._label_transformer: - return self._label_transformer.inverse_transform(pd.Series( - y_pred)) - else: - return y_pred - - def predict_proba(self, X_test): - '''Predict the probability of each class from features, only works for - classification problems. - - Args: - X_test: A numpy array of featurized instances, shape n * m. - - Returns: - A numpy array of shape n * c. c is the # classes. Each element at - (i, j) is the probability for instance i to be in class j. - ''' - X_test = self._preprocess(X_test) - proba = self._trained_estimator.predict_proba(X_test) - return proba - - def _preprocess(self, X): - if issparse(X): - X = X.tocsr() - if self._transformer: - X = self._transformer.transform(X) - return X - - def _validate_data(self, X_train_all, y_train_all, dataframe, label, - X_val=None, y_val=None): - if self._state.task == 'forecast': - if dataframe is not None and label is not None: - dataframe = dataframe.copy() - dataframe = dataframe.rename(columns={label[0]: 'ds', label[1]: 'y'}) - elif dataframe is not None: - if ('ds' not in dataframe) or ('y' not in dataframe): - raise ValueError( - 'For forecasting task, Dataframe must have columns "ds" and "y" ' - 'with the dates and values respectively.' - ) - elif (X_train_all is not None) and (y_train_all is not None): - dataframe = pd.DataFrame(X_train_all) - time_col = list(dataframe.columns)[0] - dataframe = dataframe.rename(columns={time_col: 'ds'}) - dataframe['y'] = pd.Series(y_train_all) - X_train_all = None - y_train_all = None - label = 'y' - - if X_train_all is not None and y_train_all is not None: - if not (isinstance(X_train_all, np.ndarray) or issparse(X_train_all) - or isinstance(X_train_all, pd.DataFrame)): - raise ValueError( - "X_train_all must be a numpy array, a pandas dataframe, " - "or Scipy sparse matrix.") - if not (isinstance(y_train_all, np.ndarray) - or isinstance(y_train_all, pd.Series)): - raise ValueError( - "y_train_all must be a numpy array or a pandas series.") - if X_train_all.size == 0 or y_train_all.size == 0: - raise ValueError("Input data must not be empty.") - if isinstance(y_train_all, np.ndarray): - y_train_all = y_train_all.flatten() - if X_train_all.shape[0] != y_train_all.shape[0]: - raise ValueError( - "# rows in X_train must match length of y_train.") - self._df = isinstance(X_train_all, pd.DataFrame) - self._nrow, self._ndim = X_train_all.shape - X, y = X_train_all, y_train_all - elif dataframe is not None and label is not None: - if not isinstance(dataframe, pd.DataFrame): - raise ValueError("dataframe must be a pandas DataFrame") - if label not in dataframe.columns: - raise ValueError("label must be a column name in dataframe") - self._df = True - X = dataframe.drop(columns=label) - self._nrow, self._ndim = X.shape - y = dataframe[label] - else: - raise ValueError( - "either X_train+y_train or dataframe+label are required") - if issparse(X_train_all) or self._state.task == 'forecast': - self._transformer = self._label_transformer = False - self._X_train_all, self._y_train_all = X, y - else: - from .data import DataTransformer 
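Illustration, not part of the patch: the forecast branch of `_validate_data` above accepts either a dataframe that already has a 'ds' (timestamp) column and a 'y' (value) column, or a `label` tuple naming the two columns to rename. A minimal sketch of conforming input (the variable names here are hypothetical):

    import pandas as pd

    # a dataframe with a 'ds' timestamp column and a 'y' value column,
    # as required for task='forecast'
    forecast_df = pd.DataFrame({
        'ds': pd.date_range('2021-01-01', periods=10, freq='D'),
        'y': range(10),
    })
    # alternatively, keep arbitrary column names and pass
    # label=('date_col', 'value_col'); they are renamed to 'ds' and 'y'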
- self._transformer = DataTransformer() - self._X_train_all, self._y_train_all = \ - self._transformer.fit_transform(X, y, self._state.task) - self._label_transformer = self._transformer.label_transformer - self._sample_weight_full = self._state.fit_kwargs.get('sample_weight') - if X_val is not None and y_val is not None: - if not (isinstance(X_val, np.ndarray) or issparse(X_val) - or isinstance(X_val, pd.DataFrame)): - raise ValueError( - "X_val must be None, a numpy array, a pandas dataframe, " - "or Scipy sparse matrix.") - if not (isinstance(y_val, np.ndarray) - or isinstance(y_val, pd.Series)): - raise ValueError( - "y_val must be None, a numpy array or a pandas series.") - if X_val.size == 0 or y_val.size == 0: - raise ValueError( - "Validation data are expected to be nonempty. " - "Use None for X_val and y_val if no validation data.") - if isinstance(y_val, np.ndarray): - y_val = y_val.flatten() - if X_val.shape[0] != y_val.shape[0]: - raise ValueError("# rows in X_val must match length of y_val.") - if self._transformer: - self._state.X_val = self._transformer.transform(X_val) - else: - self._state.X_val = X_val - if self._label_transformer: - self._state.y_val = self._label_transformer.transform(y_val) - else: - self._state.y_val = y_val - else: - self._state.X_val = self._state.y_val = None - - def _prepare_data(self, - eval_method, - split_ratio, - n_splits, - period=None): - X_val, y_val = self._state.X_val, self._state.y_val - if issparse(X_val): - X_val = X_val.tocsr() - X_train_all, y_train_all = \ - self._X_train_all, self._y_train_all - if issparse(X_train_all): - X_train_all = X_train_all.tocsr() - if (self._state.task == 'binary:logistic' or self._state.task == 'multi:softmax') \ - and self._state.fit_kwargs.get('sample_weight') is None \ - and self._split_type != 'time': - # logger.info(f"label {pd.unique(y_train_all)}") - label_set, counts = np.unique(y_train_all, return_counts=True) - # augment rare classes - rare_threshld = 20 - rare = counts < rare_threshld - rare_label, rare_counts = label_set[rare], counts[rare] - for i, label in enumerate(rare_label): - count = rare_count = rare_counts[i] - rare_index = y_train_all == label - n = len(y_train_all) - while count < rare_threshld: - if self._df: - X_train_all = concat(X_train_all, - X_train_all.iloc[:n].loc[rare_index]) - else: - X_train_all = concat(X_train_all, - X_train_all[:n][rare_index, :]) - if isinstance(y_train_all, pd.Series): - y_train_all = concat(y_train_all, - y_train_all.iloc[:n].loc[rare_index]) - else: - y_train_all = np.concatenate([y_train_all, - y_train_all[:n][rare_index]]) - count += rare_count - logger.info( - f"class {label} augmented from {rare_count} to {count}") - SHUFFLE_SPLIT_TYPES = ['uniform', 'stratified'] - if self._split_type in SHUFFLE_SPLIT_TYPES: - if 'sample_weight' in self._state.fit_kwargs: - X_train_all, y_train_all, self._state.fit_kwargs[ - 'sample_weight'] = shuffle( - X_train_all, y_train_all, - self._state.fit_kwargs['sample_weight'], - random_state=RANDOM_SEED) - elif hasattr(self._state, 'groups') and self._state.groups is not None: - X_train_all, y_train_all, self._state.groups = shuffle( - X_train_all, y_train_all, self._state.groups, - random_state=RANDOM_SEED) - else: - X_train_all, y_train_all = shuffle( - X_train_all, y_train_all, random_state=RANDOM_SEED) - if self._df: - X_train_all.reset_index(drop=True, inplace=True) - if isinstance(y_train_all, pd.Series): - y_train_all.reset_index(drop=True, inplace=True) - - X_train, y_train = X_train_all, y_train_all - if 
X_val is None: - # if eval_method = holdout, make holdout data - if eval_method == 'holdout' and self._split_type == 'time': - if 'period' in self._state.fit_kwargs: - num_samples = X_train_all.shape[0] - split_idx = num_samples - self._state.fit_kwargs.get('period') - X_train = X_train_all[:split_idx] - y_train = y_train_all[:split_idx] - X_val = X_train_all[split_idx:] - y_val = y_train_all[split_idx:] - else: - if 'sample_weight' in self._state.fit_kwargs: - X_train, X_val, y_train, y_val, self._state.fit_kwargs[ - 'sample_weight'], self._state.weight_val = \ - train_test_split( - X_train_all, - y_train_all, - self._state.fit_kwargs['sample_weight'], - test_size=split_ratio, - shuffle=False) - else: - X_train, X_val, y_train, y_val = train_test_split( - X_train_all, - y_train_all, - test_size=split_ratio, - shuffle=False) - elif self._state.task != 'regression' and eval_method == 'holdout': - # for classification, make sure the labels are complete in both - # training and validation data - label_set, first = np.unique(y_train_all, return_index=True) - rest = [] - last = 0 - first.sort() - for i in range(len(first)): - rest.extend(range(last, first[i])) - last = first[i] + 1 - rest.extend(range(last, len(y_train_all))) - X_first = X_train_all.iloc[first] if self._df else X_train_all[ - first] - X_rest = X_train_all.iloc[rest] if self._df else X_train_all[rest] - y_rest = y_train_all.iloc[rest] if isinstance( - y_train_all, pd.Series) else y_train_all[rest] - stratify = y_rest if self._split_type == 'stratified' else \ - None - if 'sample_weight' in self._state.fit_kwargs: - X_train, X_val, y_train, y_val, weight_train, weight_val = \ - train_test_split( - X_rest, - y_rest, - self._state.fit_kwargs['sample_weight'][rest], - test_size=split_ratio, - random_state=RANDOM_SEED) - weight1 = self._state.fit_kwargs['sample_weight'][first] - self._state.weight_val = concat(weight1, weight_val) - self._state.fit_kwargs['sample_weight'] = concat( - weight1, weight_train) - else: - X_train, X_val, y_train, y_val = train_test_split( - X_rest, - y_rest, - test_size=split_ratio, - stratify=stratify, - random_state=RANDOM_SEED) - X_train = concat(X_first, X_train) - y_train = concat( - label_set, y_train) if self._df else np.concatenate( - [label_set, y_train]) - X_val = concat(X_first, X_val) - y_val = concat(label_set, y_val) if self._df else \ - np.concatenate([label_set, y_val]) - elif eval_method == 'holdout' and self._state.task == 'regression': - if 'sample_weight' in self._state.fit_kwargs: - X_train, X_val, y_train, y_val, self._state.fit_kwargs[ - 'sample_weight'], self._state.weight_val = \ - train_test_split( - X_train_all, - y_train_all, - self._state.fit_kwargs['sample_weight'], - test_size=split_ratio, - random_state=RANDOM_SEED) - else: - X_train, X_val, y_train, y_val = train_test_split( - X_train_all, - y_train_all, - test_size=split_ratio, - random_state=RANDOM_SEED) - self._state.data_size = X_train.shape[0] - if X_val is None: - self.data_size_full = self._state.data_size - else: - self.data_size_full = self._state.data_size + X_val.shape[0] - self._state.X_train, self._state.y_train, self._state.X_val, \ - self._state.y_val = (X_train, y_train, X_val, y_val) - if hasattr(self._state, 'groups') and self._state.groups is not None: - logger.info("Using GroupKFold") - assert len(self._state.groups) == y_train_all.size, \ - "the length of groups must match the number of examples" - assert len(np.unique(self._state.groups)) >= n_splits, \ - "the number of groups must be equal or larger 
than n_splits" - self._state.kf = GroupKFold(n_splits) - self._state.kf.groups = self._state.groups - elif self._split_type == "stratified": - logger.info("Using StratifiedKFold") - assert y_train_all.size >= n_splits, ( - f"{n_splits}-fold cross validation" - f" requires input data with at least {n_splits} examples.") - assert y_train_all.size >= 2 * n_splits, ( - f"{n_splits}-fold cross validation with metric=r2 " - f"requires input data with at least {n_splits*2} examples.") - self._state.kf = RepeatedStratifiedKFold( - n_splits=n_splits, n_repeats=1, random_state=RANDOM_SEED) - elif self._split_type == "time": - logger.info("Using TimeSeriesSplit") - if self._state.task == 'forecast': - self._state.kf = TimeSeriesSplit( - n_splits=n_splits, test_size=self._state.fit_kwargs.get('period')) - else: - self._state.kf = TimeSeriesSplit(n_splits=n_splits) - else: - logger.info("Using RepeatedKFold") - self._state.kf = RepeatedKFold( - n_splits=n_splits, n_repeats=1, random_state=RANDOM_SEED) - - def add_learner(self, - learner_name, - learner_class): - '''Add a customized learner - - Args: - learner_name: A string of the learner's name - learner_class: A subclass of flaml.model.BaseEstimator - ''' - self._state.learner_classes[learner_name] = learner_class - - def get_estimator_from_log(self, log_file_name, record_id, task): - '''Get the estimator from log file - - Args: - log_file_name: A string of the log file name - record_id: An integer of the record ID in the file, - 0 corresponds to the first trial - task: A string of the task type, - 'binary', 'multi', or 'regression' - - Returns: - An estimator object for the given configuration - ''' - - with training_log_reader(log_file_name) as reader: - record = reader.get_record(record_id) - estimator = record.learner - config = record.config - - estimator, _ = train_estimator( - None, None, config, task, estimator, - estimator_class=self._state.learner_classes.get(estimator)) - return estimator - - def retrain_from_log(self, - log_file_name, - X_train=None, - y_train=None, - dataframe=None, - label=None, - time_budget=0, - task='classification', - eval_method='auto', - split_ratio=SPLIT_RATIO, - n_splits=N_SPLITS, - split_type="stratified", - n_jobs=1, - train_best=True, - train_full=False, - record_id=-1, - **fit_kwargs): - '''Retrain from log file - - Args: - time_budget: A float number of the time budget in seconds - log_file_name: A string of the log file name - X_train: A numpy array of training data in shape n*m - y_train: A numpy array of labels in shape n*1 - task: A string of the task type, e.g., - 'classification', 'regression' - eval_method: A string of resampling strategy, one of - ['auto', 'cv', 'holdout'] - split_ratio: A float of the validation data percentage for holdout - n_splits: An integer of the number of folds for cross-validation - n_jobs: An integer of the number of threads for training - train_best: A boolean of whether to train the best config in the - time budget; if false, train the last config in the budget - train_full: A boolean of whether to train on the full data. If true, - eval_method and sample_size in the log file will be ignored - record_id: the ID of the training log record from which the model will - be retrained. By default `record_id = -1` which means this will be - ignored. `record_id = 0` corresponds to the first trial, and - when `record_id >= 0`, `time_budget` will be ignored. 
- **fit_kwargs: Other keyword arguments to pass to fit() function of - the searched learners, such as sample_weight - ''' - self._state.task = task - self._state.fit_kwargs = fit_kwargs - self._validate_data(X_train, y_train, dataframe, label) - - logger.info('log file name {}'.format(log_file_name)) - - best_config = None - best_val_loss = float('+inf') - best_estimator = None - sample_size = None - time_used = 0.0 - training_duration = 0 - best = None - with training_log_reader(log_file_name) as reader: - if record_id >= 0: - best = reader.get_record(record_id) - else: - for record in reader.records(): - time_used = record.total_search_time - if time_used > time_budget: - break - training_duration = time_used - val_loss = record.validation_loss - if val_loss <= best_val_loss or not train_best: - if val_loss == best_val_loss and train_best: - size = record.sample_size - if size > sample_size: - best = record - best_val_loss = val_loss - sample_size = size - else: - best = record - size = record.sample_size - best_val_loss = val_loss - sample_size = size - if not training_duration: - from .model import BaseEstimator as Estimator - self._trained_estimator = Estimator() - self._trained_estimator.model = None - return training_duration - if not best: - return - best_estimator = best.learner - best_config = best.config - sample_size = len(self._y_train_all) if train_full \ - else best.sample_size - - logger.info( - 'estimator = {}, config = {}, #training instances = {}'.format( - best_estimator, best_config, sample_size)) - # Partially copied from fit() function - # Initialize some attributes required for retrain_from_log - self._state.task = task - if self._state.task == 'classification': - self._state.task = get_classification_objective( - len(np.unique(self._y_train_all))) - assert split_type in ["stratified", "uniform", "time"] - self._split_type = split_type - elif self._state.task == 'regression': - if split_type in ["uniform", "time"]: - self._split_type = split_type - else: - self._split_type = "uniform" - elif self._state.task == 'forecast': - self._split_type = "time" - if record_id >= 0: - eval_method = 'cv' - elif eval_method == 'auto': - eval_method = self._decide_eval_method(time_budget) - self.modelcount = 0 - self._prepare_data(eval_method, split_ratio, n_splits) - self._state.time_budget = None - self._state.n_jobs = n_jobs - self._trained_estimator = self._state._train_with_config( - best_estimator, best_config, sample_size)[0] - logger.info('retrain from log succeeded') - return training_duration - - def _decide_eval_method(self, time_budget): - if self._state.X_val is not None: - return 'holdout' - nrow, dim = self._nrow, self._ndim - if nrow * dim / 0.9 < SMALL_LARGE_THRES * ( - time_budget / 3600) and nrow < CV_HOLDOUT_THRESHOLD: - # time allows or sampling can be used and cv is necessary - return 'cv' - else: - return 'holdout' - - @property - def search_space(self) -> dict: - '''Search space - Must be called after fit(...) 
(use max_iter=0 to prevent actual fitting) - - Returns: - A dict of the search space - ''' - estimator_list = self.estimator_list - if len(estimator_list) == 1: - estimator = estimator_list[0] - space = self._search_states[estimator].search_space.copy() - space['learner'] = estimator - return space - choices = [] - for estimator in estimator_list: - space = self._search_states[estimator].search_space.copy() - space['learner'] = estimator - choices.append(space) - return {'ml': tune.choice(choices)} - - @property - def low_cost_partial_config(self) -> dict: - '''Low cost partial config - - Returns: - A dict. - (a) if there is only one estimator in estimator_list, each key is a - hyperparameter name. - (b) otherwise, it is a nested dict with 'ml' as the key, and - a list of the low_cost_partial_configs as the value, corresponding - to each learner's low_cost_partial_config; the estimator index as - an integer corresponding to the cheapest learner is appended to the - list at the end. - - ''' - if len(self.estimator_list) == 1: - estimator = self.estimator_list[0] - c = self._search_states[estimator].low_cost_partial_config - return c - else: - configs = [] - for estimator in self.estimator_list: - c = self._search_states[estimator].low_cost_partial_config - configs.append(c) - configs.append(np.argmin([ - self._state.learner_classes.get(estimator).cost_relative2lgbm() - for estimator in self.estimator_list])) - config = {'ml': configs} - return config - - @property - def cat_hp_cost(self) -> dict: - '''Categorical hyperparameter cost - - Returns: - A dict. - (a) if there is only one estimator in estimator_list, each key is a - hyperparameter name. - (b) otherwise, it is a nested dict with 'ml' as the key, and - a list of the cat_hp_cost's as the value, corresponding - to each learner's cat_hp_cost; the cost relative to lgbm for each - learner (as a list itself) is appended to the list at the end. - - ''' - if len(self.estimator_list) == 1: - estimator = self.estimator_list[0] - c = self._search_states[estimator].cat_hp_cost - return c - else: - configs = [] - for estimator in self.estimator_list: - c = self._search_states[estimator].cat_hp_cost - configs.append(c) - configs.append([ - self._state.learner_classes.get(estimator).cost_relative2lgbm() - for estimator in self.estimator_list]) - config = {'ml': configs} - return config - - @property - def points_to_evalaute(self) -> dict: - '''Initial points to evaluate - - Returns: - A list of dicts. 
Each dict is the initial point for each learner - ''' - points = [] - for estimator in self.estimator_list: - config = self._search_states[estimator].init_config - config['learner'] = estimator - if len(self.estimator_list) > 1: - points.append({'ml': config}) - else: - points.append(config) - return points - - @property - def prune_attr(self) -> Optional[str]: - '''Attribute for pruning - - Returns: - A string for the sample size attribute or None - ''' - return 'FLAML_sample_size' if self._sample else None - - @property - def min_resource(self) -> Optional[float]: - '''Attribute for pruning - - Returns: - A float for the minimal sample size or None - ''' - return MIN_SAMPLE_TRAIN if self._sample else None - - @property - def max_resource(self) -> Optional[float]: - '''Attribute for pruning - - Returns: - A float for the maximal sample size or None - ''' - return self._state.data_size if self._sample else None - - @property - def trainable(self) -> Callable[[dict], Optional[float]]: - '''Training function - - Returns: - A function that evaluates each config and returns the loss - ''' - self._state.time_from_start = 0 - for estimator in self.estimator_list: - search_state = self._search_states[estimator] - if not hasattr(search_state, 'training_function'): - search_state.training_function = partial( - AutoMLState._compute_with_config_base, - self._state, estimator) - states = self._search_states - - def train(config: dict): - sample_size = config.get('FLAML_sample_size') - config = config.get('ml', config).copy() - if sample_size: - config['FLAML_sample_size'] = sample_size - estimator = config['learner'] - del config['learner'] - result = states[estimator].training_function(config) - return result - - return train - - @property - def size(self) -> Callable[[dict], float]: - '''Size function - - Returns: - A function that returns the mem size in bytes for a config - ''' - - def size_func(config: dict) -> float: - config = config.get('ml', config) - estimator = config['learner'] - learner_class = self._state.learner_classes.get(estimator) - return learner_class.size(config) - - return size_func - - @property - def metric_constraints(self) -> list: - '''Metric constraints - - Returns: - A list of the metric constraints - ''' - constraints = [] - if np.isfinite(self._pred_time_limit): - constraints.append( - ('pred_time', '<=', self._pred_time_limit)) - return constraints - - def fit(self, - X_train=None, - y_train=None, - dataframe=None, - label=None, - metric='auto', - task='classification', - n_jobs=-1, - log_file_name='flaml.log', - estimator_list='auto', - time_budget=60, - max_iter=1000000, - sample=True, - ensemble=False, - eval_method='auto', - log_type='better', - model_history=False, - split_ratio=SPLIT_RATIO, - n_splits=N_SPLITS, - log_training_metric=False, - mem_thres=MEM_THRES, - pred_time_limit=np.inf, - train_time_limit=np.inf, - X_val=None, - y_val=None, - sample_weight_val=None, - groups=None, - verbose=1, - retrain_full=True, - split_type="stratified", - learner_selector='sample', - hpo_method=None, - starting_points={}, - seed=None, - **fit_kwargs): - '''Find a model for a given task - - Args: - X_train: A numpy array or a pandas dataframe of training data in - shape (n, m) - For 'forecast' task, X_train should be timestamp - y_train: A numpy array or a pandas series of labels in shape (n,) - For 'forecast' task, y_train should be value - dataframe: A dataframe of training data including label column - For 'forecast' task, dataframe must be specified and should - have two 
columns: timestamp and value - label: A str of the label column name for 'classification' or - 'regression' task or a tuple of strings for timestamp and - value columns for 'forecasting' task - Note: If X_train and y_train are provided, - dataframe and label are ignored; - If not, dataframe and label must be provided. - metric: A string of the metric name or a function, - e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', - 'f1', 'micro_f1', 'macro_f1', 'log_loss', 'mape', 'mae', 'mse', 'r2' - for 'forecast' task, use 'mape' - if passing a customized metric function, the function needs to - have the following signature: - - .. code-block:: python - - def custom_metric( - X_test, y_test, estimator, labels, - X_train, y_train, weight_test=None, weight_train=None - ): - return metric_to_minimize, metrics_to_log - - which returns a float number as the minimization objective, - and a tuple of floats or a dictionary as the metrics to log - task: A string of the task type, e.g., - 'classification', 'regression', 'forecast' - n_jobs: An integer of the number of threads for training - log_file_name: A string of the log file name - estimator_list: A list of strings for estimator names, or 'auto' - e.g., - - .. code-block:: python - - ['lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree'] - - time_budget: A float number of the time budget in seconds - max_iter: An integer of the maximal number of iterations - sample: A boolean of whether to sample the training data during - search - eval_method: A string of resampling strategy, one of - ['auto', 'cv', 'holdout'] - split_ratio: A float of the validation data percentage for holdout - n_splits: An integer of the number of folds for cross - validation - log_type: A string of the log type, one of - ['better', 'all'] - 'better' only logs configs with better loss than previous iters - 'all' logs all the tried configs - model_history: A boolean of whether to keep the history of best - models in the history property. Make sure memory is large - enough if setting to True. - log_training_metric: A boolean of whether to log the training - metric for each model. - mem_thres: A float of the memory size constraint in bytes - pred_time_limit: A float of the prediction latency constraint in seconds - train_time_limit: A float of the training time constraint in seconds - X_val: None or a numpy array or a pandas dataframe of validation data - y_val: None or a numpy array or a pandas series of validation labels - sample_weight_val: None or a numpy array of the sample weight of - validation data. - groups: None or an array-like of shape (n,) | Group labels for the - samples used while splitting the dataset into train/valid set - verbose: int, default=1 | Controls the verbosity, higher means more - messages. - hpo_method: str or None, default=None | The hyperparameter - optimization method. When it is None, CFO is used. - No need to set when using flaml's default search space or using - a simple customized search space. When set to 'bs', BlendSearch - is used. BlendSearch can be tried when the search space is - complex, for example, containing multiple disjoint, discontinuous - subspaces. - starting_points: A dictionary to specify the starting hyperparameter - config for the estimators. - Keys are the names of the estimators, and values are the starting - hyperparameter configurations for the corresponding estimators. - seed: int or None, default=None | The random seed for np.random. 
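As an illustration of the `starting_points` argument described above, a hedged sketch (the hyperparameter names are examples only, not part of the patch):

        .. code-block:: python

            starting_points = {
                'lgbm': {'n_estimators': 100, 'num_leaves': 31},
            }
            automl.fit(X_train, y_train, starting_points=starting_points)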
- **fit_kwargs: Other keyword arguments to pass to fit() function of - the searched learners, such as sample_weight. Include period as - a keyword argument for 'forecast' task. - ''' - self._start_time_flag = time.time() - self._state.task = task - self._state.log_training_metric = log_training_metric - self._state.fit_kwargs = fit_kwargs - self._state.weight_val = sample_weight_val - self._state.groups = groups - - self._validate_data(X_train, y_train, dataframe, label, X_val, y_val) - self._search_states = {} # key: estimator name; value: SearchState - self._random = np.random.RandomState(RANDOM_SEED) - if seed is not None: - np.random.seed(seed) - self._learner_selector = learner_selector - old_level = logger.getEffectiveLevel() - self.verbose = verbose - if verbose == 0: - logger.setLevel(logging.WARNING) - if self._state.task == 'classification': - self._state.task = get_classification_objective( - len(np.unique(self._y_train_all))) - assert split_type in ["stratified", "uniform", "time"] - self._split_type = split_type - elif self._state.task == 'regression': - if split_type in ["uniform", "time"]: - self._split_type = split_type - else: - self._split_type = "uniform" - elif self._state.task == 'forecast': - if split_type is not None and split_type != 'time': - raise ValueError("split_type must be 'time' when task is 'forecast'. ") - self._split_type = "time" - if self._state.task == 'forecast' and self._state.fit_kwargs.get('period') is None: - raise TypeError("missing 1 required argument for 'forecast' task: 'period'. ") - if eval_method == 'auto' or self._state.X_val is not None: - eval_method = self._decide_eval_method(time_budget) - self._state.eval_method = eval_method - if (not mlflow or not mlflow.active_run()) and not logger.handlers: - # Add the console handler. 
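To make the customized-metric contract documented in the fit() docstring above concrete, a minimal sketch, not part of the patch (sklearn's mean_squared_error is used purely for illustration):

    from sklearn.metrics import mean_squared_error

    def custom_metric(X_test, y_test, estimator, labels,
                      X_train, y_train, weight_test=None, weight_train=None):
        # the first return value is minimized; the dict is logged as metrics
        test_loss = mean_squared_error(y_test, estimator.predict(X_test))
        train_loss = mean_squared_error(y_train, estimator.predict(X_train))
        return test_loss, {'train_loss': train_loss}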
- _ch = logging.StreamHandler() - _ch.setFormatter(logger_formatter) - logger.addHandler(_ch) - logger.info("Evaluation method: {}".format(eval_method)) - - self._retrain_full = retrain_full and ( - eval_method == 'holdout' and self._state.X_val is None) - if self._state.task != 'forecast': - self._prepare_data(eval_method, split_ratio, n_splits) - else: - self._prepare_data(eval_method, split_ratio, n_splits, - period=self._state.fit_kwargs.get('period')) - self._sample = sample and eval_method != 'cv' and ( - MIN_SAMPLE_TRAIN * SAMPLE_MULTIPLY_FACTOR < self._state.data_size) - if 'auto' == metric: - if 'binary' in self._state.task: - metric = 'roc_auc' - elif 'multi' in self._state.task: - metric = 'log_loss' - elif self._state.task == 'forecast': - metric = 'mape' - else: - metric = 'r2' - self._state.metric = metric - if metric in ['r2', 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', - 'f1', 'ap', 'micro_f1', 'macro_f1']: - error_metric = f"1-{metric}" - elif isinstance(metric, str): - error_metric = metric - else: - error_metric = 'customized metric' - logger.info(f'Minimizing error metric: {error_metric}') - - if 'auto' == estimator_list: - estimator_list = ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree'] - if 'regression' != self._state.task: - estimator_list += ['lrl1'] - if self._state.task == 'forecast': - estimator_list = ['fbprophet', 'arima', 'sarimax'] - for estimator_name in estimator_list: - if estimator_name not in self._state.learner_classes: - self.add_learner( - estimator_name, - get_estimator_class(self._state.task, estimator_name)) - # set up learner search space - for estimator_name in estimator_list: - estimator_class = self._state.learner_classes[estimator_name] - estimator_class.init() - self._search_states[estimator_name] = SearchState( - learner_class=estimator_class, - data_size=self._state.data_size, task=self._state.task, - starting_point=starting_points.get(estimator_name) - ) - logger.info("List of ML learners in AutoML Run: {}".format( - estimator_list)) - self.estimator_list = estimator_list - self._hpo_method = hpo_method or 'cfo' - self._state.time_budget = time_budget - self._active_estimators = estimator_list.copy() - self._ensemble = ensemble - self._max_iter = max_iter - self._mem_thres = mem_thres - self._pred_time_limit = pred_time_limit - self._state.train_time_limit = train_time_limit - self._log_type = log_type - self.split_ratio = split_ratio - self._save_model_history = model_history - self._state.n_jobs = n_jobs - if log_file_name: - with training_log_writer(log_file_name) as save_helper: - self._training_log = save_helper - self._search() - else: - self._training_log = None - self._search() - logger.info("fit succeeded") - logger.info(f"Time taken to find the best model: {self._time_taken_best_iter}") - if self._time_taken_best_iter >= time_budget * 0.7 and not \ - all(self._ever_converged_per_learner.values()): - logger.warn("Time taken to find the best model is {0:.0f}% of the " - "provided time budget and not all estimators' hyperparameter " - "search converged. 
Consider increasing the time budget.".format( - self._time_taken_best_iter / time_budget * 100)) - - if verbose == 0: - logger.setLevel(old_level) - - def _search(self): - # initialize the search_states - self._eci = [] - self._state.best_loss = float('+inf') - self._state.time_from_start = 0 - self._estimator_index = None - self._best_iteration = 0 - self._time_taken_best_iter = 0 - self._model_history = {} - self._config_history = {} - self._max_iter_per_learner = 1000000 # TODO - self._iter_per_learner = dict([(e, 0) for e in self.estimator_list]) - self._ever_converged_per_learner = dict([(e, False) for e in self.estimator_list]) - self._fullsize_reached = False - self._trained_estimator = None - self._best_estimator = None - self._retrained_config = {} - self._warn_threshold = 10 - - est_retrain_time = next_trial_time = 0 - best_config_sig = None - # use ConcurrencyLimiter to limit the amount of concurrency when - # using a search algorithm - better = True # whether we find a better model in one trial - if self._ensemble: - self.best_model = {} - try: - from ray import __version__ as ray_version - assert ray_version >= '1.0.0' - from ray.tune.suggest import ConcurrencyLimiter - except (ImportError, AssertionError): - from .searcher.suggestion import ConcurrencyLimiter - if self._hpo_method in ('cfo', 'grid'): - from flaml import CFO as SearchAlgo - elif 'optuna' == self._hpo_method: - try: - assert ray_version >= '1.0.0' - from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo - except (ImportError, AssertionError): - from .searcher.suggestion import OptunaSearch as SearchAlgo - elif 'bs' == self._hpo_method: - from flaml import BlendSearch as SearchAlgo - elif 'cfocat' == self._hpo_method: - from flaml.searcher.cfo_cat import CFOCat as SearchAlgo - else: - raise NotImplementedError( - f"hpo_method={self._hpo_method} is not recognized. 
" - "'cfo' and 'bs' are supported.") - - for self._track_iter in range(self._max_iter): - if self._estimator_index is None: - estimator = self._active_estimators[0] - else: - estimator = self._select_estimator(self._active_estimators) - if not estimator: - break - logger.info( - f"iteration {self._track_iter}, current learner {estimator}") - search_state = self._search_states[estimator] - self._state.time_from_start = time.time() - self._start_time_flag - time_left = self._state.time_budget - self._state.time_from_start - budget_left = time_left if not self._retrain_full or better or ( - not self.best_estimator) or self._search_states[ - self.best_estimator].sample_size < self._state.data_size \ - else time_left - est_retrain_time - if not search_state.search_alg: - search_state.training_function = partial( - AutoMLState._compute_with_config_base, - self._state, estimator) - search_space = search_state.search_space - if self._sample: - prune_attr = 'FLAML_sample_size' - min_resource = MIN_SAMPLE_TRAIN - max_resource = self._state.data_size - else: - prune_attr = min_resource = max_resource = None - learner_class = self._state.learner_classes.get(estimator) - if 'grid' == self._hpo_method: # for synthetic exp only - points_to_evaluate = [] - space = search_space - keys = list(space.keys()) - domain0, domain1 = space[keys[0]], space[keys[1]] - for x1 in range(domain0.lower, domain0.upper + 1): - for x2 in range(domain1.lower, domain1.upper + 1): - points_to_evaluate.append({ - keys[0]: x1, - keys[1]: x2, - }) - self._max_iter_per_learner = len(points_to_evaluate) - low_cost_partial_config = None - else: - points_to_evaluate = [search_state.init_config] - low_cost_partial_config = search_state.low_cost_partial_config - if self._hpo_method in ('bs', 'cfo', 'grid', 'cfocat'): - algo = SearchAlgo( - metric='val_loss', mode='min', space=search_space, - points_to_evaluate=points_to_evaluate, - low_cost_partial_config=low_cost_partial_config, - cat_hp_cost=search_state.cat_hp_cost, - prune_attr=prune_attr, - min_resource=min_resource, - max_resource=max_resource, - config_constraints=[ - (learner_class.size, '<=', self._mem_thres) - ], - metric_constraints=self.metric_constraints, - ) - else: - algo = SearchAlgo( - metric='val_loss', mode='min', space=search_space, - points_to_evaluate=points_to_evaluate, - ) - search_state.search_alg = ConcurrencyLimiter(algo, - max_concurrent=1) - else: - search_space = None - if self._hpo_method in ('bs', 'cfo', 'cfocat'): - search_state.search_alg.set_search_properties( - metric=None, mode=None, - config={ - 'metric_target': self._state.best_loss, - }, - ) - start_run_time = time.time() - analysis = tune.run( - search_state.training_function, - search_alg=search_state.search_alg, - time_budget_s=min(budget_left, self._state.train_time_limit), - verbose=max(self.verbose - 1, 0), - use_ray=False) - time_used = time.time() - start_run_time - better = False - if analysis.trials: - search_state.update(analysis, time_used=time_used, - save_model_history=self._save_model_history) - if self._estimator_index is None: - eci_base = search_state.init_eci - self._eci.append(search_state.estimated_cost4improvement) - for e in self.estimator_list[1:]: - self._eci.append(self._search_states[e].init_eci - / eci_base * self._eci[0]) - self._estimator_index = 0 - self._state.time_from_start = time.time() - self._start_time_flag - # logger.info(f"{self._search_states[estimator].sample_size}, {data_size}") - if search_state.sample_size == self._state.data_size: - 
self._iter_per_learner[estimator] += 1 - if not self._fullsize_reached: - self._fullsize_reached = True - if search_state.best_loss < self._state.best_loss: - best_config_sig = estimator + search_state.get_hist_config_sig( - self.data_size_full, - search_state.best_config) - self._state.best_loss = search_state.best_loss - self._best_estimator = estimator - est_retrain_time = search_state.est_retrain_time( - self.data_size_full) if ( - best_config_sig not in self._retrained_config) else 0 - self._config_history[self._track_iter] = ( - estimator, - search_state.best_config, - self._state.time_from_start) - if self._save_model_history: - self._model_history[ - self._track_iter] = search_state.trained_estimator - elif self._trained_estimator: - del self._trained_estimator - self._trained_estimator = None - self._trained_estimator = search_state.trained_estimator - self._best_iteration = self._track_iter - self._time_taken_best_iter = self._state.time_from_start - better = True - next_trial_time = search_state.time2eval_best - if better or self._log_type == 'all': - if self._training_log: - self._training_log.append( - self._iter_per_learner[estimator], - search_state.train_loss, - search_state.trial_time, - self._state.time_from_start, - search_state.val_loss, - search_state.config, - search_state.best_loss, - search_state.best_config, - estimator, - search_state.sample_size) - if mlflow is not None and mlflow.active_run(): - with mlflow.start_run(nested=True): - mlflow.log_metric('iter_counter', - self._iter_per_learner[estimator]) - mlflow.log_param('train_loss', - search_state.train_loss) - mlflow.log_metric('trial_time', - search_state.trial_time) - mlflow.log_metric('total_search_time', - self._state.time_from_start) - mlflow.log_metric('validation_loss', - search_state.val_loss) - mlflow.log_param('config', - search_state.config) - mlflow.log_param('learner', - estimator) - mlflow.log_param('sample_size', - search_state.sample_size) - mlflow.log_metric('best_validation_loss', - search_state.best_loss) - mlflow.log_param('best_config', - search_state.best_config) - mlflow.log_param('best_learner', - self._best_estimator) - logger.info( - " at {:.1f}s,\tbest {}'s error={:.4f},\tbest {}'s error={:.4f}".format( - self._state.time_from_start, - estimator, - search_state.best_loss, - self._best_estimator, - self._state.best_loss)) - searcher = search_state.search_alg.searcher - if searcher.is_ls_ever_converged and not self._ever_converged_per_learner[estimator]: - self._ever_converged_per_learner[estimator] = searcher.is_ls_ever_converged - if all(self._ever_converged_per_learner.values()) and \ - self._state.time_from_start > self._warn_threshold * self._time_taken_best_iter: - logger.warn("Local search has converged at least once for all estimators' hyperparameters, " - f"and the total search time exceeds {self._warn_threshold} times the time taken " - "to find the best model.") - self._warn_threshold *= 10 - else: - logger.info(f"not enough budget for learner {estimator}") - if self._estimator_index is not None: - self._active_estimators.remove(estimator) - self._estimator_index -= 1 - if self._retrain_full and best_config_sig and not better and ( - self._search_states[ - self._best_estimator].sample_size == self._state.data_size - ) and (est_retrain_time - <= self._state.time_budget - self._state.time_from_start - <= est_retrain_time + next_trial_time): - self._trained_estimator, \ - retrain_time = self._state._train_with_config( - self._best_estimator, - 
self._search_states[self._best_estimator].best_config, - self.data_size_full) - logger.info("retrain {} for {:.1f}s".format( - estimator, retrain_time)) - self._retrained_config[best_config_sig] = retrain_time - est_retrain_time = 0 - self._state.time_from_start = time.time() - self._start_time_flag - if (self._state.time_from_start >= self._state.time_budget - or not self._active_estimators): - break - if self._ensemble and self._best_estimator: - time_left = self._state.time_budget - self._state.time_from_start - time_ensemble = self._search_states[ - self._best_estimator].time2eval_best - if time_left < time_ensemble < 2 * time_left: - break - # Add a checkpoint for the current best config to the log. - if self._training_log: - self._training_log.checkpoint() - if self._best_estimator: - self._selected = self._search_states[self._best_estimator] - self._trained_estimator = self._selected.trained_estimator - self.modelcount = sum( - search_state.total_iter - for search_state in self._search_states.values()) - if self._trained_estimator: - logger.info(f'selected model: {self._trained_estimator.model}') - if self._ensemble: - search_states = list(x for x in self._search_states.items() - if x[1].trained_estimator) - search_states.sort(key=lambda x: x[1].best_loss) - estimators = [(x[0], x[1].trained_estimator) - for x in search_states[:2]] - estimators += [ - (x[0], x[1].trained_estimator) for x in search_states[2:] - if x[1].best_loss < 4 * self._selected.best_loss] - logger.info(estimators) - if len(estimators) <= 1: - return - if self._state.task != "regression": - from sklearn.ensemble import StackingClassifier as Stacker - for e in estimators: - e[1]._estimator_type = 'classifier' - else: - from sklearn.ensemble import StackingRegressor as Stacker - best_m = self._trained_estimator - stacker = Stacker(estimators, best_m, n_jobs=self._state.n_jobs, - passthrough=True) - if self._sample_weight_full is not None: - self._state.fit_kwargs[ - 'sample_weight'] = self._sample_weight_full - stacker.fit(self._X_train_all, self._y_train_all, - **self._state.fit_kwargs) - logger.info(f'ensemble: {stacker}') - self._trained_estimator = stacker - self._trained_estimator.model = stacker - else: - self._selected = self._trained_estimator = None - self.modelcount = 0 - if self.model and mlflow is not None and mlflow.active_run(): - mlflow.sklearn.log_model(self.model, 'best_model') - - def __del__(self): - if hasattr(self, '_trained_estimator') and self._trained_estimator \ - and hasattr(self._trained_estimator, 'cleanup'): - self._trained_estimator.cleanup() - del self._trained_estimator - - def _select_estimator(self, estimator_list): - if self._learner_selector == 'roundrobin': - self._estimator_index += 1 - if self._estimator_index == len(estimator_list): - self._estimator_index = 0 - return estimator_list[self._estimator_index] - min_estimated_cost, selected = np.Inf, None - inv = [] - untried_exists = False - for i, estimator in enumerate(estimator_list): - if estimator in self._search_states and ( - self._search_states[estimator].sample_size - ): # sample_size=None meaning no result - search_state = self._search_states[estimator] - if (self._search_states[estimator].time2eval_best - > self._state.time_budget - self._state.time_from_start - or self._iter_per_learner[estimator] - >= self._max_iter_per_learner): - inv.append(0) - continue - estimated_cost = search_state.estimated_cost4improvement - if search_state.sample_size < self._state.data_size: - estimated_cost = min( - estimated_cost, - 
search_state.time2eval_best * min( - SAMPLE_MULTIPLY_FACTOR, - self._state.data_size / search_state.sample_size)) - gap = search_state.best_loss - self._state.best_loss - if gap > 0 and not self._ensemble: - delta_loss = (search_state.best_loss_old - - search_state.best_loss) or search_state.best_loss - delta_time = (search_state.total_time_used - - search_state.time_best_found_old) or 1e-10 - speed = delta_loss / delta_time - if speed: - estimated_cost = max(2 * gap / speed, estimated_cost) - if estimated_cost == 0: - estimated_cost = 1e-10 - inv.append(1 / estimated_cost) - else: - estimated_cost = self._eci[i] - inv.append(0) - untried_exists = True - if estimated_cost < min_estimated_cost: - min_estimated_cost = estimated_cost - selected = estimator - if untried_exists or not selected: - state = self._search_states.get(selected) - if not (state and state.sample_size): - return selected - s = sum(inv) - p = self._random.rand() - q = 0 - for i in range(len(inv)): - if inv[i]: - q += inv[i] / s - if p < q: - return estimator_list[i] +'''! + * Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See LICENSE file in the + * project root for license information. +''' +import time +from typing import Callable, Optional +import warnings +from functools import partial +import numpy as np +from scipy.sparse import issparse +from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, \ + RepeatedKFold, GroupKFold, TimeSeriesSplit +from sklearn.utils import shuffle +import pandas as pd +import logging + +from .ml import compute_estimator, train_estimator, get_estimator_class, \ + get_classification_objective +from .config import ( + MIN_SAMPLE_TRAIN, MEM_THRES, RANDOM_SEED, + SMALL_LARGE_THRES, CV_HOLDOUT_THRESHOLD, SPLIT_RATIO, N_SPLITS, + SAMPLE_MULTIPLY_FACTOR) +from .data import concat +from . 
import tune +from .training_log import training_log_reader, training_log_writer + +logger = logging.getLogger(__name__) +logger_formatter = logging.Formatter( + '[%(name)s: %(asctime)s] {%(lineno)d} %(levelname)s - %(message)s', + '%m-%d %H:%M:%S') + +try: + import mlflow +except ImportError: + mlflow = None + + +class SearchState: + + @property + def search_space(self): + return self._search_space_domain + + @property + def estimated_cost4improvement(self): + return max(self.time_best_found - self.time_best_found_old, + self.total_time_used - self.time_best_found) + + def __init__(self, learner_class, data_size, task, starting_point=None): + self.init_eci = learner_class.cost_relative2lgbm() + self._search_space_domain = {} + self.init_config = {} + self.low_cost_partial_config = {} + self.cat_hp_cost = {} + self.data_size = data_size + self.ls_ever_converged = False + self.learner_class = learner_class + search_space = learner_class.search_space( + data_size=data_size, task=task) + for name, space in search_space.items(): + assert 'domain' in space + self._search_space_domain[name] = space['domain'] + if 'init_value' in space: + self.init_config[name] = space['init_value'] + if 'low_cost_init_value' in space: + self.low_cost_partial_config[name] = space[ + 'low_cost_init_value'] + if 'cat_hp_cost' in space: + self.cat_hp_cost[name] = space['cat_hp_cost'] + # if a starting point is provided, set the init config to be + # the starting point provided + if starting_point is not None and starting_point.get(name) is not None: + self.init_config[name] = starting_point[name] + self._hp_names = list(self._search_space_domain.keys()) + self.search_alg = None + self.best_config = None + self.best_loss = self.best_loss_old = np.inf + self.total_time_used = 0 + self.total_iter = 0 + self.base_eci = None + self.time_best_found = 0 + self.time2eval_best = 0 + self.time2eval_best_old = 0 + self.trained_estimator = None + self.sample_size = None + self.trial_time = 0 + + def update(self, result, time_used, save_model_history=False): + if result: + config = result['config'] + if config and 'FLAML_sample_size' in config: + self.sample_size = config['FLAML_sample_size'] + else: + self.sample_size = self.data_size + obj = result['val_loss'] + train_loss = result['train_loss'] + time2eval = result['time_total_s'] + trained_estimator = result['trained_estimator'] + del result['trained_estimator'] # free up RAM + else: + obj, time2eval, trained_estimator = np.inf, 0.0, None + train_loss = config = None + self.trial_time = time2eval + self.total_time_used += time_used + self.total_iter += 1 + + if self.base_eci is None: + self.base_eci = time_used + if (obj is not None) and (self.best_loss is None or obj < self.best_loss): + self.best_loss_old = self.best_loss if self.best_loss < np.inf \ + else 2 * obj + self.best_loss = obj + self.time_best_found_old = self.time_best_found + self.time_best_found = self.total_time_used + self.iter_best_found = self.total_iter + self.best_config = config + self.best_config_sample_size = self.sample_size + self.best_config_train_time = time_used + if time2eval: + self.time2eval_best_old = self.time2eval_best + self.time2eval_best = time2eval + if self.trained_estimator and trained_estimator and \ + self.trained_estimator != trained_estimator and \ + not save_model_history: + self.trained_estimator.cleanup() + if trained_estimator: + self.trained_estimator = trained_estimator + self.train_loss, self.val_loss, self.config = train_loss, obj, config + + def get_hist_config_sig(self, 
sample_size, config): + config_values = tuple([config[k] for k in self._hp_names]) + config_sig = str(sample_size) + '_' + str(config_values) + return config_sig + + def est_retrain_time(self, retrain_sample_size): + assert self.best_config_sample_size is not None, \ + 'need to first get best_config_sample_size' + return (self.time2eval_best * retrain_sample_size + / self.best_config_sample_size) + + +class AutoMLState: + + def _prepare_sample_train_data(self, sample_size): + full_size = len(self.y_train) + sampled_weight = None + if sample_size <= full_size: + if isinstance(self.X_train, pd.DataFrame): + sampled_X_train = self.X_train.iloc[:sample_size] + else: + sampled_X_train = self.X_train[:sample_size] + sampled_y_train = self.y_train[:sample_size] + weight = self.fit_kwargs.get('sample_weight') + if weight is not None: + sampled_weight = weight[:sample_size] + else: + sampled_X_train = self.X_train_all + sampled_y_train = self.y_train_all + if 'sample_weight' in self.fit_kwargs: + sampled_weight = self.sample_weight_all + return sampled_X_train, sampled_y_train, sampled_weight + + def _compute_with_config_base(self, + estimator, + config_w_resource): + if 'FLAML_sample_size' in config_w_resource: + sample_size = int(config_w_resource['FLAML_sample_size']) + else: + sample_size = self.data_size + sampled_X_train, sampled_y_train, sampled_weight = \ + self._prepare_sample_train_data(sample_size) + if sampled_weight is not None: + weight = self.fit_kwargs['sample_weight'] + self.fit_kwargs['sample_weight'] = sampled_weight + else: + weight = None + config = config_w_resource.copy() + if 'FLAML_sample_size' in config: + del config['FLAML_sample_size'] + time_left = self.time_budget - self.time_from_start + budget = time_left if sample_size == self.data_size else \ + time_left / 2 * sample_size / self.data_size + + trained_estimator, val_loss, train_loss, _, pred_time = \ + compute_estimator( + sampled_X_train, + sampled_y_train, + self.X_val, + self.y_val, + self.weight_val, + min(budget, self.train_time_limit), + self.kf, + config, + self.task, + estimator, + self.eval_method, + self.metric, + self.best_loss, + self.n_jobs, + self.learner_classes.get(estimator), + self.log_training_metric, + self.fit_kwargs) + result = { + 'pred_time': pred_time, + 'wall_clock_time': time.time() - self._start_time_flag, + 'train_loss': train_loss, + 'val_loss': val_loss, + 'trained_estimator': trained_estimator + } + if sampled_weight is not None: + self.fit_kwargs['sample_weight'] = weight + # tune.report(**result) + return result + + def _train_with_config( + self, estimator, config_w_resource, sample_size=None + ): + config = config_w_resource.copy() + if 'FLAML_sample_size' in config: + if not sample_size: + sample_size = config['FLAML_sample_size'] + del config['FLAML_sample_size'] + assert sample_size is not None + sampled_X_train, sampled_y_train, sampled_weight = \ + self._prepare_sample_train_data(sample_size) + if sampled_weight is not None: + weight = self.fit_kwargs['sample_weight'] + self.fit_kwargs['sample_weight'] = sampled_weight + else: + weight = None + budget = None if self.time_budget is None else ( + self.time_budget - self.time_from_start) + estimator, train_time = train_estimator( + sampled_X_train, + sampled_y_train, + config, + self.task, + estimator, + self.n_jobs, + self.learner_classes.get(estimator), + budget, + self.fit_kwargs) + if sampled_weight is not None: + self.fit_kwargs['sample_weight'] = weight + return estimator, train_time + + +def size(state: AutoMLState, 
config: dict) -> float: + '''Size function + + Returns: + The mem size in bytes for a config + ''' + config = config.get('ml', config) + estimator = config['learner'] + learner_class = state.learner_classes.get(estimator) + return learner_class.size(config) + + +class AutoML: + '''The AutoML class + + Example: + + .. code-block:: python + + automl = AutoML() + automl_settings = { + "time_budget": 60, + "metric": 'accuracy', + "task": 'classification', + "log_file_name": 'test/mylog.log', + } + automl.fit(X_train = X_train, y_train = y_train, + **automl_settings) + + ''' + + from .version import __version__ + + def __init__(self): + self._track_iter = 0 + self._state = AutoMLState() + self._state.learner_classes = {} + + @property + def model_history(self): + '''A dictionary of iter->model, storing the models when + the best model is updated each time. + ''' + return self._model_history + + @property + def config_history(self): + '''A dictionary of iter->(estimator, config, time), + storing the best estimator, config, and the time when the best + model is updated each time. + ''' + return self._config_history + + @property + def model(self): + '''An object with `predict()` and `predict_proba()` method (for + classification), storing the best trained model. + ''' + if self._trained_estimator: + return self._trained_estimator + else: + return None + + def best_model_for_estimator(self, estimator_name): + '''Return the best model found for a particular estimator + + Args: + estimator_name: a str of the estimator's name + + Returns: + An object with `predict()` and `predict_proba()` method (for + classification), storing the best trained model for estimator_name. + ''' + if estimator_name in self._search_states: + state = self._search_states[estimator_name] + if hasattr(state, 'trained_estimator'): + return state.trained_estimator + return None + + @property + def best_estimator(self): + '''A string indicating the best estimator found.''' + return self._best_estimator + + @property + def best_iteration(self): + '''An integer of the iteration number where the best + config is found.''' + return self._best_iteration + + @property + def best_config(self): + '''A dictionary of the best configuration.''' + return self._search_states[self._best_estimator].best_config + + @property + def best_config_per_estimator(self): + '''A dictionary of all estimators' best configuration.''' + return {e: e_search_state.best_config for e, e_search_state in + self._search_states.items()} + + @property + def best_loss(self): + '''A float of the best loss found + ''' + return self._state.best_loss + + @property + def best_config_train_time(self): + '''A float of the seconds taken by training the + best config.''' + return self._search_states[self._best_estimator].best_config_train_time + + @property + def classes_(self): + '''A list of n_classes elements for class labels.''' + if self._label_transformer: + return self._label_transformer.classes_.tolist() + if self._trained_estimator: + return self._trained_estimator.classes_.tolist() + return None + + def predict(self, X_test, freq=None): + '''Predict label from features. + + Args: + X_test: A numpy array of featurized instances, shape n * m, + or a pandas dataframe with one column with timestamp values + for 'forecasting' task. + freq: str or pandas offset, default=None | The frequency of the + time-series. + + Returns: + A numpy array of shape n * 1 - - each element is a predicted class + label for an instance. 
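A minimal sketch of the predict()/predict_proba() flow documented above; the toy dataset and short budget are illustrative assumptions, not part of the patch:

.. code-block:: python

    from sklearn.datasets import load_iris
    from flaml import AutoML

    X, y = load_iris(return_X_y=True)
    automl = AutoML()
    automl.fit(X_train=X, y_train=y, task='classification',
               time_budget=10, metric='accuracy')
    y_pred = automl.predict(X)           # class labels, shape (n,)
    y_proba = automl.predict_proba(X)    # probabilities, shape (n, c)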
+ ''' + if self._trained_estimator is None: + warnings.warn( + "No estimator is trained. Please run fit with enough budget.") + return None + X_test = self._preprocess(X_test) + if self._state.task == 'forecast': + X_test_df = pd.DataFrame(X_test) + X_test_col = list(X_test.columns)[0] + X_test_df = X_test_df.rename(columns={X_test_col: 'ds'}) + y_pred = self._trained_estimator.predict(X_test_df, freq=freq) + else: + y_pred = self._trained_estimator.predict(X_test) + if y_pred.ndim > 1 and isinstance(y_pred, np.ndarray): + y_pred = y_pred.flatten() + if self._label_transformer: + return self._label_transformer.inverse_transform(pd.Series( + y_pred)) + else: + return y_pred + + def predict_proba(self, X_test): + '''Predict the probability of each class from features, only works for + classification problems. + + Args: + X_test: A numpy array of featurized instances, shape n * m. + + Returns: + A numpy array of shape n * c. c is the # classes. Each element at + (i, j) is the probability for instance i to be in class j. + ''' + X_test = self._preprocess(X_test) + proba = self._trained_estimator.predict_proba(X_test) + return proba + + def _preprocess(self, X): + if issparse(X): + X = X.tocsr() + if self._transformer: + X = self._transformer.transform(X) + return X + + def _validate_data(self, X_train_all, y_train_all, dataframe, label, + X_val=None, y_val=None): + if self._state.task == 'forecast': + if dataframe is not None and label is not None: + dataframe = dataframe.copy() + dataframe = dataframe.rename(columns={label[0]: 'ds', label[1]: 'y'}) + elif dataframe is not None: + if ('ds' not in dataframe) or ('y' not in dataframe): + raise ValueError( + 'For forecasting task, Dataframe must have columns "ds" and "y" ' + 'with the dates and values respectively.' 
+                    )
+            elif (X_train_all is not None) and (y_train_all is not None):
+                dataframe = pd.DataFrame(X_train_all)
+                time_col = list(dataframe.columns)[0]
+                dataframe = dataframe.rename(columns={time_col: 'ds'})
+                dataframe['y'] = pd.Series(y_train_all)
+                X_train_all = None
+                y_train_all = None
+            label = 'y'
+
+        if X_train_all is not None and y_train_all is not None:
+            if not (isinstance(X_train_all, np.ndarray) or issparse(X_train_all)
+                    or isinstance(X_train_all, pd.DataFrame)):
+                raise ValueError(
+                    "X_train_all must be a numpy array, a pandas dataframe, "
+                    "or Scipy sparse matrix.")
+            if not (isinstance(y_train_all, np.ndarray)
+                    or isinstance(y_train_all, pd.Series)):
+                raise ValueError(
+                    "y_train_all must be a numpy array or a pandas series.")
+            if X_train_all.size == 0 or y_train_all.size == 0:
+                raise ValueError("Input data must not be empty.")
+            if isinstance(y_train_all, np.ndarray):
+                y_train_all = y_train_all.flatten()
+            if X_train_all.shape[0] != y_train_all.shape[0]:
+                raise ValueError(
+                    "# rows in X_train must match length of y_train.")
+            self._df = isinstance(X_train_all, pd.DataFrame)
+            self._nrow, self._ndim = X_train_all.shape
+            X, y = X_train_all, y_train_all
+        elif dataframe is not None and label is not None:
+            if not isinstance(dataframe, pd.DataFrame):
+                raise ValueError("dataframe must be a pandas DataFrame")
+            if label not in dataframe.columns:
+                raise ValueError("label must be a column name in dataframe")
+            self._df = True
+            X = dataframe.drop(columns=label)
+            self._nrow, self._ndim = X.shape
+            y = dataframe[label]
+        else:
+            raise ValueError(
+                "either X_train+y_train or dataframe+label is required")
+        if issparse(X_train_all) or self._state.task == 'forecast':
+            self._transformer = self._label_transformer = False
+            self._X_train_all, self._y_train_all = X, y
+        else:
+            from .data import DataTransformer
+            self._transformer = DataTransformer()
+            self._X_train_all, self._y_train_all = \
+                self._transformer.fit_transform(X, y, self._state.task)
+            self._label_transformer = self._transformer.label_transformer
+        self._sample_weight_full = self._state.fit_kwargs.get('sample_weight')
+        if X_val is not None and y_val is not None:
+            if not (isinstance(X_val, np.ndarray) or issparse(X_val)
+                    or isinstance(X_val, pd.DataFrame)):
+                raise ValueError(
+                    "X_val must be None, a numpy array, a pandas dataframe, "
+                    "or Scipy sparse matrix.")
+            if not (isinstance(y_val, np.ndarray)
+                    or isinstance(y_val, pd.Series)):
+                raise ValueError(
+                    "y_val must be None, a numpy array or a pandas series.")
+            if X_val.size == 0 or y_val.size == 0:
+                raise ValueError(
+                    "Validation data are expected to be nonempty. 
" + "Use None for X_val and y_val if no validation data.") + if isinstance(y_val, np.ndarray): + y_val = y_val.flatten() + if X_val.shape[0] != y_val.shape[0]: + raise ValueError("# rows in X_val must match length of y_val.") + if self._transformer: + self._state.X_val = self._transformer.transform(X_val) + else: + self._state.X_val = X_val + if self._label_transformer: + self._state.y_val = self._label_transformer.transform(y_val) + else: + self._state.y_val = y_val + else: + self._state.X_val = self._state.y_val = None + + def _prepare_data(self, + eval_method, + split_ratio, + n_splits, + period=None): + X_val, y_val = self._state.X_val, self._state.y_val + if issparse(X_val): + X_val = X_val.tocsr() + X_train_all, y_train_all = self._X_train_all, self._y_train_all + if issparse(X_train_all): + X_train_all = X_train_all.tocsr() + if self._state.task in ('binary:logistic', 'multi:softmax') \ + and self._state.fit_kwargs.get('sample_weight') is None \ + and self._split_type != 'time': + # logger.info(f"label {pd.unique(y_train_all)}") + label_set, counts = np.unique(y_train_all, return_counts=True) + # augment rare classes + rare_threshld = 20 + rare = counts < rare_threshld + rare_label, rare_counts = label_set[rare], counts[rare] + for i, label in enumerate(rare_label): + count = rare_count = rare_counts[i] + rare_index = y_train_all == label + n = len(y_train_all) + while count < rare_threshld: + if self._df: + X_train_all = concat(X_train_all, + X_train_all.iloc[:n].loc[rare_index]) + else: + X_train_all = concat(X_train_all, + X_train_all[:n][rare_index, :]) + if isinstance(y_train_all, pd.Series): + y_train_all = concat(y_train_all, + y_train_all.iloc[:n].loc[rare_index]) + else: + y_train_all = np.concatenate([y_train_all, + y_train_all[:n][rare_index]]) + count += rare_count + logger.info( + f"class {label} augmented from {rare_count} to {count}") + SHUFFLE_SPLIT_TYPES = ['uniform', 'stratified'] + if self._split_type in SHUFFLE_SPLIT_TYPES: + if self._sample_weight_full is not None: + X_train_all, y_train_all, self._state.sample_weight_all = \ + shuffle(X_train_all, y_train_all, self._sample_weight_full, + random_state=RANDOM_SEED) + self._state.fit_kwargs[ + 'sample_weight'] = self._state.sample_weight_all + elif hasattr(self._state, 'groups') and self._state.groups is not None: + X_train_all, y_train_all, self._state.groups = shuffle( + X_train_all, y_train_all, self._state.groups, + random_state=RANDOM_SEED) + else: + X_train_all, y_train_all = shuffle( + X_train_all, y_train_all, random_state=RANDOM_SEED) + if self._df: + X_train_all.reset_index(drop=True, inplace=True) + if isinstance(y_train_all, pd.Series): + y_train_all.reset_index(drop=True, inplace=True) + + X_train, y_train = X_train_all, y_train_all + if X_val is None: + # if eval_method = holdout, make holdout data + if eval_method == 'holdout' and self._split_type == 'time': + if 'period' in self._state.fit_kwargs: + num_samples = X_train_all.shape[0] + split_idx = num_samples - self._state.fit_kwargs.get('period') + X_train = X_train_all[:split_idx] + y_train = y_train_all[:split_idx] + X_val = X_train_all[split_idx:] + y_val = y_train_all[split_idx:] + else: + if 'sample_weight' in self._state.fit_kwargs: + X_train, X_val, y_train, y_val, self._state.fit_kwargs[ + 'sample_weight'], self._state.weight_val = \ + train_test_split( + X_train_all, + y_train_all, + self._state.fit_kwargs['sample_weight'], + test_size=split_ratio, + shuffle=False) + else: + X_train, X_val, y_train, y_val = train_test_split( + 
X_train_all, + y_train_all, + test_size=split_ratio, + shuffle=False) + elif self._state.task != 'regression' and eval_method == 'holdout': + # for classification, make sure the labels are complete in both + # training and validation data + label_set, first = np.unique(y_train_all, return_index=True) + rest = [] + last = 0 + first.sort() + for i in range(len(first)): + rest.extend(range(last, first[i])) + last = first[i] + 1 + rest.extend(range(last, len(y_train_all))) + X_first = X_train_all.iloc[first] if self._df else X_train_all[ + first] + X_rest = X_train_all.iloc[rest] if self._df else X_train_all[rest] + y_rest = y_train_all.iloc[rest] if isinstance( + y_train_all, pd.Series) else y_train_all[rest] + stratify = y_rest if self._split_type == 'stratified' else \ + None + if 'sample_weight' in self._state.fit_kwargs: + X_train, X_val, y_train, y_val, weight_train, weight_val = \ + train_test_split( + X_rest, + y_rest, + self._state.fit_kwargs['sample_weight'][rest], + test_size=split_ratio, + random_state=RANDOM_SEED) + weight1 = self._state.fit_kwargs['sample_weight'][first] + self._state.weight_val = concat(weight1, weight_val) + self._state.fit_kwargs['sample_weight'] = concat( + weight1, weight_train) + else: + X_train, X_val, y_train, y_val = train_test_split( + X_rest, + y_rest, + test_size=split_ratio, + stratify=stratify, + random_state=RANDOM_SEED) + X_train = concat(X_first, X_train) + y_train = concat( + label_set, y_train) if self._df else np.concatenate( + [label_set, y_train]) + X_val = concat(X_first, X_val) + y_val = concat(label_set, y_val) if self._df else \ + np.concatenate([label_set, y_val]) + elif eval_method == 'holdout' and self._state.task == 'regression': + if 'sample_weight' in self._state.fit_kwargs: + X_train, X_val, y_train, y_val, self._state.fit_kwargs[ + 'sample_weight'], self._state.weight_val = \ + train_test_split( + X_train_all, + y_train_all, + self._state.fit_kwargs['sample_weight'], + test_size=split_ratio, + random_state=RANDOM_SEED) + else: + X_train, X_val, y_train, y_val = train_test_split( + X_train_all, + y_train_all, + test_size=split_ratio, + random_state=RANDOM_SEED) + self._state.data_size = X_train.shape[0] + self.data_size_full = len(y_train_all) + self._state.X_train, self._state.y_train, self._state.X_val, \ + self._state.y_val = (X_train, y_train, X_val, y_val) + self._state.X_train_all = X_train_all + self._state.y_train_all = y_train_all + if hasattr(self._state, 'groups') and self._state.groups is not None: + logger.info("Using GroupKFold") + assert len(self._state.groups) == y_train_all.size, \ + "the length of groups must match the number of examples" + assert len(np.unique(self._state.groups)) >= n_splits, \ + "the number of groups must be equal or larger than n_splits" + self._state.kf = GroupKFold(n_splits) + self._state.kf.groups = self._state.groups + elif self._split_type == "stratified": + logger.info("Using StratifiedKFold") + assert y_train_all.size >= n_splits, ( + f"{n_splits}-fold cross validation" + f" requires input data with at least {n_splits} examples.") + assert y_train_all.size >= 2 * n_splits, ( + f"{n_splits}-fold cross validation with metric=r2 " + f"requires input data with at least {n_splits*2} examples.") + self._state.kf = RepeatedStratifiedKFold( + n_splits=n_splits, n_repeats=1, random_state=RANDOM_SEED) + elif self._split_type == "time": + logger.info("Using TimeSeriesSplit") + if self._state.task == 'forecast': + self._state.kf = TimeSeriesSplit( + n_splits=n_splits, 
test_size=self._state.fit_kwargs.get('period')) + else: + self._state.kf = TimeSeriesSplit(n_splits=n_splits) + else: + logger.info("Using RepeatedKFold") + self._state.kf = RepeatedKFold( + n_splits=n_splits, n_repeats=1, random_state=RANDOM_SEED) + + def add_learner(self, + learner_name, + learner_class): + '''Add a customized learner + + Args: + learner_name: A string of the learner's name + learner_class: A subclass of flaml.model.BaseEstimator + ''' + self._state.learner_classes[learner_name] = learner_class + + def get_estimator_from_log(self, log_file_name, record_id, task): + '''Get the estimator from log file + + Args: + log_file_name: A string of the log file name + record_id: An integer of the record ID in the file, + 0 corresponds to the first trial + task: A string of the task type, + 'binary', 'multi', or 'regression' + + Returns: + An estimator object for the given configuration + ''' + + with training_log_reader(log_file_name) as reader: + record = reader.get_record(record_id) + estimator = record.learner + config = record.config + + estimator, _ = train_estimator( + None, None, config, task, estimator, + estimator_class=self._state.learner_classes.get(estimator)) + return estimator + + def retrain_from_log(self, + log_file_name, + X_train=None, + y_train=None, + dataframe=None, + label=None, + time_budget=0, + task='classification', + eval_method='auto', + split_ratio=SPLIT_RATIO, + n_splits=N_SPLITS, + split_type="stratified", + n_jobs=1, + train_best=True, + train_full=False, + record_id=-1, + **fit_kwargs): + '''Retrain from log file + + Args: + time_budget: A float number of the time budget in seconds + log_file_name: A string of the log file name + X_train: A numpy array of training data in shape n*m + y_train: A numpy array of labels in shape n*1 + task: A string of the task type, e.g., + 'classification', 'regression' + eval_method: A string of resampling strategy, one of + ['auto', 'cv', 'holdout'] + split_ratio: A float of the validation data percentage for holdout + n_splits: An integer of the number of folds for cross-validation + n_jobs: An integer of the number of threads for training + train_best: A boolean of whether to train the best config in the + time budget; if false, train the last config in the budget + train_full: A boolean of whether to train on the full data. If true, + eval_method and sample_size in the log file will be ignored + record_id: the ID of the training log record from which the model will + be retrained. By default `record_id = -1` which means this will be + ignored. `record_id = 0` corresponds to the first trial, and + when `record_id >= 0`, `time_budget` will be ignored. 
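As a hedged sketch of how the retrain_from_log() API documented here might be called (the log path, toy data, and budget are assumptions for illustration):

.. code-block:: python

    import numpy as np
    from flaml import AutoML

    X_train = np.random.rand(100, 4)
    y_train = np.random.randint(2, size=100)
    automl = AutoML()
    # Retrain the best config recorded within the first 60 seconds of an
    # earlier search that logged to 'test/mylog.log'.
    automl.retrain_from_log(
        log_file_name='test/mylog.log',
        X_train=X_train, y_train=y_train,
        task='classification', time_budget=60,
        train_full=True)  # ignore the logged sample_size; use all rows
    print(automl.model)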
+            **fit_kwargs: Other keyword arguments to pass to fit() function of
+                the searched learners, such as sample_weight.
+        '''
+        self._state.task = task
+        self._state.fit_kwargs = fit_kwargs
+        self._validate_data(X_train, y_train, dataframe, label)
+
+        logger.info('log file name {}'.format(log_file_name))
+
+        best_config = None
+        best_val_loss = float('+inf')
+        best_estimator = None
+        sample_size = None
+        time_used = 0.0
+        training_duration = 0
+        best = None
+        with training_log_reader(log_file_name) as reader:
+            if record_id >= 0:
+                best = reader.get_record(record_id)
+            else:
+                for record in reader.records():
+                    time_used = record.wall_clock_time
+                    if time_used > time_budget:
+                        break
+                    training_duration = time_used
+                    val_loss = record.validation_loss
+                    if val_loss <= best_val_loss or not train_best:
+                        if val_loss == best_val_loss and train_best:
+                            size = record.sample_size
+                            if size > sample_size:
+                                best = record
+                                best_val_loss = val_loss
+                                sample_size = size
+                        else:
+                            best = record
+                            size = record.sample_size
+                            best_val_loss = val_loss
+                            sample_size = size
+                if not training_duration:
+                    from .model import BaseEstimator as Estimator
+                    self._trained_estimator = Estimator()
+                    self._trained_estimator.model = None
+                    return training_duration
+        if not best:
+            return
+        best_estimator = best.learner
+        best_config = best.config
+        sample_size = len(self._y_train_all) if train_full \
+            else best.sample_size
+
+        logger.info(
+            'estimator = {}, config = {}, #training instances = {}'.format(
+                best_estimator, best_config, sample_size))
+        # Partially copied from fit() function
+        # Initialize some attributes required for retrain_from_log
+        self._state.task = task
+        if self._state.task == 'classification':
+            self._state.task = get_classification_objective(
+                len(np.unique(self._y_train_all)))
+            assert split_type in ["stratified", "uniform", "time"]
+            self._split_type = split_type
+        elif self._state.task == 'regression':
+            if split_type in ["uniform", "time"]:
+                self._split_type = split_type
+            else:
+                self._split_type = "uniform"
+        elif self._state.task == 'forecast':
+            self._split_type = "time"
+        if record_id >= 0:
+            eval_method = 'cv'
+        elif eval_method == 'auto':
+            eval_method = self._decide_eval_method(time_budget)
+        self.modelcount = 0
+        self._prepare_data(eval_method, split_ratio, n_splits)
+        self._state.time_budget = None
+        self._state.n_jobs = n_jobs
+        self._trained_estimator = self._state._train_with_config(
+            best_estimator, best_config, sample_size)[0]
+        logger.info('retrain from log succeeded')
+        return training_duration
+
+    def _decide_eval_method(self, time_budget):
+        if self._state.X_val is not None:
+            return 'holdout'
+        nrow, dim = self._nrow, self._ndim
+        if nrow * dim / 0.9 < SMALL_LARGE_THRES * (
+                time_budget / 3600) and nrow < CV_HOLDOUT_THRESHOLD:
+            # time allows or sampling can be used and cv is necessary
+            return 'cv'
+        else:
+            return 'holdout'
+
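The holdout-vs-cv rule in _decide_eval_method() above can be made concrete with a small standalone sketch; the two thresholds are assumed to keep their defaults from flaml.config:

.. code-block:: python

    # Assumed defaults from flaml.config (treat as illustrative).
    SMALL_LARGE_THRES = 10 ** 7
    CV_HOLDOUT_THRESHOLD = 100000

    def decide_eval_method(nrow, dim, time_budget):
        # cross-validation only when the data is small relative to the budget
        if nrow * dim / 0.9 < SMALL_LARGE_THRES * (
                time_budget / 3600) and nrow < CV_HOLDOUT_THRESHOLD:
            return 'cv'
        return 'holdout'

    print(decide_eval_method(nrow=1000, dim=20, time_budget=60))     # cv
    print(decide_eval_method(nrow=500000, dim=20, time_budget=60))   # holdout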
+    @property
+    def search_space(self) -> dict:
+        '''Search space
+        Must be called after fit(...) (use max_iter=0 to prevent actual fitting)
+
+        Returns:
+            A dict of the search space
+        '''
+        estimator_list = self.estimator_list
+        if len(estimator_list) == 1:
+            estimator = estimator_list[0]
+            space = self._search_states[estimator].search_space.copy()
+            space['learner'] = estimator
+            return space
+        choices = []
+        for estimator in estimator_list:
+            space = self._search_states[estimator].search_space.copy()
+            space['learner'] = estimator
+            choices.append(space)
+        return {'ml': tune.choice(choices)}
+
+    @property
+    def low_cost_partial_config(self) -> dict:
+        '''Low cost partial config
+
+        Returns:
+            A dict.
+            (a) if there is only one estimator in estimator_list, each key is a
+            hyperparameter name.
+            (b) otherwise, it is a nested dict with 'ml' as the key, and
+            a list of the low_cost_partial_configs as the value, corresponding
+            to each learner's low_cost_partial_config; the estimator index as
+            an integer corresponding to the cheapest learner is appended to the
+            list at the end.
+
+        '''
+        if len(self.estimator_list) == 1:
+            estimator = self.estimator_list[0]
+            c = self._search_states[estimator].low_cost_partial_config
+            return c
+        else:
+            configs = []
+            for estimator in self.estimator_list:
+                c = self._search_states[estimator].low_cost_partial_config
+                configs.append(c)
+            configs.append(np.argmin([
+                self._state.learner_classes.get(estimator).cost_relative2lgbm()
+                for estimator in self.estimator_list]))
+            config = {'ml': configs}
+        return config
+
+    @property
+    def cat_hp_cost(self) -> dict:
+        '''Categorical hyperparameter cost
+
+        Returns:
+            A dict.
+            (a) if there is only one estimator in estimator_list, each key is a
+            hyperparameter name.
+            (b) otherwise, it is a nested dict with 'ml' as the key, and
+            a list of the cat_hp_costs as the value, corresponding
+            to each learner's cat_hp_cost; the cost relative to lgbm for each
+            learner (as a list itself) is appended to the list at the end.
+
+        '''
+        if len(self.estimator_list) == 1:
+            estimator = self.estimator_list[0]
+            c = self._search_states[estimator].cat_hp_cost
+            return c
+        else:
+            configs = []
+            for estimator in self.estimator_list:
+                c = self._search_states[estimator].cat_hp_cost
+                configs.append(c)
+            configs.append([
+                self._state.learner_classes.get(estimator).cost_relative2lgbm()
+                for estimator in self.estimator_list])
+            config = {'ml': configs}
+        return config
+
+    @property
+    def points_to_evaluate(self) -> list:
+        '''Initial points to evaluate
+
+        Returns:
+            A list of dicts. 
Each dict is the initial point for each learner + ''' + points = [] + for estimator in self.estimator_list: + config = self._search_states[estimator].init_config + config['learner'] = estimator + if len(self.estimator_list) > 1: + points.append({'ml': config}) + else: + points.append(config) + return points + + @property + def prune_attr(self) -> Optional[str]: + '''Attribute for pruning + + Returns: + A string for the sample size attribute or None + ''' + return 'FLAML_sample_size' if self._sample else None + + @property + def min_resource(self) -> Optional[float]: + '''Attribute for pruning + + Returns: + A float for the minimal sample size or None + ''' + return MIN_SAMPLE_TRAIN if self._sample else None + + @property + def max_resource(self) -> Optional[float]: + '''Attribute for pruning + + Returns: + A float for the maximal sample size or None + ''' + return self._state.data_size if self._sample else None + + @property + def trainable(self) -> Callable[[dict], Optional[float]]: + '''Training function + + Returns: + A function that evaluates each config and returns the loss + ''' + self._state.time_from_start = 0 + for estimator in self.estimator_list: + search_state = self._search_states[estimator] + if not hasattr(search_state, 'training_function'): + search_state.training_function = partial( + AutoMLState._compute_with_config_base, + self._state, estimator) + states = self._search_states + mem_res = self._mem_thres + + def train(config: dict): + sample_size = config.get('FLAML_sample_size') + config = config.get('ml', config).copy() + if sample_size: + config['FLAML_sample_size'] = sample_size + estimator = config['learner'] + # check memory constraints before training + if states[estimator].learner_class.size(config) <= mem_res: + del config['learner'] + result = states[estimator].training_function(config) + return result + else: + return {'pred_time': 0, + 'wall_clock_time': None, + 'train_loss': np.inf, + 'val_loss': np.inf, + 'trained_estimator': None + } + return train + + @property + def metric_constraints(self) -> list: + '''Metric constraints + + Returns: + A list of the metric constraints + ''' + constraints = [] + if np.isfinite(self._pred_time_limit): + constraints.append( + ('pred_time', '<=', self._pred_time_limit)) + return constraints + + def fit(self, + X_train=None, + y_train=None, + dataframe=None, + label=None, + metric='auto', + task='classification', + n_jobs=-1, + log_file_name='flaml.log', + estimator_list='auto', + time_budget=60, + max_iter=1000000, + sample=True, + ensemble=False, + eval_method='auto', + log_type='better', + model_history=False, + split_ratio=SPLIT_RATIO, + n_splits=N_SPLITS, + log_training_metric=False, + mem_thres=MEM_THRES, + pred_time_limit=np.inf, + train_time_limit=np.inf, + X_val=None, + y_val=None, + sample_weight_val=None, + groups=None, + verbose=1, + retrain_full=True, + split_type="stratified", + learner_selector='sample', + hpo_method=None, + starting_points={}, + seed=None, + n_concurrent_trials=1, + **fit_kwargs): + '''Find a model for a given task + + Args: + X_train: A numpy array or a pandas dataframe of training data in + shape (n, m). For 'forecast' task, X_train should contain a + single column of timestamps. + y_train: A numpy array or a pandas series of labels in shape (n, ). + dataframe: A dataframe of training data including label column. + For 'forecast' task, dataframe must be specified and should + have two columns: timestamp and value. 
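The tune-facing properties defined above (search_space, trainable, points_to_evaluate, prune_attr, min_resource, max_resource) can also drive flaml.tune directly; a hedged sketch using the max_iter=0 trick noted in the search_space docstring (the toy data and exact wiring are assumptions):

.. code-block:: python

    import numpy as np
    from flaml import AutoML, tune

    X, y = np.random.rand(1000, 10), np.random.randint(2, size=1000)
    automl = AutoML()
    # max_iter=0 populates the search states without training any model.
    automl.fit(X_train=X, y_train=y, task='classification', max_iter=0)
    analysis = tune.run(
        automl.trainable,                 # evaluates one config
        config=automl.search_space,       # nested under 'ml' for >1 learner
        low_cost_partial_config=automl.low_cost_partial_config,
        points_to_evaluate=automl.points_to_evaluate,
        cat_hp_cost=automl.cat_hp_cost,
        prune_attr=automl.prune_attr,     # 'FLAML_sample_size' when sampling
        min_resource=automl.min_resource,
        max_resource=automl.max_resource,
        metric='val_loss', mode='min',
        time_budget_s=60, num_samples=100)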
+            label: A str of the label column name for 'classification' or
+                'regression' task, e.g., 'label';
+                or a tuple of strings for timestamp and value columns for
+                'forecasting' task, e.g., ('timestamp', 'value').
+                Note: If X_train and y_train are provided,
+                dataframe and label are ignored;
+                If not, dataframe and label must be provided.
+            metric: A string of the metric name or a function,
+                e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo',
+                'f1', 'micro_f1', 'macro_f1', 'log_loss', 'mae', 'mse', 'r2',
+                'mape'.
+                If passing a customized metric function, the function needs to
+                have the following signature:
+
+                .. code-block:: python
+
+                    def custom_metric(
+                        X_test, y_test, estimator, labels,
+                        X_train, y_train, weight_test=None, weight_train=None
+                    ):
+                        return metric_to_minimize, metrics_to_log
+
+                which returns a float number as the minimization objective,
+                and a tuple of floats or a dictionary as the metrics to log.
+            task: A string of the task type, e.g.,
+                'classification', 'regression', 'forecast'.
+            n_jobs: An integer of the number of threads for training.
+            log_file_name: A string of the log file name.
+            estimator_list: A list of strings for estimator names, or 'auto',
+                e.g.,
+
+                .. code-block:: python
+
+                    ['lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree']
+
+            time_budget: A float number of the time budget in seconds.
+            max_iter: An integer of the maximal number of iterations.
+            sample: A boolean of whether to sample the training data during
+                search.
+            eval_method: A string of resampling strategy, one of
+                ['auto', 'cv', 'holdout'].
+            split_ratio: A float of the validation data percentage for holdout.
+            n_splits: An integer of the number of folds for cross-validation.
+            log_type: A string of the log type, one of
+                ['better', 'all'].
+                'better' only logs configs with better loss than previous iters;
+                'all' logs all the tried configs.
+            model_history: A boolean of whether to keep the history of best
+                models in the history property. Make sure memory is large
+                enough if setting to True.
+            log_training_metric: A boolean of whether to log the training
+                metric for each model.
+            mem_thres: A float of the memory size constraint in bytes.
+            pred_time_limit: A float of the prediction latency constraint in seconds.
+            train_time_limit: A float of the training time constraint in seconds.
+            X_val: None or a numpy array or a pandas dataframe of validation data.
+            y_val: None or a numpy array or a pandas series of validation labels.
+            sample_weight_val: None or a numpy array of the sample weight of
+                validation data.
+            groups: None or an array-like of shape (n,) | Group labels for the
+                samples used while splitting the dataset into train/valid set.
+            verbose: int, default=1 | Controls the verbosity, higher means more
+                messages.
+            retrain_full: bool or str, default=True | whether to retrain the
+                selected model on the full training data when using holdout.
+                True - retrain only after search finishes; False - no retraining;
+                'budget' - do best effort to retrain without violating the time
+                budget.
+            hpo_method: str or None, default=None | The hyperparameter
+                optimization method. When it is None, CFO is used.
+                No need to set when using flaml's default search space or using
+                a simple customized search space. When set to 'bs', BlendSearch
+                is used. BlendSearch can be tried when the search space is
+                complex, for example, containing multiple disjoint, discontinuous
+                subspaces. When set to 'random' and the argument 'n_concurrent_trials'
+                is larger than 1, RandomSearch is used.
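A hedged sketch of the hpo_method options just described (toy data and budgets are placeholders; 'random' with n_concurrent_trials > 1 requires `pip install flaml[ray]`, as noted below):

.. code-block:: python

    import numpy as np
    from flaml import AutoML

    X, y = np.random.rand(1000, 10), np.random.randint(2, size=1000)
    automl = AutoML()
    # BlendSearch, e.g., for a complex customized search space.
    automl.fit(X_train=X, y_train=y, task='classification',
               time_budget=60, hpo_method='bs')
    # Random search across 4 concurrent ray trials.
    automl.fit(X_train=X, y_train=y, task='classification',
               time_budget=60, hpo_method='random', n_concurrent_trials=4)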
+            starting_points: A dictionary to specify the starting hyperparameter
+                config for the estimators.
+                Keys are the name of the estimators, and values are the starting
+                hyperparameter configurations for the corresponding estimators.
+            seed: int or None, default=None | The random seed for np.random.
+            n_concurrent_trials: [Experimental] int, default=1 | The number of
+                concurrent trials. For n_concurrent_trials > 1, installation of
+                ray is required: `pip install flaml[ray]`.
+            **fit_kwargs: Other keyword arguments to pass to fit() function of
+                the searched learners, such as sample_weight. Include period as
+                a keyword argument for 'forecast' task.
+        '''
+        self._state._start_time_flag = self._start_time_flag = time.time()
+        self._state.task = task
+        self._state.log_training_metric = log_training_metric
+        self._state.fit_kwargs = fit_kwargs
+        self._state.weight_val = sample_weight_val
+        self._state.groups = groups
+
+        self._validate_data(X_train, y_train, dataframe, label, X_val, y_val)
+        self._search_states = {}  # key: estimator name; value: SearchState
+        self._random = np.random.RandomState(RANDOM_SEED)
+        if seed is not None:
+            np.random.seed(seed)
+        self._learner_selector = learner_selector
+        old_level = logger.getEffectiveLevel()
+        self.verbose = verbose
+        if verbose == 0:
+            logger.setLevel(logging.WARNING)
+        if self._state.task == 'classification':
+            self._state.task = get_classification_objective(
+                len(np.unique(self._y_train_all)))
+            assert split_type in ["stratified", "uniform", "time"]
+            self._split_type = split_type
+        elif self._state.task == 'regression':
+            if split_type in ["uniform", "time"]:
+                self._split_type = split_type
+            else:
+                self._split_type = "uniform"
+        elif self._state.task == 'forecast':
+            if split_type is not None and split_type != 'time':
+                raise ValueError(
+                    "split_type must be 'time' when task is 'forecast'.")
+            self._split_type = "time"
+            if self._state.fit_kwargs.get('period') is None:
+                raise TypeError(
+                    "missing 1 required argument for 'forecast' task: 'period'.")
+        if eval_method == 'auto' or self._state.X_val is not None:
+            eval_method = self._decide_eval_method(time_budget)
+        self._state.eval_method = eval_method
+        if (not mlflow or not mlflow.active_run()) and not logger.handlers:
+            # Add the console handler.
+ _ch = logging.StreamHandler() + _ch.setFormatter(logger_formatter) + logger.addHandler(_ch) + logger.info("Evaluation method: {}".format(eval_method)) + + self._retrain_in_budget = retrain_full == 'budget' and ( + eval_method == 'holdout' and self._state.X_val is None) + self._retrain_final = retrain_full is True and ( + eval_method == 'holdout' and self._state.X_val is None) or ( + eval_method == 'cv') + if self._state.task != 'forecast': + self._prepare_data(eval_method, split_ratio, n_splits) + else: + self._prepare_data(eval_method, split_ratio, n_splits, + period=self._state.fit_kwargs['period']) + self._sample = sample and eval_method != 'cv' and ( + MIN_SAMPLE_TRAIN * SAMPLE_MULTIPLY_FACTOR < self._state.data_size) + if 'auto' == metric: + if 'binary' in self._state.task: + metric = 'roc_auc' + elif 'multi' in self._state.task: + metric = 'log_loss' + elif self._state.task == 'forecast': + metric = 'mape' + else: + metric = 'r2' + self._state.metric = metric + if metric in ['r2', 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo', + 'f1', 'ap', 'micro_f1', 'macro_f1']: + error_metric = f"1-{metric}" + elif isinstance(metric, str): + error_metric = metric + else: + error_metric = 'customized metric' + logger.info(f'Minimizing error metric: {error_metric}') + + if 'auto' == estimator_list: + if self._state.task == 'forecast': + estimator_list = ['fbprophet', 'arima', 'sarimax'] + else: + estimator_list = [ + 'lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree'] + if 'regression' != self._state.task: + estimator_list += ['lrl1'] + for estimator_name in estimator_list: + if estimator_name not in self._state.learner_classes: + self.add_learner( + estimator_name, + get_estimator_class(self._state.task, estimator_name)) + # set up learner search space + for estimator_name in estimator_list: + estimator_class = self._state.learner_classes[estimator_name] + estimator_class.init() + self._search_states[estimator_name] = SearchState( + learner_class=estimator_class, + data_size=self._state.data_size, task=self._state.task, + starting_point=starting_points.get(estimator_name) + ) + logger.info("List of ML learners in AutoML Run: {}".format( + estimator_list)) + self.estimator_list = estimator_list + self._hpo_method = hpo_method or 'cfo' + self._state.time_budget = time_budget + self._active_estimators = estimator_list.copy() + self._ensemble = ensemble + self._max_iter = max_iter + self._mem_thres = mem_thres + self._pred_time_limit = pred_time_limit + self._state.train_time_limit = train_time_limit + self._log_type = log_type + self.split_ratio = split_ratio + self._save_model_history = model_history + self._state.n_jobs = n_jobs + self._n_concurrent_trials = n_concurrent_trials + if log_file_name: + with training_log_writer(log_file_name) as save_helper: + self._training_log = save_helper + self._search() + else: + self._training_log = None + self._search() + if self._best_estimator: + logger.info("fit succeeded") + logger.info(f"Time taken to find the best model: {self._time_taken_best_iter}") + if self._time_taken_best_iter >= time_budget * 0.7 and not all( + state.search_alg and state.search_alg.searcher.is_ls_ever_converged + for state in self._search_states.values() + ): + logger.warn("Time taken to find the best model is {0:.0f}% of the " + "provided time budget and not all estimators' hyperparameter " + "search converged. 
Consider increasing the time budget.".format(
+                        self._time_taken_best_iter / time_budget * 100))
+
+        if verbose == 0:
+            logger.setLevel(old_level)
+
+    def _search_parallel(self):
+        try:
+            from ray import __version__ as ray_version
+            assert ray_version >= '1.0.0'
+            import ray
+            from ray.tune.suggest import ConcurrencyLimiter
+        except (ImportError, AssertionError):
+            raise ImportError(
+                "n_concurrent_trials > 1 requires installation of ray. "
+                "Please run pip install flaml[ray]")
+        if self._hpo_method in ('cfo', 'grid'):
+            from flaml import CFO as SearchAlgo
+        elif 'optuna' == self._hpo_method:
+            from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo
+        elif 'bs' == self._hpo_method:
+            from flaml import BlendSearch as SearchAlgo
+        elif 'cfocat' == self._hpo_method:
+            from flaml.searcher.cfo_cat import CFOCat as SearchAlgo
+        elif 'random' == self._hpo_method:
+            from ray.tune.suggest import BasicVariantGenerator as SearchAlgo
+            from ray.tune.sample import Domain as RayDomain
+            from .tune.sample import Domain
+        else:
+            raise NotImplementedError(
+                f"hpo_method={self._hpo_method} is not recognized. "
+                "'cfo' and 'bs' are supported.")
+        if self._hpo_method == 'random':
+            # Any point in points_to_evaluate must consist of hyperparameters
+            # that are tunable, which can be identified by checking whether
+            # the corresponding value in the search space is an instance of
+            # the 'Domain' class from flaml or ray.tune
+            points_to_evaluate = self.points_to_evaluate.copy()
+            to_del = []
+            for k, v in self.search_space.items():
+                if not (isinstance(v, Domain) or isinstance(v, RayDomain)):
+                    to_del.append(k)
+            for k in to_del:
+                for p in points_to_evaluate:
+                    del p[k]
+
+            search_alg = SearchAlgo(max_concurrent=self._n_concurrent_trials,
+                                    points_to_evaluate=points_to_evaluate
+                                    )
+        else:
+            search_alg = SearchAlgo(
+                metric='val_loss',
+                space=self.search_space,
+                low_cost_partial_config=self.low_cost_partial_config,
+                points_to_evaluate=self.points_to_evaluate,
+                cat_hp_cost=self.cat_hp_cost,
+                prune_attr=self.prune_attr,
+                min_resource=self.min_resource,
+                max_resource=self.max_resource,
+                config_constraints=[(partial(size, self._state), '<=', self._mem_thres)],
+                metric_constraints=self.metric_constraints)
+            search_alg = ConcurrencyLimiter(search_alg, self._n_concurrent_trials)
+        self._state.time_from_start = time.time() - self._start_time_flag
+        time_left = self._state.time_budget - self._state.time_from_start
+        search_alg.set_search_properties(None, None, config={
+            'time_budget_s': time_left})
+        resources_per_trial = {
+            "cpu": self._state.n_jobs} if self._state.n_jobs > 1 else None
+        analysis = ray.tune.run(
+            self.trainable, search_alg=search_alg, config=self.search_space,
+            metric='val_loss', mode='min', resources_per_trial=resources_per_trial,
+            time_budget_s=self._state.time_budget, num_samples=self._max_iter)
+        # logger.info([trial.last_result for trial in analysis.trials])
+        trials = sorted((trial for trial in analysis.trials if trial.last_result
+                         and trial.last_result['wall_clock_time'] is not None),
+                        key=lambda x: x.last_result['wall_clock_time'])
+        for _track_iter, trial in enumerate(trials):
+            result = trial.last_result
+            better = False
+            if result:
+                config = result['config']
+                estimator = config.get('ml', config)['learner']
+                search_state = self._search_states[estimator]
+                search_state.update(result, 0, self._save_model_history)
+                if result['wall_clock_time'] is not None:
+                    self._state.time_from_start = result['wall_clock_time']
+                if search_state.sample_size == 
self._state.data_size: + self._iter_per_learner[estimator] += 1 + if not self._fullsize_reached: + self._fullsize_reached = True + if search_state.best_loss < self._state.best_loss: + self._state.best_loss = search_state.best_loss + self._best_estimator = estimator + self._config_history[_track_iter] = ( + self._best_estimator, config, self._time_taken_best_iter) + if self._save_model_history: + self._model_history[_track_iter] = search_state.trained_estimator + self._trained_estimator = search_state.trained_estimator + self._best_iteration = _track_iter + self._time_taken_best_iter = self._state.time_from_start + better = True + self._search_states[estimator].best_config = config + if (better or self._log_type == 'all') and self._training_log: + self._training_log.append( + self._iter_per_learner[estimator], + search_state.train_loss, + search_state.trial_time, + self._state.time_from_start, + search_state.val_loss, + config, + self._state.best_loss, + search_state.best_config, + estimator, + search_state.sample_size) + + def _search_sequential(self): + try: + from ray import __version__ as ray_version + assert ray_version >= '1.0.0' + from ray.tune.suggest import ConcurrencyLimiter + except (ImportError, AssertionError): + from .searcher.suggestion import ConcurrencyLimiter + if self._hpo_method in ('cfo', 'grid'): + from flaml import CFO as SearchAlgo + elif 'optuna' == self._hpo_method: + try: + assert ray_version >= '1.0.0' + from ray.tune.suggest.optuna import OptunaSearch as SearchAlgo + except (ImportError, AssertionError): + from .searcher.suggestion import OptunaSearch as SearchAlgo + elif 'bs' == self._hpo_method: + from flaml import BlendSearch as SearchAlgo + elif 'cfocat' == self._hpo_method: + from flaml.searcher.cfo_cat import CFOCat as SearchAlgo + else: + raise NotImplementedError( + f"hpo_method={self._hpo_method} is not recognized. 
" + "'cfo' and 'bs' are supported.") + + est_retrain_time = next_trial_time = 0 + best_config_sig = None + better = True # whether we find a better model in one trial + if self._ensemble: + self.best_model = {} + for self._track_iter in range(self._max_iter): + if self._estimator_index is None: + estimator = self._active_estimators[0] + else: + estimator = self._select_estimator(self._active_estimators) + if not estimator: + break + logger.info( + f"iteration {self._track_iter}, current learner {estimator}") + search_state = self._search_states[estimator] + self._state.time_from_start = time.time() - self._start_time_flag + time_left = self._state.time_budget - self._state.time_from_start + budget_left = time_left if not self._retrain_in_budget or better or ( + not self.best_estimator) or self._search_states[ + self.best_estimator].sample_size < self._state.data_size \ + else time_left - est_retrain_time + if not search_state.search_alg: + search_state.training_function = partial( + AutoMLState._compute_with_config_base, + self._state, estimator) + search_space = search_state.search_space + if self._sample: + prune_attr = 'FLAML_sample_size' + min_resource = MIN_SAMPLE_TRAIN + max_resource = self._state.data_size + else: + prune_attr = min_resource = max_resource = None + learner_class = self._state.learner_classes.get(estimator) + if 'grid' == self._hpo_method: # for synthetic exp only + points_to_evaluate = [] + space = search_space + keys = list(space.keys()) + domain0, domain1 = space[keys[0]], space[keys[1]] + for x1 in range(domain0.lower, domain0.upper + 1): + for x2 in range(domain1.lower, domain1.upper + 1): + points_to_evaluate.append({ + keys[0]: x1, + keys[1]: x2, + }) + self._max_iter_per_learner = len(points_to_evaluate) + low_cost_partial_config = None + else: + points_to_evaluate = [search_state.init_config] + low_cost_partial_config = search_state.low_cost_partial_config + if self._hpo_method in ('bs', 'cfo', 'grid', 'cfocat'): + algo = SearchAlgo( + metric='val_loss', mode='min', space=search_space, + points_to_evaluate=points_to_evaluate, + low_cost_partial_config=low_cost_partial_config, + cat_hp_cost=search_state.cat_hp_cost, + prune_attr=prune_attr, + min_resource=min_resource, + max_resource=max_resource, + config_constraints=[ + (learner_class.size, '<=', self._mem_thres) + ], + metric_constraints=self.metric_constraints, + ) + else: + algo = SearchAlgo( + metric='val_loss', mode='min', space=search_space, + points_to_evaluate=points_to_evaluate, + ) + search_state.search_alg = ConcurrencyLimiter(algo, + max_concurrent=1) + # search_state.search_alg = algo + else: + search_space = None + if self._hpo_method in ('bs', 'cfo', 'cfocat'): + search_state.search_alg.set_search_properties( + metric=None, mode=None, + config={ + 'metric_target': self._state.best_loss, + }, + ) + start_run_time = time.time() + analysis = tune.run( + search_state.training_function, + search_alg=search_state.search_alg, + time_budget_s=min(budget_left, self._state.train_time_limit), + verbose=max(self.verbose - 1, 0), + use_ray=False) + time_used = time.time() - start_run_time + better = False + if analysis.trials: + result = analysis.trials[-1].last_result + search_state.update(result, + time_used=time_used, + save_model_history=self._save_model_history) + if self._estimator_index is None: + eci_base = search_state.init_eci + self._eci.append(search_state.estimated_cost4improvement) + for e in self.estimator_list[1:]: + self._eci.append(self._search_states[e].init_eci + / eci_base * 
self._eci[0])
+                    self._estimator_index = 0
+                if result['wall_clock_time'] is not None:
+                    self._state.time_from_start = result['wall_clock_time']
+                # logger.info(f"{self._search_states[estimator].sample_size}, {data_size}")
+                if search_state.sample_size == self._state.data_size:
+                    self._iter_per_learner[estimator] += 1
+                    if not self._fullsize_reached:
+                        self._fullsize_reached = True
+                if search_state.best_loss < self._state.best_loss:
+                    best_config_sig = estimator + search_state.get_hist_config_sig(
+                        self.data_size_full,
+                        search_state.best_config)
+                    self._state.best_loss = search_state.best_loss
+                    self._best_estimator = estimator
+                    est_retrain_time = search_state.est_retrain_time(
+                        self.data_size_full) if (
+                        best_config_sig not in self._retrained_config) else 0
+                    self._config_history[self._track_iter] = (
+                        estimator,
+                        search_state.best_config,
+                        self._state.time_from_start)
+                    if self._save_model_history:
+                        self._model_history[
+                            self._track_iter] = search_state.trained_estimator
+                    elif self._trained_estimator:
+                        del self._trained_estimator
+                        self._trained_estimator = None
+                    self._trained_estimator = search_state.trained_estimator
+                    self._best_iteration = self._track_iter
+                    self._time_taken_best_iter = self._state.time_from_start
+                    better = True
+                    next_trial_time = search_state.time2eval_best
+                if better or self._log_type == 'all':
+                    if self._training_log:
+                        self._training_log.append(
+                            self._iter_per_learner[estimator],
+                            search_state.train_loss,
+                            search_state.trial_time,
+                            self._state.time_from_start,
+                            search_state.val_loss,
+                            search_state.config,
+                            search_state.best_loss,
+                            search_state.best_config,
+                            estimator,
+                            search_state.sample_size)
+                    if mlflow is not None and mlflow.active_run():
+                        with mlflow.start_run(nested=True):
+                            mlflow.log_metric('iter_counter',
+                                              self._iter_per_learner[estimator])
+                            mlflow.log_param('train_loss',
+                                             search_state.train_loss)
+                            mlflow.log_metric('trial_time',
+                                              search_state.trial_time)
+                            mlflow.log_metric('wall_clock_time',
+                                              self._state.time_from_start)
+                            mlflow.log_metric('validation_loss',
+                                              search_state.val_loss)
+                            mlflow.log_param('config',
+                                             search_state.config)
+                            mlflow.log_param('learner',
+                                             estimator)
+                            mlflow.log_param('sample_size',
+                                             search_state.sample_size)
+                            mlflow.log_metric('best_validation_loss',
+                                              search_state.best_loss)
+                            mlflow.log_param('best_config',
+                                             search_state.best_config)
+                            mlflow.log_param('best_learner',
+                                             self._best_estimator)
+                logger.info(
+                    " at {:.1f}s,\tbest {}'s error={:.4f},\tbest {}'s error={:.4f}".format(
+                        self._state.time_from_start,
+                        estimator,
+                        search_state.best_loss,
+                        self._best_estimator,
+                        self._state.best_loss))
+                if all(state.search_alg and state.search_alg.searcher.is_ls_ever_converged
+                       for state in self._search_states.values()) and (
+                           self._state.time_from_start
+                           > self._warn_threshold * self._time_taken_best_iter):
+                    logger.warn("Local search has converged at least once for all "
+                                "estimators' hyperparameters, and the total search time "
+                                f"exceeds {self._warn_threshold} times the time taken "
+                                "to find the best model.")
+                    self._warn_threshold *= 10
+            else:
+                logger.info(f"not enough budget for learner {estimator}")
+                if self._estimator_index is not None:
+                    self._active_estimators.remove(estimator)
+                    self._estimator_index -= 1
+            if self._retrain_in_budget and best_config_sig and est_retrain_time \
+                    and not better and self._search_states[
+                    self._best_estimator].sample_size == self._state.data_size and (
+                    est_retrain_time
+                    <= self._state.time_budget - self._state.time_from_start
+                    <= est_retrain_time 
+ next_trial_time): + self._trained_estimator, \ + retrain_time = self._state._train_with_config( + self._best_estimator, + self._search_states[self._best_estimator].best_config, + self.data_size_full) + logger.info("retrain {} for {:.1f}s".format( + self._best_estimator, retrain_time)) + self._retrained_config[best_config_sig] = retrain_time + est_retrain_time = 0 + self._state.time_from_start = time.time() - self._start_time_flag + if (self._state.time_from_start >= self._state.time_budget + or not self._active_estimators): + break + if self._ensemble and self._best_estimator: + time_left = self._state.time_budget - self._state.time_from_start + time_ensemble = self._search_states[ + self._best_estimator].time2eval_best + if time_left < time_ensemble < 2 * time_left: + break + + def _search(self): + # initialize the search_states + self._eci = [] + self._state.best_loss = float('+inf') + self._state.time_from_start = 0 + self._estimator_index = None + self._best_iteration = 0 + self._time_taken_best_iter = 0 + self._model_history = {} + self._config_history = {} + self._max_iter_per_learner = 1000000 # TODO + self._iter_per_learner = dict([(e, 0) for e in self.estimator_list]) + self._fullsize_reached = False + self._trained_estimator = None + self._best_estimator = None + self._retrained_config = {} + self._warn_threshold = 10 + + if self._n_concurrent_trials == 1: + self._search_sequential() + else: + self._search_parallel() + # Add a checkpoint for the current best config to the log. + if self._training_log: + self._training_log.checkpoint() + if self._best_estimator: + self._selected = self._search_states[self._best_estimator] + self.modelcount = sum( + search_state.total_iter + for search_state in self._search_states.values()) + if self._trained_estimator: + logger.info(f'selected model: {self._trained_estimator.model}') + if self._ensemble: + search_states = list(x for x in self._search_states.items() + if x[1].trained_estimator) + search_states.sort(key=lambda x: x[1].best_loss) + estimators = [(x[0], x[1].trained_estimator) + for x in search_states[:2]] + estimators += [ + (x[0], x[1].trained_estimator) for x in search_states[2:] + if x[1].best_loss < 4 * self._selected.best_loss] + logger.info(estimators) + if len(estimators) <= 1: + return + if self._state.task != "regression": + from sklearn.ensemble import StackingClassifier as Stacker + for e in estimators: + e[1]._estimator_type = 'classifier' + else: + from sklearn.ensemble import StackingRegressor as Stacker + best_m = self._trained_estimator + stacker = Stacker(estimators, best_m, n_jobs=self._state.n_jobs, + passthrough=True) + if self._sample_weight_full is not None: + self._state.fit_kwargs[ + 'sample_weight'] = self._sample_weight_full + stacker.fit(self._X_train_all, self._y_train_all, + **self._state.fit_kwargs) + logger.info(f'ensemble: {stacker}') + self._trained_estimator = stacker + self._trained_estimator.model = stacker + elif self._retrain_final: + # reset time budget for retraining + self._state.time_from_start -= self._state.time_budget + if (self._state.time_budget - self._state.time_from_start + > self._selected.est_retrain_time(self.data_size_full)) \ + and self._selected.best_config_sample_size == self._state.data_size: + self._trained_estimator, \ + retrain_time = self._state._train_with_config( + self._best_estimator, + self._search_states[self._best_estimator].best_config, + self.data_size_full) + logger.info("retrain {} for {:.1f}s".format( + self._best_estimator, retrain_time)) + if 
self._trained_estimator: + logger.info( + f'retrained model: {self._trained_estimator.model}') + else: + logger.info( + "not retraining because the time budget is too small.") + else: + self._selected = self._trained_estimator = None + self.modelcount = 0 + if self.model and mlflow is not None and mlflow.active_run(): + mlflow.sklearn.log_model(self.model, 'best_model') + + def __del__(self): + if hasattr(self, '_trained_estimator') and self._trained_estimator \ + and hasattr(self._trained_estimator, 'cleanup'): + self._trained_estimator.cleanup() + del self._trained_estimator + + def _select_estimator(self, estimator_list): + if self._learner_selector == 'roundrobin': + self._estimator_index += 1 + if self._estimator_index == len(estimator_list): + self._estimator_index = 0 + return estimator_list[self._estimator_index] + min_estimated_cost, selected = np.Inf, None + inv = [] + untried_exists = False + for i, estimator in enumerate(estimator_list): + if estimator in self._search_states and ( + self._search_states[estimator].sample_size + ): # sample_size=None meaning no result + search_state = self._search_states[estimator] + if (self._search_states[estimator].time2eval_best + > self._state.time_budget - self._state.time_from_start + or self._iter_per_learner[estimator] + >= self._max_iter_per_learner): + inv.append(0) + continue + estimated_cost = search_state.estimated_cost4improvement + if search_state.sample_size < self._state.data_size: + estimated_cost = min( + estimated_cost, + search_state.time2eval_best * min( + SAMPLE_MULTIPLY_FACTOR, + self._state.data_size / search_state.sample_size)) + gap = search_state.best_loss - self._state.best_loss + if gap > 0 and not self._ensemble: + delta_loss = (search_state.best_loss_old + - search_state.best_loss) or search_state.best_loss + delta_time = (search_state.total_time_used + - search_state.time_best_found_old) or 1e-10 + speed = delta_loss / delta_time + if speed: + estimated_cost = max(2 * gap / speed, estimated_cost) + if estimated_cost == 0: + estimated_cost = 1e-10 + inv.append(1 / estimated_cost) + else: + estimated_cost = self._eci[i] + inv.append(0) + untried_exists = True + if estimated_cost < min_estimated_cost: + min_estimated_cost = estimated_cost + selected = estimator + if untried_exists or not selected: + state = self._search_states.get(selected) + if not (state and state.sample_size): + return selected + s = sum(inv) + p = self._random.rand() + q = 0 + for i in range(len(inv)): + if inv[i]: + q += inv[i] / s + if p < q: + return estimator_list[i] diff --git a/flaml/data.py b/flaml/data.py index 33b5270446..b2eaeed19b 100644 --- a/flaml/data.py +++ b/flaml/data.py @@ -141,14 +141,14 @@ def get_output_from_log(filename, time_budget): best_config_list = [] with training_log_reader(filename) as reader: for record in reader.records(): - time_used = record.total_search_time + time_used = record.wall_clock_time val_loss = record.validation_loss config = record.config learner = record.learner.split('_')[0] sample_size = record.sample_size train_loss = record.logged_metric - if time_used < time_budget: + if time_used < time_budget and np.isfinite(val_loss): if val_loss < best_val_loss: best_val_loss = val_loss best_config = config diff --git a/flaml/ml.py b/flaml/ml.py index c26b8513d9..58a4659c27 100644 --- a/flaml/ml.py +++ b/flaml/ml.py @@ -102,8 +102,11 @@ def sklearn_metric_loss_score( score = log_loss( y_true, y_predict, labels=labels, sample_weight=sample_weight) elif 'mape' in metric_name: - score = 
mean_absolute_percentage_error( - y_true, y_predict) + try: + score = mean_absolute_percentage_error( + y_true, y_predict) + except ValueError: + return np.inf elif 'micro_f1' in metric_name: score = 1 - f1_score( y_true, y_predict, sample_weight=sample_weight, average='micro') @@ -141,21 +144,23 @@ def get_y_pred(estimator, X, eval_metric, obj, freq=None): def get_test_loss( estimator, X_train, y_train, X_test, y_test, weight_test, - eval_metric, obj, labels=None, budget=None, train_loss=False, fit_kwargs={} + eval_metric, obj, labels=None, budget=None, log_training_metric=False, fit_kwargs={} ): start = time.time() - train_time = estimator.fit(X_train, y_train, budget, **fit_kwargs) + estimator.fit(X_train, y_train, budget, **fit_kwargs) if isinstance(eval_metric, str): pred_start = time.time() test_pred_y = get_y_pred(estimator, X_test, eval_metric, obj) pred_time = (time.time() - pred_start) / X_test.shape[0] test_loss = sklearn_metric_loss_score(eval_metric, test_pred_y, y_test, labels, weight_test) - if train_loss is not False: + if log_training_metric: test_pred_y = get_y_pred(estimator, X_train, eval_metric, obj) train_loss = sklearn_metric_loss_score( eval_metric, test_pred_y, y_train, labels, fit_kwargs.get('sample_weight')) + else: + train_loss = None else: # customized metric function test_loss, metrics = eval_metric( X_test, y_test, estimator, labels, X_train, y_train, @@ -174,40 +179,41 @@ def train_model(estimator, X_train, y_train, budget, fit_kwargs={}): def evaluate_model( estimator, X_train, y_train, X_val, y_val, weight_val, - budget, kf, task, eval_method, eval_metric, best_val_loss, train_loss=False, + budget, kf, task, eval_method, eval_metric, best_val_loss, log_training_metric=False, fit_kwargs={} ): if 'holdout' in eval_method: val_loss, train_loss, train_time, pred_time = evaluate_model_holdout( estimator, X_train, y_train, X_val, y_val, weight_val, budget, - task, eval_metric, train_loss=train_loss, + task, eval_metric, log_training_metric=log_training_metric, fit_kwargs=fit_kwargs) else: val_loss, train_loss, train_time, pred_time = evaluate_model_CV( estimator, X_train, y_train, budget, kf, task, - eval_metric, best_val_loss, train_loss=train_loss, + eval_metric, best_val_loss, log_training_metric=log_training_metric, fit_kwargs=fit_kwargs) return val_loss, train_loss, train_time, pred_time def evaluate_model_holdout( estimator, X_train, y_train, X_val, y_val, - weight_val, budget, task, eval_metric, train_loss=False, + weight_val, budget, task, eval_metric, log_training_metric=False, fit_kwargs={} ): val_loss, train_time, train_loss, pred_time = get_test_loss( estimator, X_train, y_train, X_val, y_val, weight_val, eval_metric, - task, budget=budget, train_loss=train_loss, fit_kwargs=fit_kwargs) + task, budget=budget, log_training_metric=log_training_metric, fit_kwargs=fit_kwargs) return val_loss, train_loss, train_time, pred_time def evaluate_model_CV( estimator, X_train_all, y_train_all, budget, kf, - task, eval_metric, best_val_loss, train_loss=False, fit_kwargs={} + task, eval_metric, best_val_loss, log_training_metric=False, fit_kwargs={} ): start_time = time.time() total_val_loss = 0 total_train_loss = None + train_loss = None train_time = pred_time = 0 valid_fold_num = total_fold_num = 0 n = kf.get_n_splits() @@ -231,7 +237,7 @@ def evaluate_model_CV( kf = kf.split(X_train_split) rng = np.random.RandomState(2020) val_loss_list = [] - budget_per_train = budget / (n + 1) + budget_per_train = budget / n if 'sample_weight' in fit_kwargs: weight = 
fit_kwargs['sample_weight'] weight_val = None @@ -259,13 +265,13 @@ def evaluate_model_CV( val_loss_i, train_time_i, train_loss_i, pred_time_i = get_test_loss( estimator, X_train, y_train, X_val, y_val, weight_val, eval_metric, task, labels, budget_per_train, - train_loss=train_loss, fit_kwargs=fit_kwargs) + log_training_metric=log_training_metric, fit_kwargs=fit_kwargs) if weight is not None: fit_kwargs['sample_weight'] = weight valid_fold_num += 1 total_fold_num += 1 total_val_loss += val_loss_i - if train_loss is not False: + if log_training_metric or not isinstance(eval_metric, str): if isinstance(total_train_loss, list): total_train_loss = [ total_train_loss[i] + v for i, v in enumerate(train_loss_i)] @@ -286,7 +292,7 @@ def evaluate_model_CV( break val_loss = np.max(val_loss_list) n = total_fold_num - if train_loss is not False: + if log_training_metric or not isinstance(eval_metric, str): if isinstance(total_train_loss, list): train_loss = [v / n for v in total_train_loss] elif isinstance(total_train_loss, dict): @@ -294,17 +300,17 @@ def evaluate_model_CV( else: train_loss = total_train_loss / n pred_time /= n - budget -= time.time() - start_time - if val_loss < best_val_loss and budget > budget_per_train: - estimator.cleanup() - estimator.fit(X_train_all, y_train_all, budget, **fit_kwargs) + # budget -= time.time() - start_time + # if val_loss < best_val_loss and budget > budget_per_train: + # estimator.cleanup() + # estimator.fit(X_train_all, y_train_all, budget, **fit_kwargs) return val_loss, train_loss, train_time, pred_time def compute_estimator( X_train, y_train, X_val, y_val, weight_val, budget, kf, config_dic, task, estimator_name, eval_method, eval_metric, - best_val_loss=np.Inf, n_jobs=1, estimator_class=None, train_loss=False, + best_val_loss=np.Inf, n_jobs=1, estimator_class=None, log_training_metric=False, fit_kwargs={} ): estimator_class = estimator_class or get_estimator_class( @@ -313,7 +319,7 @@ def compute_estimator( **config_dic, task=task, n_jobs=n_jobs) val_loss, train_loss, train_time, pred_time = evaluate_model( estimator, X_train, y_train, X_val, y_val, weight_val, budget, kf, task, - eval_method, eval_metric, best_val_loss, train_loss=train_loss, + eval_method, eval_metric, best_val_loss, log_training_metric=log_training_metric, fit_kwargs=fit_kwargs) return estimator, val_loss, train_loss, train_time, pred_time diff --git a/flaml/model.py b/flaml/model.py index b9ae3f52cb..0ab812fa15 100644 --- a/flaml/model.py +++ b/flaml/model.py @@ -222,10 +222,10 @@ class LGBMEstimator(BaseEstimator): 'domain': tune.loguniform(lower=1 / 1024, upper=1.0), 'init_value': 0.1, }, - 'subsample': { - 'domain': tune.uniform(lower=0.1, upper=1.0), - 'init_value': 1.0, - }, + # 'subsample': { + # 'domain': tune.uniform(lower=0.1, upper=1.0), + # 'init_value': 1.0, + # }, 'log_max_bin': { 'domain': tune.lograndint(lower=3, upper=11), 'init_value': 8, @@ -252,28 +252,30 @@ class LGBMEstimator(BaseEstimator): def __init__(self, task='binary:logistic', log_max_bin=8, **params): super().__init__(task, **params) - # Default: ‘regression’ for LGBMRegressor, - # ‘binary’ or ‘multiclass’ for LGBMClassifier - if 'regression' in task: - objective = 'regression' - elif 'binary' in task: - objective = 'binary' - elif 'multi' in task: - objective = 'multiclass' - else: - objective = 'regression' + if "objective" not in self.params: + # Default: ‘regression’ for LGBMRegressor, + # ‘binary’ or ‘multiclass’ for LGBMClassifier + if 'regression' in task: + objective = 'regression' + elif 'binary' 
in task: + objective = 'binary' + elif 'multi' in task: + objective = 'multiclass' + else: + objective = 'regression' + self.params["objective"] = objective if "n_estimators" in self.params: self.params["n_estimators"] = int(round(self.params["n_estimators"])) if "num_leaves" in self.params: self.params["num_leaves"] = int(round(self.params["num_leaves"])) if "min_child_samples" in self.params: self.params["min_child_samples"] = int(round(self.params["min_child_samples"])) - if "objective" not in self.params: - self.params["objective"] = objective if "max_bin" not in self.params: self.params['max_bin'] = 1 << int(round(log_max_bin)) - 1 if "verbose" not in self.params: self.params['verbose'] = -1 + # if "subsample_freq" not in self.params: + # self.params['subsample_freq'] = 1 if 'regression' in task: self.estimator_class = LGBMRegressor else: diff --git a/flaml/nlp/autotransformers.py b/flaml/nlp/autotransformers.py index 2233cc39c2..7661d37323 100644 --- a/flaml/nlp/autotransformers.py +++ b/flaml/nlp/autotransformers.py @@ -748,6 +748,7 @@ class AutoTransformers: self._set_metric(custom_metric_name, custom_metric_mode_name) self._set_task() self._fp16 = fp16 + ray.shutdown() ray.init(local_mode=ray_local_mode) self._set_search_space(**custom_hpo_args) diff --git a/flaml/searcher/flow2.py b/flaml/searcher/flow2.py index 5ad75d89f3..9c1fadf791 100644 --- a/flaml/searcher/flow2.py +++ b/flaml/searcher/flow2.py @@ -3,6 +3,7 @@ * Licensed under the MIT License. See LICENSE file in the * project root for license information. ''' +from flaml.tune.sample import Domain from typing import Dict, Optional, Tuple import numpy as np try: @@ -140,7 +141,7 @@ class FLOW2(Searcher): if str(sampler) != 'Normal': self._bounded_keys.append(key) if not hier: - self._space_keys = sorted(self._space.keys()) + self._space_keys = sorted(self._tunable_keys) self._hierarchical = hier if (self.prune_attr and self.prune_attr not in self._space and self.max_resource): @@ -499,18 +500,28 @@ class FLOW2(Searcher): else: space = self._space value_list = [] + # self._space_keys doesn't contain keys with const values, + # e.g., "eval_metric": ["logloss", "error"]. keys = sorted(config.keys()) if self._hierarchical else self._space_keys for key in keys: value = config[key] if key == self.prune_attr: value_list.append(value) - # else key must be in self.space - # get rid of list type or constant, - # e.g., "eval_metric": ["logloss", "error"] - elif isinstance(space[key], sample.Integer): - value_list.append(int(round(value))) else: - value_list.append(value) + # key must be in space + domain = space[key] + if self._hierarchical: + # can't remove constant for hierarchical search space, + # e.g., learner + if not (domain is None or type(domain) in (str, int, float) + or isinstance(domain, sample.Domain)): + # not domain or hashable + # get rid of list type for hierarchical search space. 
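+                    # A minimal sketch of the signature idea in miniature
+                    # (illustrative helper, not FLOW2's exact logic): reduce a
+                    # config dict to a hashable tuple, rounding values from
+                    # integer domains, so evaluated configs can be deduplicated
+                    # in a set. The helper name and toy space are assumptions:
+                    #   def config_signature(config, keys, integer_keys=()):
+                    #       return tuple(
+                    #           int(round(config[k])) if k in integer_keys
+                    #           else config[k] for k in keys)
+                    #   seen = set()
+                    #   sig = config_signature(
+                    #       {'n_estimators': 4.2, 'lr': 0.1},
+                    #       ['lr', 'n_estimators'], {'n_estimators'})
+                    #   if sig not in seen:
+                    #       seen.add(sig)  # first time this config is proposed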
+ continue + if isinstance(domain, sample.Integer): + value_list.append(int(round(value))) + else: + value_list.append(value) return tuple(value_list) @property diff --git a/flaml/training_log.py b/flaml/training_log.py index a343314d69..b58850c18e 100644 --- a/flaml/training_log.py +++ b/flaml/training_log.py @@ -16,7 +16,7 @@ class TrainingLogRecord(object): iter_per_learner: int, logged_metric: float, trial_time: float, - total_search_time: float, + wall_clock_time: float, validation_loss, config, best_validation_loss, @@ -27,7 +27,7 @@ class TrainingLogRecord(object): self.iter_per_learner = iter_per_learner self.logged_metric = logged_metric self.trial_time = trial_time - self.total_search_time = total_search_time + self.wall_clock_time = wall_clock_time self.validation_loss = validation_loss self.config = config self.best_validation_loss = best_validation_loss @@ -71,7 +71,7 @@ class TrainingLogWriter(object): it_counter: int, train_loss: float, trial_time: float, - total_search_time: float, + wall_clock_time: float, validation_loss, config, best_validation_loss, @@ -86,7 +86,7 @@ class TrainingLogWriter(object): it_counter, train_loss, trial_time, - total_search_time, + wall_clock_time, validation_loss, config, best_validation_loss, @@ -95,6 +95,7 @@ class TrainingLogWriter(object): sample_size) if validation_loss < self.current_best_loss or \ validation_loss == self.current_best_loss and \ + self.current_sample_size is not None and \ sample_size > self.current_sample_size: self.current_best_loss = validation_loss self.current_sample_size = sample_size diff --git a/flaml/tune/space.py b/flaml/tune/space.py index ff4d5fa98e..59cd295b5d 100644 --- a/flaml/tune/space.py +++ b/flaml/tune/space.py @@ -363,6 +363,7 @@ def indexof(domain: Dict, config: Dict) -> int: continue # print(domain.const[i]) if all(config[key] == value for key, value in domain.const[i].items()): + # assumption: the concatenation of constants is a unique identifier return i return None diff --git a/flaml/version.py b/flaml/version.py index ce25754b4b..906d362f7d 100644 --- a/flaml/version.py +++ b/flaml/version.py @@ -1 +1 @@ -__version__ = "0.5.13" +__version__ = "0.6.0" diff --git a/notebook/automl_in_sklearn_pipeline.ipynb b/notebook/automl_in_sklearn_pipeline.ipynb index 644ffd04c2..469321540d 100644 --- a/notebook/automl_in_sklearn_pipeline.ipynb +++ b/notebook/automl_in_sklearn_pipeline.ipynb @@ -2,27 +2,26 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, "source": [ "Copyright (c) 2021. All rights reserved.\n", "\n", "Contributed by: @bnriiitb\n", "\n", "Licensed under the MIT License." - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Using AutoML in Sklearn Pipeline\n", "\n", "This tutorial will help you understand how FLAML's AutoML can be used as a transformer in the Sklearn pipeline." 
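    "\n",
    "A minimal sketch of the idea (assumes flaml and scikit-learn are installed; the toy data and 10-second budget are illustrative):\n",
    "\n",
    "```python\n",
    "from flaml import AutoML\n",
    "from sklearn.datasets import make_classification\n",
    "from sklearn.impute import SimpleImputer\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "\n",
    "X, y = make_classification(n_samples=200, random_state=0)\n",
    "automl_pipeline = Pipeline([\n",
    "    ('imputer', SimpleImputer()),\n",
    "    ('standardizer', StandardScaler()),\n",
    "    ('automl', AutoML()),\n",
    "])\n",
    "# Pipeline routes '<step>__<param>' fit parameters to the named step,\n",
    "# so AutoML's fit arguments are passed with the 'automl__' prefix.\n",
    "automl_pipeline.fit(X, y, automl__time_budget=10, automl__metric='accuracy')\n",
    "print(automl_pipeline.predict(X[:5]))\n",
    "```"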
- ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "\n", "## 1.Introduction\n", @@ -43,11 +42,11 @@ "```bash\n", "pip install flaml[notebook]\n", "```" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### 1.2 Why are pipelines a silver bullet?\n", "\n", @@ -63,42 +62,47 @@ "* Allow hyperparameter tuning across the estimators\n", "* Easier to share and collaborate with multiple users (bug fixes, enhancements etc)\n", "* Enforce the implementation and order of steps" - ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "#### As FLAML's AutoML module can be used a transformer in the Sklearn's pipeline we can get all the benefits of pipeline and thereby write extremley clean, and resuable code." - ] + ], + "metadata": {} }, { "cell_type": "code", "execution_count": 44, - "metadata": {}, - "outputs": [], "source": [ "!pip install flaml[notebook];" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 2. Classification Example\n", "### Load data and preprocess\n", "\n", "Download [Airlines dataset](https://www.openml.org/d/1169) from OpenML. The task is to predict whether a given flight will be delayed, given the information of the scheduled departure." - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 45, - "metadata": {}, + "execution_count": 4, + "source": [ + "from flaml.data import load_openml_dataset\n", + "X_train, X_test, y_train, y_test = load_openml_dataset(\n", + " dataset_id=1169, data_dir='./', random_state=1234, dataset_format='array')" + ], "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "load dataset from ./openml_ds1169.pkl\n", "Dataset name: airlines\n", @@ -107,61 +111,38 @@ ] } ], - "source": [ - "from flaml.data import load_openml_dataset\n", - "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=1169, data_dir='./',random_state=1234)" - ] + "metadata": {} }, { "cell_type": "code", - "execution_count": 46, - "metadata": {}, + "execution_count": 5, + "source": [ + "X_train[0]" + ], "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ "array([ 12., 2648., 4., 15., 4., 450., 67.], dtype=float32)" ] }, - "execution_count": 46, "metadata": {}, - "output_type": "execute_result" + "execution_count": 5 } ], - "source": [ - "X_train[0]" - ] + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 3. Create a Pipeline" - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 47, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
(HTML repr of the sklearn Pipeline widget omitted; it duplicates the text/plain output shown alongside)
" - ], - "text/plain": [ - "Pipeline(steps=[('imputuer', SimpleImputer()),\n", - " ('standardizer', StandardScaler()),\n", - " ('automl', )])" - ] - }, - "execution_count": 47, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 6, "source": [ "import sklearn\n", "from sklearn import set_config\n", @@ -182,21 +163,39 @@ " (\"automl\", automl)\n", "])\n", "automl_pipeline" - ] + ], + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Pipeline(steps=[('imputuer', SimpleImputer()),\n", + " ('standardizer', StandardScaler()),\n", + " ('automl', )])" + ], + "text/html": [ + "
(HTML repr of the sklearn Pipeline widget omitted; it duplicates the text/plain output shown alongside)
" + ] + }, + "metadata": {}, + "execution_count": 6 + } + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### Run FLAML\n", "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. For example, the default ML learners of FLAML are `['lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree', 'lrl1']`. " - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 48, - "metadata": {}, - "outputs": [], + "execution_count": 7, "source": [ "settings = {\n", " \"time_budget\": 60, # total running time in seconds\n", @@ -205,656 +204,213 @@ " \"estimator_list\":['xgboost','catboost','lgbm'],\n", " \"log_file_name\": 'airlines_experiment.log', # flaml log file\n", "}" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "code", - "execution_count": 49, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:30] {884} INFO - Evaluation method: holdout\n", - "[flaml.automl: 08-09 19:49:30] {591} INFO - Using StratifiedKFold\n", - "[flaml.automl: 08-09 19:49:30] {905} INFO - Minimizing error metric: 1-accuracy\n", - "[flaml.automl: 08-09 19:49:30] {924} INFO - List of ML learners in AutoML Run: ['xgboost', 'catboost', 'lgbm']\n", - "[flaml.automl: 08-09 19:49:30] {986} INFO - iteration 0 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:30] {1134} INFO - at 0.4s,\tbest xgboost's error=0.3755,\tbest xgboost's error=0.3755\n", - "[flaml.automl: 08-09 19:49:30] {986} INFO - iteration 1 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:30] {1134} INFO - at 0.4s,\tbest lgbm's error=0.3704,\tbest lgbm's error=0.3704\n", - "[flaml.automl: 08-09 19:49:30] {986} INFO - iteration 2 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:30] {1134} INFO - at 0.5s,\tbest xgboost's error=0.3755,\tbest lgbm's error=0.3704\n", - "[flaml.automl: 08-09 19:49:30] {986} INFO - iteration 3 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:30] {1134} INFO - at 0.5s,\tbest lgbm's error=0.3704,\tbest lgbm's error=0.3704\n", - "[flaml.automl: 08-09 19:49:30] {986} INFO - iteration 4 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. 
To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.6s,\tbest xgboost's error=0.3643,\tbest xgboost's error=0.3643\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 5 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=20, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=20\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=32, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=32\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.6s,\tbest xgboost's error=0.3643,\tbest xgboost's error=0.3643\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 6 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.7s,\tbest xgboost's error=0.3624,\tbest xgboost's error=0.3624\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 7 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.8s,\tbest xgboost's error=0.3605,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 8 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. 
To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.8s,\tbest xgboost's error=0.3605,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 9 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 0.9s,\tbest lgbm's error=0.3704,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 10 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=25, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=25\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 1.1s,\tbest xgboost's error=0.3605,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 11 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 1.1s,\tbest lgbm's error=0.3704,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 12 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 1.2s,\tbest xgboost's error=0.3605,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 13 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=7, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=7\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=8 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=39, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=39\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 1.4s,\tbest lgbm's error=0.3658,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 14 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:31] {1134} INFO - at 1.4s,\tbest xgboost's error=0.3605,\tbest xgboost's error=0.3605\n", - "[flaml.automl: 08-09 19:49:31] {986} INFO - iteration 15 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 1.6s,\tbest lgbm's error=0.3588,\tbest lgbm's error=0.3588\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 16 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 1.6s,\tbest xgboost's error=0.3605,\tbest lgbm's error=0.3588\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 17 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=35, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=35\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=17 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=17, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=17\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 1.7s,\tbest lgbm's error=0.3588,\tbest lgbm's error=0.3588\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 18 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 1.8s,\tbest lgbm's error=0.3588,\tbest lgbm's error=0.3588\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 19 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=59, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=59\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=51 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=74, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=74\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=5 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 2.0s,\tbest lgbm's error=0.3588,\tbest lgbm's error=0.3588\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 20 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 2.1s,\tbest xgboost's error=0.3531,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 21 current learner catboost\n", - "[flaml.automl: 08-09 19:49:32] {1134} INFO - at 2.3s,\tbest catboost's error=0.3595,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:32] {986} INFO - iteration 22 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 2.6s,\tbest xgboost's error=0.3531,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 23 current learner catboost\n", - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 2.8s,\tbest catboost's error=0.3595,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 24 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 2.9s,\tbest lgbm's error=0.3588,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 25 current learner catboost\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=20, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=20\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=13 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 3.1s,\tbest catboost's error=0.3587,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 26 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 3.2s,\tbest lgbm's error=0.3588,\tbest xgboost's error=0.3531\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 27 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=36, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=36\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=5 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=35, min_child_samples=20 will be ignored. 
Current value: min_data_in_leaf=35\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=17 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:33] {1134} INFO - at 3.4s,\tbest lgbm's error=0.3517,\tbest lgbm's error=0.3517\n", - "[flaml.automl: 08-09 19:49:33] {986} INFO - iteration 28 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:34] {1134} INFO - at 3.6s,\tbest lgbm's error=0.3517,\tbest lgbm's error=0.3517\n", - "[flaml.automl: 08-09 19:49:34] {986} INFO - iteration 29 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=31, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=31\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=8 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:34] {1134} INFO - at 3.8s,\tbest xgboost's error=0.3527,\tbest lgbm's error=0.3517\n", - "[flaml.automl: 08-09 19:49:34] {986} INFO - iteration 30 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:34] {1134} INFO - at 3.9s,\tbest xgboost's error=0.3527,\tbest lgbm's error=0.3517\n", - "[flaml.automl: 08-09 19:49:34] {986} INFO - iteration 31 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:35] {1134} INFO - at 4.9s,\tbest xgboost's error=0.3517,\tbest xgboost's error=0.3517\n", - "[flaml.automl: 08-09 19:49:35] {986} INFO - iteration 32 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:35] {1134} INFO - at 4.9s,\tbest lgbm's error=0.3517,\tbest xgboost's error=0.3517\n", - "[flaml.automl: 08-09 19:49:35] {986} INFO - iteration 33 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. 
To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=26, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=26\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=111 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:35] {1134} INFO - at 5.2s,\tbest xgboost's error=0.3517,\tbest xgboost's error=0.3517\n", - "[flaml.automl: 08-09 19:49:35] {986} INFO - iteration 34 current learner catboost\n", - "[flaml.automl: 08-09 19:49:35] {1134} INFO - at 5.4s,\tbest catboost's error=0.3587,\tbest xgboost's error=0.3517\n", - "[flaml.automl: 08-09 19:49:35] {986} INFO - iteration 35 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:36] {1134} INFO - at 5.6s,\tbest lgbm's error=0.3514,\tbest lgbm's error=0.3514\n", - "[flaml.automl: 08-09 19:49:36] {986} INFO - iteration 36 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=31, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=31\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=35, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=35\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=7 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:36] {1134} INFO - at 5.8s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-09 19:49:36] {986} INFO - iteration 37 current learner lgbm\n", - "[flaml.automl: 08-09 19:49:36] {1134} INFO - at 6.0s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-09 19:49:36] {986} INFO - iteration 38 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=41, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=41\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=11 will be ignored. Current value: num_leaves=31\n", - "[LightGBM] [Warning] min_data_in_leaf is set=51, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=51\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=44 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:37] {1134} INFO - at 6.7s,\tbest lgbm's error=0.3492,\tbest lgbm's error=0.3492\n", - "[flaml.automl: 08-09 19:49:37] {986} INFO - iteration 39 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=74, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=74\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=6 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:37] {1134} INFO - at 7.3s,\tbest lgbm's error=0.3492,\tbest lgbm's error=0.3492\n", - "[flaml.automl: 08-09 19:49:37] {986} INFO - iteration 40 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=35, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=35\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=52 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:39] {1134} INFO - at 9.5s,\tbest lgbm's error=0.3492,\tbest lgbm's error=0.3492\n", - "[flaml.automl: 08-09 19:49:39] {986} INFO - iteration 41 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:49:42] {1134} INFO - at 12.4s,\tbest xgboost's error=0.3517,\tbest lgbm's error=0.3492\n", - "[flaml.automl: 08-09 19:49:42] {986} INFO - iteration 42 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=51, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=51\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=44 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:44] {1134} INFO - at 14.3s,\tbest lgbm's error=0.3424,\tbest lgbm's error=0.3424\n", - "[flaml.automl: 08-09 19:49:44] {986} INFO - iteration 43 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=26, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=26\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=170 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:45] {1134} INFO - at 15.5s,\tbest lgbm's error=0.3424,\tbest lgbm's error=0.3424\n", - "[flaml.automl: 08-09 19:49:45] {986} INFO - iteration 44 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=53, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=53\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=12 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:48] {1134} INFO - at 18.2s,\tbest lgbm's error=0.3424,\tbest lgbm's error=0.3424\n", - "[flaml.automl: 08-09 19:49:48] {986} INFO - iteration 45 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=100, min_child_samples=20 will be ignored. 
Current value: min_data_in_leaf=100\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=18 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:49] {1134} INFO - at 19.1s,\tbest lgbm's error=0.3407,\tbest lgbm's error=0.3407\n", - "[flaml.automl: 08-09 19:49:49] {986} INFO - iteration 46 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=128, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=128\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=70 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:51] {1134} INFO - at 20.8s,\tbest lgbm's error=0.3407,\tbest lgbm's error=0.3407\n", - "[flaml.automl: 08-09 19:49:51] {986} INFO - iteration 47 current learner catboost\n", - "[flaml.automl: 08-09 19:49:51] {1134} INFO - at 21.0s,\tbest catboost's error=0.3587,\tbest lgbm's error=0.3407\n", - "[flaml.automl: 08-09 19:49:51] {986} INFO - iteration 48 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=128, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=128\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=16 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:52] {1134} INFO - at 22.2s,\tbest lgbm's error=0.3376,\tbest lgbm's error=0.3376\n", - "[flaml.automl: 08-09 19:49:52] {986} INFO - iteration 49 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=56, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=56\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=52 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:53] {1134} INFO - at 23.0s,\tbest lgbm's error=0.3376,\tbest lgbm's error=0.3376\n", - "[flaml.automl: 08-09 19:49:53] {986} INFO - iteration 50 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=98, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=98\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:49:56] {1134} INFO - at 26.5s,\tbest lgbm's error=0.3351,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:49:56] {986} INFO - iteration 51 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=97, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=97\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=39 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:00] {1134} INFO - at 29.9s,\tbest lgbm's error=0.3351,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:00] {986} INFO - iteration 52 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=128, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=128\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:05] {1134} INFO - at 35.0s,\tbest lgbm's error=0.3351,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:05] {986} INFO - iteration 53 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:50:05] {1134} INFO - at 35.3s,\tbest xgboost's error=0.3517,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:05] {986} INFO - iteration 54 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=43, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=43\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=11 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:07] {1134} INFO - at 36.9s,\tbest lgbm's error=0.3351,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:07] {986} INFO - iteration 55 current learner catboost\n", - "[flaml.automl: 08-09 19:50:07] {1134} INFO - at 37.4s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:07] {986} INFO - iteration 56 current learner catboost\n", - "[flaml.automl: 08-09 19:50:08] {1134} INFO - at 37.6s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:08] {986} INFO - iteration 57 current learner catboost\n", - "[flaml.automl: 08-09 19:50:08] {1134} INFO - at 37.9s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:08] {986} INFO - iteration 58 current learner catboost\n", - "[flaml.automl: 08-09 19:50:08] {1134} INFO - at 38.1s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:08] {986} INFO - iteration 59 current learner catboost\n", - "[flaml.automl: 08-09 19:50:08] {1134} INFO - at 38.3s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:08] {986} INFO - iteration 60 current learner catboost\n", - "[flaml.automl: 08-09 19:50:09] {1134} INFO - at 38.6s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3351\n", - "[flaml.automl: 08-09 19:50:09] {986} INFO - iteration 61 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=55, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=55\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=16 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:12] {1134} INFO - at 42.5s,\tbest lgbm's error=0.3328,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:12] {986} INFO - iteration 62 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=117, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=117\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=4 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:14] {1134} INFO - at 44.4s,\tbest lgbm's error=0.3328,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:14] {986} INFO - iteration 63 current learner catboost\n", - "[flaml.automl: 08-09 19:50:15] {1134} INFO - at 44.7s,\tbest catboost's error=0.3515,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:15] {986} INFO - iteration 64 current learner catboost\n", - "[flaml.automl: 08-09 19:50:18] {1134} INFO - at 47.9s,\tbest catboost's error=0.3435,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:18] {986} INFO - iteration 65 current learner lgbm\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=128, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=128\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=12 will be ignored. 
Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:23] {1134} INFO - at 52.8s,\tbest lgbm's error=0.3328,\tbest lgbm's error=0.3328\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[LightGBM] [Warning] min_data_in_leaf is set=55, min_child_samples=20 will be ignored. Current value: min_data_in_leaf=55\n", - "[LightGBM] [Warning] num_leaves is set=31, max_leaves=16 will be ignored. Current value: num_leaves=31\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-09 19:50:26] {1156} INFO - retrain lgbm for 3.3s\n", - "[flaml.automl: 08-09 19:50:26] {986} INFO - iteration 66 current learner catboost\n", - "[flaml.automl: 08-09 19:50:27] {1134} INFO - at 57.4s,\tbest catboost's error=0.3435,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:29] {1156} INFO - retrain catboost for 1.3s\n", - "[flaml.automl: 08-09 19:50:29] {986} INFO - iteration 67 current learner xgboost\n", - "/Users/budigam.nagaraju/opt/anaconda3/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n", - "[flaml.automl: 08-09 19:50:29] {1134} INFO - at 58.9s,\tbest xgboost's error=0.3517,\tbest lgbm's error=0.3328\n", - "[flaml.automl: 08-09 19:50:30] {1156} INFO - retrain xgboost for 0.9s\n", - "[flaml.automl: 08-09 19:50:30] {1181} INFO - selected model: LGBMClassifier(colsample_bytree=0.7560357004495271,\n", - " learning_rate=0.28478479182882205, max_bin=31, max_leaves=16,\n", - " min_data_in_leaf=55, n_estimators=746, objective='binary',\n", - " reg_alpha=0.0009765625, reg_lambda=0.032652090008547976,\n", - " subsample=0.8847635935300631)\n", - "[flaml.automl: 08-09 19:50:30] {939} INFO - fit succeeded\n" - ] - }, - { - "data": { - "text/html": [ - "
Pipeline(steps=[('imputuer', SimpleImputer()),\n",
-       "                ('standardizer', StandardScaler()),\n",
-       "                ('automl', )])
SimpleImputer()
StandardScaler()
" - ], - "text/plain": [ - "Pipeline(steps=[('imputuer', SimpleImputer()),\n", - " ('standardizer', StandardScaler()),\n", - " ('automl', )])" - ] - }, - "execution_count": 49, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 8, "source": [ "automl_pipeline.fit(X_train, y_train, \n", " automl__time_budget=settings['time_budget'],\n", " automl__metric=settings['metric'],\n", " automl__estimator_list=settings['estimator_list'],\n", " automl__log_training_metric=True)" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "[flaml.automl: 08-22 21:32:13] {1130} INFO - Evaluation method: holdout\n", + "[flaml.automl: 08-22 21:32:14] {624} INFO - Using StratifiedKFold\n", + "[flaml.automl: 08-22 21:32:14] {1155} INFO - Minimizing error metric: 1-accuracy\n", + "[flaml.automl: 08-22 21:32:14] {1175} INFO - List of ML learners in AutoML Run: ['xgboost', 'catboost', 'lgbm']\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 0, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.5s,\tbest xgboost's error=0.3755,\tbest xgboost's error=0.3755\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 1, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.6s,\tbest xgboost's error=0.3755,\tbest xgboost's error=0.3755\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 2, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.6s,\tbest xgboost's error=0.3755,\tbest xgboost's error=0.3755\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 3, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.7s,\tbest xgboost's error=0.3755,\tbest xgboost's error=0.3755\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 4, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.7s,\tbest xgboost's error=0.3679,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.8s,\tbest lgbm's error=0.3811,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 6, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.8s,\tbest xgboost's error=0.3679,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 7, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 0.9s,\tbest xgboost's error=0.3679,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 8, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:14] {1515} INFO - at 1.0s,\tbest xgboost's error=0.3679,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:14] {1358} INFO - iteration 9, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.1s,\tbest lgbm's error=0.3811,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 10, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.1s,\tbest lgbm's error=0.3755,\tbest xgboost's error=0.3679\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 11, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.2s,\tbest xgboost's error=0.3637,\tbest xgboost's error=0.3637\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 12, current learner xgboost\n", + "[flaml.automl: 
08-22 21:32:15] {1515} INFO - at 1.4s,\tbest xgboost's error=0.3594,\tbest xgboost's error=0.3594\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 13, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.5s,\tbest xgboost's error=0.3594,\tbest xgboost's error=0.3594\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 14, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.7s,\tbest xgboost's error=0.3591,\tbest xgboost's error=0.3591\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 15, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 1.7s,\tbest lgbm's error=0.3647,\tbest xgboost's error=0.3591\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 16, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:15] {1515} INFO - at 2.0s,\tbest xgboost's error=0.3585,\tbest xgboost's error=0.3585\n", + "[flaml.automl: 08-22 21:32:15] {1358} INFO - iteration 17, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.0s,\tbest lgbm's error=0.3647,\tbest xgboost's error=0.3585\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.1s,\tbest lgbm's error=0.3629,\tbest xgboost's error=0.3585\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 19, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.3s,\tbest xgboost's error=0.3553,\tbest xgboost's error=0.3553\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 20, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.6s,\tbest xgboost's error=0.3553,\tbest xgboost's error=0.3553\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 21, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.7s,\tbest xgboost's error=0.3553,\tbest xgboost's error=0.3553\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.8s,\tbest lgbm's error=0.3629,\tbest xgboost's error=0.3553\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:16] {1515} INFO - at 2.9s,\tbest lgbm's error=0.3629,\tbest xgboost's error=0.3553\n", + "[flaml.automl: 08-22 21:32:16] {1358} INFO - iteration 24, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:17] {1515} INFO - at 3.1s,\tbest xgboost's error=0.3520,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:17] {1358} INFO - iteration 25, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:17] {1515} INFO - at 3.3s,\tbest xgboost's error=0.3520,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:17] {1358} INFO - iteration 26, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:17] {1515} INFO - at 3.4s,\tbest lgbm's error=0.3573,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:17] {1358} INFO - iteration 27, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:17] {1515} INFO - at 3.5s,\tbest lgbm's error=0.3573,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:17] {1358} INFO - iteration 28, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:17] {1515} INFO - at 3.9s,\tbest xgboost's error=0.3520,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:17] {1358} INFO - iteration 29, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:18] {1515} 
INFO - at 4.1s,\tbest xgboost's error=0.3520,\tbest xgboost's error=0.3520\n", + "[flaml.automl: 08-22 21:32:18] {1358} INFO - iteration 30, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:18] {1515} INFO - at 4.8s,\tbest xgboost's error=0.3485,\tbest xgboost's error=0.3485\n", + "[flaml.automl: 08-22 21:32:18] {1358} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:19] {1515} INFO - at 5.2s,\tbest lgbm's error=0.3573,\tbest xgboost's error=0.3485\n", + "[flaml.automl: 08-22 21:32:19] {1358} INFO - iteration 32, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:19] {1515} INFO - at 5.7s,\tbest xgboost's error=0.3485,\tbest xgboost's error=0.3485\n", + "[flaml.automl: 08-22 21:32:19] {1358} INFO - iteration 33, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:20] {1515} INFO - at 6.6s,\tbest xgboost's error=0.3485,\tbest xgboost's error=0.3485\n", + "[flaml.automl: 08-22 21:32:20] {1358} INFO - iteration 34, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:20] {1515} INFO - at 6.9s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:20] {1358} INFO - iteration 35, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:21] {1515} INFO - at 7.2s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:21] {1358} INFO - iteration 36, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:21] {1515} INFO - at 7.4s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:21] {1358} INFO - iteration 37, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:22] {1515} INFO - at 8.2s,\tbest xgboost's error=0.3485,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:22] {1358} INFO - iteration 38, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:22] {1515} INFO - at 8.5s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:22] {1358} INFO - iteration 39, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:22] {1515} INFO - at 8.8s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:22] {1358} INFO - iteration 40, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:23] {1515} INFO - at 9.7s,\tbest xgboost's error=0.3485,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:23] {1358} INFO - iteration 41, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:25] {1515} INFO - at 11.7s,\tbest lgbm's error=0.3481,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:25] {1358} INFO - iteration 42, current learner catboost\n", + "[flaml.automl: 08-22 21:32:26] {1515} INFO - at 12.2s,\tbest catboost's error=0.3647,\tbest lgbm's error=0.3481\n", + "[flaml.automl: 08-22 21:32:26] {1358} INFO - iteration 43, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:28] {1515} INFO - at 14.4s,\tbest lgbm's error=0.3427,\tbest lgbm's error=0.3427\n", + "[flaml.automl: 08-22 21:32:28] {1358} INFO - iteration 44, current learner catboost\n", + "[flaml.automl: 08-22 21:32:28] {1515} INFO - at 14.6s,\tbest catboost's error=0.3647,\tbest lgbm's error=0.3427\n", + "[flaml.automl: 08-22 21:32:28] {1358} INFO - iteration 45, current learner catboost\n", + "[flaml.automl: 08-22 21:32:28] {1515} INFO - at 14.8s,\tbest catboost's error=0.3601,\tbest lgbm's error=0.3427\n", + "[flaml.automl: 08-22 21:32:28] {1358} INFO - iteration 46, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:30] {1515} INFO - at 16.9s,\tbest lgbm's error=0.3427,\tbest lgbm's 
error=0.3427\n", + "[flaml.automl: 08-22 21:32:30] {1358} INFO - iteration 47, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:34] {1515} INFO - at 21.0s,\tbest xgboost's error=0.3332,\tbest xgboost's error=0.3332\n", + "[flaml.automl: 08-22 21:32:34] {1358} INFO - iteration 48, current learner catboost\n", + "[flaml.automl: 08-22 21:32:35] {1515} INFO - at 21.1s,\tbest catboost's error=0.3601,\tbest xgboost's error=0.3332\n", + "[flaml.automl: 08-22 21:32:35] {1358} INFO - iteration 49, current learner lgbm\n", + "[flaml.automl: 08-22 21:32:37] {1515} INFO - at 23.2s,\tbest lgbm's error=0.3409,\tbest xgboost's error=0.3332\n", + "[flaml.automl: 08-22 21:32:37] {1358} INFO - iteration 50, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:38] {1515} INFO - at 24.6s,\tbest xgboost's error=0.3332,\tbest xgboost's error=0.3332\n", + "[flaml.automl: 08-22 21:32:38] {1358} INFO - iteration 51, current learner xgboost\n", + "[flaml.automl: 08-22 21:32:53] {1515} INFO - at 40.0s,\tbest xgboost's error=0.3279,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:32:53] {1358} INFO - iteration 52, current learner xgboost\n", + "[flaml.automl: 08-22 21:33:01] {1515} INFO - at 47.6s,\tbest xgboost's error=0.3279,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:01] {1358} INFO - iteration 53, current learner catboost\n", + "[flaml.automl: 08-22 21:33:01] {1515} INFO - at 47.7s,\tbest catboost's error=0.3601,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:01] {1358} INFO - iteration 54, current learner catboost\n", + "[flaml.automl: 08-22 21:33:02] {1515} INFO - at 48.2s,\tbest catboost's error=0.3601,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:02] {1358} INFO - iteration 55, current learner catboost\n", + "[flaml.automl: 08-22 21:33:02] {1515} INFO - at 48.5s,\tbest catboost's error=0.3552,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:02] {1358} INFO - iteration 56, current learner catboost\n", + "[flaml.automl: 08-22 21:33:02] {1515} INFO - at 48.7s,\tbest catboost's error=0.3552,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:02] {1358} INFO - iteration 57, current learner catboost\n", + "[flaml.automl: 08-22 21:33:02] {1515} INFO - at 49.0s,\tbest catboost's error=0.3552,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:02] {1358} INFO - iteration 58, current learner catboost\n", + "[flaml.automl: 08-22 21:33:03] {1515} INFO - at 49.1s,\tbest catboost's error=0.3552,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:03] {1358} INFO - iteration 59, current learner catboost\n", + "[flaml.automl: 08-22 21:33:03] {1515} INFO - at 49.4s,\tbest catboost's error=0.3552,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:03] {1358} INFO - iteration 60, current learner catboost\n", + "[flaml.automl: 08-22 21:33:06] {1515} INFO - at 52.2s,\tbest catboost's error=0.3453,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:06] {1358} INFO - iteration 61, current learner catboost\n", + "[flaml.automl: 08-22 21:33:07] {1515} INFO - at 53.9s,\tbest catboost's error=0.3453,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:07] {1358} INFO - iteration 62, current learner catboost\n", + "[flaml.automl: 08-22 21:33:09] {1515} INFO - at 55.3s,\tbest catboost's error=0.3453,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:09] {1358} INFO - iteration 63, current learner catboost\n", + "[flaml.automl: 08-22 21:33:10] {1515} INFO - at 
56.4s,\tbest catboost's error=0.3453,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:10] {1358} INFO - iteration 64, current learner catboost\n", + "[flaml.automl: 08-22 21:33:11] {1515} INFO - at 57.5s,\tbest catboost's error=0.3453,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:11] {1358} INFO - iteration 65, current learner lgbm\n", + "[flaml.automl: 08-22 21:33:13] {1515} INFO - at 59.8s,\tbest lgbm's error=0.3409,\tbest xgboost's error=0.3279\n", + "[flaml.automl: 08-22 21:33:13] {1592} INFO - selected model: XGBClassifier(base_score=0.5, booster='gbtree',\n", + " colsample_bylevel=0.810466508891351, colsample_bynode=1,\n", + " colsample_bytree=0.8005378817953572, gamma=0, gpu_id=-1,\n", + " grow_policy='lossguide', importance_type='gain',\n", + " interaction_constraints='', learning_rate=0.06234183309508761,\n", + " max_delta_step=0, max_depth=0, max_leaves=1797,\n", + " min_child_weight=0.07275175679381725, missing=nan,\n", + " monotone_constraints='()', n_estimators=63, n_jobs=-1,\n", + " num_parallel_tree=1, random_state=0, reg_alpha=0.5768305704485758,\n", + " reg_lambda=6.867180836557797, scale_pos_weight=1,\n", + " subsample=0.9814772488195874, tree_method='hist',\n", + " use_label_encoder=False, validate_parameters=1, verbosity=0)\n", + "[flaml.automl: 08-22 21:33:26] {1633} INFO - retrain xgboost for 13.0s\n", + "[flaml.automl: 08-22 21:33:26] {1636} INFO - retrained model: XGBClassifier(base_score=0.5, booster='gbtree',\n", + " colsample_bylevel=0.810466508891351, colsample_bynode=1,\n", + " colsample_bytree=0.8005378817953572, gamma=0, gpu_id=-1,\n", + " grow_policy='lossguide', importance_type='gain',\n", + " interaction_constraints='', learning_rate=0.06234183309508761,\n", + " max_delta_step=0, max_depth=0, max_leaves=1797,\n", + " min_child_weight=0.07275175679381725, missing=nan,\n", + " monotone_constraints='()', n_estimators=63, n_jobs=-1,\n", + " num_parallel_tree=1, random_state=0, reg_alpha=0.5768305704485758,\n", + " reg_lambda=6.867180836557797, scale_pos_weight=1,\n", + " subsample=0.9814772488195874, tree_method='hist',\n", + " use_label_encoder=False, validate_parameters=1, verbosity=0)\n", + "[flaml.automl: 08-22 21:33:26] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:33:26] {1200} INFO - Time taken to find the best model: 40.023393869400024\n" + ] + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Pipeline(steps=[('imputuer', SimpleImputer()),\n", + " ('standardizer', StandardScaler()),\n", + " ('automl', )])" + ], + "text/html": [ + "
Pipeline(steps=[('imputuer', SimpleImputer()),\n",
+       "                ('standardizer', StandardScaler()),\n",
+       "                ('automl', )])
SimpleImputer()
StandardScaler()
" + ] + }, + "metadata": {}, + "execution_count": 8 + } + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best ML leaner: lgbm\n", - "Best hyperparmeter config: {'n_estimators': 746.0, 'max_leaves': 16.0, 'min_data_in_leaf': 55.0, 'learning_rate': 0.28478479182882205, 'subsample': 0.8847635935300631, 'log_max_bin': 5.0, 'colsample_bytree': 0.7560357004495271, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.032652090008547976, 'FLAML_sample_size': 364083}\n", - "Best accuracy on validation data: 0.6672\n", - "Training duration of best run: 3.921 s\n" - ] - } - ], + "execution_count": 9, "source": [ "# Get the automl object from the pipeline\n", "automl = automl_pipeline.steps[2][1]\n", @@ -864,73 +420,63 @@ "print('Best hyperparmeter config:', automl.best_config)\n", "print('Best accuracy on validation data: {0:.4g}'.format(1-automl.best_loss))\n", "print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Best ML leaner: xgboost\n", + "Best hyperparmeter config: {'n_estimators': 63, 'max_leaves': 1797, 'min_child_weight': 0.07275175679381725, 'learning_rate': 0.06234183309508761, 'subsample': 0.9814772488195874, 'colsample_bylevel': 0.810466508891351, 'colsample_bytree': 0.8005378817953572, 'reg_alpha': 0.5768305704485758, 'reg_lambda': 6.867180836557797, 'FLAML_sample_size': 364083}\n", + "Best accuracy on validation data: 0.6721\n", + "Training duration of best run: 15.45 s\n" + ] + } + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
{ "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "[sklearn HTML repr of the tuned LGBMClassifier omitted; identical to the text/plain repr below]
" - ], - "text/plain": [ - "LGBMClassifier(colsample_bytree=0.7560357004495271,\n", - " learning_rate=0.28478479182882205, max_bin=31, max_leaves=16,\n", - " min_data_in_leaf=55, n_estimators=746, objective='binary',\n", - " reg_alpha=0.0009765625, reg_lambda=0.032652090008547976,\n", - " subsample=0.8847635935300631)" - ] - }, - "execution_count": 52, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 10, "source": [ "automl.model" - ] + ], + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": {}, + "execution_count": 10 + } + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 4. Persist the model binary file" - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [], + "execution_count": 11, "source": [ "# Persist the automl object as pickle file\n", "import pickle\n", "with open('automl.pkl', 'wb') as f:\n", " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "code", - "execution_count": 54, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Predicted labels [0 1 1 ... 0 1 0]\n", - "True labels [0 0 0 ... 1 0 1]\n", - "Predicted probas [0.36424183 0.59111937 0.64600957 0.27020691 0.23272711]\n" - ] - } - ], + "execution_count": 12, "source": [ "# Performance inference on the testing dataset\n", "y_pred = automl_pipeline.predict(X_test)\n", @@ -938,14 +484,25 @@ "print('True labels', y_test)\n", "y_pred_proba = automl_pipeline.predict_proba(X_test)[:,1]\n", "print('Predicted probas ',y_pred_proba[:5])" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Predicted labels [0 1 1 ... 0 1 0]\n", + "True labels [0 0 0 ... 
1 0 1]\n", + "Predicted probas [0.3764987 0.6126277 0.699604 0.27359942 0.25294745]\n" + ] + } + ], + "metadata": {} } ], "metadata": { "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" + "name": "python3", + "display_name": "Python 3.8.0 64-bit ('blend': conda)" }, "language_info": { "codemirror_mode": { @@ -957,7 +514,10 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3" + "version": "3.8.0" + }, + "interpreter": { + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" } }, "nbformat": 4, diff --git a/notebook/flaml_automl.ipynb b/notebook/flaml_automl.ipynb index 80c427778d..aa4919d672 100644 --- a/notebook/flaml_automl.ipynb +++ b/notebook/flaml_automl.ipynb @@ -68,7 +68,7 @@ "output_type": "stream", "name": "stdout", "text": [ - "load dataset from ./openml_ds1169.pkl\n", + "download dataset from openml\n", "Dataset name: airlines\n", "X_train.shape: (404537, 7), y_train.shape: (404537,);\n", "X_test.shape: (134846, 7), y_test.shape: (134846,)\n" @@ -140,222 +140,291 @@ "output_type": "stream", "name": "stderr", "text": [ - "[flaml.automl: 08-13 17:55:42] {1121} INFO - Evaluation method: holdout\n", - "[flaml.automl: 08-13 17:55:42] {618} INFO - Using StratifiedKFold\n", - "[flaml.automl: 08-13 17:55:42] {1142} INFO - Minimizing error metric: 1-accuracy\n", - "[flaml.automl: 08-13 17:55:42] {1162} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'lrl1']\n", - "[flaml.automl: 08-13 17:55:42] {1252} INFO - iteration 0, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 1.9s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 1, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 1.9s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 2, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 2.0s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 3, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 2.1s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 4, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 2.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 5, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:43] {1405} INFO - at 2.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 17:55:43] {1252} INFO - iteration 6, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.3s,\tbest lgbm's error=0.3742,\tbest lgbm's error=0.3742\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 7, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.4s,\tbest lgbm's error=0.3613,\tbest lgbm's error=0.3613\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.5s,\tbest lgbm's error=0.3613,\tbest lgbm's error=0.3613\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 9, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.7s,\tbest lgbm's error=0.3613,\tbest lgbm's error=0.3613\n", - 
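Note that the persistence cell in the hunk above pickles only the automl step, not the whole pipeline, so a reloaded object expects input that the imputer and scaler have already transformed. A minimal reload-and-predict sketch under that assumption (automl.pkl as written by the notebook; X_test preprocessed accordingly):

import pickle

# Reload the AutoML object saved by the persistence cell.
with open('automl.pkl', 'rb') as f:
    automl = pickle.load(f)

y_pred = automl.predict(X_test)                    # hard class labels
y_pred_proba = automl.predict_proba(X_test)[:, 1]  # probability of the positive class

Pickling automl_pipeline itself would instead keep the preprocessing steps attached, matching the automl_pipeline.predict(X_test) calls shown in the diff.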
"[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.8s,\tbest lgbm's error=0.3613,\tbest lgbm's error=0.3613\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 11, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 2.9s,\tbest lgbm's error=0.3613,\tbest lgbm's error=0.3613\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 12, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 3.1s,\tbest lgbm's error=0.3573,\tbest lgbm's error=0.3573\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 13, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:44] {1405} INFO - at 3.2s,\tbest lgbm's error=0.3573,\tbest lgbm's error=0.3573\n", - "[flaml.automl: 08-13 17:55:44] {1252} INFO - iteration 14, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:45] {1405} INFO - at 3.5s,\tbest lgbm's error=0.3570,\tbest lgbm's error=0.3570\n", - "[flaml.automl: 08-13 17:55:45] {1252} INFO - iteration 15, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:45] {1405} INFO - at 3.8s,\tbest lgbm's error=0.3570,\tbest lgbm's error=0.3570\n", - "[flaml.automl: 08-13 17:55:45] {1252} INFO - iteration 16, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:45] {1405} INFO - at 4.1s,\tbest lgbm's error=0.3510,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:45] {1252} INFO - iteration 17, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:46] {1405} INFO - at 4.3s,\tbest lgbm's error=0.3510,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:46] {1252} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:46] {1405} INFO - at 5.0s,\tbest lgbm's error=0.3510,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:46] {1252} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:46] {1405} INFO - at 5.2s,\tbest lgbm's error=0.3510,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:46] {1252} INFO - iteration 20, current learner xgboost\n", - "[flaml.automl: 08-13 17:55:47] {1405} INFO - at 5.4s,\tbest xgboost's error=0.3787,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:47] {1252} INFO - iteration 21, current learner xgboost\n", - "[flaml.automl: 08-13 17:55:47] {1405} INFO - at 5.5s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:47] {1252} INFO - iteration 22, current learner xgboost\n", - "[flaml.automl: 08-13 17:55:47] {1405} INFO - at 5.7s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:47] {1252} INFO - iteration 23, current learner xgboost\n", - "[flaml.automl: 08-13 17:55:47] {1405} INFO - at 5.8s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:47] {1252} INFO - iteration 24, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:48] {1405} INFO - at 6.6s,\tbest lgbm's error=0.3510,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:48] {1252} INFO - iteration 25, current learner extra_tree\n", - "[flaml.automl: 08-13 17:55:48] {1405} INFO - at 6.8s,\tbest extra_tree's error=0.3773,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:48] {1252} INFO - iteration 26, current learner extra_tree\n", - "[flaml.automl: 08-13 17:55:48] {1405} INFO - at 7.0s,\tbest extra_tree's error=0.3760,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:48] {1252} INFO - iteration 27, current learner rf\n", - 
"[flaml.automl: 08-13 17:55:48] {1405} INFO - at 7.2s,\tbest rf's error=0.3787,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:48] {1252} INFO - iteration 28, current learner rf\n", - "[flaml.automl: 08-13 17:55:49] {1405} INFO - at 7.4s,\tbest rf's error=0.3709,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:49] {1252} INFO - iteration 29, current learner xgboost\n", - "[flaml.automl: 08-13 17:55:49] {1405} INFO - at 7.5s,\tbest xgboost's error=0.3765,\tbest lgbm's error=0.3510\n", - "[flaml.automl: 08-13 17:55:49] {1252} INFO - iteration 30, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:51] {1405} INFO - at 9.5s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:51] {1252} INFO - iteration 31, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:53] {1405} INFO - at 11.4s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:53] {1252} INFO - iteration 32, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:55] {1405} INFO - at 13.8s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:55] {1252} INFO - iteration 33, current learner rf\n", - "[flaml.automl: 08-13 17:55:55] {1405} INFO - at 14.1s,\tbest rf's error=0.3709,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:55] {1252} INFO - iteration 34, current learner lgbm\n", - "[flaml.automl: 08-13 17:55:57] {1405} INFO - at 15.4s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:57] {1252} INFO - iteration 35, current learner rf\n", - "[flaml.automl: 08-13 17:55:57] {1405} INFO - at 15.7s,\tbest rf's error=0.3709,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:57] {1252} INFO - iteration 36, current learner extra_tree\n", - "[flaml.automl: 08-13 17:55:57] {1405} INFO - at 15.8s,\tbest extra_tree's error=0.3760,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:57] {1252} INFO - iteration 37, current learner rf\n", - "[flaml.automl: 08-13 17:55:57] {1405} INFO - at 16.0s,\tbest rf's error=0.3654,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:57] {1252} INFO - iteration 38, current learner rf\n", - "[flaml.automl: 08-13 17:55:57] {1405} INFO - at 16.3s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:55:57] {1252} INFO - iteration 39, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:02] {1405} INFO - at 20.5s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:02] {1252} INFO - iteration 40, current learner rf\n", - "[flaml.automl: 08-13 17:56:02] {1405} INFO - at 20.8s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:02] {1252} INFO - iteration 41, current learner extra_tree\n", - "[flaml.automl: 08-13 17:56:02] {1405} INFO - at 21.0s,\tbest extra_tree's error=0.3760,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:02] {1252} INFO - iteration 42, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:03] {1405} INFO - at 21.9s,\tbest lgbm's error=0.3465,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:03] {1252} INFO - iteration 43, current learner rf\n", - "[flaml.automl: 08-13 17:56:04] {1405} INFO - at 22.3s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:04] {1252} INFO - iteration 44, current learner catboost\n", - "[flaml.automl: 08-13 17:56:05] {1405} INFO - at 23.6s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3465\n", - 
"[flaml.automl: 08-13 17:56:05] {1252} INFO - iteration 45, current learner extra_tree\n", - "[flaml.automl: 08-13 17:56:05] {1405} INFO - at 23.7s,\tbest extra_tree's error=0.3709,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:05] {1252} INFO - iteration 46, current learner catboost\n", - "[flaml.automl: 08-13 17:56:06] {1405} INFO - at 24.5s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:06] {1252} INFO - iteration 47, current learner catboost\n", - "[flaml.automl: 08-13 17:56:06] {1405} INFO - at 24.9s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:06] {1252} INFO - iteration 48, current learner catboost\n", - "[flaml.automl: 08-13 17:56:07] {1405} INFO - at 25.6s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3465\n", - "[flaml.automl: 08-13 17:56:07] {1252} INFO - iteration 49, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:13] {1405} INFO - at 31.4s,\tbest lgbm's error=0.3335,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:13] {1252} INFO - iteration 50, current learner catboost\n", - "[flaml.automl: 08-13 17:56:13] {1405} INFO - at 31.8s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:13] {1252} INFO - iteration 51, current learner extra_tree\n", - "[flaml.automl: 08-13 17:56:13] {1405} INFO - at 32.1s,\tbest extra_tree's error=0.3709,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:13] {1252} INFO - iteration 52, current learner catboost\n", - "[flaml.automl: 08-13 17:56:14] {1405} INFO - at 32.6s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:14] {1252} INFO - iteration 53, current learner catboost\n", - "[flaml.automl: 08-13 17:56:18] {1405} INFO - at 36.3s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:18] {1252} INFO - iteration 54, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:22] {1405} INFO - at 40.9s,\tbest lgbm's error=0.3335,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:22] {1252} INFO - iteration 55, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:31] {1405} INFO - at 50.0s,\tbest lgbm's error=0.3335,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:31] {1252} INFO - iteration 56, current learner extra_tree\n", - "[flaml.automl: 08-13 17:56:31] {1405} INFO - at 50.2s,\tbest extra_tree's error=0.3664,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:31] {1252} INFO - iteration 57, current learner rf\n", - "[flaml.automl: 08-13 17:56:32] {1405} INFO - at 50.4s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:32] {1252} INFO - iteration 58, current learner rf\n", - "[flaml.automl: 08-13 17:56:32] {1405} INFO - at 50.7s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:32] {1252} INFO - iteration 59, current learner xgboost\n", - "[flaml.automl: 08-13 17:56:32] {1405} INFO - at 50.8s,\tbest xgboost's error=0.3765,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:32] {1252} INFO - iteration 60, current learner lgbm\n", - "[flaml.automl: 08-13 17:56:34] {1405} INFO - at 52.6s,\tbest lgbm's error=0.3335,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 17:56:34] {1252} INFO - iteration 61, current learner extra_tree\n", - "[flaml.automl: 08-13 17:56:34] {1405} INFO - at 52.9s,\tbest extra_tree's error=0.3643,\tbest lgbm's error=0.3335\n", - "[flaml.automl: 08-13 
17:56:34] {1252} INFO - iteration 62, current learner lgbm\n", - "[flaml.automl: 08-13 17:57:04] {1405} INFO - at 82.4s,\tbest lgbm's error=0.3277,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:04] {1252} INFO - iteration 63, current learner extra_tree\n", - "[flaml.automl: 08-13 17:57:04] {1405} INFO - at 82.6s,\tbest extra_tree's error=0.3643,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:04] {1252} INFO - iteration 64, current learner extra_tree\n", - "[flaml.automl: 08-13 17:57:04] {1405} INFO - at 82.9s,\tbest extra_tree's error=0.3643,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:04] {1252} INFO - iteration 65, current learner lgbm\n", - "[flaml.automl: 08-13 17:57:35] {1405} INFO - at 113.3s,\tbest lgbm's error=0.3277,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:35] {1252} INFO - iteration 66, current learner catboost\n", - "[flaml.automl: 08-13 17:57:36] {1405} INFO - at 114.6s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:36] {1252} INFO - iteration 67, current learner extra_tree\n", - "[flaml.automl: 08-13 17:57:36] {1405} INFO - at 114.9s,\tbest extra_tree's error=0.3643,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:36] {1252} INFO - iteration 68, current learner catboost\n", - "[flaml.automl: 08-13 17:57:38] {1405} INFO - at 117.1s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:57:38] {1252} INFO - iteration 69, current learner lgbm\n", - "[flaml.automl: 08-13 17:58:18] {1405} INFO - at 156.9s,\tbest lgbm's error=0.3277,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:18] {1252} INFO - iteration 70, current learner catboost\n", - "[flaml.automl: 08-13 17:58:20] {1405} INFO - at 159.2s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:20] {1252} INFO - iteration 71, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:21] {1405} INFO - at 159.6s,\tbest xgboost's error=0.3667,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:21] {1252} INFO - iteration 72, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:21] {1405} INFO - at 159.8s,\tbest xgboost's error=0.3667,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:21] {1252} INFO - iteration 73, current learner extra_tree\n", - "[flaml.automl: 08-13 17:58:21] {1405} INFO - at 160.1s,\tbest extra_tree's error=0.3643,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:21] {1252} INFO - iteration 74, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:22] {1405} INFO - at 160.4s,\tbest xgboost's error=0.3667,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:22] {1252} INFO - iteration 75, current learner extra_tree\n", - "[flaml.automl: 08-13 17:58:22] {1405} INFO - at 160.8s,\tbest extra_tree's error=0.3636,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:22] {1252} INFO - iteration 76, current learner rf\n", - "[flaml.automl: 08-13 17:58:23] {1405} INFO - at 161.5s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:23] {1252} INFO - iteration 77, current learner rf\n", - "[flaml.automl: 08-13 17:58:23] {1405} INFO - at 162.0s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:23] {1252} INFO - iteration 78, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:24] {1405} INFO - at 162.5s,\tbest xgboost's error=0.3625,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 
17:58:24] {1252} INFO - iteration 79, current learner catboost\n", - "[flaml.automl: 08-13 17:58:26] {1405} INFO - at 164.4s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:26] {1252} INFO - iteration 80, current learner rf\n", - "[flaml.automl: 08-13 17:58:26] {1405} INFO - at 165.2s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:26] {1252} INFO - iteration 81, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:27] {1405} INFO - at 165.5s,\tbest xgboost's error=0.3625,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:27] {1252} INFO - iteration 82, current learner catboost\n", - "[flaml.automl: 08-13 17:58:28] {1405} INFO - at 166.6s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:28] {1252} INFO - iteration 83, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:28] {1405} INFO - at 166.9s,\tbest xgboost's error=0.3625,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:28] {1252} INFO - iteration 84, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:28] {1405} INFO - at 167.3s,\tbest xgboost's error=0.3611,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:28] {1252} INFO - iteration 85, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:29] {1405} INFO - at 167.6s,\tbest xgboost's error=0.3611,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:29] {1252} INFO - iteration 86, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:29] {1405} INFO - at 167.9s,\tbest xgboost's error=0.3611,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:29] {1252} INFO - iteration 87, current learner xgboost\n", - "[flaml.automl: 08-13 17:58:30] {1405} INFO - at 168.4s,\tbest xgboost's error=0.3611,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:30] {1252} INFO - iteration 88, current learner rf\n", - "[flaml.automl: 08-13 17:58:30] {1405} INFO - at 168.9s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:30] {1252} INFO - iteration 89, current learner rf\n", - "[flaml.automl: 08-13 17:58:31] {1405} INFO - at 169.7s,\tbest rf's error=0.3634,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:31] {1252} INFO - iteration 90, current learner extra_tree\n", - "[flaml.automl: 08-13 17:58:31] {1405} INFO - at 170.0s,\tbest extra_tree's error=0.3636,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:31] {1252} INFO - iteration 91, current learner catboost\n", - "[flaml.automl: 08-13 17:58:33] {1405} INFO - at 171.9s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:33] {1252} INFO - iteration 92, current learner extra_tree\n", - "[flaml.automl: 08-13 17:58:33] {1405} INFO - at 172.3s,\tbest extra_tree's error=0.3636,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:33] {1252} INFO - iteration 93, current learner catboost\n", - "[flaml.automl: 08-13 17:58:35] {1405} INFO - at 174.2s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:58:35] {1252} INFO - iteration 94, current learner catboost\n", - "[flaml.automl: 08-13 17:59:07] {1405} INFO - at 205.6s,\tbest catboost's error=0.3372,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:59:07] {1252} INFO - iteration 95, current learner lgbm\n", - "[flaml.automl: 08-13 17:59:15] {1405} INFO - at 213.3s,\tbest lgbm's error=0.3277,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:59:15] 
{1252} INFO - iteration 96, current learner extra_tree\n", - "[flaml.automl: 08-13 17:59:15] {1405} INFO - at 213.9s,\tbest extra_tree's error=0.3636,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 17:59:15] {1252} INFO - iteration 97, current learner catboost\n", - "[flaml.automl: 08-13 18:00:34] {1405} INFO - at 293.1s,\tbest catboost's error=0.3372,\tbest lgbm's error=0.3277\n", - "[flaml.automl: 08-13 18:00:34] {1252} INFO - iteration 98, current learner xgboost\n", - "[flaml.automl: 08-13 18:00:34] {1422} INFO - no enough budget for learner xgboost\n", - "[flaml.automl: 08-13 18:00:34] {1252} INFO - iteration 99, current learner lrl1\n", + "[flaml.automl: 08-22 20:58:37] {1130} INFO - Evaluation method: holdout\n", + "[flaml.automl: 08-22 20:58:37] {624} INFO - Using StratifiedKFold\n", + "[flaml.automl: 08-22 20:58:37] {1155} INFO - Minimizing error metric: 1-accuracy\n", + "[flaml.automl: 08-22 20:58:37] {1175} INFO - List of ML learners in AutoML Run: ['lgbm', 'rf', 'catboost', 'xgboost', 'extra_tree', 'lrl1']\n", + "[flaml.automl: 08-22 20:58:37] {1358} INFO - iteration 0, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:37] {1515} INFO - at 1.1s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 20:58:37] {1358} INFO - iteration 1, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 2, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.3s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 3, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.4s,\tbest lgbm's error=0.3661,\tbest lgbm's error=0.3661\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 4, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.5s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.6s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 6, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.7s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 7, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.8s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 8, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:38] {1515} INFO - at 1.9s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 20:58:38] {1358} INFO - iteration 9, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:39] {1515} INFO - at 2.4s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 10, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:39] {1515} INFO - at 2.7s,\tbest lgbm's error=0.3604,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 11, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:39] {1515} INFO - at 2.9s,\tbest xgboost's error=0.3787,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 12, current learner xgboost\n", + 
"[flaml.automl: 08-22 20:58:39] {1515} INFO - at 2.9s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 13, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:39] {1515} INFO - at 3.0s,\tbest extra_tree's error=0.3788,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 14, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:39] {1515} INFO - at 3.1s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:39] {1358} INFO - iteration 15, current learner rf\n", + "[flaml.automl: 08-22 20:58:40] {1515} INFO - at 3.3s,\tbest rf's error=0.3787,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:40] {1358} INFO - iteration 16, current learner rf\n", + "[flaml.automl: 08-22 20:58:40] {1515} INFO - at 3.4s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:40] {1358} INFO - iteration 17, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:40] {1515} INFO - at 3.5s,\tbest xgboost's error=0.3765,\tbest lgbm's error=0.3604\n", + "[flaml.automl: 08-22 20:58:40] {1358} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:40] {1515} INFO - at 3.8s,\tbest lgbm's error=0.3549,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:40] {1358} INFO - iteration 19, current learner rf\n", + "[flaml.automl: 08-22 20:58:40] {1515} INFO - at 3.9s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:40] {1358} INFO - iteration 20, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:41] {1515} INFO - at 4.2s,\tbest lgbm's error=0.3549,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:41] {1358} INFO - iteration 21, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:41] {1515} INFO - at 4.7s,\tbest lgbm's error=0.3549,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:41] {1358} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:41] {1515} INFO - at 5.0s,\tbest lgbm's error=0.3549,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:41] {1358} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 5.4s,\tbest lgbm's error=0.3549,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 24, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 5.5s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 25, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 5.6s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 26, current learner rf\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 5.8s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 27, current learner rf\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 5.9s,\tbest rf's error=0.3668,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 28, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 6.0s,\tbest extra_tree's error=0.3763,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 29, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:42] {1515} INFO - at 6.2s,\tbest extra_tree's 
error=0.3747,\tbest lgbm's error=0.3549\n", + "[flaml.automl: 08-22 20:58:42] {1358} INFO - iteration 30, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:45] {1515} INFO - at 8.5s,\tbest lgbm's error=0.3491,\tbest lgbm's error=0.3491\n", + "[flaml.automl: 08-22 20:58:45] {1358} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:47] {1515} INFO - at 10.3s,\tbest lgbm's error=0.3422,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:47] {1358} INFO - iteration 32, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:49] {1515} INFO - at 12.4s,\tbest lgbm's error=0.3422,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:49] {1358} INFO - iteration 33, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:49] {1515} INFO - at 12.5s,\tbest xgboost's error=0.3746,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:49] {1358} INFO - iteration 34, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:50] {1515} INFO - at 13.9s,\tbest lgbm's error=0.3422,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:50] {1358} INFO - iteration 35, current learner rf\n", + "[flaml.automl: 08-22 20:58:50] {1515} INFO - at 14.1s,\tbest rf's error=0.3668,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:50] {1358} INFO - iteration 36, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:50] {1515} INFO - at 14.2s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:50] {1358} INFO - iteration 37, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:51] {1515} INFO - at 14.3s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:51] {1358} INFO - iteration 38, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:51] {1515} INFO - at 14.3s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:51] {1358} INFO - iteration 39, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:51] {1515} INFO - at 14.4s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:51] {1358} INFO - iteration 40, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:51] {1515} INFO - at 14.6s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:51] {1358} INFO - iteration 41, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:51] {1515} INFO - at 14.7s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3422\n", + "[flaml.automl: 08-22 20:58:51] {1358} INFO - iteration 42, current learner lgbm\n", + "[flaml.automl: 08-22 20:58:54] {1515} INFO - at 17.6s,\tbest lgbm's error=0.3385,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:54] {1358} INFO - iteration 43, current learner xgboost\n", + "[flaml.automl: 08-22 20:58:54] {1515} INFO - at 17.7s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:54] {1358} INFO - iteration 44, current learner catboost\n", + "[flaml.automl: 08-22 20:58:55] {1515} INFO - at 18.5s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:55] {1358} INFO - iteration 45, current learner extra_tree\n", + "[flaml.automl: 08-22 20:58:55] {1515} INFO - at 18.7s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:55] {1358} INFO - iteration 46, current learner catboost\n", + "[flaml.automl: 08-22 20:58:56] {1515} INFO - at 19.4s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3385\n", 
+ "[flaml.automl: 08-22 20:58:56] {1358} INFO - iteration 47, current learner catboost\n", + "[flaml.automl: 08-22 20:58:56] {1515} INFO - at 19.7s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:56] {1358} INFO - iteration 48, current learner catboost\n", + "[flaml.automl: 08-22 20:58:57] {1515} INFO - at 20.8s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3385\n", + "[flaml.automl: 08-22 20:58:57] {1358} INFO - iteration 49, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:02] {1515} INFO - at 25.8s,\tbest lgbm's error=0.3378,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:02] {1358} INFO - iteration 50, current learner catboost\n", + "[flaml.automl: 08-22 20:59:02] {1515} INFO - at 26.1s,\tbest catboost's error=0.3602,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:02] {1358} INFO - iteration 51, current learner xgboost\n", + "[flaml.automl: 08-22 20:59:03] {1515} INFO - at 26.3s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:03] {1358} INFO - iteration 52, current learner rf\n", + "[flaml.automl: 08-22 20:59:03] {1515} INFO - at 26.5s,\tbest rf's error=0.3659,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:03] {1358} INFO - iteration 53, current learner rf\n", + "[flaml.automl: 08-22 20:59:03] {1515} INFO - at 26.7s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:03] {1358} INFO - iteration 54, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:06] {1515} INFO - at 29.9s,\tbest lgbm's error=0.3378,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:06] {1358} INFO - iteration 55, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:12] {1515} INFO - at 35.8s,\tbest lgbm's error=0.3378,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:12] {1358} INFO - iteration 56, current learner catboost\n", + "[flaml.automl: 08-22 20:59:14] {1515} INFO - at 37.6s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:14] {1358} INFO - iteration 57, current learner rf\n", + "[flaml.automl: 08-22 20:59:14] {1515} INFO - at 37.8s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:14] {1358} INFO - iteration 58, current learner rf\n", + "[flaml.automl: 08-22 20:59:14] {1515} INFO - at 38.0s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:14] {1358} INFO - iteration 59, current learner catboost\n", + "[flaml.automl: 08-22 20:59:21] {1515} INFO - at 45.0s,\tbest catboost's error=0.3493,\tbest lgbm's error=0.3378\n", + "[flaml.automl: 08-22 20:59:21] {1358} INFO - iteration 60, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:25] {1515} INFO - at 48.6s,\tbest lgbm's error=0.3346,\tbest lgbm's error=0.3346\n", + "[flaml.automl: 08-22 20:59:25] {1358} INFO - iteration 61, current learner catboost\n", + "[flaml.automl: 08-22 20:59:26] {1515} INFO - at 50.0s,\tbest catboost's error=0.3475,\tbest lgbm's error=0.3346\n", + "[flaml.automl: 08-22 20:59:26] {1358} INFO - iteration 62, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:30] {1515} INFO - at 54.1s,\tbest lgbm's error=0.3335,\tbest lgbm's error=0.3335\n", + "[flaml.automl: 08-22 20:59:30] {1358} INFO - iteration 63, current learner rf\n", + "[flaml.automl: 08-22 20:59:31] {1515} INFO - at 54.4s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3335\n", + "[flaml.automl: 08-22 20:59:31] {1358} INFO - iteration 64, current learner 
catboost\n", + "[flaml.automl: 08-22 20:59:32] {1515} INFO - at 55.5s,\tbest catboost's error=0.3475,\tbest lgbm's error=0.3335\n", + "[flaml.automl: 08-22 20:59:32] {1358} INFO - iteration 65, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:35] {1515} INFO - at 59.1s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:35] {1358} INFO - iteration 66, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:42] {1515} INFO - at 65.4s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:42] {1358} INFO - iteration 67, current learner xgboost\n", + "[flaml.automl: 08-22 20:59:42] {1515} INFO - at 65.5s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:42] {1358} INFO - iteration 68, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:44] {1515} INFO - at 67.7s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:44] {1358} INFO - iteration 69, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:46] {1515} INFO - at 70.2s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:46] {1358} INFO - iteration 70, current learner rf\n", + "[flaml.automl: 08-22 20:59:47] {1515} INFO - at 70.3s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:47] {1358} INFO - iteration 71, current learner xgboost\n", + "[flaml.automl: 08-22 20:59:47] {1515} INFO - at 70.5s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:47] {1358} INFO - iteration 72, current learner rf\n", + "[flaml.automl: 08-22 20:59:47] {1515} INFO - at 70.8s,\tbest rf's error=0.3633,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:47] {1358} INFO - iteration 73, current learner extra_tree\n", + "[flaml.automl: 08-22 20:59:47] {1515} INFO - at 70.9s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:47] {1358} INFO - iteration 74, current learner catboost\n", + "[flaml.automl: 08-22 20:59:48] {1515} INFO - at 71.9s,\tbest catboost's error=0.3475,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:48] {1358} INFO - iteration 75, current learner extra_tree\n", + "[flaml.automl: 08-22 20:59:48] {1515} INFO - at 72.1s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:48] {1358} INFO - iteration 76, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:54] {1515} INFO - at 77.6s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:54] {1358} INFO - iteration 77, current learner lgbm\n", + "[flaml.automl: 08-22 20:59:56] {1515} INFO - at 79.7s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:56] {1358} INFO - iteration 78, current learner xgboost\n", + "[flaml.automl: 08-22 20:59:56] {1515} INFO - at 79.8s,\tbest xgboost's error=0.3591,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:56] {1358} INFO - iteration 79, current learner rf\n", + "[flaml.automl: 08-22 20:59:56] {1515} INFO - at 80.2s,\tbest rf's error=0.3598,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 20:59:56] {1358} INFO - iteration 80, current learner lgbm\n", + "[flaml.automl: 08-22 21:00:02] {1515} INFO - at 85.8s,\tbest lgbm's error=0.3334,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:02] {1358} INFO - iteration 81, current learner extra_tree\n", + "[flaml.automl: 08-22 21:00:02] {1515} INFO - at 
86.0s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:02] {1358} INFO - iteration 82, current learner rf\n", + "[flaml.automl: 08-22 21:00:03] {1515} INFO - at 86.2s,\tbest rf's error=0.3598,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:03] {1358} INFO - iteration 83, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:03] {1515} INFO - at 86.5s,\tbest xgboost's error=0.3574,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:03] {1358} INFO - iteration 84, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:03] {1515} INFO - at 86.8s,\tbest xgboost's error=0.3574,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:03] {1358} INFO - iteration 85, current learner extra_tree\n", + "[flaml.automl: 08-22 21:00:03] {1515} INFO - at 87.0s,\tbest extra_tree's error=0.3747,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:03] {1358} INFO - iteration 86, current learner catboost\n", + "[flaml.automl: 08-22 21:00:08] {1515} INFO - at 91.9s,\tbest catboost's error=0.3475,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:08] {1358} INFO - iteration 87, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:09] {1515} INFO - at 92.2s,\tbest xgboost's error=0.3534,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:09] {1358} INFO - iteration 88, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:09] {1515} INFO - at 92.5s,\tbest xgboost's error=0.3534,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:09] {1358} INFO - iteration 89, current learner rf\n", + "[flaml.automl: 08-22 21:00:09] {1515} INFO - at 93.1s,\tbest rf's error=0.3598,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:09] {1358} INFO - iteration 90, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:10] {1515} INFO - at 93.9s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:10] {1358} INFO - iteration 91, current learner catboost\n", + "[flaml.automl: 08-22 21:00:17] {1515} INFO - at 101.0s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:17] {1358} INFO - iteration 92, current learner extra_tree\n", + "[flaml.automl: 08-22 21:00:17] {1515} INFO - at 101.2s,\tbest extra_tree's error=0.3697,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:17] {1358} INFO - iteration 93, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:18] {1515} INFO - at 101.6s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:18] {1358} INFO - iteration 94, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:21] {1515} INFO - at 104.4s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3334\n", + "[flaml.automl: 08-22 21:00:21] {1358} INFO - iteration 95, current learner lgbm\n", + "[flaml.automl: 08-22 21:00:24] {1515} INFO - at 107.4s,\tbest lgbm's error=0.3316,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:24] {1358} INFO - iteration 96, current learner extra_tree\n", + "[flaml.automl: 08-22 21:00:24] {1515} INFO - at 107.6s,\tbest extra_tree's error=0.3688,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:24] {1358} INFO - iteration 97, current learner catboost\n", + "[flaml.automl: 08-22 21:00:52] {1515} INFO - at 135.7s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:52] {1358} INFO - iteration 98, current learner xgboost\n", + "[flaml.automl: 08-22 21:00:53] {1515} INFO - at 
136.4s,\tbest xgboost's error=0.3504,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:53] {1358} INFO - iteration 99, current learner catboost\n", + "[flaml.automl: 08-22 21:00:59] {1515} INFO - at 142.3s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:59] {1358} INFO - iteration 100, current learner rf\n", + "[flaml.automl: 08-22 21:00:59] {1515} INFO - at 142.7s,\tbest rf's error=0.3572,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:59] {1358} INFO - iteration 101, current learner extra_tree\n", + "[flaml.automl: 08-22 21:00:59] {1515} INFO - at 142.9s,\tbest extra_tree's error=0.3688,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:00:59] {1358} INFO - iteration 102, current learner catboost\n", + "[flaml.automl: 08-22 21:02:06] {1515} INFO - at 209.6s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:06] {1358} INFO - iteration 103, current learner lgbm\n", + "[flaml.automl: 08-22 21:02:10] {1515} INFO - at 213.2s,\tbest lgbm's error=0.3316,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:10] {1358} INFO - iteration 104, current learner catboost\n", + "[flaml.automl: 08-22 21:02:10] {1515} INFO - at 213.6s,\tbest catboost's error=0.3378,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:10] {1358} INFO - iteration 105, current learner rf\n", + "[flaml.automl: 08-22 21:02:10] {1515} INFO - at 214.0s,\tbest rf's error=0.3572,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:10] {1358} INFO - iteration 106, current learner rf\n", + "[flaml.automl: 08-22 21:02:11] {1515} INFO - at 214.3s,\tbest rf's error=0.3572,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:11] {1358} INFO - iteration 107, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:11] {1515} INFO - at 214.4s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:11] {1358} INFO - iteration 108, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:11] {1515} INFO - at 214.6s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:11] {1358} INFO - iteration 109, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:11] {1515} INFO - at 214.8s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:11] {1358} INFO - iteration 110, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:11] {1515} INFO - at 215.0s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:11] {1358} INFO - iteration 111, current learner lgbm\n", + "[flaml.automl: 08-22 21:02:20] {1515} INFO - at 223.7s,\tbest lgbm's error=0.3316,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:20] {1358} INFO - iteration 112, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:20] {1515} INFO - at 223.9s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:20] {1358} INFO - iteration 113, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:21] {1515} INFO - at 224.7s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:21] {1358} INFO - iteration 114, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:22] {1515} INFO - at 225.9s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:22] {1358} INFO - iteration 115, current learner lgbm\n", + "[flaml.automl: 08-22 
21:02:24] {1515} INFO - at 227.4s,\tbest lgbm's error=0.3316,\tbest lgbm's error=0.3316\n", + "[flaml.automl: 08-22 21:02:24] {1358} INFO - iteration 116, current learner lgbm\n", + "[flaml.automl: 08-22 21:02:29] {1515} INFO - at 232.6s,\tbest lgbm's error=0.3299,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:29] {1358} INFO - iteration 117, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:30] {1515} INFO - at 233.3s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:30] {1358} INFO - iteration 118, current learner xgboost\n", + "[flaml.automl: 08-22 21:02:35] {1515} INFO - at 238.5s,\tbest xgboost's error=0.3406,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:35] {1358} INFO - iteration 119, current learner lgbm\n", + "[flaml.automl: 08-22 21:02:38] {1515} INFO - at 241.5s,\tbest lgbm's error=0.3299,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:38] {1358} INFO - iteration 120, current learner xgboost\n", + "[flaml.automl: 08-22 21:02:41] {1515} INFO - at 244.7s,\tbest xgboost's error=0.3406,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:41] {1358} INFO - iteration 121, current learner xgboost\n", + "[flaml.automl: 08-22 21:02:50] {1515} INFO - at 253.9s,\tbest xgboost's error=0.3382,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:50] {1358} INFO - iteration 122, current learner extra_tree\n", + "[flaml.automl: 08-22 21:02:51] {1515} INFO - at 254.9s,\tbest extra_tree's error=0.3605,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:51] {1358} INFO - iteration 123, current learner xgboost\n", + "[flaml.automl: 08-22 21:02:55] {1515} INFO - at 258.2s,\tbest xgboost's error=0.3382,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:02:55] {1358} INFO - iteration 124, current learner lgbm\n", + "[flaml.automl: 08-22 21:03:12] {1515} INFO - at 275.9s,\tbest lgbm's error=0.3299,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:03:12] {1358} INFO - iteration 125, current learner lgbm\n", + "[flaml.automl: 08-22 21:03:15] {1515} INFO - at 278.4s,\tbest lgbm's error=0.3299,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:03:15] {1358} INFO - iteration 126, current learner lgbm\n", + "[flaml.automl: 08-22 21:03:17] {1515} INFO - at 280.3s,\tbest lgbm's error=0.3299,\tbest lgbm's error=0.3299\n", + "[flaml.automl: 08-22 21:03:17] {1358} INFO - iteration 127, current learner lgbm\n", + "[flaml.automl: 08-22 21:03:35] {1515} INFO - at 298.5s,\tbest lgbm's error=0.3268,\tbest lgbm's error=0.3268\n", + "[flaml.automl: 08-22 21:03:35] {1358} INFO - iteration 128, current learner rf\n", + "[flaml.automl: 08-22 21:03:35] {1515} INFO - at 298.6s,\tbest rf's error=0.3572,\tbest lgbm's error=0.3268\n", + "[flaml.automl: 08-22 21:03:35] {1358} INFO - iteration 129, current learner extra_tree\n", + "[flaml.automl: 08-22 21:03:36] {1515} INFO - at 299.5s,\tbest extra_tree's error=0.3573,\tbest lgbm's error=0.3268\n", + "[flaml.automl: 08-22 21:03:36] {1358} INFO - iteration 130, current learner rf\n", + "[flaml.automl: 08-22 21:03:36] {1515} INFO - at 299.6s,\tbest rf's error=0.3572,\tbest lgbm's error=0.3268\n", + "[flaml.automl: 08-22 21:03:36] {1358} INFO - iteration 131, current learner lrl1\n", "No low-cost partial config given to the search algorithm. 
For cost-frugal search, consider providing low-cost values for cost-related hps via 'low_cost_partial_config'.\n", - "[flaml.automl: 08-13 18:00:34] {1422} INFO - no enough budget for learner lrl1\n", - "[flaml.automl: 08-13 18:00:34] {1252} INFO - iteration 100, current learner rf\n", - "[flaml.automl: 08-13 18:00:34] {1422} INFO - no enough budget for learner rf\n", - "[flaml.automl: 08-13 18:00:34] {1252} INFO - iteration 101, current learner extra_tree\n", - "[flaml.automl: 08-13 18:00:34] {1422} INFO - no enough budget for learner extra_tree\n", - "[flaml.automl: 08-13 18:00:34] {1461} INFO - selected model: LGBMClassifier(colsample_bytree=0.8371742593966071,\n", - " learning_rate=0.10600185051767465, max_bin=128,\n", - " min_child_samples=44, n_estimators=2687, num_leaves=21,\n", - " objective='binary', reg_alpha=0.0027205185280732864,\n", - " reg_lambda=2.0031491728266557, verbose=-1)\n", - "[flaml.automl: 08-13 18:00:34] {1184} INFO - fit succeeded\n", - "[flaml.automl: 08-13 18:00:34] {1185} INFO - Time taken to find the best model: 82.42118620872498\n" + "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/sklearn/linear_model/_sag.py:328: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n", + " warnings.warn(\"The max_iter was reached which means \"\n", + "[flaml.automl: 08-22 21:03:36] {1515} INFO - at 299.9s,\tbest lrl1's error=0.4339,\tbest lgbm's error=0.3268\n", + "[flaml.automl: 08-22 21:03:36] {1592} INFO - selected model: LGBMClassifier(colsample_bytree=0.5874727619500845,\n", + " learning_rate=0.046670380654293554, max_bin=512,\n", + " min_child_samples=8, n_estimators=665, num_leaves=227,\n", + " objective='binary', reg_alpha=0.001975258376030875,\n", + " reg_lambda=0.006468020364053276, verbose=-1)\n", + "[flaml.automl: 08-22 21:03:55] {1633} INFO - retrain lgbm for 18.9s\n", + "[flaml.automl: 08-22 21:03:55] {1636} INFO - retrained model: LGBMClassifier(colsample_bytree=0.5874727619500845,\n", + " learning_rate=0.046670380654293554, max_bin=512,\n", + " min_child_samples=8, n_estimators=683, num_leaves=227,\n", + " objective='binary', reg_alpha=0.001975258376030875,\n", + " reg_lambda=0.006468020364053276, verbose=-1)\n", + "[flaml.automl: 08-22 21:03:55] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:03:55] {1200} INFO - Time taken to find the best model: 298.465936422348\n", + "[flaml.automl: 08-22 21:03:55] {1205} WARNING - Time taken to find the best model is 99% of the provided time budget and not all estimators' hyperparameter search converged. 
Consider increasing the time budget.\n"
 ] } ],
@@ -395,9 +464,9 @@
 "name": "stdout", "text": [
 "Best ML learner: lgbm\n",
- "Best hyperparameter config: {'n_estimators': 2687, 'num_leaves': 21, 'min_child_samples': 44, 'learning_rate': 0.10600185051767465, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 0.8371742593966071, 'reg_alpha': 0.0027205185280732864, 'reg_lambda': 2.0031491728266557, 'FLAML_sample_size': 364083}\n",
- "Best accuracy on validation data: 0.6723\n",
- "Training duration of best run: 29.51 s\n"
+ "Best hyperparameter config: {'n_estimators': 683, 'num_leaves': 227, 'min_child_samples': 8, 'learning_rate': 0.046670380654293554, 'log_max_bin': 10, 'colsample_bytree': 0.5874727619500845, 'reg_alpha': 0.001975258376030875, 'reg_lambda': 0.006468020364053276, 'FLAML_sample_size': 364083}\n",
+ "Best accuracy on validation data: 0.6732\n",
+ "Training duration of best run: 18.12 s\n"
 ] } ],
@@ -419,11 +488,11 @@
 "output_type": "execute_result", "data": { "text/plain": [
- "LGBMClassifier(colsample_bytree=0.8371742593966071,\n",
- " learning_rate=0.10600185051767465, max_bin=128,\n",
- " min_child_samples=44, n_estimators=2687, num_leaves=21,\n",
- " objective='binary', reg_alpha=0.0027205185280732864,\n",
- " reg_lambda=2.0031491728266557, verbose=-1)"
+ "LGBMClassifier(colsample_bytree=0.5874727619500845,\n",
+ " learning_rate=0.046670380654293554, max_bin=512,\n",
+ " min_child_samples=8, n_estimators=683, num_leaves=227,\n",
+ " objective='binary', reg_alpha=0.001975258376030875,\n",
+ " reg_lambda=0.006468020364053276, verbose=-1)"
 ] }, "metadata": {},
@@ -506,9 +575,9 @@
 "output_type": "stream", "name": "stdout", "text": [
- "accuracy = 0.6721815997508269\n",
- "roc_auc = 0.7257907244977934\n",
- "log_loss = 0.6031168823610242\n"
+ "accuracy = 0.6741542203699035\n",
+ "roc_auc = 0.729539654237154\n",
+ "log_loss = 0.600001589965153\n"
 ] } ],
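The cells above report the selected learner, its hyperparameter configuration, and its holdout metrics. As a minimal sketch of how such results are consumed once the search finishes (assuming the flaml.AutoML interface this notebook exercises; X_train, y_train, and X_test are illustrative placeholders, and time_budget, in seconds, is simply raised as the WARNING above advises):

    from flaml import AutoML

    automl = AutoML()
    # A larger budget gives each learner's hyperparameter search room to converge.
    automl.fit(X_train, y_train, task='classification', time_budget=600,
               log_file_name='flaml.log')

    print(automl.best_estimator)  # e.g. 'lgbm'
    print(automl.best_config)     # the hyperparameter dict printed above
    y_pred = automl.predict(X_test)

The earlier "No low-cost partial config given" message is only a hint from the underlying search: the lrl1 learner's search space declares no low-cost initial values, so cost-frugal search cannot start it from a cheap configuration.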
@@ -547,15 +616,22 @@
 "output_type": "stream", "name": "stdout", "text": [
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 30, 'learning_rate': 0.18850082505120694, 'subsample': 1.0, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.43802345595978204, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 30, 'learning_rate': 0.18850082505120694, 'subsample': 1.0, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.43802345595978204, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 23, 'num_leaves': 4, 'min_child_samples': 39, 'learning_rate': 0.4839966785164543, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0015851927568202393, 'reg_lambda': 1.9570976003429281, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 23, 'num_leaves': 4, 'min_child_samples': 39, 'learning_rate': 0.4839966785164543, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0015851927568202393, 'reg_lambda': 1.9570976003429281, 'FLAML_sample_size': 10000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 23, 'num_leaves': 4, 'min_child_samples': 39, 'learning_rate': 0.4839966785164543, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0015851927568202393, 'reg_lambda': 1.9570976003429281, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 23, 'num_leaves': 4, 'min_child_samples': 39, 'learning_rate': 0.4839966785164543, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0015851927568202393, 'reg_lambda': 1.9570976003429281, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 74, 'num_leaves': 4, 'min_child_samples': 41, 'learning_rate': 0.17743258768982648, 'subsample': 1.0, 'log_max_bin': 7, 'colsample_bytree': 0.804982542436943, 'reg_alpha': 0.0009765625, 'reg_lambda': 3.547311998768578, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 74, 'num_leaves': 4, 'min_child_samples': 41, 'learning_rate': 0.17743258768982648, 'subsample': 1.0, 'log_max_bin': 7, 'colsample_bytree': 0.804982542436943, 'reg_alpha': 0.0009765625, 'reg_lambda': 3.547311998768578, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 135, 'num_leaves': 7, 'min_child_samples': 21, 'learning_rate': 0.29597808772418305, 'subsample': 1.0, 'log_max_bin': 6, 'colsample_bytree': 0.7208090706891741, 'reg_alpha': 0.0017607866203119683, 'reg_lambda': 1.8488863473486183, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 135, 'num_leaves': 7, 'min_child_samples': 21, 'learning_rate': 0.29597808772418305, 'subsample': 1.0, 'log_max_bin': 6, 'colsample_bytree': 0.7208090706891741, 'reg_alpha': 0.0017607866203119683, 'reg_lambda': 1.8488863473486183, 'FLAML_sample_size': 40000}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 135, 'num_leaves': 7, 'min_child_samples': 21, 'learning_rate': 0.29597808772418305, 'subsample': 1.0, 'log_max_bin': 6, 'colsample_bytree': 0.7208090706891741, 'reg_alpha': 0.0017607866203119683, 'reg_lambda': 1.8488863473486183, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 135, 'num_leaves': 7, 'min_child_samples': 21, 'learning_rate': 0.29597808772418305, 'subsample': 1.0, 'log_max_bin': 6, 'colsample_bytree': 0.7208090706891741, 'reg_alpha': 0.0017607866203119683, 'reg_lambda': 1.8488863473486183, 'FLAML_sample_size': 364083}}\n",
- "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 529, 'num_leaves': 20, 'min_child_samples': 35, 'learning_rate': 0.22270723105961662, 'subsample': 1.0, 'log_max_bin': 7, 'colsample_bytree': 0.7169048987045028, 'reg_alpha': 0.01040607895773376, 'reg_lambda': 4.44145781234249, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 529, 
'num_leaves': 20, 'min_child_samples': 35, 'learning_rate': 0.22270723105961662, 'subsample': 1.0, 'log_max_bin': 7, 'colsample_bytree': 0.7169048987045028, 'reg_alpha': 0.01040607895773376, 'reg_lambda': 4.44145781234249, 'FLAML_sample_size': 364083}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 2687, 'num_leaves': 21, 'min_child_samples': 44, 'learning_rate': 0.10600185051767465, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 0.8371742593966071, 'reg_alpha': 0.0027205185280732864, 'reg_lambda': 2.0031491728266557, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 2687, 'num_leaves': 21, 'min_child_samples': 44, 'learning_rate': 0.10600185051767465, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 0.8371742593966071, 'reg_alpha': 0.0027205185280732864, 'reg_lambda': 2.0031491728266557, 'FLAML_sample_size': 364083}}\n" + "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 15, 'learning_rate': 0.2712162364070373, 'log_max_bin': 10, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.002668211515123386, 'reg_lambda': 0.5215467339232843, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 12, 'num_leaves': 4, 'min_child_samples': 15, 'learning_rate': 0.2712162364070373, 'log_max_bin': 10, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.002668211515123386, 'reg_lambda': 0.5215467339232843, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 14, 'num_leaves': 5, 'min_child_samples': 9, 'learning_rate': 0.2835381908967212, 'log_max_bin': 9, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.0014132988481787994, 'reg_lambda': 0.033183495034912504, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 14, 'num_leaves': 5, 'min_child_samples': 9, 'learning_rate': 0.2835381908967212, 'log_max_bin': 9, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.0014132988481787994, 'reg_lambda': 0.033183495034912504, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 10000, 'Current Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 10000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 10000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current 
Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 53, 'num_leaves': 4, 'min_child_samples': 7, 'learning_rate': 0.15662398373030859, 'log_max_bin': 10, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0064258982194552745, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 40000, 'Current Hyper-parameters': {'n_estimators': 72, 'num_leaves': 5, 'min_child_samples': 6, 'learning_rate': 0.6822817724617938, 'log_max_bin': 10, 'colsample_bytree': 0.6975165606652173, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0012212021562021567, 'FLAML_sample_size': 40000}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 72, 'num_leaves': 5, 'min_child_samples': 6, 'learning_rate': 0.6822817724617938, 'log_max_bin': 10, 'colsample_bytree': 0.6975165606652173, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0012212021562021567, 'FLAML_sample_size': 40000}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 72, 'num_leaves': 5, 'min_child_samples': 6, 'learning_rate': 0.6822817724617938, 'log_max_bin': 10, 'colsample_bytree': 0.6975165606652173, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0012212021562021567, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 72, 'num_leaves': 5, 'min_child_samples': 6, 'learning_rate': 0.6822817724617938, 'log_max_bin': 10, 'colsample_bytree': 0.6975165606652173, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0012212021562021567, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 41, 'num_leaves': 21, 'min_child_samples': 4, 'learning_rate': 0.6001063475389127, 'log_max_bin': 10, 'colsample_bytree': 0.7789325923328158, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0022935398380276623, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 41, 'num_leaves': 21, 'min_child_samples': 4, 'learning_rate': 0.6001063475389127, 'log_max_bin': 10, 'colsample_bytree': 0.7789325923328158, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0022935398380276623, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 86, 'num_leaves': 47, 'min_child_samples': 4, 'learning_rate': 0.19470316589735667, 'log_max_bin': 9, 'colsample_bytree': 0.631882070615302, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0035266222848593562, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 86, 'num_leaves': 47, 'min_child_samples': 4, 'learning_rate': 0.19470316589735667, 'log_max_bin': 9, 'colsample_bytree': 0.631882070615302, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0035266222848593562, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 172, 'num_leaves': 22, 'min_child_samples': 2, 'learning_rate': 0.24053152671715367, 'log_max_bin': 10, 'colsample_bytree': 0.6820700830442894, 'reg_alpha': 0.004577823970660193, 'reg_lambda': 0.007561964401113763, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': 
{'n_estimators': 172, 'num_leaves': 22, 'min_child_samples': 2, 'learning_rate': 0.24053152671715367, 'log_max_bin': 10, 'colsample_bytree': 0.6820700830442894, 'reg_alpha': 0.004577823970660193, 'reg_lambda': 0.007561964401113763, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 122, 'num_leaves': 63, 'min_child_samples': 2, 'learning_rate': 0.5931378900500869, 'log_max_bin': 10, 'colsample_bytree': 0.47445295252109176, 'reg_alpha': 0.0033202544897837413, 'reg_lambda': 0.004252702807634206, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 122, 'num_leaves': 63, 'min_child_samples': 2, 'learning_rate': 0.5931378900500869, 'log_max_bin': 10, 'colsample_bytree': 0.47445295252109176, 'reg_alpha': 0.0033202544897837413, 'reg_lambda': 0.004252702807634206, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 77, 'num_leaves': 362, 'min_child_samples': 2, 'learning_rate': 0.3674198928731587, 'log_max_bin': 10, 'colsample_bytree': 0.419059595944657, 'reg_alpha': 0.0014611649438296117, 'reg_lambda': 0.007191830853534194, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 77, 'num_leaves': 362, 'min_child_samples': 2, 'learning_rate': 0.3674198928731587, 'log_max_bin': 10, 'colsample_bytree': 0.419059595944657, 'reg_alpha': 0.0014611649438296117, 'reg_lambda': 0.007191830853534194, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 123, 'num_leaves': 63, 'min_child_samples': 4, 'learning_rate': 0.5931378900500864, 'log_max_bin': 9, 'colsample_bytree': 0.47445295252109176, 'reg_alpha': 0.0033202544897837413, 'reg_lambda': 0.004252702807634205, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 123, 'num_leaves': 63, 'min_child_samples': 4, 'learning_rate': 0.5931378900500864, 'log_max_bin': 9, 'colsample_bytree': 0.47445295252109176, 'reg_alpha': 0.0033202544897837413, 'reg_lambda': 0.004252702807634205, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 68, 'num_leaves': 194, 'min_child_samples': 3, 'learning_rate': 0.2431279112969044, 'log_max_bin': 8, 'colsample_bytree': 0.5862836266928158, 'reg_alpha': 0.008410564196066936, 'reg_lambda': 0.0024014793155735497, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 68, 'num_leaves': 194, 'min_child_samples': 3, 'learning_rate': 0.2431279112969044, 'log_max_bin': 8, 'colsample_bytree': 0.5862836266928158, 'reg_alpha': 0.008410564196066936, 'reg_lambda': 0.0024014793155735497, 'FLAML_sample_size': 364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 150, 'num_leaves': 169, 'min_child_samples': 6, 'learning_rate': 0.12389107062093162, 'log_max_bin': 9, 'colsample_bytree': 0.569229888724659, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.008985174012040886, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 150, 'num_leaves': 169, 'min_child_samples': 6, 'learning_rate': 0.12389107062093162, 'log_max_bin': 9, 'colsample_bytree': 0.569229888724659, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.008985174012040886, 'FLAML_sample_size': 
364083}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 683, 'num_leaves': 227, 'min_child_samples': 8, 'learning_rate': 0.046670380654293554, 'log_max_bin': 10, 'colsample_bytree': 0.5874727619500845, 'reg_alpha': 0.001975258376030875, 'reg_lambda': 0.006468020364053276, 'FLAML_sample_size': 364083}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 683, 'num_leaves': 227, 'min_child_samples': 8, 'learning_rate': 0.046670380654293554, 'log_max_bin': 10, 'colsample_bytree': 0.5874727619500845, 'reg_alpha': 0.001975258376030875, 'reg_lambda': 0.006468020364053276, 'FLAML_sample_size': 364083}}\n" ] } ], @@ -587,8 +663,8 @@ "text/plain": [ "
" ], - "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3dfZxdVX3v8c+XECCgEDCBmwdCwiWJgmCCKYr4AFSayFWIFhG0rcWWaH2oljaUVEWKlxZv1F59mWrBaykqCKRhjBgZKSDUCCSBYJ5wMIanmaAJMVGEkSST3/1jrxN2TvZMTsLs2TNzvu/X67xy9tpr7/M7MyfzO2utvddSRGBmZlZvv6oDMDOz/skJwszMCjlBmJlZIScIMzMr5ARhZmaFnCDMzKyQE4TZPpD0JkltVcdhViYnCBtwJD0u6a1VxhAR/x0Rk8s6v6Tpku6V9KykjZLukXROWa9nVsQJwqyApCEVvvZ5wC3A9cBY4CjgcuAd+3AuSfL/c9sn/uDYoCFpP0mXSfqFpE2SbpZ0RG7/LZJ+Kek36dv5Cbl910n6qqRFkp4Dzkgtlb+TtCIdc5Okg1L90yW1547vtm7af6mkpyWtl/SXkkLScQXvQcAXgc9GxNcj4jcRsSMi7omIi1OdKyR9K3fM+HS+/dP2jyRdJWkx8DwwW9Kyutf5G0kL0/MDJX1e0pOSfiXpa5KGvcRfhw0CThA2mHwMmAm8BRgNbAbm5fb/AJgIHAk8BHy77vj3AlcBLwd+nMrOB2YAE4CTgD/v4fUL60qaAVwCvBU4Dji9h3NMBo4G5vdQpxF/Cswiey9fAyZLmpjb/17ghvT8amASMCXFN4asxWJNzgnCBpMPAZ+MiPaIeAG4Ajiv9s06Ir4REc/m9r1G0mG5478bEYvTN/bfp7IvR8T6iPg18D2yP6Ld6a7u+cC/R8TqiHg+vXZ3XpH+fbrRN92N69LrbY+I3wDfBS4ESInilcDC1GKZBfxNRPw6Ip4F/gm44CW+vg0CThA2mBwD3Cppi6QtwCNAF3CUpCGSrk7dT78FHk/HjMgd/1TBOX+Ze/488LIeXr+7uqPrzl30OjWb0r+jeqjTiPrXuIGUIMhaDy0pWY0EDgYezP3cbk/l1uScIGwweQp4W0QMzz0OiogOsj+K55J18xwGjE/HKHd8WVMbP0022FxzdA9128jexx/3UOc5sj/qNf+joE79e7kDGClpClmiqHUvPQN0AifkfmaHRURPidCahBOEDVRDJR2Ue+xP1td+laRjACSNlHRuqv9y4AWyb+gHk3Wj9JWbgYskvUrSwcCnu6sY2fz7lwCflnSRpEPT4PsbJV2Tqj0MvFnSuNRFNmdPAUTENrIro+YCR5AlDCJiB3At8C+SjgSQNEbS9H1+tzZoOEHYQLWI7Jtv7XEF8CVgIfBDSc8C9wOvS/WvB54AOoA1aV+fiIgfAF8G7gbW5l77hW7qzwfeA3wAWA/8CvjfZOMIRMQdwE3ACuBB4LYGQ7mBrAV1S0Rsz5X/fS2u1P32X2SD5dbk5AWDzPqWpFcBq4AD6/5Qm/UrbkGY9QFJ70z3GxwOfA74npOD9XdOEGZ944PABuAXZFdW/VW14ZjtmbuYzMyskFsQZmZWaP+qA+gtI0aMiPHjx1cdhpnZgPLggw8+ExGFN0YOmgQxfvx4li1btueKZma2k6QnutvnLiYzMyvkBGFmZoWcIMzMrJAThJmZFXKCMDOzQoPmKiYzs2bTsryDua1trN/Syejhw5g9fTIzp47ptfM7QZiZDUAtyzuYs2Alndu6AOjY0smcBSsBei1JuIvJzGwAmtvatjM51HRu62Jua1uvvYYThJnZALR+S+dele8LJwgzswFo9PBhe1W+L5wgzMwGoNnTJzNs6JBdyoYNHcLs6b23GKAHqc3MBqDaQPSl81ewtWsHY3wVk5mZ1cycOoYblzwJwE0fPLXXz+8uJjMzK1RqgpA0Q1KbpLWSLuumzvmS1khaLemGVHaGpIdzj99LmllmrGZmtqvSupgkDQHmAWcB7cBSSQsjYk2uzkRgDnBaRGyWdCRARNwNTEl1jgDWAj8sK1YzM9tdmS2IU4C1EbEuIrYC3wHOratzMTAvIjYDRMSGgvOcB/wgIp4vMVYzM6tTZoIYAzyV225PZXmTgEmSFku6X9KMgvNcANxY9AKSZklaJmnZxo0beyVoMzPLVD1IvT8wETgduBC4VtLw2k5Jo4ATgdaigyPimoiYFhHTRo
4sXFLVzMz2UZkJogM4Orc9NpXltQMLI2JbRDwGPEqWMGrOB26NiG0lxmlmZgXKTBBLgYmSJkg6gKyraGFdnRay1gOSRpB1Oa3L7b+QbrqXzMysXKUliIjYDnyUrHvoEeDmiFgt6UpJ56RqrcAmSWuAu4HZEbEJQNJ4shbIPWXFaGZm3Sv1TuqIWAQsqiu7PPc8gEvSo/7Yx9l9UNvMzPpI1YPUZmbWTzlBmJlZIScIMzMr5ARhZmaFnCDMzKyQ14OwAatleQdzW9tYv6WT0SUslmLW7JwgbEBqWd7BnAUr6dzWBUDHlk7mLFgJ4CRh1kucIGxAmtvatjM51HRu6+LS+St2rrBl1gzWPP1bjh91aCnn9hiEDUjrt3QWlm/t2tHHkZhV6/hRh3LulHJazW5B2IA0evgwOgqSxJjhw0pZm9esGbkFYQPS7OmTGTZ0yC5lw4YOYfb0yRVFZDb4uAVhA1JtIPrS+SvY2rWDMb6KyazXOUHYgDVz6pidA9LuVjLrfe5iMjOzQk4QZmZWyAnCzMwKOUGYmVkhJwgzMyvkBGFmZoWcIMzMrJAThJmZFXKCMDOzQk4QZmZWyAnCzMwKOUGYmVkhJwgzMyvkBGFmZoWcIMzMrJAThJmZFXKCMDOzQk4QZmZWqNQEIWmGpDZJayVd1k2d8yWtkbRa0g258nGSfijpkbR/fJmxmpnZrkpbk1rSEGAecBbQDiyVtDAi1uTqTATmAKdFxGZJR+ZOcT1wVUTcIellwI6yYjUzs92V2YI4BVgbEesiYivwHeDcujoXA/MiYjNARGwAkHQ8sH9E3JHKfxcRz5cYq5mZ1SkzQYwBnsptt6eyvEnAJEmLJd0vaUaufIukBZKWS5qbWiS7kDRL0jJJyzZu3FjKmzAza1ZVD1LvD0wETgcuBK6VNDyVvwn4O+APgGOBP68/OCKuiYhpETFt5MiRfRWzmVlT2GOCkPSKfTx3B3B0bntsKstrBxZGxLaIeAx4lCxhtAMPp+6p7UALcPI+xmFmZvugkRbE/ZJukXS2JO3FuZcCEyVNkHQAcAGwsK5OC1nrAUkjyLqW1qVjh0uqNQvOBNZgZmZ9ppEEMQm4BvhT4OeS/knSpD0dlL75fxRoBR4Bbo6I1ZKulHROqtYKbJK0BrgbmB0RmyKii6x76U5JKwEB1+7tmzMzs323x8tcIyKAO4A7JJ0BfAv4sKSfApdFxH09HLsIWFRXdnnduS9Jj/pj7wBOavB9mJlZL9tjgkhjEH9C1oL4FfAxsq6iKcAtwIQyAzQzs2o0cqPcfcA3gZkR0Z4rXybpa+WEZWZmVWskQUxOXUG7iYjP9XI8ZmbWTzQySP3DdG8CAJIOl9RaYkxmZtYPNJIgRkbEltpGmhbjyB7qm5nZINBIguiSNK62IekYoLDLyczMBo9GxiA+CfxY0j1k9yO8CZhValRmZla5Ru6DuF3SycDrU9EnIuKZcsMyM7OqNboeRBewATgIOF4SEXFveWGZmVnVGrlR7i+Bj5NNtvcwWUviPrL5kczMbJBqZJD642RTbj8REWcAU4EtPR9iZmYDXSMJ4vcR8XsASQdGxM+AyeWGZWZmVWtkDKI93SjXQjZh32bgiXLDMjOzqjVyFdM709MrJN0NHAbcXmpUZmZWuR4TRFoHenVEvBIgIu7pk6jMzKxyPY5BpIV72vJ3UpuZWXNoZAzicGC1pCXAc7XCiDin+0PMzGygayRBfLr0KJpMy/IO5ra2sX5LJ6OHD2P29MnMnDqm6rDMzHbRyCC1xx16UcvyDuYsWEnnti4AOrZ0MmfBSgAnCTPrVxq5k/pZXpy99QBgKPBcRBxaZmCD1dzWtp3JoaZzWxeXzl/BjUuerCiqgWvN07/l+FH+KJqVoZEWxMtrzyUJOJcXJ+6zvbR+S2dh+dauHX0cyeBw/KhDOXeKW15mZWh0sj4A0tKjLZI+A1xWTkiD2+jhw+goSBJjhg/jpg+eWkFEZmbFGulielducz9gGvD70iIa5GZPn7zLGATAsKFDmD3ds5eYWf/SSAviHbnn24HHybqZbB/UBqIvnb+CrV07GOOrmMysn2pkDOKivgikmcycOmbngLS7lcysv9rjbK6S/iNN1lfbPlzSN8oNy8zMqtbIdN8nRcTO9R8iYjPZmhBmZjaINZIg9pN0eG1D0hHs5dVPZmY28DTyh/4LwH2Sbknb7wauKi8kMzPrDxoZpL5e0jJeXIP6XRGxptywzMysao0MUr8eeCoivhIRXyFbYe51jZxc0gxJbZLWSiq8sU7S+ZLWSFot6YZceZekh9NjYaNvyMzMekcjXUxfBU7Obf+uoGw3abGhecBZQDuwVNLCfOtD0kRgDnBaRGyWdGTuFJ0RMaWxt2FmZr2tkUFqpSk2AIiIHTSWWE4B1kbEuojYCnyH3W+wuxiYl66MIiI2NBa2mZmVrZEEsU7SX0samh4fB9Y1cNwY4Kncdnsqy5sETJK0WNL9kmbk9h0kaVkqn9nA65mZWS9qpCXwIeDLwKfIpv2+k+ybf2+9/kTgdGAscK+kE9N9F8dERIekY4G7JK2MiF/kD5Y0C5gFMG5cOauienEfM2tWe2xBRMSGiLggIo6MiKOAvyD7g74nHcDRue2xqSyvHVgYEdsi4jHgUbKEQUR0pH/XAT+i4Oa8iLgmIqZFxLSRI0c2ENLeqS3u07Glk+DFxX1alte/DTOzwaehG97SgPN04EKyQecfA7f0eBAsBSZKmkCWGC4A3ltXpyWd898ljSDrclqXbsx7PiJeSOWnAf+nsbfUe8pc3McL3ZhZf9djgpD0FrI/6mcDS8j+UB8bEc/v6cQRsV3SR4FWYAjwjYhYLelKYFlELEz7/kjSGqALmB0RmyS9Afg3STvIWjlXV3HvRZmL+3ihGzPr75S7QGnXHVI78CTZJa0tEfGspMciYkJfBtioadOmxbJly3r1nKddfVe3i/ssvuzMgiPMzAYWSQ9GxLSifT2NQcwHRgPvAd4h6RBeXJu6KcyePplhQ4fsUubFfcysWXSbICLiE8AEsrmYTgfagJHpzueX9U141Zo5dQz//K4TOWBI9mMaM3wY//yuE30Vk5k1hR7HININcncDd0sayosD1f8KjCg/vOp5cR8za1YNT9sdEduA24DbJA0rLyQzM+sPGrmTejcRUXx5j5mZDRr7lCDMzGzwc4IwM7NCexyDkDQJmA0ck68fEb4RwMxsEGtkkPoW4GvAtWR3O5uZWRNoJEFsj4ivlh6JmZn1K42MQXxP0ocljZJ0RO1RemRmZlapRloQ70//zs6VBXBs74djZmb9xR4TRH+dnM/MzMrVyFVMQ4G/At6cin4E/Fu6s9rMzAapRrqYvgoMJZt/CeBPU9lflhWUmZlVr5EE8QcR8Zrc9l2SflpWQGZm1j80chVTl6T/WduQdCy+H8LMbNBrpAUxm2y673WAyO6ovqjUqMzMrHKNXMV0p6SJQG0ZtbaIeKHcsMzMrGrdJghJZ0bEXZLeVbfrOElExIKSY6tUy/IO5ra2sX5LJ0OH7MfRR3gJDDNrLj21I
N4C3AW8o2BfAIMyQbQs7+CKhavZ0vniVbxbu3bw2DPP0bK8w8uNmlnT6DZBRMRn0tMrI+Kx/D5Jg/LmuZblHcxZsJLObbuPwe8ImNva5gRhZk2jkauY/rOgbH5vB9IfzG1tK0wONeu3eCE9M2sePY1BvBI4ATisbhziUOCgsgOrwp4SwOjhHocws+bR0xjEZODtwHB2HYd4Fri4zKCqMnr4MDq6SRLDhg5h9vTJhfvMzAajnsYgvgt8V9KpEXFfH8ZUmdnTJxeOQRx+8FA+844TPP5gZk2lkRvllkv6CFl3086upYj4QGlRVaSWAC6dv4KtXTsYM3wYs6dPdmIws6bUSIL4JvAzYDpwJfA+4JEyg6rSzKljuHHJkwDc9MFTK47GzKw6jVzFdFxEfBp4LiL+A/hfwOvKDcvMzKrWSIKo3TG2RdKrgcOAI8sLyczM+oNGupiukXQ48GlgIfAy4PJSozIzs8rtsQUREV+PiM0RcU9EHBsRR0bE1xo5uaQZktokrZV0WTd1zpe0RtJqSTfU7TtUUrukrzT2dszMrLf0dKPcJT0dGBFf7Gm/pCHAPOAsoB1YKmlhRKzJ1ZkIzAFOi4jNkuq7rj4L3NvzWzAzszL01IJ4eXpMI1uTekx6fAg4uYFznwKsjYh1EbEV+A5wbl2di4F5EbEZICI21HZIei1wFPDDxt6KmZn1pp5ulPtHAEn3AidHxLNp+wrg+w2cewzwVG67nd2vfpqUzrkYGAJcERG3S9oP+ALwJ8Bbu3sBSbOAWQDjxo1rICQzM2tUI1cxHQVszW1vTWW9YX9gInA6cCFwraThwIeBRRHR3tPBEXFNREyLiGkjR47spZDMzAwau4rpemCJpFvT9kzgugaO6wCOzm2PTWV57cADEbENeEzSo2QJ41TgTZI+THbV1AGSfhcRhQPdvcELBJmZ7aqRq5iuIluDenN6XBQR/9zAuZcCEyVNkHQAcAHZZbJ5LWStBySNIOtyWhcR74uIcRExHvg74Pqyk8OcBSvp2NJJsOsCQWZmzarbBCHp0PTvEcDjZFNufBN4IpX1KCK2Ax8FWsmm5rg5IlZLulLSOalaK7BJ0hrgbmB2RGx6Ce9nnxStA1FbIMjMrFn11MV0A9l03w+SLTFao7R97J5OHhGLgEV1ZZfnngdwSXp0d47raKxLa591tw6EFwgys2bW01VMb0//DsrlRfO6WwfCCwSZWTPr6Ua5Hu91iIiHej+cahStA+EFgsys2fXUxfSFHvYFcGYvx1IZrwNhZra7nrqYzujLQKrmdSDMzHbVyH0QpGm+j2fXFeWuLysoMzOr3h4ThKTPkN2rcDzZFUlvA35MdgOdmZkNUo1MtXEe8IfALyPiIuA1ZIsGmZnZINZIguiMiB3A9nTz3AZ2nULDzMwGoUbGIJalCfSuJbtp7nfAfaVGZWZmlevpPoh5wA0R8eFU9DVJtwOHRsSKPonOzMwq01ML4lHg85JGATcDN0bE8r4Jy8zMqtbtGEREfCkiTgXeAmwCviHpZ5I+I2lSn0VoZmaVaGS67yci4nMRMZVsUZ+ZZLOzmpnZILbHBCFpf0nvkPRt4AdAG/Cu0iMzM7NK9TRIfRZZi+FsYAnwHWBWRDzXR7GZmVmFehqknkO2JsTfRsTmPorHzMz6iZ4m6xs0s7Wamdnea+ROajMza0JOEGZmVsgJwszMCjlBmJlZIScIMzMr5ARhZmaFnCDMzKyQE4SZmRVygjAzs0JOEGZmVsgJwszMCjlBmJlZIScIMzMrVGqCkDRDUpuktZIu66bO+ZLWSFot6YZUdoykhyQ9nMo/VGacZma2u57Wg3hJJA0B5gFnAe3AUkkLI2JNrs5EsnUnTouIzZKOTLueBk6NiBckvQxYlY5dX1a8Zma2qzJbEKcAayNiXURsJVuR7ty6OhcD82oLEkXEhvTv1oh4IdU5sOQ4zcysQJl/eMcAT+W221NZ3iRgkqTFku6XNKO2Q9LRklakc3zOrQczs75V9Tfz/YGJwOlk619fK2k4QEQ8FREnAccB75d0VP3BkmZJWiZp2caNG/swbDOzwa/MBNEBHJ3bHpvK8tqBhRGxLSIeAx4lSxg7pZbDKuBN9S8QEddExLSImDZy5MheDd7MrNmVmSCWAhMlTZB0AHABsLCuTgtZ6wFJI8i6nNZJGitpWCo/HHgj0FZirGZmVqe0BBER24GPAq3AI8DNEbFa0pWSzknVWoFNktYAdwOzI2IT8CrgAUk/Be4BPh8RK8uK1czMdlfaZa4AEbEIWFRXdnnueQCXpEe+zh3ASWXGZmZmPat6kNrMzPopJwgzMyvkBGFmZoWcIMzMrJAThJmZFXKCMDOzQk4QZmZWyAnCzMwKOUEALcs7OO3qu3jgsV+z/MkttCyvnzLKzKz5lHon9UDQsryDOQtW0rmtC4CtXTuYsyCb1WPm1PrZyc3MmkfTtyDmtrbtTA41ndu6mNvquQHNrLk1fYJYv6Vzr8rNzJpF0yeI0cOH7VW5mVmzaPoEMXv6ZIYNHbJL2bChQ5g9fXJFEZmZ9Q9NP0hdG4i+dP4KtnbtYMzwYcyePtkD1GbW9Jo+QUCWJG5c8iQAN33w1IqjMTPrH5q+i8nMzIo5QZiZWSEnCDMzK+QEYWZmhZwgzMyskBOEmZkVavoE4ZlczcyKNfV9EJ7J1cyse03dgvBMrmZm3WvqBOGZXM3MutfUCcIzuZqZda+pE4RncjUz615TD1LXBqLntraxfksnoz2Tq5nZTk2dICBLEk4IZma7a+ouJjMz616pCULSDEltktZKuqybOudLWiNptaQbUtkUSfelshWS3lNmnGZmtrvSupgkDQHmAWcB7cBSSQsjYk2uzkRgDnBaRGyWdGTa9TzwZxHxc0mjgQcltUbElrLiNTOzXZXZgjgFWBsR6yJiK/Ad4Ny6OhcD8yJiM0BEbEj/PhoRP0/P1wMbgJElxmpmZnXKTBBjgKdy2+2pLG8SMEnSYkn3S5pRfxJJpwAHAL8o2DdL0jJJyzZu3NiLoZuZWdVXMe0PTAROB8YC90o6sdaVJGkU8E3g/RGxo/7giLgGuCbV3SjpiT283gjgmd4Lv1f0x5jAce0tx9W4/hgTNG9cx3S3o8wE0QEcndsem8ry2oEHImIb8JikR8kSxlJJhwLfBz4ZEffv6cUiYo9dUJKWRcS0Rt9AX+iPMYHj2luOq3H9MSZwXEXK7GJaCkyUNEHSAcAFwMK6Oi1krQckjSDrclqX6t8KXB8R80uM0czMulFagoiI7cBHgVbgEeDmiFgt6UpJ56RqrcAmSWuAu4HZEbEJOB94M/Dnkh5OjyllxWpmZrsrdQwiIhYBi+rKLs89D+CS9MjX+RbwrRJCuqaEc75U/TEmcFx7y3E1rj/GBI5rN8r+RpuZme3KU22YmVkhJwgzMyvUFAmikTmh+iiOb0jaIGlVruwISXdI+nn69/AK4jpa0t25ObE+XnVskg6StETST1NM/5jKJ0h6IP0ub0pXvPU5SUMkLZd0W3+JS9LjklamizqWpbL+8PkaLmm+pJ9JekTS
qVXHJWly7gKYhyX9VtIn+kFcf5M+76sk3Zj+H1T22Rr0CSI3J9TbgOOBCyUdX1E41wH1d4tfBtwZEROBO9N2X9sO/G1EHA+8HvhI+hlVGdsLwJkR8RpgCjBD0uuBzwH/EhHHAZuBv+jDmPI+TnZ1Xk1/ieuMiJiSu26+P3y+vgTcHhGvBF5D9nOrNK6IaEs/pynAa8nmf7u1yrgkjQH+GpgWEa8GhpDdHlDdZysiBvUDOBVozW3PAeZUGM94YFVuuw0YlZ6PAtr6wc/su2STLPaL2ICDgYeA15HdUbp/0e+2D+MZS/bH40zgNkD9JK7HgRF1ZZX+DoHDgMdIF8T0l7jqYvkjYHHVcfHi9ERHkF1hehswvcrP1qBvQdDYnFBVOioink7PfwkcVWUwksYDU4EHqDi21I3zMNlkjXeQzce1JbJ7bKC63+X/BS4FatO/vKKfxBXADyU9KGlWKqv68zUB2Aj8e+qS+7qkQ/pBXHkXADem55XFFREdwOeBJ4Gngd8AD1LhZ6sZEsSAEdlXhMquO5b0MuA/gU9ExG/z+6qILSK6IusCGEs2O/Ar+/L1i0h6O7AhIh6sOpYCb4yIk8m6Uz8i6c35nRV9vvYHTga+GhFTgeeo67ap8nOf+vPPAW6p39fXcaXxjnPJkupo4BB275LuU82QIBqZE6pKv0qTEtYmJ9xQRRCShpIlh29HxIL+FFtkkzfeTda8Hi6pdoNnFb/L04BzJD1ONoX9mWR97FXHVfsGSmTT5t9KllSr/h22A+0R8UDank+WMKqOq+ZtwEMR8au0XWVcbwUei4iNkc1Pt4Ds81bZZ6sZEkQjc0JVaSHw/vT8/WT9/31KkoD/BzwSEV/sD7FJGilpeHo+jGxM5BGyRHFeFTEBRMSciBgbEePJPkt3RcT7qo5L0iGSXl57TtavvoqKP18R8UvgKUmTU9EfAmuqjivnQl7sXoJq43oSeL2kg9P/ydrPqrrPVlUDQ335AM4GHiXrw/5khXHcSNa3uI3sm9VfkPVf3wn8HPgv4IgK4nojWVN6BfBwepxdZWzAScDyFNMq4PJUfiywBFhL1i1wYIW/z9OB2/pDXOn1f5oeq2uf837y+ZoCLEu/yxbg8H4S1yHAJuCwXFmlcQH/CPwsfea/CRxY5WfLU22YmVmhZuhiMjOzfeAEYWZmhZwgzMyskBOEmZkVcoIwM7NCThA2IEj6F0mfyG23Svp6bvsLki4pPhokXSfpvPT8R5J2WwRe0lBJV6eZPB+SdJ+kt6V9jytbN31v4975ut3sn5dmE10jqTM3u+h5khbV7gXpTZJG1Wah7Wb/AZLuzd2cZU3KCcIGisXAGwAk7QeMAE7I7X8D8JOX+BqfJZug7dWRTVkxE3j5SzxnjyLiI5FNJ3I28ItIM4xGxPyIODuyu8h72yXAtT3EtJXsXoD3lPDaNoA4QdhA8ROyqTYgSwyrgGclHS7pQOBVwEOSLpe0NM2nf026I3WPJB0MXAx8LCJeAIiIX0XEzQV1L0nnX1XXqvkzSSuUrWHxzYLjPptaFEMajOlxSSMkjVe2lsJ1kh6V9G1Jb5W0OLV2Tkn1D1G25siSNDHeud2c+o+B29MxJ6T6D6fYJ6Y6LcD7GonTBi83IW1AiIj1krZLGkfWWriPbFbLU8lmvVwZEVslfSUirgRIf6TfDnyvgZc4Dngy6iYprCfptcBFZFOPC3hA0j3AVuBTwBsi4hlJR9QdN6zEdIYAAAJpSURBVJesNXJR7NvdqccB7wY+QDZ9zHvJ7oA/B/gHstbOJ8mm/vhA6ppaIum/IuK5XBwTgM21JAh8CPhSRHw7TUVTS16rgD/YhzhtEHELwgaSn5Alh1qCuC+3vTjVOUPZ6lsrySbSO6HoRC/BG4FbI+K5iPgd2YRqb0qvdUtEPAMQEb/OHfNpsukcPrSPyQGySdxWRsQOsqk07kznWkm2xghk8y9dpmyK9B8BBwHj6s4zimz67Zr7gH+Q9PfAMRHRmeLvArbW5ney5uQEYQNJbRziRLJvuPeTtSDeAPxE0kHAvwLnRcSJZP3sBzV47rXAOEmH9nrU2Tf+19a3KvbSC7nnO3LbO3ixJ0DAH+fGMcZFRH7VO4BOcj+TiLiBrBXSCSySdGau7oHA719CzDbAOUHYQPITsi6jX0e2VsSvgeFkSeInvPiH7xlla1t0e/VQvYh4nmxG2y+lrpbajLLvrqv638DMNOPmIcA7U9ldwLslvSIdm08GtwNXA98v+Rt5K/Cx2riLpKkFdR7lxRYHko4F1kXEl8lmCT0plb8CeCayaaetSTlB2ECykuzqpfvryn4TEc+kK36uJWtdtJJ9c98bnyLrflkjaRXZko/1Cyc9RLa2+BKyVfe+HhHLI2I1cBVwj6SfAl+sO+6WFNvCNH15GT4LDAVWSFqdtneRxiN+Iem4VHQ+sCp1S70auD6VnwF8v6Q4bYDwbK5mTUbSO4HXRsSneqizALgsIh7tu8isv/FVTGZNJiJurXWFFUldbC1ODuYWhJmZFfIYhJmZFXKCMDOzQk4QZmZWyAnCzMwKOUGYmVmh/w898GrDfzxCqwAAAABJRU5ErkJggg==" + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3de5xdZX3v8c+XIcBYhAETKBnuJYlCQQIRRbwgLQY5lUREBHpaxRa0Fk9bjrGkVkQ8VGyqfemrqRzwUEVFbsYxaiRSQajckoFAQkKDIdwyQRMgUS4jCZPf+WM9k6xs1t6zk8yaPXvv7/v12q/Z+1nPWuu3smF+81zWsxQRmJmZVdqp0QGYmdno5ARhZmaFnCDMzKyQE4SZmRVygjAzs0JOEGZmVsgJwmw7SHq7pOWNjsOsTE4Q1nQkPS7pjxsZQ0T8V0RMKuv4kqZKukPS85LWSrpd0mllnc+siBOEWQFJHQ089xnAjcA1wP7AvsDFwHu341iS5P/Pbbv4PxxrGZJ2knSRpEclPSvpBkl757bfKOlXkn6T/jo/IrftG5K+JmmepBeBd6WWyiclLU77XC9pt1T/REmrcvtXrZu2f0rS05JWS/pLSSHpsIJrEPBl4PMR8fWI+E1EbIqI2yPivFTnEknfzu1zcDrezunzzyVdJulO4CVghqTeivP8naS56f2ukv5F0pOSfi3pCkmdO/h1WAtwgrBW8glgOvBOYDywDpid2/4TYAKwD3A/8J2K/c8BLgNeC/wilZ0JnAIcAhwFfLjG+QvrSjoFuBD4Y+Aw4MQax5gEHADcVKNOPf4MOJ/sWq4AJkmakNt+DnBten85MBE4OsXXTdZisTbnBGGt5GPApyNiVUS8DFwCnDH4l3VEXB0Rz+e2vVHSnrn9fxARd6a/2H+Xyr4aEasj4jngh2S/RKupVvdM4D8iYmlEvJTOXc3r0s+n673oKr6RzvdKRPwG+AFwNkBKFK8H5qYWy/nA30XEcxHxPPBPwFk7eH5rAU4Q1koOAr4vab2k9cDDwACwr6QOSZen7qffAo+nfcbm9n+q4Ji/yr1/Cdi9xvmr1R1fceyi8wx6Nv3cr0adelSe41pSgiBrPfSkZDUOeA1wX+7f7eZUbm3OCcJayVPAeyKiK/faLSL6yH4pTiPr5tkTODjto9z+ZS1t/DTZYPOgA2rUXU52He+vUedFsl/qg36/oE7ltdwCjJN0NFmiGOxeegboB47I/ZvtGRG1EqG1CScIa1ZjJO2We+1M1td+maSDACSNkzQt1X8t8DLZX+ivIetGGSk3AOdKeoOk1wCfqVYxsvX3LwQ+I+lcSXukwfe3SboyVXsAeIekA1MX2cyhAoiIjWQzo2YBe5MlDCJiE3AV8K+S9gGQ1C1p6nZfrbUMJwhrVvPI/vIdfF0CfAWYC/xU0vPAPcCbU/1rgCeAPmBZ2jYiIuInwFeB24AVuXO/XKX+TcAHgY8Aq4FfA/+HbByBiLgFuB5YDNwH/KjOUK4la0HdGBGv5Mr/fjCu1P32n2SD5dbm5AcGmY0sSW8AHgJ2rfhFbTaquAVhNgIkvS/db7AX8EXgh04ONto5QZiNjI8Ca4BHyWZW/VVjwzEbmruYzMyskFsQZmZWaOdGBzBcxo4dGwcffHCjwzAzayr33XffMxFReGNkyySIgw8+mN7e3qErmpnZZpKeqLbNXUxmZlbICcLMzAo5QZiZWSEnCDMzK+QEYWZmhVpmFpOZWbvpWdTHrPnLWb2+n/FdncyYOonpk7uH7fhOEGZmTahnUR8z5yyhf+MAAH3r+5k5ZwnAsCUJdzGZmTWhWfOXb04Og/o3DjBr/vJhO4cThJlZE1q9vn+byreHE4SZWRMa39W5TeXbwwnCzKwJzZg6ic4xHVuVdY7pYMbU4XsYoAepzcya0OBA9KduWsyGgU10exaTmZkNmj65m+8ueBKA6z96/LAf311MZmZWyAnCzMwKOUGYmVkhJwgzMyvkQWozayplrz9kW5TagpB0iqTlklZIuqhKnTMlLZO0VNK1qexdkh7IvX4naXqZsZrZ6De4/lDf+n6CLesP9Szqa3RoLam0FoSkDmA2cDKwClgoaW5ELMvVmQDMBE6IiHWS9gGIiNuAo1OdvYEVwE/LitXMmkO19Yc+ddPizdM9282yp3/L4fvtUcqxy2xBHAesiIiVEbEBuA6YVlHnPGB2RKwDiIg1Bcc5A/hJRLxUYqxm1gSqrTO0YWDTCEcyehy+3x5MO7qcLrYyxyC6gadyn1cBb66oMxFA0p1AB3BJRNxcUecs4MtFJ5B0PnA+wIEHHjgMIZvZaDa+q5O+giTR3dVZyo1i7a7Rs5h2BiYAJwJnA1dJ6hrcKGk/4EhgftHOEXFlREyJiCnjxo0bgXDNrJFGYv0h26LMBNEHHJD7vH8qy1sFzI2IjRHxGPAIWcIYdCbw/YjYWGKcZtYkpk/u5gunH8kuHdmvru6uTr5w+pGexVSSMruYFgITJB1ClhjOAs6pqNND1nL4D0ljybqcVua2n002iG0GeIqjlb/+kG1RWoKIiFckXUDWPdQBXB0RSyVdCvRGxNy07d2SlgEDwIyIeBZA0sFkLZDby4rRmstIPGLRzLZQRDQ6hmExZcqU6O3tbXQYVqITLr+1cIByl46dmHxgV8Ee1qoGp3a6BbHjJN0XEVOKtjV6kNqsbp7iaIPKnNppW3ipDWsanuJoNrLcgrCm4SmOZiPLLQhrGiPxiEUz28IJokUVTQcFmn6KqKc4mo0cJ4gWVDQddMaND4Jg40BsLvMUUTOrxQmiBRWteLlx06unMzfrKphlrl5pZlt4kLoFVZsOWqQZp4h6iqPZyHALogVVmw5axFNEzawaJ4gmVzQYPWPqpK3GIADG7KStxiDAU0TNrDZ3MTWxao9fBF614uWsD7yRWWe8ke6uToRXwTSzobkF0cRqPX5x8oFd7DomW6Mo34XkhGBm9XILookNtTaRB3PNbEe4BdHEvDaRmZXJLYgm5rWJzKxMbkE0Ma9NZGZlcoJocl6byMzK4i4mMzMr5ARhZmaFnCDMzKyQE4SZmRXyIPUIKFovyTONzGy0c4IoWdHDe/ygHjNrBk4QJau1XtJwPajHD9AxszI4QQyDWl1IQ62XNBy85pKZlcEJYgcN1YXk9ZLMrFk5QeygobqQdhuzEzsJ8o+E9npJZtYMnCB20FBdSGN33xWAp57r93pJZtZUnCB2kLuQzKxVlXqjnKRTJC2XtELSRVXqnClpmaSlkq7NlR8o6aeSHk
7bDy4z1u3lJbfNrFWV1oKQ1AHMBk4GVgELJc2NiGW5OhOAmcAJEbFO0j65Q1wDXBYRt0jaHRi+aT/DyEtum1mrKrOL6ThgRUSsBJB0HTANWJarcx4wOyLWAUTEmlT3cGDniLgllb9QYpw7zEtum1krGrKLSdLrtvPY3cBTuc+rUlneRGCipDsl3SPplFz5eklzJC2SNCu1SMzMbITUMwZxj6QbJZ0qScN8/p2BCcCJwNnAVZK6UvnbgU8CbwIOBT5cubOk8yX1Supdu3btMIdmZtbe6kkQE4ErgT8DfinpnyRNrGO/PuCA3Of9U1neKmBuRGyMiMeAR8gSxirggYhYGRGvAD3AMZUniIgrI2JKREwZN25cHSGZmVm9hkwQkbklIs4mGzP4ELBA0u2SanW4LwQmSDpE0i7AWcDcijo9ZK0HJI0lS0Yr075dkgZ/65/E1mMXZmZWsiEHqdMYxP8ka0H8GvgE2S/6o4EbgUOK9ouIVyRdAMwHOoCrI2KppEuB3oiYm7a9W9IyYACYERHPpvN+EvhZ6ta6D7hqh67UzMy2ST2zmO4GvgVMj4hVufJeSVfU2jEi5gHzKsouzr0P4ML0qtz3FuCoOuIzM7MS1JMgJqVf5K8SEV8c5nhGJT/wx8zaUT2D1D9NM4sAkLSXpPklxjSqDK7W2re+n2DLaq09iyrH283MWks9LYhxEbF+8EPBHc8trd4H/vihPWbWauppQQxIOnDwg6SDgMIup1ZU7wN//NAeM2s19bQgPg38QtLtgMhuYDu/1KhGEa/Wambtqp77IG4mu0nteuA64NiIaJsxCK/Wambtqt7F+gaANcBuwOGSiIg7ygtr9PBqrWbWruq5Ue4vgb8hWyrjAeAtZPdGnFRuaKOHV2s1s3ZUzyD135AtmPdERLwLmAysr72LmZk1u3oSxO8i4ncAknaNiP8G3AFvZtbi6hmDWJVulOsBbpG0Dnii3LDMzKzRhkwQEfG+9PYSSbcBewI3lxqVmZk1XM0EkZ7itjQiXg8QEbePSFRmZtZwNccgImIAWJ6/k9rMzNpDPWMQewFLJS0AXhwsjIjTSovKzMwarp4E8ZnSozAzs1GnnkFqjzuYmbWheu6kfp4tq7fuAowBXowIr21tZtbC6mlBvHbwfXo+9DSy5TbMzKyF1XMn9WaR6QGmlhSPmZmNEvV0MZ2e+7gTMAX4XWkRmZnZqFDPLKb35t6/AjxO1s1kZmYtrJ4xiHNHIhAzMxtdhhyDkPTNtFjf4Oe9JF1dblhmZtZo9QxSHxURm5//EBHryJ4JYWZmLayeBLGTpL0GP0jam/ofVWpmZk2qnl/0XwLulnRj+vwB4LLyQjIzs9GgnkHqayT1suUZ1KdHxLJywxodehb1MWv+clav72dMx04csHdno0MyMxsx9QxSvwV4KiL+LSL+jewJc28uP7TG6lnUx8w5S+hb308AGwY28dgzL9KzqK/RoZmZjYh6xiC+BryQ+/xCKmtps+Yvp3/jwFZlmyIrNzNrB/UkCEXE4GJ9RMQm6hyklnSKpOWSVki6qEqdMyUtk7RU0rW58gFJD6TX3HrON5xWr+/fpnIzs1ZTzy/6lZL+F1taDR8HVg61U3pc6WzgZGAVsFDS3Pz4haQJwEzghIhYJ2mf3CH6I+LoOq9j2I3v6qSvIBmM7/I4hJm1h3paEB8D3gr0kf2ifzNwXh37HQesiIiVEbEBuI5XL9FxHjA73VtBRKypN/CyzZg6ic4xHVuVdY7pYMbUSQ2KyMxsZA2ZICJiTUScFRH7RMS+wF8AJ9Zx7G7gqdznVaksbyIwUdKdku6RdEpu226SelP59KITSDo/1eldu3ZtHSHVb/rkbr5w+pHs0pH9E3V3dfKF049k+uTKSzAza031jiV0kC3xfTZZl9EvgBtr7lT/+SeQJZz9gTskHZnu3D4oIvokHQrcKmlJRDya3zkirgSuBJgyZUowzKZP7ua7C54E4PqPHj/chzczG9VqJghJ7wTOAU4FFgAnAIdGxEt1HLsPOCD3ef9UlrcKuDciNgKPSXqELGEsjIg+gIhYKennZMt7PIqZmY2Iql1MklYBXyBrLRweEe8nGziuJzkALAQmSDpE0i7AWUDlbKQeUneVpLFkXU4r04KAu+bKTwDa4uY8M7PRotYYxE3AeOCDwHsl/R5bnk09pIh4BbgAmA88DNwQEUslXSrptFRtPvCspGXAbcCMiHgWeAPQK+nBVH55u9y9bWY2WlTtYoqIv5X0d2R/4Z8N/DOwp6QzgXkR8UK1fXPHmAfMqyi7OPc+gAvTK1/nLuDI+i/DzMyGW81ZTOkZ1LdFxPnAIWSJYhrZU+XMzKyF1b1sdxpI/hHwI0m+W8zMrMXVc6Pcq0SE15swM2tx25UgzMys9TlBmJlZoSHHICRNBGYAB+XrR8RJVXcyM7OmV88g9Y3AFcBVwMAQdc3MrEXUkyBeiYiWf0CQmZltrZ4xiB9K+rik/STtPfgqPTIzM2uoeloQH0o/Z+TKAjh0+MMxM7PRYsgEERGHjEQgjdazqI9Z85ezen0/47s6mTF1kp/9YGZtrZ5ZTGOAvwLekYp+DvzfdGd1S+hZ1MfMOUvo35iNwfet72fmnCUNjsrMrLHqGYP4GnAs8O/pdSxbnk/dEmbNX745OQzq3zjAp25azLKnf9ugqMzMGqueMYg3RcQbc59vTctwt4zV64tXDtkwsInJB3Yx7Wh3NZlZ+6knQQxI+oPBx32mR4C21P0Q47s66StIEt1dnX7UqJm1rXq6mGYAt0n6uaTbgVuB/11uWCNrxtRJdI7p2Kqsc0wHM6ZOalBEZmaNV88spp9JmgAM/rZcHhEvlxvWyBqcrfSpmxazYWAT3Z7FZGZWPUFIOikibpV0esWmwyQREXNKjm1ETZ/czXcXPAngbiUzM2q3IN5J1p303oJtAbRUgjAzs63Veib1Z9PbSyPisfw2SW1x85yZWTurZ5D6ewVlNw13IGZmNrrUGoN4PXAEsGfFOMQewG5lB2ZmZo1VawxiEvAnQBdbj0M8D5xXZlBmZtZ4tcYgfgD8QNLxEXH3CMZkZmajQD13Ui+S9Ndk3U2bu5Yi4iOlRWVmZg1XzyD1t4DfB6YCtwP7k3UzmZlZC6snQRwWEZ8BXoyIbwL/A3hzuWGZmVmj1ZMgBp/7sF7SHwJ7AvuUF5KZmY0G9YxBXClpL+AzwFxgd+DiUqMyM7OGq2exvq+nt7fj51CbmbWNWjfKXVhrx4j48lAHl3QK8BWgA/h6RFxeUOdM4BKy9Z0ejIhzctv2AJYBPRFxwVDnMzOz4VOrBfHa9HMS8Cay7iXIbppbMNSBJXUAs4GTgVXAQklzI2JZrs4EYCZwQkSsk1Q5tvF54I56LsTMzIZXrRvlPgcg6Q7gmIh4Pn2+BPhxHcc+DlgRESvTftcB08haBIPOA2ZHxLp0zjWDGyQdC+wL3AxMqf+Stl3Poj5mzV9O3/p+dunYiZ5FfX4WhJm1vXpmMe0LbMh93pDKhtINPJX7vCqV5U0EJkq6U9I9qUsKSTsBXwI+WesEk
s6X1Cupd+3atXWE9Go9i/qYOWfJ5keObhjYxMw5S+hZ1LddxzMzaxX1JIhrgAWSLkmth3uBbwzT+XcGJgAnAmcDV0nqAj4OzIuIVbV2jogrI2JKREwZN27cdgXwuR8upX/j1o/Y7t84wKz5y7freGZmraKeWUyXSfoJ8PZUdG5ELKrj2H3AAbnP+6eyvFXAvRGxEXhM0iNkCeN44O2SPk42rXYXSS9ExEV1nLduPYv6WPfSxsJtq1OLwsysXdWaxbRHRPxW0t7A4+k1uG3viHhuiGMvBCakhwv1AWcB51TU6SFrOfyHpLFkXU4rI+JPc+f6MDBluJMDULOVML6rc7hPZ2bWVGq1IK4lW+77PrIpqIOUPte8JyIiXpF0ATCfbJrr1RGxVNKlQG9EzE3b3i1pGTAAzIiIZ7f7arZRrVbCjKmTRioMM7NRSRExdK0mMGXKlOjt7d2mfU64/NbNg9N5XZ1jeOCz7x6u0MzMRi1J90VE4UzRWl1Mx9Q6aETcv6OBNdqMqZOYOWfJVoPUnWM6uOS0IxoYlZnZ6FCri+lLNbYFcNIwxzLiBu91+NRNi9kwsInurk5mTJ3keyDMzKh9o9y7RjKQRpk+uZvvLngSgOs/enyDozEzGz3qWc2VtMz34Wz9RLlrygrKzMwab8gEIemzZDeyHQ7MA94D/ILsBjozM2tR9dxJfQbwR8CvIuJc4I1kDw0yM7MWVk+C6I+ITcArafntNWx9h7SZmbWgesYgetP6SFeR3TT3AnB3qVGZmVnD1boPYjZwbUR8PBVdIelmYI+IWDwi0ZmZWcPU6mJ6BPgXSY9L+mdJkyPi8VZLDj2L+lj05Hrufew5Trj8Vi/zbWaWVE0QEfGViDgeeCfwLHC1pP+W9FlJE0cswhINPgtiw8AmAPrW9/tZEGZmyZCD1BHxRER8MSImk628Oh14uPTIRsCs+cv9LAgzsyqGTBCSdpb0XknfAX4CLAdOLz2yEVBtNVc/C8LMrPYg9clkLYZTgQXAdcD5EfHiCMVWuvFdnYWrufpZEGZmtVsQM4G7gDdExGkRcW0rJQfIVnPtHNOxVVnnmA4/C8LMjNqL9TX9aq1DmT65m94nnuPb92SL9XVIvP/Ybq/mamZGfXdSt6yeRX18774tM5YGIvjefX2exWRmRpsnCM9iMjOrrq0ThGcxmZlV19YJotpsJc9iMjNr8wThWUxmZtXV9US5VuVnUpuZVdfWCQL8TGozs2rauovJzMyqc4IwM7NCThBmZlbICcLMzAo5QZiZWSEnCDMzK+QEYWZmhUpNEJJOkbRc0gpJF1Wpc6akZZKWSro2lR0k6X5JD6Tyj5UZp5mZvVppN8pJ6gBmAycDq4CFkuZGxLJcnQlkDyY6ISLWSdonbXoaOD4iXpa0O/BQ2nd1WfGamdnWymxBHAesiIiVEbGB7JGl0yrqnAfMjoh1ABGxJv3cEBEvpzq7lhynmZkVKPMXbzfwVO7zqlSWNxGYKOlOSfdIOmVwg6QDJC1Ox/hiUetB0vmSeiX1rl27toRLMDNrX43+y3xnYAJwInA2cJWkLoCIeCoijgIOAz4kad/KnSPiyoiYEhFTxo0bN4Jhm5m1vjITRB9wQO7z/qksbxUwNyI2RsRjwCNkCWOz1HJ4CHh7ibGamVmFMhPEQmCCpEMk7QKcBcytqNND1npA0liyLqeVkvaX1JnK9wLeBvg5oGZmI6i0BBERrwAXAPOBh4EbImKppEslnZaqzQeelbQMuA2YERHPAm8A7pX0IHA78C8RsaSsWM3M7NVKfR5ERMwD5lWUXZx7H8CF6ZWvcwtwVJmxmZlZbY0epDYzs1HKCcLMzAo5QZiZWSEnCDMzK+QEYWZmhZwgzMyskBOEmZkVcoIwM7NCThBmZlbICcLMzAo5QZiZWSEnCDMzK+QEYWZmhZwgzMyskBOEmZkVcoIwM7NCThBmZlbICcLMzAo5QZiZWSEnCDMzK+QEYWZmhdo+QfQs6mPRk+u597HnOOHyW+lZ1NfokMzMRoW2ThA9i/qYOWcJGwY2AdC3vp+Zc5Y4SZiZ0eYJYtb85fRvHNiqrH/jALPmL29QRGZmo0dbJ4jV6/u3qdzMrJ20dYIY39W5TeVmZu2krRPEjKmT6BzTsVVZ55gOZkyd1KCIzMxGj50bHUAjTZ/cDWRjEavX9zO+q5MZUydtLjcza2dtnSAgSxJOCGZmr9bWXUxmZlZdqQlC0imSlktaIemiKnXOlLRM0lJJ16ayoyXdncoWS/pgmXGamdmrldbFJKkDmA2cDKwCFkqaGxHLcnUmADOBEyJinaR90qaXgD+PiF9KGg/cJ2l+RKwvK14zM9tamS2I44AVEbEyIjYA1wHTKuqcB8yOiHUAEbEm/XwkIn6Z3q8G1gDjSozVzMwqlJkguoGncp9XpbK8icBESXdKukfSKZUHkXQcsAvwaMG28yX1Supdu3btMIZuZmaNnsW0MzABOBHYH7hD0pGDXUmS9gO+BXwoIjZV7hwRVwJXprprJT2xnXGMBZ7Zzn1HG1/L6ORrGZ1a6Vpg+67noGobykwQfcABuc/7p7K8VcC9EbEReEzSI2QJY6GkPYAfA5+OiHuGOllEbHcXlKTeiJiyvfuPJr6W0cnXMjq10rXA8F9PmV1MC4EJkg6RtAtwFjC3ok4PWesBSWPJupxWpvrfB66JiJtKjNHMzKooLUFExCvABcB84GHghohYKulSSaelavOBZyUtA24DZkTEs8CZwDuAD0t6IL2OLitWMzN7tVLHICJiHjCvouzi3PsALkyvfJ1vA98uM7YKV47gucrmaxmdfC2jUytdCwzz9Sj7HW1mZrY1L7VhZmaFnCDMzKxQWyeIetaKGs0kPS5pSRrE701le0u6RdIv08+9Gh1nNZKulrRG0kO5ssL4lflq+q4WSzqmcZG/WpVruURSX26ixam5bTPTtSyXNLUxUReTdICk23JrpP1NKm+676bGtTTddyNpN0kLJD2YruVzqfwQSfemmK9Ps0CRtGv6vCJtP3ibTxoRbfkCOsjuzj6U7E7tB4HDGx3XNl7D48DYirJ/Bi5K7y8CvtjoOGvE/w7gGOChoeIHTgV+Agh4C9n9Mw2/hiGu5RLgkwV1D0//ve0KHJL+O+xo9DXk4tsPOCa9fy3wSIq56b6bGtfSdN9N+vfdPb0fA9yb/r1vAM5K5VcAf5Xefxy4Ir0/C7h+W8/Zzi2IetaKakbTgG+m998Epjcwlpoi4g7guYriavFPI7svJiK7cbIr3Wk/KlS5lmqmAddFxMsR8Riwguy/x1EhIp6OiPvT++fJpql304TfTY1rqWbUfjfp3/eF9HFMegVwEjB4v1jl9zL4fd0E/JEkbcs52zlB1LNW1GgXwE8l3Sfp/FS2b0Q8nd7/Cti3MaFtt2rxN+v3dUHqdrk6193XNNeSuiUmk/212tTfTcW1QBN+N5I6JD1AtoDpLWQtnPWR3XcGW8e7+VrS9t8Ar9uW87VzgmgFb4uIY4D3AH8t6R35jZG1LZt2HnOzxw98DfgD4GjgaeBLjQ1n20ja
Hfge8LcR8dv8tmb7bgqupSm/m4gYiIijyZYuOg54fZnna+cEUc9aUaNaRPSln2vIliY5Dvj1YPM+/VzTuAi3S7X4m+77iohfp/+hNwFXsaWrYtRfi6QxZL9QvxMRc1JxU343RdfSzN8NQGQLmt4GHE/WpTd403M+3s3XkrbvCTy7Ledp5wRRz1pRo5ak35P02sH3wLuBh8iu4UOp2oeAHzQmwu1WLf65wJ+nGTNvAX6T6+4YlSr64d9H9v1Adi1npVkmh5AtULlgpOOrJvVT/z/g4Yj4cm5T03031a6lGb8bSeMkdaX3nWQPY3uYLFGckapVfi+D39cZwK2p5Ve/Ro/MN/JFNvviEbJ+vE83Op5tjP1QstkWDwJLB+Mn62P8GfBL4D+BvRsda41r+C5Z834jWd/pX1SLn2wGx+z0XS0BpjQ6/jqu5Vsp1sXpf9b9cvU/na5lOfCeRsdfcS1vI+s+Wgw8kF6nNuN3U+Namu67AY4CFqWYHwIuTuWHkiWxFcCNwK6pfLf0eUXafui2ntNLbZiZWaF27mIyM7ManCDMzKyQE4SZmRVygjAzs0JOEGZmVsgJwpqCpH+V9Le5z/MlfT33+UuSLizeGyR9Q9IZ6f3PJb3qwe6Sxki6PK1Wer+kuyW9J217XNlz07c17s3nrbJ9dlpNdJmk/tzqomdImjc47304SdpP0o9qbN9F0h25mwoiZ7UAAAOUSURBVK+sTTlBWLO4E3grgKSdgLHAEbntbwXu2sFzfJ5s9c8/jGwJk+lkK4CWJiL+OrKlE04FHo2Io9Prpog4NbI7ZofbhWR3D1eLaQPZ/Q4fLOHc1kScIKxZ3EW2rABkieEh4HlJe0naFXgDcL+kiyUtlPSQpCvrXb1S0muA84BPRMTLsHk5hhsK6l6Yjv9QRavmz9Pibw9K+lbBfp9PLYqOOmN6XNJYSQdL+u+07yOSviPpjyXdmVo7x6X6v5cWnlsgaZGkaqsTvx+4Oe1zRKr/QIp9QqrTA/xpPXFa63IT0ppCRKyW9IqkA8laC3eTrVZ5PNkqlUsiYoOkf4uISwHSL+k/AX5YxykOA56MikXpKkk6FjgXeDPZHcT3Srod2AD8I/DWiHhG0t4V+80ia42cG9t3d+phwAeAj5AtE3MO2V3CpwH/QNba+TTZcgofSV1TCyT9Z0S8mIvjEGDdYBIEPgZ8JSK+k5acGUxeDwFv2o44rYW4BWHN5C6y5DCYIO7Ofb4z1XmXsqdnLSFbJ/+IogPtgLcB34+IFyNbm38O8PZ0rhsj4hmAiMg/G+IzwJ4R8bHtTA4Aj0XEksgWl1sK/CwdawlwcKrzbuAiZctB/5xsqYUDK46zH7A29/lu4B8k/T1wUET0p/gHgA2D631Ze3KCsGYyOA5xJNlfuPeQtSDeCtwlaTfg34EzIuJIsn723eo89grgQEl7DHvU2V/8x1a2KrbRy7n3m3KfN7GlJ0DA+3PjGAdGxMMVx+kn928SEdeStUL6gXmSTsrV3RX43Q7EbE3OCcKayV1kXUbPRbZU83NAF1mSuIstv/ieUbb+f9XZQ5Ui4iWyVT+/oi3P9B0n6QMVVf8LmC7pNWkV3felsluBD0h6Xdo3nwxuBi4HflzyX+TzgU8MjrtImlxQ5xG2tDiQdCiwMiK+SrYK6FGp/HXAMxGxscR4bZRzgrBmsoRs9tI9FWW/iYhn0oyfq8haF/PJ/nLfFv9I1v2yTNJDwI+Aygfl3A98g2x1zHuBr0fEoohYClwG3C7pQeDLFfvdmGKbm5ZqLsPnyR5DuVjS0vR5K2k84lFJh6WiM4GHUrfUHwLXpPJ3AT8uKU5rEl7N1azNSHofcGxE/GONOnOAiyLikZGLzEYbz2IyazMR8f3BrrAiqYutx8nB3IIwM7NCHoMwM7NCThBmZlbICcLMzAo5QZiZWSEnCDMzK/T/AeiFdYCibO9ZAAAAAElFTkSuQmCC" }, "metadata": { "needs_background": "light" @@ -784,70 +860,184 @@ "output_type": "stream", "name": "stderr", "text": [ - "[flaml.automl: 08-13 18:00:59] {1121} INFO - Evaluation method: holdout\n", - "[flaml.automl: 08-13 18:01:00] {618} INFO - Using StratifiedKFold\n", - "[flaml.automl: 08-13 18:01:00] {1142} INFO - Minimizing error metric: 1-accuracy\n", - "[flaml.automl: 08-13 18:01:00] {1162} INFO - List of ML learners in AutoML Run: ['RGF', 'lgbm', 'rf', 'xgboost']\n", - "[flaml.automl: 08-13 18:01:00] {1252} INFO - iteration 0, current learner RGF\n", - "/home/dmx/miniconda2/envs/test/lib/python3.8/site-packages/rgf/utils.py:224: UserWarning: Cannot find FastRGF executable files. FastRGF estimators will be unavailable for usage.\n", + "[flaml.automl: 08-22 21:04:15] {1130} INFO - Evaluation method: holdout\n", + "[flaml.automl: 08-22 21:04:15] {624} INFO - Using StratifiedKFold\n", + "[flaml.automl: 08-22 21:04:15] {1155} INFO - Minimizing error metric: 1-accuracy\n", + "[flaml.automl: 08-22 21:04:15] {1175} INFO - List of ML learners in AutoML Run: ['RGF', 'lgbm', 'rf', 'xgboost']\n", + "[flaml.automl: 08-22 21:04:15] {1358} INFO - iteration 0, current learner RGF\n", + "/home/dmx/miniconda2/envs/blend/lib/python3.8/site-packages/rgf/utils.py:225: UserWarning: Cannot find FastRGF executable files. FastRGF estimators will be unavailable for usage.\n", " warnings.warn(\"Cannot find FastRGF executable files. 
\"\n", - "[flaml.automl: 08-13 18:01:06] {1405} INFO - at 8.1s,\tbest RGF's error=0.3787,\tbest RGF's error=0.3787\n", - "[flaml.automl: 08-13 18:01:06] {1252} INFO - iteration 1, current learner RGF\n", - "[flaml.automl: 08-13 18:01:10] {1405} INFO - at 11.4s,\tbest RGF's error=0.3787,\tbest RGF's error=0.3787\n", - "[flaml.automl: 08-13 18:01:10] {1252} INFO - iteration 2, current learner RGF\n", - "[flaml.automl: 08-13 18:01:14] {1405} INFO - at 15.8s,\tbest RGF's error=0.3787,\tbest RGF's error=0.3787\n", - "[flaml.automl: 08-13 18:01:14] {1252} INFO - iteration 3, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:15] {1405} INFO - at 16.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:15] {1252} INFO - iteration 4, current learner RGF\n", - "[flaml.automl: 08-13 18:01:18] {1405} INFO - at 19.8s,\tbest RGF's error=0.3787,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:18] {1252} INFO - iteration 5, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:18] {1405} INFO - at 20.1s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:18] {1252} INFO - iteration 6, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:19] {1405} INFO - at 21.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:19] {1252} INFO - iteration 7, current learner RGF\n", - "[flaml.automl: 08-13 18:01:23] {1405} INFO - at 24.6s,\tbest RGF's error=0.3787,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:23] {1252} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:24] {1405} INFO - at 25.2s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", - "[flaml.automl: 08-13 18:01:24] {1252} INFO - iteration 9, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:24] {1405} INFO - at 25.4s,\tbest lgbm's error=0.3765,\tbest lgbm's error=0.3765\n", - "[flaml.automl: 08-13 18:01:24] {1252} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:24] {1405} INFO - at 25.6s,\tbest lgbm's error=0.3765,\tbest lgbm's error=0.3765\n", - "[flaml.automl: 08-13 18:01:24] {1252} INFO - iteration 11, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:24] {1405} INFO - at 25.9s,\tbest lgbm's error=0.3752,\tbest lgbm's error=0.3752\n", - "[flaml.automl: 08-13 18:01:24] {1252} INFO - iteration 12, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:24] {1405} INFO - at 26.1s,\tbest lgbm's error=0.3573,\tbest lgbm's error=0.3573\n", - "[flaml.automl: 08-13 18:01:24] {1252} INFO - iteration 13, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:25] {1405} INFO - at 27.0s,\tbest lgbm's error=0.3573,\tbest lgbm's error=0.3573\n", - "[flaml.automl: 08-13 18:01:25] {1252} INFO - iteration 14, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:26] {1405} INFO - at 27.3s,\tbest lgbm's error=0.3525,\tbest lgbm's error=0.3525\n", - "[flaml.automl: 08-13 18:01:26] {1252} INFO - iteration 15, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:27] {1405} INFO - at 28.5s,\tbest lgbm's error=0.3525,\tbest lgbm's error=0.3525\n", - "[flaml.automl: 08-13 18:01:27] {1252} INFO - iteration 16, current learner RGF\n", - "[flaml.automl: 08-13 18:01:33] {1405} INFO - at 34.6s,\tbest RGF's error=0.3684,\tbest lgbm's error=0.3525\n", - "[flaml.automl: 08-13 18:01:33] {1252} INFO - iteration 17, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:33] {1405} INFO - at 35.0s,\tbest lgbm's error=0.3525,\tbest lgbm's error=0.3525\n", - "[flaml.automl: 
08-13 18:01:33] {1252} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:39] {1405} INFO - at 41.2s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:39] {1252} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:43] {1405} INFO - at 45.2s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:43] {1252} INFO - iteration 20, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:47] {1405} INFO - at 48.9s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:50] {1437} INFO - retrain lgbm for 2.3s\n", - "[flaml.automl: 08-13 18:01:50] {1252} INFO - iteration 21, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:51] {1405} INFO - at 53.2s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:51] {1252} INFO - iteration 22, current learner lgbm\n", - "[flaml.automl: 08-13 18:01:55] {1405} INFO - at 56.8s,\tbest lgbm's error=0.3501,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:58] {1437} INFO - retrain lgbm for 2.4s\n", - "[flaml.automl: 08-13 18:01:58] {1252} INFO - iteration 23, current learner xgboost\n", - "[flaml.automl: 08-13 18:01:58] {1405} INFO - at 59.4s,\tbest xgboost's error=0.3787,\tbest lgbm's error=0.3501\n", - "[flaml.automl: 08-13 18:01:58] {1437} INFO - retrain xgboost for 0.7s\n", - "[flaml.automl: 08-13 18:01:58] {1461} INFO - selected model: LGBMClassifier(colsample_bytree=0.7967145599266738,\n", - " learning_rate=0.589471433950518, max_bin=256,\n", - " min_child_samples=44, n_estimators=22, num_leaves=12,\n", - " objective='binary', reg_alpha=0.040774029561503077,\n", - " reg_lambda=9.878828628614547, verbose=-1)\n", - "[flaml.automl: 08-13 18:01:58] {1184} INFO - fit succeeded\n", - "[flaml.automl: 08-13 18:01:58] {1185} INFO - Time taken to find the best model: 41.16750192642212\n" + "[flaml.automl: 08-22 21:04:18] {1515} INFO - at 3.7s,\tbest RGF's error=0.3840,\tbest RGF's error=0.3840\n", + "[flaml.automl: 08-22 21:04:18] {1358} INFO - iteration 1, current learner RGF\n", + "[flaml.automl: 08-22 21:04:19] {1515} INFO - at 5.2s,\tbest RGF's error=0.3840,\tbest RGF's error=0.3840\n", + "[flaml.automl: 08-22 21:04:19] {1358} INFO - iteration 2, current learner RGF\n", + "[flaml.automl: 08-22 21:04:21] {1515} INFO - at 6.7s,\tbest RGF's error=0.3840,\tbest RGF's error=0.3840\n", + "[flaml.automl: 08-22 21:04:21] {1358} INFO - iteration 3, current learner RGF\n", + "[flaml.automl: 08-22 21:04:22] {1515} INFO - at 8.2s,\tbest RGF's error=0.3840,\tbest RGF's error=0.3840\n", + "[flaml.automl: 08-22 21:04:22] {1358} INFO - iteration 4, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.3s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.3s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 6, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.4s,\tbest lgbm's error=0.3777,\tbest lgbm's error=0.3777\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 7, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.6s,\tbest lgbm's error=0.3661,\tbest lgbm's error=0.3661\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 8, current learner lgbm\n", + "[flaml.automl: 08-22 
21:04:23] {1515} INFO - at 8.7s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 9, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.8s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 10, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 8.9s,\tbest lgbm's error=0.3645,\tbest lgbm's error=0.3645\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 11, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 9.1s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 12, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:23] {1515} INFO - at 9.2s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:23] {1358} INFO - iteration 13, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:24] {1515} INFO - at 9.4s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:24] {1358} INFO - iteration 14, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:24] {1515} INFO - at 9.5s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:24] {1358} INFO - iteration 15, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:24] {1515} INFO - at 9.7s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:24] {1358} INFO - iteration 16, current learner RGF\n", + "[flaml.automl: 08-22 21:04:25] {1515} INFO - at 11.1s,\tbest RGF's error=0.3840,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:25] {1358} INFO - iteration 17, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:26] {1515} INFO - at 11.2s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:26] {1358} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:26] {1515} INFO - at 11.4s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:26] {1358} INFO - iteration 19, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:26] {1515} INFO - at 11.8s,\tbest lgbm's error=0.3610,\tbest lgbm's error=0.3610\n", + "[flaml.automl: 08-22 21:04:26] {1358} INFO - iteration 20, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:26] {1515} INFO - at 12.2s,\tbest lgbm's error=0.3592,\tbest lgbm's error=0.3592\n", + "[flaml.automl: 08-22 21:04:26] {1358} INFO - iteration 21, current learner RGF\n", + "[flaml.automl: 08-22 21:04:28] {1515} INFO - at 13.8s,\tbest RGF's error=0.3762,\tbest lgbm's error=0.3592\n", + "[flaml.automl: 08-22 21:04:28] {1358} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:28] {1515} INFO - at 14.1s,\tbest lgbm's error=0.3592,\tbest lgbm's error=0.3592\n", + "[flaml.automl: 08-22 21:04:28] {1358} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:29] {1515} INFO - at 14.5s,\tbest lgbm's error=0.3592,\tbest lgbm's error=0.3592\n", + "[flaml.automl: 08-22 21:04:29] {1358} INFO - iteration 24, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:29] {1515} INFO - at 14.8s,\tbest lgbm's error=0.3555,\tbest lgbm's error=0.3555\n", + "[flaml.automl: 08-22 21:04:29] {1358} INFO - iteration 25, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:29] {1515} INFO - at 15.1s,\tbest lgbm's error=0.3555,\tbest lgbm's error=0.3555\n", + "[flaml.automl: 08-22 21:04:29] {1358} 
INFO - iteration 26, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:30] {1515} INFO - at 16.1s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n", + "[flaml.automl: 08-22 21:04:30] {1358} INFO - iteration 27, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:31] {1515} INFO - at 16.4s,\tbest lgbm's error=0.3523,\tbest lgbm's error=0.3523\n", + "[flaml.automl: 08-22 21:04:31] {1358} INFO - iteration 28, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:31] {1515} INFO - at 17.1s,\tbest lgbm's error=0.3519,\tbest lgbm's error=0.3519\n", + "[flaml.automl: 08-22 21:04:31] {1358} INFO - iteration 29, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:32] {1515} INFO - at 17.4s,\tbest lgbm's error=0.3519,\tbest lgbm's error=0.3519\n", + "[flaml.automl: 08-22 21:04:32] {1358} INFO - iteration 30, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:32] {1515} INFO - at 17.9s,\tbest lgbm's error=0.3519,\tbest lgbm's error=0.3519\n", + "[flaml.automl: 08-22 21:04:32] {1358} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:33] {1515} INFO - at 18.5s,\tbest lgbm's error=0.3504,\tbest lgbm's error=0.3504\n", + "[flaml.automl: 08-22 21:04:33] {1358} INFO - iteration 32, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:33] {1515} INFO - at 18.9s,\tbest lgbm's error=0.3504,\tbest lgbm's error=0.3504\n", + "[flaml.automl: 08-22 21:04:33] {1358} INFO - iteration 33, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:35] {1515} INFO - at 20.5s,\tbest lgbm's error=0.3504,\tbest lgbm's error=0.3504\n", + "[flaml.automl: 08-22 21:04:35] {1358} INFO - iteration 34, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:35] {1515} INFO - at 20.9s,\tbest lgbm's error=0.3504,\tbest lgbm's error=0.3504\n", + "[flaml.automl: 08-22 21:04:35] {1358} INFO - iteration 35, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:36] {1515} INFO - at 21.3s,\tbest lgbm's error=0.3504,\tbest lgbm's error=0.3504\n", + "[flaml.automl: 08-22 21:04:36] {1358} INFO - iteration 36, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:40] {1515} INFO - at 25.7s,\tbest lgbm's error=0.3392,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:40] {1358} INFO - iteration 37, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:40] {1515} INFO - at 25.7s,\tbest xgboost's error=0.3787,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:40] {1358} INFO - iteration 38, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:40] {1515} INFO - at 25.8s,\tbest xgboost's error=0.3769,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:40] {1358} INFO - iteration 39, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:40] {1515} INFO - at 25.9s,\tbest xgboost's error=0.3765,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:40] {1358} INFO - iteration 40, current learner rf\n", + "[flaml.automl: 08-22 21:04:40] {1515} INFO - at 26.1s,\tbest rf's error=0.3765,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:40] {1358} INFO - iteration 41, current learner rf\n", + "[flaml.automl: 08-22 21:04:41] {1515} INFO - at 26.3s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:41] {1358} INFO - iteration 42, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:43] {1515} INFO - at 28.8s,\tbest lgbm's error=0.3392,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:43] {1358} INFO - iteration 43, current learner rf\n", + "[flaml.automl: 08-22 21:04:43] {1515} INFO - at 28.9s,\tbest 
rf's error=0.3689,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:43] {1358} INFO - iteration 44, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:43] {1515} INFO - at 29.0s,\tbest xgboost's error=0.3746,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:43] {1358} INFO - iteration 45, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:43] {1515} INFO - at 29.1s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:43] {1358} INFO - iteration 46, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:43] {1515} INFO - at 29.2s,\tbest xgboost's error=0.3673,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:43] {1358} INFO - iteration 47, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:44] {1515} INFO - at 29.3s,\tbest xgboost's error=0.3617,\tbest lgbm's error=0.3392\n", + "[flaml.automl: 08-22 21:04:44] {1358} INFO - iteration 48, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:51] {1515} INFO - at 36.4s,\tbest lgbm's error=0.3391,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:51] {1358} INFO - iteration 49, current learner rf\n", + "[flaml.automl: 08-22 21:04:51] {1515} INFO - at 36.6s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:51] {1358} INFO - iteration 50, current learner xgboost\n", + "[flaml.automl: 08-22 21:04:51] {1515} INFO - at 36.7s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:51] {1358} INFO - iteration 51, current learner rf\n", + "[flaml.automl: 08-22 21:04:51] {1515} INFO - at 36.9s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:51] {1358} INFO - iteration 52, current learner rf\n", + "[flaml.automl: 08-22 21:04:51] {1515} INFO - at 37.1s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:51] {1358} INFO - iteration 53, current learner lgbm\n", + "[flaml.automl: 08-22 21:04:58] {1515} INFO - at 43.8s,\tbest lgbm's error=0.3391,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:04:58] {1358} INFO - iteration 54, current learner RGF\n", + "[flaml.automl: 08-22 21:05:00] {1515} INFO - at 45.4s,\tbest RGF's error=0.3762,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:05:00] {1358} INFO - iteration 55, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:00] {1515} INFO - at 45.6s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3391\n", + "[flaml.automl: 08-22 21:05:00] {1358} INFO - iteration 56, current learner lgbm\n", + "[flaml.automl: 08-22 21:05:08] {1515} INFO - at 53.7s,\tbest lgbm's error=0.3384,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:08] {1358} INFO - iteration 57, current learner rf\n", + "[flaml.automl: 08-22 21:05:08] {1515} INFO - at 53.9s,\tbest rf's error=0.3689,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:08] {1358} INFO - iteration 58, current learner rf\n", + "[flaml.automl: 08-22 21:05:08] {1515} INFO - at 54.1s,\tbest rf's error=0.3684,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:08] {1358} INFO - iteration 59, current learner RGF\n", + "[flaml.automl: 08-22 21:05:10] {1515} INFO - at 55.7s,\tbest RGF's error=0.3762,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:10] {1358} INFO - iteration 60, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:10] {1515} INFO - at 55.8s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:10] {1358} INFO - 
iteration 61, current learner RGF\n", + "[flaml.automl: 08-22 21:05:12] {1515} INFO - at 57.7s,\tbest RGF's error=0.3759,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:12] {1358} INFO - iteration 62, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:12] {1515} INFO - at 57.8s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:12] {1358} INFO - iteration 63, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:12] {1515} INFO - at 57.9s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:12] {1358} INFO - iteration 64, current learner rf\n", + "[flaml.automl: 08-22 21:05:12] {1515} INFO - at 58.1s,\tbest rf's error=0.3643,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:12] {1358} INFO - iteration 65, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 58.4s,\tbest rf's error=0.3643,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 66, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 58.5s,\tbest rf's error=0.3643,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 67, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 58.7s,\tbest rf's error=0.3643,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 68, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 58.9s,\tbest rf's error=0.3643,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 69, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 59.0s,\tbest rf's error=0.3628,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 70, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 59.1s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 71, current learner rf\n", + "[flaml.automl: 08-22 21:05:13] {1515} INFO - at 59.2s,\tbest rf's error=0.3628,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:13] {1358} INFO - iteration 72, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.3s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 73, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.4s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 74, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.5s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 75, current learner rf\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.6s,\tbest rf's error=0.3628,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 76, current learner rf\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.7s,\tbest rf's error=0.3628,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 77, current learner xgboost\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.8s,\tbest xgboost's error=0.3610,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1358} INFO - iteration 78, current learner rf\n", + "[flaml.automl: 08-22 21:05:14] {1515} INFO - at 59.9s,\tbest rf's 
error=0.3628,\tbest lgbm's error=0.3384\n", + "[flaml.automl: 08-22 21:05:14] {1592} INFO - selected model: LGBMClassifier(colsample_bytree=0.4612631642916683,\n", + " learning_rate=0.23563288484693434, max_bin=256,\n", + " min_child_samples=24, n_estimators=245, num_leaves=16,\n", + " objective='binary', reg_alpha=0.0030413642053562034,\n", + " reg_lambda=0.003910475784753646, verbose=-1)\n", + "[flaml.automl: 08-22 21:05:19] {1633} INFO - retrain lgbm for 5.1s\n", + "[flaml.automl: 08-22 21:05:19] {1636} INFO - retrained model: LGBMClassifier(colsample_bytree=0.4612631642916683,\n", + " learning_rate=0.23563288484693434, max_bin=256,\n", + " min_child_samples=24, n_estimators=245, num_leaves=16,\n", + " objective='binary', reg_alpha=0.0030413642053562034,\n", + " reg_lambda=0.003910475784753646, verbose=-1)\n", + "[flaml.automl: 08-22 21:05:19] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:05:19] {1200} INFO - Time taken to find the best model: 53.72299408912659\n", + "[flaml.automl: 08-22 21:05:19] {1205} WARNING - Time taken to find the best model is 90% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" ] } ], @@ -878,7 +1068,7 @@ "output_type": "stream", "name": "stdout", "text": [ - "flaml accuracy = 0.6721815997508269\n" + "flaml accuracy = 0.6741542203699035\n" ] } ], @@ -967,14 +1157,6 @@ "xgb.fit(X, y_train)" ], "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "/home/dmx/miniconda2/envs/test/lib/python3.8/site-packages/xgboost/sklearn.py:1146: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n", - " warnings.warn(label_encoder_deprecation_msg, UserWarning)\n" - ] - }, { "output_type": "execute_result", "data": { @@ -1019,11 +1201,11 @@ ], "metadata": { "interpreter": { - "hash": "ea9f131eb1b7663628f6445553ba215a834e2f0b4d18774746f0f47938ce4671" + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" }, "kernelspec": { "name": "python3", - "display_name": "Python 3.8.0 64-bit ('test': conda)" + "display_name": "Python 3.8.0 64-bit ('blend': conda)" }, "language_info": { "codemirror_mode": { diff --git a/notebook/flaml_azureml.ipynb b/notebook/flaml_azureml.ipynb index 95ebd06694..1acfa7e597 100644 --- a/notebook/flaml_azureml.ipynb +++ b/notebook/flaml_azureml.ipynb @@ -2,11 +2,6 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, "source": [ "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. 
\n", "\n", @@ -31,113 +26,101 @@ "```bash\n", "pip install flaml[notebook,azureml]\n", "```" - ] + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ "!pip install flaml[notebook,azureml]" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### Enable mlflow in AzureML workspace" - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], + "execution_count": null, "source": [ "import mlflow\n", "from azureml.core import Workspace\n", "\n", "ws = Workspace.from_config()\n", "mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, "source": [ "## 2. Classification Example\n", "### Load data and preprocess\n", "\n", "Download [Airlines dataset](https://www.openml.org/d/1169) from OpenML. The task is to predict whether a given flight will be delayed, given the information of the scheduled departure." - ] + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, + "source": [ + "from flaml.data import load_openml_dataset\n", + "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=1169, data_dir='./')" + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "subslide" }, "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "load dataset from ./openml_ds1169.pkl\n", - "Dataset name: airlines\n", - "X_train.shape: (404537, 7), y_train.shape: (404537,);\n", - "X_test.shape: (134846, 7), y_test.shape: (134846,)\n" - ] - } - ], - "source": [ - "from flaml.data import load_openml_dataset\n", - "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=1169, data_dir='./')" - ] + } }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, "source": [ "### Run FLAML\n", "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. For example, the default ML learners of FLAML are `['lgbm', 'xgboost', 'catboost', 'rf', 'extra_tree', 'lrl1']`. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 3, + ], "metadata": { "slideshow": { "slide_type": "slide" } - }, - "outputs": [], + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "''' import AutoML class from flaml package '''\n", "from flaml import AutoML\n", "automl = AutoML()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" } - }, - "outputs": [], + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "settings = {\n", " \"time_budget\": 60, # total running time in seconds\n", @@ -147,191 +130,139 @@ " \"sample\": False, # whether to subsample training data\n", " \"log_file_name\": 'airlines_experiment.log', # flaml log file\n", "}" - ] - }, - { - "cell_type": "code", - "execution_count": 5, + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" - }, - "tags": [] - }, - "outputs": [], + } + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "mlflow.set_experiment(\"flaml\")\n", "with mlflow.start_run() as run:\n", " '''The main flaml automl API'''\n", " automl.fit(X_train=X_train, y_train=y_train, **settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "### Best model and metric" - ] - }, - { - "cell_type": "code", - "execution_count": 6, + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best ML leaner: lgbm\n", - "Best hyperparmeter config: {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}\n", - "Best accuracy on validation data: 0.6229\n", - "Training duration of best run: 1.288 s\n" - ] - } + } + }, + { + "cell_type": "markdown", + "source": [ + "### Best model and metric" ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "''' retrieve best config and best learner'''\n", "print('Best ML leaner:', automl.best_estimator)\n", "print('Best hyperparmeter config:', automl.best_config)\n", "print('Best accuracy on validation data: {0:.4g}'.format(1 - automl.best_loss))\n", "print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "LGBMClassifier(max_bin=255, n_estimators=4, num_leaves=4, objective='binary',\n", - " reg_alpha=0.0009765625, reg_lambda=1.0)" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } ], - "source": [ - "automl.model" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, "outputs": [], - "source": [ - "''' pickle and save the automl object '''\n", - "import pickle\n", - "with open('automl.pkl', 'wb') as f:\n", - " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Predicted labels [1 0 1 ... 0 0 0]\n", - "True labels [0 0 0 ... 
0 1 0]\n" - ] - } + } + }, + { + "cell_type": "code", + "execution_count": null, + "source": [ + "automl.model" ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": null, + "source": [ + "''' pickle and save the automl object '''\n", + "import pickle\n", + "with open('automl.pkl', 'wb') as f:\n", + " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "''' compute predictions of testing dataset ''' \n", "y_pred = automl.predict(X_test)\n", "print('Predicted labels', y_pred)\n", "print('True labels', y_test)\n", "y_pred_proba = automl.predict_proba(X_test)[:,1]" - ] - }, - { - "cell_type": "code", - "execution_count": 10, + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "accuracy = 0.6262773830888569\n", - "roc_auc = 0.6402112531029138\n", - "log_loss = 0.6637970847245668\n", - "f1 = 0.35105656927257045\n" - ] - } - ], + } + }, + { + "cell_type": "code", + "execution_count": null, "source": [ "''' compute different metric values on testing dataset'''\n", "from flaml.ml import sklearn_metric_loss_score\n", "print('accuracy', '=', 1 - sklearn_metric_loss_score('accuracy', y_pred, y_test))\n", "print('roc_auc', '=', 1 - sklearn_metric_loss_score('roc_auc', y_pred_proba, y_test))\n", "print('log_loss', '=', sklearn_metric_loss_score('log_loss', y_pred_proba, y_test))" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + } }, { "cell_type": "markdown", + "source": [ + "### Log history" + ], "metadata": { "slideshow": { "slide_type": "slide" } - }, - "source": [ - "### Log history" - ] + } }, { "cell_type": "code", - "execution_count": 11, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'Current Learner': 'lgbm', 'Current Sample': 364083, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.1, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n" - ] - } - ], + "execution_count": null, "source": [ "from flaml.data import get_output_from_log\n", "time_history, best_valid_loss_history, valid_loss_history, config_history, train_loss_history = \\\n", @@ -339,31 +270,18 @@ "\n", "for config in config_history:\n", " print(config)" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "subslide" + }, + "tags": [] + } }, { "cell_type": "code", - "execution_count": 12, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3de5xdVX338c+XcJc7CZSbXGqCSrUgI17wArZApCKoiGBtEVuotVitladQa8uDD31hrfbRmmoDpQiVIlCI0YqBys0CgUzkmtCEEFAmIAmQKCICCd/+sdeBzWHP5CTMmTOX7/v1Oq85e+219vmdPTPnd/bae68l20RERLTboNcBRETE6JQEERERjZIgIiKiURJEREQ0SoKIiIhGSRAREdEoCSJiPUh6q6RFvY4jopuSIGLMkXS/pN/uZQy2f2h7725tX9Jhkq6X9LikFZKuk/Tubr1eRJMkiIgGkib18LWPBi4Bzgd2BXYE/ho4Yj22JUn5P4/1kj+cGDckbSDpVEn3SnpU0sWStqutv0TSTyX9rHw736e27jxJX5P0PUlPAAeXI5VPS7qjtPmWpE1L/YMkDdTaD1q3rP8/kh6S9KCkP5RkSa9oeA8CvgR8zvY5tn9m+1nb19k+sdQ5XdK/1drsUba3YVm+VtKZkm4AfgmcIqm/7XX+TNLs8nwTSX8v6SeSHpb0dUmbvcRfR4wDSRAxnnwcOAp4O7AzsBKYUVt/BTAV2AH4EfDNtvYfBM4EtgT+u5QdA0wH9gReC3x4iNdvrCtpOvAp4LeBVwAHDbGNvYHdgEuHqNOJ3wNOonovXwf2ljS1tv6DwIXl+VnANGDfEt8uVEcsMcElQcR48lHgM7YHbD8FnA4c3fpmbftc24/X1v2mpK1r7b9t+4byjf1Xpewrth+0/RjwHaoP0cEMVvcY4F9tL7D9y/Lag9m+/Hyo0zc9iPPK6622/TPg28BxACVRvBKYXY5YTgL+zPZjth8H/hY49iW+fowDSRAxnuwOXC5plaRVwN3AGmBHSZMknVW6n34O3F/aTK61f6Bhmz+tPf8lsMUQrz9Y3Z3btt30Oi2Plp87DVGnE+2vcSElQVAdPcwqyWoKsDkwv7bfvl/KY4JLgojx5AHgnba3qT02tb2M6kPxSKpunq2BPUob1dp3a2jjh6hONrfsNkTdRVTv431D1HmC6kO95dca6rS/l6uAKZL2pUoUre6lR4AngX1q+2xr20MlwpggkiBirNpI0qa1x4ZUfe1nStodQNIUSUeW+lsCT1F9Q9+cqhtlpFwMnCDpVZI2Bz47WEVX4+9/CvispBMkbVVOvr9F0sxS7TbgbZJeXrrITltbALafoboy6gvAdlQJA9vPAmcD/yBpBwBJu0g6bL3fbYwbSRAxVn2P6ptv63E68GVgNnClpMeBucAbSv3zgR8Dy4CFZd2IsH0F8BXgGmBJ7bWfGqT+pcAHgI8ADwIPA/+P6jwCtq8CvgXcAcwHvtthKBdSHUFdYnt1rfwvWnGV7rf/ojpZHhOcMmFQxMiS9CrgLmCTtg/qiFElRxARI0DSe8r9BtsCnwe+k+QQo10SRMTI+CNgOXAv1ZVVf9zbcCLWLl1MERHRKEcQERHRaMNeBzBcJk+e7D322KPXYUREjCnz589/xHbjjZHjJkHsscce9Pf3r71iREQ8R9KPB1uXLqaIiGiUBBEREY2SICIiolESRERENEqCiIiIRkkQERHRKAkiIiIaJUFERESjJIiIiGiUBBEREY2SICIiolESRERENEqCiIiIRkkQERHRKAkiIiIaJUFERESjJIiIiGiUBBEREY26miAkTZe0SNISSacOUucYSQslLZB0Ya18jaTbymN2N+OMiIgX69qc1JImATOAQ4ABYJ6k2bYX1upMBU4DDrS9UtIOtU08aXvfbsUXERFD6+YRxAHAEttLbT8NXAQc2VbnRGCG7ZUAtpd3MZ6IiFgH3UwQuwAP1JYHSlndNGCapBskzZU0vbZuU0n9pfyopheQdFKp079ixYrhjT4iYoLrWhfTOrz+VOAgYFfgekmvsb0K2N32Mkl7AVdLutP2vfXGtmcCMwH6+vo8sqFHRIxv3TyCWAbsVlvetZTVDQCzbT9j+z5gMVXCwPay8nMpcC2wXxdjjYiINt1MEPOAqZL2lLQxcCzQfjXSLKqjByRNpupyWippW0mb1MoPBBYSEREjpmtdTLZXSzoZmANMAs61vUDSGUC/7dll3aGSFgJrgFNsPyrpzcA/S3qWKomdVb/6KSIiuk/2+Oi67+vrc39/f6/DiIgYUyTNt93XtC53UkdERKMkiIiIaJQEERERjZIgIiKiURJEREQ0SoKIiIhGSRAREdEoCSIiIholQURERKMkiIiIaJQEERERjZIgIiKiURJEREQ0SoKIiIhGSRAREdEoCSIiIholQURERKMkiIiIaJQEERERjZIgIiKiURJEREQ0SoKIiIhGSRAREdGoqwlC0nRJiyQtkXTqIHWOkbRQ0gJJF7at20rSgKSvdjPOiIh4sQ27tWFJk4AZwCHAADBP0mzbC2t1pgKnAQfaXilph7bNfA64vlsxRkTE4Lp5BHEAsMT2UttPAxcBR7bVORGYYXslgO3lrRWS9gd2BK7sYowRETGIbiaIXYAHassDpaxuGjBN0g2S5kqaDiBpA+CLwKeHegFJJ0nql9S/YsWKYQw9IiJ6fZJ6Q2AqcBBwHHC2pG2AjwHfsz0wVGPbM2332e6bMmVK14ONiJhIunYOAlgG7FZb3rWU1Q0AN9t+BrhP0mKqhPEm4K2SPgZsAWws6Re2G090R0TE8OvmEcQ8YKqkPSVtDBwLzG6rM4vq6AFJk6m6nJba/l3bL7e9B1U30/lJDhERI6trCcL2auBkYA5wN3Cx7QWSzpD07lJtDvCopIXANcApth/tVkwREdE52e51DMOir6/P/f39vQ4jImJMkTTfdl/TurUeQUjafvhDioiI0a6TLqa5ki6RdLgkdT2iiIgYFTpJENOAmcDvAfdI+ltJ07obVkRE9NpaE4QrV9k+jurO5+OBWyRdJ+lNXY8wIiJ6Yq33QZRzEB+iOoJ4GPg41eWq+wKXAHt2M8CIiOiNTm6Uuwm4ADiq7c7mfklf705YERHRa50kiL09yLWwtj8/zPFERMQo0clJ6ivL+EgASNpW0pwuxhQREaNAJwliiu1VrYUyNHf7vA0RETHOdJIg1kh6eWtB0u7A+Lj9OiIiBtXJOYjPAP8t6TpAwFuBk7oaVURE9NxaE4Tt70t6HfDGUvRJ2490N6yIiOi1TueDWAMsBzYFXi0J25krOiJiHOvkRrk/BD5BNeHPbVRHEjcB7+huaBER0UudnKT+BPB64Me2Dwb2A1YN3SQiIsa6ThLEr2z/CkDSJrb/B9i7u2FFRESvdXIOYqDcKDcLuErSSuDH3Q0rIiJ6rZOrmN5Tnp4u6Rpga+D7XY0qIiJ6bsgEIWkSsMD2KwFsXzciUUVERM8NeQ7C9hpgUf1O6oiImBg6OQexLbBA0i3AE6
1C2+/uWlQREdFznSSIz3Y9ioiIGHU6OUm93ucdJE0HvgxMAs6xfVZDnWOA06kGALzd9gfLgICXU3WBbQT8o+1MThQRMYI6uZP6cZ4fvXVjqg/sJ2xvtZZ2k4AZwCHAADBP0mzbC2t1pgKnAQfaXimpNYz4Q8CbbD8laQvgrtL2wXV8fxERsZ46OYLYsvVckoAjeX7gvqEcACyxvbS0vai0XVircyIwo8wxge3l5efTtTqb0NkNfRERMYzW6YPXlVnAYR1U3wV4oLY8UMrqpgHTJN0gaW7pkgJA0m6S7ijb+HzT0YOkkyT1S+pfsWLFuryViIhYi066mN5bW9wA6AN+NYyvPxU4iGowwOslvcb2KtsPAK+VtDMwS9Klth+uN7Y9E5gJ0NfXl0mMIiKGUSdXMR1Re74auJ+qq2htlgG71ZZ3LWV1A8DNtp8B7pO0mCphzGtVsP2gpLuoJiq6tIPXjYiIYdDJOYgT1nPb84CpkvakSgzHAh9sqzMLOA74V0mTqbqclkraFXjU9pOStgXeAvzDesYRERHrYa3nICR9owzW11reVtK5a2tnezVwMjAHuBu42PYCSWdIat1kNwd4VNJC4BrgFNuPAq8CbpZ0O3Ad8Pe271zXNxcREetP9tBd95Jutb3f2sp6ra+vz/39/b0OIyJiTJE033Zf07pOrmLaoHTztDa2HZ1PVRoREWNUJx/0XwRuknRJWX4/cGb3QoqIiNGgk5PU50vq5/k5qN9bvxs6IiLGp07ug3gj1ZwQXy3LW0l6g+2bux5dRET0TCfnIL4G/KK2/ItSFhER41gnCUKuXepk+1lykjoiYtzrJEEslfSnkjYqj08AS7sdWERE9FYnCeKjwJup7oYeAN5ANQprRESMY51cxbScapgMACRtBrwLuGTQRhERMeZ1NNy3pEmSDpd0AXAf8IHuhhUREb025BGEpLdTDbB3OHALcCCwl+1fjkBsERHRQ4MmCEkDwE+oLmn9tO3HJd2X5BARMTEM1cV0KbAzVXfSEZJexvNzU0dExDg3aIKw/UlgT6qxmA4CFgFTJB0jaYuRCS8iInplyHMQ5Qa5a4BrJG1ENRf1ccA/AZO7H17E2Dbr1mV8Yc4iHlz1JDtvsxmnHLY3R+3XPjV7xOjU8R3RZVrQ7wLfLZe6RsQQZt26jNMuu5Mnn1kDwLJVT3LaZdW8V0kSMRZ0dJlrO9tPDncgEePNF+Ysei45tDz5zBq+MGdRjyKKWDfrlSAiYu0eXNX8PWqw8ojRJgkiokt23qa5J3aw8ojRZq0JQtI0SWdLulLS1a3HSAQXMZadctjebLbRpBeUbbbRJE45bO8eRRSxbjo5SX0J8HXgbGDNWupGRNE6EZ2rmGKs6iRBrLadCYIi1sNR++2ShBBjVifnIL4j6WOSdpK0XevR9cgiIqKnOkkQxwOnADcC88ujv5ONS5ouaZGkJZJOHaTOMZIWSlog6cJStq+km0rZHZIyemxExAjrZD6IPddnw5ImATOAQ6gmGponabbthbU6U4HTgANtr5S0Q1n1S+D3bd8jaWdgvqQ5tletTywREbHu1pogyhAbfwy8rRRdC/xzubN6KAcAS2wvLdu5CDgSWFircyIww/ZKeG5yImwvblWw/aCk5cAUIAkiImKEdNLF9DVgf6rxl/6pPO/kpPUuwAO15YFSVjcNmCbpBklzJU1v34ikA4CNgXsb1p0kqV9S/4oVKzoIKSIiOtXJVUyvt/2bteWrJd0+jK8/lWq02F2B6yW9ptWVJGkn4ALgeNvPtje2PROYCdDX15ehyCMihlEnRxBrJP16a0HSXnR2P8QyYLfa8q6lrG4AmG37Gdv3AYupEgaStgL+E/iM7bkdvF5ERAyjThLEKVTDfV8r6TrgauDPO2g3D5gqaU9JGwPHArPb6syiOnpA0mSqLqelpf7lwPm2L+3onURExLDq5CqmH5SrjVrjAyyy/VQH7VZLOhmYA0wCzrW9QNIZQL/t2WXdoZIWUh2VnGL7UUkfojopvr2kD5dNftj2bev6BiMiYv2omhOoYYX0DttXS3pv03rbl3U1snXU19fn/v6Obs+IiIhC0nzbfU3rhjqCeDtVd9IRDesMjKoEERERw2vQBGH7b8rTM8oJ5OdIWq+b5yIiYuzo5CT1fzSU5cRxRMQ4N+gRhKRXAvsAW7edh9gK2LTbgUVERG8NdQ5ib+BdwDa88DzE41RDZERExDg21DmIbwPflvQm2zeNYEwRETEKdDLUxq2S/oSqu+m5riXbH+laVBER0XOdnKS+APg14DDgOqohMx7vZlAREdF7nSSIV9j+LPCE7W8AvwO8obthRUREr3WSIFrzPqyS9BvA1sAOQ9SPiIhxoJNzEDMlbQt8lmqwvS2Av+5qVBER0XOdDNZ3Tnl6HbBXd8OJiIjRYqgb5T41VEPbXxr+cCIiYrQY6ghiy/Jzb+D1PD+XwxHALd0MKiIiem+oG+X+L4Ck64HX2X68LJ9ONdNbRESMY51cxbQj8HRt+elSFhER41gnVzGdD9wi6fKyfBRwXtciioiIUaGTq5jOlHQF8NZSdILtW7sbVkRE9NpQVzFtZfvnkrYD7i+P1rrtbD/W/fAiIqJXhjqCuJBquO/5VFOMtqgs556IiIhxbKirmN5VfmZ60YiICWioLqbXDdXQ9o+GP5yIiBgthupi+uIQ6wy8Y5hjiYiIUWSoLqaDX+rGJU0HvgxMAs6xfVZDnWOA06mSzu22P1jKvw+8EfjvVndXRESMnE7ug6AM8/1qXjij3PlraTMJmAEcAgwA8yTNtr2wVmcqcBpwoO2VkurDiH8B2Bz4ow7fS0REDKO13kkt6W+AfyyPg4G/A97dwbYPAJbYXmr7aeAi4Mi2OicCM2yvBLC9vLXC9g/IzHURET3TyVAbRwO/BfzU9gnAb1JNGrQ2uwAP1JYHSlndNGCapBskzS1dUh2TdJKkfkn9K1asWJemERGxFp0kiCdtPwuslrQVsBzYbZhef0NgKnAQcBxwtqRtOm1se6btPtt9U6ZMGaaQIiICOjsH0V8+tM+mumnuF8BNHbRbxgsTya6lrG4AuNn2M8B9khZTJYx5HWw/IiK6aNAjCEkzJB1o+2O2V9n+OtUJ5+NLV9PazAOmStpT0sbAsTw/p0TLLKqjByRNpupyWroe7yMiIobZUEcQi4G/l7QTcDHw7+sySJ/t1ZJOBuZQXeZ6ru0Fks4A+m3PLusOlbQQWAOcYvtRAEk/BF4JbCFpAPgD23PW4z1GRMR6kO2hK0i7U337PxbYDPh3qmSxuPvhda6vr8/9/f29DiMiYkyRNN92X9O6tZ6ktv1j25+3vR/VieSjgLuHOcaIiBhlOrkPYkNJR0j6JnAFsAh4b9cji4iInhpqsL5DqI4YDgduobrR7STbT4xQbBER0UNDnaQ+jWpOiD9v3ekcERETx1CD9WW01oiICayTO6kjImICSoKIiIhGSRAREdEoCSIiIholQURERKMkiIiIaJQEERERjZIgIiKiURJEREQ0SoKIiIhGSRAREdEoCSIiIholQURER
KMkiIiIaJQEERERjZIgIiKiURJEREQ0SoKIiIhGXU0QkqZLWiRpiaRTB6lzjKSFkhZIurBWfryke8rj+G7GGRERLzbonNQvlaRJwAzgEGAAmCdptu2FtTpTgdOAA22vlLRDKd8O+BugDzAwv7Rd2a14IyLihbp5BHEAsMT2UttPAxcBR7bVORGY0frgt728lB8GXGX7sbLuKmB6F2ONiIg23UwQuwAP1JYHSlndNGCapBskzZU0fR3aIukkSf2S+lesWDGMoUdERK9PUm8ITAUOAo4Dzpa0TaeNbc+03We7b8qUKV0KMSJiYupmglgG7FZb3rWU1Q0As20/Y/s+YDFVwuikbUREdFE3E8Q8YKqkPSVtDBwLzG6rM4vq6AFJk6m6nJYCc4BDJW0raVvg0FIWEREjpGtXMdleLelkqg/2ScC5thdIOgPotz2b5xPBQmANcIrtRwEkfY4qyQCcYfuxbsUaEREvJtu9jmFY9PX1ub+/v9dhRESMKZLm2+5rWtfrk9QRETFKJUFERESjJIiIiGiUBBEREY2SICIiolESRERENEqCiIiIRkkQERHRKAkiIiIaJUFERESjJIiIiGiUBBEREY2SICIiolESRERENEqCiIiIRkkQERHRKAkiIiIaJUFERESjJIiIiGiUBBEREY2SICIiolESRERENOpqgpA0XdIiSUskndqw/sOSVki6rTz+sLbu85LuKo8PdDPOiIh4sQ27tWFJk4AZwCHAADBP0mzbC9uqfsv2yW1tfwd4HbAvsAlwraQrbP+8W/FGRMQLdfMI4gBgie2ltp8GLgKO7LDtq4Hrba+2/QRwBzC9S3FGRESDbiaIXYAHassDpazd+yTdIelSSbuVstuB6ZI2lzQZOBjYrb2hpJMk9UvqX7FixXDHHxExofX6JPV3gD1svxa4CvgGgO0rge8BNwL/DtwErGlvbHum7T7bfVOmTBm5qCMiJoBuJohlvPBb/66l7Dm2H7X9VFk8B9i/tu5M2/vaPgQQsLiLsUZERJtuJoh5wFRJe0raGDgWmF2vIGmn2uK7gbtL+SRJ25fnrwVeC1zZxVgjIqJN165isr1a0snAHGAScK7tBZLOAPptzwb+VNK7gdXAY8CHS/ONgB9KAvg58CHbq7sVa0REvJhs9zqGYSFpBfDj9Wg6GXhkmMPplrES61iJE8ZOrGMlThg7sY6VOKG7se5uu/Ek7rhJEOtLUr/tvl7H0YmxEutYiRPGTqxjJU4YO7GOlTihd7H2+iqmiIgYpZIgIiKiURIEzOx1AOtgrMQ6VuKEsRPrWIkTxk6sYyVO6FGsE/4cRERENMsRRERENEqCiIiIRuMqQUg6V9JySXcNsv6U2twTd0laI2m7sq5x7opyJ/jNpfxb5a7wnsUqaTdJ10haKGmBpE/U2pwuaVmt3eG9irOsu1/SnWVdf63NdpKuknRP+bntS43zpcQqae9a+W2Sfi7pk6VNL/bp1pK+I+n28js+obbu+LLf7pF0fK18/7Kvl0j6ispdpr2KVdK+km4qZXeoNqeLpPMk3Vfbp/v2Ks6ybk0tltm18l797w+2Tw9u+zv9laSjyrph36cA2B43D+BtVPNI3NVB3SOAq8vzScC9wF7AxlSjyb66rLsYOLY8/zrwxz2OdSfgdeX5llRjVLViPR349GjYp2X5fmByQ72/A04tz08FPt/rWGvlk4CfUt081JN9Cvxla58AU6hGGdgY2A5YWn5uW55vW+rdAryRatyyK4B39jjWacDUUr4z8BCwTVk+Dzh6NOzTsvyLQdr05H9/qFhrdbYr5Zt3a5/aHl9HELavp9ppnTiOaqRYGGTuivIt7B3ApaXeN4Cjehmr7Yds/6g8f5xq/KqmYdSHxUvYp0M5kjJyL6Ngn7b5LeBe2+tzV35HOojTwJbl72+LUnc1cBhwle3HbK+kGgF5uqoxzbayPdfVp8X5jNw+bYzV9mLb95RtPAgsp/qw64qXsE8b9fh/v5NYjwausP3L4YhpMOMqQXRK0uZUExD9RykabO6K7YFVfn4cqMHmtOiahljr6/YA9gNurhWfXA7pzx2urptODBKngSslzZd0Uq18R9sPlec/BXYcoTCBofcp1aCS7YljpPfpV4FXAQ8CdwKfsP0sg/+d7lKet5ePhMFifY6kA6iOKu6tFZ9Z9uk/SNqkx3FuqmpembmtLht6+7+/1n1K89/psO/TCZkgqLoXbrDd6bfNXmqMVdIWVB9wn/TzU7F+Dfh1qqlaHwK+2OM432L7dcA7gT+R9Lb2RuUb70hfaz3YPt2YalThS2rFvdinhwG3UXXN7At8VdJWI/C662PIWMvRzQXACbUPudOAVwKvp+oq+Ysex7m7q2EsPgj8f0m/PgLxDKWTffoaqoFQW7qyTydqgmjPvoPNXfEosI2kDdvKR9KLvilI2ogqOXzT9mWtctsP215T/hHPpuo661mctpeVn8uBy2vxPFz+yFt/7MtHME5o/vYFVSL7ke2HWwU92qcnAJe5sgS4j+qff7C/02XleXv5SBgsVsqH2n8Cn7E9t9WgdJPa1Vww/0pv92n973QpcC3VUXkv//cHjbU4Brjc9jOtgm7t0wmXICRtDbwd+HatuHHuivLt9hqq/j6A49vajXispV/yX4C7bX+prX59fo33AI1XSYxQnC+TtGXrOXBoLZ7ZVPsSRsE+rXnReYke7dOfUJ0LQdKOwN5UJ6TnAIdK2rZ0dR0KzCnddT+X9Mby9/H7jNw+bYy1/B9dDpxv+9J6g9qXA1H16/dsn5Z9uUkpnwwcCCzs8f/+YL//lkH/Tod9nw73We9ePspOewh4hqrP8A+AjwIfrdX5MHBRQ9vDqa4IupfqG0+rfC+qK0SWUHU9bNLLWIG3UHXJ3EF1GHobcHhZdwFVn+UdVB/CO/Uwzr2orga7HVjQtk+3B34A3AP8F7DdKPj9v4zqW+PWbeUjvk+puhauLK97F9V8KK22Hyl/i0uoum1a5X2l7r1UfdgaiX06WKzAh0qb22qPfcu6q2v1/w3YoodxvrmU3V5+/kHb3/CI/++v5fe/B9WRzAZt2xz2fWo7Q21ERESzCdfFFBERnUmCiIiIRkkQERHRKAkiIiIaJUFERESjJIgYE8rwAZ+sLc+RdE5t+YuSPjVE+/MkHV2eXyvpRRPAS9pI0lmqRkr9karRSN9Z1t1frpNf17ife91B1s8oo28ulPRkbTTOoyV9T9I26/qaHcS0k6TvDrF+Y0nX124SiwkqCSLGihuorllH0gbAZGCf2vo3Aze+xNf4HNVoub/haoiQo6hGzO0a239ie1+q+3Dutb1veVxq+3Dbq7rwsp+iuit8sJieprpP5QOD1YmJIQkixoobgTeV5/tQ3RD0eO1O2FcBP5L015LmqZrvYWa5s3StygB+JwIfdzVcAa6G2bi4oe6nyvbvajuq+f0yWNrtki5oaPe5ckQxqcOY7pc0WdIekv6ntF0s6ZuSflvSDeVo54BS/2VlQMFbJN0q6chBNv0+4PulzT6l/m0l9qmlzizgdzuJM8avHELGmGD7
QUmrJb2c6mjhJqrRNd8E/Ay40/bTkr5q+wyA8iH9LuA7HbzEK4Cf+PmBDxtJ2p9qrJw3UM29cLOk64Cngb8C3mz7EZVJk2rtvkB1NHKC1+/u1FcA76e6k3oe1cByb6EaXPAvqY52PkM1x8VHStfULZL+y/YTtTj2BFa2kiDVHbxftv3NMjxGK3ndRTXwW0xgOYKIseRGquTQShA31ZZvKHUOVjUL2J1U4/nv07Shl+AtVAOlPWH7F8BlwFvLa11i+xEAv3Ck2M9SDeHx0fVMDgD32b7T1aCBC4AflG3dSTX8AlRjM50q6TaqQec2BV7etp2dgBW15ZuAv5T0F1Sjmj5Z4l8DPN0aTysmpiSIGEta5yFeQ/UNdy7VEcSbgRslbQr8E9XMWq+h6mfftMNtLwFeru4Mqz0P2L/9qGIdPVV7/mxt+Vme7wkQ8L7aeYyX2767bbStqZ0AAAFaSURBVDtPUtsnti+kOgp5EviepHfU6m4C/OolxBxjXBJEjCU3UnUZPeZqCO7HgG2oksSNPP/B94iq+TIGvXqonauZuf4F+HLpakHSFEnvb6v6Q+AoSZuXUWrfU8quBt4vafvStp4Mvg+cBfxnl7+RzwE+3jrvImm/hjqLef6IA0l7AUttf4VqtNLXlvLtgUdcG1I6Jp4kiBhL7qS6emluW9nPbD9Srvg5m+roYg7VN/d18VdU3S8LVU0o/13gBeckXE33eh7VKJ83A+fYvtX2AuBM4DpJtwNfamt3SYlttqTN1jGuTn0O2Ai4Q9KCsvwC5XzEvZJeUYqOAe4q3VK/QTVdKcDBVHM5xASW0VwjJhhJ7wH2t/1XQ9S5DDjV9uKRiyxGm1zFFDHB2L681RXWpHSxzUpyiBxBREREo5yDiIiIRkkQERHRKAkiIiIaJUFERESjJIiIiGj0vxTlm8ZMzT1/AAAAAElFTkSuQmCC", - "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": null, "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", @@ -374,18 +292,19 @@ "plt.scatter(time_history, 1 - np.array(valid_loss_history))\n", "plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n", "plt.show()" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } } ], "metadata": { "kernelspec": { - "display_name": "Python 3.8.0 64-bit", - "metadata": { - "interpreter": { - "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" - } - }, - "name": "python3" + "name": "python3", + "display_name": "Python 3.8.0 64-bit ('blend': conda)" }, "language_info": { "codemirror_mode": { @@ -397,7 +316,10 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0-final" + "version": "3.8.0" + }, + "interpreter": { + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" } }, "nbformat": 4, diff --git a/notebook/flaml_forecast.ipynb b/notebook/flaml_forecast.ipynb index 1b0ff68adf..438834144a 100644 --- a/notebook/flaml_forecast.ipynb +++ b/notebook/flaml_forecast.ipynb @@ -10,18 +10,16 @@ { "cell_type": "markdown", "source": [ - "## 1. Introduction\r\n", - "\r\n", - "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can\r\n", - "\r\n", - " - serve as an economical AutoML engine,\r\n", - " - be used as a fast hyperparameter tuning tool, or\r\n", - " - be embedded in self-tuning software that requires low latency & resource in repetitive tuning tasks.\r\n", - " - In this notebook, we demonstrate how to use FLAML library to tune hyperparameters of XGBoost with a regression example.\r\n", - "\r\n", - "FLAML requires Python>=3.6. To run this notebook example, please install flaml with the notebook option:\r\n", - "\r\n", - "> pip install flaml[notebook]" + "## 1. Introduction\n", + "\n", + "FLAML is a Python library (https://github.com/microsoft/FLAML) designed to automatically produce accurate machine learning models with low computational cost. It is fast and cheap. The simple and lightweight design makes it easy to use and extend, such as adding new learners. FLAML can\n", + "\n", + " - serve as an economical AutoML engine,\n", + " - be used as a fast hyperparameter tuning tool, or\n", + " - be embedded in self-tuning software that requires low latency & resource in repetitive tuning tasks.\n", + " - In this notebook, we demonstrate how to use FLAML library to tune hyperparameters of XGBoost with a regression example.\n", + "\n", + "FLAML requires Python>=3.6. 
To run this notebook example, please install flaml with the notebook option:\n" ], "metadata": {} }, @@ -29,7 +27,7 @@ "cell_type": "code", "execution_count": null, "source": [ - "!pip install flaml[notebook]" + "!pip install flaml[notebook,forecast]" ], "outputs": [], "metadata": {} @@ -49,13 +47,13 @@ "cell_type": "code", "execution_count": 1, "source": [ - "import statsmodels.api as sm\r\n", - "data = sm.datasets.co2.load_pandas()\r\n", - "data = data.data\r\n", - "# data is given in weeks, but the task is to predict monthly, so use monthly averages instead\r\n", - "data = data['co2'].resample('MS').mean()\r\n", - "data = data.fillna(data.bfill()) # makes sure there are no missing values\r\n", - "data = data.to_frame().reset_index()\r\n", + "import statsmodels.api as sm\n", + "data = sm.datasets.co2.load_pandas()\n", + "data = data.data\n", + "# data is given in weeks, but the task is to predict monthly, so use monthly averages instead\n", + "data = data['co2'].resample('MS').mean()\n", + "data = data.fillna(data.bfill()) # makes sure there are no missing values\n", + "data = data.to_frame().reset_index()\n", "# data = data.rename(columns={'index': 'ds', 'co2': 'y'})" ], "outputs": [], @@ -65,14 +63,14 @@ "cell_type": "code", "execution_count": 2, "source": [ - "# split the data into a train dataframe and X_test and y_test dataframes, where the number of samples for test is equal to\r\n", - "# the number of periods the user wants to predict\r\n", - "num_samples = data.shape[0]\r\n", - "time_horizon = 12\r\n", - "split_idx = num_samples - time_horizon\r\n", - "X_train = data[:split_idx] # X_train is a dataframe with two columns: time and value\r\n", - "X_test = data[split_idx:]['index'].to_frame() # X_test is a dataframe with dates for prediction\r\n", - "y_test = data[split_idx:]['co2'].to_frame() # y_test is a dataframe of the values corresponding to the dates for prediction" + "# split the data into a train dataframe and X_test and y_test dataframes, where the number of samples for test is equal to\n", + "# the number of periods the user wants to predict\n", + "num_samples = data.shape[0]\n", + "time_horizon = 12\n", + "split_idx = num_samples - time_horizon\n", + "X_train = data[:split_idx] # X_train is a dataframe with two columns: time and value\n", + "X_test = data[split_idx:]['index'].to_frame() # X_test is a dataframe with dates for prediction\n", + "y_test = data[split_idx:]['co2'] # y_test is a series of the values corresponding to the dates for prediction" ], "outputs": [], "metadata": {} @@ -90,8 +88,8 @@ "cell_type": "code", "execution_count": 3, "source": [ - "''' import AutoML class from flaml package '''\r\n", - "from flaml import AutoML\r\n", + "''' import AutoML class from flaml package '''\n", + "from flaml import AutoML\n", "automl = AutoML()" ], "outputs": [], @@ -101,13 +99,15 @@ "cell_type": "code", "execution_count": 4, "source": [ - "settings = {\r\n", - " \"time_budget\": 300, # total running time in seconds\r\n", - " \"metric\": 'mape', # primary metric for validation: 'mape' is generally used for forecast tasks\r\n", - " \"task\": 'forecast', # task type\r\n", - " \"log_file_name\": 'CO2_forecast.log', # flaml log file\r\n", - " \"eval_method\": \"holdout\", # validation method can be chosen from ['auto', 'holdout', 'cv']\r\n", - " \"split_type\": 'time' # for foretask task, 'split_type' has to be 'time'\r\n", + "settings = {\n", + " \"time_budget\": 300, # total running time in seconds\n", + " \"metric\": 'mape', # primary metric for validation: 'mape' is 
generally used for forecast tasks\n",
+    "    \"task\": 'forecast',  # task type\n",
+    "    \"log_file_name\": 'CO2_forecast.log',  # flaml log file\n",
+    "    \"eval_method\": \"holdout\",  # validation method can be chosen from ['auto', 'holdout', 'cv']\n",
+    "    # \"estimator_list\": [\"sarimax\"],\n",
+    "    # \"verbose\": 3,\n",
+    "    \"split_type\": 'time'  # for forecast task, 'split_type' has to be 'time'\n",
     "}"
    ],
    "outputs": [],
    "metadata": {}
   },
   {
    "cell_type": "code",
    "execution_count": 5,
    "source": [
-    "'''The main flaml automl API'''\r\n",
-    "automl.fit(dataframe=X_train,  # training data\r\n",
-    "           label=('index', 'co2'),  # For 'forecast' task, label should be a tuple of strings for timestamp and value columns\r\n",
-    "           **settings, \r\n",
-    "           period=time_horizon,  # key word argument 'period' must be included for forecast task\r\n",
+    "'''The main flaml automl API'''\n",
+    "automl.fit(dataframe=X_train,  # training data\n",
+    "           label=('index', 'co2'),  # For 'forecast' task, label should be a tuple of strings for timestamp and value columns\n",
+    "           **settings, \n",
+    "           period=time_horizon,  # keyword argument 'period' must be included for forecast task\n",
     "           freq='M')"
    ],
    "outputs": [
@@ -129,739 +129,1091 @@
    "output_type": "stream",
    "name": "stderr",
    "text": [
-    "[flaml.automl: 08-20 19:19:25] {1208} INFO - Evaluation method: holdout\n",
-    "[flaml.automl: 08-20 19:19:25] {686} INFO - Using TimeSeriesSplit\n",
-    "[flaml.automl: 08-20 19:19:25] {1235} INFO - Minimizing error metric: mape\n",
-    "[flaml.automl: 08-20 19:19:25] {1257} INFO - List of ML learners in AutoML Run: ['fbprophet', 'arima', 'sarimax']\n",
-    "[flaml.automl: 08-20 19:19:25] {1347} INFO - iteration 0, current learner fbprophet\n",
+    "[flaml.automl: 08-23 15:56:25] {1219} INFO - Evaluation method: holdout\n",
+    "[flaml.automl: 08-23 15:56:25] {691} INFO - Using TimeSeriesSplit\n",
+    "[flaml.automl: 08-23 15:56:25] {1250} INFO - Minimizing error metric: mape\n",
+    "[flaml.automl: 08-23 15:56:25] {1274} INFO - List of ML learners in AutoML Run: ['fbprophet', 'arima', 'sarimax']\n",
+    "[flaml.automl: 08-23 15:56:25] {1457} INFO - iteration 0, current learner fbprophet\n",
     "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
     "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
-    "[flaml.automl: 08-20 19:19:30] {1500} INFO -  at 4.7s,\tbest fbprophet's error=0.0007,\tbest fbprophet's error=0.0007\n",
+    "[flaml.automl: 08-23 15:56:30] {1614} INFO -  at 4.7s,\tbest fbprophet's error=0.0007,\tbest fbprophet's error=0.0007\n",
     "INFO:flaml.automl: at 4.7s,\tbest fbprophet's error=0.0007,\tbest fbprophet's error=0.0007\n",
-    "[flaml.automl: 08-20 19:19:30] {1347} INFO - iteration 1, current learner fbprophet\n",
+    "[flaml.automl: 08-23 15:56:30] {1457} INFO - iteration 1, current learner fbprophet\n",
     "INFO:flaml.automl:iteration 1, current learner fbprophet\n",
     "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
     "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:33] {1500} INFO - at 7.5s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "INFO:flaml.automl: at 7.5s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "[flaml.automl: 08-20 19:19:33] {1347} INFO - iteration 2, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:31] {1614} INFO - at 6.1s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "INFO:flaml.automl: at 6.1s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "[flaml.automl: 08-23 15:56:31] {1457} INFO - iteration 2, current learner fbprophet\n", "INFO:flaml.automl:iteration 2, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:36] {1500} INFO - at 10.3s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "INFO:flaml.automl: at 10.3s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "[flaml.automl: 08-20 19:19:36] {1347} INFO - iteration 3, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:32] {1614} INFO - at 7.5s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "INFO:flaml.automl: at 7.5s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "[flaml.automl: 08-23 15:56:32] {1457} INFO - iteration 3, current learner fbprophet\n", "INFO:flaml.automl:iteration 3, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:39] {1500} INFO - at 13.2s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "INFO:flaml.automl: at 13.2s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "[flaml.automl: 08-20 19:19:39] {1347} INFO - iteration 4, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:34] {1614} INFO - at 8.9s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "INFO:flaml.automl: at 8.9s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "[flaml.automl: 08-23 15:56:34] {1457} INFO - iteration 4, current learner fbprophet\n", "INFO:flaml.automl:iteration 4, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:41] {1500} INFO - at 15.2s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "INFO:flaml.automl: at 15.2s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", - "[flaml.automl: 08-20 19:19:41] {1347} INFO - iteration 5, current learner arima\n", - "INFO:flaml.automl:iteration 5, current learner arima\n", - "[flaml.automl: 08-20 19:19:41] {1500} INFO - at 15.6s,\tbest arima's error=0.0120,\tbest fbprophet's error=0.0006\n", - "INFO:flaml.automl: at 15.6s,\tbest arima's error=0.0120,\tbest fbprophet's error=0.0006\n", - "[flaml.automl: 08-20 19:19:41] {1347} INFO - iteration 6, current learner fbprophet\n", - "INFO:flaml.automl:iteration 6, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:35] {1614} INFO - at 10.0s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "INFO:flaml.automl: at 10.0s,\tbest fbprophet's error=0.0006,\tbest fbprophet's error=0.0006\n", + "[flaml.automl: 08-23 15:56:35] {1457} INFO - iteration 5, current learner fbprophet\n", + "INFO:flaml.automl:iteration 5, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:45] {1500} INFO - at 19.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 19.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:45] {1347} INFO - iteration 7, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:36] {1614} INFO - at 11.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 11.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:36] {1457} INFO - iteration 6, current learner arima\n", + "INFO:flaml.automl:iteration 6, current learner arima\n", + "[flaml.automl: 08-23 15:56:37] {1614} INFO - at 11.6s,\tbest arima's error=0.0120,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 11.6s,\tbest arima's error=0.0120,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:37] {1457} INFO - iteration 7, current learner fbprophet\n", "INFO:flaml.automl:iteration 7, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:47] {1500} INFO - at 21.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 21.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:47] {1347} INFO - iteration 8, current learner arima\n", + "[flaml.automl: 08-23 15:56:38] {1614} INFO - at 12.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 12.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:38] {1457} INFO - iteration 8, current learner arima\n", "INFO:flaml.automl:iteration 8, current learner arima\n", - "[flaml.automl: 08-20 19:19:48] {1500} INFO - at 22.5s,\tbest arima's error=0.0040,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 22.5s,\tbest arima's error=0.0040,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:48] {1347} INFO - iteration 9, current learner arima\n", + "[flaml.automl: 08-23 15:56:38] {1614} INFO - at 13.5s,\tbest arima's error=0.0043,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 13.5s,\tbest arima's error=0.0043,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:38] {1457} INFO - iteration 9, current learner arima\n", "INFO:flaml.automl:iteration 9, current learner arima\n", - "[flaml.automl: 08-20 19:19:48] {1500} INFO - at 22.9s,\tbest arima's error=0.0040,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 22.9s,\tbest arima's error=0.0040,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:48] {1347} INFO - iteration 10, current learner arima\n", + "[flaml.automl: 08-23 15:56:39] {1614} INFO - at 13.7s,\tbest arima's error=0.0043,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 13.7s,\tbest arima's error=0.0043,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:39] {1457} INFO - iteration 10, current learner arima\n", "INFO:flaml.automl:iteration 10, current learner arima\n", - "[flaml.automl: 08-20 19:19:50] {1500} INFO - at 24.2s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 24.2s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:50] {1347} INFO - iteration 11, current learner arima\n", + "[flaml.automl: 08-23 15:56:39] {1614} INFO - at 14.5s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 14.5s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:39] {1457} INFO - iteration 11, current learner arima\n", "INFO:flaml.automl:iteration 11, current learner arima\n", - "[flaml.automl: 08-20 19:19:51] {1500} INFO - at 25.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 25.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:51] {1347} INFO - iteration 12, current learner sarimax\n", - "INFO:flaml.automl:iteration 12, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:51] {1500} INFO - at 25.9s,\tbest sarimax's error=0.0120,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 25.9s,\tbest sarimax's error=0.0120,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:51] {1347} INFO - iteration 13, current learner sarimax\n", - "INFO:flaml.automl:iteration 13, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:52] {1500} INFO - at 26.3s,\tbest 
sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 26.3s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:52] {1347} INFO - iteration 14, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:40] {1614} INFO - at 15.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 15.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:40] {1457} INFO - iteration 12, current learner arima\n", + "INFO:flaml.automl:iteration 12, current learner arima\n", + "[flaml.automl: 08-23 15:56:41] {1614} INFO - at 15.6s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 15.6s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:41] {1457} INFO - iteration 13, current learner arima\n", + "INFO:flaml.automl:iteration 13, current learner arima\n", + "[flaml.automl: 08-23 15:56:41] {1614} INFO - at 16.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 16.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:41] {1457} INFO - iteration 14, current learner sarimax\n", "INFO:flaml.automl:iteration 14, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:52] {1500} INFO - at 26.7s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 26.7s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:52] {1347} INFO - iteration 15, current learner arima\n", - "INFO:flaml.automl:iteration 15, current learner arima\n", - "[flaml.automl: 08-20 19:19:53] {1500} INFO - at 27.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 27.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:53] {1347} INFO - iteration 16, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:42] {1614} INFO - at 16.7s,\tbest sarimax's error=0.0120,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 16.7s,\tbest sarimax's error=0.0120,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:42] {1457} INFO - iteration 15, current learner sarimax\n", + "INFO:flaml.automl:iteration 15, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:42] {1614} INFO - at 17.0s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 17.0s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:42] {1457} INFO - iteration 16, current learner sarimax\n", "INFO:flaml.automl:iteration 16, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:54] {1500} INFO - at 28.2s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 28.2s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:54] {1347} INFO - iteration 17, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:42] {1614} INFO - at 17.2s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 17.2s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:42] {1457} INFO - iteration 17, current learner fbprophet\n", "INFO:flaml.automl:iteration 17, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. 
Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:19:57] {1500} INFO - at 31.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 31.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:57] {1347} INFO - iteration 18, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:44] {1614} INFO - at 18.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 18.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:44] {1457} INFO - iteration 18, current learner sarimax\n", "INFO:flaml.automl:iteration 18, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:57] {1500} INFO - at 31.7s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 31.7s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:57] {1347} INFO - iteration 19, current learner arima\n", - "INFO:flaml.automl:iteration 19, current learner arima\n", - "[flaml.automl: 08-20 19:19:59] {1500} INFO - at 33.0s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 33.0s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:59] {1347} INFO - iteration 20, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:44] {1614} INFO - at 19.3s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 19.3s,\tbest sarimax's error=0.0055,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:44] {1457} INFO - iteration 19, current learner sarimax\n", + "INFO:flaml.automl:iteration 19, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:45] {1614} INFO - at 19.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 19.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:45] {1457} INFO - iteration 20, current learner sarimax\n", "INFO:flaml.automl:iteration 20, current learner sarimax\n", - "[flaml.automl: 08-20 19:19:59] {1500} INFO - at 33.6s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 33.6s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:19:59] {1347} INFO - iteration 21, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:45] {1614} INFO - at 19.6s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 19.6s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:45] {1457} INFO - iteration 21, current learner fbprophet\n", "INFO:flaml.automl:iteration 21, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:02] {1500} INFO - at 36.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 36.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:02] {1347} INFO - iteration 22, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:46] {1614} INFO - at 20.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 20.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:46] {1457} INFO - iteration 22, current learner fbprophet\n", "INFO:flaml.automl:iteration 22, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:05] {1500} INFO - at 39.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 39.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:05] {1347} INFO - iteration 23, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:47] {1614} INFO - at 22.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 22.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:47] {1457} INFO - iteration 23, current learner sarimax\n", "INFO:flaml.automl:iteration 23, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:05] {1500} INFO - at 39.7s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 39.7s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:05] {1347} INFO - iteration 24, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:47] {1614} INFO - at 22.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 22.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:47] {1457} INFO - iteration 24, current learner sarimax\n", "INFO:flaml.automl:iteration 24, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:05] {1500} INFO - at 39.8s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 39.8s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:05] {1347} INFO - iteration 25, current learner arima\n", - "INFO:flaml.automl:iteration 25, current learner arima\n", - "[flaml.automl: 08-20 19:20:06] {1500} INFO - at 40.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 40.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:06] {1347} INFO - iteration 26, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:48] {1614} INFO - at 22.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 22.5s,\tbest sarimax's error=0.0031,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:48] {1457} INFO - iteration 25, current learner fbprophet\n", + "INFO:flaml.automl:iteration 25, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:56:49] {1614} INFO - at 24.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 24.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:49] {1457} INFO - iteration 26, current learner sarimax\n", "INFO:flaml.automl:iteration 26, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:07] {1500} INFO - at 41.6s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 41.6s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:07] {1347} INFO - iteration 27, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:50] {1614} INFO - at 24.9s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 24.9s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:50] {1457} INFO - iteration 27, current learner sarimax\n", "INFO:flaml.automl:iteration 27, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:09] {1500} INFO - at 43.2s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 43.2s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:09] {1347} INFO - iteration 28, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:51] {1614} INFO - at 25.8s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 25.8s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:51] {1457} INFO - iteration 28, current learner sarimax\n", "INFO:flaml.automl:iteration 28, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:09] {1500} INFO - at 43.7s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 43.7s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:09] {1347} INFO - iteration 29, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:51] {1614} INFO - at 26.1s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 26.1s,\tbest sarimax's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:51] {1457} INFO - iteration 29, current learner fbprophet\n", "INFO:flaml.automl:iteration 29, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:12] {1500} INFO - at 46.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 46.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:12] {1347} INFO - iteration 30, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:52] {1614} INFO - at 27.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 27.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:52] {1457} INFO - iteration 30, current learner fbprophet\n", "INFO:flaml.automl:iteration 30, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:15] {1500} INFO - at 49.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 49.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:15] {1347} INFO - iteration 31, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:54] {1614} INFO - at 29.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 29.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:54] {1457} INFO - iteration 31, current learner fbprophet\n", "INFO:flaml.automl:iteration 31, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:18] {1500} INFO - at 52.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 52.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:18] {1347} INFO - iteration 32, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:55] {1614} INFO - at 30.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 30.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:55] {1457} INFO - iteration 32, current learner sarimax\n", "INFO:flaml.automl:iteration 32, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:20] {1500} INFO - at 54.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 54.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:20] {1347} INFO - iteration 33, current learner fbprophet\n", + "[flaml.automl: 08-23 15:56:56] {1614} INFO - at 31.2s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 31.2s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:56] {1457} INFO - iteration 33, current learner fbprophet\n", "INFO:flaml.automl:iteration 33, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:22] {1500} INFO - at 57.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 57.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:22] {1347} INFO - iteration 34, current learner arima\n", - "INFO:flaml.automl:iteration 34, current learner arima\n", - "[flaml.automl: 08-20 19:20:24] {1500} INFO - at 58.8s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 58.8s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:24] {1347} INFO - iteration 35, current learner sarimax\n", + "[flaml.automl: 08-23 15:56:58] {1614} INFO - at 32.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 32.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:58] {1457} INFO - iteration 34, current learner fbprophet\n", + "INFO:flaml.automl:iteration 34, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:56:59] {1614} INFO - at 34.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 34.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:56:59] {1457} INFO - iteration 35, current learner sarimax\n", "INFO:flaml.automl:iteration 35, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:27] {1500} INFO - at 61.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 61.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:27] {1347} INFO - iteration 36, current learner arima\n", - "INFO:flaml.automl:iteration 36, current learner arima\n", - "[flaml.automl: 08-20 19:20:29] {1500} INFO - at 63.0s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 63.0s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:29] {1347} INFO - iteration 37, current learner arima\n", - "INFO:flaml.automl:iteration 37, current learner arima\n", - "[flaml.automl: 08-20 19:20:30] {1500} INFO - at 64.5s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 64.5s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:30] {1347} INFO - iteration 38, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:01] {1614} INFO - at 35.7s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 35.7s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:01] {1457} INFO - iteration 36, current learner fbprophet\n", + "INFO:flaml.automl:iteration 36, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:57:02] {1614} INFO - at 37.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 37.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:02] {1457} INFO - iteration 37, current learner fbprophet\n", + "INFO:flaml.automl:iteration 37, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:57:04] {1614} INFO - at 38.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 38.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:04] {1457} INFO - iteration 38, current learner fbprophet\n", "INFO:flaml.automl:iteration 38, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:33] {1500} INFO - at 67.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 67.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:33] {1347} INFO - iteration 39, current learner arima\n", - "INFO:flaml.automl:iteration 39, current learner arima\n", - "[flaml.automl: 08-20 19:20:35] {1500} INFO - at 69.3s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 69.3s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:35] {1347} INFO - iteration 40, current learner arima\n", - "INFO:flaml.automl:iteration 40, current learner arima\n", - "[flaml.automl: 08-20 19:20:36] {1500} INFO - at 70.3s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 70.3s,\tbest arima's error=0.0016,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:36] {1347} INFO - iteration 41, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:05] {1614} INFO - at 40.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 40.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:05] {1457} INFO - iteration 39, current learner fbprophet\n", + "INFO:flaml.automl:iteration 39, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:57:06] {1614} INFO - at 41.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 41.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:06] {1457} INFO - iteration 40, current learner sarimax\n", + "INFO:flaml.automl:iteration 40, current learner sarimax\n", + "[flaml.automl: 08-23 15:57:07] {1614} INFO - at 41.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 41.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:07] {1457} INFO - iteration 41, current learner fbprophet\n", "INFO:flaml.automl:iteration 41, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:39] {1500} INFO - at 73.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 73.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:39] {1347} INFO - iteration 42, current learner arima\n", - "INFO:flaml.automl:iteration 42, current learner arima\n", - "[flaml.automl: 08-20 19:20:41] {1500} INFO - at 75.5s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 75.5s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:41] {1347} INFO - iteration 43, current learner sarimax\n", + "[flaml.automl: 08-23 15:57:08] {1614} INFO - at 42.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 42.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:08] {1457} INFO - iteration 42, current learner sarimax\n", + "INFO:flaml.automl:iteration 42, current learner sarimax\n", + "[flaml.automl: 08-23 15:57:09] {1614} INFO - at 43.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 43.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:09] {1457} INFO - iteration 43, current learner sarimax\n", "INFO:flaml.automl:iteration 43, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:41] {1500} INFO - at 75.9s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 75.9s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:41] {1347} INFO - iteration 44, current learner arima\n", - "INFO:flaml.automl:iteration 44, current learner arima\n", - "[flaml.automl: 08-20 19:20:43] {1500} INFO - at 77.7s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 77.7s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:43] {1347} INFO - iteration 45, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:10] {1614} INFO - at 44.7s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 44.7s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:10] {1457} INFO - iteration 44, current learner fbprophet\n", + "INFO:flaml.automl:iteration 44, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. 
Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:57:11] {1614} INFO - at 46.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 46.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:11] {1457} INFO - iteration 45, current learner fbprophet\n", "INFO:flaml.automl:iteration 45, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:46] {1500} INFO - at 80.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 80.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:46] {1347} INFO - iteration 46, current learner arima\n", + "[flaml.automl: 08-23 15:57:12] {1614} INFO - at 47.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 47.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:12] {1457} INFO - iteration 46, current learner arima\n", "INFO:flaml.automl:iteration 46, current learner arima\n", - "[flaml.automl: 08-20 19:20:48] {1500} INFO - at 82.2s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 82.2s,\tbest arima's error=0.0014,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:48] {1347} INFO - iteration 47, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:13] {1614} INFO - at 48.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 48.4s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:13] {1457} INFO - iteration 47, current learner fbprophet\n", "INFO:flaml.automl:iteration 47, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:50] {1500} INFO - at 84.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 84.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:50] {1347} INFO - iteration 48, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:15] {1614} INFO - at 49.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 49.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:15] {1457} INFO - iteration 48, current learner fbprophet\n", "INFO:flaml.automl:iteration 48, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:53] {1500} INFO - at 87.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 87.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:53] {1347} INFO - iteration 49, current learner sarimax\n", + "[flaml.automl: 08-23 15:57:16] {1614} INFO - at 51.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 51.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:16] {1457} INFO - iteration 49, current learner sarimax\n", "INFO:flaml.automl:iteration 49, current learner sarimax\n", - "[flaml.automl: 08-20 19:20:54] {1500} INFO - at 88.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 88.3s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:54] {1347} INFO - iteration 50, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:17] {1614} INFO - at 51.9s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 51.9s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:17] {1457} INFO - iteration 50, current learner fbprophet\n", "INFO:flaml.automl:iteration 50, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:56] {1500} INFO - at 90.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 90.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:56] {1347} INFO - iteration 51, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:18] {1614} INFO - at 53.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 53.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:18] {1457} INFO - iteration 51, current learner fbprophet\n", "INFO:flaml.automl:iteration 51, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:20:59] {1500} INFO - at 93.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 93.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:20:59] {1347} INFO - iteration 52, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:20] {1614} INFO - at 54.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 54.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:20] {1457} INFO - iteration 52, current learner fbprophet\n", "INFO:flaml.automl:iteration 52, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:02] {1500} INFO - at 96.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 96.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:02] {1347} INFO - iteration 53, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:21] {1614} INFO - at 56.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 56.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:21] {1457} INFO - iteration 53, current learner fbprophet\n", "INFO:flaml.automl:iteration 53, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:05] {1500} INFO - at 99.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 99.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:05] {1347} INFO - iteration 54, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:22] {1614} INFO - at 57.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 57.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:22] {1457} INFO - iteration 54, current learner fbprophet\n", "INFO:flaml.automl:iteration 54, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:08] {1500} INFO - at 102.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 102.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:08] {1347} INFO - iteration 55, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:24] {1614} INFO - at 58.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 58.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:24] {1457} INFO - iteration 55, current learner fbprophet\n", "INFO:flaml.automl:iteration 55, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:11] {1500} INFO - at 105.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 105.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:11] {1347} INFO - iteration 56, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:25] {1614} INFO - at 60.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 60.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:25] {1457} INFO - iteration 56, current learner fbprophet\n", "INFO:flaml.automl:iteration 56, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. 
Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:13] {1500} INFO - at 107.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 107.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:13] {1347} INFO - iteration 57, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:27] {1614} INFO - at 61.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 61.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:27] {1457} INFO - iteration 57, current learner fbprophet\n", "INFO:flaml.automl:iteration 57, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:16] {1500} INFO - at 110.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 110.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:16] {1347} INFO - iteration 58, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:29] {1614} INFO - at 63.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 63.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:29] {1457} INFO - iteration 58, current learner fbprophet\n", "INFO:flaml.automl:iteration 58, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:19] {1500} INFO - at 113.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 113.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:19] {1347} INFO - iteration 59, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:31] {1614} INFO - at 65.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 65.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:31] {1457} INFO - iteration 59, current learner fbprophet\n", "INFO:flaml.automl:iteration 59, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:21] {1500} INFO - at 115.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 115.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:21] {1347} INFO - iteration 60, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:32] {1614} INFO - at 67.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 67.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:32] {1457} INFO - iteration 60, current learner fbprophet\n", "INFO:flaml.automl:iteration 60, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:24] {1500} INFO - at 118.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 118.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:24] {1347} INFO - iteration 61, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:34] {1614} INFO - at 68.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 68.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:34] {1457} INFO - iteration 61, current learner fbprophet\n", "INFO:flaml.automl:iteration 61, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:21:27] {1500} INFO - at 121.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 121.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:27] {1347} INFO - iteration 62, current learner arima\n", - "INFO:flaml.automl:iteration 62, current learner arima\n", - "[flaml.automl: 08-20 19:21:29] {1500} INFO - at 123.8s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n", - "INFO:flaml.automl: at 123.8s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n", - "[flaml.automl: 08-20 19:21:29] {1347} INFO - iteration 63, current learner fbprophet\n", + "[flaml.automl: 08-23 15:57:35] {1614} INFO - at 70.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 70.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:35] {1457} INFO - iteration 62, current learner sarimax\n", + "INFO:flaml.automl:iteration 62, current learner sarimax\n", + "[flaml.automl: 08-23 15:57:36] {1614} INFO - at 71.2s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 71.2s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:57:36] {1457} INFO - iteration 63, current learner fbprophet\n", "INFO:flaml.automl:iteration 63, current learner fbprophet\n", "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:32] {1500} INFO - at 126.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 126.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:32] {1347} INFO - iteration 64, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:38] {1614} INFO - at 72.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 72.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:38] {1457} INFO - iteration 64, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 64, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:35] {1500} INFO - at 129.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 129.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:35] {1347} INFO - iteration 65, current learner arima\n",
- "INFO:flaml.automl:iteration 65, current learner arima\n",
- "[flaml.automl: 08-20 19:21:36] {1500} INFO - at 130.9s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 130.9s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:36] {1347} INFO - iteration 66, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:39] {1614} INFO - at 74.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 74.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:39] {1457} INFO - iteration 65, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 65, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:40] {1614} INFO - at 74.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 74.6s,\tbest sarimax's error=0.0019,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:40] {1457} INFO - iteration 66, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 66, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:40] {1500} INFO - at 134.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 134.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:40] {1347} INFO - iteration 67, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:41] {1614} INFO - at 76.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 76.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:41] {1457} INFO - iteration 67, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 67, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:42] {1500} INFO - at 136.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 136.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:42] {1347} INFO - iteration 68, current learner arima\n",
- "INFO:flaml.automl:iteration 68, current learner arima\n",
- "[flaml.automl: 08-20 19:21:44] {1500} INFO - at 138.9s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 138.9s,\tbest arima's error=0.0013,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:44] {1347} INFO - iteration 69, current learner arima\n",
+ "[flaml.automl: 08-23 15:57:42] {1614} INFO - at 77.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 77.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:42] {1457} INFO - iteration 68, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 68, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:57:44] {1614} INFO - at 78.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 78.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:44] {1457} INFO - iteration 69, current learner arima\n",
 "INFO:flaml.automl:iteration 69, current learner arima\n",
- "[flaml.automl: 08-20 19:21:47] {1500} INFO - at 141.2s,\tbest arima's error=0.0012,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 141.2s,\tbest arima's error=0.0012,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:47] {1347} INFO - iteration 70, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:44] {1614} INFO - at 79.5s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 79.5s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:44] {1457} INFO - iteration 70, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 70, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:50] {1500} INFO - at 144.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 144.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:50] {1347} INFO - iteration 71, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:46] {1614} INFO - at 80.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 80.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:46] {1457} INFO - iteration 71, current learner sarimax\n",
 "INFO:flaml.automl:iteration 71, current learner sarimax\n",
- "[flaml.automl: 08-20 19:21:52] {1500} INFO - at 146.1s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 146.1s,\tbest sarimax's error=0.0020,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:52] {1347} INFO - iteration 72, current learner fbprophet\n",
- "INFO:flaml.automl:iteration 72, current learner fbprophet\n",
- "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
- "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:54] {1500} INFO - at 148.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 148.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:54] {1347} INFO - iteration 73, current learner arima\n",
- "INFO:flaml.automl:iteration 73, current learner arima\n",
- "[flaml.automl: 08-20 19:21:56] {1500} INFO - at 150.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 150.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:56] {1347} INFO - iteration 74, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:47] {1614} INFO - at 81.9s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 81.9s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:47] {1457} INFO - iteration 72, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 72, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:49] {1614} INFO - at 83.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 83.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:49] {1457} INFO - iteration 73, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 73, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:51] {1614} INFO - at 85.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 85.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:51] {1457} INFO - iteration 74, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 74, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:21:58] {1500} INFO - at 152.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 152.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:21:58] {1347} INFO - iteration 75, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:54] {1614} INFO - at 88.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 88.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:54] {1457} INFO - iteration 75, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 75, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:01] {1500} INFO - at 155.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 155.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:01] {1347} INFO - iteration 76, current learner arima\n",
- "INFO:flaml.automl:iteration 76, current learner arima\n",
- "[flaml.automl: 08-20 19:22:03] {1500} INFO - at 157.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 157.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:03] {1347} INFO - iteration 77, current learner fbprophet\n",
- "INFO:flaml.automl:iteration 77, current learner fbprophet\n",
- "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
- "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:07] {1500} INFO - at 161.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 161.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:07] {1347} INFO - iteration 78, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:57:55] {1614} INFO - at 90.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 90.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:55] {1457} INFO - iteration 76, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 76, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:57] {1614} INFO - at 92.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 92.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:57] {1457} INFO - iteration 77, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 77, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:57:59] {1614} INFO - at 93.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 93.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:57:59] {1457} INFO - iteration 78, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 78, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:09] {1500} INFO - at 163.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 163.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:09] {1347} INFO - iteration 79, current learner arima\n",
- "INFO:flaml.automl:iteration 79, current learner arima\n",
- "[flaml.automl: 08-20 19:22:12] {1500} INFO - at 166.1s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 166.1s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:12] {1347} INFO - iteration 80, current learner fbprophet\n",
- "INFO:flaml.automl:iteration 80, current learner fbprophet\n",
- "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
- "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:14] {1500} INFO - at 168.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 168.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:14] {1347} INFO - iteration 81, current learner arima\n",
- "INFO:flaml.automl:iteration 81, current learner arima\n",
- "[flaml.automl: 08-20 19:22:16] {1500} INFO - at 170.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 170.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:16] {1347} INFO - iteration 82, current learner arima\n",
- "INFO:flaml.automl:iteration 82, current learner arima\n",
- "[flaml.automl: 08-20 19:22:18] {1500} INFO - at 172.3s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 172.3s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:18] {1347} INFO - iteration 83, current learner arima\n",
- "INFO:flaml.automl:iteration 83, current learner arima\n",
- "[flaml.automl: 08-20 19:22:18] {1500} INFO - at 172.5s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 172.5s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:18] {1347} INFO - iteration 84, current learner fbprophet\n",
- "INFO:flaml.automl:iteration 84, current learner fbprophet\n",
- "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
- "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:20] {1500} INFO - at 174.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 174.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:20] {1347} INFO - iteration 85, current learner arima\n",
- "INFO:flaml.automl:iteration 85, current learner arima\n",
- "[flaml.automl: 08-20 19:22:21] {1500} INFO - at 175.2s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 175.2s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:21] {1347} INFO - iteration 86, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:00] {1614} INFO - at 95.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 95.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:00] {1457} INFO - iteration 79, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 79, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:02] {1614} INFO - at 97.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 97.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:02] {1457} INFO - iteration 80, current learner arima\n",
+ "INFO:flaml.automl:iteration 80, current learner arima\n",
+ "[flaml.automl: 08-23 15:58:03] {1614} INFO - at 98.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 98.3s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:03] {1457} INFO - iteration 81, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 81, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:04] {1614} INFO - at 99.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 99.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:04] {1457} INFO - iteration 82, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 82, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:06] {1614} INFO - at 100.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 100.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:06] {1457} INFO - iteration 83, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 83, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:07] {1614} INFO - at 101.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 101.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:07] {1457} INFO - iteration 84, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 84, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:08] {1614} INFO - at 102.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 102.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:08] {1457} INFO - iteration 85, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 85, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:10] {1614} INFO - at 104.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 104.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:10] {1457} INFO - iteration 86, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 86, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:24] {1500} INFO - at 178.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 178.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:24] {1347} INFO - iteration 87, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:11] {1614} INFO - at 106.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 106.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:11] {1457} INFO - iteration 87, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 87, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:26] {1500} INFO - at 180.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 180.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:26] {1347} INFO - iteration 88, current learner arima\n",
- "INFO:flaml.automl:iteration 88, current learner arima\n",
- "[flaml.automl: 08-20 19:22:26] {1500} INFO - at 180.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 180.4s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:26] {1347} INFO - iteration 89, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:13] {1614} INFO - at 108.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 108.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:13] {1457} INFO - iteration 88, current learner sarimax\n",
+ "INFO:flaml.automl:iteration 88, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:14] {1614} INFO - at 109.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 109.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:14] {1457} INFO - iteration 89, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 89, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:29] {1500} INFO - at 183.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 183.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:29] {1347} INFO - iteration 90, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:16] {1614} INFO - at 111.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 111.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:16] {1457} INFO - iteration 90, current learner sarimax\n",
 "INFO:flaml.automl:iteration 90, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:30] {1500} INFO - at 184.9s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 184.9s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:30] {1347} INFO - iteration 91, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:17] {1614} INFO - at 111.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 111.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:17] {1457} INFO - iteration 91, current learner sarimax\n",
 "INFO:flaml.automl:iteration 91, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:32] {1500} INFO - at 186.5s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 186.5s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:32] {1347} INFO - iteration 92, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:18] {1614} INFO - at 113.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 113.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:18] {1457} INFO - iteration 92, current learner sarimax\n",
 "INFO:flaml.automl:iteration 92, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:33] {1500} INFO - at 188.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 188.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:33] {1347} INFO - iteration 93, current learner sarimax\n",
- "INFO:flaml.automl:iteration 93, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:35] {1500} INFO - at 190.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 190.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:35] {1347} INFO - iteration 94, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:20] {1614} INFO - at 114.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 114.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:20] {1457} INFO - iteration 93, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 93, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:58:21] {1614} INFO - at 116.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 116.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:21] {1457} INFO - iteration 94, current learner sarimax\n",
 "INFO:flaml.automl:iteration 94, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:37] {1500} INFO - at 191.7s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 191.7s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:37] {1347} INFO - iteration 95, current learner sarimax\n",
- "INFO:flaml.automl:iteration 95, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:39] {1500} INFO - at 193.2s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 193.2s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:39] {1347} INFO - iteration 96, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:22] {1614} INFO - at 117.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 117.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:22] {1457} INFO - iteration 95, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 95, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:58:24] {1614} INFO - at 118.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 118.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:24] {1457} INFO - iteration 96, current learner sarimax\n",
 "INFO:flaml.automl:iteration 96, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:41] {1500} INFO - at 195.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 195.0s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:41] {1347} INFO - iteration 97, current learner sarimax\n",
- "INFO:flaml.automl:iteration 97, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:43] {1500} INFO - at 197.2s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 197.2s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:43] {1347} INFO - iteration 98, current learner sarimax\n",
- "INFO:flaml.automl:iteration 98, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:44] {1500} INFO - at 198.4s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 198.4s,\tbest sarimax's error=0.0010,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:44] {1347} INFO - iteration 99, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:25] {1614} INFO - at 119.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 119.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:25] {1457} INFO - iteration 97, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 97, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:58:26] {1614} INFO - at 121.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 121.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:26] {1457} INFO - iteration 98, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 98, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:58:28] {1614} INFO - at 122.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 122.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:28] {1457} INFO - iteration 99, current learner sarimax\n",
 "INFO:flaml.automl:iteration 99, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:46] {1500} INFO - at 200.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 200.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:46] {1347} INFO - iteration 100, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:29] {1614} INFO - at 124.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 124.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:29] {1457} INFO - iteration 100, current learner sarimax\n",
 "INFO:flaml.automl:iteration 100, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:48] {1500} INFO - at 202.5s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 202.5s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:48] {1347} INFO - iteration 101, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:30] {1614} INFO - at 125.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 125.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:31] {1457} INFO - iteration 101, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 101, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:51] {1500} INFO - at 206.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 206.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:51] {1347} INFO - iteration 102, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:32] {1614} INFO - at 127.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 127.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:32] {1457} INFO - iteration 102, current learner sarimax\n",
 "INFO:flaml.automl:iteration 102, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:53] {1500} INFO - at 207.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 207.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:53] {1347} INFO - iteration 103, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:34] {1614} INFO - at 129.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 129.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:34] {1457} INFO - iteration 103, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 103, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:22:56] {1500} INFO - at 210.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 210.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:56] {1347} INFO - iteration 104, current learner sarimax\n",
- "INFO:flaml.automl:iteration 104, current learner sarimax\n",
- "[flaml.automl: 08-20 19:22:58] {1500} INFO - at 212.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 212.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:22:58] {1347} INFO - iteration 105, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:36] {1614} INFO - at 130.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 130.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:36] {1457} INFO - iteration 104, current learner arima\n",
+ "INFO:flaml.automl:iteration 104, current learner arima\n",
+ "[flaml.automl: 08-23 15:58:37] {1614} INFO - at 131.9s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 131.9s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:37] {1457} INFO - iteration 105, current learner sarimax\n",
 "INFO:flaml.automl:iteration 105, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:00] {1500} INFO - at 214.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 214.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:00] {1347} INFO - iteration 106, current learner arima\n",
- "INFO:flaml.automl:iteration 106, current learner arima\n",
- "[flaml.automl: 08-20 19:23:00] {1500} INFO - at 214.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 214.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:00] {1347} INFO - iteration 107, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:38] {1614} INFO - at 133.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 133.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:38] {1457} INFO - iteration 106, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 106, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:58:40] {1614} INFO - at 134.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 134.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:40] {1457} INFO - iteration 107, current learner sarimax\n",
 "INFO:flaml.automl:iteration 107, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:02] {1500} INFO - at 216.4s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 216.4s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:02] {1347} INFO - iteration 108, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:41] {1614} INFO - at 136.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 136.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:41] {1457} INFO - iteration 108, current learner sarimax\n",
 "INFO:flaml.automl:iteration 108, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:03] {1500} INFO - at 217.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 217.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:03] {1347} INFO - iteration 109, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:43] {1614} INFO - at 137.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 137.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:43] {1457} INFO - iteration 109, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 109, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:06] {1500} INFO - at 220.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 220.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:06] {1347} INFO - iteration 110, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:44] {1614} INFO - at 139.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 139.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:44] {1457} INFO - iteration 110, current learner sarimax\n",
 "INFO:flaml.automl:iteration 110, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:08] {1500} INFO - at 222.3s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 222.3s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:08] {1347} INFO - iteration 111, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:45] {1614} INFO - at 140.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 140.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:45] {1457} INFO - iteration 111, current learner sarimax\n",
 "INFO:flaml.automl:iteration 111, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:09] {1500} INFO - at 223.9s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 223.9s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:09] {1347} INFO - iteration 112, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:46] {1614} INFO - at 141.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 141.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:46] {1457} INFO - iteration 112, current learner sarimax\n",
 "INFO:flaml.automl:iteration 112, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:11] {1500} INFO - at 225.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 225.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:11] {1347} INFO - iteration 113, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:48] {1614} INFO - at 142.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 142.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:48] {1457} INFO - iteration 113, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 113, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:14] {1500} INFO - at 229.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 229.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:15] {1347} INFO - iteration 114, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:49] {1614} INFO - at 143.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 143.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:49] {1457} INFO - iteration 114, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 114, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:17] {1500} INFO - at 231.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 231.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:17] {1347} INFO - iteration 115, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:50] {1614} INFO - at 145.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 145.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:50] {1457} INFO - iteration 115, current learner sarimax\n",
 "INFO:flaml.automl:iteration 115, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:18] {1500} INFO - at 232.9s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 232.9s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:18] {1347} INFO - iteration 116, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:52] {1614} INFO - at 146.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 146.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:52] {1457} INFO - iteration 116, current learner sarimax\n",
 "INFO:flaml.automl:iteration 116, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:21] {1500} INFO - at 235.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 235.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:21] {1347} INFO - iteration 117, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:58:52] {1614} INFO - at 147.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 147.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:52] {1457} INFO - iteration 117, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 117, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:23] {1500} INFO - at 238.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 238.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:23] {1347} INFO - iteration 118, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:55] {1614} INFO - at 149.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 149.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:55] {1457} INFO - iteration 118, current learner sarimax\n",
 "INFO:flaml.automl:iteration 118, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:26] {1500} INFO - at 240.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 240.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:26] {1347} INFO - iteration 119, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:56] {1614} INFO - at 151.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 151.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:56] {1457} INFO - iteration 119, current learner sarimax\n",
 "INFO:flaml.automl:iteration 119, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:28] {1500} INFO - at 242.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 242.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:28] {1347} INFO - iteration 120, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:58] {1614} INFO - at 153.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 153.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:58] {1457} INFO - iteration 120, current learner sarimax\n",
 "INFO:flaml.automl:iteration 120, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:30] {1500} INFO - at 244.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 244.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:30] {1347} INFO - iteration 121, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:58:59] {1614} INFO - at 153.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 153.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:58:59] {1457} INFO - iteration 121, current learner sarimax\n",
 "INFO:flaml.automl:iteration 121, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:32] {1500} INFO - at 246.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 246.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:32] {1347} INFO - iteration 122, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:01] {1614} INFO - at 155.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 155.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:01] {1457} INFO - iteration 122, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 122, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:34] {1500} INFO - at 248.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 248.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:34] {1347} INFO - iteration 123, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:03] {1614} INFO - at 158.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 158.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:03] {1457} INFO - iteration 123, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 123, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:37] {1500} INFO - at 251.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 251.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:37] {1347} INFO - iteration 124, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:06] {1614} INFO - at 160.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 160.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:06] {1457} INFO - iteration 124, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 124, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:40] {1500} INFO - at 254.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 254.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:40] {1347} INFO - iteration 125, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:08] {1614} INFO - at 162.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 162.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:08] {1457} INFO - iteration 125, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 125, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:42] {1500} INFO - at 256.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 256.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:42] {1347} INFO - iteration 126, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:10] {1614} INFO - at 164.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 164.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:10] {1457} INFO - iteration 126, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 126, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:45] {1500} INFO - at 259.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 259.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:45] {1347} INFO - iteration 127, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:12] {1614} INFO - at 166.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 166.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:12] {1457} INFO - iteration 127, current learner sarimax\n",
 "INFO:flaml.automl:iteration 127, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:46] {1500} INFO - at 260.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 260.8s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:46] {1347} INFO - iteration 128, current learner arima\n",
- "INFO:flaml.automl:iteration 128, current learner arima\n",
- "[flaml.automl: 08-20 19:23:47] {1500} INFO - at 261.2s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 261.2s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:47] {1347} INFO - iteration 129, current learner arima\n",
- "INFO:flaml.automl:iteration 129, current learner arima\n",
- "[flaml.automl: 08-20 19:23:47] {1500} INFO - at 261.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 261.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:47] {1347} INFO - iteration 130, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:13] {1614} INFO - at 168.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 168.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:13] {1457} INFO - iteration 128, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 128, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:59:15] {1614} INFO - at 170.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 170.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:15] {1457} INFO - iteration 129, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 129, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:59:17] {1614} INFO - at 171.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 171.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:17] {1457} INFO - iteration 130, current learner sarimax\n",
 "INFO:flaml.automl:iteration 130, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:49] {1500} INFO - at 263.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 263.7s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:49] {1347} INFO - iteration 131, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:18] {1614} INFO - at 172.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 172.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:18] {1457} INFO - iteration 131, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 131, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:51] {1500} INFO - at 265.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 265.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:51] {1347} INFO - iteration 132, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:19] {1614} INFO - at 174.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 174.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:19] {1457} INFO - iteration 132, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 132, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:23:54] {1500} INFO - at 268.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 268.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:54] {1347} INFO - iteration 133, current learner arima\n",
- "INFO:flaml.automl:iteration 133, current learner arima\n",
- "[flaml.automl: 08-20 19:23:54] {1500} INFO - at 268.8s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 268.8s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:54] {1347} INFO - iteration 134, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:21] {1614} INFO - at 176.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 176.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:21] {1457} INFO - iteration 133, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 133, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:59:23] {1614} INFO - at 177.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 177.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:23] {1457} INFO - iteration 134, current learner sarimax\n",
 "INFO:flaml.automl:iteration 134, current learner sarimax\n",
- "[flaml.automl: 08-20 19:23:56] {1500} INFO - at 271.0s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 271.0s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:56] {1347} INFO - iteration 135, current learner arima\n",
- "INFO:flaml.automl:iteration 135, current learner arima\n",
- "[flaml.automl: 08-20 19:23:57] {1500} INFO - at 271.6s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 271.6s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:23:57] {1347} INFO - iteration 136, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:24] {1614} INFO - at 179.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 179.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:24] {1457} INFO - iteration 135, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 135, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:59:26] {1614} INFO - at 181.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 181.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:26] {1457} INFO - iteration 136, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 136, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:00] {1500} INFO - at 274.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 274.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:00] {1347} INFO - iteration 137, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:28] {1614} INFO - at 183.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 183.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:28] {1457} INFO - iteration 137, current learner sarimax\n",
 "INFO:flaml.automl:iteration 137, current learner sarimax\n",
- "[flaml.automl: 08-20 19:24:02] {1500} INFO - at 276.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 276.1s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:02] {1347} INFO - iteration 138, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:30] {1614} INFO - at 184.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 184.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:30] {1457} INFO - iteration 138, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 138, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:04] {1500} INFO - at 279.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 279.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:04] {1347} INFO - iteration 139, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:32] {1614} INFO - at 186.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 186.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:32] {1457} INFO - iteration 139, current learner sarimax\n",
 "INFO:flaml.automl:iteration 139, current learner sarimax\n",
- "[flaml.automl: 08-20 19:24:07] {1500} INFO - at 281.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 281.2s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:07] {1347} INFO - iteration 140, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:33] {1614} INFO - at 188.1s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 188.1s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:33] {1457} INFO - iteration 140, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 140, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:10] {1500} INFO - at 284.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 284.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:10] {1347} INFO - iteration 141, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:36] {1614} INFO - at 190.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 190.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:36] {1457} INFO - iteration 141, current learner sarimax\n",
 "INFO:flaml.automl:iteration 141, current learner sarimax\n",
- "[flaml.automl: 08-20 19:24:11] {1500} INFO - at 285.5s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 285.5s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:11] {1347} INFO - iteration 142, current learner sarimax\n",
+ "[flaml.automl: 08-23 15:59:37] {1614} INFO - at 191.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 191.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:37] {1457} INFO - iteration 142, current learner sarimax\n",
 "INFO:flaml.automl:iteration 142, current learner sarimax\n",
- "[flaml.automl: 08-20 19:24:14] {1500} INFO - at 288.3s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 288.3s,\tbest sarimax's error=0.0008,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:14] {1347} INFO - iteration 143, current learner arima\n",
- "INFO:flaml.automl:iteration 143, current learner arima\n",
- "[flaml.automl: 08-20 19:24:14] {1500} INFO - at 288.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 288.9s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:14] {1347} INFO - iteration 144, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:38] {1614} INFO - at 193.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 193.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:38] {1457} INFO - iteration 143, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 143, current learner fbprophet\n",
+ "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
+ "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
+ "[flaml.automl: 08-23 15:59:41] {1614} INFO - at 195.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 195.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:41] {1457} INFO - iteration 144, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 144, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:17] {1500} INFO - at 291.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 291.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "[flaml.automl: 08-20 19:24:17] {1347} INFO - iteration 145, current learner fbprophet\n",
+ "[flaml.automl: 08-23 15:59:42] {1614} INFO - at 197.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 197.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:42] {1457} INFO - iteration 145, current learner fbprophet\n",
 "INFO:flaml.automl:iteration 145, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:20] {1500} INFO - at 294.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 294.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:44] {1614} INFO - at 199.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 199.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:44] {1457} INFO - iteration 146, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 146, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n",
- "[flaml.automl: 08-20 19:24:23] {1532} INFO - retrain fbprophet for 2.8s\n",
- "INFO:flaml.automl:retrain fbprophet for 2.8s\n",
- "[flaml.automl: 08-20 19:24:23] {1347} INFO - iteration 146, current learner arima\n",
- "INFO:flaml.automl:iteration 146, current learner arima\n",
- "[flaml.automl: 08-20 19:24:23] {1500} INFO - at 298.0s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
- "INFO:flaml.automl: at 298.0s,\tbest arima's error=0.0011,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:46] {1614} INFO - at 200.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "INFO:flaml.automl: at 200.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n",
+ "[flaml.automl: 08-23 15:59:46] {1457} INFO - iteration 147, current learner fbprophet\n",
+ "INFO:flaml.automl:iteration 147, current learner fbprophet\n",
 "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n",
 "INFO:prophet:Disabling daily seasonality.
Run prophet with daily_seasonality=True to override this.\n", - "[flaml.automl: 08-20 19:24:26] {1532} INFO - retrain arima for 2.9s\n", - "INFO:flaml.automl:retrain arima for 2.9s\n", - "[flaml.automl: 08-20 19:24:26] {1556} INFO - selected model: \n", - "INFO:flaml.automl:selected model: \n", - "[flaml.automl: 08-20 19:24:26] {1279} INFO - fit succeeded\n", + "[flaml.automl: 08-23 15:59:47] {1614} INFO - at 202.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 202.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:47] {1457} INFO - iteration 148, current learner fbprophet\n", + "INFO:flaml.automl:iteration 148, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:59:49] {1614} INFO - at 204.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 204.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:49] {1457} INFO - iteration 149, current learner fbprophet\n", + "INFO:flaml.automl:iteration 149, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:59:51] {1614} INFO - at 205.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 205.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:51] {1457} INFO - iteration 150, current learner sarimax\n", + "INFO:flaml.automl:iteration 150, current learner sarimax\n", + "[flaml.automl: 08-23 15:59:52] {1614} INFO - at 206.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 206.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:52] {1457} INFO - iteration 151, current learner fbprophet\n", + "INFO:flaml.automl:iteration 151, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:59:54] {1614} INFO - at 208.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 208.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:54] {1457} INFO - iteration 152, current learner fbprophet\n", + "INFO:flaml.automl:iteration 152, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:59:55] {1614} INFO - at 210.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 210.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:55] {1457} INFO - iteration 153, current learner fbprophet\n", + "INFO:flaml.automl:iteration 153, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 15:59:57] {1614} INFO - at 211.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 211.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:57] {1457} INFO - iteration 154, current learner sarimax\n", + "INFO:flaml.automl:iteration 154, current learner sarimax\n", + "[flaml.automl: 08-23 15:59:58] {1614} INFO - at 212.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 212.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 15:59:58] {1457} INFO - iteration 155, current learner fbprophet\n", + "INFO:flaml.automl:iteration 155, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:00] {1614} INFO - at 214.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 214.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:00] {1457} INFO - iteration 156, current learner fbprophet\n", + "INFO:flaml.automl:iteration 156, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:01] {1614} INFO - at 216.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 216.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:01] {1457} INFO - iteration 157, current learner fbprophet\n", + "INFO:flaml.automl:iteration 157, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:03] {1614} INFO - at 218.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 218.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:03] {1457} INFO - iteration 158, current learner sarimax\n", + "INFO:flaml.automl:iteration 158, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:04] {1614} INFO - at 219.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 219.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:04] {1457} INFO - iteration 159, current learner fbprophet\n", + "INFO:flaml.automl:iteration 159, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:06] {1614} INFO - at 221.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 221.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:06] {1457} INFO - iteration 160, current learner fbprophet\n", + "INFO:flaml.automl:iteration 160, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:08] {1614} INFO - at 222.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 222.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:08] {1457} INFO - iteration 161, current learner sarimax\n", + "INFO:flaml.automl:iteration 161, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:09] {1614} INFO - at 223.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 223.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:09] {1457} INFO - iteration 162, current learner sarimax\n", + "INFO:flaml.automl:iteration 162, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:11] {1614} INFO - at 225.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 225.7s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:11] {1457} INFO - iteration 163, current learner fbprophet\n", + "INFO:flaml.automl:iteration 163, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:12] {1614} INFO - at 227.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 227.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:12] {1457} INFO - iteration 164, current learner fbprophet\n", + "INFO:flaml.automl:iteration 164, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:14] {1614} INFO - at 229.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 229.3s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:14] {1457} INFO - iteration 165, current learner fbprophet\n", + "INFO:flaml.automl:iteration 165, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:16] {1614} INFO - at 231.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 231.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:16] {1457} INFO - iteration 166, current learner sarimax\n", + "INFO:flaml.automl:iteration 166, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:18] {1614} INFO - at 232.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 232.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:18] {1457} INFO - iteration 167, current learner fbprophet\n", + "INFO:flaml.automl:iteration 167, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:19] {1614} INFO - at 234.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 234.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:19] {1457} INFO - iteration 168, current learner fbprophet\n", + "INFO:flaml.automl:iteration 168, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:21] {1614} INFO - at 235.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 235.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:21] {1457} INFO - iteration 169, current learner fbprophet\n", + "INFO:flaml.automl:iteration 169, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:23] {1614} INFO - at 237.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 237.8s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:23] {1457} INFO - iteration 170, current learner fbprophet\n", + "INFO:flaml.automl:iteration 170, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:25] {1614} INFO - at 239.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 239.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:25] {1457} INFO - iteration 171, current learner fbprophet\n", + "INFO:flaml.automl:iteration 171, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:26] {1614} INFO - at 241.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 241.4s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:26] {1457} INFO - iteration 172, current learner fbprophet\n", + "INFO:flaml.automl:iteration 172, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:28] {1614} INFO - at 243.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 243.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:28] {1457} INFO - iteration 173, current learner fbprophet\n", + "INFO:flaml.automl:iteration 173, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:30] {1614} INFO - at 245.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 245.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:30] {1457} INFO - iteration 174, current learner sarimax\n", + "INFO:flaml.automl:iteration 174, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:31] {1614} INFO - at 246.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 246.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:31] {1457} INFO - iteration 175, current learner fbprophet\n", + "INFO:flaml.automl:iteration 175, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:33] {1614} INFO - at 248.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 248.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:33] {1457} INFO - iteration 176, current learner fbprophet\n", + "INFO:flaml.automl:iteration 176, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:35] {1614} INFO - at 249.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 249.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:35] {1457} INFO - iteration 177, current learner fbprophet\n", + "INFO:flaml.automl:iteration 177, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:37] {1614} INFO - at 251.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 251.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:37] {1457} INFO - iteration 178, current learner sarimax\n", + "INFO:flaml.automl:iteration 178, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:37] {1614} INFO - at 252.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 252.4s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:37] {1457} INFO - iteration 179, current learner sarimax\n", + "INFO:flaml.automl:iteration 179, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:39] {1614} INFO - at 253.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 253.9s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:39] {1457} INFO - iteration 180, current learner fbprophet\n", + "INFO:flaml.automl:iteration 180, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:41] {1614} INFO - at 255.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 255.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:41] {1457} INFO - iteration 181, current learner fbprophet\n", + "INFO:flaml.automl:iteration 181, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:42] {1614} INFO - at 257.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 257.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:42] {1457} INFO - iteration 182, current learner fbprophet\n", + "INFO:flaml.automl:iteration 182, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:45] {1614} INFO - at 259.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 259.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:45] {1457} INFO - iteration 183, current learner arima\n", + "INFO:flaml.automl:iteration 183, current learner arima\n", + "[flaml.automl: 08-23 16:00:46] {1614} INFO - at 260.7s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 260.7s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:46] {1457} INFO - iteration 184, current learner fbprophet\n", + "INFO:flaml.automl:iteration 184, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:48] {1614} INFO - at 262.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 262.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:48] {1457} INFO - iteration 185, current learner sarimax\n", + "INFO:flaml.automl:iteration 185, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:49] {1614} INFO - at 264.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 264.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:49] {1457} INFO - iteration 186, current learner fbprophet\n", + "INFO:flaml.automl:iteration 186, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:51] {1614} INFO - at 265.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 265.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:51] {1457} INFO - iteration 187, current learner fbprophet\n", + "INFO:flaml.automl:iteration 187, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:53] {1614} INFO - at 267.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 267.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:53] {1457} INFO - iteration 188, current learner sarimax\n", + "INFO:flaml.automl:iteration 188, current learner sarimax\n", + "[flaml.automl: 08-23 16:00:54] {1614} INFO - at 268.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 268.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:54] {1457} INFO - iteration 189, current learner fbprophet\n", + "INFO:flaml.automl:iteration 189, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:56] {1614} INFO - at 270.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 270.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:56] {1457} INFO - iteration 190, current learner arima\n", + "INFO:flaml.automl:iteration 190, current learner arima\n", + "[flaml.automl: 08-23 16:00:57] {1614} INFO - at 271.7s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 271.7s,\tbest arima's error=0.0022,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:57] {1457} INFO - iteration 191, current learner fbprophet\n", + "INFO:flaml.automl:iteration 191, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:00:59] {1614} INFO - at 273.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 273.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:00:59] {1457} INFO - iteration 192, current learner sarimax\n", + "INFO:flaml.automl:iteration 192, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:00] {1614} INFO - at 275.1s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 275.1s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:00] {1457} INFO - iteration 193, current learner fbprophet\n", + "INFO:flaml.automl:iteration 193, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:02] {1614} INFO - at 277.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 277.1s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:02] {1457} INFO - iteration 194, current learner fbprophet\n", + "INFO:flaml.automl:iteration 194, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:04] {1614} INFO - at 278.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 278.9s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:04] {1457} INFO - iteration 195, current learner sarimax\n", + "INFO:flaml.automl:iteration 195, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:05] {1614} INFO - at 280.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 280.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:05] {1457} INFO - iteration 196, current learner sarimax\n", + "INFO:flaml.automl:iteration 196, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:07] {1614} INFO - at 281.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 281.5s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:07] {1457} INFO - iteration 197, current learner sarimax\n", + "INFO:flaml.automl:iteration 197, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:08] {1614} INFO - at 283.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 283.0s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:08] {1457} INFO - iteration 198, current learner sarimax\n", + "INFO:flaml.automl:iteration 198, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:08] {1614} INFO - at 283.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 283.3s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:08] {1457} INFO - iteration 199, current learner fbprophet\n", + "INFO:flaml.automl:iteration 199, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:10] {1614} INFO - at 285.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 285.2s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:10] {1457} INFO - iteration 200, current learner fbprophet\n", + "INFO:flaml.automl:iteration 200, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:12] {1614} INFO - at 287.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 287.0s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:12] {1457} INFO - iteration 201, current learner fbprophet\n", + "INFO:flaml.automl:iteration 201, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:14] {1614} INFO - at 288.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 288.7s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:14] {1457} INFO - iteration 202, current learner sarimax\n", + "INFO:flaml.automl:iteration 202, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:15] {1614} INFO - at 290.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 290.2s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:15] {1457} INFO - iteration 203, current learner sarimax\n", + "INFO:flaml.automl:iteration 203, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:18] {1614} INFO - at 292.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 292.6s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:18] {1457} INFO - iteration 204, current learner fbprophet\n", + "INFO:flaml.automl:iteration 204, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:19] {1614} INFO - at 294.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 294.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:19] {1457} INFO - iteration 205, current learner sarimax\n", + "INFO:flaml.automl:iteration 205, current learner sarimax\n", + "[flaml.automl: 08-23 16:01:21] {1614} INFO - at 295.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 295.8s,\tbest sarimax's error=0.0007,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:21] {1457} INFO - iteration 206, current learner fbprophet\n", + "INFO:flaml.automl:iteration 206, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:23] {1614} INFO - at 297.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 297.6s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:23] {1457} INFO - iteration 207, current learner fbprophet\n", + "INFO:flaml.automl:iteration 207, current learner fbprophet\n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:24] {1614} INFO - at 299.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "INFO:flaml.automl: at 299.5s,\tbest fbprophet's error=0.0005,\tbest fbprophet's error=0.0005\n", + "[flaml.automl: 08-23 16:01:24] {1691} INFO - selected model: \n", + "INFO:flaml.automl:selected model: \n", + "INFO:prophet:Disabling weekly seasonality. Run prophet with weekly_seasonality=True to override this.\n", + "INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", + "[flaml.automl: 08-23 16:01:26] {1732} INFO - retrain fbprophet for 1.0s\n", + "INFO:flaml.automl:retrain fbprophet for 1.0s\n", + "[flaml.automl: 08-23 16:01:26] {1735} INFO - retrained model: \n", + "INFO:flaml.automl:retrained model: \n", + "[flaml.automl: 08-23 16:01:26] {1298} INFO - fit succeeded\n", "INFO:flaml.automl:fit succeeded\n", - "[flaml.automl: 08-20 19:24:26] {1280} INFO - Time taken to find the best model: 73.84539341926575\n", - "INFO:flaml.automl:Time taken to find the best model: 73.84539341926575\n" + "[flaml.automl: 08-23 16:01:26] {1299} INFO - Time taken to find the best model: 278.8971173763275\n", + "INFO:flaml.automl:Time taken to find the best model: 278.8971173763275\n", + "[flaml.automl: 08-23 16:01:26] {1304} WARNING - Time taken to find the best model is 93% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n", + "WARNING:flaml.automl:Time taken to find the best model is 93% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" ] } ], @@ -878,10 +1230,10 @@ "cell_type": "code", "execution_count": 6, "source": [ - "''' retrieve best config and best learner'''\r\n", - "print('Best ML leaner:', automl.best_estimator)\r\n", - "print('Best hyperparmeter config:', automl.best_config)\r\n", - "print(f'Best mape on validation data: {automl.best_loss}')\r\n", + "''' retrieve best config and best learner'''\n", + "print('Best ML leaner:', automl.best_estimator)\n", + "print('Best hyperparmeter config:', automl.best_config)\n", + "print(f'Best mape on validation data: {automl.best_loss}')\n", "print(f'Training duration of best run: {automl.best_config_train_time}s')" ], "outputs": [ @@ -890,9 +1242,9 @@ "name": "stdout", "text": [ "Best ML leaner: fbprophet\n", - "Best hyperparmeter config: {'changepoint_prior_scale': 0.02876449933617924, 'seasonality_prior_scale': 1.80360430903146, 'holidays_prior_scale': 2.1243991057068654, 'seasonality_mode': 'additive'}\n", - "Best mape on validation data: 0.00047649674701670026\n", - "Training duration of best run: 3.572484016418457s\n" + "Best hyperparmeter config: {'changepoint_prior_scale': 0.03498447027670827, 'seasonality_prior_scale': 2.616244037716704, 'holidays_prior_scale': 5.713876592939503, 'seasonality_mode': 'additive'}\n", + "Best mape on validation data: 0.00047658614467724217\n", + "Training duration of best run: 1.8398802280426025s\n" ] } ], @@ -909,7 +1261,7 @@ "output_type": "stream", "name": "stdout", "text": [ - "\n" + "\n" ] } ], @@ -919,9 +1271,9 @@ "cell_type": "code", "execution_count": 8, "source": [ - "''' pickle and save the automl object '''\r\n", - "import pickle\r\n", - "with open('automl.pkl', 'wb') as f:\r\n", + "''' pickle and save the automl object '''\n", + "import pickle\n", + "with open('automl.pkl', 'wb') as f:\n", " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" ], "outputs": [], @@ -931,9 +1283,9 @@ "cell_type": "code", "execution_count": 9, "source": [ - "''' compute predictions of testing dataset '''\r\n", - "y_pred = automl.predict(X_test)\r\n", - "print('Predicted labels', y_pred)\r\n", + "''' compute predictions of testing dataset '''\n", + "y_pred = automl.predict(X_test)\n", + "print('Predicted labels', y_pred)\n", "print('True labels', y_test)" ], "outputs": [ @@ -941,32 +1293,32 @@ "output_type": "stream", "name": "stdout", "text": [ - "Predicted labels 0 370.182378\n", - "1 
370.899874\n", - "2 371.953616\n", - "3 373.138779\n", - "4 373.638453\n", - "5 373.108385\n", - "6 371.764343\n", - "7 369.852285\n", - "8 368.254247\n", - "9 368.322785\n", - "10 369.521407\n", - "11 370.787262\n", + "Predicted labels 0 370.443113\n", + "1 371.170226\n", + "2 372.222488\n", + "3 373.412902\n", + "4 373.907859\n", + "5 373.399315\n", + "6 372.046066\n", + "7 370.141179\n", + "8 368.558253\n", + "9 368.637791\n", + "10 369.854576\n", + "11 371.126664\n", "Name: yhat, dtype: float64\n", - "True labels co2\n", - "514 370.175\n", - "515 371.325\n", - "516 372.060\n", - "517 372.775\n", - "518 373.800\n", - "519 373.060\n", - "520 371.300\n", - "521 369.425\n", - "522 367.880\n", - "523 368.050\n", - "524 369.375\n", - "525 371.020\n" + "True labels 514 370.175\n", + "515 371.325\n", + "516 372.060\n", + "517 372.775\n", + "518 373.800\n", + "519 373.060\n", + "520 371.300\n", + "521 369.425\n", + "522 367.880\n", + "523 368.050\n", + "524 369.375\n", + "525 371.020\n", + "Name: co2, dtype: float64\n" ] } ], @@ -976,8 +1328,8 @@ "cell_type": "code", "execution_count": 10, "source": [ - "''' compute different metric values on testing dataset'''\r\n", - "from flaml.ml import sklearn_metric_loss_score\r\n", + "''' compute different metric values on testing dataset'''\n", + "from flaml.ml import sklearn_metric_loss_score\n", "print('mape', '=', sklearn_metric_loss_score('mape', y_pred, y_test))" ], "outputs": [ @@ -985,7 +1337,7 @@ "output_type": "stream", "name": "stdout", "text": [ - "mape = 0.0006814858864004413\n" + "mape = 0.0011218052996337928\n" ] } ], @@ -1000,13 +1352,13 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "source": [ - "from flaml.data import get_output_from_log\r\n", - "time_history, best_valid_loss_history, valid_loss_history, config_history, train_loss_history = \\\r\n", - " get_output_from_log(filename=settings['log_file_name'], time_budget=300)\r\n", - "\r\n", - "for config in config_history:\r\n", + "from flaml.data import get_output_from_log\n", + "time_history, best_valid_loss_history, valid_loss_history, config_history, train_loss_history = \\\n", + " get_output_from_log(filename=settings['log_file_name'], time_budget=300)\n", + "\n", + "for config in config_history:\n", " print(config)" ], "outputs": [ @@ -1016,9 +1368,10 @@ "text": [ "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.010000000000000002, 'seasonality_prior_scale': 1.0, 'holidays_prior_scale': 1.0, 'seasonality_mode': 'multiplicative'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.010000000000000002, 'seasonality_prior_scale': 1.0, 'holidays_prior_scale': 1.0, 'seasonality_mode': 'multiplicative'}}\n", "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.0091602623296037, 'seasonality_prior_scale': 0.8823866403788657, 'holidays_prior_scale': 3.2294014074557995, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.0091602623296037, 'seasonality_prior_scale': 0.8823866403788657, 'holidays_prior_scale': 3.2294014074557995, 'seasonality_mode': 'additive'}}\n", - "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.010000000000000002, 'seasonality_prior_scale': 1.0, 'holidays_prior_scale': 0.999999999999999, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 
'Best Hyper-parameters': {'changepoint_prior_scale': 0.010000000000000002, 'seasonality_prior_scale': 1.0, 'holidays_prior_scale': 0.999999999999999, 'seasonality_mode': 'additive'}}\n", - "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.05247335998097256, 'seasonality_prior_scale': 0.987707602743762, 'holidays_prior_scale': 0.5484274380225445, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.05247335998097256, 'seasonality_prior_scale': 0.987707602743762, 'holidays_prior_scale': 0.5484274380225445, 'seasonality_mode': 'additive'}}\n", - "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.02876449933617924, 'seasonality_prior_scale': 1.80360430903146, 'holidays_prior_scale': 2.1243991057068654, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.02876449933617924, 'seasonality_prior_scale': 1.80360430903146, 'holidays_prior_scale': 2.1243991057068654, 'seasonality_mode': 'additive'}}\n" + "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.04806697427414373, 'seasonality_prior_scale': 0.8715399932617315, 'holidays_prior_scale': 1.771092340237384, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.04806697427414373, 'seasonality_prior_scale': 0.8715399932617315, 'holidays_prior_scale': 1.771092340237384, 'seasonality_mode': 'additive'}}\n", + "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.026349035969911335, 'seasonality_prior_scale': 1.5914763468191142, 'holidays_prior_scale': 6.860537461967591, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.026349035969911335, 'seasonality_prior_scale': 1.5914763468191142, 'holidays_prior_scale': 6.860537461967591, 'seasonality_mode': 'additive'}}\n", + "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.03767818581325282, 'seasonality_prior_scale': 3.031296651199767, 'holidays_prior_scale': 6.715821078742762, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.03767818581325282, 'seasonality_prior_scale': 3.031296651199767, 'holidays_prior_scale': 6.715821078742762, 'seasonality_mode': 'additive'}}\n", + "{'Current Learner': 'fbprophet', 'Current Sample': 502, 'Current Hyper-parameters': {'changepoint_prior_scale': 0.03498447027670827, 'seasonality_prior_scale': 2.616244037716704, 'holidays_prior_scale': 5.713876592939503, 'seasonality_mode': 'additive'}, 'Best Learner': 'fbprophet', 'Best Hyper-parameters': {'changepoint_prior_scale': 0.03498447027670827, 'seasonality_prior_scale': 2.616244037716704, 'holidays_prior_scale': 5.713876592939503, 'seasonality_mode': 'additive'}}\n" ] } ], @@ -1026,16 +1379,16 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 12, "source": [ - "import matplotlib.pyplot as plt\r\n", - "import numpy as np\r\n", - "\r\n", - "plt.title('Learning Curve')\r\n", - "plt.xlabel('Wall Clock Time (s)')\r\n", - "plt.ylabel('Validation Accuracy')\r\n", - "plt.scatter(time_history, 1 - np.array(valid_loss_history))\r\n", - "plt.step(time_history, 1 - 
np.array(best_valid_loss_history), where='post')\r\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "plt.title('Learning Curve')\n", + "plt.xlabel('Wall Clock Time (s)')\n", + "plt.ylabel('Validation Accuracy')\n", + "plt.scatter(time_history, 1 - np.array(valid_loss_history))\n", + "plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n", "plt.show()" ], "outputs": [ @@ -1045,8 +1398,8 @@ "text/plain": [ "
" ], - "image/svg+xml": "\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n", - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAZsAAAEWCAYAAACwtjr+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3dfbgV1Xn38e9PRMFEBBUtgoBWJCKKRoIxjTbaRNH4gm+JaGMfjaIJWq2VBK0mmlSr9UnyaGMlvpc0GqMCaqsSYyI0FsVDQF5EGnyJAiZilYhAhAP388esreN2n30GOcPZHH6f65rrzKyZNXMP4rlZa9asUURgZmZWpi3aOwAzM+v4nGzMzKx0TjZmZlY6JxszMyudk42ZmZXOycbMzErnZGPWziQdLGlBe8dhViYnG9usSXpZ0ufbM4aI+K+IGFjW+SUdIWmqpOWSlkqaIunYsq5nVouTjVnJJHVqx2ufBNwLjAf6ADsD3wKO+QjnkiT/zrCPxH9xzGqQtIWksZJekPS/kn4mafvc/nsl/V7SH1OrYe/cvjsl3STpYUkrgENTC+piSbNTnXskdUnHf07Solz9Fo9N+78h6TVJSySdJSkk7VHjHgR8H/huRNwaEX+MiHURMSUizk7HXCHp33N1+qfzbZm2n5B0laQngZXApZKaqq7zd5IeTOtbS/q/kl6R9AdJ4yR13cD/HNYBONmY1fa3wAjgL4FdgLeAG3P7HwEGADsBvwF+UlX/VOAqYFvg16nsS8BwYDdgX+D/1Ll+zWMlDQcuAj4P7JHia8lAYFfgvjrHFPEVYBTZvfwLMFDSgNz+U4G70vq1wJ7Afim+3mQtKdvMOdmY1XYO8A8RsSgi3gWuAE6q/Is/Im6PiOW5fUMkbZer/0BEPJlaEn9KZTdExJKIeBN4iOwXcktaOvZLwB0RMS8iVgJX1jnHDunna4XvurY70/WaI+KPwAPASICUdD4BPJhaUmcDfxcRb0bEcuBq4JQNvL51AE42ZrX1AyZKWiZpGTAfWAvsLKmTpGtSF9vbwMupzo65+q/WOOfvc+srgY/XuX5Lx+5Sde5a16n43/SzV51jiqi+xl2kZEPWqpmUEl9PYBtgRu7P7dFUbps5Jxuz2l4FjoyI7rmlS0QsJvsFexxZV9Z2QP9UR7n6ZU2n/hrZg/6KXescu4DsPk6sc8wKsgRR8Wc1jqm+l58DO0rajyzpVLrQ3gBWAXvn/sy2i4h6SdU2E042ZtBZUpfcsiUwDrhKUj8AST0lHZeO3xZ4l6zlsA1ZV9HG8jPgDEl7SdqGOs9DIvt+yEXA5ZLOkNQtDXz4rKSb02GzgEMk9U3dgJe0FkBENJM9B7oO2B54LJWvA24BfiBpJwBJvSUd8ZHv1joMJxszeJjsX+SV5QrgeuBB4OeSlgNPAQem48cDvwMWA8+lfRtFRDwC3AD8ClgITEu73m3h+PuALwNnAkuAPwD/SPbchYh4DLgHmA3MAP6jYCh3kbXs7k3Jp+KbKa6nUhfjL8gGKthmTv54mtmmS9JewFxg66pf+mYNxS0bs02MpOMlbSWpB9lQ44ecaKzROdmYbXrOAZYCL5CNkPta+4Zj1jp3o5mZWencsjEzs9Jt2d4BNKodd9wx+vfv395hmJltUmbMmPFGRHzoRV4nmxb079+fpqam1g80M7P3SPpdrXJ3o5mZWemcbMzMrHRONmZmVjonGzMzK52TjZmZlc6j0czMjEkzF3Pd5AUsWbaKXbp3ZcwRAxmxf+82O7+TjZnZZm7SzMVcMmEOq9asBWDxslVcMmEOQJslHHejmZlt5q6bvOC9RFOxas1arpu8oM2u4WRjZraZW7Js1XqVfxTuRrM2UXZ/r5mVZ5fuXVlcI7Hs0r1rm13DLRvbYJX+3sXLVhG83987aebi9g7NzAoYc8RAunbu9IGyrp07MeaItvvIqls2tsFa6u/9xn2zuXv6K+0UlZmtj126d+HFpSsIoLdHo1kjaqlfd/XadRs5EjP7qHb8+Nbs+PGtOW6/3px6YN82P7+TjW2wlvp7e3fvyj3nHNQOEZlZo/EzG9tgG6O/18w2bW7Z2Aar9Ot+477ZrF67rpT+XjPbtDnZWJsYsX/v9wYDuOvMzKq5G83MzErnZGNmZqVzsjEzs9I52ZiZWemcbMzMrHRONmZmVrpSk42k4ZIWSFooaWyN/T0kTZQ0W9J0SYNz+y6QNFfSPEkX5sqHSJomaY6khyR1S+X9Ja2SNCst43J1DkjHL5R0gySVed9mZvZBpSUbSZ2AG4EjgUHASEmDqg67FJgVEfsCpwPXp7qDgbOBYcAQ4GhJA1KdW4GxEbEPMBEYkzvfCxGxX1rOzZXfBIwCBqRleNvdqZmZtabMls0wYGFEvBgRq4GfAsdVHTMIeBwgIp4H+kvaGdgLeCoiVkZEMzAFOD7VGQhMTeuPASfWC0JSL6BbREyLiADGAyM2+O7MzKywMpNNb+DV3PaiVJb3LHACgKRhQD+gDzAXOETSDpK2AY4Cdk115gLHpvWTc+UAu0maKWmKpINzcSxqJQ5SDKMkNUlqWrp0afE7NTOzuspMNrWei0TV9jVAD0mzgPOBmUBzRMwHriVruTxKlpSaU50zgdGSZgDbAqtT+WtA34jYH7gIuCs9zykSR1YYcXNEDI2IoT179ix4m2Zm1poy50ZbxAdbHX2AJfkDIuJt4AyA9ND+pbQQEbcBt6V9V6fzVbrbDk/lewJfTOXvAu+m9RmSXgD2TPX61IvDzMzKVWbL5hlggKTdJG0FnAI8mD9AUve0D+AsYGpKQEjaKf3sS9bVdndV+RbAZcC4tN0zDUpA0u5kAwFejIjXgOWSPp0S2unAA+XdtpmZVSutZRMRzZLOAyYDnYDbI2KepHPT/nFkAwHGS1oLPAd8NXeK+yXtAKwBRkfEW6l8pKTRaX0CcEdaPwT4jqRmYC1wbkS8mfZ9DbgT6Ao8khYzM9tISv3EQEQ8DDxcVTYutz6NrAVSq+7BLZRfTxoiXVV+P3B/C3WagMG19pmZWfk8g4CZmZXOycbMzErnZGNmZqVzsjEzs9I52ZiZWemcbMzMrHRONmZmVjonGzMzK52TjZmZlc7JxszMSudkY2ZmpXOyMTOz0jnZmJlZ6ZxszMysdE42ZmZWOicbMzMrnZONmZmVzsnGzMxK52RjZmalc7IxM7PSOdmYmVnpnGzMzKx0TjZmZlY6JxszMyudk42ZmZXOycbMzErnZGNmZqVrNdlI2n5jBGJmZh1XkZbN05LulXSUJJUekZmZdThFks2ewM3AV4CFkq6WtGeRk0saLmmBpIWSxtbY30PSREmzJU2XNDi37wJJcyXNk3RhrnyIpGmS5kh6SFK3qnP2lfSOpItzZU+kOGalZaci8ZuZWdtoNdlE5rGIGAmcBfwNMF3SFEkHtVRPUifgRuBIYBAwUtKgqsMuBWZFxL7A6cD1qe5g4GxgGDAEOFrSgFTnVmBsROwDTATGVJ3zB8AjNUI6LSL2S8vrrd23mZm1nSLPbHZIrYwm4G
    [base64-encoded "image/png" and "image/svg+xml" plot payloads omitted: notebook figure outputs]
     },
     "metadata": {
      "needs_background": "light"
@@ -1066,11 +1419,11 @@
  "metadata": {
   "kernelspec": {
    "name": "python3",
-   "display_name": "Python 3.8.10 64-bit ('python38': conda)"
+   "display_name": "Python 3.8.0 64-bit ('blend': conda)"
   },
   "language_info": {
    "name": "python",
-   "version": "3.8.10",
+   "version": "3.8.0",
    "mimetype": "text/x-python",
    "codemirror_mode": {
     "name": "ipython",
@@ -1081,7 +1434,7 @@
    "file_extension": ".py"
   },
   "interpreter": {
-   "hash": "8b6c8c3ba4bafbc4530f534c605c8412f25bf61ef13254e4f377ccd42b838aa4"
+   "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544"
   }
  },
  "nbformat": 4,
diff --git a/notebook/flaml_lightgbm.ipynb b/notebook/flaml_lightgbm.ipynb
index 43db1efe5c..afeabfbd82 100644
--- a/notebook/flaml_lightgbm.ipynb
+++ b/notebook/flaml_lightgbm.ipynb
@@ -2,11 +2,6 @@
 "cells": [
  {
   "cell_type": "markdown",
-  "metadata": {
-   "slideshow": {
-    "slide_type": "slide"
-   }
-  },
   "source": [
    "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
    "\n",
@@ -31,44 +26,47 @@
    "```bash\n",
    "pip install flaml[notebook]\n",
    "```"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 1,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "!pip install flaml[notebook];"
-  ]
- },
- {
-  "cell_type": "markdown",
+  ],
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
-  },
+  }
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 1,
+  "source": [
+   "!pip install flaml[notebook];"
+  ],
+  "outputs": [],
+  "metadata": {}
+ },
+ {
+  "cell_type": "markdown",
   "source": [
    "## 2. Regression Example\n",
    "### Load data and preprocess\n",
    "\n",
    "Download [houses dataset](https://www.openml.org/d/537) from OpenML. The task is to predict the median price of a house in a region based on the demographic composition and the state of the housing market in the region."
-  ]
+  ],
+  "metadata": {
+   "slideshow": {
+    "slide_type": "slide"
+   }
+  }
 },
 {
  "cell_type": "code",
- "execution_count": 2,
- "metadata": {
-  "slideshow": {
-   "slide_type": "subslide"
-  },
-  "tags": []
- },
+ "execution_count": 1,
+ "source": [
+  "from flaml.data import load_openml_dataset\n",
+  "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=537, data_dir='./')"
+ ],
 "outputs": [
  {
-  "name": "stdout",
   "output_type": "stream",
+  "name": "stdout",
   "text": [
    "load dataset from ./openml_ds537.pkl\n",
    "Dataset name: houses\n",
@@ -77,48 +75,44 @@
   ]
  }
 ],
- "source": [
-  "from flaml.data import load_openml_dataset\n",
-  "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=537, data_dir='./')"
- ]
+ "metadata": {
+  "slideshow": {
+   "slide_type": "subslide"
+  },
+  "tags": []
+ }
 },
 {
  "cell_type": "markdown",
+ "source": [
+  "### Run FLAML\n",
+  "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. "
+ ],
 "metadata": {
  "slideshow": {
   "slide_type": "slide"
  }
- },
- "source": [
-  "### Run FLAML\n",
-  "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. 
" - ] + } }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, + "source": [ + "''' import AutoML class from flaml package '''\n", + "from flaml import AutoML\n", + "automl = AutoML()" + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [], - "source": [ - "''' import AutoML class from flaml package '''\n", - "from flaml import AutoML\n", - "automl = AutoML()" - ] + } }, { "cell_type": "code", - "execution_count": 4, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [], + "execution_count": 3, "source": [ "settings = {\n", " \"time_budget\": 240, # total running time in seconds\n", @@ -128,240 +122,274 @@ " \"log_file_name\": 'houses_experiment.log', # flaml log file\n", " \"seed\": 7654321, # random seed\n", "}" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": 4, + "source": [ + "'''The main flaml automl API'''\n", + "automl.fit(X_train=X_train, y_train=y_train, **settings)" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "[flaml.automl: 08-22 21:09:17] {1130} INFO - Evaluation method: cv\n", + "[flaml.automl: 08-22 21:09:17] {634} INFO - Using RepeatedKFold\n", + "[flaml.automl: 08-22 21:09:17] {1155} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 08-22 21:09:17] {1175} INFO - List of ML learners in AutoML Run: ['lgbm']\n", + "[flaml.automl: 08-22 21:09:17] {1358} INFO - iteration 0, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.5s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", + "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 1, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.7s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", + "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 2, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 0.8s,\tbest lgbm's error=0.5517,\tbest lgbm's error=0.5517\n", + "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 3, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:18] {1515} INFO - at 1.0s,\tbest lgbm's error=0.3103,\tbest lgbm's error=0.3103\n", + "[flaml.automl: 08-22 21:09:18] {1358} INFO - iteration 4, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.1s,\tbest lgbm's error=0.3103,\tbest lgbm's error=0.3103\n", + "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 5, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.4s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n", + "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 6, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.6s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n", + "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 7, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 1.8s,\tbest lgbm's error=0.2718,\tbest lgbm's error=0.2718\n", + "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 8, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:19] {1515} INFO - at 2.0s,\tbest lgbm's error=0.2406,\tbest lgbm's error=0.2406\n", + "[flaml.automl: 08-22 21:09:19] {1358} INFO - iteration 9, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:20] {1515} INFO - at 2.2s,\tbest lgbm's error=0.2406,\tbest lgbm's error=0.2406\n", + "[flaml.automl: 08-22 21:09:20] {1358} INFO - iteration 10, 
current learner lgbm\n", + "[flaml.automl: 08-22 21:09:20] {1515} INFO - at 2.8s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n", + "[flaml.automl: 08-22 21:09:20] {1358} INFO - iteration 11, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:21] {1515} INFO - at 3.7s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n", + "[flaml.automl: 08-22 21:09:21] {1358} INFO - iteration 12, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:22] {1515} INFO - at 4.3s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n", + "[flaml.automl: 08-22 21:09:22] {1358} INFO - iteration 13, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:22] {1515} INFO - at 4.9s,\tbest lgbm's error=0.1787,\tbest lgbm's error=0.1787\n", + "[flaml.automl: 08-22 21:09:22] {1358} INFO - iteration 14, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:23] {1515} INFO - at 5.8s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:23] {1358} INFO - iteration 15, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:25] {1515} INFO - at 7.6s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:25] {1358} INFO - iteration 16, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:26] {1515} INFO - at 8.3s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:26] {1358} INFO - iteration 17, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:28] {1515} INFO - at 10.3s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:28] {1358} INFO - iteration 18, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:28] {1515} INFO - at 11.0s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:28] {1358} INFO - iteration 19, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:29] {1515} INFO - at 11.4s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:29] {1358} INFO - iteration 20, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:31] {1515} INFO - at 13.9s,\tbest lgbm's error=0.1765,\tbest lgbm's error=0.1765\n", + "[flaml.automl: 08-22 21:09:31] {1358} INFO - iteration 21, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:32] {1515} INFO - at 14.9s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n", + "[flaml.automl: 08-22 21:09:32] {1358} INFO - iteration 22, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:33] {1515} INFO - at 15.8s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n", + "[flaml.automl: 08-22 21:09:33] {1358} INFO - iteration 23, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:34] {1515} INFO - at 16.9s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n", + "[flaml.automl: 08-22 21:09:34] {1358} INFO - iteration 24, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:35] {1515} INFO - at 18.1s,\tbest lgbm's error=0.1693,\tbest lgbm's error=0.1693\n", + "[flaml.automl: 08-22 21:09:35] {1358} INFO - iteration 25, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:38] {1515} INFO - at 20.9s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:09:38] {1358} INFO - iteration 26, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:39] {1515} INFO - at 22.0s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:09:39] {1358} INFO - iteration 27, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:47] {1515} INFO - at 30.0s,\tbest lgbm's error=0.1685,\tbest lgbm's 
error=0.1685\n", + "[flaml.automl: 08-22 21:09:47] {1358} INFO - iteration 28, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:50] {1515} INFO - at 32.7s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:09:50] {1358} INFO - iteration 29, current learner lgbm\n", + "[flaml.automl: 08-22 21:09:51] {1515} INFO - at 33.6s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:09:51] {1358} INFO - iteration 30, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:09] {1515} INFO - at 52.0s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:10:09] {1358} INFO - iteration 31, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:11] {1515} INFO - at 54.1s,\tbest lgbm's error=0.1685,\tbest lgbm's error=0.1685\n", + "[flaml.automl: 08-22 21:10:11] {1358} INFO - iteration 32, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:17] {1515} INFO - at 59.6s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n", + "[flaml.automl: 08-22 21:10:17] {1358} INFO - iteration 33, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:22] {1515} INFO - at 65.1s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n", + "[flaml.automl: 08-22 21:10:22] {1358} INFO - iteration 34, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:26] {1515} INFO - at 68.7s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n", + "[flaml.automl: 08-22 21:10:26] {1358} INFO - iteration 35, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:45] {1515} INFO - at 88.0s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n", + "[flaml.automl: 08-22 21:10:45] {1358} INFO - iteration 36, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:46] {1515} INFO - at 88.9s,\tbest lgbm's error=0.1609,\tbest lgbm's error=0.1609\n", + "[flaml.automl: 08-22 21:10:46] {1358} INFO - iteration 37, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:54] {1515} INFO - at 96.6s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n", + "[flaml.automl: 08-22 21:10:54] {1358} INFO - iteration 38, current learner lgbm\n", + "[flaml.automl: 08-22 21:10:57] {1515} INFO - at 99.6s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n", + "[flaml.automl: 08-22 21:10:57] {1358} INFO - iteration 39, current learner lgbm\n", + "[flaml.automl: 08-22 21:11:57] {1515} INFO - at 160.1s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n", + "[flaml.automl: 08-22 21:11:57] {1358} INFO - iteration 40, current learner lgbm\n", + "[flaml.automl: 08-22 21:11:59] {1515} INFO - at 161.4s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n", + "[flaml.automl: 08-22 21:11:59] {1358} INFO - iteration 41, current learner lgbm\n", + "[flaml.automl: 08-22 21:12:00] {1515} INFO - at 162.5s,\tbest lgbm's error=0.1573,\tbest lgbm's error=0.1573\n", + "[flaml.automl: 08-22 21:12:00] {1358} INFO - iteration 42, current learner lgbm\n", + "[flaml.automl: 08-22 21:12:35] {1515} INFO - at 197.7s,\tbest lgbm's error=0.1535,\tbest lgbm's error=0.1535\n", + "[flaml.automl: 08-22 21:12:35] {1358} INFO - iteration 43, current learner lgbm\n", + "[flaml.automl: 08-22 21:13:09] {1515} INFO - at 231.6s,\tbest lgbm's error=0.1535,\tbest lgbm's error=0.1535\n", + "[flaml.automl: 08-22 21:13:09] {1592} INFO - selected model: LGBMRegressor(colsample_bytree=0.6513228229604555,\n", + " learning_rate=0.011556686284183076, max_bin=512,\n", + " min_child_samples=9, n_estimators=2120, num_leaves=92,\n", + " objective='regression', 
reg_alpha=0.024999216167840198,\n", + " reg_lambda=0.01918323581702806, verbose=-1)\n", + "[flaml.automl: 08-22 21:13:16] {1633} INFO - retrain lgbm for 6.6s\n", + "[flaml.automl: 08-22 21:13:16] {1636} INFO - retrained model: LGBMRegressor(colsample_bytree=0.6513228229604555,\n", + " learning_rate=0.011556686284183076, max_bin=512,\n", + " min_child_samples=9, n_estimators=2120, num_leaves=92,\n", + " objective='regression', reg_alpha=0.024999216167840198,\n", + " reg_lambda=0.01918323581702806, verbose=-1)\n", + "[flaml.automl: 08-22 21:13:16] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:13:16] {1200} INFO - Time taken to find the best model: 197.68836307525635\n", + "[flaml.automl: 08-22 21:13:16] {1205} WARNING - Time taken to find the best model is 82% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" + ] + } + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + } + }, + { + "cell_type": "markdown", + "source": [ + "### Best model and metric" + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", "execution_count": 5, - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-12 21:25:04] {1121} INFO - Evaluation method: cv\n", - "[flaml.automl: 08-12 21:25:04] {628} INFO - Using RepeatedKFold\n", - "[flaml.automl: 08-12 21:25:04] {1142} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 08-12 21:25:04] {1162} INFO - List of ML learners in AutoML Run: ['lgbm']\n", - "[flaml.automl: 08-12 21:25:04] {1252} INFO - iteration 0, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:07] {1405} INFO - at 4.0s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", - "[flaml.automl: 08-12 21:25:07] {1252} INFO - iteration 1, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:08] {1405} INFO - at 5.1s,\tbest lgbm's error=0.7385,\tbest lgbm's error=0.7385\n", - "[flaml.automl: 08-12 21:25:08] {1252} INFO - iteration 2, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:10] {1405} INFO - at 6.6s,\tbest lgbm's error=0.5520,\tbest lgbm's error=0.5520\n", - "[flaml.automl: 08-12 21:25:10] {1252} INFO - iteration 3, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:11] {1405} INFO - at 7.7s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 08-12 21:25:11] {1252} INFO - iteration 4, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:13] {1405} INFO - at 9.8s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 08-12 21:25:13] {1252} INFO - iteration 5, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:14] {1405} INFO - at 10.9s,\tbest lgbm's error=0.3886,\tbest lgbm's error=0.3886\n", - "[flaml.automl: 08-12 21:25:14] {1252} INFO - iteration 6, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:16] {1405} INFO - at 12.3s,\tbest lgbm's error=0.3023,\tbest lgbm's error=0.3023\n", - "[flaml.automl: 08-12 21:25:16] {1252} INFO - iteration 7, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:17] {1405} INFO - at 13.6s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", - "[flaml.automl: 08-12 21:25:17] {1252} INFO - iteration 8, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:19] {1405} INFO - at 15.9s,\tbest lgbm's error=0.2611,\tbest lgbm's error=0.2611\n", - "[flaml.automl: 08-12 21:25:19] {1252} INFO - iteration 9, current 
learner lgbm\n", - "[flaml.automl: 08-12 21:25:23] {1405} INFO - at 19.9s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 08-12 21:25:23] {1252} INFO - iteration 10, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:27] {1405} INFO - at 23.3s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 08-12 21:25:27] {1252} INFO - iteration 11, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:27] {1405} INFO - at 24.0s,\tbest lgbm's error=0.2363,\tbest lgbm's error=0.2363\n", - "[flaml.automl: 08-12 21:25:27] {1252} INFO - iteration 12, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:29] {1405} INFO - at 25.4s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:29] {1252} INFO - iteration 13, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:29] {1405} INFO - at 26.0s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:29] {1252} INFO - iteration 14, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:30] {1405} INFO - at 26.9s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:30] {1252} INFO - iteration 15, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:32] {1405} INFO - at 28.2s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:32] {1252} INFO - iteration 16, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:34] {1405} INFO - at 30.2s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:34] {1252} INFO - iteration 17, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:34] {1405} INFO - at 30.8s,\tbest lgbm's error=0.1953,\tbest lgbm's error=0.1953\n", - "[flaml.automl: 08-12 21:25:34] {1252} INFO - iteration 18, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:36] {1405} INFO - at 32.4s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 08-12 21:25:36] {1252} INFO - iteration 19, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:37] {1405} INFO - at 33.6s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 08-12 21:25:37] {1252} INFO - iteration 20, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:42] {1405} INFO - at 38.2s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 08-12 21:25:42] {1252} INFO - iteration 21, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:43] {1405} INFO - at 39.8s,\tbest lgbm's error=0.1795,\tbest lgbm's error=0.1795\n", - "[flaml.automl: 08-12 21:25:43] {1252} INFO - iteration 22, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:46] {1405} INFO - at 43.1s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 08-12 21:25:47] {1252} INFO - iteration 23, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:49] {1405} INFO - at 45.5s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 08-12 21:25:49] {1252} INFO - iteration 24, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:58] {1405} INFO - at 54.4s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 08-12 21:25:58] {1252} INFO - iteration 25, current learner lgbm\n", - "[flaml.automl: 08-12 21:25:59] {1405} INFO - at 55.8s,\tbest lgbm's error=0.1768,\tbest lgbm's error=0.1768\n", - "[flaml.automl: 08-12 21:25:59] {1252} INFO - iteration 26, current learner lgbm\n", - "[flaml.automl: 08-12 21:26:21] {1405} INFO - at 77.3s,\tbest lgbm's error=0.1657,\tbest lgbm's 
error=0.1657\n", - "[flaml.automl: 08-12 21:26:21] {1252} INFO - iteration 27, current learner lgbm\n", - "[flaml.automl: 08-12 21:26:29] {1405} INFO - at 85.4s,\tbest lgbm's error=0.1657,\tbest lgbm's error=0.1657\n", - "[flaml.automl: 08-12 21:26:29] {1252} INFO - iteration 28, current learner lgbm\n", - "[flaml.automl: 08-12 21:26:36] {1405} INFO - at 92.3s,\tbest lgbm's error=0.1648,\tbest lgbm's error=0.1648\n", - "[flaml.automl: 08-12 21:26:36] {1252} INFO - iteration 29, current learner lgbm\n", - "[flaml.automl: 08-12 21:26:39] {1405} INFO - at 95.6s,\tbest lgbm's error=0.1648,\tbest lgbm's error=0.1648\n", - "[flaml.automl: 08-12 21:26:39] {1252} INFO - iteration 30, current learner lgbm\n", - "[flaml.automl: 08-12 21:27:06] {1405} INFO - at 122.7s,\tbest lgbm's error=0.1615,\tbest lgbm's error=0.1615\n", - "[flaml.automl: 08-12 21:27:06] {1252} INFO - iteration 31, current learner lgbm\n", - "[flaml.automl: 08-12 21:27:11] {1405} INFO - at 127.3s,\tbest lgbm's error=0.1615,\tbest lgbm's error=0.1615\n", - "[flaml.automl: 08-12 21:27:11] {1252} INFO - iteration 32, current learner lgbm\n", - "[flaml.automl: 08-12 21:28:53] {1405} INFO - at 229.8s,\tbest lgbm's error=0.1615,\tbest lgbm's error=0.1615\n", - "[flaml.automl: 08-12 21:28:53] {1461} INFO - selected model: LGBMRegressor(colsample_bytree=0.8021997484670117,\n", - " learning_rate=0.062267095563511524, max_bin=512,\n", - " min_child_samples=128, n_estimators=511, num_leaves=276,\n", - " objective='regression', reg_alpha=0.009948140763622663,\n", - " reg_lambda=8.181816322817145, subsample=0.8716100265520644,\n", - " verbose=-1)\n", - "[flaml.automl: 08-12 21:28:53] {1184} INFO - fit succeeded\n", - "[flaml.automl: 08-12 21:28:53] {1185} INFO - Time taken to find the best model: 122.74374485015869\n" - ] - } - ], - "source": [ - "'''The main flaml automl API'''\n", - "automl.fit(X_train=X_train, y_train=y_train, **settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "### Best model and metric" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best hyperparmeter config: {'n_estimators': 1610, 'num_leaves': 276, 'min_child_samples': 128, 'learning_rate': 0.062267095563511524, 'subsample': 0.8716100265520644, 'log_max_bin': 10, 'colsample_bytree': 0.8021997484670117, 'reg_alpha': 0.009948140763622663, 'reg_lambda': 8.181816322817145}\n", - "Best r2 on validation data: 0.8385\n", - "Training duration of best run: 27.09 s\n" - ] - } - ], "source": [ "''' retrieve best config'''\n", "print('Best hyperparmeter config:', automl.best_config)\n", "print('Best r2 on validation data: {0:.4g}'.format(1-automl.best_loss))\n", "print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + ], "outputs": [ { - "data": { - "text/plain": [ - "LGBMRegressor(colsample_bytree=0.8021997484670117,\n", - " learning_rate=0.062267095563511524, max_bin=512,\n", - " min_child_samples=128, n_estimators=511, num_leaves=276,\n", - " objective='regression', reg_alpha=0.009948140763622663,\n", - " reg_lambda=8.181816322817145, subsample=0.8716100265520644,\n", - " verbose=-1)" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" 
+      "output_type": "stream",
+      "name": "stdout",
+      "text": [
+       "Best hyperparmeter config: {'n_estimators': 2120, 'num_leaves': 92, 'min_child_samples': 9, 'learning_rate': 0.011556686284183076, 'log_max_bin': 10, 'colsample_bytree': 0.6513228229604555, 'reg_alpha': 0.024999216167840198, 'reg_lambda': 0.01918323581702806}\n",
+       "Best r2 on validation data: 0.8465\n",
+       "Training duration of best run: 35.16 s\n"
+      ]
     }
    ],
-   "source": [
-    "automl.model.estimator\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<BarContainer object of 8 artists>"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    },
-    {
-     "data": {
-      "image/png": "[base64 PNG payload omitted: feature-importance bar chart]",
-      "text/plain": [
-       "<Figure size 432x288 with 1 Axes>"
-      ]
-     },
-     "metadata": {
-      "needs_background": "light"
-     },
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "plt.barh(automl.model.estimator.feature_name_, automl.model.estimator.feature_importances_)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {
-    "slideshow": {
-     "slide_type": "slide"
-    }
-   },
-   "outputs": [],
-   "source": [
-    "''' pickle and save the automl object '''\n",
-    "import pickle\n",
-    "with open('automl.pkl', 'wb') as f:\n",
-    "    pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "automl.model.estimator\n"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "LGBMRegressor(colsample_bytree=0.6513228229604555,\n",
       "              learning_rate=0.011556686284183076, max_bin=512,\n",
       "              min_child_samples=9, n_estimators=2120, num_leaves=92,\n",
       "              objective='regression', reg_alpha=0.024999216167840198,\n",
       "              reg_lambda=0.01918323581702806, verbose=-1)"
      ]
     },
     "metadata": {},
     "execution_count": 6
    }
   ],
   "metadata": {
    "slideshow": {
     "slide_type": "slide"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "import matplotlib.pyplot as plt\n",
    "plt.barh(automl.model.estimator.feature_name_, automl.model.estimator.feature_importances_)"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "<BarContainer object of 8 artists>"
      ]
     },
     "metadata": {},
     "execution_count": 7
    },
    {
     "output_type": "display_data",
     "data": {
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ],
      "image/svg+xml": "[SVG payload omitted: feature-importance bar chart]",
      "image/png": "[base64 PNG payload omitted: feature-importance bar chart]"
7NMvhGoxrVv6KBx3m3VDsNsVBmsT6mx6vNK08zMLJOTppmZWSYnTTMzs0xOmmZmZpmcNM3MzDI5aZqZmWUaFUlTUqOk1VUY96UK28+XdE4X9VWJ38zM3mhUJE0zM7OBMJqSZp2kKyStkXSXpO0lNUlaJqlN0k2SdgGQtERScypPlLQ+lQ+QtFzSyrTP5FT/jyX1l0uq6xxU0tckrUrj7JHqGiXdm/q4R9JbyoOVND3ttwo4s6S+yxjMzGzwjaakORm4NCIOADYCxwNXA1+MiKlAO/CVXvo4DfiviGgCmoHfSHo7MBt4d6rfApyY2u8ILIuIg4D7gU+l+m8Di9K41wKXdDHW94Gz0r49xlC+o6S5kloktWzZ1NHLlMzMLNdoSprrImJlKrcC+wATIuK+VLcImNlLHw8CX5L0RWCviPgz8D5gOvCwpJXp+d6p/SvArSVjNqbyocAPU/kHwIzSQSRNSLHdX9KmpxjeICIWRkRzRDTX7VDfy5TMzCzXaEqam0vKW4AJPbR9jddfm3GdlRHxQ+Bo4M/ATyW9FxDFqrEpPfaNiPlpl1cjIkrG7Pdn/XYTg5mZDYHRlDTLdQDPSzo8PT8J6Fx1rqdYPQLM6txB0t7AUxFxCXAzMBW4B5gl6U2pza6S9upl7J8Dn0jlE4GlpRsjYiOwUdKMkjY9xWBmZkNgNCdNgDnAAkltQBNwfqr/BnC6pBXAxJL2HwdWp9OwBwJXR8QvgC8Dd6V+7gYaehn3LODU1P4k4LNdtDkVuDSNpZ5iyJ6tmZn1i14/e2i1aGzD5GiYc3G1wzAbVfzVYCOfpNaIaC6vH+0rTTMzs2xOmmZmZpmcNM3MzDI5aZqZmWXq9/sGbXibMqmeFt+UYGY2ILzSNDMzy+SkaWZmlslJ08zMLJOTppmZWSbfCFTj2jd00DjvtmqHYWbDgD+pqP+80jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMTppmZmaZnDQHgaRGSasz2pxQ8rxZ0iWDH52ZmfWVk2b1NAJ/SZoR0RIRn6leOGZm1ptRmTTTKu8xSddKelTS9ZJ2kPQ+SSsktUu6UtLY1H69pItS/XJJb0v1V0maVdLvS92MtVTSI+lxWNp0AXC4pJWSzpZ0hKRb0z67SvqJpDZJyyRNTfXzU1xLJD0lyUnWzGwIjcqkmewLfDci3g68AHweuAqYHRFTKD4t6fSS9h2p/jvAxRWM8yzwgYiYBswGOk/BzgOWRkRTRHyrbJ/zgBURMRX4EnB1ybb9gA8ChwBfkTSmfEBJcyW1SGrZsqmjglDNzKwnozlp/joiHkjla4D3Aesi4vFUtwiYWdL+upKfh1YwzhjgCkntwGJg/4x9ZgA/AIiIe4HdJO2ctt0WEZsj4g8UCXmP8p0jYmFENEdEc90O9RWEamZmPRnNnz0bZc83Artltu8sv0b6j4ekbYDtutjvbOAZ4KDU9uW+BFtic0l5C6P7GJqZDanRvNJ8i6TOFeMJQAvQ2Hm9EjgJuK+k/eySnw+m8npgeiofTbGqLFcPPB0RW1Ofdan+RWCnbmJbCpwIIOkI4A8R8ULWrMzMbNCM5lXKWuBMSVcCvwA+AywDFkvaFngYuKyk/S6S2ihWev+Q6q4Abpa0CrgD+FMX43wXuEHSyWVt2oAtad+rgBUl+8wHrkzjbQLm9G+qZmY2EBRRfpay9klqBG6NiAMz268HmtN1xBFlbMPkaJhTyX1LZlar/NVg+SS1RkRzef1oPj1rZmZWkVF5ejYi1gNZq8zUvnHQgjEzsxHDK00zM7NMTppmZmaZnDTNzMwyjcprmqPJlEn1tPiOOTOzAeGVppmZWSYnTTMzs0xOmmZmZpmcNM3MzDL5RqAa176hg8Z5t1U7DDPDH2NXC7zSNDMzy+SkaWZmlslJ08zMLJOTppmZWSYnTTMzs0xOmmZmZpmGXdKUNEHSGb20aZR0QkZfjZJW97D9FEnf6UucA7G/mZmNLMMuaQITgB6TJtAI9Jo0q0WS3/9qZlaDhmPSvADYR9JKSQvSY7WkdkmzS9ocntqcnVaUSyU9kh6HVTDenpKWSHpC0lc6KyX9o6TlaYzLJdWl+lMlPS5pOfDukvZXSbpM0kPARZKaJC2T1CbpJkm7pHbd1S+R9C1JLZIelfQOSTemuP49tdlR0m2SVqXXZDZmZjZkhmPSnAf8MiKagGVAE3AQ8H5ggaSG1GZpRDRFxLeAZ4EPRMQ0YDZwSQXjHQIcD0wFPiapWdLbUz/vTnFsAU5MY59HkSxnAPuX9fVm4LCI+DxwNfDFiJgKtAOdCbm7eoBXIqIZuAy4GTgTOBA4RdJuwIeA30bEQRFxIHBHVxOSNDcl35YtmzoqeCnMzKwnw/004gzguojYAjwj6T7gHcALZe3GAN+R1Jng/raCMe6OiOcAJN2YxnwNmA48LAlge4rE/E5gSUT8PrX/cdlYiyNii6R6YEJE3JfqFwGLu6sv2f+W9LMdWBMRT6dxngL2TPXflHQhcGtELO1qQhGxEFgIMLZhclTwWpiZWQ+Ge9LMdTbwDMWKdBvg5Qr2LU8qAQhYFBH/WrpB0rG99PWnCsbtyub0c2tJufP5thHxuKRpwFHAv0u6JyLO7+eYZmaWaTienn0R2CmVlwKzJdVJ2h2YCSwvawNQDzwdEVuBk4C6Csb7gKRdJW0PHAs8ANwDzJL0JoC0fS/gIeDvJO0maQzwsa46jIgO4HlJh6eqk4D7uqvPDVTS3wCbIuIaYAEwrYJ5mplZPw27lWZEPCfpgfRWkduBNmAVxQrwCxHxO0nPAVskrQKuAr4L3CDpZIrrfJWs+JYDN1Bcj7wmIloAJH0ZuEvSNsCrwJkRsUzSfOBBYCOwsod+5wCXSdoBeAo4tZf6HFMorutuTTGdXsG+ZmbWT4rwJa9aNrZhcjTMubjaYZgZ/mqwkURSa7ox8w2G4+lZMzOzYWnYnZ4dDJI+CFxYVr0uIo6rRjxmZjYyjYqkGRF3AndWOw4zMxvZfHrWzMws06hYaY5mUybV0+KbD8zMBoRXmmZmZpmcNM3MzDI5aZqZmWVy0jQzM8vkG4FqXPuGDhrn3VbtMMxsAPmTharHK00zM7NMTppmZmaZnDTNzMwyOWmamZllctI0MzPL5KRpZmaWyUnTzMwsU00nTUkTJJ3RS5tGSSdk9NUoafXARWdmZiNNTSdNYALQY9IEGoFek2YlJPlDI8zMalCtJ80LgH0krZS0ID1WS2qXNLukzeGpzdlpRblU0iPpcVjOQJJOkXSLpHuBeyTtKuknktokLZM0NbXrrn6+pEVp7F9J+qiki1Ksd0gak9pdIOkXaf9vdBPLXEktklq2bOro72toZmZJra+I5gEHRkSTpOOB04CDgInAw5LuT23OiYi/B5C0A/CBiHhZ0mTgOqA5c7xpwNSI+KOkbwMrIuJYSe8FrgaagPO6qQfYB3gPsD/wIHB8RHxB0k3ARyQtBY4D9ouIkDShqyAiYiGwEGBsw+TIfbHMzKxntb7SLDUDuC4itkTEM8B9wDu6aDcGuEJSO7CYIoHl
ujsi/lgy3g8AIuJeYDdJO/dQD3B7RLwKtAN1wB2pvp3iNHIH8DLw35I+CmyqIDYzM+un0ZQ0c50NPEOxIm0Gtqtg3z/1c+zNABGxFXg1IjpXiVuBbSPiNeAQ4Hrg73k9qZqZ2RCo9aT5IrBTKi8FZkuqk7Q7MBNYXtYGoB54OiWukyhWfH2xFDgRQNIRwB8i4oUe6nslaTxQHxE/pUjuB/UxNjMz64OavqYZEc9JeiC9VeR2oA1YBQTwhYj4naTngC2SVgFXAd8FbpB0MsVKrq+rx/nAlZLaKE6jzumlPsdOwM2SxgECPt/H2MzMrA/0+hlAq0VjGyZHw5yLqx2GmQ0gf5/m4JPUGhF/dRNorZ+eNTMzGzA1fXp2MEj6IHBhWfW6iDiuGvGYmdnQcdKsUETcCdxZ7TjMzGzoOWnWuCmT6mnx9Q8zswHha5pmZmaZnDTNzMwyOWmamZllctI0MzPL5BuBalz7hg4a591W7TDMhpTf/G+DxStNMzOzTE6aZmZmmZw0zczMMjlpmpmZZXLSNDMzy+SkaWZmlslJ08zMLFOvSVNSo6TVgxWApJ8PVt/9VTp3Sc2SLql2TGZmVj1V/3CDiDis2jHkiIgWoKXacZiZWfXknp6tk3SFpDWS7pK0vaQmScsktUm6SdIuAJKWSGpO5YmS1qfyAZKWS1qZ9pmc6l9KP49I+14v6TFJ10pS2nZUqmuVdImkW7sLVNJ8SYskLZX0K0kflXSRpHZJd0gak9pNl3Rf6vNOSQ0l9askrQLOLOn3iM5xJR0i6UFJKyT9XNK+qf4USTemcZ6QdFFPL6qk70lqSa/reSX1Xc5X0o6Srkyv4wpJx3TT79zUb8uWTR09hWBmZhXITZqTgUsj4gBgI3A8cDXwxYiYCrQDX+mlj9OA/4qIJqAZ+E0XbQ4GPgfsD+wNvFvSOOBy4MMRMR3YPSPefYD3AkcD1wA/i4gpwJ+Bj6TE+W1gVurzSuBrad/vA2dFxEE99P8YcHhEHAz8G/AfJduagNnAFGC2pD176OfciGgGpgJ/J2lqL/M9F7g3Ig4B3gMskLRjeacRsTAimiOiuW6H+h6GNzOzSuSenl0XEStTuZUiKU2IiPtS3SJgcS99PAicK+nNwI0R8UQXbZZHxG8AJK0EGoGXgKciYl1qcx0wt5exbo+IVyW1A3XAHam+PfW5L3AgcHdazNYBT0uakOZ1f2r/A+DDXfRfDyxKq+UAxpRsuyciOtIcfgHsBfy6mzg/LmkuxXFooPjPwjY9zPdI4GhJ56Tn44C3AI/2/HKYmdlAyE2am0vKW4AJPbR9jddXsOM6KyPih5IeAj4C/FTSP0fEvb2M09drrpvTmFslvRoRkeq3pj4FrImIQ0t3Skkzx1cpVq/HSWoElpSPnXQ7B0lvBc4B3hERz0u6ipLXqxsCjo+ItZlxmpnZAOrrW046gOclHZ6enwR0rjrXA9NTeVbnDpL2plhBXQLcTHFKMsdaYO+UnKA49dlfa4HdJR2aYhsj6YCI2AhslDQjtTuxm/3rgQ2pfEofY9gZ+BPQIWkPXl/R9jTfO4GzSq71HtzHsc3MrA/68z7NORTX1NooruOdn+q/AZwuaQUwsaT9x4HV6bTrgRTXRHsVEX8GzgDukNQKvEiRtPssIl6hSOgXpht+VgKdd/GeClya4lQ3XVwEfD3NsU+r4YhYBayguD76Q+CBVN/TfL9KcSq4TdKa9NzMzIaIXj9zOXxJGh8RL6UV1qXAExHxrWrHNVgGcr5jGyZHw5yLBzZAs2HO36dp/SWpNd2o+QYj5ROBPpVWfmsoTo1eXuV4Bttom6+Z2YhQ9Q83yJFWWW9YaUk6FfhsWdMHIuJMhpl0A9TYsuqTIqK9q/ZdzdfMzKpvRCTNrkTE9yneUznsRcQ7qx2DmZn130g5PWtmZlZ1I3alaXmmTKqnxTdFmJkNCK80zczMMjlpmpmZZXLSNDMzy+SkaWZmlsk3AtW49g0dNM67rdphmNkQ8CchDT6vNM3MzDI5aZqZmWVy0jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMNZs0JS2R1JzKP5U0YQD7Pk3SyQPVn5mZjQyj4sMNIuKoAe7vsoHsz8zMRoZhtdKU1CjpMUlXSXpc0rWS3i/pAUlPSDpE0o6SrpS0XNIKScekfbeX9CNJj0q6Cdi+pN/1kiam8k8ktUpaI2luSZuXJH1N0ipJyyTt0UOc8yWdk8pLJF2Y4nlc0uGpvk7SNyStltQm6axU/74Ud3uax9iSGL8uaaWkFknTJN0p6ZeSTisZ+18kPZz6PK+b+OamPlq2bOroxxExM7NSwyppJm8Dvgnslx4nADOAc4AvAecC90bEIcB7gAWSdgROBzZFxNuBrwDTu+n/kxExHWgGPiNpt1S/I7AsIg4C7gc+VUHM26Z4PpfGBpgLNAJNETEVuFbSOOAqYHZETKFY6Z9e0s//REQTsDS1mwW8CzgPQNKRwGTgEKAJmC5pZnkwEbEwIpojorluh/oKpmFmZj0ZjklzXUS0R8RWYA1wT0QE0E6RhI4E5klaCSwBxgFvAWYC1wBERBvQ1k3/n5G0ClgG7EmRhABeAW5N5dY0Vq4bu9jv/cDlEfFaiumPwL5pfo+nNotS3J1uST/bgYci4sWI+D2wOV2TPTI9VgCPUPynYjJmZjYkhuM1zc0l5a0lz7dSxLsFOD4i1pbuJKnXjiUdQZHMDo2ITZKWUCRdgFdTciaNUclr0xljpft110/pvDufbwsI+HpEXN6PMczMrI+G40qzN3cCZyllSUkHp/r7KU7lIulAYGoX+9YDz6eEuR/Fqc/Bcjfwz5K2TTHtCqwFGiW9LbU5Cbivgj7vBD4paXzqc5KkNw1gzGZm1oORmDS/CowB2iStSc8BvgeMl/QocD7FqdJydwDbpjYXUJyiHSz/G/ifFOcq4ISIeBk4FVgsqZ1iBZl9J25E3AX8EHgw7X89sNOAR25mZl3S62ckrRaNbZgcDXMurnYYZjYE/H2aA0dSa0Q0l9ePxJWmmZlZVQzHG4GGDUnnAh8rq14cEV+rRjxmZlZdTpo9SMnRCdLMzAAnzZo3ZVI9Lb7OYWY2IHxN08zMLJOTppmZWSYnTTMzs0xOmmZmZpmcNM3MzDI5aZqZmWVy0jQzM8vkpGlmZpbJSdPMzCyTv+Wkxkl6keJ7PGvJROAP1Q5iANXafKD25lRr8wHPqTd7RcTu5ZX+GL3at7arr7cZySS11NKcam0+UHtzqrX5gOfUVz49a2ZmlslJ08zMLJOTZu1bWO0ABkGtzanW5gO1N6damw94Tn3iG4HMzMwyeaVpZmaWyUnTzMwsk5NmjZL0IUlrJT0paV614+mNpPWS2iWtlNSS6naVdLekJ9LPXVK9JF2S5tYmaVpJP3NS+yckzRniOVwp6VlJq0vqBmwOkqan1+jJtK+qMJ/5kjak47RS0lEl2/41xbZW0gdL6rv8XZT0VkkPpfofS9pukOezp6SfSfqFpDWSPpvqR/Ix6m5OI/k4jZO0XNKqNKfzeopD0tj0/Mm
0vbGvc80SEX7U2AOoA34J7A1sB6wC9q92XL3EvB6YWFZ3ETAvlecBF6byUcDtgIB3AQ+l+l2Bp9LPXVJ5lyGcw0xgGrB6MOYALE9tlfb9cBXmMx84p4u2+6ffs7HAW9PvX11Pv4vA/wE+kcqXAacP8nwagGmpvBPweIp7JB+j7uY0ko+TgPGpPAZ4KL2mXcYBnAFclsqfAH7c17nmPLzSrE2HAE9GxFMR8QrwI+CYKsfUF8cAi1J5EXBsSf3VUVgGTJDUAHwQuDsi/hgRzwN3Ax8aqmAj4n7gj2XVAzKHtG3niFgWxV+Eq0v6Gsr5dOcY4EcRsTki1gFPUvwedvm7mFZg7wWuT/uXvjaDIiKejohHUvlF4FFgEiP7GHU3p+6MhOMUEfFSejomPaKHOEqP3/XA+1LcFc01Nz4nzdo0Cfh1yfPf0PM/pOEggLsktUqam+r2iIinU/l3wB6p3N38huO8B2oOk1K5vL4aPp1OV17ZeSqTyuezG7AxIl4rqx8S6RTewRSrmJo4RmVzghF8nCTVSVoJPEvxn5Jf9hDHX2JP2ztS3IPyd8JJ04aLGRExDfgwcKakmaUb0//cR/T7o2phDsD3gH2AJuBp4JvVDadyksYDNwCfi4gXSreN1GPUxZxG9HGKiC0R0QS8mWJluF+VQ/oLJ83atAHYs+T5m1PdsBURG9LPZ4GbKP6hPJNOeZF+Ppuadze/4TjvgZrDhlQurx9SEfFM+oO2FbiC4jhB5fN5juJ057Zl9YNK0hiK5HJtRNyYqkf0MepqTiP9OHWKiI3Az4BDe4jjL7Gn7fUp7kH5O+GkWZseBianu822o7g4fkuVY+qWpB0l7dRZBo4EVlPE3Hln4hzg5lS+BTg53d34LqAjnV67EzhS0i7pdNSRqa6aBmQOadsLkt6VrtecXNLXkOlMLslxFMcJivl8It3J+FZgMsVNMV3+LqYV3c+AWWn/0tdmsGIX8N/AoxHxnyWbRuwx6m5OI/w47S5pQipvD3yA4lptd3GUHr9ZwL0p7ormmh3gQNzt5Mfwe1Dc+fc4xbWAc6sdTy+x7k1xB9sqYE1nvBTXJe4BngD+L7BrqhdwaZpbO9Bc0tcnKS74PwmcOsTzuI7iVNirFNdJ/mkg5wA0U/zx+yXwHdIneg3xfH6Q4m1Lf2gaStqfm2JbS8ldo939LqbjvjzNczEwdpDnM4Pi1GsbsDI9jhrhx6i7OY3k4zQVWJFiXw38W09xAOPS8yfT9r37Otechz9Gz8zMLJNPz5qZmWVy0jQzM8vkpGlmZpbJSdPMzCyTk6aZmVkmJ00zM7NMTppmZmaZ/j9lo9nx8XrxIQAAAABJRU5ErkJggg==" + }, + "metadata": { + "needs_background": "light" + } + } + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 8, + "source": [ + "''' pickle and save the automl object '''\n", + "import pickle\n", + "with open('automl.pkl', 'wb') as f:\n", + " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": 9, + "source": [ + "''' compute predictions of testing dataset ''' \n", + "y_pred = automl.predict(X_test)\n", + "print('Predicted labels', y_pred)\n", + "print('True labels', y_test)" + ], "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ - "Predicted labels [138120.83608579 254303.34992791 142506.66563931 ... 188177.25137713\n", - " 267813.20015477 274066.51821463]\n", + "Predicted labels [144012.37488361 251501.98425004 147503.3849682 ... 
219178.01297482\n", + " 213834.88677304 272956.11149784]\n", "True labels 14740 136900.0\n", "10101 241300.0\n", "20566 200700.0\n", @@ -377,67 +405,44 @@ ] } ], - "source": [ - "''' compute predictions of testing dataset ''' \n", - "y_pred = automl.predict(X_test)\n", - "print('Predicted labels', y_pred)\n", - "print('True labels', y_test)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "r2 = 0.8475173393496316\n", - "mse = 2015593047.2587376\n", - "mae = 30018.530184056573\n" - ] - } - ], + } + }, + { + "cell_type": "code", + "execution_count": 10, "source": [ "''' compute different metric values on testing dataset'''\n", "from flaml.ml import sklearn_metric_loss_score\n", "print('r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))\n", "print('mse', '=', sklearn_metric_loss_score('mse', y_pred, y_test))\n", "print('mae', '=', sklearn_metric_loss_score('mae', y_pred, y_test))" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, + ], "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.25912534572860485, 'subsample': 0.9266743941610592, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.25912534572860485, 'subsample': 0.9266743941610592, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 24, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217073, 'reg_lambda': 0.27901659190538414}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 24, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'log_max_bin': 10, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217073, 'reg_lambda': 0.27901659190538414}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 36, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 36, 
'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'log_max_bin': 10, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'num_leaves': 4, 'min_child_samples': 46, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20, 'num_leaves': 4, 'min_child_samples': 46, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'log_max_bin': 9, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'num_leaves': 11, 'min_child_samples': 52, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 20, 'num_leaves': 11, 'min_child_samples': 52, 'learning_rate': 1.0, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 37, 'num_leaves': 15, 'min_child_samples': 93, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 37, 'num_leaves': 15, 'min_child_samples': 93, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 75, 'num_leaves': 32, 'min_child_samples': 83, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.3864406937587945}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 75, 'num_leaves': 32, 'min_child_samples': 83, 'learning_rate': 0.19997653978110663, 'subsample': 0.8895588746662894, 'log_max_bin': 7, 'colsample_bytree': 0.663557757490723, 'reg_alpha': 0.03147131714846291, 'reg_lambda': 0.3864406937587945}}\n", - "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 81, 'num_leaves': 66, 'min_child_samples': 93, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 81, 'num_leaves': 66, 'min_child_samples': 93, 'learning_rate': 0.07560024606664352, 'subsample': 0.8756054034199897, 'log_max_bin': 7, 'colsample_bytree': 0.7142272555842307, 'reg_alpha': 0.00219854653612346, 'reg_lambda': 2.9360090402842274}}\n" + "r2 = 0.8540590968156087\n", + "mse = 1929120783.4023921\n", + "mae = 28944.167002684408\n" ] } ], + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + } + }, + { + "cell_type": 
"code", + "execution_count": 11, "source": [ "from flaml.data import get_output_from_log\n", "time_history, best_valid_loss_history, valid_loss_history, config_history, train_loss_history = \\\n", @@ -445,30 +450,35 @@ "\n", "for config in config_history:\n", " print(config)" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 20, 'learning_rate': 0.09999999999999995, 'log_max_bin': 8, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.26770501231052046, 'log_max_bin': 7, 'colsample_bytree': 1.0, 'reg_alpha': 0.001348364934537134, 'reg_lambda': 1.4442580148221913}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 4, 'num_leaves': 4, 'min_child_samples': 12, 'learning_rate': 0.26770501231052046, 'log_max_bin': 7, 'colsample_bytree': 1.0, 'reg_alpha': 0.001348364934537134, 'reg_lambda': 1.4442580148221913}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 9, 'learning_rate': 0.7260594590615893, 'log_max_bin': 9, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.0036840681931986645, 'reg_lambda': 0.7532480505730402}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 11, 'num_leaves': 4, 'min_child_samples': 9, 'learning_rate': 0.7260594590615893, 'log_max_bin': 9, 'colsample_bytree': 0.9285002286474459, 'reg_alpha': 0.0036840681931986645, 'reg_lambda': 0.7532480505730402}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 13, 'num_leaves': 5, 'min_child_samples': 5, 'learning_rate': 0.7590459488450945, 'log_max_bin': 8, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.001951378031519758, 'reg_lambda': 0.04792552866398477}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 13, 'num_leaves': 5, 'min_child_samples': 5, 'learning_rate': 0.7590459488450945, 'log_max_bin': 8, 'colsample_bytree': 0.8304072431299575, 'reg_alpha': 0.001951378031519758, 'reg_lambda': 0.04792552866398477}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 44, 'num_leaves': 4, 'min_child_samples': 4, 'learning_rate': 0.41929025492645006, 'log_max_bin': 8, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.009280655005879927}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 44, 'num_leaves': 4, 'min_child_samples': 4, 'learning_rate': 0.41929025492645006, 'log_max_bin': 8, 'colsample_bytree': 0.7610534336273627, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.009280655005879927}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 141, 'num_leaves': 17, 'min_child_samples': 3, 'learning_rate': 0.17402065726724145, 'log_max_bin': 8, 'colsample_bytree': 0.6649148062238498, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.006761362450996487}, 'Best Learner': 'lgbm', 'Best 
Hyper-parameters': {'n_estimators': 141, 'num_leaves': 17, 'min_child_samples': 3, 'learning_rate': 0.17402065726724145, 'log_max_bin': 8, 'colsample_bytree': 0.6649148062238498, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.006761362450996487}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 88, 'num_leaves': 70, 'min_child_samples': 4, 'learning_rate': 0.09348689572544734, 'log_max_bin': 7, 'colsample_bytree': 0.5967846088487322, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.001895876878997586}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 88, 'num_leaves': 70, 'min_child_samples': 4, 'learning_rate': 0.09348689572544734, 'log_max_bin': 7, 'colsample_bytree': 0.5967846088487322, 'reg_alpha': 0.006958608037974516, 'reg_lambda': 0.001895876878997586}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 166, 'num_leaves': 34, 'min_child_samples': 2, 'learning_rate': 0.11549142333280608, 'log_max_bin': 8, 'colsample_bytree': 0.6469726212777197, 'reg_alpha': 0.032619809462956464, 'reg_lambda': 0.00406523645285879}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 166, 'num_leaves': 34, 'min_child_samples': 2, 'learning_rate': 0.11549142333280608, 'log_max_bin': 8, 'colsample_bytree': 0.6469726212777197, 'reg_alpha': 0.032619809462956464, 'reg_lambda': 0.00406523645285879}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 108, 'num_leaves': 169, 'min_child_samples': 2, 'learning_rate': 0.07154128424526202, 'log_max_bin': 9, 'colsample_bytree': 0.591579264701285, 'reg_alpha': 0.01435520144866301, 'reg_lambda': 0.006874802748054271}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 108, 'num_leaves': 169, 'min_child_samples': 2, 'learning_rate': 0.07154128424526202, 'log_max_bin': 9, 'colsample_bytree': 0.591579264701285, 'reg_alpha': 0.01435520144866301, 'reg_lambda': 0.006874802748054271}}\n", + "{'Current Learner': 'lgbm', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 256, 'num_leaves': 79, 'min_child_samples': 4, 'learning_rate': 0.06020420143131026, 'log_max_bin': 10, 'colsample_bytree': 0.6501336877031868, 'reg_alpha': 0.11324823332770402, 'reg_lambda': 0.007122448821650475}, 'Best Learner': 'lgbm', 'Best Hyper-parameters': {'n_estimators': 256, 'num_leaves': 79, 'min_child_samples': 4, 'learning_rate': 0.06020420143131026, 'log_max_bin': 10, 'colsample_bytree': 0.6501336877031868, 'reg_alpha': 0.11324823332770402, 'reg_lambda': 0.007122448821650475}}\n" + ] + } + ], + "metadata": { + "slideshow": { + "slide_type": "subslide" + }, + "tags": [] + } }, { "cell_type": "code", - "execution_count": 13, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAdpklEQVR4nO3df7xVdZ3v8de7I+apxKNBDhxASInCLEjSrKbUyQH7IaRm6txuWRM2o06lF5OmzHS82TDZtcdlatDr+GP8TYhUJDlpNqkFKAqC4aD5gwMl/jhqdhI4fO4fa23cbNfZZ5/DWWftvXk/H4/zOHt993et9TlfZX/29/td67sUEZiZmVV6TdEBmJlZfXKCMDOzTE4QZmaWyQnCzMwyOUGYmVkmJwgzM8vkBGFWhaS/lLS26DjMiuAEYXVL0mOSPlRkDBHxXxExIa/jS5oq6ZeSXpS0SdKdko7J63xmfeEEYbs0SS0Fnvt44CbgKmAUsC9wLvCxfhxLkvzv2QaU/4eyhiPpNZLOkfSIpGck3Shpn7L3b5L0e0nPp9/ODyx77wpJ35e0WNJLwBFpT+V/SVqZ7nODpD3S+odLWl+2f4910/fPlrRR0gZJfyspJB2Q8TcIuBi4ICIui4jnI2JbRNwZEZ9P65wn6T/K9hmbHm+3dPsXki6UdBfwJ2CWpOUV5/mypEXp69dK+hdJT0j6g6QfSGrdyf8c1sScIKwRnQHMAD4IjASeA+aWvf9TYDzwJuA+4JqK/U8GLgT2BH6Vlp0ATAPGAe8APlPl/Jl1JU0DzgQ+BBwAHF7lGBOA0cD8KnVq8SlgJsnf8gNggqTxZe+fDFybvr4IeAswKY2vnaTHYpbJCcIa0ReAf4yI9RHxMnAecHzpm3VEXB4RL5a9905Je5Xtf0tE3JV+Y/9zWva9iNgQEc8CPyL5EO1JT3VPAP49IlZHxJ/Sc/fkjenvjbX+0T24Ij3f1oh4HrgFOAkgTRRvBRalPZaZwJcj4tmIeBH438CJO3l+a2JOENaI9gNultQpqRN4COgG9pXUIumidPjpBeCxdJ9hZfs/mXHM35e9/hPwhirn76nuyIpjZ52n5Jn094gqdWpReY5rSRMESe9hYZqshgOvA+4ta7db03KzTE4Q1oieBI6OiLaynz0iooPkQ3E6yTDPXsDYdB+V7Z/XEsYbSSabS0ZXqbuW5O84rkqdl0g+1Ev+IqNO5d9yGzBc0iSSRFEaXnoa6AIOLGuzvSKiWiK0XZwThNW7IZL2KPvZjWSs/UJJ+wFIGi5pelp/T+Blkm/oryMZRhksNwKnSHqbpNcBX++pYiTr7J8JfF3SKZKGppPv75c0L612P/ABSWPSIbLZvQUQEVtIroyaA+xDkjCIiG3ApcB3Jb0JQFK7pKn9/mut6TlBWL1bTPLNt/RzHnAJsAj4maQXgV8Dh6b1rwIeBzqANel7gyIifgp8D7gDWFd27pd7qD8f+CTwWWAD8Afgn0jmEYiI24AbgJXAvcCPawzlWpIe1E0RsbWs/CuluNLht/8kmSw3yyQ/MMgsH5LeBjwIvLbig9qsIbgHYTaAJH08vd9gb+DbwI+cHKxROUGYDaxTgaeAR0iurPq7YsMx6z8PMZmZWSb3IMzMLNNuRQfQV8OGDYuxY8cWHYaZWUO59957n46IPt0Y2XAJYuzYsSxfvrz3imZmtp2kx/u6j4eYzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI13FVMZma7moUrOpizZC0bOrsY2dbKrKkTmDG5PffzOkGYWS6K+lBrNgtXdDB7wSq6tnQD0NHZxewFqwByb08PMZnZgCt9qHV0dhG88qG2cEVH0aE1nDlL1m5PDiVdW7qZs2Rt7ud2D8LMBlxPH2pnz1/JdUufKCiqxtTR2ZVZvqGH8oHkHoSZDbiePrw2d28b5Ega3+4t2R/TI9tacz+3exBmNuBGtrVmfvNtb2vlhlMPKyCixlU5BwHQOqSFWVPzfxigexBmNuBmTZ1A65CWHcoG60Ot2cyY3M63jj2I9rZWRJJkv3XsQb6KyazZNeuVPqW/4ez5K9ncvY32JvrbijBjcnshbZdrgpA0jeQB8y3AZRFxUcX7Y4Argba0zjkRsTjPmMzqRZGXLw6GGZPbt09Ie1ipMeWWICS1AHOBo4D1wDJJiyJiTVm1rwE3RsT3JU0EFgNj84rJrJ7sClf6rNn4AhNHDC06DOunPOcgDgHWRcSjEbEZuB6YXlEngNL/PXsBG3KMx6yu7ApX+kwcMZTpkxq/N7SrynOIqR14smx7PXBoRZ3zgJ9JOgN4PfChrANJmgnMBBgzZsyAB2pWBF/pY/Wu6KuYTgKuiIhRwIeBqyW9KqaImBcRUyJiyvDhfXpinlnd8pU+Vu/y7EF0AKPLtkelZeU+B0wDiIh7JO0BDAOeyjEus7rgK32s3uWZIJYB4yWNI0kMJwInV9R5Avgr4ApJbwP2ADblGJNZXfGVPlbPchtiioitwOnAEuAhkquVVks6X9IxabWzgM9LegC4DvhMREReMZmZWe1yvQ8ivadhcUXZuWWv1wDvyzMGMzPrn6Inqc3MrE45QZiZWSYnCDMzy+QEYWZmmbyaqzWdZl0h1WywOUFYU2n2FVLNBpMThDWVRlwh1SueWr3yHIQ1lUZcIdUrnlq9cg/CmopXSDUbOO5BWFPxCqlmA8c9CGsqXiHVbOA4QVjT8QqpZgPDQ0xmZpbJPQjbgW8yM7MSJwjbzjeZmVk5JwjbrhFvMuuJbz4z23meg7DtGvEms5745jOznecehG3nm8zMrJx7ELadbzIzs3LuQdh2vsnMzMo5QdgOfJOZmZV4iMnMzDI5QZiZWSYnCDMzy+QEYWZmmZwgzMwsU64JQtI0SWslrZN0Tsb735V0f/rzsKTOPOMxM7Pa5XaZq6QWYC5wFLAeWCZpUUSsKdWJiC+X1T8DmJxXPLs6r9JqZn2VZw/iEGBdRDwaEZuB64HpVeqfBFyXYzy7rNIqrR2dXQSvrNK6cEVH0aGZWR3L80a5duDJsu31wKFZFSXtB4wDbs8xnl1WX1dp9UqoZgb1M0l9IjA/Irqz3pQ0U9JyScs3bdo0yKE1vr6u0uqVUM0M8u1BdACjy7ZHpWVZTgRO6+lAETEPmAcwZcqUGKgAdxVepdXM+iPPHsQyYLykcZJ2J0kCiyorSXorsDdwT46x7NK8SquZ9UduCSIitgKnA0uAh4AbI2K1pPMlHVNW9UTg+ohwzyAnMya3861jD2L3luQ/d3tbK9869iBfxWRmVanRPpenTJkSy5cvLzqMhvTJf0s6aR5WMtv1SLo3Iqb0ZZ96maQ2M7M64wRhZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwszMMjlBmJlZJicIMzPL5ARhZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwszMMjlBmJlZJicIMzPL5ARhZmaZnCDMzCyTE4SZmWVygjAzs0xVE4SkoZL2zyh/R34hmZlZPegxQUg6Afgt8ENJqy
W9u+ztK/IOzMzMirVblfe+ChwcERslHQJcLWl2RNwMaHDCa1wLV3QwZ8laNnR2MbKtlVlTJzBjcnvRYZmZ1axagmiJiI0AEbFU0hHAjyWNBmJQomtQC1d0MHvBKrq2dAPQ0dnF7AWrAJwkzKxhVEsQL0raPyIeAUh7EocDC4EDByO4RjVnydrtyaGka0s3Z89fyXVLnygoKliz8QUmjhha2PnNrLFUSxB/R8VQUkS8KGkacEKuUTW4DZ1dmeWbu7cNciQ7mjhiKNMnuQdjZrXpMUFExAOSWiTdERFHlJVvAa6p5eBpMrkEaAEui4iLMuqcAJxHMmz1QESc3Lc/of6MbGulIyNJtLe1csOphxUQkZlZ31W9zDUiuoFtkvbq64EltQBzgaOBicBJkiZW1BkPzAbeFxEHAl/q63nq0aypE2gd0rJDWeuQFmZNnVBQRGZmfVdtiKnkj8AqSbcBL5UKI+IfetnvEGBdRDwKIOl6YDqwpqzO54G5EfFcesyn+hB73SpNRJ89fyWbu7fR7quYzKwB1ZIgFqQ/fdUOPFm2vR44tKLOWwAk3UUyDHVeRNxaeSBJM4GZAGPGjOlHKINvxuT27RPSHlYys0bUa4KIiCtzPv944HBgFPBLSQdFRGdFDPOAeQBTpkzxJbZmZoMgz7WYOoDRZduj0rJy64FFEbElIn4HPEySMMzMrGB5JohlwHhJ4yTtDpwILKqos5Ck94CkYSRDTo/mGJOZmdUotwQREVuB04ElwEPAjRGxWtL5ko5Jqy0BnpG0BrgDmBURz+QVk5mZ1a7XOQhJbwFmAfuV14+II3vbNyIWA4srys4tex3AmemPmZnVkVquYroJ+AFwKdDdS10zM2sStSSIrRHx/dwjMTOzulLLHMSPJP29pBGS9in95B6ZmZkVqpYexKfT37PKygJ488CHY2Zm9aKWG+XGDUYgZmZWX2q5imkIydLfH0iLfgH8W7qqq5mZNalahpi+DwwB/jXd/lRa9rd5BWVmZsWrJUG8OyLeWbZ9u6QH8grIzMzqQy1XMXVL2r+0IenN+H4IM7OmV0sPYhZwh6RHSR5Buh9wSq5RmZlZ4Wq5iunn6ZPfSo9DWxsRL+cblpmZFa3HBCHpyIi4XdKxFW8dIImI6M9DhMzMrEFU60F8ELgd+FjGe0H/njJnZmYNoscEERHfSF+enz7MZztJvnnOzKzJ1XIV0w8zyuYPdCBmZlZfqs1BvBU4ENirYh5iKLBH3oGZmVmxqs1BTAA+CrSx4zzEi8Dn8wzKzMyKV20O4hbgFkmHRcQ9gxiTmZnVgVpulFsh6TSS4abtQ0sR8dncojIzs8LVMkl9NfAXwFTgTmAUyTCTmZk1sVoSxAER8XXgpYi4EvgIcGi+YZmZWdFqSRCl5z50Sno7sBfwpvxCMjOzelDLHMQ8SXsDXwcWAW8Azs01KjMzK1wti/Vdlr68Ez+H2sxsl1HtRrkzq+0YERcPfDhmZlYvqvUg9kx/TwDeTTK8BMlNc0vzDMrMzIrX4yR1RHwzIr5JclnruyLirIg4CzgYGFPLwSVNk7RW0jpJ52S8/xlJmyTdn/74OddmZnWilknqfYHNZdub07KqJLUAc4GjgPXAMkmLImJNRdUbIuL0GuM1M7NBUkuCuApYKunmdHsGcEUN+x0CrIuIRwEkXQ9MByoThJmZ1aFe74OIiAtJnkH9XPpzSkR8q4ZjtwNPlm2vT8sqHSdppaT5kkZnHUjSTEnLJS3ftGlTDac2M7Od1WOCkDQ0/b0P8BjJkhtXA4+nZQPhR8DYiHgHcBtwZValiJgXEVMiYsrw4cMH6NRmZlZNtSGma0mW+76X5BGjJUq3e7snogMo7xGMSsu2i4hnyjYvA/65l2Oamdkgqbbc90fT3/19vOgyYHz6eNIO4ETg5PIKkkZExMZ08xjgoX6ey8zMBli1G+XeVW3HiLivl/e3SjodWAK0AJdHxGpJ5wPLI2IR8A+SjgG2As8Cn+lj/GZmlpNqQ0zfqfJeAEf2dvCIWAwsrig7t+z1bGB2b8cxM7PBV22I6YjBDMTMzOpLLfdBkC7zPZEdnyh3VV5BmZlZ8XpNEJK+ARxOkiAWA0cDvyK5gc7MzJpULQ8MOh74K+D3EXEK8E6ShwaZmVkTqyVBdEXENmBrevPcU+x4f4OZmTWhWuYglktqAy4luWnuj8A9uUZlZmaFq3YfxFzg2oj4+7ToB5JuBYZGxMpBic7MzApTrQfxMPAvkkYANwLXRcSKwQnLzMyKVu2BQZdExGHAB4FngMsl/VbSNyS9ZdAiNDOzQvQ6BxERjwPfBr4taTJwOXAuyfIZu6SFKzqYs2QtGzq7GNnWyqypE5gxOWslczOzxtXrVUySdpP0MUnXAD8F1gLH5h5ZnVq4ooPZC1bR0dlFAB2dXcxesIqFKzp63dfMrJFUm6Q+CjgJ+DCwFLgemBkRLw1SbHVpzpK1dG3p3qGsa0s3Z89fyXVLn9ihfM3GF5g4YuhghmdmNmCqDTHNJnkmxFkR8dwgxVP3NnR2ZZZv7t72qrKJI4YyfZKHnsysMVVbrK/X1Vp3RSPbWunISBLtba3ccOphBURkZpaPWu6ktjKzpk6gdciO8/OtQ1qYNXVCQRGZmeWjptVc7RWlq5XOnr+Szd3baPdVTGbWpJwg+mHG5PbtE9IeVjKzZuUhJjMzy+QEYWZmmZwgzMwskxOEmZllcoIwM7NMThBmZpbJCcLMzDI5QZiZWSYnCDMzy5RrgpA0TdJaSesknVOl3nGSQtKUPOMxM7Pa5ZYgJLUAc4GjgYnASZImZtTbE/gi8Ju8YjEzs77LswdxCLAuIh6NiM0kDxyanlHvApJHmv45x1jMzKyP8kwQ7cCTZdvr07LtJL0LGB0RP6l2IEkzJS2XtHzTpk0DH6mZmb1KYZPUkl4DXAyc1VvdiJgXEVMiYsrw4cPzD87MzHJNEB3A6LLtUWlZyZ7A24FfSHoMeA+wyBPVZmb1Ic8EsQwYL2mcpN2BE4FFpTcj4vmIGBYRYyNiLPBr4JiIWJ5jTGZmVqPcEkREbAVOB5YADwE3RsRqSedLOiav85qZ2cDI9YlyEbEYWFxRdm4PdQ/PMxYzM+sb30ltZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwszMMjlBmJlZJicIMzPL5ARhZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwszMMjlBmJlZJicIMzPL5ARhZmaZnCDMzCyTE4SZmWVygjAzs0xOEGZmlskJwszMMjlBmJlZJicIMzPL5ARhZmaZnCDMzCxTrglC0jRJayWtk3ROxvtfkLRK0v2SfiVpYp7xmJlZ7XJLEJJagLnA0cBE4KSMBHBtRBwUEZOAfwYuziseMzPrmzx7EIcA6yLi0YjYDFwPTC+vEBEvlG2+Hogc4zEzsz7YLcdjtwNPlm2vBw6trCTpNOBMYHfgyKwDSZoJzAQYM2bMgAdqZmavVvgkdUTMjYj9ga8AX+uhzryImBIRU4YPHz64AZqZ7aLyTBAdwOiy7VFpWU+uB2bkG
I+ZmfVBngliGTBe0jhJuwMnAovKK0gaX7b5EeC/c4zHzMz6ILc5iIjYKul0YAnQAlweEaslnQ8sj4hFwOmSPgRsAZ4DPp1XPGZm1jd5TlITEYuBxRVl55a9/mKe5zczs/4rfJLazMzqkxOEmZllcoIwM7NMThBmZpbJCcLMzDLlehVTvVi4ooM5S9ayobOLkW2tzJo6gRmT24sOy8ysrjV9gli4ooPZC1bRtaUbgI7OLmYvWAXgJGFmVkXTJ4g5S9ZuTw4lXVu6OXv+Sq5b+kS/j7tm4wtMHDF0Z8MzM6tbTT8HsaGzK7N8c/e2nTruxBFDmT7JPRAza15N34MY2dZKR0aSaG9r5YZTDysgIjOzxtD0PYhZUyfQOqRlh7LWIS3MmjqhoIjMzBpD0/cgShPRvorJzKxvmj5BQJIknBDMzPqm6YeYzMysf5wgzMwskxOEmZllcoIwM7NMThBmZpZJEVF0DH0iaRPweE6HHwY8ndOxd5Zj6x/H1j+OrX/qObYJEbFnX3ZouMtcI2J4XseWtDwipuR1/J3h2PrHsfWPY+ufeo+tr/t4iMnMzDI5QZiZWSYniB3NKzqAKhxb/zi2/nFs/dNUsTXcJLWZmQ0O9yDMzCyTE4SZmWVygkhJekzSKkn39+dysAGO5XJJT0l6sKxsH0m3Sfrv9PfedRTbeZI60ra7X9KHC4hrtKQ7JK2RtFrSF9PywtutSmz10G57SFoq6YE0tm+m5eMk/UbSOkk3SNq9jmK7QtLvytpt0mDHVhZji6QVkn6cbhfeblVi63O7OUHs6IiImFQH1zFfAUyrKDsH+HlEjAd+nm4X4QpeHRvAd9O2mxQRiwc5JoCtwFkRMRF4D3CapInUR7v1FBsU324vA0dGxDuBScA0Se8Bvp3GdgDwHPC5OooNYFZZu91fQGwlXwQeKtuuh3YrqYwN+thuThB1KCJ+CTxbUTwduDJ9fSUwY1CDSvUQW+EiYmNE3Je+fpHkH0Y7ddBuVWIrXCT+mG4OSX8COBKYn5YX1W49xVYXJI0CPgJclm6LOmi3rNj6ywniFQH8TNK9kmYWHUyGfSNiY/r698C+RQaT4XRJK9MhqEKGv0okjQUmA7+hztqtIjaog3ZLhyLuB54CbgMeATojYmtaZT0FJbTK2CKi1G4Xpu32XUmvLSI24P8AZwPb0u03UiftxqtjK+lTuzlBvOL9EfEu4GiSIYAPFB1QTyK5NrluvkkB3wf2JxkG2Ah8p6hAJL0B+CHwpYh4ofy9otstI7a6aLeI6I6IScAo4BDgrUXEkaUyNklvB2aTxPhuYB/gK4Mdl6SPAk9FxL2Dfe7eVImtz+3mBJGKiI7091PAzST/UOrJHySNAEh/P1VwPNtFxB/Sf8jbgEspqO0kDSH5AL4mIhakxXXRblmx1Uu7lUREJ3AHcBjQJqm0VtsooKOwwNghtmnpkF1ExMvAv1NMu70POEbSY8D1JENLl1Af7faq2CT9R3/azQkCkPR6SXuWXgN/DTxYfa9Btwj4dPr608AtBcayg9IHcOrjFNB26fjv/wMeioiLy94qvN16iq1O2m24pLb0dStwFMkcyR3A8Wm1ototK7bfliV8kYzxD3q7RcTsiBgVEWOBE4HbI+JvqIN26yG2/9Gfdmu41Vxzsi9wc9Ju7AZcGxG3FhWMpOuAw4FhktYD3wAuAm6U9DmS5c5PqKPYDk8vmQvgMeDUAkJ7H/ApYFU6Zg3wVeqj3XqK7aQ6aLcRwJWSWki+MN4YET+WtAa4XtI/AStIEly9xHa7pOGAgPuBLxQQW0++QvHt1pNr+tpuXmrDzMwyeYjJzMwyOUGYmVkmJwgzM8vkBGFmZpmcIMzMLJMThNWVdAmAL5VtL5F0Wdn2dySdWWX/KyQdn77+haRXLbwoaYiki5Ss8HqfpHskHZ2+95ikYf2Ie/t5e3h/brqC5hpJXWUrah4vaXHpev+BJGlEaSXPHt7fXdIvy27sMtuBE4TVm7uA9wJIeg0wDDiw7P33Anfv5DkuILnG/u3p8iozgD138phVRcRp6ZIRHwYeKVtRc35EfDi9U3ignUlyh3ZPMW0mWeH2kzmc25qAE4TVm7tJlnqAJDE8CLwoae90cbG3AfdJOlfSMkkPSpqX3h3aK0mvAz4PnJEuOVBa8uLGjLpnpsd/sKJX8z/TBc8ekHR1xn4XpD2KlhpjekzSMEljJf023fdhSddI+pCku9LeziFp/dcrWdxvqZL1/qf3cOjjgFvTfQ5M69+fxj4+rbMQ+Jta4rRdj7uWVlciYoOkrZLGkPQW7iFZEfMw4HlgVURslvR/I+J8gPRD+qPAj2o4xQHAE5UL+VWSdDBwCnAoyZ2nv5F0J7AZ+Brw3oh4WtI+FfvNIemNnBL9uwv1AOATwGeBZcDJwPuBY0juvp4B/CPJ8gmfTYemlkr6z4h4qSyOccBzpSRIctfsJRFxjZKH2JSS14Mki7eZvYp7EFaP7iZJDqUEcU/Z9l1pnSOUPLlrFclCaQdmHWgnvB+4OSJeSp9JsAD4y/RcN0XE0wARUf5sjK8De0XEF/qZHAB+FxGr0gX8VpM87CiAVcDYtM5fA+eky3b8AtgDGFNxnBHAprLte4CvSvoKsF9EdKXxdwObla5FZlbOCcLqUWke4iCSb7i/JulBvBe4W9IewL8Cx0fEQSTj7HvUeOx1wBhJQwc86uQb/8GVvYo+erns9bay7W280uMXcFzZPMaYiKh8clgXZW0SEdeS9EK6gMWSjiyr+1rgzzsRszUpJwirR3eTDBk9my6H/SzQRpIk7uaVD76nlTxjocerhypFxJ9IFlC7JB1qKa0a+omKqv8FzJD0OiUr/H48Lbsd+ISkN6b7lieDW0kWB/xJzt/IlwBnlOZdJE3OqPMwr/Q4kPRm4NGI+B7JCqPvSMvfCDwdEVtyjNcalBOE1aNVJFcv/bqi7PmIeDq94udSkt7FEpJv7n3xNZLhlzWSHgR+DFQ+XOg+kudvLyV5+ttlEbEiIlYDFwJ3SnoAuLhiv5vS2BYpWaI6DxeQPH5zpaTV6fYO0vmIRyQdkBadADyYDku9HbgqLT8C+ElOcVqD82quZk1K0seBgyPia1XqLADOiYiHBy8yaxS+ismsSUXEzaWhsCzpENtCJwfriXsQZmaWyXMQZmaWyQnCzMwyOUGYmVkmJwgzM8vkBGFmZpn+Pw3GbSTWTxpFAAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], + "execution_count": 12, "source": [ "import numpy as np\n", "\n", @@ -478,115 +488,133 @@ "plt.scatter(time_history, 1 - np.array(valid_loss_history))\n", "plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n", "plt.show()" - ] + ], + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEWCAYAAABrDZDcAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAdJklEQVR4nO3dfZwcVZ3v8c83QyBBHgJmZCEPJCwhCgoJRhAfAXUTWAVckAX2tVfxIboLrCveIFFEFi73hZtdvPi6US9wEfXyHEMIGIkogisgSSBAEnAwPEgyAUkIQYwjSSa/+0dVh0rT09Mzmeqe7vq+X69+TdepU1W/A53+9TlVdUoRgZmZFdeQRgdgZmaN5URgZlZwTgRmZgXnRGBmVnBOBGZmBedEYGZWcE4EZlVIer+kjkbHYZYnJwIbtCQ9K+nDjYwhIv4rIibmtX9JUyX9StKrktZKulfSCXkdz6wSJwIrNEltDTz2KcAtwA+B0cA+wIXAx/qxL0nyv2frF39wrOlIGiLpfElPSXpJ0s2S9s6sv0XSC5JeSX9tH5JZd62k70paIGkjcEza8/jvkh5Lt7lJ0rC0/tGSVme277Fuuv48Sc9LWiPps5JC0oEV2iDgcuCSiLg6Il6JiK0RcW9EfC6tc5Gk/5fZZly6v53S5XskXSrpPuDPwAxJS8qO8yVJ89P3u0j6D0nPSfqDpO9JGr6D/zusBTgRWDM6BzgJ+CCwH/AyMDuz/qfABOAtwMPAdWXbnwFcCuwO/DotOxWYBowHDgU+VeX4FetKmgacC3wYOBA4uso+JgJjgDlV6tTiH4HpJG35HjBR0oTM+jOA69P3lwEHAZPS+EaR9ECs4JwIrBl9AfhaRKyOiNeAi4BTSr+UI+KaiHg1s+4wSXtmtr8tIu5Lf4H/JS37dkSsiYj1wO0kX5Y96anuqcD3I2JFRPw5PXZP3pz+fb7WRvfg2vR4WyLiFeA24HSANCG8FZif9kCmA1+KiPUR8SrwP4HTdvD41gKcCKwZ7Q/cKmmDpA3AE0A3sI+kNkmXpcNGfwSeTbcZmdl+VYV9vpB5/2dgtyrH76nufmX7rnSckpfSv/tWqVOL8mNcT5oISHoD89Kk1A7sCjyU+e92Z1puBedEYM1oFXBcRIzIvIZFRCfJl9+JJMMzewLj0m2U2T6vKXefJznpWzKmSt0OknacXKXORpIv75K/qlCnvC13Ae2SJpEkhNKw0DqgCzgk899sz4iolvCsIJwIbLAbKmlY5rUTyVj4pZL2B5DULunEtP7uwGskv7h3JRn+qJebgTMlvU3SrsDXe6oYyfzv5wJfl3SmpD3Sk+Dvk3RlWu0R4AOSxqZDWzN7CyAiNpNciTQL2JskMRARW4GrgG9JeguApFGSpva7tdYynAhssFtA8ku29LoIuAKYD/xM0qvAb4Aj0/o/BH4PdAKPp+vqIiJ+Cnwb+CWwMnPs13qoPwf4e+DTwBrgD8D/IBnnJyLuAm4CHgMeAu6oMZTrSXpEt0TElkz5V0pxpcNmPyc5aW0FJz+Yxiwfkt4GLAd2KftCNhtU3CMwG0CSPp5er78X8E3gdicBG+ycCMwG1ueBF4GnSK5k+qfGhmPWOw8NmZkVnHsEZmYFt1OjA+irkSNHxrhx4xodhplZU3nooYfWRUTFGwibLhGMGzeOJUuW9F7RzMy2kfT7ntZ5aMjMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgmu6qITOzopm3tJNZCztYs6GL/UYMZ8bUiZw0edSA7d+JwMxsEJu3tJOZc5fRtbkbgM4NXcycuwxgwJKBh4bMzAaxWQs7tiWBkq7N3cxa2DFgx3CPwLaTdxd0sB7bbLBas6GrT+X94URg29SjCzoYj202mO03YjidFb709xsxfMCO4URg2/TUBT1vzmPcsOi5XI+99LkNbOre2pBjmw1mw4YOYYhga2ai6OFD25gxdeAeLudEUMGODFE08/BGT13N8i/oPPR0jHoc22wwG7nbLgCsWt/Fpu6tjPJVQ/nbkSGKZh/e6KkLOmrEcG76/FG5Hvu9l93dsGObFZ0TQZkdGR5p9uGNenRBezJj6sTtkmg9j21WdE4EZXZkeKTZhzfq0QXtSekYzTqsZtbMnAjK7MjwiIc3dsxJk0f5i9+sAXxDWZkZUycyfGjbdmW1DlHsyLZmZo3iHkGZ0i/S8+Y81ufhEQ9vmFkzyjURSJoGXAG0AVdHxGVl68cCPwBGpHXOj4gFecZUi5Mmj9p2crevQzoe3jCzZpPb0JCkNmA2cBxwMHC6pIPLql0A3BwRk4HTgO/kFY+ZmVWW5zmCI4CVEfF0RGwCbgROLKsTwB7p+z2BNTnGY2ZmFeQ5NDQKWJVZXg0cWVb
nIuBnks4B3gR8OMd4zMysgkZfNXQ6cG1EjAaOB34k6Q0xSZouaYmkJWvXrq17kGZmrSzPRNAJjMksj07Lsj4D3AwQEQ8Aw4CR5TuKiCsjYkpETGlvb88pXDOzYsozESwGJkgaL2lnkpPB88vqPAd8CEDS20gSgX/ym5nVUW6JICK2AGcDC4EnSK4OWiHpYkknpNW+DHxO0qPADcCnIiIq79HMzPKQ630E6T0BC8rKLsy8fxx4b54xmJlZdY0+WWxmZg3mRGBmVnBOBGZmBedEYGZWcE4EZmYF50RgZlZwfh4ByUPny58hYGZWFIXvEcxb2snMucvo3NBFAJ0bupg5dxnr/vRao0MzM6uLwieCWQs76NrcvV1Z1+Zunl67sUERmZnVV+ETwZoKD5uH5EEJJ07yk8bMrPUVPhHsN2J4xfJRI4ZzxpFj6xyNmVn9FT4RzJg6keFD27YrGz60zSeMzawwCn/VUOlB8+fNeYxN3VsZlV415AfQm1lRFD4RQJIMblj0HAA3ff6oBkdjZlZfhR8aMjMrOicCM7OCK+zQUPndxMOGDmHkbrs0Oiwzs7orZCIo3U1cupGsc0MXQ9TgoMzMGqSQQ0OV7ibeGrBqfeWby8zMWlkhE0FPdxNv6t5a50jMzBqvkImg2t3EZmZFU8hE4LuJzcxeV8iTxb6b2MzsdYVMBOC7ic3MSgo5NGRmZq9zIjAzK7hcE4GkaZI6JK2UdH6F9d+S9Ej6elLShjzjMTOzN8rtHIGkNmA28BFgNbBY0vyIeLxUJyK+lKl/DjA5r3jMzKyyPHsERwArI+LpiNgE3AicWKX+6cANOcZjZmYV5HnV0ChgVWZ5NXBkpYqS9gfGA3f3sH46MB1g7Ngde3xkdrK5oW1DGLO3byIzs2IbLCeLTwPmRER3pZURcWVETImIKe3t7f0+SGmyuc4NXQTJlBLPrNvIvKWd/d6nmVmzyzMRdAJjMsuj07JKTqMOw0I9TTY3a2FH3oc2Mxu08kwEi4EJksZL2pnky35+eSVJbwX2Ah7IMRag58nmeio3MyuC3BJBRGwBzgYWAk8AN0fECkkXSzohU/U04MaIiLxiKelpsrmeys3MiiDXKSYiYgGwoKzswrLli/KMIWvG1InbPZAGPNmcmVmh5hryZHNmZm9UqEQAnmzOzKzcYLl81MzMGsSJwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4KomAkl7SPrrCuWH5heSmZnVU4+JQNKpwG+BH0taIeldmdXX5h2YmZnVR7UewVeBd0bEJOBM4EeSPp6uU+6RmZlZXVR7QllbRDwPEBGLJB0D3CFpDJD7g+bNzKw+qvUIXs2eH0iTwtHAicAhOcdlZmZ1Uq1H8E+UDQFFxKuSpgGn5hqVmZnVTY89goh4FHhG0i/LyjdHxHW5R2ZmZnVR9fLRiOgGtkras07xmJlZnVUbGir5E7BM0l3AxlJhRPxLblGZmVnd1JII5qavPkvPJ1wBtAFXR8RlFeqcClxEciXSoxFxRn+OZWZm/dNrIoiIH/Rnx5LagNnAR4DVwGJJ8yPi8UydCcBM4L0R8bKkt/TnWGZm1n95zjV0BLAyIp6OiE3AjSSXnmZ9DpgdES8DRMSLOcZjZmYV5JkIRgGrMsur07Ksg4CDJN0n6TfpUNIbSJouaYmkJWvXrs0pXDOzYmr07KM7ARNIblQ7HbhK0ojyShFxZURMiYgp7e3tdQ7RzKy19XqOQNJBwAxg/2z9iDi2l007gTGZ5dFpWdZq4MGI2Exyz8KTJIlhce+hm5nZQKjlqqFbgO8BVwHdfdj3YmCCpPEkCeA0oPyKoHkkPYHvSxpJMlT0dB+OYWZmO6iWRLAlIr7b1x1HxBZJZwMLSS4fvSYiVki6GFgSEfPTdX8j6XGSJDMjIl7q67HMzKz/akkEt0v6Z+BW4LVSYUSs723DiFgALCgruzDzPoBz05eZmTVALYngk+nfGZmyAA4Y+HDMzKzearmhbHw9AjEzs8ao5aqhoSRTUn8gLboH+D/plT5mZtbkahka+i4wFPhOuvyPadln8wrKzMzqp5ZE8K6IOCyzfLekR/MKyMzM6quWO4u7s4+slHQAfbufwMzMBrFaegQzgF9Keprk0ZX7A2fmGpWZmdVNLVcN/SKdLnpiWtQREa9V28bMzJpHj4lA0rERcbekvytbdaAkIqJfD6sxM7PBpVqP4IPA3cDHKqwL+vnUMjMzG1x6TAQR8Y307cUR8Ux2XTqRnJmZtYBarhr6cYWyOQMdiJmZNUa1cwRvBQ4B9iw7T7AHMCzvwMzMrD6qnSOYCHwUGMH25wleJXnWsJmZtYBq5whuA26TdFREPFDHmMzMrI5quaFsqaSzSIaJtg0JRcSnc4vKzMzqppaTxT8C/gqYCtxL8uzhV/MMyszM6qeWRHBgRHwd2BgRPwD+Fjgy37DMzKxeakkEpecObJD0dmBP4C35hWRmZvVUyzmCKyXtBXwdmA/sBlxYfRMzM2sWtUw6d3X69l78nGIzs5ZT7Yayc6ttGBGXD3w4ZmZWb9V6BLunfycC7yIZFoLk5rJFeQZlZmb1U+2Gsn8DkPQr4PCIeDVdvgj4SV2iMzOz3NVy1dA+wKbM8qa0zMzMWkAtieCHwCJJF6W9gQeBa2vZuaRpkjokrZR0foX1n5K0VtIj6euzfQnezMx2XC1XDV0q6afA+9OiMyNiaW/bSWoDZgMfAVYDiyXNj4jHy6reFBFn9zFuMzMbINWuGtojIv4oaW/g2fRVWrd3RKzvZd9HACsj4ul0mxuBE4HyRGBmZg1UrUdwPck01A+RPJqyROlyb/cUjAJWZZZXU3lqipMlfQB4EvhSRKwqryBpOjAdYOzYsb0c1szM+qLHcwQR8dH07/iIOCDzGh8RA3Vj2e3AuIg4FLgL+EEPsVwZEVMiYkp7e/sAHdrMzKD60NDh1TaMiId72XcnMCazPDoty+7jpczi1cC/97JPMzMbYNWGhv6zyroAju1l34uBCemD7juB04AzshUk7RsRz6eLJwBP9LJPMzMbYNVuKDtmR3YcEVsknQ0sBNqAayJihaSLgSURMR/4F0knAFuA9cCnduSYZmbWd7XMPko6/fTBbP+Esh/2tl1ELAAWlJVdmHk/E5hZa7BmZjbwek0Ekr4BHE2SCBYAxwG/JrnRzMzMmlwtdxafAnwIeCEizgQOI3k4jZmZtYBaEkFXRGwFtkjaA3iR7a8GMjOzJlbLOYIlkkYAV5HcXPYn4IFcozIzs7qpdh/BbOD6iPjntOh7ku4E9oiIx+oSnZmZ5a5aj+BJ4D8k7QvcDNxQy2RzZmbWXKpNMXFFRBwFfBB4CbhG0m8lfUPSQXWL0MzMctXryeKI+H1EfDMiJgOnAyfhO4DNzFpGr4lA0k6SPibpOuCnQAfwd7lHZmZmdVHtZPFHSHoAx5M8rP5GYHpEbKxTbANu3tJOZi3soHNDFzu3DWHe0k5Omjyq0WGZmTVUtZ
PFM0meSfDliHi5TvHkZt7STmbOXUbX5m4ANnVvZebcZQBOBmZWaNVOFh8bEVe3QhIAmLWwY1sSKOna3M2shR0NisjMbHCo5c7ilrBmQ1efys3MiqIwiWC/EcP7VG5mVhSFSQQzpk5k+NC27cqGD21jxtSJDYrIzGxwqOl5BK2gdEL4vDmPsal7K6NGDGfG1Ik+UWxmhVeYRABJMrhh0XMA3PT5oxocjZnZ4FCYoSEzM6vMicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOByTQSSpknqkLRS0vlV6p0sKSRNyTMeMzN7o9wSgaQ2YDZwHHAwcLqkgyvU2x34IvBgXrGYmVnP8uwRHAGsjIinI2ITyRPOTqxQ7xLgm8BfcozFzMx6kGciGAWsyiyvTsu2kXQ4MCYifpJjHGZmVkXDThZLGgJcDny5hrrTJS2RtGTt2rX5B2dmViB5JoJOYExmeXRaVrI78HbgHknPAu8G5lc6YRwRV0bElIiY0t7enmPIZmbFk2ciWAxMkDRe0s7AacD80sqIeCUiRkbEuIgYB/wGOCEiluQYk5mZlcktEUTEFuBsYCHwBHBzRKyQdLGkE/I6rpmZ9U2uD6aJiAXAgrKyC3uoe3SesZiZWWW+s9jMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCs6JwMys4JwIzMwKzonAzKzgnAjMzArOicDMrOCcCMzMCi7XRCBpmqQOSSslnV9h/RckLZP0iKRfSzo4z3jMzOyNcksEktqA2cBxwMHA6RW+6K+PiHdExCTg34HL84rHzMwqy7NHcASwMiKejohNwI3AidkKEfHHzOKbgMgxHjMzq2CnHPc9CliVWV4NHFleSdJZwLnAzsCxlXYkaTowHWDs2LEDHqiZWZE1/GRxRMyOiL8GvgJc0EOdKyNiSkRMaW9vr2+AZmYtLs9E0AmMySyPTst6ciNwUo7xmJlZBXkmgsXABEnjJe0MnAbMz1aQNCGz+LfA73KMx8zMKsjtHEFEbJF0NrAQaAOuiYgVki4GlkTEfOBsSR8GNgMvA5/MKx4zM6ssz5PFRMQCYEFZ2YWZ91/M8/hmZta7hp8sNjOzxnIiMDMrOCcCM7OCcyIwMyu4XE8WDxbzlnYya2EHazZ0MbRtCGP2Ht7okMzMBo2W7xHMW9rJzLnL6NzQRQCburfyzLqNzFta7d42M7PiaPlEMGthB12bu7cr2xpJuZmZFSARrNnQ1adyM7OiaflEsN+IyucDeio3Myualk8EM6ZOZPjQtu3Khg9tY8bUiQ2KyMxscGn5q4ZOmjwKYNtVQ/uNGM6MqRO3lZuZFV3LJwJIkoG/+M3MKmv5oSEzM6vOicDMrOCcCMzMCs6JwMys4JwIzMwKThHR6Bj6RNJa4Pd93GwksC6HcBqhldoCrdWeVmoLtFZ7Wqkt0L/27B8R7ZVWNF0i6A9JSyJiSqPjGAit1BZorfa0UlugtdrTSm2BgW+Ph4bMzArOicDMrOCKkgiubHQAA6iV2gKt1Z5Wagu0VntaqS0wwO0pxDkCMzPrWVF6BGZm1gMnAjOzgmvpRCBpmqQOSSslnd/oePpK0jWSXpS0PFO2t6S7JP0u/btXI2OslaQxkn4p6XFJKyR9MS1v1vYMk7RI0qNpe/4tLR8v6cH0M3eTpJ0bHWutJLVJWirpjnS5mdvyrKRlkh6RtCQta9bP2ghJcyT9VtITko4a6La0bCKQ1AbMBo4DDgZOl3RwY6Pqs2uBaWVl5wO/iIgJwC/S5WawBfhyRBwMvBs4K/3/0azteQ04NiIOAyYB0yS9G/gm8K2IOBB4GfhMA2Psqy8CT2SWm7ktAMdExKTM9fbN+lm7ArgzIt4KHEby/2hg2xIRLfkCjgIWZpZnAjMbHVc/2jEOWJ5Z7gD2Td/vC3Q0OsZ+tus24COt0B5gV+Bh4EiSuz13Ssu3+wwO5hcwOv1CORa4A1CztiWN91lgZFlZ033WgD2BZ0gv7MmrLS3bIwBGAasyy6vTsma3T0Q8n75/AdinkcH0h6RxwGTgQZq4PelQyiPAi8BdwFPAhojYklZpps/c/wLOA7amy2+medsCEMDPJD0kaXpa1oyftfHAWuD76bDd1ZLexAC3pZUTQcuL5OdAU13/K2k34MfAv0bEH7Prmq09EdEdEZNIfk0fAby1wSH1i6SPAi9GxEONjmUAvS8iDicZGj5L0geyK5vos7YTcDjw3YiYDGykbBhoINrSyomgExiTWR6dljW7P0jaFyD9+2KD46mZpKEkSeC6iJibFjdte0oiYgPwS5LhkxGSSo+AbZbP3HuBEyQ9C9xIMjx0Bc3ZFgAiojP9+yJwK0mibsbP2mpgdUQ8mC7PIUkMA9qWVk4Ei4EJ6ZUPOwOnAfMbHNNAmA98Mn3/SZKx9kFPkoD/CzwREZdnVjVre9oljUjfDyc53/EESUI4Ja3WFO2JiJkRMToixpH8O7k7Iv6BJmwLgKQ3Sdq99B74G2A5TfhZi4gXgFWSJqZFHwIeZ6Db0uiTITmfaDkeeJJk7PZrjY6nH/HfADwPbCb5ZfAZkrHbXwC/A34O7N3oOGtsy/tIuq+PAY+kr+ObuD2HAkvT9iwHLkzLDwAWASuBW4BdGh1rH9t1NHBHM7cljfvR9LWi9G+/iT9rk4Al6WdtHrDXQLfFU0yYmRVcKw8NmZlZDZwIzMwKzonAzKzgnAjMzArOicDMrOCcCGxQkfQtSf+aWV4o6erM8n9KOrfK9tdKOiV9f4+kNzzgW9JQSZelMzc+LOkBScel656VNLIfcW87bg/rZ6czYT4uqSt9/4ikUyQtKN2TMJAk7VuaSbSH9TtL+lXmpjErKCcCG2zuA94DIGkIMBI4JLP+PcD9O3iMS0gm6np7JNMQnATsvoP7rCoizopkOorjgacimRVzUkTMiYjjI7k7eaCdC1xVJaZNJNei/30Ox7Ym4kRgg839JFM1QJIAlgOvStpL0i7A24CHJV0oabGk5ZKuTO9c7pWkXYHPAedExGsAEfGHiLi5Qt1z0/0vL+ul/DdJj6XPIvhRhe0uSXsIbTXG9KykkZLGpXPOXyvpSUnXSfqwpPvS3ssRaf03KXlWxaJ0IrITe9j1ycCd6TaHpPUfSWOfkNaZB/xDLXFa63KX0AaViFgjaYuksSS//h8gmfXyKOAVYFlEbJL0vyPiYoD0y/ijwO01HOJA4Lkom/CunKR3AmeSTC0t4EFJ9wKbgAuA90TEOkl7l203i6R3cWb0727NA4FPAJ8mmSblDJK7sk8AvkrSe/kayTQQn06HlBZJ+nlEbMzEMR54uZTsgC8AV0TEdemUK6UktRx4Vz/itBbiHoENRveTJIFSInggs3xfWucYJU/PWkYySdohlXa0A94H3BoRGyPiT8Bc4P3psW6JiHUAEbE+s83XgT0j4gv9TAIAz
0TEsojYSjI9wi/SfS0jeTYFJHPnnJ9OgX0PMAwYW7affUmmLy55APiqpK8A+0dEVxp/N7CpNDePFZMTgQ1GpfME7yD5xfobkh7Be4D7JQ0DvgOcEhHvIBkHH1bjvlcCYyXtMeBRJ7/g31neS+ij1zLvt2aWt/J6D17AyZnzDGMjIvtkMYAuMv9NIuJ6kl5FF7BA0rGZursAf9mBmK3JORHYYHQ/yVDP+kjm/F8PjCBJBvfz+hfcuvT5Bj1erVMuIv5MMgvqFekQSWkm0U+UVf0v4CRJu6YzWH48Lbsb+ISkN6fbZr/07wQuA36S8y/shcA5pfMikiZXqPMkr/cgkHQA8HREfJtkpspD0/I3A+siYnOO8dog50Rgg9EykquFflNW9kpErEuvsLmKpLewkOSXeF9cQDJs8rik5SSPZix/SM7DJM+MXkTyJLWrI2JpRKwALgXulfQocHnZdreksc1Pp6fOwyXAUOAxSSvS5e2k5wueknRgWnQqsDwdTno78MO0/BjgJznFaU3Cs4+atShJHwfeGREXVKkzFzg/Ip6sX2Q22PiqIbMWFRG3loawKkmHxuY5CZh7BGZmBedzBGZmBedEYGZWcE4EZmYF50RgZlZwTgRmZgX3/wHbnpzjbITbNQAAAABJRU5ErkJggg==" + }, + "metadata": { + "needs_background": "light" + } + } + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 3. Comparison with alternatives\n", "\n", "### FLAML's accuracy" - ] + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 13, + "source": [ + "print('flaml r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "flaml r2 = 0.8540590968156087\n" + ] + } + ], + "metadata": { + "tags": [] + } + }, + { + "cell_type": "markdown", + "source": [ + "### Default LightGBM" + ], + "metadata": {} }, { "cell_type": "code", "execution_count": 14, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "flaml r2 = 0.8475173393496316\n" - ] - } + "source": [ + "from lightgbm import LGBMRegressor\n", + "lgbm = LGBMRegressor()" ], - "source": [ - "print('flaml r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Default LightGBM" - ] + "outputs": [], + "metadata": {} }, { "cell_type": "code", "execution_count": 15, - "metadata": {}, - "outputs": [], "source": [ - "from lightgbm import LGBMRegressor\n", - "lgbm = LGBMRegressor()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, + "lgbm.fit(X_train, y_train)" + ], "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ "LGBMRegressor()" ] }, - "execution_count": 16, "metadata": {}, - "output_type": "execute_result" + "execution_count": 15 } ], - "source": [ - "lgbm.fit(X_train, y_train)" - ] + "metadata": {} }, { "cell_type": "code", - "execution_count": 17, - "metadata": { - "tags": [] - }, + "execution_count": 16, + "source": [ + "y_pred = lgbm.predict(X_test)\n", + "from flaml.ml import sklearn_metric_loss_score\n", + "print('default lgbm r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" + ], "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "default lgbm r2 = 0.8296179648694404\n" ] } ], - "source": [ - "y_pred = lgbm.predict(X_test)\n", - "from flaml.ml import sklearn_metric_loss_score\n", - "print('default lgbm r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" - ] + "metadata": { + "tags": [] + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### Optuna LightGBM Tuner" - ] + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 17, + "source": [ + "# !pip install optuna==2.8.0;" + ], + "outputs": [], + "metadata": {} }, { "cell_type": "code", "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install optuna==2.8.0;" - ] - }, - { - "cell_type": "code", - "execution_count": 
19, - "metadata": {}, - "outputs": [], "source": [ "from sklearn.model_selection import train_test_split\n", "train_x, val_x, train_y, val_y = train_test_split(X_train, y_train, test_size=0.1)\n", @@ -598,165 +626,165 @@ " \"metric\": \"regression\",\n", " \"verbosity\": -1,\n", "}\n" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "code", - "execution_count": 20, - "metadata": { - "tags": [ - "outputPrepend" - ] - }, + "execution_count": 19, + "source": [ + "%%time\n", + "model = lgb.train(params, dtrain, valid_sets=[dtrain, dval], verbose_eval=10000) \n" + ], "outputs": [ { - "name": "stderr", "output_type": "stream", + "name": "stderr", "text": [ - "\u001b[32m[I 2021-08-12 21:28:57,590]\u001b[0m A new study created in memory with name: no-name-46b1ad95-0f7c-4964-b50d-b3513a8fd6b5\u001b[0m\n", - "feature_fraction, val_score: 1961388150.442426: 14%|#4 | 1/7 [00:02<00:12, 2.16s/it]\u001b[32m[I 2021-08-12 21:28:59,788]\u001b[0m Trial 0 finished with value: 1961388150.4424257 and parameters: {'feature_fraction': 1.0}. Best is trial 0 with value: 1961388150.4424257.\u001b[0m\n", - "feature_fraction, val_score: 1959693958.979496: 29%|##8 | 2/7 [00:04<00:11, 2.38s/it]\u001b[32m[I 2021-08-12 21:29:02,317]\u001b[0m Trial 1 finished with value: 1959693958.9794962 and parameters: {'feature_fraction': 0.7}. Best is trial 1 with value: 1959693958.9794962.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 43%|####2 | 3/7 [00:08<00:12, 3.06s/it]\u001b[32m[I 2021-08-12 21:29:06,187]\u001b[0m Trial 2 finished with value: 1923826918.4421153 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 2 with value: 1923826918.4421153.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 57%|#####7 | 4/7 [00:11<00:09, 3.05s/it]\u001b[32m[I 2021-08-12 21:29:09,235]\u001b[0m Trial 3 finished with value: 1935542284.5841484 and parameters: {'feature_fraction': 0.6}. Best is trial 2 with value: 1923826918.4421153.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 71%|#######1 | 5/7 [00:17<00:07, 3.97s/it]\u001b[32m[I 2021-08-12 21:29:14,817]\u001b[0m Trial 4 finished with value: 2237193094.198953 and parameters: {'feature_fraction': 0.4}. Best is trial 2 with value: 1923826918.4421153.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 86%|########5 | 6/7 [00:22<00:04, 4.27s/it]\u001b[32m[I 2021-08-12 21:29:19,667]\u001b[0m Trial 5 finished with value: 1959693958.9794967 and parameters: {'feature_fraction': 0.8}. Best is trial 2 with value: 1923826918.4421153.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 100%|##########| 7/7 [00:26<00:00, 4.38s/it]\u001b[32m[I 2021-08-12 21:29:24,287]\u001b[0m Trial 6 finished with value: 1988198059.9532917 and parameters: {'feature_fraction': 0.5}. Best is trial 2 with value: 1923826918.4421153.\u001b[0m\n", - "feature_fraction, val_score: 1923826918.442115: 100%|##########| 7/7 [00:26<00:00, 3.81s/it]\n", - "num_leaves, val_score: 1909760725.486306: 5%|5 | 1/20 [00:11<03:45, 11.86s/it]\u001b[32m[I 2021-08-12 21:29:36,173]\u001b[0m Trial 7 finished with value: 1909760725.486306 and parameters: {'num_leaves': 49}. Best is trial 7 with value: 1909760725.486306.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 10%|# | 2/20 [00:17<02:27, 8.17s/it]\u001b[32m[I 2021-08-12 21:29:41,758]\u001b[0m Trial 8 finished with value: 1893602110.4464366 and parameters: {'num_leaves': 25}. 
Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 15%|#5 | 3/20 [00:28<02:43, 9.64s/it]\u001b[32m[I 2021-08-12 21:29:53,150]\u001b[0m Trial 9 finished with value: 1912380397.1371074 and parameters: {'num_leaves': 153}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 20%|## | 4/20 [00:34<02:06, 7.88s/it]\u001b[32m[I 2021-08-12 21:29:58,336]\u001b[0m Trial 10 finished with value: 1967375619.2597315 and parameters: {'num_leaves': 15}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 25%|##5 | 5/20 [00:52<02:58, 11.87s/it]\u001b[32m[I 2021-08-12 21:30:17,285]\u001b[0m Trial 11 finished with value: 1927844271.0899227 and parameters: {'num_leaves': 191}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 30%|### | 6/20 [01:06<02:51, 12.27s/it]\u001b[32m[I 2021-08-12 21:30:30,325]\u001b[0m Trial 12 finished with value: 1899507608.1924043 and parameters: {'num_leaves': 129}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 35%|###5 | 7/20 [01:12<02:14, 10.35s/it]\u001b[32m[I 2021-08-12 21:30:36,715]\u001b[0m Trial 13 finished with value: 1914457236.300789 and parameters: {'num_leaves': 110}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 40%|#### | 8/20 [01:33<02:46, 13.89s/it]\u001b[32m[I 2021-08-12 21:30:58,207]\u001b[0m Trial 14 finished with value: 1900862950.0439188 and parameters: {'num_leaves': 201}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 45%|####5 | 9/20 [01:43<02:18, 12.61s/it]\u001b[32m[I 2021-08-12 21:31:08,003]\u001b[0m Trial 15 finished with value: 1924633212.447516 and parameters: {'num_leaves': 26}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 50%|##### | 10/20 [01:47<01:38, 9.83s/it]\u001b[32m[I 2021-08-12 21:31:11,597]\u001b[0m Trial 16 finished with value: 1923826918.4421153 and parameters: {'num_leaves': 31}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1893602110.446437: 55%|#####5 | 11/20 [01:54<01:21, 9.03s/it]\u001b[32m[I 2021-08-12 21:31:18,802]\u001b[0m Trial 17 finished with value: 1926492416.6653154 and parameters: {'num_leaves': 73}. Best is trial 8 with value: 1893602110.4464366.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 60%|###### | 12/20 [02:04<01:14, 9.35s/it]\u001b[32m[I 2021-08-12 21:31:28,884]\u001b[0m Trial 18 finished with value: 1866168216.164035 and parameters: {'num_leaves': 122}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 65%|######5 | 13/20 [02:16<01:10, 10.14s/it]\u001b[32m[I 2021-08-12 21:31:40,859]\u001b[0m Trial 19 finished with value: 1916479244.3534958 and parameters: {'num_leaves': 82}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 70%|####### | 14/20 [02:34<01:14, 12.40s/it]\u001b[32m[I 2021-08-12 21:31:58,472]\u001b[0m Trial 20 finished with value: 1915145777.7969475 and parameters: {'num_leaves': 238}. 
Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 75%|#######5 | 15/20 [02:35<00:44, 8.92s/it]\u001b[32m[I 2021-08-12 21:31:59,320]\u001b[0m Trial 21 finished with value: 3301372834.445037 and parameters: {'num_leaves': 2}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 80%|######## | 16/20 [02:43<00:35, 8.77s/it]\u001b[32m[I 2021-08-12 21:32:07,734]\u001b[0m Trial 22 finished with value: 1945937902.6664844 and parameters: {'num_leaves': 156}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 85%|########5 | 17/20 [02:57<00:31, 10.39s/it]\u001b[32m[I 2021-08-12 21:32:21,885]\u001b[0m Trial 23 finished with value: 1897968424.186267 and parameters: {'num_leaves': 91}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 90%|######### | 18/20 [03:01<00:17, 8.50s/it]\u001b[32m[I 2021-08-12 21:32:26,009]\u001b[0m Trial 24 finished with value: 1886768541.069841 and parameters: {'num_leaves': 53}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 95%|#########5| 19/20 [03:05<00:07, 7.23s/it]\u001b[32m[I 2021-08-12 21:32:30,288]\u001b[0m Trial 25 finished with value: 1893552453.049502 and parameters: {'num_leaves': 54}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 100%|##########| 20/20 [03:14<00:00, 7.66s/it]\u001b[32m[I 2021-08-12 21:32:38,928]\u001b[0m Trial 26 finished with value: 1930159275.287451 and parameters: {'num_leaves': 124}. Best is trial 18 with value: 1866168216.164035.\u001b[0m\n", - "num_leaves, val_score: 1866168216.164035: 100%|##########| 20/20 [03:14<00:00, 9.73s/it]\n", - "bagging, val_score: 1866168216.164035: 10%|# | 1/10 [00:12<01:51, 12.34s/it]\u001b[32m[I 2021-08-12 21:32:51,319]\u001b[0m Trial 27 finished with value: 2226447935.929949 and parameters: {'bagging_fraction': 0.4348942073647856, 'bagging_freq': 3}. Best is trial 27 with value: 2226447935.929949.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 20%|## | 2/10 [00:21<01:22, 10.26s/it]\u001b[32m[I 2021-08-12 21:33:00,132]\u001b[0m Trial 28 finished with value: 2047448708.567457 and parameters: {'bagging_fraction': 0.522491146991637, 'bagging_freq': 4}. Best is trial 28 with value: 2047448708.567457.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 30%|### | 3/10 [00:30<01:08, 9.74s/it]\u001b[32m[I 2021-08-12 21:33:09,241]\u001b[0m Trial 29 finished with value: 2208782213.328473 and parameters: {'bagging_fraction': 0.5407390333877419, 'bagging_freq': 7}. Best is trial 28 with value: 2047448708.567457.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 40%|#### | 4/10 [00:37<00:53, 8.84s/it]\u001b[32m[I 2021-08-12 21:33:16,696]\u001b[0m Trial 30 finished with value: 1981019323.367325 and parameters: {'bagging_fraction': 0.9034329988074628, 'bagging_freq': 6}. Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 50%|##### | 5/10 [00:46<00:43, 8.65s/it]\u001b[32m[I 2021-08-12 21:33:25,005]\u001b[0m Trial 31 finished with value: 2049529460.907788 and parameters: {'bagging_fraction': 0.7171693778212943, 'bagging_freq': 5}. 
Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 60%|###### | 6/10 [00:53<00:33, 8.31s/it]\u001b[32m[I 2021-08-12 21:33:32,668]\u001b[0m Trial 32 finished with value: 2264022325.9930034 and parameters: {'bagging_fraction': 0.43045359107371695, 'bagging_freq': 3}. Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 70%|####### | 7/10 [01:01<00:24, 8.06s/it]\u001b[32m[I 2021-08-12 21:33:40,205]\u001b[0m Trial 33 finished with value: 2187533763.495785 and parameters: {'bagging_fraction': 0.5150727650965822, 'bagging_freq': 7}. Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 80%|######## | 8/10 [01:10<00:16, 8.29s/it]\u001b[32m[I 2021-08-12 21:33:48,992]\u001b[0m Trial 34 finished with value: 2195055434.15977 and parameters: {'bagging_fraction': 0.49821116144311656, 'bagging_freq': 7}. Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 90%|######### | 9/10 [01:18<00:08, 8.20s/it]\u001b[32m[I 2021-08-12 21:33:56,992]\u001b[0m Trial 35 finished with value: 2120746509.002798 and parameters: {'bagging_fraction': 0.4779649667796321, 'bagging_freq': 3}. Best is trial 30 with value: 1981019323.367325.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 100%|##########| 10/10 [01:24<00:00, 7.77s/it]\u001b[32m[I 2021-08-12 21:34:03,812]\u001b[0m Trial 36 finished with value: 1948370918.3509336 and parameters: {'bagging_fraction': 0.9410977830175267, 'bagging_freq': 1}. Best is trial 36 with value: 1948370918.3509336.\u001b[0m\n", - "bagging, val_score: 1866168216.164035: 100%|##########| 10/10 [01:24<00:00, 8.48s/it]\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 17%|#6 | 1/6 [00:06<00:34, 6.82s/it]\u001b[32m[I 2021-08-12 21:34:10,653]\u001b[0m Trial 37 finished with value: 1909783233.1033392 and parameters: {'feature_fraction': 0.9799999999999999}. Best is trial 37 with value: 1909783233.1033392.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 33%|###3 | 2/6 [00:13<00:26, 6.69s/it]\u001b[32m[I 2021-08-12 21:34:17,259]\u001b[0m Trial 38 finished with value: 1866168216.1640353 and parameters: {'feature_fraction': 0.9159999999999999}. Best is trial 38 with value: 1866168216.1640353.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 50%|##### | 3/6 [00:21<00:21, 7.30s/it]\u001b[32m[I 2021-08-12 21:34:25,288]\u001b[0m Trial 39 finished with value: 1866168216.164035 and parameters: {'feature_fraction': 0.82}. Best is trial 39 with value: 1866168216.164035.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 67%|######6 | 4/6 [00:27<00:13, 6.99s/it]\u001b[32m[I 2021-08-12 21:34:31,785]\u001b[0m Trial 40 finished with value: 1866168216.164035 and parameters: {'feature_fraction': 0.852}. Best is trial 39 with value: 1866168216.164035.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 83%|########3 | 5/6 [00:36<00:07, 7.41s/it]\u001b[32m[I 2021-08-12 21:34:39,941]\u001b[0m Trial 41 finished with value: 1866168216.164035 and parameters: {'feature_fraction': 0.8839999999999999}. Best is trial 39 with value: 1866168216.164035.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 100%|##########| 6/6 [00:44<00:00, 7.75s/it]\u001b[32m[I 2021-08-12 21:34:48,352]\u001b[0m Trial 42 finished with value: 1909783233.1033392 and parameters: {'feature_fraction': 0.948}. 
Best is trial 39 with value: 1866168216.164035.\u001b[0m\n", - "feature_fraction_stage2, val_score: 1866168216.164035: 100%|##########| 6/6 [00:44<00:00, 7.42s/it]\n", - "regularization_factors, val_score: 1866168216.164035: 5%|5 | 1/20 [00:07<02:29, 7.85s/it]\u001b[32m[I 2021-08-12 21:34:56,223]\u001b[0m Trial 43 finished with value: 1899009420.0243478 and parameters: {'lambda_l1': 0.0014127541304345857, 'lambda_l2': 0.22374000194862534}. Best is trial 43 with value: 1899009420.0243478.\u001b[0m\n", - "regularization_factors, val_score: 1866168216.164035: 10%|# | 2/20 [00:15<02:14, 7.47s/it]\u001b[32m[I 2021-08-12 21:35:03,428]\u001b[0m Trial 44 finished with value: 1867133014.62294 and parameters: {'lambda_l1': 0.03844784585753986, 'lambda_l2': 0.00018061829598451463}. Best is trial 44 with value: 1867133014.62294.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 15%|#5 | 3/20 [00:22<02:07, 7.51s/it]\u001b[32m[I 2021-08-12 21:35:10,983]\u001b[0m Trial 45 finished with value: 1862347751.563527 and parameters: {'lambda_l1': 0.17221202238015665, 'lambda_l2': 0.00025519456319286597}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 20%|## | 4/20 [00:29<01:56, 7.29s/it]\u001b[32m[I 2021-08-12 21:35:17,943]\u001b[0m Trial 46 finished with value: 1866168215.2466798 and parameters: {'lambda_l1': 1.1162378906196939e-07, 'lambda_l2': 2.3517586151003225e-07}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 25%|##5 | 5/20 [00:39<02:03, 8.23s/it]\u001b[32m[I 2021-08-12 21:35:27,834]\u001b[0m Trial 47 finished with value: 1866168215.2750273 and parameters: {'lambda_l1': 8.118613275023769e-06, 'lambda_l2': 2.2125899197414973e-07}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 30%|### | 6/20 [00:50<02:08, 9.19s/it]\u001b[32m[I 2021-08-12 21:35:38,881]\u001b[0m Trial 48 finished with value: 1866168214.4534495 and parameters: {'lambda_l1': 3.650657666456404e-06, 'lambda_l2': 5.551789380059054e-07}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 35%|###5 | 7/20 [00:57<01:51, 8.56s/it]\u001b[32m[I 2021-08-12 21:35:46,142]\u001b[0m Trial 49 finished with value: 1894359167.4235432 and parameters: {'lambda_l1': 0.6834643786787952, 'lambda_l2': 0.0005042843753004482}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 40%|#### | 8/20 [01:04<01:35, 7.95s/it]\u001b[32m[I 2021-08-12 21:35:52,804]\u001b[0m Trial 50 finished with value: 1866168212.9993303 and parameters: {'lambda_l1': 0.00016032873004682006, 'lambda_l2': 1.0453125221176078e-06}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 45%|####5 | 9/20 [01:11<01:22, 7.54s/it]\u001b[32m[I 2021-08-12 21:35:59,443]\u001b[0m Trial 51 finished with value: 1892611870.5699668 and parameters: {'lambda_l1': 0.051377714337184646, 'lambda_l2': 0.05419859107292369}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 50%|##### | 10/20 [01:17<01:13, 7.33s/it]\u001b[32m[I 2021-08-12 21:36:06,284]\u001b[0m Trial 52 finished with value: 1866168211.9443643 and parameters: {'lambda_l1': 1.1485671686518937e-06, 'lambda_l2': 1.4100757187184286e-06}. 
Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 55%|#####5 | 11/20 [01:25<01:06, 7.36s/it]\u001b[32m[I 2021-08-12 21:36:13,725]\u001b[0m Trial 53 finished with value: 1885586770.2273645 and parameters: {'lambda_l1': 4.580147889294802, 'lambda_l2': 0.0005018959196271982}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 60%|###### | 12/20 [01:31<00:56, 7.02s/it]\u001b[32m[I 2021-08-12 21:36:19,979]\u001b[0m Trial 54 finished with value: 1866168157.8088048 and parameters: {'lambda_l1': 1.434301476741782e-08, 'lambda_l2': 1.8012113848059015e-05}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 65%|######5 | 13/20 [01:38<00:48, 6.96s/it]\u001b[32m[I 2021-08-12 21:36:26,789]\u001b[0m Trial 55 finished with value: 1866168172.4661086 and parameters: {'lambda_l1': 2.8590447974532532e-08, 'lambda_l2': 1.3570074139942467e-05}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 70%|####### | 14/20 [01:44<00:40, 6.78s/it]\u001b[32m[I 2021-08-12 21:36:33,156]\u001b[0m Trial 56 finished with value: 1929762960.4500725 and parameters: {'lambda_l1': 9.874299239326142, 'lambda_l2': 0.011250132690468205}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 75%|#######5 | 15/20 [01:51<00:33, 6.68s/it]\u001b[32m[I 2021-08-12 21:36:39,605]\u001b[0m Trial 57 finished with value: 1866168145.853947 and parameters: {'lambda_l1': 0.001457467132871678, 'lambda_l2': 2.1471698394126437e-05}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 80%|######## | 16/20 [01:57<00:26, 6.57s/it]\u001b[32m[I 2021-08-12 21:36:45,923]\u001b[0m Trial 58 finished with value: 1866168046.9808419 and parameters: {'lambda_l1': 0.0011582433705743624, 'lambda_l2': 5.158343573053873e-05}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 85%|########5 | 17/20 [02:05<00:20, 6.85s/it]\u001b[32m[I 2021-08-12 21:36:53,433]\u001b[0m Trial 59 finished with value: 1869255015.1998665 and parameters: {'lambda_l1': 0.0437753701485812, 'lambda_l2': 7.728496590262199}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1862347751.563527: 90%|######### | 18/20 [02:11<00:13, 6.79s/it]\u001b[32m[I 2021-08-12 21:37:00,080]\u001b[0m Trial 60 finished with value: 1866168215.8948646 and parameters: {'lambda_l1': 0.0002464476399548371, 'lambda_l2': 1.305866123493045e-08}. Best is trial 45 with value: 1862347751.563527.\u001b[0m\n", - "regularization_factors, val_score: 1860040693.310331: 95%|#########5| 19/20 [02:18<00:06, 6.66s/it]\u001b[32m[I 2021-08-12 21:37:06,434]\u001b[0m Trial 61 finished with value: 1860040693.3103309 and parameters: {'lambda_l1': 0.7465543891293522, 'lambda_l2': 0.00286148358308546}. Best is trial 61 with value: 1860040693.3103309.\u001b[0m\n", - "regularization_factors, val_score: 1860040693.310331: 100%|##########| 20/20 [02:24<00:00, 6.64s/it]\u001b[32m[I 2021-08-12 21:37:13,031]\u001b[0m Trial 62 finished with value: 1919199084.8296423 and parameters: {'lambda_l1': 0.4541623636884782, 'lambda_l2': 0.005250499353309728}. 
Best is trial 61 with value: 1860040693.3103309.\u001b[0m\n", - "regularization_factors, val_score: 1860040693.310331: 100%|##########| 20/20 [02:24<00:00, 7.23s/it]\n", - "min_data_in_leaf, val_score: 1860040693.310331: 20%|## | 1/5 [00:07<00:28, 7.13s/it]\u001b[32m[I 2021-08-12 21:37:20,192]\u001b[0m Trial 63 finished with value: 1922926466.623347 and parameters: {'min_child_samples': 25}. Best is trial 63 with value: 1922926466.623347.\u001b[0m\n", - "min_data_in_leaf, val_score: 1860040693.310331: 40%|#### | 2/5 [00:14<00:21, 7.16s/it]\u001b[32m[I 2021-08-12 21:37:27,374]\u001b[0m Trial 64 finished with value: 1989706958.6761992 and parameters: {'min_child_samples': 100}. Best is trial 63 with value: 1922926466.623347.\u001b[0m\n", - "min_data_in_leaf, val_score: 1807874616.779958: 60%|###### | 3/5 [00:21<00:14, 7.12s/it]\u001b[32m[I 2021-08-12 21:37:34,441]\u001b[0m Trial 65 finished with value: 1807874616.7799582 and parameters: {'min_child_samples': 5}. Best is trial 65 with value: 1807874616.7799582.\u001b[0m\n", - "min_data_in_leaf, val_score: 1807874616.779958: 80%|######## | 4/5 [00:29<00:07, 7.41s/it]\u001b[32m[I 2021-08-12 21:37:42,305]\u001b[0m Trial 66 finished with value: 1982448779.0409832 and parameters: {'min_child_samples': 50}. Best is trial 65 with value: 1807874616.7799582.\u001b[0m\n", - "min_data_in_leaf, val_score: 1807874616.779958: 100%|##########| 5/5 [00:36<00:00, 7.31s/it]\u001b[32m[I 2021-08-12 21:37:49,432]\u001b[0m Trial 67 finished with value: 1903338404.5809317 and parameters: {'min_child_samples': 10}. Best is trial 65 with value: 1807874616.7799582.\u001b[0m\n", - "min_data_in_leaf, val_score: 1807874616.779958: 100%|##########| 5/5 [00:36<00:00, 7.28s/it]" + "\u001b[32m[I 2021-08-22 21:13:20,495]\u001b[0m A new study created in memory with name: no-name-11015170-733e-470d-817a-413f55382d0c\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 14%|#4 | 1/7 [00:01<00:08, 1.49s/it]\u001b[32m[I 2021-08-22 21:13:22,006]\u001b[0m Trial 0 finished with value: 1923826918.4421172 and parameters: {'feature_fraction': 0.8999999999999999}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 29%|##8 | 2/7 [00:02<00:07, 1.44s/it]\u001b[32m[I 2021-08-22 21:13:23,335]\u001b[0m Trial 1 finished with value: 1935542284.5841475 and parameters: {'feature_fraction': 0.6}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 43%|####2 | 3/7 [00:04<00:05, 1.44s/it]\u001b[32m[I 2021-08-22 21:13:24,762]\u001b[0m Trial 2 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.7}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 57%|#####7 | 4/7 [00:05<00:04, 1.38s/it]\u001b[32m[I 2021-08-22 21:13:26,021]\u001b[0m Trial 3 finished with value: 2237193094.1989536 and parameters: {'feature_fraction': 0.4}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 71%|#######1 | 5/7 [00:06<00:02, 1.36s/it]\u001b[32m[I 2021-08-22 21:13:27,314]\u001b[0m Trial 4 finished with value: 1988198059.953293 and parameters: {'feature_fraction': 0.5}. 
Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 86%|########5 | 6/7 [00:08<00:01, 1.37s/it]\u001b[32m[I 2021-08-22 21:13:28,719]\u001b[0m Trial 5 finished with value: 1959693958.979498 and parameters: {'feature_fraction': 0.8}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:09<00:00, 1.42s/it]\u001b[32m[I 2021-08-22 21:13:30,240]\u001b[0m Trial 6 finished with value: 1961388150.442425 and parameters: {'feature_fraction': 1.0}. Best is trial 0 with value: 1923826918.4421172.\u001b[0m\n", + "feature_fraction, val_score: 1923826918.442117: 100%|##########| 7/7 [00:09<00:00, 1.39s/it]\n", + "num_leaves, val_score: 1902337773.833954: 5%|5 | 1/20 [00:02<00:41, 2.21s/it]\u001b[32m[I 2021-08-22 21:13:32,454]\u001b[0m Trial 7 finished with value: 1902337773.8339543 and parameters: {'num_leaves': 62}. Best is trial 7 with value: 1902337773.8339543.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 10%|# | 2/20 [00:04<00:42, 2.35s/it]\u001b[32m[I 2021-08-22 21:13:35,126]\u001b[0m Trial 8 finished with value: 1892120308.4363017 and parameters: {'num_leaves': 78}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 15%|#5 | 3/20 [00:06<00:34, 2.03s/it]\u001b[32m[I 2021-08-22 21:13:36,422]\u001b[0m Trial 9 finished with value: 1924633212.447515 and parameters: {'num_leaves': 26}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 20%|## | 4/20 [00:07<00:27, 1.70s/it]\u001b[32m[I 2021-08-22 21:13:37,340]\u001b[0m Trial 10 finished with value: 1975840134.2036633 and parameters: {'num_leaves': 12}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 25%|##5 | 5/20 [00:13<00:46, 3.12s/it]\u001b[32m[I 2021-08-22 21:13:43,773]\u001b[0m Trial 11 finished with value: 1923702276.2852578 and parameters: {'num_leaves': 204}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 30%|### | 6/20 [00:20<00:59, 4.24s/it]\u001b[32m[I 2021-08-22 21:13:50,646]\u001b[0m Trial 12 finished with value: 1939984702.0007648 and parameters: {'num_leaves': 214}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 35%|###5 | 7/20 [00:23<00:49, 3.79s/it]\u001b[32m[I 2021-08-22 21:13:53,368]\u001b[0m Trial 13 finished with value: 1942261187.1568937 and parameters: {'num_leaves': 61}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 40%|#### | 8/20 [00:33<01:09, 5.81s/it]\u001b[32m[I 2021-08-22 21:14:03,909]\u001b[0m Trial 14 finished with value: 1962322296.1656826 and parameters: {'num_leaves': 234}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 45%|####5 | 9/20 [00:38<01:00, 5.52s/it]\u001b[32m[I 2021-08-22 21:14:08,728]\u001b[0m Trial 15 finished with value: 1933575055.0360022 and parameters: {'num_leaves': 131}. Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1892120308.436302: 50%|##### | 10/20 [00:41<00:47, 4.70s/it]\u001b[32m[I 2021-08-22 21:14:11,527]\u001b[0m Trial 16 finished with value: 1907396468.702243 and parameters: {'num_leaves': 64}. 
Best is trial 8 with value: 1892120308.4363017.\u001b[0m\n", + "num_leaves, val_score: 1880656950.656438: 55%|#####5 | 11/20 [00:46<00:43, 4.83s/it]\u001b[32m[I 2021-08-22 21:14:16,641]\u001b[0m Trial 17 finished with value: 1880656950.6564376 and parameters: {'num_leaves': 141}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n", + "num_leaves, val_score: 1880656950.656438: 60%|###### | 12/20 [00:51<00:38, 4.80s/it]\u001b[32m[I 2021-08-22 21:14:21,399]\u001b[0m Trial 18 finished with value: 1906428309.75546 and parameters: {'num_leaves': 139}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n", + "num_leaves, val_score: 1880656950.656438: 65%|######5 | 13/20 [00:56<00:35, 5.07s/it]\u001b[32m[I 2021-08-22 21:14:27,074]\u001b[0m Trial 19 finished with value: 1897071192.2731016 and parameters: {'num_leaves': 161}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n", + "num_leaves, val_score: 1880656950.656438: 70%|####### | 14/20 [01:01<00:28, 4.81s/it]\u001b[32m[I 2021-08-22 21:14:31,276]\u001b[0m Trial 20 finished with value: 1910775598.9420693 and parameters: {'num_leaves': 95}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n", + "num_leaves, val_score: 1880656950.656438: 75%|#######5 | 15/20 [01:04<00:22, 4.44s/it]\u001b[32m[I 2021-08-22 21:14:34,857]\u001b[0m Trial 21 finished with value: 1890350018.7742429 and parameters: {'num_leaves': 101}. Best is trial 17 with value: 1880656950.6564376.\u001b[0m\n", + "num_leaves, val_score: 1874647481.354196: 80%|######## | 16/20 [01:10<00:19, 4.84s/it]\u001b[32m[I 2021-08-22 21:14:40,645]\u001b[0m Trial 22 finished with value: 1874647481.354196 and parameters: {'num_leaves': 174}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n", + "num_leaves, val_score: 1874647481.354196: 85%|########5 | 17/20 [01:16<00:15, 5.12s/it]\u001b[32m[I 2021-08-22 21:14:46,424]\u001b[0m Trial 23 finished with value: 1929626032.4915411 and parameters: {'num_leaves': 176}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n", + "num_leaves, val_score: 1874647481.354196: 90%|######### | 18/20 [01:22<00:10, 5.35s/it]\u001b[32m[I 2021-08-22 21:14:52,288]\u001b[0m Trial 24 finished with value: 1926786945.429698 and parameters: {'num_leaves': 177}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n", + "num_leaves, val_score: 1874647481.354196: 95%|#########5| 19/20 [01:30<00:06, 6.26s/it]\u001b[32m[I 2021-08-22 21:15:00,672]\u001b[0m Trial 25 finished with value: 1936436149.7610657 and parameters: {'num_leaves': 248}. Best is trial 22 with value: 1874647481.354196.\u001b[0m\n", + "num_leaves, val_score: 1870787631.458499: 100%|##########| 20/20 [01:35<00:00, 5.93s/it]\u001b[32m[I 2021-08-22 21:15:05,849]\u001b[0m Trial 26 finished with value: 1870787631.4584987 and parameters: {'num_leaves': 152}. Best is trial 26 with value: 1870787631.4584987.\u001b[0m\n", + "num_leaves, val_score: 1870787631.458499: 100%|##########| 20/20 [01:35<00:00, 4.78s/it]\n", + "bagging, val_score: 1870787631.458499: 10%|# | 1/10 [00:07<01:06, 7.43s/it]\u001b[32m[I 2021-08-22 21:15:13,289]\u001b[0m Trial 27 finished with value: 2237757312.870728 and parameters: {'bagging_fraction': 0.44000087334449334, 'bagging_freq': 6}. Best is trial 27 with value: 2237757312.870728.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 20%|## | 2/10 [00:15<01:01, 7.71s/it]\u001b[32m[I 2021-08-22 21:15:21,655]\u001b[0m Trial 28 finished with value: 2162729069.0272393 and parameters: {'bagging_fraction': 0.5075440331178458, 'bagging_freq': 3}. 
Best is trial 28 with value: 2162729069.0272393.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 30%|### | 3/10 [00:21<00:49, 7.05s/it]\u001b[32m[I 2021-08-22 21:15:27,150]\u001b[0m Trial 29 finished with value: 2003355452.8831115 and parameters: {'bagging_fraction': 0.757776235401641, 'bagging_freq': 1}. Best is trial 29 with value: 2003355452.8831115.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 40%|#### | 4/10 [00:28<00:42, 7.02s/it]\u001b[32m[I 2021-08-22 21:15:34,105]\u001b[0m Trial 30 finished with value: 2169017536.089679 and parameters: {'bagging_fraction': 0.5470758964212703, 'bagging_freq': 4}. Best is trial 29 with value: 2003355452.8831115.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 50%|##### | 5/10 [00:33<00:32, 6.49s/it]\u001b[32m[I 2021-08-22 21:15:39,358]\u001b[0m Trial 31 finished with value: 1949886129.0973551 and parameters: {'bagging_fraction': 0.7729694462744219, 'bagging_freq': 1}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 60%|###### | 6/10 [00:40<00:26, 6.53s/it]\u001b[32m[I 2021-08-22 21:15:45,996]\u001b[0m Trial 32 finished with value: 2082597134.5380604 and parameters: {'bagging_fraction': 0.6293524485160634, 'bagging_freq': 6}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 70%|####### | 7/10 [00:46<00:19, 6.57s/it]\u001b[32m[I 2021-08-22 21:15:52,653]\u001b[0m Trial 33 finished with value: 2128522268.181099 and parameters: {'bagging_fraction': 0.5194460357854906, 'bagging_freq': 4}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 80%|######## | 8/10 [00:53<00:12, 6.49s/it]\u001b[32m[I 2021-08-22 21:15:58,939]\u001b[0m Trial 34 finished with value: 1972329936.356194 and parameters: {'bagging_fraction': 0.7021495661140726, 'bagging_freq': 2}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 90%|######### | 9/10 [00:58<00:06, 6.15s/it]\u001b[32m[I 2021-08-22 21:16:04,316]\u001b[0m Trial 35 finished with value: 2035847515.436036 and parameters: {'bagging_fraction': 0.7365019160691924, 'bagging_freq': 1}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 100%|##########| 10/10 [01:05<00:00, 6.27s/it]\u001b[32m[I 2021-08-22 21:16:10,869]\u001b[0m Trial 36 finished with value: 2089685881.7609503 and parameters: {'bagging_fraction': 0.5702856203071842, 'bagging_freq': 6}. Best is trial 31 with value: 1949886129.0973551.\u001b[0m\n", + "bagging, val_score: 1870787631.458499: 100%|##########| 10/10 [01:05<00:00, 6.50s/it]\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 17%|#6 | 1/6 [00:05<00:25, 5.10s/it]\u001b[32m[I 2021-08-22 21:16:15,976]\u001b[0m Trial 37 finished with value: 1915845450.4267912 and parameters: {'feature_fraction': 0.9799999999999999}. Best is trial 37 with value: 1915845450.4267912.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 33%|###3 | 2/6 [00:09<00:20, 5.02s/it]\u001b[32m[I 2021-08-22 21:16:20,814]\u001b[0m Trial 38 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.852}. 
Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 50%|##### | 3/6 [00:14<00:14, 4.95s/it]\u001b[32m[I 2021-08-22 21:16:25,594]\u001b[0m Trial 39 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.8839999999999999}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 67%|######6 | 4/6 [00:20<00:10, 5.08s/it]\u001b[32m[I 2021-08-22 21:16:30,990]\u001b[0m Trial 40 finished with value: 1915845450.4267912 and parameters: {'feature_fraction': 0.948}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 83%|########3 | 5/6 [00:25<00:05, 5.19s/it]\u001b[32m[I 2021-08-22 21:16:36,418]\u001b[0m Trial 41 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.9159999999999999}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 100%|##########| 6/6 [00:31<00:00, 5.39s/it]\u001b[32m[I 2021-08-22 21:16:42,282]\u001b[0m Trial 42 finished with value: 1870787631.4584987 and parameters: {'feature_fraction': 0.82}. Best is trial 38 with value: 1870787631.4584987.\u001b[0m\n", + "feature_fraction_stage2, val_score: 1870787631.458499: 100%|##########| 6/6 [00:31<00:00, 5.24s/it]\n", + "regularization_factors, val_score: 1870787631.458499: 5%|5 | 1/20 [00:05<01:41, 5.36s/it]\u001b[32m[I 2021-08-22 21:16:47,653]\u001b[0m Trial 43 finished with value: 1870787631.491234 and parameters: {'lambda_l1': 6.212193776886605e-06, 'lambda_l2': 3.009357838100163e-08}. Best is trial 43 with value: 1870787631.491234.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 10%|# | 2/20 [00:12<01:44, 5.82s/it]\u001b[32m[I 2021-08-22 21:16:54,524]\u001b[0m Trial 44 finished with value: 1870787534.9732666 and parameters: {'lambda_l1': 4.443479994016017e-08, 'lambda_l2': 3.556819404354524e-05}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 15%|#5 | 3/20 [00:17<01:36, 5.65s/it]\u001b[32m[I 2021-08-22 21:16:59,795]\u001b[0m Trial 45 finished with value: 1870787622.4979687 and parameters: {'lambda_l1': 0.014465195791714083, 'lambda_l2': 2.1021138174252987e-07}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 20%|## | 4/20 [00:22<01:28, 5.52s/it]\u001b[32m[I 2021-08-22 21:17:05,006]\u001b[0m Trial 46 finished with value: 1870787619.5326774 and parameters: {'lambda_l1': 1.429709851157171e-06, 'lambda_l2': 4.419238564285042e-06}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 25%|##5 | 5/20 [00:28<01:22, 5.51s/it]\u001b[32m[I 2021-08-22 21:17:10,496]\u001b[0m Trial 47 finished with value: 1870787590.2959824 and parameters: {'lambda_l1': 0.06926729801332972, 'lambda_l2': 1.6922599199508456e-08}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 30%|### | 6/20 [00:33<01:16, 5.44s/it]\u001b[32m[I 2021-08-22 21:17:15,768]\u001b[0m Trial 48 finished with value: 1870787630.8453631 and parameters: {'lambda_l1': 2.052098028013423e-08, 'lambda_l2': 2.3337138419589934e-07}. 
Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 35%|###5 | 7/20 [00:38<01:10, 5.39s/it]\u001b[32m[I 2021-08-22 21:17:21,046]\u001b[0m Trial 49 finished with value: 1876699891.9815595 and parameters: {'lambda_l1': 4.304944401004946e-06, 'lambda_l2': 0.0007656130560392606}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 40%|#### | 8/20 [00:44<01:04, 5.37s/it]\u001b[32m[I 2021-08-22 21:17:26,372]\u001b[0m Trial 50 finished with value: 1894915643.8553405 and parameters: {'lambda_l1': 5.783302142631901e-07, 'lambda_l2': 0.005857904967523283}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1870787534.973267: 45%|####5 | 9/20 [00:49<00:58, 5.32s/it]\u001b[32m[I 2021-08-22 21:17:31,570]\u001b[0m Trial 51 finished with value: 1877059069.215943 and parameters: {'lambda_l1': 2.9196198134708893, 'lambda_l2': 4.6093049397982125e-05}. Best is trial 44 with value: 1870787534.9732666.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 50%|##### | 10/20 [00:54<00:53, 5.34s/it]\u001b[32m[I 2021-08-22 21:17:36,944]\u001b[0m Trial 52 finished with value: 1853015384.453637 and parameters: {'lambda_l1': 0.09558504914610533, 'lambda_l2': 3.220273228431258}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 55%|#####5 | 11/20 [01:00<00:49, 5.45s/it]\u001b[32m[I 2021-08-22 21:17:42,672]\u001b[0m Trial 53 finished with value: 1896992309.3706267 and parameters: {'lambda_l1': 6.575749289036579, 'lambda_l2': 7.662096538085835}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 60%|###### | 12/20 [01:06<00:44, 5.56s/it]\u001b[32m[I 2021-08-22 21:17:48,473]\u001b[0m Trial 54 finished with value: 1893493622.3798478 and parameters: {'lambda_l1': 0.0008722383951977965, 'lambda_l2': 0.13339065517857865}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 65%|######5 | 13/20 [01:12<00:39, 5.68s/it]\u001b[32m[I 2021-08-22 21:17:54,425]\u001b[0m Trial 55 finished with value: 1895459424.4650118 and parameters: {'lambda_l1': 0.046018652714269824, 'lambda_l2': 3.596577171855534}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 70%|####### | 14/20 [01:17<00:33, 5.57s/it]\u001b[32m[I 2021-08-22 21:17:59,760]\u001b[0m Trial 56 finished with value: 1902235965.6523015 and parameters: {'lambda_l1': 0.0004372538444200538, 'lambda_l2': 0.018403234102680837}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 75%|#######5 | 15/20 [01:23<00:27, 5.57s/it]\u001b[32m[I 2021-08-22 21:18:05,311]\u001b[0m Trial 57 finished with value: 1870787547.884662 and parameters: {'lambda_l1': 1.5644444125721077e-08, 'lambda_l2': 3.06684587723285e-05}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 80%|######## | 16/20 [01:28<00:22, 5.56s/it]\u001b[32m[I 2021-08-22 21:18:10,860]\u001b[0m Trial 58 finished with value: 1952628057.3679152 and parameters: {'lambda_l1': 0.41090501100060367, 'lambda_l2': 0.4784149571785825}. 
Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 85%|########5 | 17/20 [01:34<00:16, 5.66s/it]\u001b[32m[I 2021-08-22 21:18:16,748]\u001b[0m Trial 59 finished with value: 1874516545.805995 and parameters: {'lambda_l1': 4.63176126114126e-05, 'lambda_l2': 0.0002597320589400073}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 90%|######### | 18/20 [01:40<00:11, 5.80s/it]\u001b[32m[I 2021-08-22 21:18:22,861]\u001b[0m Trial 60 finished with value: 1870787615.0912282 and parameters: {'lambda_l1': 0.007392465833323452, 'lambda_l2': 4.3888082066628725e-06}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 95%|#########5| 19/20 [01:46<00:05, 5.72s/it]\u001b[32m[I 2021-08-22 21:18:28,403]\u001b[0m Trial 61 finished with value: 1859899631.3896043 and parameters: {'lambda_l1': 1.573671858757602e-07, 'lambda_l2': 0.0021763888923074476}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 100%|##########| 20/20 [01:51<00:00, 5.70s/it]\u001b[32m[I 2021-08-22 21:18:34,066]\u001b[0m Trial 62 finished with value: 1905701773.7289548 and parameters: {'lambda_l1': 0.7977530430827201, 'lambda_l2': 0.6661855838737094}. Best is trial 52 with value: 1853015384.453637.\u001b[0m\n", + "regularization_factors, val_score: 1853015384.453637: 100%|##########| 20/20 [01:51<00:00, 5.59s/it]\n", + "min_data_in_leaf, val_score: 1853015384.453637: 20%|## | 1/5 [00:04<00:19, 4.95s/it]\u001b[32m[I 2021-08-22 21:18:39,027]\u001b[0m Trial 63 finished with value: 1859286747.0773554 and parameters: {'min_child_samples': 10}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n", + "min_data_in_leaf, val_score: 1853015384.453637: 40%|#### | 2/5 [00:09<00:14, 4.87s/it]\u001b[32m[I 2021-08-22 21:18:43,705]\u001b[0m Trial 64 finished with value: 1877906183.7743464 and parameters: {'min_child_samples': 5}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n", + "min_data_in_leaf, val_score: 1853015384.453637: 60%|###### | 3/5 [00:15<00:10, 5.15s/it]\u001b[32m[I 2021-08-22 21:18:49,514]\u001b[0m Trial 65 finished with value: 1996406986.7947733 and parameters: {'min_child_samples': 100}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n", + "min_data_in_leaf, val_score: 1853015384.453637: 80%|######## | 4/5 [00:22<00:05, 5.82s/it]\u001b[32m[I 2021-08-22 21:18:56,904]\u001b[0m Trial 66 finished with value: 1983678395.4383106 and parameters: {'min_child_samples': 50}. Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n", + "min_data_in_leaf, val_score: 1853015384.453637: 100%|##########| 5/5 [00:28<00:00, 5.92s/it]\u001b[32m[I 2021-08-22 21:19:03,042]\u001b[0m Trial 67 finished with value: 1906448776.6538603 and parameters: {'min_child_samples': 25}. 
Best is trial 63 with value: 1859286747.0773554.\u001b[0m\n", + "min_data_in_leaf, val_score: 1853015384.453637: 100%|##########| 5/5 [00:28<00:00, 5.79s/it]" ] }, { + "output_type": "stream", "name": "stdout", - "output_type": "stream", "text": [ - "CPU times: user 1h 2min 37s, sys: 31.5 s, total: 1h 3min 9s\n", - "Wall time: 8min 51s\n" + "CPU times: user 5min 24s, sys: 17.3 s, total: 5min 41s\n", + "Wall time: 5min 42s\n" ] }, { - "name": "stderr", "output_type": "stream", + "name": "stderr", "text": [ "\n" ] } ], - "source": [ - "%%time\n", - "model = lgb.train(params, dtrain, valid_sets=[dtrain, dval], verbose_eval=10000) \n" - ] + "metadata": { + "tags": [ + "outputPrepend" + ] + } }, { "cell_type": "markdown", - "metadata": {}, - "source": [] + "source": [], + "metadata": {} }, { "cell_type": "code", - "execution_count": 21, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Optuna LightGBM Tuner r2 = 0.8449953275302208\n" - ] - } - ], + "execution_count": 20, "source": [ "y_pred = model.predict(X_test)\n", "from flaml.ml import sklearn_metric_loss_score\n", "print('Optuna LightGBM Tuner r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Optuna LightGBM Tuner r2 = 0.8428464421292586\n" + ] + } + ], + "metadata": { + "tags": [] + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 4. Add a customized LightGBM learner in FLAML\n", "The native API of LightGBM allows one to specify a custom objective function in the model constructor. You can easily enable it by adding a customized LightGBM learner in FLAML. In the following example, we show how to add such a customized LightGBM learner with a custom objective function." 
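The hunk below (`@@ -791,113 +819,20 @@`) elides the objective's body and its imports, so here is a minimal sketch of the expected shape rather than the notebook's exact code: it assumes the learner subclasses `flaml.model.LGBMEstimator`, and a plain squared-error loss stands in for the elided `my_loss_obj`. LightGBM's scikit-learn API accepts any callable objective that returns the gradient and Hessian of the loss with respect to the raw predictions.

import numpy as np
from flaml.model import LGBMEstimator  # assumed location of the base class; the import is elided in the hunk below

def my_loss_obj(y_true, y_pred):
    # Hypothetical stand-in for the elided objective: 0.5 * (y_pred - y_true)**2
    grad = y_pred - y_true       # first derivative w.r.t. the raw predictions
    hess = np.ones_like(y_pred)  # constant second derivative
    return grad, hess

class MyLGBM(LGBMEstimator):
    '''LGBMEstimator with my_loss_obj as the objective function'''
    def __init__(self, **params):
        super().__init__(objective=my_loss_obj, **params)

Because the loss enters only through the `objective=` keyword of the constructor, FLAML can tune the learner's remaining hyperparameters while the custom loss stays fixed.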
- ] + ], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### Create a customized LightGBM learner with a custom objective function" - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [], + "execution_count": 21, "source": [ "\n", "import numpy as np \n", @@ -791,113 +819,20 @@ "\n", " def __init__(self, **params):\n", " super().__init__(objective=my_loss_obj, **params)" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "markdown", - "metadata": {}, "source": [ "### Add the customized learner in FLAML" - ] + ], + "metadata": {} }, { "cell_type": "code", - "execution_count": 23, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[flaml.automl: 08-12 21:37:52] {1121} INFO - Evaluation method: cv\n", - "[flaml.automl: 08-12 21:37:52] {628} INFO - Using RepeatedKFold\n", - "[flaml.automl: 08-12 21:37:52] {1142} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 08-12 21:37:52] {1162} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n", - "[flaml.automl: 08-12 21:37:52] {1252} INFO - iteration 0, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:53] {1405} INFO - at 0.4s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", - "[flaml.automl: 08-12 21:37:53] {1252} INFO - iteration 1, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:53] {1405} INFO - at 0.6s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", - "[flaml.automl: 08-12 21:37:53] {1252} INFO - iteration 2, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:53] {1405} INFO - at 0.9s,\tbest my_lgbm's error=1.7536,\tbest my_lgbm's error=1.7536\n", - "[flaml.automl: 08-12 21:37:53] {1252} INFO - iteration 3, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:53] {1405} INFO - at 1.1s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 08-12 21:37:53] {1252} INFO - iteration 4, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:54] {1405} INFO - at 1.3s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 08-12 21:37:54] {1252} INFO - iteration 5, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:54] {1405} INFO - at 1.5s,\tbest my_lgbm's error=0.4529,\tbest my_lgbm's error=0.4529\n", - "[flaml.automl: 08-12 21:37:54] {1252} INFO - iteration 6, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:54] {1405} INFO - at 1.8s,\tbest my_lgbm's error=0.3159,\tbest my_lgbm's error=0.3159\n", - "[flaml.automl: 08-12 21:37:54] {1252} INFO - iteration 7, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:54] {1405} INFO - at 2.1s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", - "[flaml.automl: 08-12 21:37:54] {1252} INFO - iteration 8, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:55] {1405} INFO - at 2.4s,\tbest my_lgbm's error=0.2717,\tbest my_lgbm's error=0.2717\n", - "[flaml.automl: 08-12 21:37:55] {1252} INFO - iteration 9, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:55] {1405} INFO - at 2.8s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", - "[flaml.automl: 08-12 21:37:55] {1252} INFO - iteration 10, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:55] {1405} INFO - at 3.0s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", - "[flaml.automl: 08-12 21:37:55] {1252} INFO - iteration 11, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:55] {1405} 
INFO - at 3.2s,\tbest my_lgbm's error=0.2073,\tbest my_lgbm's error=0.2073\n", - "[flaml.automl: 08-12 21:37:56] {1252} INFO - iteration 12, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:56] {1405} INFO - at 3.8s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 08-12 21:37:56] {1252} INFO - iteration 13, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:56] {1405} INFO - at 4.2s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 08-12 21:37:56] {1252} INFO - iteration 14, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:58] {1405} INFO - at 5.3s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 08-12 21:37:58] {1252} INFO - iteration 15, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:58] {1405} INFO - at 5.9s,\tbest my_lgbm's error=0.1883,\tbest my_lgbm's error=0.1883\n", - "[flaml.automl: 08-12 21:37:58] {1252} INFO - iteration 16, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:37:59] {1405} INFO - at 6.9s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:37:59] {1252} INFO - iteration 17, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:00] {1405} INFO - at 7.3s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:38:00] {1252} INFO - iteration 18, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:01] {1405} INFO - at 9.1s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:38:01] {1252} INFO - iteration 19, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:02] {1405} INFO - at 9.4s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:38:02] {1252} INFO - iteration 20, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:06] {1405} INFO - at 13.7s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:38:06] {1252} INFO - iteration 21, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:07] {1405} INFO - at 14.3s,\tbest my_lgbm's error=0.1878,\tbest my_lgbm's error=0.1878\n", - "[flaml.automl: 08-12 21:38:07] {1252} INFO - iteration 22, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:08] {1405} INFO - at 15.5s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 08-12 21:38:08] {1252} INFO - iteration 23, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:08] {1405} INFO - at 16.2s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 08-12 21:38:08] {1252} INFO - iteration 24, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:10] {1405} INFO - at 17.8s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 08-12 21:38:10] {1252} INFO - iteration 25, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:10] {1405} INFO - at 18.1s,\tbest my_lgbm's error=0.1751,\tbest my_lgbm's error=0.1751\n", - "[flaml.automl: 08-12 21:38:10] {1252} INFO - iteration 26, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:18] {1405} INFO - at 25.8s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 08-12 21:38:18] {1252} INFO - iteration 27, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:22] {1405} INFO - at 29.5s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 08-12 21:38:22] {1252} INFO - iteration 28, current learner my_lgbm\n", - "[flaml.automl: 
08-12 21:38:25] {1405} INFO - at 32.4s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 08-12 21:38:25] {1252} INFO - iteration 29, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:27] {1405} INFO - at 34.8s,\tbest my_lgbm's error=0.1660,\tbest my_lgbm's error=0.1660\n", - "[flaml.automl: 08-12 21:38:27] {1252} INFO - iteration 30, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:49] {1405} INFO - at 56.7s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:38:49] {1252} INFO - iteration 31, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:38:50] {1405} INFO - at 57.7s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:38:50] {1252} INFO - iteration 32, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:39:07] {1405} INFO - at 75.1s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:39:07] {1252} INFO - iteration 33, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:39:45] {1405} INFO - at 112.5s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:39:45] {1252} INFO - iteration 34, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:39:52] {1405} INFO - at 120.2s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:39:52] {1252} INFO - iteration 35, current learner my_lgbm\n", - "[flaml.automl: 08-12 21:40:18] {1405} INFO - at 146.0s,\tbest my_lgbm's error=0.1611,\tbest my_lgbm's error=0.1611\n", - "[flaml.automl: 08-12 21:40:18] {1461} INFO - selected model: LGBMRegressor(colsample_bytree=0.7808828637589872,\n", - " learning_rate=0.1353935721742319, max_bin=256,\n", - " min_child_samples=112, n_estimators=1069, num_leaves=135,\n", - " objective=,\n", - " reg_alpha=0.003923158789997704, reg_lambda=4.093134402981548,\n", - " verbose=-1)\n", - "[flaml.automl: 08-12 21:40:18] {1184} INFO - fit succeeded\n", - "[flaml.automl: 08-12 21:40:18] {1185} INFO - Time taken to find the best model: 56.6591739654541\n" - ] - } - ], + "execution_count": 22, "source": [ "automl = AutoML()\n", "automl.add_learner(learner_name='my_lgbm', learner_class=MyLGBM)\n", @@ -909,42 +844,122 @@ " \"log_file_name\": 'houses_experiment_my_lgbm.log', # flaml log file\n", "}\n", "automl.fit(X_train=X_train, y_train=y_train, **settings)" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": { - "tags": [] - }, + ], "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stderr", "text": [ - "Best hyperparmeter config: {'n_estimators': 1069, 'num_leaves': 135, 'min_child_samples': 112, 'learning_rate': 0.1353935721742319, 'subsample': 1.0, 'log_max_bin': 9, 'colsample_bytree': 0.7808828637589872, 'reg_alpha': 0.003923158789997704, 'reg_lambda': 4.093134402981548}\n", - "Best r2 on validation data: 0.8389\n", - "Training duration of best run: 21.89 s\n", - "Predicted labels [144004.59232945 244728.76267583 142055.18306456 ... 188807.72145446\n", - " 235921.97531295 287399.52640133]\n", - "True labels 14740 136900.0\n", - "10101 241300.0\n", - "20566 200700.0\n", - "2670 72500.0\n", - "15709 460000.0\n", - " ... 
\n", - "13132 121200.0\n", - "8228 137500.0\n", - "3948 160900.0\n", - "8522 227300.0\n", - "16798 265600.0\n", - "Name: median_house_value, Length: 5160, dtype: float64\n", - "r2 = 0.8448624478693473\n", - "mse = 2050686747.6576138\n", - "mae = 30682.547208847514\n" + "[flaml.automl: 08-22 21:19:04] {1130} INFO - Evaluation method: cv\n", + "[flaml.automl: 08-22 21:19:04] {634} INFO - Using RepeatedKFold\n", + "[flaml.automl: 08-22 21:19:04] {1155} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 08-22 21:19:04] {1175} INFO - List of ML learners in AutoML Run: ['my_lgbm']\n", + "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 0, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.2s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", + "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 1, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.3s,\tbest my_lgbm's error=2.9888,\tbest my_lgbm's error=2.9888\n", + "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 2, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:04] {1515} INFO - at 0.5s,\tbest my_lgbm's error=1.7087,\tbest my_lgbm's error=1.7087\n", + "[flaml.automl: 08-22 21:19:04] {1358} INFO - iteration 3, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 0.6s,\tbest my_lgbm's error=0.3465,\tbest my_lgbm's error=0.3465\n", + "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 4, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 0.8s,\tbest my_lgbm's error=0.3465,\tbest my_lgbm's error=0.3465\n", + "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 5, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.0s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n", + "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 6, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.2s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n", + "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 7, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:05] {1515} INFO - at 1.3s,\tbest my_lgbm's error=0.3005,\tbest my_lgbm's error=0.3005\n", + "[flaml.automl: 08-22 21:19:05] {1358} INFO - iteration 8, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:06] {1515} INFO - at 1.6s,\tbest my_lgbm's error=0.2709,\tbest my_lgbm's error=0.2709\n", + "[flaml.automl: 08-22 21:19:06] {1358} INFO - iteration 9, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:06] {1515} INFO - at 1.8s,\tbest my_lgbm's error=0.2709,\tbest my_lgbm's error=0.2709\n", + "[flaml.automl: 08-22 21:19:06] {1358} INFO - iteration 10, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:07] {1515} INFO - at 2.8s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n", + "[flaml.automl: 08-22 21:19:07] {1358} INFO - iteration 11, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:08] {1515} INFO - at 3.9s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n", + "[flaml.automl: 08-22 21:19:08] {1358} INFO - iteration 12, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:09] {1515} INFO - at 4.7s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n", + "[flaml.automl: 08-22 21:19:09] {1358} INFO - iteration 13, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:10] {1515} INFO - at 5.7s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n", + "[flaml.automl: 08-22 
21:19:10] {1358} INFO - iteration 14, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:11] {1515} INFO - at 6.8s,\tbest my_lgbm's error=0.1852,\tbest my_lgbm's error=0.1852\n", + "[flaml.automl: 08-22 21:19:11] {1358} INFO - iteration 15, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:12] {1515} INFO - at 8.1s,\tbest my_lgbm's error=0.1804,\tbest my_lgbm's error=0.1804\n", + "[flaml.automl: 08-22 21:19:12] {1358} INFO - iteration 16, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:13] {1515} INFO - at 9.0s,\tbest my_lgbm's error=0.1804,\tbest my_lgbm's error=0.1804\n", + "[flaml.automl: 08-22 21:19:13] {1358} INFO - iteration 17, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:17] {1515} INFO - at 12.9s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n", + "[flaml.automl: 08-22 21:19:17] {1358} INFO - iteration 18, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:18] {1515} INFO - at 14.3s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n", + "[flaml.automl: 08-22 21:19:18] {1358} INFO - iteration 19, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:20] {1515} INFO - at 15.7s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n", + "[flaml.automl: 08-22 21:19:20] {1358} INFO - iteration 20, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:28] {1515} INFO - at 24.1s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n", + "[flaml.automl: 08-22 21:19:28] {1358} INFO - iteration 21, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:32] {1515} INFO - at 27.9s,\tbest my_lgbm's error=0.1777,\tbest my_lgbm's error=0.1777\n", + "[flaml.automl: 08-22 21:19:32] {1358} INFO - iteration 22, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:34] {1515} INFO - at 30.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:34] {1358} INFO - iteration 23, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:36] {1515} INFO - at 32.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:36] {1358} INFO - iteration 24, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:41] {1515} INFO - at 36.9s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:41] {1358} INFO - iteration 25, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:51] {1515} INFO - at 47.4s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:51] {1358} INFO - iteration 26, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:52] {1515} INFO - at 48.3s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:52] {1358} INFO - iteration 27, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:19:59] {1515} INFO - at 55.1s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:19:59] {1358} INFO - iteration 28, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:00] {1515} INFO - at 56.3s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:20:00] {1358} INFO - iteration 29, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:01] {1515} INFO - at 57.0s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:20:01] {1358} INFO - iteration 30, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:16] {1515} INFO - at 72.2s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's 
error=0.1715\n", + "[flaml.automl: 08-22 21:20:16] {1358} INFO - iteration 31, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:18] {1515} INFO - at 74.1s,\tbest my_lgbm's error=0.1715,\tbest my_lgbm's error=0.1715\n", + "[flaml.automl: 08-22 21:20:18] {1358} INFO - iteration 32, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:22] {1515} INFO - at 78.3s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n", + "[flaml.automl: 08-22 21:20:22] {1358} INFO - iteration 33, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:26] {1515} INFO - at 82.0s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n", + "[flaml.automl: 08-22 21:20:26] {1358} INFO - iteration 34, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:29] {1515} INFO - at 84.6s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n", + "[flaml.automl: 08-22 21:20:29] {1358} INFO - iteration 35, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:46] {1515} INFO - at 101.8s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n", + "[flaml.automl: 08-22 21:20:46] {1358} INFO - iteration 36, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:47] {1515} INFO - at 102.9s,\tbest my_lgbm's error=0.1669,\tbest my_lgbm's error=0.1669\n", + "[flaml.automl: 08-22 21:20:47] {1358} INFO - iteration 37, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:55] {1515} INFO - at 111.2s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n", + "[flaml.automl: 08-22 21:20:55] {1358} INFO - iteration 38, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:20:58] {1515} INFO - at 114.5s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n", + "[flaml.automl: 08-22 21:20:58] {1358} INFO - iteration 39, current learner my_lgbm\n", + "[flaml.automl: 08-22 21:21:37] {1515} INFO - at 153.5s,\tbest my_lgbm's error=0.1597,\tbest my_lgbm's error=0.1597\n", + "[flaml.automl: 08-22 21:21:37] {1592} INFO - selected model: LGBMRegressor(colsample_bytree=0.8251774147208681,\n", + " learning_rate=0.21049408131691624, max_bin=512,\n", + " min_child_samples=19, n_estimators=196, num_leaves=195,\n", + " objective=<function my_loss_obj at 0x...>,\n", + " reg_alpha=0.0009765625, reg_lambda=0.0117923889609937,\n", + " verbose=-1)\n", + "[flaml.automl: 08-22 21:21:39] {1633} INFO - retrain my_lgbm for 1.6s\n", + "[flaml.automl: 08-22 21:21:39] {1636} INFO - retrained model: LGBMRegressor(colsample_bytree=0.8251774147208681,\n", + " learning_rate=0.21049408131691624, max_bin=512,\n", + " min_child_samples=19, n_estimators=196, num_leaves=195,\n", + " objective=<function my_loss_obj at 0x...>,\n", + " reg_alpha=0.0009765625, reg_lambda=0.0117923889609937,\n", + " verbose=-1)\n", + "[flaml.automl: 08-22 21:21:39] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:21:39] {1200} INFO - Time taken to find the best model: 111.22549629211426\n", + "[flaml.automl: 08-22 21:21:39] {1205} WARNING - Time taken to find the best model is 74% of the provided time budget and not all estimators' hyperparameter search converged. 
Consider increasing the time budget.\n" ] } ], + "metadata": { + "tags": [] + } + }, + { + "cell_type": "code", + "execution_count": 23, "source": [ "print('Best hyperparameter config:', automl.best_config)\n", "print('Best r2 on validation data: {0:.4g}'.format(1-automl.best_loss))\n", @@ -958,7 +973,38 @@ "print('r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))\n", "print('mse', '=', sklearn_metric_loss_score('mse', y_pred, y_test))\n", "print('mae', '=', sklearn_metric_loss_score('mae', y_pred, y_test))" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Best hyperparameter config: {'n_estimators': 196, 'num_leaves': 195, 'min_child_samples': 19, 'learning_rate': 0.21049408131691624, 'log_max_bin': 10, 'colsample_bytree': 0.8251774147208681, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.0117923889609937}\n", + "Best r2 on validation data: 0.8403\n", + "Training duration of best run: 8.28 s\n", + "Predicted labels [137336.50894266 249721.8950541 155077.11127769 ... 191822.32898046\n", + " 197332.92376977 286448.29599298]\n", + "True labels 14740 136900.0\n", + "10101 241300.0\n", + "20566 200700.0\n", + "2670 72500.0\n", + "15709 460000.0\n", + " ... \n", + "13132 121200.0\n", + "8228 137500.0\n", + "3948 160900.0\n", + "8522 227300.0\n", + "16798 265600.0\n", + "Name: median_house_value, Length: 5160, dtype: float64\n", + "r2 = 0.8498843855121221\n", + "mse = 1984304232.0760334\n", + "mae = 29465.919207148785\n" + ] + } + ], + "metadata": { + "tags": [] + } } ], "metadata": { @@ -966,12 +1012,20 @@ "hash": "ea9f131eb1b7663628f6445553ba215a834e2f0b4d18774746f0f47938ce4671" }, "kernelspec": { - "display_name": "Python 3.8.0 64-bit ('test': conda)", - "name": "python3" + "name": "python3", + "display_name": "Python 3.8.0 64-bit ('blend': conda)" }, "language_info": { "name": "python", - "version": "" + "version": "3.8.0", + "mimetype": "text/x-python", + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "pygments_lexer": "ipython3", + "nbconvert_exporter": "python", + "file_extension": ".py" } }, "nbformat": 4, diff --git a/notebook/flaml_xgboost.ipynb b/notebook/flaml_xgboost.ipynb index 6c2c35daf0..8f2e14181d 100644 --- a/notebook/flaml_xgboost.ipynb +++ b/notebook/flaml_xgboost.ipynb @@ -2,11 +2,6 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, "source": [ "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n", "\n", @@ -31,90 +26,92 @@ "```bash\n", "pip install flaml[notebook]\n", "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install flaml[notebook];" - ] - }, - { - "cell_type": "markdown", + ], "metadata": { "slideshow": { "slide_type": "slide" } - }, + } + }, + { + "cell_type": "code", + "execution_count": null, + "source": [ + "!pip install flaml[notebook];" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "## 2. Regression Example\n", + "### Load data and preprocess\n", + "\n", + "Download [houses dataset](https://www.openml.org/d/537) from OpenML. The task is to predict the median house price in a region from the region's demographic composition and the state of its housing market."
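A note on the metric cells earlier in this notebook diff: FLAML minimizes the error metric 1-r2, so the notebooks recover validation r2 as `1 - automl.best_loss`, and `sklearn_metric_loss_score` likewise reports every metric as a loss to be minimized. A minimal sketch of the same conversions with plain scikit-learn, assuming the `y_pred` and `y_test` variables from the cells above:

```python
# A minimal sketch, not part of the patch: reproduce the printed test metrics
# with plain scikit-learn. Assumes y_pred and y_test from the cells above.
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error

print('r2', '=', r2_score(y_test, y_pred))             # equals 1 - sklearn_metric_loss_score('r2', y_pred, y_test)
print('mse', '=', mean_squared_error(y_test, y_pred))  # equals sklearn_metric_loss_score('mse', y_pred, y_test)
print('mae', '=', mean_absolute_error(y_test, y_pred)) # equals sklearn_metric_loss_score('mae', y_pred, y_test)
```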
- ] + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", - "execution_count": 19, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, + "execution_count": 1, + "source": [ + "from flaml.data import load_openml_dataset\n", + "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=537, data_dir='./')" + ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "load dataset from ./openml_ds537.pkl\nDataset name: houses\nX_train.shape: (15480, 8), y_train.shape: (15480,);\nX_test.shape: (5160, 8), y_test.shape: (5160,)\n" + "load dataset from ./openml_ds537.pkl\n", + "Dataset name: houses\n", + "X_train.shape: (15480, 8), y_train.shape: (15480,);\n", + "X_test.shape: (5160, 8), y_test.shape: (5160,)\n" ] } ], - "source": [ - "from flaml.data import load_openml_dataset\n", - "X_train, X_test, y_train, y_test = load_openml_dataset(dataset_id=537, data_dir='./')" - ] + "metadata": { + "slideshow": { + "slide_type": "subslide" + }, + "tags": [] + } }, { "cell_type": "markdown", + "source": [ + "### Run FLAML\n", + "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. " + ], "metadata": { "slideshow": { "slide_type": "slide" } - }, - "source": [ - "### Run FLAML\n", - "In the FLAML automl run configuration, users can specify the task type, time budget, error metric, learner list, whether to subsample, resampling strategy type, and so on. All these arguments have default values which will be used if users do not provide them. " - ] + } }, { "cell_type": "code", "execution_count": 2, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [], "source": [ "''' import AutoML class from flaml package '''\n", "from flaml import AutoML\n", "automl = AutoML()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, + ], + "outputs": [], "metadata": { "slideshow": { "slide_type": "slide" } - }, - "outputs": [], + } + }, + { + "cell_type": "code", + "execution_count": 3, "source": [ "settings = {\n", " \"time_budget\": 60, # total running time in seconds\n", @@ -123,235 +120,246 @@ " \"task\": 'regression', # task type \n", " \"log_file_name\": 'houses_experiment.log', # flaml log file\n", "}" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", "execution_count": 4, - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, + "source": [ + "'''The main flaml automl API'''\n", + "automl.fit(X_train=X_train, y_train=y_train, **settings)" + ], "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "[flaml.automl: 07-06 10:17:07] {908} INFO - Evaluation method: cv\n", - "[flaml.automl: 07-06 10:17:07] {617} INFO - Using RepeatedKFold\n", - "[flaml.automl: 07-06 10:17:07] {929} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 07-06 10:17:07] {948} INFO - List of ML learners in AutoML Run: ['xgboost']\n", - "[flaml.automl: 07-06 10:17:07] {1012} INFO - iteration 0, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:07] {1160} INFO - at 0.1s,\tbest xgboost's error=2.1267,\tbest xgboost's error=2.1267\n", - "[flaml.automl: 07-06 10:17:07] {1012} INFO - iteration 1, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.3s,\tbest xgboost's 
error=2.1267,\tbest xgboost's error=2.1267\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 2, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.4s,\tbest xgboost's error=0.8485,\tbest xgboost's error=0.8485\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 3, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.5s,\tbest xgboost's error=0.4320,\tbest xgboost's error=0.4320\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 4, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.6s,\tbest xgboost's error=0.4320,\tbest xgboost's error=0.4320\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 5, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.8s,\tbest xgboost's error=0.4320,\tbest xgboost's error=0.4320\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 6, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:08] {1160} INFO - at 0.9s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n", - "[flaml.automl: 07-06 10:17:08] {1012} INFO - iteration 7, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:09] {1160} INFO - at 1.2s,\tbest xgboost's error=0.2801,\tbest xgboost's error=0.2801\n", - "[flaml.automl: 07-06 10:17:09] {1012} INFO - iteration 8, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:09] {1160} INFO - at 1.4s,\tbest xgboost's error=0.2801,\tbest xgboost's error=0.2801\n", - "[flaml.automl: 07-06 10:17:09] {1012} INFO - iteration 9, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:09] {1160} INFO - at 1.6s,\tbest xgboost's error=0.2336,\tbest xgboost's error=0.2336\n", - "[flaml.automl: 07-06 10:17:09] {1012} INFO - iteration 10, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:09] {1160} INFO - at 1.8s,\tbest xgboost's error=0.2336,\tbest xgboost's error=0.2336\n", - "[flaml.automl: 07-06 10:17:09] {1012} INFO - iteration 11, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:09] {1160} INFO - at 2.0s,\tbest xgboost's error=0.2336,\tbest xgboost's error=0.2336\n", - "[flaml.automl: 07-06 10:17:09] {1012} INFO - iteration 12, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:10] {1160} INFO - at 2.3s,\tbest xgboost's error=0.2071,\tbest xgboost's error=0.2071\n", - "[flaml.automl: 07-06 10:17:10] {1012} INFO - iteration 13, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:10] {1160} INFO - at 2.5s,\tbest xgboost's error=0.2071,\tbest xgboost's error=0.2071\n", - "[flaml.automl: 07-06 10:17:10] {1012} INFO - iteration 14, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:10] {1160} INFO - at 3.0s,\tbest xgboost's error=0.1999,\tbest xgboost's error=0.1999\n", - "[flaml.automl: 07-06 10:17:10] {1012} INFO - iteration 15, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:11] {1160} INFO - at 3.5s,\tbest xgboost's error=0.1999,\tbest xgboost's error=0.1999\n", - "[flaml.automl: 07-06 10:17:11] {1012} INFO - iteration 16, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:12] {1160} INFO - at 4.4s,\tbest xgboost's error=0.1941,\tbest xgboost's error=0.1941\n", - "[flaml.automl: 07-06 10:17:12] {1012} INFO - iteration 17, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:12] {1160} INFO - at 4.8s,\tbest xgboost's error=0.1941,\tbest xgboost's error=0.1941\n", - "[flaml.automl: 07-06 10:17:12] {1012} INFO - iteration 18, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:16] {1160} INFO - at 9.0s,\tbest 
xgboost's error=0.1862,\tbest xgboost's error=0.1862\n", - "[flaml.automl: 07-06 10:17:16] {1012} INFO - iteration 19, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:18] {1160} INFO - at 10.2s,\tbest xgboost's error=0.1862,\tbest xgboost's error=0.1862\n", - "[flaml.automl: 07-06 10:17:18] {1012} INFO - iteration 20, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:35] {1160} INFO - at 27.4s,\tbest xgboost's error=0.1862,\tbest xgboost's error=0.1862\n", - "[flaml.automl: 07-06 10:17:35] {1012} INFO - iteration 21, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:37] {1160} INFO - at 30.1s,\tbest xgboost's error=0.1862,\tbest xgboost's error=0.1862\n", - "[flaml.automl: 07-06 10:17:37] {1012} INFO - iteration 22, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:44] {1160} INFO - at 36.4s,\tbest xgboost's error=0.1843,\tbest xgboost's error=0.1843\n", - "[flaml.automl: 07-06 10:17:44] {1012} INFO - iteration 23, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:47] {1160} INFO - at 39.6s,\tbest xgboost's error=0.1843,\tbest xgboost's error=0.1843\n", - "[flaml.automl: 07-06 10:17:47] {1012} INFO - iteration 24, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:57] {1160} INFO - at 49.6s,\tbest xgboost's error=0.1843,\tbest xgboost's error=0.1843\n", - "[flaml.automl: 07-06 10:17:57] {1012} INFO - iteration 25, current learner xgboost\n", - "[flaml.automl: 07-06 10:17:58] {1160} INFO - at 50.7s,\tbest xgboost's error=0.1843,\tbest xgboost's error=0.1843\n", - "[flaml.automl: 07-06 10:17:58] {1012} INFO - iteration 26, current learner xgboost\n", - "[flaml.automl: 07-06 10:18:10] {1160} INFO - at 63.0s,\tbest xgboost's error=0.1755,\tbest xgboost's error=0.1755\n", - "[flaml.automl: 07-06 10:18:10] {1206} INFO - selected model: \n", - "[flaml.automl: 07-06 10:18:10] {963} INFO - fit succeeded\n" + "[flaml.automl: 08-22 21:23:40] {1130} INFO - Evaluation method: cv\n", + "[flaml.automl: 08-22 21:23:40] {634} INFO - Using RepeatedKFold\n", + "[flaml.automl: 08-22 21:23:40] {1155} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 08-22 21:23:40] {1175} INFO - List of ML learners in AutoML Run: ['xgboost']\n", + "[flaml.automl: 08-22 21:23:40] {1358} INFO - iteration 0, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.4s,\tbest xgboost's error=2.1267,\tbest xgboost's error=2.1267\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 1, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.6s,\tbest xgboost's error=2.1267,\tbest xgboost's error=2.1267\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 2, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.7s,\tbest xgboost's error=0.8485,\tbest xgboost's error=0.8485\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 3, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 0.9s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 4, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.0s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 5, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.2s,\tbest xgboost's error=0.3799,\tbest xgboost's error=0.3799\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 6, current learner xgboost\n", + 
"[flaml.automl: 08-22 21:23:41] {1515} INFO - at 1.4s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n", + "[flaml.automl: 08-22 21:23:41] {1358} INFO - iteration 7, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 1.6s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n", + "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 8, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 1.8s,\tbest xgboost's error=0.2992,\tbest xgboost's error=0.2992\n", + "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 9, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 2.0s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n", + "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 10, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:42] {1515} INFO - at 2.2s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n", + "[flaml.automl: 08-22 21:23:42] {1358} INFO - iteration 11, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.4s,\tbest xgboost's error=0.2513,\tbest xgboost's error=0.2513\n", + "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 12, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.6s,\tbest xgboost's error=0.2113,\tbest xgboost's error=0.2113\n", + "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 13, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 2.8s,\tbest xgboost's error=0.2113,\tbest xgboost's error=0.2113\n", + "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 14, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:43] {1515} INFO - at 3.2s,\tbest xgboost's error=0.2090,\tbest xgboost's error=0.2090\n", + "[flaml.automl: 08-22 21:23:43] {1358} INFO - iteration 15, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:44] {1515} INFO - at 3.6s,\tbest xgboost's error=0.2090,\tbest xgboost's error=0.2090\n", + "[flaml.automl: 08-22 21:23:44] {1358} INFO - iteration 16, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:44] {1515} INFO - at 4.1s,\tbest xgboost's error=0.1919,\tbest xgboost's error=0.1919\n", + "[flaml.automl: 08-22 21:23:44] {1358} INFO - iteration 17, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:45] {1515} INFO - at 4.4s,\tbest xgboost's error=0.1919,\tbest xgboost's error=0.1919\n", + "[flaml.automl: 08-22 21:23:45] {1358} INFO - iteration 18, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:47] {1515} INFO - at 6.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n", + "[flaml.automl: 08-22 21:23:47] {1358} INFO - iteration 19, current learner xgboost\n", + "[flaml.automl: 08-22 21:23:48] {1515} INFO - at 7.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n", + "[flaml.automl: 08-22 21:23:48] {1358} INFO - iteration 20, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:00] {1515} INFO - at 20.2s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n", + "[flaml.automl: 08-22 21:24:00] {1358} INFO - iteration 21, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:02] {1515} INFO - at 21.9s,\tbest xgboost's error=0.1797,\tbest xgboost's error=0.1797\n", + "[flaml.automl: 08-22 21:24:02] {1358} INFO - iteration 22, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:06] {1515} INFO - at 25.8s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n", + "[flaml.automl: 08-22 21:24:06] {1358} INFO - iteration 23, current 
learner xgboost\n", + "[flaml.automl: 08-22 21:24:08] {1515} INFO - at 28.3s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n", + "[flaml.automl: 08-22 21:24:08] {1358} INFO - iteration 24, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:15] {1515} INFO - at 34.7s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n", + "[flaml.automl: 08-22 21:24:15] {1358} INFO - iteration 25, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:16] {1515} INFO - at 35.5s,\tbest xgboost's error=0.1782,\tbest xgboost's error=0.1782\n", + "[flaml.automl: 08-22 21:24:16] {1358} INFO - iteration 26, current learner xgboost\n", + "[flaml.automl: 08-22 21:24:39] {1515} INFO - at 58.5s,\tbest xgboost's error=0.1660,\tbest xgboost's error=0.1660\n", + "[flaml.automl: 08-22 21:24:39] {1592} INFO - selected model: \n", + "[flaml.automl: 08-22 21:24:43] {1633} INFO - retrain xgboost for 4.4s\n", + "[flaml.automl: 08-22 21:24:43] {1636} INFO - retrained model: \n", + "[flaml.automl: 08-22 21:24:43] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:24:43] {1200} INFO - Time taken to find the best model: 58.49340343475342\n", + "[flaml.automl: 08-22 21:24:43] {1205} WARNING - Time taken to find the best model is 97% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" ] } ], - "source": [ "'''The main flaml automl API'''\n", "automl.fit(X_train=X_train, y_train=y_train, **settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "### Best model and metric" - ] - }, - { - "cell_type": "code", - "execution_count": 5, "metadata": { "slideshow": { "slide_type": "slide" }, "tags": [] - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Best hyperparameter config: {'n_estimators': 1430.0, 'max_leaves': 160.0, 'min_child_weight': 66.34713625314276, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5185440993287367, 'colsample_bytree': 0.5812293817391292, 'reg_alpha': 0.007546483534701107, 'reg_lambda': 6.663009697009309}\nBest r2 on validation data: 0.8245\nTraining duration of best run: 12.28 s\n" - ] - } + } + }, + { + "cell_type": "markdown", + "source": [ + "### Best model and metric" + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } + }, + { + "cell_type": "code", + "execution_count": 5, "source": [ "''' retrieve best config'''\n", "print('Best hyperparameter config:', automl.best_config)\n", "print('Best r2 on validation data: {0:.4g}'.format(1 - automl.best_loss))\n", "print('Training duration of best run: {0:.4g} s'.format(automl.best_config_train_time))" - ] - }, - { - "cell_type": "code", - "execution_count": 18, + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Best hyperparameter config: {'n_estimators': 776, 'max_leaves': 160, 'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}\n", + "Best r2 on validation data: 0.834\n", + "Training duration of best run: 23 s\n" + ] + } + ], "metadata": { "slideshow": { "slide_type": "slide" - } - }, + }, + "tags": [] + } + }, + { + "cell_type": "code", + "execution_count": 6, + "source": [ + "automl.model.estimator" + ], "outputs": [ { "output_type":
"execute_result", "data": { "text/plain": [ - "" + "" ] }, "metadata": {}, - "execution_count": 18 + "execution_count": 6 } ], - "source": [ - "automl.model.estimator" - ] - }, - { - "cell_type": "code", - "execution_count": 7, "metadata": { "slideshow": { "slide_type": "slide" } - }, - "outputs": [], + } + }, + { + "cell_type": "code", + "execution_count": 7, "source": [ "''' pickle and save the automl object '''\n", "import pickle\n", "with open('automl.pkl', 'wb') as f:\n", " pickle.dump(automl, f, pickle.HIGHEST_PROTOCOL)" - ] + ], + "outputs": [], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { "cell_type": "code", "execution_count": 8, - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Predicted labels [145392.75 233295.77 143450.25 ... 212370.64 206201.98 259638.22]\nTrue labels [136900. 241300. 200700. ... 160900. 227300. 265600.]\n" - ] - } - ], "source": [ "''' compute predictions of testing dataset ''' \n", "y_pred = automl.predict(X_test)\n", "print('Predicted labels', y_pred)\n", "print('True labels', y_test)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, + ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "r2 = 0.834848817994438\nmse = 2183051979.6623774\nmae = 31703.565776972806\n" + "Predicted labels [137582.95 255519.23 139866.06 ... 185638.95 202493.78 269308.22]\n", + "True labels 14740 136900.0\n", + "10101 241300.0\n", + "20566 200700.0\n", + "2670 72500.0\n", + "15709 460000.0\n", + " ... \n", + "13132 121200.0\n", + "8228 137500.0\n", + "3948 160900.0\n", + "8522 227300.0\n", + "16798 265600.0\n", + "Name: median_house_value, Length: 5160, dtype: float64\n" ] } ], + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + } + }, + { + "cell_type": "code", + "execution_count": 9, "source": [ "''' compute different metric values on testing dataset'''\n", "from flaml.ml import sklearn_metric_loss_score\n", "print('r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))\n", "print('mse', '=', sklearn_metric_loss_score('mse', y_pred, y_test))\n", "print('mae', '=', sklearn_metric_loss_score('mae', y_pred, y_test))" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, + ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 1, 'learning_rate': 0.1, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 1, 'learning_rate': 0.1, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4.0, 'max_leaves': 4.0, 'min_child_weight': 0.2620811530815948, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4.0, 'max_leaves': 4.0, 
'min_child_weight': 0.2620811530815948, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4.0, 'max_leaves': 4.0, 'min_child_weight': 1.8630223791107017, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4.0, 'max_leaves': 4.0, 'min_child_weight': 1.8630223791107017, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11.0, 'max_leaves': 4.0, 'min_child_weight': 5.909231502320304, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 11.0, 'max_leaves': 4.0, 'min_child_weight': 5.909231502320304, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20.0, 'max_leaves': 4.0, 'min_child_weight': 12.035965728320873, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'colsample_bylevel': 0.8811171114303163, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 20.0, 'max_leaves': 4.0, 'min_child_weight': 12.035965728320873, 'learning_rate': 1.0, 'subsample': 0.9814787163243813, 'colsample_bylevel': 0.8811171114303163, 'colsample_bytree': 0.8499027725496043, 'reg_alpha': 0.0022085340760961856, 'reg_lambda': 0.5460627024738893}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20.0, 'max_leaves': 11.0, 'min_child_weight': 17.34876952205098, 'learning_rate': 1.0, 'subsample': 1.0, 'colsample_bylevel': 0.9088550158793876, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 20.0, 'max_leaves': 11.0, 'min_child_weight': 17.34876952205098, 'learning_rate': 1.0, 'subsample': 1.0, 'colsample_bylevel': 0.9088550158793876, 'colsample_bytree': 0.7967145599266738, 'reg_alpha': 0.05680749758595097, 'reg_lambda': 2.756357095973371}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 37.0, 'max_leaves': 15.0, 'min_child_weight': 88.85408165928673, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'colsample_bylevel': 0.9528676738644739, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 37.0, 'max_leaves': 15.0, 'min_child_weight': 88.85408165928673, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'colsample_bylevel': 
0.9528676738644739, 'colsample_bytree': 0.6980216487058154, 'reg_alpha': 0.020158745350617662, 'reg_lambda': 0.954042157679914}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 107.0, 'max_leaves': 8.0, 'min_child_weight': 105.60589895284807, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.7711414100428341, 'colsample_bytree': 0.6531014185931541, 'reg_alpha': 0.006493597884251339, 'reg_lambda': 1.7292368007993169}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 107.0, 'max_leaves': 8.0, 'min_child_weight': 105.60589895284807, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.7711414100428341, 'colsample_bytree': 0.6531014185931541, 'reg_alpha': 0.006493597884251339, 'reg_lambda': 1.7292368007993169}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 186.0, 'max_leaves': 14.0, 'min_child_weight': 15.162111689318957, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.5803009217141496, 'colsample_bytree': 0.5689279468453852, 'reg_alpha': 0.011708252438810483, 'reg_lambda': 0.9012915451024682}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 186.0, 'max_leaves': 14.0, 'min_child_weight': 15.162111689318957, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.5803009217141496, 'colsample_bytree': 0.5689279468453852, 'reg_alpha': 0.011708252438810483, 'reg_lambda': 0.9012915451024682}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 378.0, 'max_leaves': 30.0, 'min_child_weight': 11.101864586090514, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.4280572638757817, 'colsample_bytree': 0.5344640556302928, 'reg_alpha': 0.018278623959341735, 'reg_lambda': 0.3650737309296021}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 378.0, 'max_leaves': 30.0, 'min_child_weight': 11.101864586090514, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.4280572638757817, 'colsample_bytree': 0.5344640556302928, 'reg_alpha': 0.018278623959341735, 'reg_lambda': 0.3650737309296021}}\n{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 409.0, 'max_leaves': 62.0, 'min_child_weight': 15.287199215046616, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.4005513442913169, 'colsample_bytree': 0.5851335537238005, 'reg_alpha': 0.0012769216236275891, 'reg_lambda': 2.7736721098234782}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 409.0, 'max_leaves': 62.0, 'min_child_weight': 15.287199215046616, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.4005513442913169, 'colsample_bytree': 0.5851335537238005, 'reg_alpha': 0.0012769216236275891, 'reg_lambda': 2.7736721098234782}}\n" + "r2 = 0.8439648010832427\n", + "mse = 2062552297.5716143\n", + "mae = 30303.196008584666\n" ] } ], + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + } + }, + { + "cell_type": "code", + "execution_count": 10, "source": [ "from flaml.data import get_output_from_log\n", "time_history, best_valid_loss_history, valid_loss_history, config_history, train_loss_history = \\\n", @@ -359,29 +367,36 @@ "\n", "for config in config_history:\n", " print(config)" - ] + 
], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 0.9999999999999993, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 0.9999999999999993, 'learning_rate': 0.09999999999999995, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0009765625, 'reg_lambda': 1.0}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 0.26208115308159446, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 0.26208115308159446, 'learning_rate': 0.25912534572860507, 'subsample': 0.9266743941610592, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013933617380144255, 'reg_lambda': 0.18096917948292954}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 1.8630223791106992, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 4, 'max_leaves': 4, 'min_child_weight': 1.8630223791106992, 'learning_rate': 1.0, 'subsample': 0.8513627344387318, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.946138073111236, 'reg_alpha': 0.0018311776973217071, 'reg_lambda': 0.27901659190538414}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'max_leaves': 4, 'min_child_weight': 5.909231502320296, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 11, 'max_leaves': 4, 'min_child_weight': 5.909231502320296, 'learning_rate': 1.0, 'subsample': 0.8894434216129232, 'colsample_bylevel': 1.0, 'colsample_bytree': 1.0, 'reg_alpha': 0.0013605736901132325, 'reg_lambda': 0.1222158118565165}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 11, 'max_leaves': 11, 'min_child_weight': 8.517629386811171, 'learning_rate': 1.0, 'subsample': 0.9233328006239466, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.9468117873770695, 'reg_alpha': 0.034996420228767956, 'reg_lambda': 0.6169079461473819}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 11, 'max_leaves': 11, 'min_child_weight': 8.517629386811171, 'learning_rate': 1.0, 'subsample': 0.9233328006239466, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.9468117873770695, 'reg_alpha': 0.034996420228767956, 'reg_lambda': 0.6169079461473819}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 20, 'max_leaves': 15, 'min_child_weight': 43.62419686983011, 
'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.8481188761562112, 'reg_alpha': 0.01241885232679939, 'reg_lambda': 0.21352682817916652}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 20, 'max_leaves': 15, 'min_child_weight': 43.62419686983011, 'learning_rate': 0.6413547778096401, 'subsample': 1.0, 'colsample_bylevel': 1.0, 'colsample_bytree': 0.8481188761562112, 'reg_alpha': 0.01241885232679939, 'reg_lambda': 0.21352682817916652}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 58, 'max_leaves': 8, 'min_child_weight': 51.84874392377363, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.8182737361783602, 'colsample_bytree': 0.8031986460435498, 'reg_alpha': 0.00400039941928546, 'reg_lambda': 0.3870252968100477}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 58, 'max_leaves': 8, 'min_child_weight': 51.84874392377363, 'learning_rate': 0.23511987355535005, 'subsample': 1.0, 'colsample_bylevel': 0.8182737361783602, 'colsample_bytree': 0.8031986460435498, 'reg_alpha': 0.00400039941928546, 'reg_lambda': 0.3870252968100477}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 101, 'max_leaves': 14, 'min_child_weight': 7.444058088783045, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.6274332478496758, 'colsample_bytree': 0.7190251742957809, 'reg_alpha': 0.007212902167942765, 'reg_lambda': 0.20172056689658158}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 101, 'max_leaves': 14, 'min_child_weight': 7.444058088783045, 'learning_rate': 0.39220715578198356, 'subsample': 1.0, 'colsample_bylevel': 0.6274332478496758, 'colsample_bytree': 0.7190251742957809, 'reg_alpha': 0.007212902167942765, 'reg_lambda': 0.20172056689658158}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 205, 'max_leaves': 30, 'min_child_weight': 5.450621032615104, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.47518959001130784, 'colsample_bytree': 0.6845612830806885, 'reg_alpha': 0.01126059820390593, 'reg_lambda': 0.08170816686602438}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 205, 'max_leaves': 30, 'min_child_weight': 5.450621032615104, 'learning_rate': 0.12229148765139466, 'subsample': 0.8895588746662894, 'colsample_bylevel': 0.47518959001130784, 'colsample_bytree': 0.6845612830806885, 'reg_alpha': 0.01126059820390593, 'reg_lambda': 0.08170816686602438}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 222, 'max_leaves': 62, 'min_child_weight': 7.5054716192185795, 'learning_rate': 0.04623175582706431, 'subsample': 0.8756054034199897, 'colsample_bylevel': 0.44768367042684304, 'colsample_bytree': 0.7352307811741962, 'reg_alpha': 0.0009765625, 'reg_lambda': 0.6207832675443758}}\n", + "{'Current Learner': 'xgboost', 'Current Sample': 15480, 'Current Hyper-parameters': {'n_estimators': 776, 'max_leaves': 160, 
'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}, 'Best Learner': 'xgboost', 'Best Hyper-parameters': {'n_estimators': 776, 'max_leaves': 160, 'min_child_weight': 32.57408640781376, 'learning_rate': 0.03478685333241491, 'subsample': 0.9152991332236934, 'colsample_bylevel': 0.5656764254642628, 'colsample_bytree': 0.7313266091895249, 'reg_alpha': 0.005771390107656191, 'reg_lambda': 1.4912667278658753}}\n" + ] + } + ], + "metadata": { + "slideshow": { + "slide_type": "subslide" + }, + "tags": [] + } }, { "cell_type": "code", "execution_count": 11, - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": "
", - "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZAAAAEWCAYAAABIVsEJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3dfbxVVb3v8c/XLShWiqgZIggeidQsyJ2+7FFNQ7slZGbaPR00jexknZM3ErLUY8cbZeWp17UHMlNL84EUqTBSUSsf0q2giIYimrJFRRHzgUTgd/+YY9FkudZiMfdeey1Y3/frtV97zjHHnPO3J+z122OMOcdURGBmZraxtmh2AGZmtmlyAjEzs0KcQMzMrBAnEDMzK8QJxMzMCnECMTOzQpxAzBpA0nslLWx2HGaN5ARimx1Jj0o6pJkxRMSfImJUo44vaaykP0p6QdIySTdLOqJR5zOrxAnErABJHU0891HAlcDFwK7AzsDpwEcKHEuS/Dlghfg/jrUNSVtImizpYUnPSrpC0qDc9islPSnp+fTX/d65bRdK+pGkWZJeAg5KLZ0vS7o37XO5pK1T/QMlLcntX7Vu2v4VSUslPSHpREkhaY8KP4OA7wHfiIjzI+L5iFgbETdHxGdSnTMl/TK3z/B0vC3T+k2SzpZ0C/AyMElSV9l5viRpZlreStJ3JD0m6SlJP5Y0oIf/HLYZcAKxdvIFYDzwfmAX4DngvNz2a4GRwBuBu4FLyvb/JHA28Abgz6nsaOAwYATwNuC4GuevWFfSYcApwCHAHsCBNY4xChgKTK9Rpx6fAiaS/Sw/BkZJGpnb/kng0rQ8FXgzMDrFN4SsxWNtzgnE2slJwGkRsSQiXgHOBI4q/WUeERdExAu5bW+XtF1u/2si4pb0F/8/UtkPIuKJiFgO/IbsQ7aaanWPBn4eEQsi4uV07mp2SN+X1vtDV3FhOt/qiHgeuAY4FiAlkrcAM1OLZyLwpYhYHhEvAP8XOKaH57fNgBOItZPdgKslrZC0AngAWAPsLKlD0tTUvfV34NG0z465/R+vcMwnc8svA6+vcf5qdXcpO3al85Q8m74PrlGnHuXnuJSUQMhaHzNSMtsJ2Aa4K3fdfp/Krc05gVg7eRw4PCIG5r62johusg/NcWTdSNsBw9M+yu3fqKmrl5INhpcMrVF3IdnP8bEadV4i+9AveVOFOuU/y3XATpJGkyWSUvfVM8BKYO/cNdsuImolSmsTTiC2ueonaevc15Zkff1nS9oNQNJOksal+m8AXiH7C38bsm6avnIFcLykPSVtA3y9WsXI3r9wCvB1ScdL2jbdHPAeSdNStXnA+yQNS11wUzYUQES8SnZn1znAILKEQkSsBX4KnCvpjQCShkgaW/intc2GE4htrmaR/eVc+joT+D4wE/iDpBeA24H9U/2Lgb8B3cD9aVufiIhrgR8ANwKLcud+pUr96cAngE8DTwBPAf9NNo5BRFwHXA7cC9wF/LbOUC4la4FdGRGrc+WnluJK3XvXkw3mW5uTXyhl1lok7QncB2xV9kFu1lLcAjFrAZI+mp632B74FvAbJw9rdU4gZq3hs8DTwMNkd4Z9rrnhmG2Yu7DMzKwQt0DMzKyQLZsdQF/acccdY/jw4c0Ow8xsk3LXXXc9ExGveXi0rRLI8OHD6erq2nBFMzNbR9LfKpW7C8vMzApxAjEzs0KcQMzMrBAnEDMzK8QJxMzMCmmru7CssWbM7eac2Qt5YsVKdhk4gEljRzF+zJBmh2XWthr9O+kEsolp1Q/pGXO7mXLVfFa+ugaA7hUrmXLVfICWiM+s3fTF76QTSJNtTEJo5Q/pc2YvXBdXycpX1/CV6ffyqzsea1JUZu1r7mMrWLVm7XplK19dwzmzFzqBbGoqJQpgoxJCK39Id69YWbG8/D+wmfWNar97T1T5XS3CCaQPVGs5bN1vi41KCK38Id2/Y4uKcQwZOIDLP3tAEyIya2/vnjqn4mfGLgMH9No5nEB6WaWWRrWWQ3lZSbWE0Mof0uVJEmBAv451LS0z61uTxo5q+O+kE0gvqtbSqJYoqqmWEFr5Q7rU5daKA/xm7agvfiebmkAkHUb2nuoO4PyImFq2/VzgoLS6DfDGiBiYtq0B5qdtj0XEEX0TdXXVWhrVDBzQj1dWr607IbT6h/T4MUNaJhYza/zvZNMSiKQO4DzgUGAJcKekmRFxf6lORHwpV/8LwJjcIVZGxOi+irc
etQanthCszb27a0C/Ds48Ym9g4xKCP6TNrFU0swWyH7AoIhYDSLoMGAfcX6X+scAZfRRbIbsMHFBx0GpIbiykUqJwQjCzTVEzE8gQ4PHc+hJg/0oVJe0GjADm5Iq3ltQFrAamRsSMKvtOBCYCDBs2rBfCfq3SwHn3ipUIyL8kuNQl5ZaDmW1uNpVB9GOA6RGRH1DYLSK6Je0OzJE0PyIeLt8xIqYB0wA6Ozt7/QXw5QPb+RMMabExCjOz3tTMBNINDM2t75rKKjkG+Hy+ICK60/fFkm4iGx95TQJppBlzu/k/V9zDmnhtXurfsQW3TD64L8MxM+tTzZyN905gpKQRkvqTJYmZ5ZUkvQXYHrgtV7a9pK3S8o7Au6k+dtIQpZZHpeQBrfFwn5lZIzWtBRIRqyWdDMwmu433gohYIOksoCsiSsnkGOCyiPU+qfcEfiJpLVkSnJq/e6svVLplN29ILz7taWbWipo6BhIRs4BZZWWnl62fWWG/W4F9GhrcBtS6ZbdVHu4zM2skv1CqoGrzyXRIfPPIfTxwbmabPSeQgiaNHcWAfh3rlQ3o18F3j367k4eZtYVN5TbellNKEl+Zfi+r1qz1Lbtm1nacQHpg/Jgh66Zdb/ZsuGZmfc1dWGZmVogTiJmZFeIEYmZmhTiBmJlZIU4gZmZWiO/C2giV3nduZtau3AKpU2nyxO4VKwn++b7zZ158pdmhmZk1hRNInaq973zxspeaFJGZWXM5gdSp2uSJAYwb7afPzaz9OIHUqdrkiUMGDuCT+zfmVblmZq3MCaRO1SZP9EC6mbUr34VVJ0+eaGa2vqa2QCQdJmmhpEWSJlfYfpykZZLmpa8Tc9smSHoofU3oi3jHjxnCmGED2X/EIG6ZfLCTh5m1taa1QCR1AOcBhwJLgDslzazwatrLI+Lksn0HAWcAnWTj2HelfZ/rg9DNzIzmtkD2AxZFxOKIWAVcBoyrc9+xwHURsTwljeuAwxoUp5mZVdDMBDIEeDy3viSVlfuYpHslTZc0dCP3RdJESV2SupYtW9YbcZuZGa1/F9ZvgOER8TayVsZFG3uAiJgWEZ0R0bnTTjv1eoBmZu2qmQmkGxiaW981la0TEc9GRGmukPOBfevd18zMGquZCeROYKSkEZL6A8cAM/MVJA3OrR4BPJCWZwMflLS9pO2BD6YyMzPrI027CysiVks6meyDvwO4ICIWSDoL6IqImcAXJR0BrAaWA8elfZdL+gZZEgI4KyKW9/kPYWbWxpr6IGFEzAJmlZWdnlueAkypsu8FwAUNDdDMzKpq9UF0MzNrUU4gZmZWiBOImZkV4gRiZmaFOIGYmVkhTiBmZlaIE4iZmRXiBGJmZoU4gZiZWSFOIGZmVogTiJmZFeIEYmZmhTiBmJlZIU4gZmZWiBOImZkV4gRiZmaFOIGYmVkhTU0gkg6TtFDSIkmTK2w/RdL9ku6VdIOk3XLb1kial75mlu9rZmaN1bRX2krqAM4DDgWWAHdKmhkR9+eqzQU6I+JlSZ8Dvg18Im1bGRGj+zRoMzNbp5ktkP2ARRGxOCJWAZcB4/IVIuLGiHg5rd4O7NrHMZqZWRXNTCBDgMdz60tSWTUnANfm1reW1CXpdknjq+0kaWKq17Vs2bKeRWxmZus0rQtrY0j6V6ATeH+ueLeI6Ja0OzBH0vyIeLh834iYBkwD6OzsjD4J2MysDTSzBdINDM2t75rK1iPpEOA04IiIeKVUHhHd6fti4CZgTCODNTOz9TUzgdwJjJQ0QlJ/4BhgvbupJI0BfkKWPJ7OlW8vaau0vCPwbiA/+G5mZg3WtC6siFgt6WRgNtABXBARCySdBXRFxEzgHOD1wJWSAB6LiCOAPYGfSFpLlgSnlt29ZWZmDdbUMZCImAXMKis7Pbd8SJX9bgX2aWx0ZmZWi59ENzOzQpxA6jRjbjfvnjqHvzyynLmPrWDG3NeM95uZtZVN4jbeZpsxt5spV81n5atrAFi1Zi1TrpoPwPgxtR5dMTPbfLkFUodzZi9clzxKVr66hnNmL2xSRGZmzecEUocnVqzcqHIzs3bgBFKHXQYO2KhyM7N2UDOBSNpW0r9UKH9b40JqPZPGjmJAv471ygb062DS2FFNisjMrPmqJhBJRwN/BX4taYGkd+Y2X9jowFrJ+DFD+OaR+9C/I7tcQwYO4JtH7uMBdDNra7XuwvoqsG9ELJW0H/ALSVMi4mpAfRNe6xg/Zgi/uuMxAC7/7AFNjsbMrPlqJZCOiFgKEBF3SDoI+K2koYBntTUza3O1xkBeyI9/pGRyINlLn/ZucFxmZtbiarVAPkdZV1VEvCDpMODohkZlZmYtr2oLJCLuAR6RdGNZ+asRcUnDIzMzs5ZW8zbeiFgDrJW0XR/FY2Zmm4h65sJ6EZgv6TrgpVJhRHyxYVGZmVnLqyeBXJW+zMzM1tlgAomIixp18jQg/32yNxKeHxFTy7ZvBVwM7As8C3wiIh5N26YAJwBrgC9GxOxGxWlmZq/VtLmwJHUA5wGHA3sBx0raq6zaCcBzEbEHcC7wrbTvXmTvUN8bOAz4YTqemZn1kWZOprgfsCgiFkfEKuAysmdM8sYBpRbQdOADyl6OPg64LCJeiYhHgEXpeGZm1keamUCGAI/n1peksop1ImI18DywQ537AiBpoqQuSV3Lli3rpdDNzGyDYyCS3gxMAnbL14+IgxsYV6+JiGnANIDOzk5PwWJm1kvquQvrSuDHwE/JBqx7SzcwNLe+ayqrVGeJpC2B7cgG0+vZ18zMGqieBLI6In7UgHPfCYyUNILsw/8Y4JNldWYCE4DbgKOAORERkmYCl0r6HrALMBK4owExmplZFfUkkN9I+nfgauCVUmFELO/JiSNitaSTgdlkt/FeEBELJJ0FdEXETOBnZNPILwKWkyUZUr0rgPuB1cDn01PzZmbWR+pJIBPS90m5sgB27+nJI2IWMKus7PTc8j+Aj1fZ92zg7J7GYGZmxdTzIOGIvgjEzMw2LfXchdWPbGr396Wim4CfRMSrDYzLzMxaXD1dWD8C+gE/TOufSmUnNiooMzNrffUkkHdGxNtz63Mk3dOogMzMbNNQz5Poa/KvtpW0O737PIiZmW2C6mmBTAJulLSY7BW3uwHHNzQqMzNrefXchXWDpJHAqFS0MCJeqbWPmZlt/qomEEkHR8QcSUeWbdpDEhHhl0yZmbWxWi2Q9wNzgI9U2Bb4LYVmZm2tagKJiDPS4lnpnRvrpPmrzMysjdVzF9avK5RN7+1AzMxs01JrDOQtZK+M3a5sHGRbYOtGB2ZmZq2t1hjIKODDwEDWHwd5AfhMI4MyM7PWV2sM5BrgGkkHRMRtfRiTmZltAup5kHCupM+TdWet67qKiE83LCozM2t59Qyi/wJ4EzAWuJns9bEvNDIoMzNrffUkkD0i4uvASxFxEfC/gP0bG5aZmbW6ehJI6b0fKyS9FdgOeGNPTippkKTrJD2Uvm9foc5oSbdJWiDpXkmfyG27UNIjkualr9E9icfMzDZePQlkWvqA/zowk+w95N/u4XknAzdExE
jghrRe7mXg3yJib+Aw4H8kDcxtnxQRo9PXvB7GY2ZmG6meyRTPT4s30wvvQU/GAQem5YvI3nJ4atl5H8wtPyHpaWAnYEUvxWBmZj1Q60HCU2rtGBHf68F5d46IpWn5SWDnWpUl7Qf0Bx7OFZ8t6XRSC6baDMGSJgITAYYNG9aDkM3MLK9WC+QN6fso4J1k3VeQPVR4x4YOLOl6sru3yp2WX4mIkBQ1jjOY7E6wCRGxNhVPIUs8/YFpZK2XsyrtHxHTUh06OzurnsfMzDZOrQcJ/wtA0h+Bd0TEC2n9TOB3GzpwRBxSbZukpyQNjoilKUE8XaXetulcp0XE7bljl1ovr0j6OfDlDcVjZma9q55B9J2BVbn1VWygy6kOM4EJaXkCcE15BUn9gauBiyNietm2wem7gPHAfT2Mx8zMNlI9T6JfDNwh6eq0Ph64sIfnnQpcIekE4G/A0QCSOoGTIuLEVPY+YAdJx6X9jkt3XF0iaSeyV+zOA07qYTxmZraR6rkL62xJ1wLvTUXHR8Tcnpw0Ip4FPlChvAs4MS3/Evhllf0P7sn5zcys52rdhbVtRPxd0iDg0fRV2jYoIpY3PjwzM2tVtVogl5JN534X2StsS5TWe+uZEDMz2wTVugvrw+m7X19rZmavUasL6x21doyIu3s/HDMz21TU6sL6bo1tAXgg28ysjdXqwjqoLwMxM7NNSz3PgZCmcd+L9d9IeHGjgjIzs9a3wQQi6QyymXP3AmYBhwN/JnvA0MzM2lQ9U5kcRfbQ35MRcTzwdrKXSpmZWRurJ4GsTLPgrk6TGz4NDG1sWGZm1urqGQPpSm8C/CnZQ4UvArc1NCozM2t5tZ4DOQ+4NCL+PRX9WNLvgW0j4t4+ic7MzFpWrRbIg8B30tTpVwC/6ukkimZmtvmoOgYSEd+PiAOA9wPPAhdI+qukMyS9uc8iNDOzlrTBQfSI+FtEfCsixgDHkr0P5IGGR2ZmZi1tgwlE0paSPiLpEuBaYCFwZMMjMzOzllZrEP1QshbHh4A7gMuAiRHxUk9Pmt4xcjkwnOw9I0dHxHMV6q0B5qfVxyLiiFQ+IsWzA9mdYZ+KiFXl+5uZWePUaoFMAW4F9oyIIyLi0t5IHslk4IaIGAnckNYrWRkRo9PXEbnybwHnRsQewHPACb0Ul5mZ1anWIPrBEXF+pZZBLxgHXJSWLyIbV6mLJJHNBDy9yP5mZtY76nkSvRF2joilaflJYOcq9baW1CXpdkmlJLEDsCIiVqf1JcCQaieSNDEdo2vZsmW9EryZmdU5G28Rkq4H3lRh02n5lYgISVGhHsBuEdEtaXdgjqT5wPMbE0dETAOmAXR2dlY7j5mZbaSGJZCIOKTaNklPSRocEUvTg4pPVzlGd/q+WNJNwBjg18BASVumVsiuQHev/wBmZlZTs7qwZgIT0vIE4JryCpK2l7RVWt4ReDdwf0QEcCPZLMFV9zczs8ZqVgKZChwq6SHgkLSOpE5J56c6e5JN5HgPWcKYGhH3p22nAqdIWkQ2JvKzPo3ezMwa14VVS0Q8S/aOkfLyLuDEtHwrsE+V/RcD+zUyRjMzq61ZLRAzM9vEOYGYmVkhTiBmZlaIE4iZmRXiBGJmZoU4gZiZWSFOIGZmVogTiJmZFeIEYmZmhTiBmJlZIU4gZmZWiBOImZkV4gRiZmaFOIGYmVkhTiBmZlaIE4iZmRXSlAQiaZCk6yQ9lL5vX6HOQZLm5b7+IWl82nahpEdy20b3/U9hZtbemtUCmQzcEBEjgRvS+noi4saIGB0Ro4GDgZeBP+SqTCptj4h5fRK1mZmt06wEMg64KC1fBIzfQP2jgGsj4uWGRmVmZnVrVgLZOSKWpuUngZ03UP8Y4FdlZWdLulfSuZK26vUIzcyspi0bdWBJ1wNvqrDptPxKRISkqHGcwcA+wOxc8RSyxNMfmAacCpxVZf+JwESAYcOGbcRPYGZmtTQsgUTEIdW2SXpK0uCIWJoSxNM1DnU0cHVEvJo7dqn18oqknwNfrhHHNLIkQ2dnZ9VEZWZmG6dZXVgzgQlpeQJwTY26x1LWfZWSDpJENn5yXwNiNDOzGpqVQKYCh0p6CDgkrSOpU9L5pUqShgNDgZvL9r9E0nxgPrAj8N99ELOZmeU0rAurloh4FvhAhfIu4MTc+qPAkAr1Dm5kfGZmtmF+Et3MzApxAjEzs0KcQMzMrBAnEDMzK8QJxMzMCnECMTOzQpxAzMysECcQMzMrxAnEzMwKcQIxM7NCnEDMzKwQJxAzMyvECcTMzApxAjEzs0KcQMzMrBAnEDMzK8QJxMzMCmlKApH0cUkLJK2V1Fmj3mGSFkpaJGlyrnyEpL+k8ssl9e+byM3MrKRZLZD7gCOBP1arIKkDOA84HNgLOFbSXmnzt4BzI2IP4DnghMaGa2Zm5ZqSQCLigYhYuIFq+wGLImJxRKwCLgPGSRJwMDA91bsIGN+4aM3MrJJWHgMZAjyeW1+SynYAVkTE6rLyiiRNlNQlqWvZsmUNC9bMrN1s2agDS7oeeFOFTadFxDWNOm+5iJgGTAPo7OyMvjqvmdnmrmEJJCIO6eEhuoGhufVdU9mzwEBJW6ZWSKnczMz6UCt3Yd0JjEx3XPUHjgFmRkQANwJHpXoTgD5r0ZiZWaZZt/F+VNIS4ADgd5Jmp/JdJM0CSK2Lk4HZwAPAFRGxIB3iVOAUSYvIxkR+1tc/g5lZu2tYF1YtEXE1cHWF8ieAD+XWZwGzKtRbTHaXlpmZNUkrd2GZmVkLcwIxM7NCnEDMzKwQJxAzMyukKYPom5IZc7s5Z/ZCnlixkn4dWzB00IBmh2Rm1hLcAqlhxtxuplw1n+4VKwlg1Zq1PPLMS8yY6+cWzcycQGo4Z/ZCVr66Zr2ytZGVm5m1OyeQGp5YsXKjys3M2okTSA27DKw83lGt3MysnTiB1DBp7CgG9OtYr2xAvw4mjR3VpIjMzFqH78KqYfyY7DUjpbuwdhk4gEljR60rNzNrZ04gGzB+zBAnDDOzCtyFZWZmhTiBmJlZIU4gZmZWiBOImZkV4gRiZmaFKHvFeHuQtAz4W4FddwSe6eVwGsFx9i7H2bscZ+/qyzh3i4idygvbKoEUJakrIjqbHceGOM7e5Th7l+PsXa0Qp7uwzMysECcQMzMrxAmkPtOaHUCdHGfvcpy9y3H2rqbH6TEQMzMrxC0QMzMrxAnEzMwKcQKpQdJhkhZKWiRpcrPjqUbSo5LmS5onqavZ8eRJukDS05Luy5UNknSdpIfS9+1bMMYzJXWnazpP0oeaGWOKaaikGyXdL2mBpP9I5a12PavF2VLXVNLWku6QdE+K879S+QhJf0m/95dL6t+icV4o6ZHc9Rzd57F5DKQySR3Ag8ChwBLgTuDYiLi/qYFVIOlRoDMiWu7hJ0nvA14ELo6It6aybwPLI2JqSszbR8SpLRbjmcCLEfGdZsVVTtJgYHBE3C3pDcBdwHjgOFrrelaL82ha6JpKEvC6iHhRUj/gz8B/AKcAV0XEZZJ+DNwTET9qwThPAn4bEdObFZtbINXtByyKiMURs
Qq4DBjX5Jg2ORHxR2B5WfE44KK0fBHZh0vTVImx5UTE0oi4Oy2/ADwADKH1rme1OFtKZF5Mq/3SVwAHA6UP5Va4ntXibDonkOqGAI/n1pfQgr8ESQB/kHSXpInNDqYOO0fE0rT8JLBzM4Op4WRJ96YurqZ2C5WTNBwYA/yFFr6eZXFCi11TSR2S5gFPA9cBDwMrImJ1qtISv/flcUZE6Xqena7nuZK26uu4nEA2D++JiHcAhwOfT10ym4TI+lBb4q+pMj8C/gUYDSwFvtvccP5J0uuBXwP/GRF/z29rpetZIc6Wu6YRsSYiRgO7kvU6vKXJIVVUHqektwJTyOJ9JzAI6PNuSyeQ6rqBobn1XVNZy4mI7vT9aeBqsl+EVvZU6icv9Zc/3eR4XiMinkq/tGuBn9Ii1zT1gf8auCQirkrFLXc9K8XZqtcUICJWADcCBwADJZVe991Sv/e5OA9LXYUREa8AP6cJ19MJpLo7gZHpjoz+wDHAzCbH9BqSXpcGKpH0OuCDwH2192q6mcCEtDwBuKaJsVRU+kBOPkoLXNM0mPoz4IGI+F5uU0tdz2pxtto1lbSTpIFpeQDZDTMPkH1AH5WqtcL1rBTnX3N/NIhsnKbPr6fvwqoh3Wb4P0AHcEFEnN3kkF5D0u5krQ6ALYFLWylOSb8CDiSbevop4AxgBnAFMIxsev2jI6Jpg9hVYjyQrKslgEeBz+bGGZpC0nuAPwHzgbWp+Ktk4wutdD2rxXksLXRNJb2NbJC8g+yP6Ssi4qz0O3UZWbfQXOBf01/5rRbnHGAnQMA84KTcYHvfxOYEYmZmRbgLy8zMCnECMTOzQpxAzMysECcQMzMrxAnEzMwKcQKxzUaazuE/c+uzJZ2fW/+upFNq7H+hpKPS8k2SOivU6Sdpapr59m5Jt0k6PG17VNKOBeJed94q289Ls63eL2llbvbVoyTNKj0j0JskDZb02xrb+0v6Y+6BO2tDTiC2ObkFeBeApC3InuvYO7f9XcCtPTzHN4DBwFvT9DHjgTf08Jg1RcTn0zQWHwIejojR6Wt6RHwoPZ3c204he1q8WkyrgBuATzTg3LaJcAKxzcmtZFNRQJY47gNekLR9mmhuT+BuSadLulPSfZKmpSd5N0jSNsBngC+UHixL03NcUaHuKen495W1iv4tTX53j6RfVNjvG6lF0lFnTI9K2lHScEl/Tfs+KOkSSYdIuiW1lvZL9V+XJjK8Q9JcSdVmmP4Y8Pu0z96p/rwU+8hUZwbwv+uJ0zZPbn7aZiMinpC0WtIwstbGbWQzqR4APA/Mj4hVkv5fRJwFkD7EPwz8po5T7AE8Vj6BYTlJ+wLHA/uTPSX8F0k3A6uArwHviohnJA0q2+8cstbM8VHsCd89gI8DnyabiueTwHuAI8ieBB8PnAbMiYhPp66vOyRdHxEv5eIYATyXe/r6JOD7EXFJmtanlNzuI5vIz9qUWyC2ubmVLHmUEshtufVbUp2DlL1xbj7Zux/2rnSgHngPcHVEvJSmlrgKeG8615WlF3+VTTfydWC7iDipYPIAeCQi5qfJChcAN6RjzQeGpzofBCYrmxr8JmBrsilQ8gYDy3LrtwFflXQqsFtErEzxrwFWleZis/bjBGKbm9I4yD5kfyHfTtYCeRdwq6StgR8CR0XEPmT9/FvXeexFwDBJ2/Z61FmLYd/yVslGys/XtDa3vpZ/9jYI+FhuHGVYRDxQdpyV5K5JRFxK1opZCcySdHCu7lbAP2TgwLcAAAFISURBVHoQs23CnEBsc3MrWZfU8jR1+HJgIFkSuZV/fjA+o+x9FVXvfioXES+TzTL7/dSVU5op9eNlVf8EjJe0jbIZkj+ayuYAH5e0Q9o3nyx+D0wFftfgv+hnA18ojftIGlOhzoP8s8VSmrBzcUT8gGxm2rel8h2AZyLi1QbGay3MCcQ2N/PJ7r66vazs+Yh4Jt2x9FOy1slssr/8N8bXyLp37pd0H/BboPylTncDFwJ3kM2Ue35EzI2IBcDZwM2S7gG+V7bflSm2mcqm7W6Eb5C9EvVeSQvS+nrSeMjDkvZIRUcD96Vur7cCF6fyg4DfNShO2wR4Nl4zew1JHwX2jYiv1ahzFTA5Ih7su8islfguLDN7jYi4utTVVknqwpvh5NHe3AIxM7NCPAZiZmaFOIGYmVkhTiBmZlaIE4iZmRXiBGJmZoX8f7sPKWpSDQwJAAAAAElFTkSuQmCC\n" - }, - "metadata": { - "needs_background": "light" - } - } - ], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", @@ -392,57 +407,79 @@ "plt.scatter(time_history, 1 - np.array(valid_loss_history))\n", "plt.step(time_history, 1 - np.array(best_valid_loss_history), where='post')\n", "plt.show()" - ] + ], + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZAAAAEWCAYAAABIVsEJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nO3de7xVdZ3/8dfbIyhWCggZIgiNhJcsqJOO1VSaBvYrwTLT5kKmUU01M/mLgixrbJxonMns8bMLmamleSFFKoy813g/BsnFSERTjjcUMVMSgc/vj/U9tNjuvdlnnbPP3vuc9/Px2I+91nd911qfBfvsz17f71rfpYjAzMysu3ZqdABmZtaanEDMzKwQJxAzMyvECcTMzApxAjEzs0KcQMzMrBAnELM6kPR3klY1Og6zenICsX5H0oOSjmxkDBHxm4iYWK/tS5oi6deSnpW0TtLNko6p1/7MynECMStAUlsD930ccAVwEbAPsBdwOvDeAtuSJH8PWCH+4NiAIWknSbMl3S/pKUmXSxqeW36FpMckPZN+3R+UW3aBpO9IWiTpOeDwdKbzWUn3pHUuk7Rrqv8OSWtz61esm5Z/TtKjkh6RdIqkkLRfmWMQ8A3gqxFxXkQ8ExFbI+LmiPhoqvMVST/OrTMubW/nNH+TpDMl3QI8D8yS1FGyn89IWpimd5H035IekvS4pO9KGtLD/w7rB5xAbCD5NDAdeDuwN/A0cG5u+TXABOCVwG+Bi0vW/xBwJvAK4H9T2fHAVGA88Drgw1X2X7aupKnAqcCRwH7AO6psYyIwBphfpU4t/hGYSXYs3wUmSpqQW/4h4JI0PRd4DTApxTea7IzHBjgnEBtIPg6cFhFrI+IF4CvAcV2/zCPi/Ih4Nrfs9ZL2yK1/dUTckn7x/yWVfSsiHomI9cDPyL5kK6lU93jghxGxIiKeT/uuZM/0/mitB13BBWl/myPiGeBq4ESAlEj2BxamM56ZwGciYn1EPAv8J3BCD/dv/YATiA0k+wJXSdogaQNwL7AF2EtSm6S5qXnrT8CDaZ0RufUfLrPNx3LTzwMvr7L/SnX3Ltl2uf10eSq9j6pSpxal+7iElEDIzj4WpGQ2EtgNuDv37/bLVG4DnBOIDSQPA0dHxNDca9eI6CT70pxG1oy0BzAuraPc+vUauvpRss7wLmOq1F1Fdhzvr1LnObIv/S6vKlOn9FiuBUZKmkSWSLqar54ENgIH5f7N9oiIaonSBggnEOuvBknaNffamayt/0xJ+wJIGilpWqr/CuAFsl/4u5E10/SVy4GTJB0gaTfgS5UqRvb8hVOBL0k6SdLu6eKAt0qal6otBd4maWxqgpuzowAi4kWyK7vOAoaTJRQiYivwfeBsSa8EkDRa0pTCR2v9hhOI9VeLyH45d72+ApwDLAR+JelZ4Hbg0FT/IuCPQCewMi3rExFxDfAt4EZgdW7fL1SoPx/4IPAR4BHgceA/yPoxiIhrgcuAe4C7gZ/XGMolZGdgV0TE5lz557viSs1715F15tsAJz9Qyqy5SDoAWA7sUvJFbtZUfAZi1gQkHZvutxgGfB34mZOHNTsnELPm8DHgCeB+sivDPtHYcMx2zE1YZmZWiM9AzMyskJ0bHUBfGjFiRIwbN67RYZiZtZS77777yYh4yc2jAyqBjBs3jo6Ojh1XNDOzbST9sVy5m7DMzKwQJxAzMyvECcTMzApxAjEzs0KcQMzMrJABdRWWmdlAsmBJJ2ctXsUjGzay99AhzJoykemTR/fa9p1AzLqp3n+UZr1hwZJO5ly5jI0vbgGgc8NG5ly5DKDXPq9OIC3CX1rNoS/+KM16w1mLV237nHbZ+OIWzlq8ygmkFRVNAv7Sah6V/ig/N/8efnLnQw2KyuylOjdsLFv+SIXyIpxA+khPkoC/tJpHpT/KTVu29nEkZtUNbtup7Ody76FDem0fTiC9LH+WsceQQUiw4fkX2UliS8nIx7UmAX9pNY9Kf5Sjhw7hso8d1oCIzMor/dEKMGRQG7Om9N7DJJ1AelHpf9iGjS9uW1aaPLrUkgT8pdU8+uKP0qw3dLVs+CqsFlGuqWlHakkC/tJqHn3xR2nWW6ZPHl3Xz6YTSC/qbudUrUnAX1rNpd5/lGatoqEJRNJU4BygDTgvIuaWLD8bODzN7ga8MiKGpmVbgGVp2UMRcUzfRL29fJ9HuX6OUm0SWyO6nQT8pWVmzaZhCURSG3AucBSwFrhL0sKIWNlVJyI+k6v/aWBy
<output omitted: several thousand characters of base64-encoded PNG image data for the preceding cell's plot output> " + }, + "metadata": { + "needs_background": "light" + } + } + ], + "metadata": { + "slideshow": { + "slide_type": "slide" + } + } }, { + "cell_type": "markdown", "source": [ "## 3. 
Comparison with untuned XGBoost\n", "\n", "### FLAML's accuracy" ], - "cell_type": "markdown", "metadata": {} }, { "cell_type": "code", "execution_count": 12, - "metadata": { - "tags": [] - }, + "source": [ + "print('flaml (60s) r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" + ], "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "flaml (60s) r2 = 0.834848817994438\n" + "flaml (60s) r2 = 0.8439648010832427\n" ] } ], - "source": [ - "print('flaml (60s) r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" - ] + "metadata": { + "tags": [] + } }, { + "cell_type": "markdown", "source": [ "### Default XGBoost" ], - "cell_type": "markdown", "metadata": {} }, { "cell_type": "code", "execution_count": 13, - "metadata": {}, - "outputs": [], "source": [ "from xgboost import XGBRegressor\n", "xgb = XGBRegressor()" - ] + ], + "outputs": [], + "metadata": {} }, { "cell_type": "code", "execution_count": 14, - "metadata": {}, + "source": [ + "xgb.fit(X_train, y_train)" + ], "outputs": [ { "output_type": "execute_result", @@ -462,16 +499,16 @@ "execution_count": 14 } ], - "source": [ - "xgb.fit(X_train, y_train)" - ] + "metadata": {} }, { "cell_type": "code", "execution_count": 15, - "metadata": { - "tags": [] - }, + "source": [ + "y_pred = xgb.predict(X_test)\n", + "from flaml.ml import sklearn_metric_loss_score\n", + "print('default xgboost r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" + ], "outputs": [ { "output_type": "stream", @@ -481,186 +518,21 @@ ] } ], - "source": [ - "y_pred = xgb.predict(X_test)\n", - "from flaml.ml import sklearn_metric_loss_score\n", - "print('default xgboost r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))" - ] + "metadata": { + "tags": [] + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "## 4. Add customized XGBoost learners in FLAML\n", "You can easily enable a custom objective function by adding a customized XGBoost learner (XGBoostEstimator for regression tasks, and XGBoostSklearnEstimator for classification tasks) in FLAML. In the following example, we show how to add such a customized XGBoostEstimator with a custom objective function. 
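The body of this cell is elided between hunks (the @@ -704,30 +576,260 @@ header below keeps only its opening import and its closing fit call), so the sketch below reconstructs the pattern this section describes. The learner classes mirror the MyXGB2 definition visible in test/test_automl.py later in this patch; logregobj is the standard custom-objective example from the XGBoost documentation, standing in for whatever objective the notebook cell actually defines.

    import numpy as np
    from sklearn.datasets import fetch_california_housing
    from flaml import AutoML
    from flaml.model import XGBoostEstimator

    def logregobj(preds, dtrain):
        # custom objective: logistic loss on raw margins, returning
        # the first- and second-order gradients XGBoost expects
        labels = dtrain.get_label()
        preds = 1.0 / (1.0 + np.exp(-preds))  # sigmoid of the raw margin
        grad = preds - labels                 # first-order gradient
        hess = preds * (1.0 - preds)          # second-order gradient
        return grad, hess

    class MyXGB1(XGBoostEstimator):
        '''XGBoostEstimator with the custom objective'''
        def __init__(self, **params):
            super().__init__(objective=logregobj, **params)

    class MyXGB2(XGBoostEstimator):
        '''XGBoostEstimator with squared error as the objective'''
        def __init__(self, **params):
            super().__init__(objective='reg:squarederror', **params)

    X_train, y_train = fetch_california_housing(return_X_y=True)
    automl = AutoML()
    automl.add_learner(learner_name='my_xgb1', learner_class=MyXGB1)
    automl.add_learner(learner_name='my_xgb2', learner_class=MyXGB2)
    settings = {
        "time_budget": 30,  # total running time in seconds
        "metric": 'r2',     # minimized as 1-r2, as the log below shows
        "task": 'regression',
        "estimator_list": ['my_xgb1', 'my_xgb2'],  # search the custom learners only
        "log_file_name": 'houses_experiment_my_xgb.log',  # flaml log file
    }
    automl.fit(X_train=X_train, y_train=y_train, **settings)

Registered this way, both learners take part in the search; the new log output in the rest of this hunk shows FLAML alternating between my_xgb1 and my_xgb2, settling on my_xgb2, and ending with the wall-clock warning this PR adds when the best model is found late in the time budget.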
" - ] + ], + "metadata": {} }, { "cell_type": "code", "execution_count": 16, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "[flaml.automl: 07-06 10:18:13] {908} INFO - Evaluation method: holdout\n", - "[flaml.automl: 07-06 10:18:13] {617} INFO - Using RepeatedKFold\n", - "[flaml.automl: 07-06 10:18:13] {929} INFO - Minimizing error metric: 1-r2\n", - "[flaml.automl: 07-06 10:18:13] {948} INFO - List of ML learners in AutoML Run: ['my_xgb1', 'my_xgb2']\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 0, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.0s,\tbest my_xgb1's error=53750617.1059,\tbest my_xgb1's error=53750617.1059\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 1, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.1s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb1's error=260718.5183\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 2, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.1s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb1's error=260718.5183\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 3, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.1s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb1's error=260718.5183\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 4, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.1s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 5, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:13] {1160} INFO - at 0.2s,\tbest my_xgb1's error=40726.5668,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:13] {1012} INFO - iteration 6, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.2s,\tbest my_xgb1's error=1918.9374,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 7, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.2s,\tbest my_xgb1's error=1918.9374,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 8, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.2s,\tbest my_xgb1's error=1918.9374,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 9, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.3s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 10, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.3s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 11, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.3s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 12, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.3s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 13, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.4s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", - "[flaml.automl: 07-06 10:18:14] 
{1012} INFO - iteration 14, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.4s,\tbest my_xgb1's error=1918.9374,\tbest my_xgb2's error=4.1603\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 15, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.4s,\tbest my_xgb2's error=3.8476,\tbest my_xgb2's error=3.8476\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 16, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.5s,\tbest my_xgb1's error=93.8630,\tbest my_xgb2's error=3.8476\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 17, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.5s,\tbest my_xgb2's error=0.4196,\tbest my_xgb2's error=0.4196\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 18, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.6s,\tbest my_xgb2's error=0.4196,\tbest my_xgb2's error=0.4196\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 19, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.6s,\tbest my_xgb2's error=0.2990,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 20, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.6s,\tbest my_xgb1's error=93.8630,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 21, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.6s,\tbest my_xgb1's error=12.3010,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 22, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.7s,\tbest my_xgb2's error=0.2990,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 23, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.7s,\tbest my_xgb2's error=0.2990,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 24, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.8s,\tbest my_xgb1's error=12.3010,\tbest my_xgb2's error=0.2990\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 25, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.8s,\tbest my_xgb2's error=0.2377,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 26, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 0.9s,\tbest my_xgb2's error=0.2377,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 27, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.0s,\tbest my_xgb2's error=0.2377,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 28, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.0s,\tbest my_xgb1's error=12.3010,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 29, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.0s,\tbest my_xgb1's error=4.1454,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 30, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.0s,\tbest my_xgb1's error=2.4944,\tbest my_xgb2's error=0.2377\n", - 
"[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 31, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.1s,\tbest my_xgb2's error=0.2377,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 32, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.1s,\tbest my_xgb1's error=2.4944,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 33, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:14] {1160} INFO - at 1.2s,\tbest my_xgb1's error=2.4944,\tbest my_xgb2's error=0.2377\n", - "[flaml.automl: 07-06 10:18:14] {1012} INFO - iteration 34, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.2s,\tbest my_xgb2's error=0.2212,\tbest my_xgb2's error=0.2212\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 35, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.3s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2212\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 36, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.3s,\tbest my_xgb2's error=0.2212,\tbest my_xgb2's error=0.2212\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 37, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.3s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2212\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 38, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.6s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 39, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.8s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 40, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:15] {1160} INFO - at 1.8s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:15] {1012} INFO - iteration 41, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:16] {1160} INFO - at 3.1s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:16] {1012} INFO - iteration 42, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.4s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 43, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.4s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 44, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.4s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 45, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.7s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 46, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.7s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 47, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.8s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's 
error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 48, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.9s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 49, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 3.9s,\tbest my_xgb1's error=1.4150,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 50, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 4.0s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 51, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 4.0s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 52, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:17] {1160} INFO - at 4.1s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:17] {1012} INFO - iteration 53, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 4.7s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 54, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 4.7s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 55, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 4.9s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 56, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 5.0s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 57, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 5.1s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 58, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:18] {1160} INFO - at 5.1s,\tbest my_xgb2's error=0.2107,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:18] {1012} INFO - iteration 59, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:19] {1160} INFO - at 5.2s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.2107\n", - "[flaml.automl: 07-06 10:18:19] {1012} INFO - iteration 60, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:20] {1160} INFO - at 6.8s,\tbest my_xgb2's error=0.1919,\tbest my_xgb2's error=0.1919\n", - "[flaml.automl: 07-06 10:18:20] {1012} INFO - iteration 61, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:21] {1160} INFO - at 7.4s,\tbest my_xgb2's error=0.1860,\tbest my_xgb2's error=0.1860\n", - "[flaml.automl: 07-06 10:18:21] {1012} INFO - iteration 62, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:21] {1160} INFO - at 7.4s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.1860\n", - "[flaml.automl: 07-06 10:18:21] {1012} INFO - iteration 63, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:22] {1160} INFO - at 9.1s,\tbest my_xgb2's error=0.1860,\tbest my_xgb2's error=0.1860\n", - "[flaml.automl: 07-06 10:18:22] {1012} INFO - iteration 64, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:23] {1160} INFO - at 9.2s,\tbest my_xgb2's 
error=0.1860,\tbest my_xgb2's error=0.1860\n", - "[flaml.automl: 07-06 10:18:23] {1012} INFO - iteration 65, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:25] {1160} INFO - at 12.0s,\tbest my_xgb2's error=0.1728,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:25] {1012} INFO - iteration 66, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:26] {1160} INFO - at 12.3s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:26] {1012} INFO - iteration 67, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:30] {1160} INFO - at 16.6s,\tbest my_xgb2's error=0.1728,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:30] {1012} INFO - iteration 68, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:33] {1160} INFO - at 20.1s,\tbest my_xgb2's error=0.1728,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:33] {1012} INFO - iteration 69, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:33] {1160} INFO - at 20.2s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:33] {1012} INFO - iteration 70, current learner my_xgb2\n", - "[flaml.automl: 07-06 10:18:38] {1160} INFO - at 25.2s,\tbest my_xgb2's error=0.1728,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:41] {1183} INFO - retrain my_xgb2 for 2.7s\n", - "[flaml.automl: 07-06 10:18:41] {1012} INFO - iteration 71, current learner my_xgb1\n", - "[flaml.automl: 07-06 10:18:41] {1160} INFO - at 28.0s,\tbest my_xgb1's error=1.0006,\tbest my_xgb2's error=0.1728\n", - "[flaml.automl: 07-06 10:18:44] {1183} INFO - retrain my_xgb1 for 2.9s\n", - "[flaml.automl: 07-06 10:18:44] {1206} INFO - selected model: \n", - "[flaml.automl: 07-06 10:18:44] {963} INFO - fit succeeded\n" - ] - } - ], "source": [ "import numpy as np \n", "\n", @@ -704,30 +576,260 @@ " \"log_file_name\": 'houses_experiment_my_xgb.log', # flaml log file\n", "}\n", "automl.fit(X_train=X_train, y_train=y_train, **settings)" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "[flaml.automl: 08-22 21:24:46] {1130} INFO - Evaluation method: holdout\n", + "[flaml.automl: 08-22 21:24:46] {634} INFO - Using RepeatedKFold\n", + "[flaml.automl: 08-22 21:24:46] {1155} INFO - Minimizing error metric: 1-r2\n", + "[flaml.automl: 08-22 21:24:46] {1175} INFO - List of ML learners in AutoML Run: ['my_xgb1', 'my_xgb2']\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 0, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb1's error=53750617.1059,\tbest my_xgb1's error=53750617.1059\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 1, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb1's error=260718.5183\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 2, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.1s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 3, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 4, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 
08-22 21:24:46] {1358} INFO - iteration 5, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.2s,\tbest my_xgb1's error=260718.5183,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 6, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=40726.5769,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 7, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 8, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 9, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.3s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 10, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.4s,\tbest my_xgb2's error=4.1611,\tbest my_xgb2's error=4.1611\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 11, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.4s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 12, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 13, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb2's error=4.1603,\tbest my_xgb2's error=4.1603\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 14, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.5s,\tbest my_xgb1's error=1918.9637,\tbest my_xgb2's error=4.1603\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 15, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.6s,\tbest my_xgb2's error=3.8476,\tbest my_xgb2's error=3.8476\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 16, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.6s,\tbest my_xgb1's error=93.9115,\tbest my_xgb2's error=3.8476\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 17, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.7s,\tbest my_xgb2's error=0.3645,\tbest my_xgb2's error=0.3645\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 18, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.7s,\tbest my_xgb2's error=0.3645,\tbest my_xgb2's error=0.3645\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 19, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.8s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 20, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:46] {1515} INFO - at 0.8s,\tbest my_xgb1's error=93.9115,\tbest my_xgb2's error=0.3139\n", + "[flaml.automl: 08-22 21:24:46] {1358} INFO - iteration 21, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 0.8s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's 
error=0.3139\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 22, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 0.9s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 23, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.0s,\tbest my_xgb2's error=0.3139,\tbest my_xgb2's error=0.3139\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 24, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.0s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's error=0.3139\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 25, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.1s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 26, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.1s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 27, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 28, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb1's error=12.3445,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 29, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.3s,\tbest my_xgb1's error=4.1558,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 30, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.4s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 31, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.4s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 32, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.5s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 33, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.5s,\tbest my_xgb1's error=2.4948,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 34, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.6s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 35, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 36, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 37, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:47] {1515} INFO - at 1.7s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:47] {1358} INFO - iteration 38, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.0s,\tbest my_xgb2's 
error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 39, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.0s,\tbest my_xgb2's error=0.2254,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 40, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.1s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.2254\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 41, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.4s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 42, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.6s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 43, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:48] {1515} INFO - at 2.8s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:48] {1358} INFO - iteration 44, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 2.9s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 45, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 2.9s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 46, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.0s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 47, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.0s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 48, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.7s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 49, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:49] {1515} INFO - at 3.8s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:49] {1358} INFO - iteration 50, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 3.8s,\tbest my_xgb1's error=1.4151,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 51, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 3.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 52, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 53, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.1s,\tbest my_xgb2's error=0.1900,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 54, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 4.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 55, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:50] {1515} INFO - at 
4.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1900\n", + "[flaml.automl: 08-22 21:24:50] {1358} INFO - iteration 56, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:51] {1515} INFO - at 5.5s,\tbest my_xgb2's error=0.1865,\tbest my_xgb2's error=0.1865\n", + "[flaml.automl: 08-22 21:24:51] {1358} INFO - iteration 57, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:52] {1515} INFO - at 5.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1865\n", + "[flaml.automl: 08-22 21:24:52] {1358} INFO - iteration 58, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:52] {1515} INFO - at 6.3s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n", + "[flaml.automl: 08-22 21:24:52] {1358} INFO - iteration 59, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:53] {1515} INFO - at 7.3s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n", + "[flaml.automl: 08-22 21:24:53] {1358} INFO - iteration 60, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:53] {1515} INFO - at 7.4s,\tbest my_xgb2's error=0.1790,\tbest my_xgb2's error=0.1790\n", + "[flaml.automl: 08-22 21:24:53] {1358} INFO - iteration 61, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:56] {1515} INFO - at 10.6s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:56] {1358} INFO - iteration 62, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:56] {1515} INFO - at 10.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:56] {1358} INFO - iteration 63, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:57] {1515} INFO - at 10.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:57] {1358} INFO - iteration 64, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.3s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 65, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 66, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:24:58] {1515} INFO - at 12.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:24:58] {1358} INFO - iteration 67, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:25:02] {1515} INFO - at 16.2s,\tbest my_xgb2's error=0.1707,\tbest my_xgb2's error=0.1707\n", + "[flaml.automl: 08-22 21:25:02] {1358} INFO - iteration 68, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:25:06] {1515} INFO - at 20.1s,\tbest my_xgb2's error=0.1699,\tbest my_xgb2's error=0.1699\n", + "[flaml.automl: 08-22 21:25:06] {1358} INFO - iteration 69, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:06] {1515} INFO - at 20.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1699\n", + "[flaml.automl: 08-22 21:25:06] {1358} INFO - iteration 70, current learner my_xgb2\n", + "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.4s,\tbest my_xgb2's error=0.1685,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 71, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 72, current learner my_xgb1\n", + "[flaml.automl: 08-22 
21:25:11] {1515} INFO - at 25.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 73, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 74, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:11] {1515} INFO - at 25.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:11] {1358} INFO - iteration 75, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 25.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 76, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 25.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 77, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 78, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 79, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 80, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 81, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 82, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 83, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:12] {1515} INFO - at 26.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:12] {1358} INFO - iteration 84, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 26.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 85, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 86, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 87, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 88, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 89, current learner 
my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 90, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 91, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 92, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 93, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:13] {1515} INFO - at 27.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:13] {1358} INFO - iteration 94, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 27.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 95, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.0s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 96, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.2s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 97, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 98, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 99, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 100, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 101, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:14] {1515} INFO - at 28.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:14] {1358} INFO - iteration 102, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 28.8s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 103, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 28.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 104, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 105, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.1s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] 
{1358} INFO - iteration 106, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 107, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.3s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 108, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.4s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 109, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.5s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 110, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.6s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 111, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:15] {1515} INFO - at 29.7s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:15] {1358} INFO - iteration 112, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:16] {1515} INFO - at 29.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:16] {1358} INFO - iteration 113, current learner my_xgb1\n", + "[flaml.automl: 08-22 21:25:16] {1515} INFO - at 29.9s,\tbest my_xgb1's error=1.0011,\tbest my_xgb2's error=0.1685\n", + "[flaml.automl: 08-22 21:25:16] {1592} INFO - selected model: \n", + "[flaml.automl: 08-22 21:25:20] {1633} INFO - retrain my_xgb2 for 4.5s\n", + "[flaml.automl: 08-22 21:25:20] {1636} INFO - retrained model: \n", + "[flaml.automl: 08-22 21:25:20] {1199} INFO - fit succeeded\n", + "[flaml.automl: 08-22 21:25:20] {1200} INFO - Time taken to find the best model: 25.375203132629395\n", + "[flaml.automl: 08-22 21:25:20] {1205} WARNING - Time taken to find the best model is 85% of the provided time budget and not all estimators' hyperparameter search converged. Consider increasing the time budget.\n" + ] + } + ], + "metadata": { + "tags": [] + } }, { "cell_type": "code", "execution_count": 17, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Best hyperparmeter config: {'n_estimators': 1349.0, 'max_leaves': 88.0, 'min_child_weight': 25.104070096431194, 'learning_rate': 0.04855713895005341, 'subsample': 1.0, 'colsample_bylevel': 0.6398245537905194, 'colsample_bytree': 0.7241421702750391, 'reg_alpha': 0.029936792254518507, 'reg_lambda': 0.08484133558855533}\n", - "Best r2 on validation data: 0.8272\n", - "Training duration of best run: 2.813 s\n", - "Predicted labels [151654.97 238153.92 152878.81 ... 204228.64 210192.14 277021.62]\n", - "True labels [136900. 241300. 200700. ... 160900. 227300. 
265600.]\n", - "r2 = 0.843576000520939\n", - "mse = 2067691660.3477309\n", - "mae = 29060.49894622093\n" - ] - } - ], "source": [ "print('Best hyperparmeter config:', automl.best_config)\n", "print('Best r2 on validation data: {0:.4g}'.format(1-automl.best_loss))\n", @@ -741,20 +843,50 @@ "print('r2', '=', 1 - sklearn_metric_loss_score('r2', y_pred, y_test))\n", "print('mse', '=', sklearn_metric_loss_score('mse', y_pred, y_test))\n", "print('mae', '=', sklearn_metric_loss_score('mae', y_pred, y_test))" - ] + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Best hyperparmeter config: {'n_estimators': 810, 'max_leaves': 148, 'min_child_weight': 30.65305732414229, 'learning_rate': 0.05793074143079172, 'subsample': 0.9452642648281835, 'colsample_bylevel': 0.8662229421401874, 'colsample_bytree': 0.7851677398738949, 'reg_alpha': 0.00738292823760415, 'reg_lambda': 1.2202619267865558}\n", + "Best r2 on validation data: 0.8315\n", + "Training duration of best run: 4.888 s\n", + "Predicted labels [146309.06 253975.23 148795.17 ... 192561.88 182641.44 270495.53]\n", + "True labels 14740 136900.0\n", + "10101 241300.0\n", + "20566 200700.0\n", + "2670 72500.0\n", + "15709 460000.0\n", + " ... \n", + "13132 121200.0\n", + "8228 137500.0\n", + "3948 160900.0\n", + "8522 227300.0\n", + "16798 265600.0\n", + "Name: median_house_value, Length: 5160, dtype: float64\n", + "r2 = 0.8483896546182459\n", + "mse = 2004062342.1743872\n", + "mae = 28633.257468053536\n" + ] + } + ], + "metadata": { + "tags": [] + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "source": [], "outputs": [], - "source": [] + "metadata": {} } ], "metadata": { "kernelspec": { "name": "python3", - "display_name": "Python 3.8.10 64-bit ('py38': conda)" + "display_name": "Python 3.8.0 64-bit ('blend': conda)" }, "language_info": { "codemirror_mode": { @@ -766,10 +898,10 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.8.0" }, "interpreter": { - "hash": "4502d015faca2560a557f35a41b6dd402f7fdfc08e843ae17a9c41947939f10c" + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" } }, "nbformat": 4, diff --git a/test/test_automl.py b/test/test_automl.py index a4aa1118a1..2205a0d833 100644 --- a/test/test_automl.py +++ b/test/test_automl.py @@ -10,7 +10,7 @@ from datetime import datetime from flaml import AutoML from flaml.data import get_output_from_log -from flaml.model import SKLearnEstimator, XGBoostEstimator +from flaml.model import LGBMEstimator, SKLearnEstimator, XGBoostEstimator from rgf.sklearn import RGFClassifier, RGFRegressor from flaml import tune @@ -92,6 +92,24 @@ class MyXGB2(XGBoostEstimator): super().__init__(objective='reg:squarederror', **params) +class MyLargeLGBM(LGBMEstimator): + + @classmethod + def search_space(cls, **params): + return { + 'n_estimators': { + 'domain': tune.lograndint(lower=4, upper=32768), + 'init_value': 32768, + 'low_cost_init_value': 4, + }, + 'num_leaves': { + 'domain': tune.lograndint(lower=4, upper=32768), + 'init_value': 32768, + 'low_cost_init_value': 4, + }, + } + + def custom_metric(X_test, y_test, estimator, labels, X_train, y_train, weight_test=None, weight_train=None): from sklearn.metrics import log_loss @@ -477,6 +495,66 @@ class TestAutoML(unittest.TestCase): print(automl_experiment.best_iteration) print(automl_experiment.best_estimator) + def test_parallel_xgboost(self, hpo_method=None): + automl_experiment = AutoML() + automl_settings = { 
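+ # n_concurrent_trials is the new option this PR adds: values > 1 run
+ # that many hyperparameter trials in parallel (the ImportError guard at
+ # the end of this test suggests ray as the backend), and hpo_method stays
+ # None here for the default searcher, while test_parallel_xgboost_random
+ # below re-runs this test with hpo_method='random' to exercise the newly
+ # added random search.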
+ "time_budget": 10, + "metric": 'ap', + "task": 'classification', + "log_file_name": "test/sparse_classification.log", + "estimator_list": ["xgboost"], + "log_type": "all", + "n_jobs": 1, + "n_concurrent_trials": 2, + "hpo_method": hpo_method, + } + X_train = scipy.sparse.eye(900000) + y_train = np.random.randint(2, size=900000) + try: + automl_experiment.fit(X_train=X_train, y_train=y_train, + **automl_settings) + print(automl_experiment.predict(X_train)) + print(automl_experiment.model) + print(automl_experiment.config_history) + print(automl_experiment.model_history) + print(automl_experiment.best_iteration) + print(automl_experiment.best_estimator) + except ImportError: + return + + def test_parallel_xgboost_random(self): + # use random search as the hpo_method + self.test_parallel_xgboost(hpo_method='random') + + def test_random_out_of_memory(self): + automl_experiment = AutoML() + automl_experiment.add_learner(learner_name='large_lgbm', learner_class=MyLargeLGBM) + automl_settings = { + "time_budget": 2, + "metric": 'ap', + "task": 'classification', + "log_file_name": "test/sparse_classification_oom.log", + "estimator_list": ["large_lgbm"], + "log_type": "all", + "n_jobs": 1, + "n_concurrent_trials": 2, + "hpo_method": 'random', + } + + X_train = scipy.sparse.eye(900000) + y_train = np.random.randint(2, size=900000) + try: + automl_experiment.fit(X_train=X_train, y_train=y_train, + **automl_settings) + print(automl_experiment.predict(X_train)) + print(automl_experiment.model) + print(automl_experiment.config_history) + print(automl_experiment.model_history) + print(automl_experiment.best_iteration) + print(automl_experiment.best_estimator) + except ImportError: + return + def test_sparse_matrix_lr(self): automl_experiment = AutoML() automl_settings = { diff --git a/test/test_notebook_example.py b/test/test_notebook_example.py index a3dd395104..01e9109496 100644 --- a/test/test_notebook_example.py +++ b/test/test_notebook_example.py @@ -17,6 +17,7 @@ def test_automl(budget=5, dataset_format='dataframe'): "metric": 'accuracy', # primary metrics can be chosen from: ['accuracy','roc_auc','roc_auc_ovr','roc_auc_ovo','f1','log_loss','mae','mse','r2'] "task": 'classification', # task type "log_file_name": 'airlines_experiment.log', # flaml log file + "seed": 7654321, # random seed } '''The main flaml automl API''' automl.fit(X_train=X_train, y_train=y_train, **settings) diff --git a/test/test_python_log.py b/test/test_python_log.py index d5027d5967..05fd2206b1 100644 --- a/test/test_python_log.py +++ b/test/test_python_log.py @@ -45,7 +45,7 @@ class TestLogging(unittest.TestCase): **automl_settings) logger.info(automl.search_space) logger.info(automl.low_cost_partial_config) - logger.info(automl.points_to_evalaute) + logger.info(automl.points_to_evaluate) logger.info(automl.cat_hp_cost) import optuna as ot study = ot.create_study() @@ -62,16 +62,18 @@ class TestLogging(unittest.TestCase): config['learner'] = automl.best_estimator automl.trainable({"ml": config}) from flaml import tune, CFO + from flaml.automl import size + from functools import partial search_alg = CFO( metric='val_loss', space=automl.search_space, low_cost_partial_config=automl.low_cost_partial_config, - points_to_evaluate=automl.points_to_evalaute, + points_to_evaluate=automl.points_to_evaluate, cat_hp_cost=automl.cat_hp_cost, prune_attr=automl.prune_attr, min_resource=automl.min_resource, max_resource=automl.max_resource, - config_constraints=[(automl.size, '<=', automl._mem_thres)], + config_constraints=[(partial(size, 
automl._state), '<=', automl._mem_thres)], metric_constraints=automl.metric_constraints) analysis = tune.run( automl.trainable, search_alg=search_alg, # verbose=2, diff --git a/test/test_xgboost2d.py b/test/test_xgboost2d.py index a98ec00ba7..c0309524df 100644 --- a/test/test_xgboost2d.py +++ b/test/test_xgboost2d.py @@ -40,6 +40,7 @@ def test_simple(method=None): "n_jobs": 1, "hpo_method": method, "log_type": "all", + "retrain_full": "budget", "time_budget": 1 } from sklearn.externals._arff import ArffException @@ -53,21 +54,23 @@ def test_simple(method=None): automl.fit(X_train=X_train, y_train=y_train, **automl_settings) print(automl.estimator_list) print(automl.search_space) - print(automl.points_to_evalaute) + print(automl.points_to_evaluate) config = automl.best_config.copy() config['learner'] = automl.best_estimator automl.trainable(config) from flaml import tune + from flaml.automl import size + from functools import partial analysis = tune.run( automl.trainable, automl.search_space, metric='val_loss', mode="min", low_cost_partial_config=automl.low_cost_partial_config, - points_to_evaluate=automl.points_to_evalaute, + points_to_evaluate=automl.points_to_evaluate, cat_hp_cost=automl.cat_hp_cost, prune_attr=automl.prune_attr, min_resource=automl.min_resource, max_resource=automl.max_resource, time_budget_s=automl._state.time_budget, - config_constraints=[(automl.size, '<=', automl._mem_thres)], + config_constraints=[(partial(size, automl._state), '<=', automl._mem_thres)], metric_constraints=automl.metric_constraints, num_samples=5) print(analysis.trials[-1]) diff --git a/test/tune/example.py b/test/tune/example.py index 07a07b76ea..ba62668b41 100644 --- a/test/tune/example.py +++ b/test/tune/example.py @@ -27,6 +27,8 @@ def test_blendsearch_tune(smoke_test=True): except ImportError: print('ray[tune] is not installed, skipping test') return + import numpy as np + algo = BlendSearch() algo = ConcurrencyLimiter(algo, max_concurrent=4) scheduler = AsyncHyperBandScheduler() @@ -42,7 +44,8 @@ def test_blendsearch_tune(smoke_test=True): "width": tune.uniform(0, 20), "height": tune.uniform(-100, 100), # This is an ignored parameter. - "activation": tune.choice(["relu", "tanh"]) + "activation": tune.choice(["relu", "tanh"]), + "test4": np.zeros((3, 1)), }) print("Best hyperparameters found were: ", analysis.best_config) diff --git a/test/tune/test_tune.py b/test/tune/test_tune.py index ded2b294fb..96316785e8 100644 --- a/test/tune/test_tune.py +++ b/test/tune/test_tune.py @@ -63,6 +63,7 @@ def _test_xgboost(method='BlendSearch'): time_budget_s = 60 for n_cpu in [4]: start_time = time.time() + ray.shutdown() ray.init(num_cpus=n_cpu, num_gpus=0) # ray.init(address='auto') if method == 'BlendSearch':
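The ray.shutdown() added in this final hunk clears any ray runtime left over from an earlier test in the same process, so the ray.init(num_cpus=n_cpu, num_gpus=0) that follows cannot fail as already initialized. Taken together, the options this patch introduces can be driven as in the sketch below, which mirrors test_parallel_xgboost and test_random_out_of_memory above; it assumes ray is installed (without it, fit is expected to raise ImportError, which is exactly what those tests catch).

    import numpy as np
    import scipy.sparse
    from flaml import AutoML

    automl = AutoML()
    settings = {
        "time_budget": 10,          # seconds of search
        "metric": 'ap',             # average precision
        "task": 'classification',
        "estimator_list": ["xgboost"],
        "n_jobs": 1,                # threads per trial
        "n_concurrent_trials": 2,   # new: run two trials at a time
        "hpo_method": 'random',     # new: random search instead of the default
    }
    # sparse toy data, as in the tests above
    X_train = scipy.sparse.eye(900000)
    y_train = np.random.randint(2, size=900000)
    automl.fit(X_train=X_train, y_train=y_train, **settings)
    print(automl.best_estimator, automl.best_config)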