diff --git a/NOTICE.md b/NOTICE.md index 63377152df..8990aa366e 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -5,8 +5,8 @@ This repository incorporates material as listed below or described in the code. # ## Component. Ray. -Code in tune/[analysis.py, sample.py, trial.py] and -searcher/[suggestion.py, variant_generator.py] is adapted from +Code in tune/[analysis.py, sample.py, trial.py, result.py], +searcher/[suggestion.py, variant_generator.py], and scheduler/trial_scheduler.py is adapted from https://github.com/ray-project/ray/blob/master/python/ray/tune/ diff --git a/README.md b/README.md index f49f01bb05..2fc7360c2b 100644 --- a/README.md +++ b/README.md @@ -141,6 +141,8 @@ For more technical details, please check our papers. * [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021. * [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021. +* ChaCha for online AutoML. Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. To appear in ICML 2021. + ## Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a diff --git a/flaml/__init__.py b/flaml/__init__.py index 4afb864e76..6b8900f28e 100644 --- a/flaml/__init__.py +++ b/flaml/__init__.py @@ -1,5 +1,9 @@ from flaml.searcher import CFO, BlendSearch, FLOW2, BlendSearchTuner from flaml.automl import AutoML, logger_formatter +try: + from flaml.onlineml.autovw import AutoVW +except ImportError: + print('need to install vowpalwabbit to use AutoVW') from flaml.version import __version__ import logging diff --git a/flaml/onlineml/__init__.py b/flaml/onlineml/__init__.py new file mode 100644 index 0000000000..eefa61aff8 --- /dev/null +++ b/flaml/onlineml/__init__.py @@ -0,0 +1,2 @@ +from .trial import VowpalWabbitTrial +from .trial_runner import OnlineTrialRunner diff --git a/flaml/onlineml/autovw.py b/flaml/onlineml/autovw.py new file mode 100644 index 0000000000..644cf63b3e --- /dev/null +++ b/flaml/onlineml/autovw.py @@ -0,0 +1,188 @@ +import numpy as np +from typing import Optional, Union +import logging +from flaml.tune import Trial, Categorical, Float, PolynomialExpansionSet, polynomial_expansion_set +from flaml.onlineml import OnlineTrialRunner +from flaml.scheduler import ChaChaScheduler +from flaml.searcher import ChampionFrontierSearcher +from flaml.onlineml.trial import get_ns_feature_dim_from_vw_example +logger = logging.getLogger(__name__) + + +class AutoVW: + """The AutoML class + + Methods: + predict(data_sample) + learn(data_sample) + AUTO + """ + WARMSTART_NUM = 100 + AUTO_STRING = '_auto' + VW_INTERACTION_ARG_NAME = 'interactions' + + def __init__(self, + max_live_model_num: int, + search_space: dict, + init_config: Optional[dict] = {}, + min_resource_lease: Optional[Union[str, float]] = 'auto', + automl_runner_args: Optional[dict] = {}, + scheduler_args: Optional[dict] = {}, + model_select_policy: Optional[str] = 'threshold_loss_ucb', + metric: Optional[str] = 'mae_clipped', + random_seed: Optional[int] = None, + model_selection_mode: Optional[str] = 'min', + cb_coef: Optional[float] = None, + ): + """Constructor + + Args: + max_live_model_num: The maximum number of 'live' models, which, in other words, + is the maximum number of models allowed to update in each learning iteraction. 
+            search_space: A dictionary of the search space. This search space includes both
+                hyperparameters we want to tune and fixed hyperparameters. In the latter case,
+                the value is a fixed value.
+            init_config: A dictionary of a partial or full initial config,
+                e.g. {'interactions': set(), 'learning_rate': 0.5}
+            min_resource_lease: The minimum resource lease assigned to a particular model/trial.
+                If set as 'auto', it will be calculated automatically.
+            automl_runner_args: A dictionary of configuration for the OnlineTrialRunner.
+                If set to {}, default values will be used, which is equivalent to using the following config.
+                automl_runner_args =
+                    {"champion_test_policy": 'loss_ucb'  # specifies how to perform the statistical test for a better champion
+                     "remove_worse": False  # specifies whether to perform the worse-than test
+                    }
+            scheduler_args: A dictionary of configuration for the scheduler.
+                If set to {}, default values will be used, which is equivalent to using the following config.
+                scheduler_args =
+                    {"keep_challenger_metric": 'ucb'  # the metric to use when deciding the top performing challengers
+                     "keep_challenger_ratio": 0.5  # the ratio of top performing challengers to keep live
+                     "keep_champion": True  # specifies whether to keep the champion always running
+                    }
+            model_select_policy: A string in ['threshold_loss_ucb', 'threshold_loss_lcb', 'threshold_loss_avg',
+                'loss_ucb', 'loss_lcb', 'loss_avg'] to specify how to select one model for prediction
+                from the live model pool. Default value is 'threshold_loss_ucb'.
+            metric: A string in ['mae_clipped', 'mae', 'mse', 'absolute_clipped', 'absolute', 'squared']
+                to specify the name of the loss function used for calculating the progressive validation loss in ChaCha.
+            random_seed (int): An integer of the random seed used in the searcher
+                (more specifically, this is the random seed for the ConfigOracle).
+            model_selection_mode: A string in ['min', 'max'] to specify the objective as
+                minimization or maximization.
+            cb_coef (float): A float coefficient (optional) used in the sample complexity bound.
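+
+        Example (an illustrative usage sketch, not part of the original docstring; the
+        search-space values and the iterable `data` of vw-format strings are hypothetical):
+
+            from flaml import AutoVW
+
+            autovw = AutoVW(max_live_model_num=5,
+                            search_space={'interactions': AutoVW.AUTO_STRING,
+                                          'learning_rate': 0.5})
+            for vw_example in data:
+                y_pred = autovw.predict(vw_example)  # predict first (progressive validation)
+                autovw.learn(vw_example)             # then update the live models on the same example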
+ """ + self._max_live_model_num = max_live_model_num + self._search_space = search_space + self._init_config = init_config + self._online_trial_args = {"metric": metric, + "min_resource_lease": min_resource_lease, + "cb_coef": cb_coef, + } + self._automl_runner_args = automl_runner_args + self._scheduler_args = scheduler_args + self._model_select_policy = model_select_policy + self._model_selection_mode = model_selection_mode + self._random_seed = random_seed + self._trial_runner = None + self._best_trial = None + # code for debugging purpose + self._prediction_trial_id = None + self._iter = 0 + + def _setup_trial_runner(self, vw_example): + """Set up the _trial_runner based on one vw_example + """ + # setup the default search space for the namespace interaction hyperparameter + search_space = self._search_space.copy() + for k, v in self._search_space.items(): + if k == self.VW_INTERACTION_ARG_NAME and v == self.AUTO_STRING: + raw_namespaces = self.get_ns_feature_dim_from_vw_example(vw_example).keys() + search_space[k] = polynomial_expansion_set(init_monomials=set(raw_namespaces)) + # setup the init config based on the input _init_config and search space + init_config = self._init_config.copy() + for k, v in search_space.items(): + if k not in init_config.keys(): + if isinstance(v, PolynomialExpansionSet): + init_config[k] = set() + elif (not isinstance(v, Categorical) and not isinstance(v, Float)): + init_config[k] = v + searcher_args = {"init_config": init_config, + "space": search_space, + "random_seed": self._random_seed, + 'online_trial_args': self._online_trial_args, + } + logger.info("original search_space %s", self._search_space) + logger.info("original init_config %s", self._init_config) + logger.info('searcher_args %s', searcher_args) + logger.info('scheduler_args %s', self._scheduler_args) + logger.info('automl_runner_args %s', self._automl_runner_args) + searcher = ChampionFrontierSearcher(**searcher_args) + scheduler = ChaChaScheduler(**self._scheduler_args) + self._trial_runner = OnlineTrialRunner(max_live_model_num=self._max_live_model_num, + searcher=searcher, + scheduler=scheduler, + **self._automl_runner_args) + + def predict(self, data_sample): + """Predict on the input example (e.g., vw example) + + Args: + data_sample (vw_example) + """ + if self._trial_runner is None: + self._setup_trial_runner(data_sample) + self._best_trial = self._select_best_trial() + self._y_predict = self._best_trial.predict(data_sample) + # code for debugging purpose + if self._prediction_trial_id is None or \ + self._prediction_trial_id != self._best_trial.trial_id: + self._prediction_trial_id = self._best_trial.trial_id + logger.info('prediction trial id changed to %s at iter %s, resource used: %s', + self._prediction_trial_id, self._iter, + self._best_trial.result.resource_used) + return self._y_predict + + def learn(self, data_sample): + """Perform one online learning step with the given data sample + + Args: + data_sample (vw_example): one data sample on which the model gets updated + """ + self._iter += 1 + self._trial_runner.step(data_sample, (self._y_predict, self._best_trial)) + + def _select_best_trial(self): + """Select a best trial from the running trials accoring to the _model_select_policy + """ + best_score = float('+inf') if self._model_selection_mode == 'min' else float('-inf') + new_best_trial = None + for trial in self._trial_runner.running_trials: + if trial.result is not None and ('threshold' not in self._model_select_policy + or trial.result.resource_used >= 
self.WARMSTART_NUM): + score = trial.result.get_score(self._model_select_policy) + if ('min' == self._model_selection_mode and score < best_score) or \ + ('max' == self._model_selection_mode and score > best_score): + best_score = score + new_best_trial = trial + if new_best_trial is not None: + logger.debug('best_trial resource used: %s', new_best_trial.result.resource_used) + return new_best_trial + else: + # This branch will be triggered when the resource consumption all trials are smaller + # than the WARMSTART_NUM threshold. In this case, we will select the _best_trial + # selected in the previous iteration. + if self._best_trial is not None and self._best_trial.status == Trial.RUNNING: + logger.debug('old best trial %s', self._best_trial.trial_id) + return self._best_trial + else: + # this will be triggered in the first iteration or in the iteration where we want + # to select the trial from the previous iteration but that trial has been paused + # (i.e., self._best_trial.status != Trial.RUNNING) by the scheduler. + logger.debug('using champion trial: %s', + self._trial_runner.champion_trial.trial_id) + return self._trial_runner.champion_trial + + @staticmethod + def get_ns_feature_dim_from_vw_example(vw_example) -> dict: + """Get a dictionary of feature dimensionality for each namespace singleton + """ + return get_ns_feature_dim_from_vw_example(vw_example) diff --git a/flaml/onlineml/trial.py b/flaml/onlineml/trial.py new file mode 100644 index 0000000000..5dba6356bf --- /dev/null +++ b/flaml/onlineml/trial.py @@ -0,0 +1,432 @@ +import numpy as np +import logging +import time +import math +import copy +import collections +from typing import Dict, Optional +from sklearn.metrics import mean_squared_error, mean_absolute_error +from vowpalwabbit import pyvw +from flaml.tune import Trial +logger = logging.getLogger(__name__) + + +def get_ns_feature_dim_from_vw_example(vw_example) -> dict: + """Get a dictionary of feature dimensionality for each namespace singleton + + NOTE: + Assumption: assume the vw_example takes one of the following format + depending on whether the example includes the feature names + + format 1: 'y |ns1 feature1:feature_value1 feature2:feature_value2 |ns2 + ns2 feature3:feature_value3 feature4:feature_value4' + format 2: 'y | ns1 feature_value1 feature_value2 | + ns2 feature_value3 feature_value4' + + The output of both cases are {'ns1': 2, 'ns2': 2} + + For more information about the input formate of vw example, please refer to + https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format + """ + ns_feature_dim = {} + data = vw_example.split('|') + for i in range(1, len(data)): + if ':' in data[i]: + ns_w_feature = data[i].split(' ') + ns = ns_w_feature[0] + feature = ns_w_feature[1:] + feature_dim = len(feature) + else: + data_split = data[i].split(' ') + ns = data_split[0] + feature_dim = len(data_split) - 1 + if len(data_split[-1]) == 0: + feature_dim -= 1 + ns_feature_dim[ns] = feature_dim + logger.debug('name space feature dimension %s', ns_feature_dim) + return ns_feature_dim + + +class OnlineResult: + """Class for managing the result statistics of a trial + + Attributes: + observation_count: the total number of observations + resource_used: the sum of loss + + Methods: + update_result(new_loss, new_resource_used, data_dimension) + Update result + get_score(score_name) + Get the score according to the input score_name + """ + prob_delta = 0.1 + LOSS_MIN = 0.0 + LOSS_MAX = np.inf + CB_COEF = 0.05 # 0.001 for mse + + def __init__(self, result_type_name: 
str, cb_coef: Optional[float] = None, + init_loss: Optional[float] = 0.0, init_cb: Optional[float] = 100.0, + mode: Optional[str] = 'min', sliding_window_size: Optional[int] = 100): + """ + Args: + result_type_name (str): The name of the result type + """ + self._result_type_name = result_type_name # for example 'mse' or 'mae' + self._mode = mode + self._init_loss = init_loss + # statistics needed for alg + self.observation_count = 0 + self.resource_used = 0.0 + self._loss_avg = 0.0 + self._loss_cb = init_cb # a large number (TODO: this can be changed) + self._cb_coef = cb_coef if cb_coef is not None else self.CB_COEF + # optional statistics + self._sliding_window_size = sliding_window_size + self._loss_queue = collections.deque(maxlen=self._sliding_window_size) + + def update_result(self, new_loss, new_resource_used, data_dimension, + bound_of_range=1.0, new_observation_count=1.0): + """Update result statistics + """ + self.resource_used += new_resource_used + # keep the running average instead of sum of loss to avoid over overflow + self._loss_avg = self._loss_avg * (self.observation_count / (self.observation_count + new_observation_count) + ) + new_loss / (self.observation_count + new_observation_count) + self.observation_count += new_observation_count + self._loss_cb = self._update_loss_cb(bound_of_range, data_dimension) + self._loss_queue.append(new_loss) + + def _update_loss_cb(self, bound_of_range, data_dim, + bound_name='sample_complexity_bound'): + """Calculate bound coef + """ + if bound_name == 'sample_complexity_bound': + # set the coefficient in the loss bound + if 'mae' in self.result_type_name: + coef = self._cb_coef * bound_of_range + else: + coef = 0.001 * bound_of_range + + comp_F = math.sqrt(data_dim) + n = self.observation_count + return coef * comp_F * math.sqrt((np.log10(n / OnlineResult.prob_delta)) / n) + else: + raise NotImplementedError + + @property + def result_type_name(self): + return self._result_type_name + + @property + def loss_avg(self): + return self._loss_avg if \ + self.observation_count != 0 else self._init_loss + + @property + def loss_cb(self): + return self._loss_cb + + @property + def loss_lcb(self): + return max(self._loss_avg - self._loss_cb, OnlineResult.LOSS_MIN) + + @property + def loss_ucb(self): + return min(self._loss_avg + self._loss_cb, OnlineResult.LOSS_MAX) + + @property + def loss_avg_recent(self): + return sum(self._loss_queue) / len(self._loss_queue) \ + if len(self._loss_queue) != 0 else self._init_loss + + def get_score(self, score_name, cb_ratio=1): + if 'lcb' in score_name: + return max(self._loss_avg - cb_ratio * self._loss_cb, OnlineResult.LOSS_MIN) + elif 'ucb' in score_name: + return min(self._loss_avg + cb_ratio * self._loss_cb, OnlineResult.LOSS_MAX) + elif 'avg' in score_name: + return self._loss_avg + else: + raise NotImplementedError + + +class BaseOnlineTrial(Trial): + """Class for online trial. 
+
+    Attributes:
+        config: the config for this trial
+        trial_id: the trial_id of this trial
+        min_resource_lease (float): the minimum resource lease
+        status: the status of this trial
+        start_time: the start time of this trial
+        custom_trial_name: a custom name for this trial
+
+    Methods:
+        set_resource_lease(resource)
+        set_status(status)
+        set_checked_under_current_champion(checked_under_current_champion)
+    """
+
+    def __init__(self,
+                 config: dict,
+                 min_resource_lease: float,
+                 is_champion: Optional[bool] = False,
+                 is_checked_under_current_champion: Optional[bool] = True,
+                 custom_trial_name: Optional[str] = 'mae',
+                 trial_id: Optional[str] = None,
+                 ):
+        """
+        Args:
+            config: the config dict
+            min_resource_lease: the minimum resource lease
+            is_champion: a bool indicating whether the trial is the champion
+            is_checked_under_current_champion: a bool indicating whether the trial has been
+                checked under the current champion
+            custom_trial_name: custom trial name
+            trial_id: the trial id
+        """
+        # ****basic variables
+        self.config = config
+        self.trial_id = trial_id
+        self.status = Trial.PENDING
+        self.start_time = time.time()
+        self.custom_trial_name = custom_trial_name
+
+        # ***resource budget related variable
+        self._min_resource_lease = min_resource_lease
+        self._resource_lease = copy.copy(self._min_resource_lease)
+        # ***champion related variables
+        self._is_champion = is_champion
+        # _is_checked_under_current_champion is supposed to always be True when the trial is first created
+        self._is_checked_under_current_champion = is_checked_under_current_champion
+
+    @property
+    def is_champion(self):
+        return self._is_champion
+
+    @property
+    def is_checked_under_current_champion(self):
+        return self._is_checked_under_current_champion
+
+    @property
+    def resource_lease(self):
+        return self._resource_lease
+
+    def set_checked_under_current_champion(self, checked_under_current_champion: bool):
+        """Set the checked_under_current_champion flag.
+
+        This flag is needed because we want to know whether a trial has been paused
+        since a new champion was promoted. We try to pause such running trials (even
+        though they have not yet reached the next scheduling checkpoint according to
+        resource used and resource lease), because a better trial is likely to be among
+        the new challengers generated by the new champion, so we want to try them as
+        soon as possible. If we waited until the next scheduling point, we could waste
+        a lot of resource (depending on the current resource lease) on the old trials
+        (note that new trials cannot be scheduled to run until a slot opens up).
+        In other words, we want to free up a running slot as soon as possible once a
+        new champion is promoted, so that newly generated challengers can be tried.
+ """ + self._is_checked_under_current_champion = checked_under_current_champion + + def set_resource_lease(self, resource: float): + self._resource_lease = resource + + def set_status(self, status): + """Sets the status of the trial and record the start time + """ + self.status = status + if status == Trial.RUNNING: + if self.start_time is None: + self.start_time = time.time() + + +class VowpalWabbitTrial(BaseOnlineTrial): + """Implement BaseOnlineTrial for Vowpal Wabbit + + Attributes: + model: the online model + result: the anytime result for the online model + trainable_class: the model class (set as pyvw.vw for VowpalWabbitTrial) + + config: the config for this trial + trial_id: the trial_id of this trial + min_resource_lease (float): the minimum resource realse + status: the status of this trial + start_time: the start time of this trial + custom_trial_name: a custom name for this trial + + Methods: + set_resource_lease(resource) + set_status(status) + set_checked_under_current_champion(checked_under_current_champion) + + NOTE: + About result: + 1. training related results (need to be updated in the trainable class) + 2. result about resources lease (need to be updated externally) + + About namespaces in vw: + - Wiki in vw: + https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Namespaces + - Namespace vs features: + https://stackoverflow.com/questions/28586225/in-vowpal-wabbit-what-is-the-difference-between-a-namespace-and-feature + """ + MODEL_CLASS = pyvw.vw + cost_unit = 1.0 + interactions_config_key = 'interactions' + MIN_RES_CONST = 5 + + def __init__(self, + config: dict, + min_resource_lease: float, + metric: str = 'mae', + is_champion: Optional[bool] = False, + is_checked_under_current_champion: Optional[bool] = True, + custom_trial_name: Optional[str] = 'vw_mae_clipped', + trial_id: Optional[str] = None, + cb_coef: Optional[float] = None, + ): + """Constructor + + Args: + config (dict): the config of the trial (note that the config is a set + because the hyperparameters are ) + min_resource_lease (float): the minimum resource lease + metric (str): the loss metric + is_champion (bool): indicates whether the trial is the current champion or not + is_checked_under_current_champion (bool): indicates whether this trials has + been paused under the current champion + trial_id (str): id of the trial (if None, it will be generated in the constructor) + + """ + # attributes + self.trial_id = self._config_to_id(config) if trial_id is None else trial_id + logger.info('Create trial with trial_id: %s', self.trial_id) + super().__init__(config, min_resource_lease, is_champion, is_checked_under_current_champion, + custom_trial_name, self.trial_id) + self.model = None # model is None until the config is scheduled to run + self.result = None + self.trainable_class = self.MODEL_CLASS + # variables that are needed during online training + self._metric = metric + self._y_min_observed = None + self._y_max_observed = None + # application dependent variables + self._dim = None + self._cb_coef = cb_coef + + @staticmethod + def _config_to_id(config): + """Generate an id for the provided config + """ + # sort config keys + sorted_k_list = sorted(list(config.keys())) + config_id_full = '' + for key in sorted_k_list: + v = config[key] + config_id = '|' + if isinstance(v, set): + value_list = sorted(v) + config_id += '_'.join([str(k) for k in value_list]) + else: + config_id += str(v) + config_id_full = config_id_full + config_id + return config_id_full + + def _initialize_vw_model(self, vw_example): + 
"""Initialize a vw model using the trainable_class + """ + self._vw_config = self.config.copy() + ns_interactions = self.config.get(VowpalWabbitTrial.interactions_config_key, None) + # ensure the feature interaction config is a list (required by VW) + if ns_interactions is not None: + self._vw_config[VowpalWabbitTrial.interactions_config_key] \ + = list(ns_interactions) + # get the dimensionality of the feature according to the namespace configuration + namespace_feature_dim = get_ns_feature_dim_from_vw_example(vw_example) + self._dim = self._get_dim_from_ns(namespace_feature_dim, ns_interactions) + # construct an instance of vw model using the input config and fixed config + self.model = self.trainable_class(**self._vw_config) + self.result = OnlineResult(self._metric, + cb_coef=self._cb_coef, + init_loss=0.0, init_cb=100.0,) + + def train_eval_model_online(self, data_sample, y_pred): + """Train and eval model online + """ + # extract info needed the first time we see the data + if self._resource_lease == 'auto' or self._resource_lease is None: + assert self._dim is not None + self._resource_lease = self._dim * self.MIN_RES_CONST + y = self._get_y_from_vw_example(data_sample) + self._update_y_range(y) + if self.model is None: + # initialize self.model and self.result + self._initialize_vw_model(data_sample) + # do one step of learning + self.model.learn(data_sample) + # update training related results accordingly + new_loss = self._get_loss(y, y_pred, self._metric, + self._y_min_observed, self._y_max_observed) + # udpate sample size, sum of loss, and cost + data_sample_size = 1 + bound_of_range = self._y_max_observed - self._y_min_observed + if bound_of_range == 0: + bound_of_range = 1.0 + self.result.update_result(new_loss, + VowpalWabbitTrial.cost_unit * data_sample_size, + self._dim, bound_of_range) + + def predict(self, x): + """Predict using the model + """ + if self.model is None: + # initialize self.model and self.result + self._initialize_vw_model(x) + return self.model.predict(x) + + def _get_loss(self, y_true, y_pred, loss_func_name, y_min_observed, y_max_observed): + """Get instantaneous loss from y_true and y_pred, and loss_func_name + For mae_clip, we clip y_pred in the observed range of y + """ + if 'mse' in loss_func_name or 'squared' in loss_func_name: + loss_func = mean_squared_error + elif 'mae' in loss_func_name or 'absolute' in loss_func_name: + loss_func = mean_absolute_error + if y_min_observed is not None and y_max_observed is not None and \ + 'clip' in loss_func_name: + # clip y_pred in the observed range of y + y_pred = min(y_max_observed, max(y_pred, y_min_observed)) + else: + raise NotImplementedError + return loss_func([y_true], [y_pred]) + + def _update_y_range(self, y): + """Maintain running observed minimum and maximum target value + """ + if self._y_min_observed is None or y < self._y_min_observed: + self._y_min_observed = y + if self._y_max_observed is None or y > self._y_max_observed: + self._y_max_observed = y + + @staticmethod + def _get_dim_from_ns(namespace_feature_dim: dict, namespace_interactions: [set, list]): + """Get the dimensionality of the corresponding feature of input namespace set + """ + total_dim = sum(namespace_feature_dim.values()) + if namespace_interactions: + for f in namespace_interactions: + ns_dim = 1.0 + for c in f: + ns_dim *= namespace_feature_dim[c] + total_dim += ns_dim + return total_dim + + def clean_up_model(self): + self.model = None + self.result = None + + @staticmethod + def _get_y_from_vw_example(vw_example): + """Get 
y from a vw_example. This works for regression datasets.
+        """
+        return float(vw_example.split('|')[0])
diff --git a/flaml/onlineml/trial_runner.py b/flaml/onlineml/trial_runner.py
new file mode 100644
index 0000000000..a5e584e811
--- /dev/null
+++ b/flaml/onlineml/trial_runner.py
@@ -0,0 +1,495 @@
+import time
+import numpy as np
+import math
+from flaml.tune import Trial
+from flaml.scheduler import TrialScheduler
+
+import logging
+logger = logging.getLogger(__name__)
+
+
+class OnlineTrialRunner:
+    """The OnlineTrialRunner class
+
+    Methods:
+        step(data_sample, prediction_trial_tuple)
+            Schedule up to _max_live_model_num trials to run each time it is called
+        get_top_running_trials()
+            Get a list of trial ids whose performance is among the top running trials
+        add_trial(trial)
+            Add a trial to this TrialRunner.
+        stop_trial(trial)
+            Set the status of a trial to Trial.TERMINATED and perform other subsequent operations
+        pause_trial(trial)
+            Set the status of a trial to Trial.PAUSED and perform other subsequent operations
+        run_trial(trial)
+            Set the status of a trial to Trial.RUNNING and perform other subsequent operations
+        get_trials()
+            Get all the trials added (whatever their status) to the OnlineTrialRunner
+
+    NOTE about the status of a trial:
+        Trial.PENDING: All trials are set to be pending when first added to the OnlineTrialRunner, until
+            they are selected to run. By this definition, a trial with status Trial.PENDING is a challenger
+            trial added to the OnlineTrialRunner that has never been selected to run.
+            It denotes the start of a trial's lifespan in the OnlineTrialRunner.
+        Trial.RUNNING: It indicates that this trial is one of the concurrently running trials.
+            The maximum number of Trial.RUNNING trials is max_live_model_num.
+            The status of a trial will be set to Trial.RUNNING the next time it is selected to run.
+            A trial's status may change as follows:
+            Trial.PENDING -> Trial.RUNNING
+            Trial.PAUSED -> Trial.RUNNING
+        Trial.PAUSED: The status of a trial is set to Trial.PAUSED once it is removed from the running trials.
+            Trial.RUNNING -> Trial.PAUSED
+        Trial.TERMINATED: The status of a trial is set to Trial.TERMINATED when it should never be selected again.
+            It denotes the real end of a trial's lifespan.
+        Status change routine of a trial:
+            Trial.PENDING -> (Trial.RUNNING -> Trial.PAUSED -> Trial.RUNNING -> ...) -> Trial.TERMINATED (optional)
+    """
+    RANDOM_SEED = 123456
+    WARMSTART_NUM = 100
+
+    def __init__(self,
+                 max_live_model_num: int,
+                 searcher=None,
+                 scheduler=None,
+                 champion_test_policy='loss_ucb',
+                 **kwargs
+                 ):
+        """Constructor
+
+        Args:
+            max_live_model_num: The maximum number of 'live'/running models allowed.
+            searcher: A class for generating Trial objects progressively. The ConfigOracle
+                is implemented in the searcher.
+                Required methods of the searcher:
+                - next_trial()
+                    Generate the next trial to add.
+                - set_search_properties(metric: Optional[str], mode: Optional[str], config: dict)
+                    Generate new challengers based on the current champion and update the challenger list.
+                - on_trial_result(trial_id: str, result: Dict)
+                    Report results to the searcher.
+            scheduler: A class for managing the 'live' trials and allocating the resources for the trials.
+                Required methods of the scheduler:
+                - on_trial_add(trial_runner, trial: Trial)
+                    Add candidate trials to the scheduler. It is called inside the add_trial
+                    function in the TrialRunner.
+ - on_trial_remove(trial_runner, trial: Trial) + Remove terminated trials from the scheduler. + - on_trial_result(trial_runner, trial: Trial, result: Dict) + Reprot results to the scheduler. + - choose_trial_to_run(trial_runner) -> Optional[Trial] + Among them, on_trial_result and choose_trial_to_run are the most important methods + champion_test_policy: A string to specify what test policy to test for champion. + Currently can choose from ['loss_ucb', 'loss_avg', 'loss_lcb', None]. + """ + # OnlineTrialRunner setting + self._searcher = searcher + self._scheduler = scheduler + self._champion_test_policy = champion_test_policy + self._max_live_model_num = max_live_model_num + self._remove_worse = kwargs.get('remove_worse', True) + self._bound_trial_num = kwargs.get('bound_trial_num', False) + self._no_model_persistence = True + + # stores all the trials added to the OnlineTrialRunner + # i.e., include the champion and all the challengers + self._trials = [] + self._champion_trial = None + self._best_challenger_trial = None + self._first_challenger_pool_size = None + self._random_state = np.random.RandomState(self.RANDOM_SEED) + self._running_trials = set() + + # initially schedule up to max_live_model_num of live models and + # set the first trial as the champion (which is done inside self.step()) + self._total_steps = 0 + logger.info('init step %s', self._max_live_model_num) + # TODO: add more comments + self.step() + assert self._champion_trial is not None + + @property + def champion_trial(self) -> Trial: + """The champion trial + """ + return self._champion_trial + + @property + def running_trials(self): + """The running/'live' trials + """ + return self._running_trials + + def step(self, data_sample=None, prediction_trial_tuple=None): + """Schedule up to max_live_model_num trials to run + + Args: + data_sample + prediction_trial_tuple + + NOTE: + It consists of the following several parts: + Update model: + 0. Update running trials using observations received. + Tests for Champion + 1. Test for champion (BetterThan test, and WorseThan test) + 1.1 BetterThan test + 1.2 WorseThan test: a trial may be removed if WroseThan test is triggered + Online Scheduling: + 2. Report results to the searcher and scheduler (the scheduler will return a decision about + the status of the running trials). + 3. Pause or stop a trial according to the scheduler's decision. + Add trial into the OnlineTrialRunner if there are opening slots. 
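+
+        Example (an illustrative sketch, not from the original code; assumes a configured
+        `searcher` and `scheduler`, and an iterable `data` of vw-format examples):
+
+            runner = OnlineTrialRunner(max_live_model_num=5,
+                                       searcher=searcher, scheduler=scheduler)
+            for data_sample in data:
+                champion = runner.champion_trial
+                y_pred = champion.predict(data_sample)
+                runner.step(data_sample, (y_pred, champion))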
+ + TODO: + add documentation about the Args + """ + # ***********Update running trials with observation*************************** + if data_sample is not None: + self._total_steps += 1 + prediction_made, prediction_trial = prediction_trial_tuple[0], prediction_trial_tuple[1] + # assert prediction_trial.status == Trial.RUNNING + trials_to_pause = [] + for trial in list(self._running_trials): + if trial != prediction_trial: + y_predicted = trial.predict(data_sample) + else: + y_predicted = prediction_made + trial.train_eval_model_online(data_sample, y_predicted) + logger.debug('running trial at iter %s %s %s %s %s %s', self._total_steps, + trial.trial_id, trial.result.loss_avg, trial.result.loss_cb, + trial.result.resource_used, trial.resource_lease) + # report result to the searcher + self._searcher.on_trial_result(trial.trial_id, trial.result) + # report result to the scheduler and the scheduler makes a decision about + # the running status of the trial + decision = self._scheduler.on_trial_result(self, trial, trial.result) + # set the status of the trial according to the decision made by the scheduler + logger.debug('trial decision %s %s at step %s', decision, trial.trial_id, self._total_steps) + if decision == TrialScheduler.STOP: + self.stop_trial(trial) + elif decision == TrialScheduler.PAUSE: + trials_to_pause.append(trial) + else: + self.run_trial(trial) + # ***********Statistical test of champion************************************* + self._champion_test() + # Pause the trial after the tests because the tests involves the reset of the trial's result + for trial in trials_to_pause: + self.pause_trial(trial) + # ***********Add and schedule new trials to run if there are opening slots**** + # Add trial if needed: add challengers into consideration through _add_trial_from_searcher() + # if there are available slots + for _ in range(self._max_live_model_num - len(self._running_trials)): + self._add_trial_from_searcher() + # Scheduling: schedule up to max_live_model_num number of trials to run + # (set the status as Trial.RUNNING) + while self._max_live_model_num > len(self._running_trials): + trial_to_run = self._scheduler.choose_trial_to_run(self) + if trial_to_run is not None: + self.run_trial(trial_to_run) + else: + break + + def get_top_running_trials(self, top_ratio=None, top_metric='ucb') -> list: + """Get a list of trial ids, whose performance is among the top running trials + """ + running_valid_trials = [trial for trial in self._running_trials if + trial.result is not None] + if not running_valid_trials: + return + if top_ratio is None: + top_number = 0 + elif isinstance(top_ratio, float): + top_number = math.ceil(len(running_valid_trials) * top_ratio) + elif isinstance(top_ratio, str) and 'best' in top_ratio: + top_number = 1 + else: + raise NotImplementedError + + if 'ucb' in top_metric: + test_attribute = 'loss_ucb' + elif 'avg' in top_metric: + test_attribute = 'loss_avg' + elif 'lcb' in top_metric: + test_attribute = 'loss_lcb' + else: + raise NotImplementedError + top_running_valid_trials = [] + logger.info('Running trial ids %s', [trial.trial_id for trial in running_valid_trials]) + self._random_state.shuffle(running_valid_trials) + results = [trial.result.get_score(test_attribute) for trial in running_valid_trials] + sorted_index = np.argsort(np.array(results)) # sorted result (small to large) index + for i in range(min(top_number, len(running_valid_trials))): + top_running_valid_trials.append(running_valid_trials[sorted_index[i]]) + logger.info('Top running ids %s', 
[trial.trial_id for trial in top_running_valid_trials]) + return top_running_valid_trials + + def _add_trial_from_searcher(self): + """Add a new trial to this TrialRunner. + + NOTE: + The new trial is acquired from the input search algorithm, i.e. self._searcher + A 'new' trial means the trial is not in self._trial + """ + # (optionally) upper bound the number of trials in the OnlineTrialRunner + if self._bound_trial_num and self._first_challenger_pool_size is not None: + active_trial_size = len([t for t in self._trials if t.status != Trial.TERMINATED]) + trial_num_upper_bound = int(round((np.log10(self._total_steps) + 1) * self._first_challenger_pool_size) + ) if self._first_challenger_pool_size else np.inf + if active_trial_size > trial_num_upper_bound: + logger.info('Not adding new trials: %s exceeds trial limit %s.', + active_trial_size, trial_num_upper_bound) + return None + + # output one trial from the trial pool (new challenger pool) maintained in the searcher + # Assumption on the searcher: when all frontiers (i.e., all the challengers generated + # based on the current champion) of the current champion are added, calling next_trial() + # will return None + trial = self._searcher.next_trial() + if trial is not None: + self.add_trial(trial) # dup checked in add_trial + # the champion_trial is initially None, so we need to set it up the first time + # a valid trial is added. + # Assumption on self._searcher: the first trial generated is the champion trial + if self._champion_trial is None: + logger.info('Initial set up of the champion trial %s', trial.config) + self._set_champion(trial) + else: + self._all_new_challengers_added = True + if self._first_challenger_pool_size is None: + self._first_challenger_pool_size = len(self._trials) + + def _champion_test(self): + """Perform tests again the latest champion, including bette_than tests and worse_than tests + """ + # for BetterThan test, we only need to compare the best challenger with the champion + self._get_best_challenger() + if self._best_challenger_trial is not None: + assert self._best_challenger_trial.trial_id != self._champion_trial.trial_id + # test whether a new champion is found and set the trial properties accordingly + is_new_champion_found = self._better_than_champion_test(self._best_challenger_trial) + if is_new_champion_found: + self._set_champion(new_champion_trial=self._best_challenger_trial) + + # performs _worse_than_champion_test, which is an optional component in ChaCha + if self._remove_worse: + to_stop = [] + for trial_to_test in self._trials: + if trial_to_test.status != Trial.TERMINATED: + worse_than_champion = self._worse_than_champion_test( + self._champion_trial, trial_to_test, self.WARMSTART_NUM) + if worse_than_champion: + to_stop.append(trial_to_test) + # we want to ensure there are at least #max_live_model_num of challengers remaining + max_to_stop_num = len([t for t in self._trials if t.status != Trial.TERMINATED] + ) - self._max_live_model_num + for i in range(min(max_to_stop_num, len(to_stop))): + self.stop_trial(to_stop[i]) + + def _get_best_challenger(self): + """Get the 'best' (in terms of the champion_test_policy) challenger under consideration. 
+ """ + if self._champion_test_policy is None: + return + if 'ucb' in self._champion_test_policy: + test_attribute = 'loss_ucb' + elif 'avg' in self._champion_test_policy: + test_attribute = 'loss_avg' + else: + raise NotImplementedError + active_trials = [trial for trial in self._trials if + (trial.status != Trial.TERMINATED + and trial.trial_id != self._champion_trial.trial_id + and trial.result is not None)] + if active_trials: + self._random_state.shuffle(active_trials) + results = [trial.result.get_score(test_attribute) for trial in active_trials] + best_index = np.argmin(results) + self._best_challenger_trial = active_trials[best_index] + + def _set_champion(self, new_champion_trial): + """Set the status of the existing trials once a new champion is found. + """ + assert new_champion_trial is not None + is_init_update = False + if self._champion_trial is None: + is_init_update = True + self.run_trial(new_champion_trial) + # set the checked_under_current_champion status of the trials + for trial in self._trials: + if trial.trial_id == new_champion_trial.trial_id: + trial.set_checked_under_current_champion(True) + else: + trial.set_checked_under_current_champion(False) + self._champion_trial = new_champion_trial + self._all_new_challengers_added = False + logger.info('Set the champion as %s', self._champion_trial.trial_id) + if not is_init_update: + self._champion_update_times += 1 + # calling set_search_properties of searcher will trigger + # new challenger generation. we do not do this for init champion + # as this step is already done when first constructing the searcher + self._searcher.set_search_properties(None, None, + {self._searcher.CHAMPION_TRIAL_NAME: self._champion_trial} + ) + else: + self._champion_update_times = 0 + + def get_trials(self) -> list: + """Return the list of trials managed by this TrialRunner. + """ + return self._trials + + def add_trial(self, new_trial): + """Add a new trial to this TrialRunner. + + Trials may be added at any time. + + Args: + trial (Trial): Trial to queue. + + NOTE: + Only add the new trial when it does not exist (according to the trial_id, which is + the signature of the trail) in self._trials. 
+ """ + for trial in self._trials: + if trial.trial_id == new_trial.trial_id: + trial.set_checked_under_current_champion(True) + return + logger.info('adding trial at iter %s, %s %s', self._total_steps, new_trial.trial_id, + len(self._trials)) + self._trials.append(new_trial) + self._scheduler.on_trial_add(self, new_trial) + + def stop_trial(self, trial): + """Stop a trial: set the status of a trial to be Trial.TERMINATED and perform + other subsequent operations + """ + if trial.status in [Trial.ERROR, Trial.TERMINATED]: + return + else: + logger.info('Terminating trial %s, with trial result %s', + trial.trial_id, trial.result) + trial.set_status(Trial.TERMINATED) + # clean up model and result + trial.clean_up_model() + self._scheduler.on_trial_remove(self, trial) + self._searcher.on_trial_complete(trial.trial_id) + self._running_trials.remove(trial) + + def pause_trial(self, trial): + """Pause a trial: set the status of a trial to be Trial.PAUSED and perform other + subsequent operations + """ + if trial.status in [Trial.ERROR, Trial.TERMINATED]: + return + else: + logger.info('Pausing trial %s, with trial loss_avg: %s, loss_cb: %s, loss_ucb: %s,\ + resource_lease: %s', trial.trial_id, trial.result.loss_avg, + trial.result.loss_cb, trial.result.loss_avg + trial.result.loss_cb, + trial.resource_lease) + trial.set_status(Trial.PAUSED) + # clean up model and result if no model persistence + if self._no_model_persistence: + trial.clean_up_model() + self._running_trials.remove(trial) + + def run_trial(self, trial): + """Run a trial: set the status of a trial to be Trial.RUNNING and perform other + subsequent operations + """ + if trial.status in [Trial.ERROR, Trial.TERMINATED]: + return + else: + trial.set_status(Trial.RUNNING) + self._running_trials.add(trial) + + def _better_than_champion_test(self, trial_to_test): + """Test whether there is a config in the existing trials that is better than + the current champion config + + Returns: + A bool indicating whether a new champion is found + """ + if trial_to_test.result is not None and self._champion_trial.result is not None: + if 'ucb' in self._champion_test_policy: + return self._test_lcb_ucb(self._champion_trial, trial_to_test, self.WARMSTART_NUM) + elif 'avg' in self._champion_test_policy: + return self._test_avg_loss(self._champion_trial, trial_to_test, self.WARMSTART_NUM) + elif 'martingale' in self._champion_test_policy: + return self._test_martingale(self._champion_trial, trial_to_test) + else: + raise NotImplementedError + else: + return False + + @staticmethod + def _worse_than_champion_test(champion_trial, trial, warmstart_num=1) -> bool: + """Test whether the input trial is worse than the champion_trial + """ + if trial.result is not None and trial.result.resource_used >= warmstart_num: + if trial.result.loss_lcb > champion_trial.result.loss_ucb: + logger.info('=========trial %s is worse than champion %s=====', + trial.trial_id, champion_trial.trial_id) + logger.info('trial %s %s %s', trial.config, trial.result, trial.resource_lease) + logger.info('trial loss_avg:%s, trial loss_cb %s', trial.result.loss_avg, + trial.result.loss_cb) + logger.info('champion loss_avg:%s, champion loss_cb %s', champion_trial.result.loss_avg, + champion_trial.result.loss_cb) + logger.info('champion %s', champion_trial.config) + logger.info('trial loss_avg_recent:%s, trial loss_cb %s', trial.result.loss_avg_recent, + trial.result.loss_cb) + logger.info('champion loss_avg_recent:%s, champion loss_cb %s', + champion_trial.result.loss_avg_recent, 
champion_trial.result.loss_cb) + return True + return False + + @staticmethod + def _test_lcb_ucb(champion_trial, trial, warmstart_num=1) -> bool: + """Comare the challenger(i.e., trial)'s loss upper bound with + champion_trial's loss lower bound - cb + """ + assert trial.trial_id != champion_trial.trial_id + if trial.result.resource_used >= warmstart_num: + if trial.result.loss_ucb < champion_trial.result.loss_lcb - champion_trial.result.loss_cb: + logger.info('======new champion condition satisfied: using lcb vs ucb=====') + logger.info('new champion trial %s %s %s', + trial.trial_id, trial.result.resource_used, trial.resource_lease) + logger.info('new champion trial loss_avg:%s, trial loss_cb %s', + trial.result.loss_avg, trial.result.loss_cb) + logger.info('old champion trial %s %s %s', + champion_trial.trial_id, champion_trial.result.resource_used, + champion_trial.resource_lease,) + logger.info('old champion loss avg %s, loss cb %s', + champion_trial.result.loss_avg, + champion_trial.result.loss_cb) + return True + return False + + @staticmethod + def _test_avg_loss(champion_trial, trial, warmstart_num=1) -> bool: + """Comare the challenger(i.e., trial)'s average loss with the + champion_trial's average loss + """ + assert trial.trial_id != champion_trial.trial_id + if trial.result.resource_used >= warmstart_num: + if trial.result.loss_avg < champion_trial.result.loss_avg: + logger.info('=====new champion condition satisfied using avg loss=====') + logger.info('trial %s', trial.config) + logger.info('trial loss_avg:%s, trial loss_cb %s', + trial.result.loss_avg, trial.result.loss_cb) + logger.info('champion loss_avg:%s, champion loss_cb %s', + champion_trial.result.loss_avg, champion_trial.result.loss_cb) + logger.info('champion %s', champion_trial.config) + return True + return False + + @staticmethod + def _test_martingale(champion_trial, trial): + """Comare the challenger and champion using confidence sequence based + test martingale + + Not implementated yet + """ + NotImplementedError diff --git a/flaml/scheduler/__init__.py b/flaml/scheduler/__init__.py new file mode 100644 index 0000000000..37f9e2d01c --- /dev/null +++ b/flaml/scheduler/__init__.py @@ -0,0 +1,2 @@ +from .trial_scheduler import TrialScheduler, FIFOScheduler +from .online_scheduler import OnlineScheduler, OnlineSuccessiveDoublingScheduler, ChaChaScheduler diff --git a/flaml/scheduler/online_scheduler.py b/flaml/scheduler/online_scheduler.py new file mode 100644 index 0000000000..d2d4a4ed5a --- /dev/null +++ b/flaml/scheduler/online_scheduler.py @@ -0,0 +1,140 @@ +import numpy as np +import logging +from typing import Optional, Dict +from flaml.scheduler import FIFOScheduler, TrialScheduler +from flaml.tune import Trial +logger = logging.getLogger(__name__) + + +class OnlineScheduler(FIFOScheduler): + """Implementation of the OnlineFIFOSchedulers. 
+
+    Methods:
+        on_trial_result(trial_runner, trial, result)
+            Report result and return a decision on the trial's status
+        choose_trial_to_run(trial_runner)
+            Decide which trial to run next
+    """
+    def on_trial_result(self, trial_runner, trial: Trial, result: Dict):
+        """Report result and return a decision on the trial's status
+
+        Always keep a trial running (return status TrialScheduler.CONTINUE)
+        """
+        return TrialScheduler.CONTINUE
+
+    def choose_trial_to_run(self, trial_runner) -> Optional[Trial]:
+        """Decide which trial to run next
+
+        Trial prioritization according to the status:
+        PENDING (trials that have not been tried) > PAUSED (trials that have been run)
+
+        For trials with the same status, it chooses the one with the smallest resource lease
+        """
+        for trial in trial_runner.get_trials():
+            if trial.status == Trial.PENDING:
+                return trial
+        min_paused_resource = np.inf
+        min_paused_resource_trial = None
+        for trial in trial_runner.get_trials():
+            # if there is a tie, prefer the earlier added ones
+            if trial.status == Trial.PAUSED and trial.resource_lease < min_paused_resource:
+                min_paused_resource = trial.resource_lease
+                min_paused_resource_trial = trial
+        if min_paused_resource_trial is not None:
+            return min_paused_resource_trial
+
+
+class OnlineSuccessiveDoublingScheduler(OnlineScheduler):
+    """Implementation of the OnlineSuccessiveDoublingScheduler.
+
+    Methods:
+        on_trial_result(trial_runner, trial, result)
+            Report result and return a decision on the trial's status
+        choose_trial_to_run(trial_runner)
+            Decide which trial to run next
+    """
+    def __init__(self, increase_factor: float = 2.0):
+        '''
+        Args:
+            increase_factor (float): a multiplicative factor used to increase resource lease.
+                The default value is 2.0
+        '''
+        super().__init__()
+        self._increase_factor = increase_factor
+
+    def on_trial_result(self, trial_runner, trial: Trial, result: Dict):
+        """Report result and return a decision on the trial's status
+
+        1. Returns TrialScheduler.CONTINUE (i.e., keeps the trial running)
+           if the resource consumed has not reached the current resource_lease.
+        2. Otherwise, doubles the current resource lease and returns TrialScheduler.PAUSE.
+        """
+        if trial.result is None or trial.result.resource_used < trial.resource_lease:
+            return TrialScheduler.CONTINUE
+        else:
+            trial.set_resource_lease(trial.resource_lease * self._increase_factor)
+            logger.info('Doubled resource for trial %s, used: %s, current budget %s',
+                        trial.trial_id, trial.result.resource_used, trial.resource_lease)
+            return TrialScheduler.PAUSE
+
+
+class ChaChaScheduler(OnlineSuccessiveDoublingScheduler):
+    """Keep the top performing learners running
+
+    Methods:
+        on_trial_result(trial_runner, trial, result)
+            Report result and return a decision on the trial's status
+        choose_trial_to_run(trial_runner)
+            Decide which trial to run next
+    """
+    def __init__(self, increase_factor: float = 2.0, **kwargs):
+        '''
+        Args:
+            increase_factor: a multiplicative factor used to increase resource lease.
+ The default value is 2.0 + ''' + super().__init__(increase_factor) + self._keep_champion = kwargs.get('keep_champion', True) + self._keep_challenger_metric = kwargs.get('keep_challenger_metric', 'ucb') + self._keep_challenger_ratio = kwargs.get('keep_challenger_ratio', 0.5) + self._pause_old_froniter = kwargs.get('pause_old_froniter', False) + logger.info('Using chacha scheduler with config %s', kwargs) + + def on_trial_result(self, trial_runner, trial: Trial, result: Dict): + """Report result and return a decision on the trial's status + + Make a decision according to: SuccessiveDoubling + champion check + performance check + """ + # Doubling scheduler makes a decision + decision = super().on_trial_result(trial_runner, trial, result) + # ***********Check whether the trial has been paused since a new champion is promoted** + # NOTE: This check is not enabled by default. Just keeping it for experimentation purpose. + ## trial.is_checked_under_current_champion being False means the trial + # has not been paused since the new champion is promoted. If so, we need to + # tentatively pause it such that new trials can possiblly be taken into consideration + # NOTE: This may need to be changed. We need to do this because we only add trials. + # into the OnlineTrialRunner when there are avaialbe slots. Maybe we need to consider + # adding max_running_trial number of trials once a new champion is promoted. + if self._pause_old_froniter and not trial.is_checked_under_current_champion: + if decision == TrialScheduler.CONTINUE: + decision = TrialScheduler.PAUSE + trial.set_checked_under_current_champion(True) + logger.info('Tentitively set trial as paused') + + # ****************Keep the champion always running****************** + if self._keep_champion and trial.trial_id == trial_runner.champion_trial.trial_id and \ + decision == TrialScheduler.PAUSE: + return TrialScheduler.CONTINUE + + # ****************Keep the trials with top performance always running****************** + if self._keep_challenger_ratio is not None: + if decision == TrialScheduler.PAUSE: + logger.debug('champion, %s', trial_runner.champion_trial.trial_id) + # this can be inefficient when the # trials is large. TODO: need to improve efficiency. + top_trials = trial_runner.get_top_running_trials(self._keep_challenger_ratio, + self._keep_challenger_metric) + logger.debug('top_learners: %s', top_trials) + if trial in top_trials: + logger.debug('top runner %s: set from PAUSE to CONTINUE', trial.trial_id) + return TrialScheduler.CONTINUE + return decision diff --git a/flaml/scheduler/trial_scheduler.py b/flaml/scheduler/trial_scheduler.py new file mode 100644 index 0000000000..a20b022f01 --- /dev/null +++ b/flaml/scheduler/trial_scheduler.py @@ -0,0 +1,157 @@ +''' +Copyright 2020 The Ray Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This source file is adapted here because ray does not fully support Windows. + +Copyright (c) Microsoft Corporation. 
+''' +from typing import Dict, Optional + +from flaml.tune import trial_runner +from flaml.tune.result import DEFAULT_METRIC +from flaml.tune.trial import Trial + + +class TrialScheduler: + """Interface for implementing a Trial Scheduler class.""" + + CONTINUE = "CONTINUE" #: Status for continuing trial execution + PAUSE = "PAUSE" #: Status for pausing trial execution + STOP = "STOP" #: Status for stopping trial execution + + _metric = None + + @property + def metric(self): + return self._metric + + def set_search_properties(self, metric: Optional[str], + mode: Optional[str]) -> bool: + """Pass search properties to scheduler. + This method acts as an alternative to instantiating schedulers + that react to metrics with their own `metric` and `mode` parameters. + Args: + metric (str): Metric to optimize + mode (str): One of ["min", "max"]. Direction to optimize. + """ + if self._metric and metric: + return False + if metric: + self._metric = metric + + if self._metric is None: + # Per default, use anonymous metric + self._metric = DEFAULT_METRIC + + return True + + def on_trial_add(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + """Called when a new trial is added to the trial runner.""" + + raise NotImplementedError + + def on_trial_error(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + """Notification for the error of trial. + This will only be called when the trial is in the RUNNING state.""" + + raise NotImplementedError + + def on_trial_result(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial, result: Dict) -> str: + """Called on each intermediate result returned by a trial. + At this point, the trial scheduler can make a decision by returning + one of CONTINUE, PAUSE, and STOP. This will only be called when the + trial is in the RUNNING state.""" + + raise NotImplementedError + + def on_trial_complete(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial, result: Dict): + """Notification for the completion of trial. + This will only be called when the trial is in the RUNNING state and + either completes naturally or by manual termination.""" + + raise NotImplementedError + + def on_trial_remove(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + """Called to remove trial. + This is called when the trial is in PAUSED or PENDING state. Otherwise, + call `on_trial_complete`.""" + + raise NotImplementedError + + def choose_trial_to_run( + self, trial_runner: "trial_runner.TrialRunner") -> Optional[Trial]: + """Called to choose a new trial to run. + This should return one of the trials in trial_runner that is in + the PENDING or PAUSED state. This function must be idempotent. 
+ If no trial is ready, return None.""" + + raise NotImplementedError + + def debug_string(self) -> str: + """Returns a human readable message for printing to the console.""" + + raise NotImplementedError + + def save(self, checkpoint_path: str): + """Save trial scheduler to a checkpoint""" + raise NotImplementedError + + def restore(self, checkpoint_path: str): + """Restore trial scheduler from checkpoint.""" + raise NotImplementedError + + +class FIFOScheduler(TrialScheduler): + """Simple scheduler that just runs trials in submission order.""" + + def on_trial_add(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + pass + + def on_trial_error(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + pass + + def on_trial_result(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial, result: Dict) -> str: + return TrialScheduler.CONTINUE + + def on_trial_complete(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial, result: Dict): + pass + + def on_trial_remove(self, trial_runner: "trial_runner.TrialRunner", + trial: Trial): + pass + + def choose_trial_to_run( + self, trial_runner: "trial_runner.TrialRunner") -> Optional[Trial]: + for trial in trial_runner.get_trials(): + if (trial.status == Trial.PENDING + and trial_runner.has_resources_for_trial(trial)): + return trial + for trial in trial_runner.get_trials(): + if (trial.status == Trial.PAUSED + and trial_runner.has_resources_for_trial(trial)): + return trial + return None + + def debug_string(self) -> str: + return "Using FIFO scheduling algorithm." diff --git a/flaml/searcher/__init__.py b/flaml/searcher/__init__.py index 3f894e7d84..009e6879e0 100644 --- a/flaml/searcher/__init__.py +++ b/flaml/searcher/__init__.py @@ -1,2 +1,6 @@ from .blendsearch import CFO, BlendSearch, BlendSearchTuner from .flow2 import FLOW2 +try: + from .online_searcher import ChampionFrontierSearcher +except ImportError: + print('need to install vowpalwabbit to use ChampionFrontierSearcher') diff --git a/flaml/searcher/blendsearch.py b/flaml/searcher/blendsearch.py index fcf8c343cf..c2cc6ab40c 100644 --- a/flaml/searcher/blendsearch.py +++ b/flaml/searcher/blendsearch.py @@ -51,8 +51,8 @@ class BlendSearch(Searcher): Args: metric: A string of the metric name to optimize for. - minimization or maximization. mode: A string in ['min', 'max'] to specify the objective as + minimization or maximization. space: A dictionary to specify the search space. points_to_evaluate: Initial parameter suggestions to be run first. low_cost_partial_config: A dictionary from a subset of @@ -107,6 +107,13 @@ class BlendSearch(Searcher): ''' self._metric, self._mode = metric, mode init_config = low_cost_partial_config or {} + if not init_config: + logger.warning( + "No low-cost init config given to the search algorithm." + "For cost-frugal search, " + "consider providing init values for cost-related hps via " + "'init_config'." 
+ ) self._points_to_evaluate = points_to_evaluate or [] self._config_constraints = config_constraints self._metric_constraints = metric_constraints @@ -202,6 +209,10 @@ class BlendSearch(Searcher): self._metric_constraint_satisfied = state._metric_constraint_satisfied self._metric_constraint_penalty = state._metric_constraint_penalty + @property + def metric_target(self): + return self._metric_target + def restore_from_dir(self, checkpoint_dir: str): super.restore_from_dir(checkpoint_dir) diff --git a/flaml/searcher/flow2.py b/flaml/searcher/flow2.py index 385a696185..f4120c3eee 100644 --- a/flaml/searcher/flow2.py +++ b/flaml/searcher/flow2.py @@ -47,8 +47,8 @@ class FLOW2(Searcher): to the initial low-cost values. e.g. {'epochs': 1} metric: A string of the metric name to optimize for. - minimization or maximization. mode: A string in ['min', 'max'] to specify the objective as + minimization or maximization. cat_hp_cost: A dictionary from a subset of categorical dimensions to the relative cost of each choice. e.g., @@ -92,13 +92,6 @@ class FLOW2(Searcher): self.space = flatten_dict(self.space, prevent_delimiter=True) self._random = np.random.RandomState(seed) self._seed = seed - if not init_config: - logger.warning( - "No init config given to FLOW2. Using random initial config." - "For cost-frugal search, " - "consider providing init values for cost-related hps via " - "'init_config'." - ) self.init_config = init_config self.best_config = flatten_dict(init_config) self.cat_hp_cost = cat_hp_cost @@ -508,6 +501,7 @@ class FLOW2(Searcher): 1. same incumbent, increase resource 2. same resource, move from the incumbent to a random direction 3. same resource, move from the incumbent to the opposite direction + #TODO: better decouple FLOW2 config suggestion and stepsize update ''' self.trial_count_proposed += 1 if self._num_complete4incumbent > 0 and self.cost_incumbent and \ diff --git a/flaml/searcher/online_searcher.py b/flaml/searcher/online_searcher.py new file mode 100644 index 0000000000..836717ab02 --- /dev/null +++ b/flaml/searcher/online_searcher.py @@ -0,0 +1,360 @@ +import numpy as np +import logging +import itertools +from typing import Dict, Optional, List +from flaml.tune import Categorical, Float, PolynomialExpansionSet +from flaml.tune import Trial +from flaml.onlineml import VowpalWabbitTrial +from flaml.searcher import CFO + +logger = logging.getLogger(__name__) + + +class BaseSearcher: + """Implementation of the BaseSearcher + + Methods: + set_search_properties(metric, mode, config) + next_trial() + on_trial_result(trial_id, result) + on_trial_complete() + """ + + def __init__(self, + metric: Optional[str] = None, + mode: Optional[str] = None, + ): + pass + + def set_search_properties(self, metric: Optional[str] = None, mode: Optional[str] = None, + config: Optional[Dict] = None): + if metric: + self._metric = metric + if mode: + assert mode in ["min", "max"], "`mode` must be 'min' or 'max'." + self._mode = mode + + def next_trial(self): + NotImplementedError + + def on_trial_result(self, trial_id: str, result: Dict): + pass + + def on_trial_complete(self, trial): + pass + + +class ChampionFrontierSearcher(BaseSearcher): + """The ChampionFrontierSearcher class + + Methods: + (metric, mode, config) + Generate a list of new challengers, and add them to the _challenger_list + next_trial() + Pop a trial from the _challenger_list + on_trial_result(trial_id, result) + Doing nothing + on_trial_complete() + Doing nothing + + NOTE: + This class serves the role of ConfigOralce. 
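+    (i.e., a config oracle that proposes new challenger configurations for the online trial runner to evaluate).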
+ Every time we create an online trial, we generate a searcher_trial_id. + At the same time, we also record the trial_id of the VW trial. + Note that the trial_id is a unique signature of the configuraiton. + So if two VWTrials are associated with the same config, they will have the same trial_id + (although not the same searcher_trial_id). + searcher_trial_id will be used in suggest() + """ + # ****the following constants are used when generating new challengers in + # the _query_config_oracle function + # how many item to add when doing the expansion + # (i.e. how many interaction items to add at each time) + POLY_EXPANSION_ADDITION_NUM = 1 + # the order of polynomial expansions to add based on the given seed interactions + EXPANSION_ORDER = 2 + # the number of new challengers with new numerical hyperparamter configs + NUMERICAL_NUM = 2 + + # In order to use CFO, a loss name and loss values of configs are need + # since CFO in fact only requires relative loss order of two configs to perform + # the update, a pseudo loss can be used as long as the relative performance orders + # of different configs are perserved. We set the loss of the init config to be + # a large value (CFO_SEARCHER_LARGE_LOSS), and set the loss of the better config as + # 0.95 of the previous best config's loss. + # NOTE: this setting depends on the assumption that (and thus + # _query_config_oracle) is only triggered when a better champion is found. + CFO_SEARCHER_METRIC_NAME = 'pseudo_loss' + CFO_SEARCHER_LARGE_LOSS = 1e6 + + # the random seed used in generating numerical hyperparamter configs (when CFO is not used) + NUM_RANDOM_SEED = 111 + + CHAMPION_TRIAL_NAME = 'champion_trial' + TRIAL_CLASS = VowpalWabbitTrial + + def __init__(self, + init_config: Dict, + space: Optional[Dict] = None, + metric: Optional[str] = None, + mode: Optional[str] = None, + random_seed: Optional[int] = 2345, + online_trial_args: Optional[Dict] = {}, + nonpoly_searcher_name: Optional[str] = 'CFO' + ): + '''Constructor + + Args: + init_config: dict + space: dict + metric: str + mode: str + random_seed: int + online_trial_args: dict + nonpoly_searcher_name: A string to specify the search algorithm + for nonpoly hyperparameters + ''' + self._init_config = init_config + self._space = space + self._seed = random_seed + self._online_trial_args = online_trial_args + self._nonpoly_searcher_name = nonpoly_searcher_name + + self._random_state = np.random.RandomState(self._seed) + self._searcher_for_nonpoly_hp = {} + self._space_of_nonpoly_hp = {} + # dicts to remember the mapping between searcher_trial_id and trial_id + self._searcher_trialid_to_trialid = {} # key: searcher_trial_id, value: trial_id + self._trialid_to_searcher_trial_id = {} # value: trial_id, key: searcher_trial_id + self._challenger_list = [] + # initialize the search in set_search_properties + self.set_search_properties(config={self.CHAMPION_TRIAL_NAME: None}, init_call=True) + logger.debug('using random seed %s in config oracle', self._seed) + + def set_search_properties(self, metric: Optional[str] = None, + mode: Optional[str] = None, + config: Optional[Dict] = {}, + init_call: Optional[bool] = False): + """Construct search space with given config, and setup the search + """ + super().set_search_properties(metric, mode, config) + # *********Use ConfigOralce (i.e, self._generate_new_space to generate list of new challengers) + logger.info('champion trial %s', config) + champion_trial = config.get(self.CHAMPION_TRIAL_NAME, None) + if champion_trial is None: + champion_trial = 
self._create_trial_from_config(self._init_config) + # generate a new list of challenger trials + new_challenger_list = self._query_config_oracle(champion_trial.config, + champion_trial.trial_id, + self._trialid_to_searcher_trial_id[champion_trial.trial_id]) + # add the newly generated challengers to existing challengers + # there can be duplicates and we check duplicates when calling next_trial() + self._challenger_list = self._challenger_list + new_challenger_list + # add the champion as part of the new_challenger_list when called initially + if init_call: + self._challenger_list.append(champion_trial) + logger.critical('Created challengers from champion %s', champion_trial.trial_id) + logger.critical('New challenger size %s, %s', len(self._challenger_list), + [t.trial_id for t in self._challenger_list]) + + def next_trial(self): + """Return a trial from the _challenger_list + """ + next_trial = None + if self._challenger_list: + next_trial = self._challenger_list.pop() + return next_trial + + def _create_trial_from_config(self, config, searcher_trial_id=None): + if searcher_trial_id is None: + searcher_trial_id = Trial.generate_id() + trial = self.TRIAL_CLASS(config, **self._online_trial_args) + self._searcher_trialid_to_trialid[searcher_trial_id] = trial.trial_id + # only update the dict when the trial_id does not exist + if trial.trial_id not in self._trialid_to_searcher_trial_id: + self._trialid_to_searcher_trial_id[trial.trial_id] = searcher_trial_id + return trial + + def _query_config_oracle(self, seed_config, seed_config_trial_id, + seed_config_searcher_trial_id=None) -> List[Trial]: + """Give the seed config, generate a list of new configs (which are supposed to include + at least one config that has better performance than the input seed_config) + """ + # group the hyperparameters according to whether the configs of them are independent + # with the other hyperparameters + hyperparameter_config_groups = [] + searcher_trial_ids_groups = [] + nonpoly_config = {} + for k, v in seed_config.items(): + config_domain = self._space[k] + if isinstance(config_domain, PolynomialExpansionSet): + # get candidate configs for hyperparameters of the PolynomialExpansionSet type + partial_new_configs = self._generate_independent_hp_configs(k, v, config_domain) + if partial_new_configs: + hyperparameter_config_groups.append(partial_new_configs) + # does not have searcher_trial_ids + searcher_trial_ids_groups.append([]) + elif isinstance(config_domain, Float) or isinstance(config_domain, Categorical): + # otherwise we need to deal with them in group + nonpoly_config[k] = v + if k not in self._space_of_nonpoly_hp: + self._space_of_nonpoly_hp[k] = self._space[k] + + # -----------generate partial new configs for non-PolynomialExpansionSet hyperparameters + if nonpoly_config: + new_searcher_trial_ids = [] + partial_new_nonpoly_configs = [] + if 'CFO' in self._nonpoly_searcher_name: + if seed_config_trial_id not in self._searcher_for_nonpoly_hp: + self._searcher_for_nonpoly_hp[seed_config_trial_id] = CFO(space=self._space_of_nonpoly_hp, + points_to_evaluate=[nonpoly_config], + metric=self.CFO_SEARCHER_METRIC_NAME, + ) + # initialize the search in set_search_properties + self._searcher_for_nonpoly_hp[seed_config_trial_id].set_search_properties( + config={'metric_target': self.CFO_SEARCHER_LARGE_LOSS}) + # We need to call this for once, such that the seed config in points_to_evaluate will be called + # to be tried + self._searcher_for_nonpoly_hp[seed_config_trial_id].suggest(seed_config_searcher_trial_id) 
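+                # the suggest() call above makes CFO evaluate the seed (champion) config first;
+                # the pseudo loss reported below then makes that config CFO's incumbent, so the
+                # following suggestions explore numerical configs around the champion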
+ # assuming minimization + if self._searcher_for_nonpoly_hp[seed_config_trial_id].metric_target is None: + pseudo_loss = self.CFO_SEARCHER_LARGE_LOSS + else: + pseudo_loss = self._searcher_for_nonpoly_hp[seed_config_trial_id].metric_target * 0.95 + pseudo_result_to_report = {} + for k, v in nonpoly_config.items(): + pseudo_result_to_report['config/' + str(k)] = v + pseudo_result_to_report[self.CFO_SEARCHER_METRIC_NAME] = pseudo_loss + pseudo_result_to_report['time_total_s'] = 1 + self._searcher_for_nonpoly_hp[seed_config_trial_id].on_trial_complete(seed_config_searcher_trial_id, + result=pseudo_result_to_report) + while len(partial_new_nonpoly_configs) < self.NUMERICAL_NUM: + # suggest multiple times + new_searcher_trial_id = Trial.generate_id() + new_searcher_trial_ids.append(new_searcher_trial_id) + suggestion = self._searcher_for_nonpoly_hp[seed_config_trial_id].suggest(new_searcher_trial_id) + if suggestion is not None: + partial_new_nonpoly_configs.append(suggestion) + logger.info('partial_new_nonpoly_configs %s', partial_new_nonpoly_configs) + else: + raise NotImplementedError + if partial_new_nonpoly_configs: + hyperparameter_config_groups.append(partial_new_nonpoly_configs) + searcher_trial_ids_groups.append(new_searcher_trial_ids) + # ----------- coordinate generation of new challengers in the case of multiple groups + new_trials = [] + for i in range(len(hyperparameter_config_groups)): + logger.info('hyperparameter_config_groups[i] %s %s', + len(hyperparameter_config_groups[i]), + hyperparameter_config_groups[i]) + for j, new_partial_config in enumerate(hyperparameter_config_groups[i]): + new_seed_config = seed_config.copy() + new_seed_config.update(new_partial_config) + # For some groups of the hyperparameters, we may have already generated the + # searcher_trial_id. In that case, we only need to retrieve the searcher_trial_id + # instead of generating it again. So we do not generate searcher_trial_id and + # instead set the searcher_trial_id to be None. When creating a trial from a config, + # a searcher_trial_id will be generated if None is provided. 
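+                # (groups coming from PolynomialExpansionSet were added with an empty id list, so they
+                #  take the None branch below; CFO-generated configs reuse the ids recorded above)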
+ # TODO: An alternative option is to generate a searcher_trial_id for each partial config + if searcher_trial_ids_groups[i]: + new_searcher_trial_id = searcher_trial_ids_groups[i][j] + else: + new_searcher_trial_id = None + new_trial = self._create_trial_from_config(new_seed_config, new_searcher_trial_id) + new_trials.append(new_trial) + logger.info('new_configs %s', [t.trial_id for t in new_trials]) + return new_trials + + def _generate_independent_hp_configs(self, hp_name, current_config_value, config_domain) -> List: + if isinstance(config_domain, PolynomialExpansionSet): + seed_interactions = list(current_config_value) + list(config_domain.init_monomials) + logger.critical('Seed namespaces (singletons and interactions): %s', seed_interactions) + logger.info('current_config_value %s %s', current_config_value, seed_interactions) + configs = self._generate_poly_expansion_sets(seed_interactions, + self.EXPANSION_ORDER, + config_domain.allow_self_inter, + config_domain.highest_poly_order, + self.POLY_EXPANSION_ADDITION_NUM, + ) + else: + raise NotImplementedError + configs_w_key = [{hp_name: hp_config} for hp_config in configs] + return configs_w_key + + def _generate_poly_expansion_sets(self, seed_interactions, order, allow_self_inter, + highest_poly_order, interaction_num_to_add): + champion_all_combinations = self._generate_all_comb(seed_interactions, order, allow_self_inter, highest_poly_order) + space = sorted(list(itertools.combinations( + champion_all_combinations, interaction_num_to_add))) + self._random_state.shuffle(space) + candidate_configs = [set(seed_interactions) | set(item) for item in space] + final_candidate_configs = [] + for c in candidate_configs: + new_c = set([e for e in c if len(e) > 1]) + final_candidate_configs.append(new_c) + return final_candidate_configs + + @staticmethod + def _generate_all_comb(seed_interactions: list, seed_interaction_order: int, + allow_self_inter: Optional[bool] = False, + highest_poly_order: Optional[int] = None): + """Generate new interactions by doing up to seed_interaction_order on the seed_interactions + + Args: + seed_interactions (List[str]): the see config which is a list of interactions string + (including the singletons) + seed_interaction_order (int): the maxmum order of interactions to perform on the seed_config + allow_self_inter (bool): whether self-interaction is allowed + e.g. if set False, 'aab' will be considered as 'ab', i.e. duplicates in the interaction + string are removed. + highest_poly_order (int): the highest polynomial order allowed for the resulting interaction. + e.g. if set 3, the interaction 'abcd' will be excluded. + """ + + def get_interactions(list1, list2): + """Get combinatorial list of tuples + """ + new_list = [] + for i in list1: + for j in list2: + # each interaction is sorted. E.g. after sorting + # 'abc' 'cba' 'bca' are all 'abc' + # this is done to ensure we can use the config as the signature + # of the trial, i.e., trial id. 
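+                # e.g. i='ab', j='c' and i='cb', j='a' both yield 'abc'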
+ new_interaction = ''.join(sorted(i + j)) + if new_interaction not in new_list: + new_list.append(new_interaction) + return new_list + + def strip_self_inter(s): + """Remove duplicates in an interaction string + """ + if len(s) == len(set(s)): + return s + else: + # return ''.join(sorted(set(s))) + new_s = '' + char_list = [] + for i in s: + if i not in char_list: + char_list.append(i) + new_s += i + return new_s + + interactions = seed_interactions.copy() + all_interactions = [] + while seed_interaction_order > 1: + interactions = get_interactions(interactions, seed_interactions) + seed_interaction_order -= 1 + all_interactions += interactions + if not allow_self_inter: + all_interactions_no_self_inter = [] + for s in all_interactions: + s_no_inter = strip_self_inter(s) + if len(s_no_inter) > 1 and s_no_inter not in all_interactions_no_self_inter: + all_interactions_no_self_inter.append(s_no_inter) + all_interactions = all_interactions_no_self_inter + if highest_poly_order is not None: + all_interactions = [c for c in all_interactions if len(c) <= highest_poly_order] + logger.info('all_combinations %s', all_interactions) + return all_interactions diff --git a/flaml/tune/__init__.py b/flaml/tune/__init__.py index 0763380672..562da6b364 100644 --- a/flaml/tune/__init__.py +++ b/flaml/tune/__init__.py @@ -5,3 +5,6 @@ except ImportError: from .sample import (uniform, quniform, choice, randint, qrandint, randn, qrandn, loguniform, qloguniform) from .tune import run, report +from .sample import polynomial_expansion_set +from .sample import PolynomialExpansionSet, Categorical, Float +from .trial import Trial diff --git a/flaml/tune/cgmanifest.json b/flaml/tune/cgmanifest.json new file mode 100644 index 0000000000..425df47169 --- /dev/null +++ b/flaml/tune/cgmanifest.json @@ -0,0 +1,11 @@ +{ + "Registrations": [ + { + "Component": { + "Type": "pip", + "pip": {"Name": "ray[tune]", "Version": "1.2.0" } + }, + "DevelopmentDependency": false + }, + ] +} \ No newline at end of file diff --git a/flaml/tune/result.py b/flaml/tune/result.py new file mode 100644 index 0000000000..f7a1430b5e --- /dev/null +++ b/flaml/tune/result.py @@ -0,0 +1,148 @@ +''' +Copyright 2020 The Ray Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +This source file is adapted here because ray does not fully support Windows. + +Copyright (c) Microsoft Corporation. +''' +import os + +# yapf: disable +# __sphinx_doc_begin__ +# (Optional/Auto-filled) training is terminated. Filled only if not provided. +DONE = "done" + +# (Optional) Enum for user controlled checkpoint +SHOULD_CHECKPOINT = "should_checkpoint" + +# (Auto-filled) The hostname of the machine hosting the training process. +HOSTNAME = "hostname" + +# (Auto-filled) The auto-assigned id of the trial. +TRIAL_ID = "trial_id" + +# (Auto-filled) The auto-assigned id of the trial. +EXPERIMENT_TAG = "experiment_tag" + +# (Auto-filled) The node ip of the machine hosting the training process. +NODE_IP = "node_ip" + +# (Auto-filled) The pid of the training process. 
+PID = "pid" + +# (Optional) Default (anonymous) metric when using tune.report(x) +DEFAULT_METRIC = "_metric" + +# (Optional) Mean reward for current training iteration +EPISODE_REWARD_MEAN = "episode_reward_mean" + +# (Optional) Mean loss for training iteration +MEAN_LOSS = "mean_loss" + +# (Optional) Mean loss for training iteration +NEG_MEAN_LOSS = "neg_mean_loss" + +# (Optional) Mean accuracy for training iteration +MEAN_ACCURACY = "mean_accuracy" + +# Number of episodes in this iteration. +EPISODES_THIS_ITER = "episodes_this_iter" + +# (Optional/Auto-filled) Accumulated number of episodes for this trial. +EPISODES_TOTAL = "episodes_total" + +# Number of timesteps in this iteration. +TIMESTEPS_THIS_ITER = "timesteps_this_iter" + +# (Auto-filled) Accumulated number of timesteps for this entire trial. +TIMESTEPS_TOTAL = "timesteps_total" + +# (Auto-filled) Time in seconds this iteration took to run. +# This may be overridden to override the system-computed time difference. +TIME_THIS_ITER_S = "time_this_iter_s" + +# (Auto-filled) Accumulated time in seconds for this entire trial. +TIME_TOTAL_S = "time_total_s" + +# (Auto-filled) The index of this training iteration. +TRAINING_ITERATION = "training_iteration" +# __sphinx_doc_end__ +# yapf: enable + +DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", EXPERIMENT_TAG, TRIAL_ID) + +DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL, + MEAN_ACCURACY, MEAN_LOSS) + +# Make sure this doesn't regress +AUTO_RESULT_KEYS = ( + TRAINING_ITERATION, + TIME_TOTAL_S, + EPISODES_TOTAL, + TIMESTEPS_TOTAL, + NODE_IP, + HOSTNAME, + PID, + TIME_TOTAL_S, + TIME_THIS_ITER_S, + "timestamp", + "experiment_id", + "date", + "time_since_restore", + "iterations_since_restore", + "timesteps_since_restore", + "config", +) + +# __duplicate__ is a magic keyword used internally to +# avoid double-logging results when using the Function API. +RESULT_DUPLICATE = "__duplicate__" + +# __trial_info__ is a magic keyword used internally to pass trial_info +# to the Trainable via the constructor. +TRIAL_INFO = "__trial_info__" + +# __stdout_file__/__stderr_file__ are magic keywords used internally +# to pass log file locations to the Trainable via the constructor. +STDOUT_FILE = "__stdout_file__" +STDERR_FILE = "__stderr_file__" + +# Where Tune writes result files by default +DEFAULT_RESULTS_DIR = (os.environ.get("TEST_TMPDIR") + or os.environ.get("TUNE_RESULT_DIR") + or os.path.expanduser("~/ray_results")) + +# Meta file about status under each experiment directory, can be +# parsed by automlboard if exists. +JOB_META_FILE = "job_status.json" + +# Meta file about status under each trial directory, can be parsed +# by automlboard if exists. +EXPR_META_FILE = "trial_status.json" + +# File that stores parameters of the trial. +EXPR_PARAM_FILE = "params.json" + +# Pickle File that stores parameters of the trial. +EXPR_PARAM_PICKLE_FILE = "params.pkl" + +# File that stores the progress of the trial. +EXPR_PROGRESS_FILE = "progress.csv" + +# File that stores results of the trial. +EXPR_RESULT_FILE = "result.json" + +# Config prefix when using Analysis. 
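+# (hyperparameter values appear in results under keys like "config/<name>")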
+CONFIG_PREFIX = "config/" diff --git a/flaml/tune/sample.py b/flaml/tune/sample.py index 5d89555e8d..94f23f2ca8 100644 --- a/flaml/tune/sample.py +++ b/flaml/tune/sample.py @@ -414,6 +414,31 @@ class Quantized(Sampler): return list(quantized) +class PolynomialExpansionSet: + + def __init__(self, init_monomials: set = (), highest_poly_order: int = None, + allow_self_inter: bool = False): + self._init_monomials = init_monomials + self._highest_poly_order = highest_poly_order if \ + highest_poly_order is not None else len(self._init_monomials) + self._allow_self_inter = allow_self_inter + + @property + def init_monomials(self): + return self._init_monomials + + @property + def highest_poly_order(self): + return self._highest_poly_order + + @property + def allow_self_inter(self): + return self._allow_self_inter + + def __str__(self): + return "PolynomialExpansionSet" + + # TODO (krfricke): Remove tune.function def function(func): logger.warning( @@ -535,3 +560,9 @@ def qrandn(mean: float, sd: float, q: float): integer increment of this value. """ return Float(None, None).normal(mean, sd).quantized(q) + + +def polynomial_expansion_set(init_monomials: set, highest_poly_order: int = None, + allow_self_inter: bool = False): + + return PolynomialExpansionSet(init_monomials, highest_poly_order, allow_self_inter) diff --git a/flaml/version.py b/flaml/version.py index df12433297..3d187266f1 100644 --- a/flaml/version.py +++ b/flaml/version.py @@ -1 +1 @@ -__version__ = "0.4.2" +__version__ = "0.5.0" diff --git a/notebook/flaml_autovw.ipynb b/notebook/flaml_autovw.ipynb new file mode 100644 index 0000000000..341bd4b3de --- /dev/null +++ b/notebook/flaml_autovw.ipynb @@ -0,0 +1,415 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n", + "\n", + "Licensed under the MIT License.\n", + "\n", + "# AutoVW: ChaCha for Online AutoML with Vowpal Wabbit\n", + "\n", + "\n", + "## 1. Introduction\n", + "\n", + "\n", + "In this notebook, we use one real data example (regression task) to showcase AutoVW, which is an online AutoML solution based on the following work:\n", + "\n", + "*ChaCha for online AutoML. Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. To appear in ICML 2021.*\n", + "\n", + "AutoVW is implemented in FLAML. FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` option:\n", + "```bash\n", + "pip install flaml[notebook]\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install flaml[notebook];" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "## 2. Online regression with AutoVW\n", + "### Load data from openml and preprocess\n", + "\n", + "Download [dataset_sales](https://www.openml.org/d/42183) from OpenML." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "slideshow": { + "slide_type": "subslide" + }, + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "(36203, 17) (36203,)\n" + } + ], + "source": [ + "import openml\n", + "# did = 42183\n", + "did = 41506\n", + "ds = openml.datasets.get_dataset(did)\n", + "target_attribute = ds.default_target_attribute\n", + "data = ds.get_data(target=target_attribute, dataset_format='array')\n", + "X, y = data[0], data[1]\n", + "print(X.shape, y.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Convert the openml dataset into vowpalwabbit examples:\n", + "Sequentially group features into up to 10 namespaces and convert the original data examples into vowpal wabbit format." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "openml example: 8.170000076293945 [1.0000e+01 7.0000e+00 3.0000e+00 4.0000e+00 nan 6.3300e+00\n 1.3600e-01 7.3300e+00 7.0100e+00 6.9800e+00 3.0000e-03 7.0000e+00\n 9.7000e+00 1.2300e+01 1.0217e+03 0.0000e+00 5.8000e+01]\nvw example: 8.170000076293945 |a 0:10.000000 1:7.000000|b 2:3.000000 3:4.000000|c 4:nan 5:6.330000|d 6:0.136000 7:7.330000|e 8:7.010000 9:6.980000|f 10:0.003000 11:7.000000|g 12:9.700000 13:12.300000|h 14:1021.700012 15:0.000000|i 16:58.000000\n" + } + ], + "source": [ + "import numpy as np\n", + "import string\n", + "NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase)\n", + "max_ns_num = 10 # the maximum number of namespaces\n", + "orginal_dim = X.shape[1]\n", + "max_size_per_group = int(np.ceil(orginal_dim / float(max_ns_num)))\n", + "# sequential grouping\n", + "group_indexes = []\n", + "for i in range(max_ns_num):\n", + " indexes = [ind for ind in range(i * max_size_per_group,\n", + " min((i + 1) * max_size_per_group, orginal_dim))]\n", + " if len(indexes) > 0:\n", + " group_indexes.append(indexes)\n", + "\n", + "vw_examples = []\n", + "for i in range(X.shape[0]):\n", + " ns_content = []\n", + " for zz in range(len(group_indexes)):\n", + " ns_features = ' '.join('{}:{:.6f}'.format(ind, X[i][ind]) for ind in group_indexes[zz])\n", + " ns_content.append(ns_features)\n", + " ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}'.format(NS_LIST[j], ns_content[j]) for j in range(len(group_indexes))))\n", + " vw_examples.append(ns_line)\n", + "print('openml example:', y[0], X[0])\n", + "print('vw example:', vw_examples[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "### Set up the online learning loop\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.metrics import mean_squared_error\n", + "def online_learning_loop(iter_num, vw_examples, vw_alg):\n", + " \"\"\"Implements the online learning loop.\n", + " \"\"\"\n", + " print('Online learning for', iter_num, 'steps...')\n", + " loss_list = []\n", + " y_predict_list = []\n", + " for i in range(iter_num):\n", + " vw_x = vw_examples[i]\n", + " y_true = float(vw_examples[i].split('|')[0])\n", + " # predict step\n", + " y_pred = vw_alg.predict(vw_x)\n", + " # learn step\n", + " vw_alg.learn(vw_x)\n", + " # calculate one step loss\n", + " loss = mean_squared_error([y_pred], [y_true])\n", + " loss_list.append(loss)\n", + " y_predict_list.append([y_pred, y_true])\n", + " return loss_list\n", 
+ "\n", + "max_iter_num = 10000 # or len(vw_examples)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Vanilla Vowpal Wabbit (VW)\n", + "Create and run a vanilla vowpal wabbit learner." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": "Online learning for 10000 steps...\nFinal progressive validation loss of vanilla vw: 15.180878192648041\n" + } + ], + "source": [ + "from vowpalwabbit import pyvw\n", + "''' create a vanilla vw instance '''\n", + "vanilla_vw = pyvw.vw()\n", + "\n", + "# online learning with vanilla VW\n", + "loss_list_vanilla = online_learning_loop(max_iter_num, vw_examples, vanilla_vw)\n", + "print('Final progressive validation loss of vanilla vw:', sum(loss_list_vanilla)/len(loss_list_vanilla))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### AutoVW which tunes namespace interactions \n", + "Create and run an AutoVW instance which tunes namespace interactions. Each AutoVW instance allows ```max_live_model_num``` of VW models (each associated with its own hyperaparameter configurations that are tuned online) to run concurrently in each step of the online learning loop." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "slideshow": { + "slide_type": "slide" + }, + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": "Seed namespaces (singletons and interactions): ['e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nCreated challengers from champion |\nNew challenger size 37, ['|ah', '|ch', '|df', '|ef', '|ag', '|bg', '|be', '|eh', '|hi', '|cd', '|ci', '|eg', '|bh', '|ad', '|bi', '|ab', '|cg', '|bc', '|gi', '|ai', '|cf', '|ei', '|dg', '|ac', '|af', '|ce', '|ae', '|de', '|fi', '|bd', '|gh', '|bf', '|dh', '|di', '|fh', '|fg', '|']\nOnline learning for 10000 steps...\nSeed namespaces (singletons and interactions): ['dh', 'e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nCreated challengers from champion |dh\nNew challenger size 43, ['|dh_ei', '|bd_dh', '|cdh_dh', '|ac_dh', '|bh_dh', '|ab_dh', '|dh_gi', '|cg_dh', '|bf_dh', '|dh_dhi', '|deh_dh', '|dh_fi', '|ad_dh', '|dh_hi', '|dh_eg', '|bdh_dh', '|dh_eh', '|ag_dh', '|dh', '|de_dh', '|dgh_dh', '|bc_dh', '|cd_dh', '|dh_ef', '|cf_dh', '|dh_di', '|bi_dh', '|ah_dh', '|dh_fh', '|ce_dh', '|ae_dh', '|adh_dh', '|df_dh', '|ch_dh', '|dh_fg', '|ai_dh', '|ci_dh', '|dh_gh', '|dfh_dh', '|af_dh', '|dg_dh', '|be_dh', '|bg_dh']\nFinal progressive validation loss of autovw: 10.744201540966063\n" + } + ], + "source": [ + "''' import AutoVW class from flaml package '''\n", + "from flaml import AutoVW\n", + "\n", + "'''create an AutoVW instance for tuning namespace interactions'''\n", + "autovw_ni = AutoVW(max_live_model_num=5, search_space={'interactions': AutoVW.AUTO_STRING})\n", + "\n", + "# online learning with AutoVW\n", + "loss_list_autovw_ni = online_learning_loop(max_iter_num, vw_examples, autovw_ni)\n", + "print('Final progressive validation loss of autovw:', sum(loss_list_autovw_ni)/len(loss_list_autovw_ni))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Online performance comparison between vanilla VW and AutoVW" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": "
", + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAFzCAYAAADIY/vqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nOzdeZxcVZ338c+vet/3TrqzdvaEhCQQAgkqRERAVsUB4jIoKuMog6M+z4gzbjjOiA4yiCsqKo86QURFFhmUXUhYEiB7SELWTnfSnd739Tx/3NtbSDrV6a6+Vd3f9+tVr6q6dav615ci3z7nnnuOOecQERGR2BYKugAREREZPgW6iIjIGKBAFxERGQMU6CIiImOAAl1ERGQMUKCLiIiMAfFBFzAc+fn5bvr06UGXISIiMio2bNhw1DlXcLzXYjrQp0+fzvr164MuQ0REZFSY2f4TvaYudxERkTFAgS4iIjIGKNBFRETGgJg+hy4iIsHo6OigtLSU1tbWoEsZk5KTk5k8eTIJCQlhv0eBLiIiQ1ZaWkpGRgbTp0/HzIIuZ0xxzlFVVUVpaSklJSVhv09d7iIiMmStra3k5eUpzCPAzMjLyxty74cCXURETonCPHJO5dgq0EVEJOasWrWKxx9/fMC2O++8k3/8x38c0uc89NBD3HbbbQB87Wtf4/bbbwfgIx/5CA888MAJ33fvvfeyevXqAduOHj1KQUEBf/rTn7jqqqt6t3/zm99k1qxZvc8ffvhhrrjiiiHVGQ4FuoiIxJzVq1dz3333Ddh23333vSVkT+aKK67glltuGfLPf+9738tf//pXmpube7c98MADXH755axcuZIXX3yxd/u6devIzMykoqICgLVr17Jy5coh/8yTUaCLiEjMef/738+jjz5Ke3s7APv27aOsrIw1a9awbNkyTjvtNL761a/27j99+nS++tWvcsYZZ7Bo0SJ27NgBwC9/+UtuuummQX/W17/+dc466ywWLlzIjTfeiHOOzMxMzjvvPB5++OHe/Xr+oCgoKCAzM5Pdu3cDcOjQIa6++mrWrl0LeIF+7rnnjujxAI1yFxGRYbr14a1sK6sf0c9cUJzJVy8/7YSv5+bmsnz5ch577DGuvPJK7rvvPq655hr+9V//ldzcXLq6urjgggvYtGkTp59+OgD5+fm8+uqr/PCHP+T222/nZz/7WVi13HTTTXzlK18B4MMf/jCPPPIIl19+OatXr+Y3v/kN1157LWVlZezcuZN3vvOdAJx77rmsXbuWrq4uZs+ezTnnnMPjjz/OZZddxsaNGznrrLOGeYTeSi1035ubX2TrC48GXYaIiISpf7d7T+v4/vvv54wzzmDp0qVs3bqVbdu29e7/vve9D4AzzzyTffv2hf1znn76ac4++2wWLVrEU089xdatWwG49NJLeeGFF6ivr+f+++/n6quvJi4uDoCVK1eydu1a1q5dy4oVK1i+fDkvvfQSr732GvPmzSM5OXmEjkIftdB9NX+9naKGTXDupUGXIiISUwZrSUfSlVdeyWc/+1leffVVmpubyc3N5fbbb+eVV14hJyeHj3zkIwMu/UpKSgIgLi6Ozs7OsH5Ga2srn/rUp1i/fj1Tpkzha1/7Wu9npqSkcPHFF/PHP/6R++67jzvuuKP3feeeey7f+9736Orq4hOf+AQZGRm0trbyzDPPROT8OaiF3stZCMMFXYaIiIQpPT2dVatWccMNN7B69Wrq6+tJS0sjKyuLI0eO8Nhjjw37Z/SEd35+Po2NjW8Z+b569WruuOMOjhw5wooVK3q3z58/n7KyMp5//nmWLl0KwJIlS/jxj38ckfPnoEDvYyFCrjvoKkREZAhWr17Nxo0bWb16NYsXL2bp0qXMmzePD3zgAyMSnNnZ2XziE59g4cKFXHTRRW85933hhRdSVlbGtddeO+DacTPj7LPPJi8vr3f61hUrVrBnz56ItdDNudhtlS5btsyN1HroL3/3A0yvWUfh1/aOyOeJiIxl27dvZ/78+UGXMaYd7xib2Qbn3LLj7a8Weg91uYuISAxToPt0Dl1ERGKZAr2XEULn0EVEJDYp0HuohS4iIjFMgd7DQsSphS4iIjFKge5zFofpsjUREYlRCvQeFiKkLncRkZjy4IMPYma9i60M5s477xywOtrxfPSjH+Xuu+9+y8+45JJL+OxnP8udd97Zu/2iiy7i4x//eO/zz3/+8wNmiwNvSdbU1NTeldbAmxDneI+HS4HeQ+fQRURizpo1a3jb297GmjVrTrpvOIE+2LKsPQuuAHR3d3P06NHeed3hxMui5ufn853vfCecX2dYFOg+ZxrlLiISSxobG3n++ee55557ekP4mW
ee4bLLLuvd56abbuKXv/wld911F2VlZaxatYpVq1YB3h8DixYtYuHChXzhC18A4IILLmDHjh2Ul5cD0NTUxBNPPMFVV13FypUrWbduHQBbt25l4cKFZGRkUFNTQ1tbG9u3b+eMM854S5033HADv/3tb6muro7o8dDiLD3U5S4icmoeuwUObx7Zz5y4CC65bdBd/vSnP3HxxRczZ84c8vLy2LBhwwn3vfnmm7njjjt4+umnyc/Pp6ysjC984Qts2LCBnJwc3v3ud/Pggw9y1VVXcfXVV3P//ffzmc98hocffpjzzz+fzMxMMjMziY+P58CBA72rqB06dIh169aRlZXFokWLSExM5Ctf+QrLli3jiiuuALxu9RtuuIHvfve73HrrrSN6mPpTC72HutxFRGLKmjVruO666wC47rrrwup27/HKK69w/vnnU1BQQHx8PB/84Ad57rnngOMvy9rj2GVRV6xY0fu8Z+74r3/9671h3uPmm2/m3nvvpaGhYVi/82DUQu+hy9ZERE7NSVrSkVBdXc1TTz3F5s2bMTO6urowM6688kq6u/v+Le+/fGq4Vq5cSXl5ORs3bmTt2rUDzqn3nEffvHkzCxcuZMqUKXznO98hMzOTj370oyf8zOzsbD7wgQ/wgx/8YMj1hEst9B4WR8gcrluhLiIS7R544AE+/OEPs3//fvbt28fBgwcpKSmhu7ubbdu20dbWRm1tLU8++WTvezIyMnpbyMuXL+fZZ5/l6NGjdHV1sWbNGs477zzAWynt2muv5frrr+eSSy4hOTm59zNWrlzJI488Qm5uLnFxceTm5lJbW8u6detOuora5z73Oe6+++6w12IfKgV6D/MORSyvPiciMl6sWbOG9773vQO2XX311dx3331cc801LFy4kGuuuaZ3LXKAG2+8kYsvvphVq1ZRVFTEbbfdxqpVq1i8eDFnnnkmV155Ze++/Zdl7W/RokUcPXqUc845Z8C2rKws8vPzAfjKV77CQw899Jaa8/Pzee9730tbW9uIHINjaflU37pffIEV+39M579VEp+QOCKfKSIyVmn51MjT8qmnyCwOYMC5FxERkVihQPc5MwC6u7sCrkRERGToFOg+88+hd3dFZrCCiIhIJCnQe4TU5S4iMhSxPAYr2p3KsVWg9+hpoSvQRUROKjk5maqqKoV6BDjnqKqqGnC5XDg0sUyPnsvWFOgiIic1efJkSktLqaysDLqUMSk5OZnJkycP6T0K9B69ga5BcSIiJ5OQkEBJSUnQZUg/6nL3Wainy12BLiIisUeB3sMU6CIiErsU6D10Dl1ERGKYAt1nvZetqYUuIiKxR4HuM7XQRUQkhinQfU7XoYuISAxToPvUQhcRkVimQPf1XLam69BFRCQWKdB79Ha5a3EWERGJPQp0X88od3W5i4hILFKg9zA/0J0CXUREYo8C3WchA3QOXUREYpMCvYf1TCyjpQBFRCT2KNB9Zmqhi4hI7FKg+/oGxSnQRUQk9ijQfb2BrkFxIiISgxToPtPyqSIiEsMU6L6emeLQdegiIhKDFOg9dB26iIjEMAW6r+86dAW6iIjEHgW6z0xTv4qISOxSoPv6VlvT4iwiIhJ7FOi+kC5bExGRGBYfdAE9zOwq4FIgE7jHOfeXUS2gt4WuQBcRkdgT0Ra6mf3czCrMbMsx2y82szfMbLeZ3QLgnHvQOfcJ4JPAtZGs6/i1+oGuFrqIiMSgSHe5/xK4uP8G80af/QC4BFgArDazBf12+ZL/+qjqmSkOp4llREQk9kQ00J1zzwHVx2xeDux2zu1xzrUD9wFXmudbwGPOuVdP9JlmdqOZrTez9ZWVlSNWa9+gOK22JiIisSeIQXGTgIP9npf62/4JeBfwfjP75Ine7Jz7iXNumXNuWUFBwYgV1dvlrqlfRUQkBkXNoDjn3F3AXUH9/FCcfyjU5S4iIjEoiBb6IWBKv+eT/W2BMo1yFxGRGBZEoL8CzDazEjNLBK4DHgqgjgFCGuUuIiIxLNKXra0B1gFzzazUzD7mnOsEbgIeB7YD9zvntkayjrBoYhkREYlhET2H7pxbfYLtfwb+HMmfPVQhf3EWLZ8qIiKxSFO/+voWZ9GgOBERiT0KdF/fxDJqoYuISOxRoPtCcRoUJyIisUuB7uttoescuoiIxCAFui+kLncREYlhMRnoZna5mf2krq5uxD5T16GLiEgsi8lAd8497Jy7MSsra+Q+1J8pTi10ERGJRTEZ6JEQ6gl0XbYmIiIxSIHui/MXZ1GXu4iIxCIFeg8NihMRkRimQPf1dLmfs/P2gCsREREZOgW6r/ccuoiISAxSivl6r0MXERGJQQp0n6mFLiIiMUwp5usZ5S4iIhKLFOg+dbmLiEgsU6D7NChORERimVLMp3PoIiISy2IyxSKxOIuIiEgsi8lAj8jiLP10d2k+dxERiS0xGeiR1tHRFnQJIiIiQ6JAP47OjvagSxARERkSBfpxdHZ0BF2CiIjIkCjQj6OjvTXoEkRERIZEgX4cXZ3qchcRkdiiQD+OLnW5i4hIjDlpoJvZt80s08wSzOxJM6s0sw+NRnFB6erUKHcREYkt4bTQ3+2cqwcuA/YBs4D/G8migqZR7iIiEmvCCfSeZcguBX7nnBvz07N1KdBFRCTGhBPoj5jZDuBM4EkzKwDG5DDwdUV/D0B3l86hi4hIbDlpoDvnbgFWAsuccx1AE3BlpAsLQvr8dwLQrRa6iIjEmHAGxf0d0OGc6zKzLwG/BoojXlkAQnGJAHRqUJyIiMSYcLrcv+ycazCztwHvAu4BfhTZsgYXqdXW4hK8QHed6nIXEZHYEk6g9yw9dinwE+fco0Bi5Eo6uUitthaK936t7i51uYuISGwJJ9APmdndwLXAn80sKcz3xZy4+AQAujs7A65ERERkaMIJ5muAx4GLnHO1QC5j9Dr0ni73bp1DFxGRGBPOKPdm4E3gIjO7CSh0zv0l4pUFIC4hCdA5dBERiT3hjHL/DPAboNC//drM/inShQUhrvccugJdRERiS/zJd+FjwNnOuSYAM/sWsA74XiQLC0K83+WOAl1ERGJMOOfQjb6R7viPLTLlBKsn0Iu2/jTgSkRERIYmnBb6L4CXzOyP/vOr8K5FH3N6zqFPcWUBVyIiIjI0Jw1059wdZvYM8DZ/00edc69FtKqAJCQkBF2CiIjIKTlhoJtZbr+n+/xb72vOuerIlRWMeL+FLiIiEmsGa6FvABx958udf2/+4xkRrCsQCQmBToAnIiJyyk4Y6M65ktEsJBpYaExOgCciIuOAEkxERGQMUKAf48UJq2l2OpcuIiKxRYF+DBefRAJanEVERGJLONehY2ZxwIT++zvnDkSqqEDFJZFgXXR1dhIXH9bhERERCdxJE8uft/2rwBGg29/sgNMjWNfJarocuHzWrFkj/9nxXnd7e1sLKfEZI/75IiIikRBOl/tngLnOudOcc4v8W2BhDuCce9g5d2NWVtbIf3hCMgDtrc0j/9kiIiIREk6gHwTqIl1ItOjfQhcREYkV4Zwk3gM8Y2aPAm09G51zd
0SsqgCF/BZ6R1trwJWIiIiEL5xAP+DfEv3bmNYX6OpyFxGR2BHO4iy3AphZuv+8MdJFBakn0NtbxvSvKSIiY8xJz6Gb2UIzew3YCmw1sw1mdlrkSwtGnB/os/90ecCViIiIhC+cQXE/AT7nnJvmnJsGfB74aWTLEhERkaEIJ9DTnHNP9zxxzj0DpEWsooBlTxr5a9tFREQiLaxR7mb2ZeBX/vMP4Y18H5OmzlkSdAkiIiJDFk4L/QagAPiDfyvwt41ZG5PPopyCoMsQEREJWzij3GuAm0ehlqjRllJIXOubQZchIiISthMGupnd6Zz7ZzN7GG/u9gGcc1dEtLIAufgUkvvm0BEREYl6g7XQe86Z3z4ahUST7vhUkp0CXUREYscJA905t8F/uMQ5993+r5nZZ4BnI1lYoBJTSbROOjvaiU8Y85PjiYjIGBDOoLjrj7PtIyNcR1SxhBQAWpo1W5yIiMSGwc6hrwY+AJSY2UP9XsoAqiNdWJAsMRWAtpZGMrJyA65GRETk5AY7h74WKAfyge/0294AbIpkUUELJXrz5rQ1NwVciYiISHgGO4e+H9gPrBi9cqJDXJLXQm9vaQi4EhERkfCEszjLOWb2ipk1mlm7mXWZWf1oFBeUuCSvha4V10REJFaEMyju+8BqYBeQAnwc+EEkizoZM7vczH5SV1cXkc+PT04HoKNNXe4iIhIbwgl0nHO7gTjnXJdz7hfAxZEt66T1POycuzErKysin5+Q7LXQu1oV6CIiEhvCWZyl2cwSgdfN7Nt4A+XC+kMgViWmeIHeqRa6iIjEiHCC+cNAHHAT0ARMAa6OZFFBS/S73LvamgOuREREJDzhLM6y33/YAtwa2XKiQ2KqF+jptTsCrkRERCQ8g00ss5njLMrSwzl3ekQqigIpfqAv2P9rAh7/JyIiEpbBWuiX+fef9u97Fmv5EIME/ViQnJIedAkiIiJDcrKJZTCzC51zS/u99AUzexW4JdLFBSUuPpyxgiIiItEjnEFxZmbn9nuyMsz3xbQNKeeyN64k6DJERETCEk5T9GPAz80sCzCgBrgholVFga74VJJaNcpdRERiQzij3DcAi/1AxzkXmenZokx3QirJriXoMkRERMIy2Cj3Dznnfm1mnztmOwDOuTsiXFuguhPSSHGtQZchIiISlsFa6Gn+fcZoFBJ1EtNJsXYamlvISE0JuhoREZFBDTbK/W7/flxMJnOsZksG4Fv/8UW+8R93BlyNiIjI4Abrcr9rsDc6524e+XKiR1KHtxb6NxJ+ASjQRUQkug3W5b5h1KqIQvHdOn8uIiKxY7Au93tHs5Bo05WSH3QJIiIiYTvpBDFmVmBmt5vZn83sqZ7baBQXpGXXfQmAMgoCrkREROTkwpnx7TfAdqAEb7W1fcArEawpKiQnJ/NK4ftJdZpcRkREol84gZ7nnLsH6HDOPeucuwF4Z4TrigouKYsMmmnv6Ay6FBERkUGFE+gd/n25mV1qZkuB3AjWFDUsJYs4czTW1wZdioiIyKDCmcv9G/60r58HvgdkAp+NaFVRIi41G4DG+ipy8zRITkREolc4gf6SP397HbAqwvVElYS0HABa6qsDrkRERGRw4XS5v2BmfzGzj5lZTsQriiKJfqC3NijQRUQkup000J1zc4AvAacBG8zsETP7UMQriwLJmd5QgfbGmoArERERGVw4LXSccy875z4HLAeqgUAnnTGzy83sJ3V1kV3JNTUjD4BmdbmLiEiUC2dimUwzu97MHgPWAuV4wR4Y59zDzrkbs7KyIvpz0rO8QJ+y9UcR/TkiIiLDFc6guI3Ag8DXnXPrIlxPVEnxu9xnWFnAlYiIiAwunECf4ZxzEa8kGoXi2BU/h+a4DBYHXYuIiMggwhkUNz7D3NeakE1Cey2tHV1BlyIiInJCYQ2KG8/aEnPI6KpnxTefDLoUERGRE1Kgn0Qt6UwJVZLZcjDoUkRERE4onFHuc8zsSTPb4j8/3cy+FPnSokOceffPJn0u2EJEREQGEU4L/afAF/EXaXHObQKui2RR0STV2oMuQURE5KTCCfRU59zLx2wbN+uJzi6Z3vv4QJXWRhcRkegUTqAfNbOZgAMws/fjTS4zLuRe8m8A1Lo03vFfTwdcjYiIyPGFE+ifBu4G5pnZIeCfgU9GtKpokpDCy9NuJJNmQnQHXY2IiMhxhTOxzH7n3LvMLA0IOecaIl1UtJk5bRqh/Y7zQ68DlwddjoiIyFuE00Lfa2Y/Ac4BGiNcT1TKmzAJgJ8n3h5wJSIiIscXTqDPA57A63rfa2bfN7O3RbasKBOf0vtQM8aJiEg0Cmfq12bn3P3OufcBS4FM4NmIVxZN5lzU+7C0piXAQkRERI4vrJnizOw8M/shsAFIBq6JaFXRxowDcz9Ck0vi8u89H3Q1IiIib3HSQXFmtg94Dbgf+L/OuaZIFxWNQukTSLM24jvG3ZhAERGJAeGMcj/dOVcf8UqiXEpuMQBrEr/B5tJ3s2hyVsAViYiI9DlhoJvZvzjnvg18w8ze8rpz7uZIFhZt8gq9ke4LQ/uY/v3n2XfbpQFXJCIi0mewFvp2/37DaBQS9RLT+j0Z10vEi4hIFDphoDvnHvbv7+3ZZmYhIH1cdsHnz+l9+LXEXwOXBVeLiIjIMcJZPvV/zCzTnyluC7DNzP5v5EuLMmn5dM96FwAfCT2Gc2qli4hI9AjnsrUFfov8KuAxoAT4cESrilKh7Km9jysb2wKsREREZKBwAj3BzBLwAv0h51wH4/Uk8ts/D8ARl83Pn98XbC0iIiL9hBPodwP7gDTgOTObBoy/c+gAWZM5euY/U0Ad9zz7RtDViIiI9Apn6te7nHOTnHPvcZ79wKpRqC0q5U6aTcgcxXaUu57cFXQ5IiIiQHiD4j7jD4ozM7vHzF4F3jkKtUWlUM40AG6JX8Mdf90ZcDUiIiKecLrcb/AHxb0byMEbEHdbRKuKZhlFAFwS9wozC9JOsrOIiMjoCCfQe6aJew/wK+fc1n7bxp+8mb0PC9sOBFiIiIhIn3ACfYOZ/QUv0B83swygO7JlRbF+0+Cuaf8nDlY30945fg+HiIhEh3AC/WPALcBZzrlmIBH4aESrOgkzu9zMflJXVxdMAQXzAXi9ewZv//bTfPnBLcHUISIi4gsn0B2wAOhZjCUNb030wDjnHnbO3ZiVFdCKZ6v/B4B4v6Pit+sPsuVQQH9ciIiIEF6g/xBYAaz2nzcAP4hYRbEgdwYsv5GpdoSeOXauuXtdsDWJiMi4Fk6gn+2c+zTQCuCcq8Hrdh/fckrItBZyaACgub2LHYfrqW1uD7gwEREZj8IJ9A4zi8NvippZAeN5UFyPtHwAXkv+JPNtPwUZSVx859+4/PvPB1yYiIiMR+EE+l3AH4FCM/sP4HngPyNaVSyYeUHvw/vS/5vKBm+xloPVLRyqbQmqKhERGacGDXR//fO9wL8A3wTKgaucc78bhdqiW1pe78Osjgpm2qHe5+fe9hRP76gIoioRERmnBg1051w38APn3A7n3A+cc993zm0fpdqin78+OsCf
Er884KWHNpZRXqeWuoiIjI5wutyfNLOrzWz8zg53Ipf9N1gcAOnWOuClP752iBXffIoL73iWupaOIKoTEZFxJJxA/wfgd0CbmdWbWYOZjc/lU4+VPRX+tQyKz8BlT2PHrRdy70fPGrDLropG/uvxHQEVKCIi40U4y6dmOOdCzrlE51ym/zxzNIqLCQnJMPOdWO1+kr9ZwHk7/5N9t106YJdfv3iAn/1tjwbLiYhIxISzfOoZx7nNNLP40SgwJsT1uyx/wy8A+O51S7hqSXHv5m88up1zb3uK6bc8yu6KBjaXamY5EREZOeacG3wHsxeBM4DN/qZFwBYgC/hH59xfIlrhIJYtW+bWr18f1I/v09kG3yjse37Zf8OZHwUz6po7WPz1Ex+i9yyayN8tm8Lk7BSm5aWRGB/OWRARERmPzGyDc27Z8V4Lp5VdBnzMXzYVM1sAfB3vUrY/AIEFetSITxr4/JHPQkIaLL6WrNQEvnHVQr50ggVc/rz5MH/efHjAtuXTczlnZh4hg3cvmMiCYp3hEBGRwYXTQt/inFt4vG1m9rpzbklEKxxE1LTQAQ69Coc3w8P+GjZnfRwu/c6AXZxzvLC7in97cDP7q5rD/ui3z87nO9cspjAj0DVxREQkYIO10MMJ9N8C1cB9/qZrgXzgw8DzzrmzTvTeSIuqQO/xvWVQtct7fNr74D23D5iEpr9NpbVUNbbzl21HmF2Yzo7D9dy/vvSEH33rFadx5ZJislM1lb6IyHg03EBPAT4FvM3f9ALeCmytQKpzrnEEax2SqAz0llq4aym0VPdt+/xOyJgQ1ttrm9upamqntrmDeRMzWL+/hut//vKAfe64ZjFXLplEXEhTA4iIjCfDCnT/AxKBuXgLtLzhnIuKmVKiMtABnINbs/uen34dvO/uYXyc4zt/2cn3n949YHtuWiIfPHsqZ07L4ZFN5SyZks3FCyeSn550gk8SEZFYNtwW+vnAvcA+wIApwPXOuedGtsyhi9pAB3jmW/BMvzVsvnwU4hKG9ZFd3Y5/+NV6nth+8nni37t0Ep9eNZNZhRnD+pkiIhI9hhvoG4APOOfe8J/PAdY4584c8UqHKKoDHaC7C76e2/f8fT+FRX8Hw5xFt7Wjix8+vZvHtx7hjSMNg+6bmhjH5acX8+EV0zitOBPN4CsiEruGG+ibnHOnn2xbEKI+0ME7p/6taX3PCxfAJ1+A0Mhdb+6c6w3qprZOHt5Yxh9eO8TLe6vfsu+li4r496sWkpumgXUiIrFmuIH+C6AL+LW/6YNAnHPuhhGt8hTERKADPHYLvPSjvucWgvP/Fd72WYiL3IR7bZ1dPL2jkrufe5PXDtQOeO3Tq2by3qWTKcxMIjM5gbbOLgzTxDYiIlFsuIGeBHyavlHufwN+6JxrG9EqT0HMBDrAG4/BE7dC5TGrz159Dyx6f8R/fFe346W9VfzyhX38ZduRQfddMSOPb7//dKbkpka8LhERCd8pB7qZxQFbnXPzIlXccMRUoPd4/r/hia+9dft1/wPzLn3r9gh443AD//X4GzyxffBgB7hySTGfePsM5k3MID5OrXcRkSANt4X+J+CfnHMHIlHccMRkoAMc2QrrfwGv/HTg9vw5sPo+yCzFw/0AACAASURBVJs5KmU453AOGts76exyZKUkcKS+lbuffZN71+0/7nv+/aqFvG/pJNKStDaPiMhoG26gPwcsBV4Gmnq2O+euGMkiT0XMBnqPAy/C2u/Bjkfe+trq38LkZZCWP/p1+V47UMNvXjrA0zsqqGpqf8vrEzOTyc9I5Lw5BVx2ejHzi7w551s7uthd0ciG/TXsrmikub2L+UUZ7K9qpr2zm9SkONo6u5lZkM45M3KZOyGDuJBpBL6IyEkMN9DPO95259yzI1DbsMR8oPeo2QftzfCjFW99beXN8O5/H/WSjtXR1U15bSu/fmk/P3luzwn3m5Gfxp6jTSd8fTApCXEsm57D36+Yzqq5BeriFxE5xikFupklA58EZuEtnXqPc64zYlWegjET6D2cg2dug2dvO+YFg/+zC9ILAinreHZXNLL3aBN56YnsPNzAo5vL+duuowP2mVmQxqWnFzMxM5n61g5SE+PIT0/i4Y1lxMeF2He0iX1Hm2hs7+R4X8MzpmbznkVFXL64mMKMJLXgRWTcO9VA/y3QgTeq/RJgv3PuMxGr8hSMuUDv0dUJu/4C5RsHhvtHH4NpK4Or6yRa2rt4blclcyZkUJSVTHJC3JA/41BtC396/RAv7qlmU2kttc3eLMPT8lK5cP4E3r9sMvMmajlZERmfTjXQNzvnFvmP44GXnXNnRK7MoRuzgd5fezP8Z1Hf85kXwId+P+zZ5mJBR1c3L+6p4rmdlby0t5pNpXUAnDkth2vPmsJFCyaSlTq86XRFRGLJqQb6q/0D/Njn0WBcBDpAdzf89kPwxqN92z6zEXKmB1ZSEHZXNPDwxnLueX4vjW3e2Z/pealMzUtjQVEmVy0tVutdRMa0Uw30LvpGtRuQAjT7j51zLvB/OcdNoPfY9wL88j19z0MJcM29MOtCb+GXcdBqB28GvM2ldfxy7T5e3ltNRUPfHEdzJ2Rw6elFnD+3gFmF6aQm6vI6ERk7hr18arQad4EOb+2CP9bHn/QudxtHOru6OVTbwuNbD/M/Lx1gX1Vz72uLJmWxclYe75hdwJnTck7pvL6ISLRQoI9FhzfD/34R9v3t+K+/76dw2vsiOld8NHLO8eT2Cp7ffZSKhlY27K/hSL3Xgs9IiueKJcVcd9ZUFhRnEhcaHz0aIjJ2KNDHMueg7FWoK4XnbofDmwa+fuMzULw0iMqixoGqZv62u5Inth3hmZ2VOAeJ8SHmTEinvLaVSTkpLJ+ey7VnTWH2BK0fLyLRS4E+nnS0wIZ74alvQHu/tdIv+S9YdsO4a7Ef63BdKw9tPMT6fTXsPNJAcXYKTW2dbCuvp6PLsXBSJoUZySwvyeXSRUVaoEZEoooCfbw6+Arc866B2979DVj+DxCv9dD7q2ho5Z7n9/Lga4d6u+gBJmQmcc6MPC6YP4F3zM4nKyVBE9yISGAU6ONZVwfseQZ+c8wSrbcchOTAL1SISp1d3RyobubP/ux3G0trae3oBiApPsS5s/K5ckkxZ5fkMTErOeBqRWQ8UaCLd65973Ow5jro8EeBz7wAVnwKSs4f913xg2lu7+SF3VW8uKeK9fuq2VfVTF2LN4PdvIkZrJiZx/yJmSyeks2cCelqwYtIxCjQpY9z8MKdx1+TPacEzv4kLL8RQloY5UTaO7vZsL+GJ7cf4ak3KthT2bcYTXzIOGNqDkunZbN8ei4rZ+aTkqhL5URkZCjQ5a0aK2H9z+GZ/zz+60s/BFd8f9xMVjMc1U3tbCqt5c3KJjbsr2bjwToO1bb0vj5nQjqLJ2czPT+N/PREVs7M12A7ETklCnQZXHsztFRDSw08dLN3GVyPM673BtLpfPuQVDW2sWF/Df+79TB7KpvYWlZHR1ff/2vzJmbwznmFnFWSy5nTcshM1pz0InJyCnQZmtoDcP/1A4N
94iI4/4sw79Lg6ophze2d1DR3cKCqmRd2H+XpNyrYWlYPeJ0gC4uzWDgpk6VTc1g+PZfp+WkBVywi0UiBLqemrQEeuwVe/3XftqIlcO7NMP8Kb/54OWXldS28ur+W9furWb+vhm3l9XR1e/8/FmUlM6swnbNLcjlnRh6nFWfpXLyIKNBlBOx6Al79JWx/uG9byTugaDEs+RAUzgustLGiq9uxsbSW9fu88/DbD9f3DrhLig+xdGo2y6blcsH8Qk4rziIxXgMXRcabMRfoZnY5cPmsWbM+sWvXrqDLGV9a6+H3H4ddjw/cXrQYLv+uN82sc9De5PUlx6doxPwwVDS08tzOo2zYX8Or+2vYVdFAt4Oc1AQWTspiel4ay6bnsKAok1mFumROZKwbc4HeQy30ALXWw+bfAQ5e+zWUvTb4/vlzoLMNln/C67afcBqk5GgU/RBVNbbxzBuV/G1XJa8frKWstpX2ru7e12cVpjO7MJ2peamU5KWxZGo2cydkKOhFxggFukRezX5v0pqKbUN7X/ZUWHkzTD7LG3gX0nnioWjr7GJ3RSObSuvYVFrL9vIGjja2UVrTd9lcVkoCq+YWcPaMPC6YX0hhhma3E4lVCnQZPS213n1SxsBwbqn1Rs/XlcLT/+kF+RuPDnxvwTyYcxHMvRSmLFfrfRg6urrZe7SJ1w/U8uyuSl58s4qqpnYACjOSmDsxg/lFmSyclMWKGXkUZCQFXLGIhEOBLtGrvhxe+pE333xLjRf6ADnTYemHvVnrdA38sDnneONIA49sLGdjaS17jzYNaMXPLEhj6dQcLlwwgbNLcslO1eI9ItFIgS6xwTmoehNe/w1svA8ayrztC6+GM/4epr9DA+xGUEdXN5sP1fHy3mpe2H2U9ftqaOnoAqAkP41l03JYMTOPM6flMDU3VefhRaKAAl1ij3NwYB28cBe8+SR0tUPeLDj3n+H0a7X8awS0d3bz4p4qNh6s5dUDNbyyr4bGtk7AG1V/5rQcSvLTWDIlh3lFGUzPSyMupJAXGU0KdIltbQ3eSPr1v4Cjb0D2NG+0/LIbIFEzqkVKd7dj++F6Nh6sY/3+al7eW83hulY6/clv0hLjWDEzn9MnZ7FsWg5T81KZmJlMfJx6UUQiRYEuY0N3N+x+Ap7+Dyh/3bvGfe4lMHMVTDsX8mYGXeGY197ZzZayOnYfaeT10lr+tquSg9V95+IT40MsmZxNSX4a0/JTmVWQzpwJGeSmJxJnRlqSlukVGQ4Fuow9e56Fl+72uuM7W71tM1bBeV+AaSuCrW2cqWhoZVtZPQeqm3mzopHXD9ay80hj7/n4/hLjQxRmJFGSn0ZhRjK5aQlMzUtjQkYSU3JTmTMhQ934IoNQoMvY1d4EOx/3uuTLN0LzUZh3Gcw4H2Zf6I2Wl1HnnONgdQv7q5s4WN3CkXrvj666lg5Ka5opq21lX1UTze0DQz8hzphZkM7ErGSKslKYnJPCtLxUpuelUZydQm6axk7I+KZAl/GhvRleuBP+dgd0d3jbJi2Dxdd5i8lkTAi2PhnAOYdzcKShlcqGNnYdaWRrWT1byuqoa+7gaGNb77XzPYqzkplZmE5OaiL56UlMzU1hqh/4JflpGokvY54CXcaX7i448CJs+xNsexAaj3jb51wC7/g/MPm4/y9IFGpp92bCO1Tbwp6jXuDvqWyiub2T8mOmvU1JiCM3LZG89ETmTMhg0aQslkzJZkFxJgkaqCdjhAJdxrfS9fDar7xr2ztbofgM79K3094LrhvSJ+j69hjU1e04VNNCeV0Lb1Y2sX5/NQ2tnbR2dLHlUB01zV4vTWpiHAuKMinOTmFeUQZLpmRTmJFMfnqiJtCRmKNAFwHv8rcXfwQv/tCbla5HRjEsuALO+gTkzwquPhlRh2pbeP1ALS/vrWJ7eQOHals4VNsyYJ+irGQWFGUys9Abjb94chYzC9IJaWCeRCkFukh/zsHBl7255EMJcHgT7Por4LzW+/RzYfEHYMKCoCuVEVbT1M7rpbWU1bbQ1NbJptI6dlc0sqeyqbf7PjEuREFGEgsnZTKjIJ3peanMKkzntOIskhO0eJAES4EucjJ1pbDuh/DiD/q2TTvXGzE//3LInhJcbRJx7Z3dHKhu5rUDNeyuaKS0toVtZfWU1jTT0eX9GxkXMuZOyGDxlCwWFGcxqyCdkvw0khNCZCYnYIYG5UnEKdBFwuUc7Hvem3b2tV9D7X5ve8k7vDnlp50L+bODrVFGTWdXN+V1rew43MDGg7VsLK1lU2kddS0dA/aLCxkhw7vcLjOF9OR4UhPjyE5NIC0xno4uR2piHLUt7dS3eOf5G1o7qWluZ1peKmmJ8ZTXtdLU3klSfIiEuBATs5KZWZDOjPw0up13SV96cjyGkZTgXc+fn55EQlxI1+6PIwp0kVPR3QVlr8PWP8Cm30JTpbe9YB4s+5jXcs8sCrZGGXXOOfZVNbPrSANHGtqob+ngSH0rITOqmtqpqG+luqmdprZO6lo6aO/qJik+jub2Trqdtz59elI8oRBUN7ZTmJlMfUsHaUnxZCTHk5YUT21zO3UtHRypbwurpry0RCZkJjOjII3EuBCTc1Np7+xmYmYSmSkJTM9PY0FRpsJ/DFCgiwxXZzuUvQob7vWmna3Y5m2f/nZYvBqmnqOpZ2VQXd2O1o6uIU1/29jWyc4jDTT5i+Q0tXXS1tlNWW0ruWkJlNW20trptfZLa1p4s6IR5xzl9a0c75/2uJBRkJ5EQ2sHCydl0dbZzZIp2UzO6ZnEJ430pHjSk+LJ0SQ+UUmBLjKSnPNCfcvv4fU13ux0ALkzYOYFMOdimHEexCUEW6eMW+2d3ZTXtRAfF2LLoToaWjvZe7SR9s5uqps6eP1gDR1djgPVzSf8jKKsZIqzUzDgtOJMZhWmU5Dh9QJMyUklOSGkMQMBUKCLREpXp3e+fc/T3mQ2+9cCDuKTYc5F3gx1M98JqblBVypyXM45f0reFt6sbGTf0Wbi44zdFY1sOVTHropGEuNCAybxAchIiic/I4kJmUnkpCbS0eWYlJ3MvKJMZhd60/cWZiSTGK85HkaSAl1ktDRVeQvGvPkU7HgU2uoBg8lnecGeP9sL+qSMoCsVCZtzjtKaFiob29h3tInyulbK61o4XOeNISir867vr2lqp6nf/PxxISMvLZGi7BQWFGWwoCiTBcVZlOSnkZkcr6V2T4ECXSQIHS1esO9fC9sf7hsxH58C8y+DRddAydshISXYOkVGSHe341BtC7srG6mob/X+CGho40B1M9vK66lt7rs6IC5kTM5JYWquNxf/rMJ0irKSyU5NpDg7mby0JHXrH4cCXSRoznmz01Vsgy1/8M6/t9ZCYrq3pvu8S2Ha2yC9IOhKRSLCOUdZnbfU7sHqZqqa2thf1cyB6mb2Hm2iobXzLe/xRux7oT81N5XctEQaWjsJGUzITKYwM5lpualMy0sdN9P4KtBFok1nG+z6C2x7CHY/AS3V3vY8v0t+3qVQtBgS04KtU2QUOOeobGhjX1Uzze2dHK5rpaqpnfrWDv
YfbeZgjRf8Da2dxIeMUMho7xx4Tj85IURxVgrF2SlMyk4hOy2BydkpzCxIZ0puKgUZSWNipj8Fukg06+6Cgy9509HueMS79r27AxIzYPa7vNnqpr9dy7/KuNfR1U1XtyMpPtR7nf7+qiYOVDdzpL6VstpWSmuaKatrpa6547gD+abnpzE5J4UJmcnMnuB18xdlpZCXlkhuWmLUn9dXoIvEkuZq2POMd/5920PQVudtn7wcFr3fWyUuvTDQEkWiXXe340hDK29WNFFW20JFQyuVDW3s8Qf1ldW20NxvAB/gzfaXmUxRttfSL85O7m31F2Uley3/1IRAz+sr0EViVVeHdznc7r9667vX7AOLgynLva75yWfBlLN1zbvIEHV3exPwHPZH7Nc0d1Dht/LLalsoq2uhvLb1La385IRQb7d+z7X6xVkpFGYmMTErmYmZyWQmJ0RsxT4FushY4ByUb/RGzO94BCp3eNvjU7xL4qathGkrIH8uJKUHW6vIGNDd7ahqaqe8rsUL+X5h3/O4srHtLbPyJcaFKMpOpjAjibfPLuDmC0Zu/YfBAj38OQhFJFhmULzEu13wZag9CKWveF3zu5/0loPtUTAfpp4Nsy/ywj4hObi6RWJUKGQUZCRRkJHE6ZOzj7tPe2c3R+pbOVLfSnmdd1/Z2EZZbSsV9a290/aOBrXQRcaC7m6o3Qdlr8GhV72WfPlGb2KbxAx/1rrLYM4lCneRGKYWushYFwp5c8nnzvCWeQVvQZl9z3nn3nf8GbY8AEmZ3jn3SWfClLNg6kpITA22dhEZEWqhi4wH3V3eyPktv/cukat6E2/Oef/8+9yLvfusyUFXKiKDUAtdZLwLxcGsC7wbQF0plG/yRs/v+mvf+ffpb/fWeS85DwrmeuftRSQmKNBFxqOsyd5t3nv6Rs/v/F/Y9Ft47F+8fTKKvfPuM86HqSu0YpxIlFOXu4gMVL0H9v4N3ngM9j4LHc3ete+TzvRuJe+Aqeco4EUCoOvQReTUdLbBoQ3efPO7n4Ty173tFvIG1M2/zF8Wdo6650VGgQJdREZGezMcfNFfEvYRqNzubc8oggkLvZnrpq30ZrKLTwq2VpExSIEuIpFRs98bPb/3Oe88fNWuvtfyZntd8/mzvYlu0vKgYJ5WkBMZBo1yF5HIyJkGZ17v3cBbWObAi94Mdoc3wfaHoLWub/9QPORM95aGzZ8LBXO8+7xZED8+1rMWiRQFuoiMnNRcb+T8vPf0bWuuhso3oO4gHNkKR3d6S8Vu+X2/N5rXkp9ytjeqvmCud15e3fYiYVOgi0hkpeZ6i8awYuD2lho4sg2q34SK7d60tVv+AK/9yns9FO+13iecBhMXQvEZ3ih7zWwnclwKdBEJRkoOTD/Xu/XobIOKbd5MdhXbvW77PU/D5vu910MJUDjfa81nTYHCBV5rvmAuJKQE83uIRAkFuohEj/gkKF7q3fprrPDOy5e+4i0+U/YabH0QXJf/vmSvJZ89zQv3KWd7I+61jKyMIwp0EYl+6YUw71Lv1qO9yWvF1+6Hg6/AgXXe5XRb/wg4bzKcotNh2rneTHdFi73Z8XS9vIxRumxNRMaWtgZvpP2BF72QL10PXW3ea6l5XrhnT/Va88VLIHcmpBcEW7NImHTZmoiMH0kZMPtC7wbQ0eJ10Ze9Doc3w4G1sOORge/JnuZdMz/xdJh0hnev7nqJMQp0ERnbElK82eumrezb5hw0lHsBf3SX15Lf+5y3OA14U9sWzPOuj8+f4z0ueQdkTAjmdxAJg7rcRUR6NFZ4g+4ObfBmvqt+E6r3+oPvzBthP/F073x80WKYuMjrEdB5eRkl6nIXEQlHeiHMvdi79ehs8wbf7fqrN8p+zzOw6b6+1+NTICUbkjIhb6Z3SV1Cqne+Pi0fEtO919ILvc9qb4K4BGhv9M7358/2lqptqYGudm9q3LhE/aEgQ6ZAFxEZTHySN3iueEnftobDUL7Ju06+rtQL5rYG7/r5nY/3XU43HAlpMGGB1yuQkAbJWd4fCB0tkFbgnw6Y7V2yl5A8/J8nMU+BLiIyVBkTvducd7/1Nee8VnhrLTQegdZ66O70Hrc3D1ycprXWa73XH+qbGKe7EzrbobnKm2Rnyx+hvWHwepIyvXDPmQ6Zk7z7hFTInuLNtpecpbnyxwEFuojISDLzRsgnpXvXvQ9Xd7fX4m9v8kK+rd7rDWhvgsod0FLrzZFfXwY7HoXO1rd+RlySdzogo8ibeMc5bwxAZhHklHiL7EjMU6CLiESzUAgIeefpU7IHvjb3koHPnfNH8Jd5l+r1TL5TVwq1B7z7/Wuhs+WYn5EAuSVQtMQ7v59T4l3GV7zEa+mH4iL6K8rIUKCLiIwVZt4ta/KJewe6u7xw75k3v6HcGw/QUg37nveec8zVTxnF3gI5hfO9S/hyZ3jr3aflRfxXkvAp0EVExpNQnNcaByic99bXuzq9bv1DG7xW/u4nILMYKnd6I/y72vv2jUvyztf3jOzPn+2tiFe81FtlT0aVrkMXEZHwdHVCzT6o2g1H3/C68OvLvMl52pugvrRv38QM79x8bgkUzIcpy72wV9APi65DFxGR4YuLh/xZ3q3/tfo9Wuu8KXYPbfCCvu4gVL7hDdZz3d4+WVOhYA5MWOiFffpEb4BexkRddz9MCnQRERkZyVkw4zzv1l9bI5S/7k3Mc2QrVOyAPc9Cd0ffPukTvGl2s6d619jnzvDWvM+fDcmZo/t7xCgFuoiIRFZSOkx/m3fr0dkONXu96XYrtnnn66vehN1Pwuu/Gfj+nOn+YLyZgPMm+8mb7f0BUDhfC+n4FOgiIjL64hO9a+IL5kLJ2we+1lrnzaFff8i77K5nEZ09z3oT7+D8e1/WVC/Y82f7i+rM9JfFLRxX3fgKdBERiS7JWX3T7c67tG+7c965eNcNNfu9gXkV27zQr9gBe58dOLFOSi5MOM37oyHf/+OhcL43de4YDHoFuoiIxAYzsDggrm9wXv/A7+7yRuHX7IWju+HIFm82vU2/g7a6vv1Scr2WfOE8f3ncud55+6wpMR30CnQRERkbQnFed3veTJj1rr7tznkL6lRu90bdV/j3W37vde/3SM7ygj5/tn+O3r/PLfFm0ItyCnQRERnbzLx56zOLYOY7+7Y7B01HvaA/uss/V78Tdv4Fmn7dt18o3huY1xPy/QM/NS9qWvUKdBERGZ/MIL3Au5W8Y+BrLbX+BDq7oGqXf78b3nxy4Gx5KTnHD/qcklFf4U6BLiIicqyUbJi8zLv11zMXfk/YH93pPT72cjuL81r18y+HC28dlZIV6CIiIuHqmQs/twRmXzjwtdZ6vzW/u69V37PO/ShQoIuIiIyE5ExvvvpJZwby40OB/FQREREZUQp0ERGRMSBqAt3MZpjZPWb2QNC1iIiIxJqIBrqZ/dzMKsxsyzHbLzazN8xst5ndAuCc2+Oc+1gk6xERERmrIt1C/yUwYNFcM4sDfgBcAiwAVpvZggjXISIiMqZFNNCdc88B1cdsXg7s9lvk7cB9wJWRrENERGSsC+Ic+
iTgYL/npcAkM8szsx8DS83siyd6s5ndaGbrzWx9ZWVlpGsVERGJCVFzHbpzrgr4ZBj7/QT4CcCyZctcpOsSERGJBUG00A8BU/o9n+xvExERkVMURKC/Asw2sxIzSwSuAx4KoA4REZExI9KXra0B1gFzzazUzD7mnOsEbgIeB7YD9zvntkayDhERkbEuoufQnXOrT7D9z8CfI/mzRURExpOomSlORERETp05F7sDxc2sEtg/gh+ZDxwdwc8bj3QMh0/HcPh0DEeGjuPwjfQxnOacKzjeCzEd6CPNzNY755adfE85ER3D4dMxHD4dw5Gh4zh8o3kM1eUuIiIyBijQRURExgAF+kA/CbqAMUDHcPh0DIdPx3Bk6DgO36gdQ51DFxERGQPUQhcRERkDFOiAmV1sZm+Y2W4zuyXoeqKJmU0xs6fNbJuZbTWzz/jbc83sr2a2y7/P8bebmd3lH8tNZnZGv8+63t9/l5ldH9TvFBQzizOz18zsEf95iZm95B+r3/pTIWNmSf7z3f7r0/t9xhf97W+Y2UXB/CbBMbNsM3vAzHaY2XYzW6Hv4tCY2Wf9/5e3mNkaM0vWd3FwZvZzM6swsy39to3Y987MzjSzzf577jIzO6VCnXPj+gbEAW8CM4BEYCOwIOi6ouUGFAFn+I8zgJ3AAuDbwC3+9luAb/mP3wM8BhhwDvCSvz0X2OPf5/iPc4L+/Ub5WH4O+B/gEf/5/cB1/uMfA//oP/4U8GP/8XXAb/3HC/zvZxJQ4n9v44L+vUb5GN4LfNx/nAhk67s4pOM3CdgLpPT7Dn5E38WTHrd3AGcAW/ptG7HvHfCyv6/5773kVOpUCx2WA7udc3ucc+3AfcCVAdcUNZxz5c65V/3HDXjz70/CO0b3+rvdC1zlP74S+H/O8yKQbWZFwEXAX51z1c65GuCvwMWj+KsEyswmA5cCP/OfG/BO4AF/l2OPYc+xfQC4wN//SuA+51ybc24vsBvv+zsumFkW3j+s9wA459qdc7XouzhU8UCKmcUDqUA5+i4Oyjn3HFB9zOYR+d75r2U65150Xrr/v36fNSQKdC+cDvZ7Xupvk2P43W1LgZeACc65cv+lw8AE//GJjud4P853Av8CdPvP84Ba5y1WBAOPR++x8l+v8/cf78ewBKgEfuGfuviZmaWh72LYnHOHgNuBA3hBXgdsQN/FUzFS37tJ/uNjtw+ZAl3CYmbpwO+Bf3bO1fd/zf+rUpdLnICZXQZUOOc2BF1LjIvH6/b8kXNuKdCE19XZS9/Fwfnnea/E++OoGEhjfPVORES0fO8U6HAImNLv+WR/m/jMLAEvzH/jnPuDv/mI31WEf1/hbz/R8RzPx/lc4Aoz24d3SuedwHfxuuJ6Vjzsfzx6j5X/ehZQxfg+huC1XEqdcy/5zx/AC3h9F8P3LmCvc67SOdcB/AHv+6nv4tCN1PfukP/42O1DpkCHV4DZ/ijPRLyBHw8FXFPU8M+X3QNsd87d0e+lh4CeUZrXA3/qt/3v/ZGe5wB1frfU48C7zSzHbyW829825jnnvuicm+ycm473/XrKOfdB4Gng/f5uxx7DnmP7fn9/52+/zh95XALMxhtMMy445w4DB81srr/pAmAb+i4OxQHgHDNL9f/f7jmG+i4O3Yh87/zX6s3sHP+/yd/3+6yhCXr0YDTc8EYl7sQbqflvQdcTTTfgbXhdSZuA1/3be/DOoz0J7AKeAHL9/Q34gX8sNwPL+n3WDXiDZ3YDHw36dwvoeJ5P3yj3GXj/CO4Gfgck+duT/ee7/ddn9Hv/v/nH9g1OcSRsLN+AJcB6//v4IN5oYX0Xh3YMbwV2AFuAX+GNVNd3cfBjtgZvzEEHXk/Rx0byewcs8/97vAl8H3/St6HeNFOciIjIGKAudxER6ScxuQAABCJJREFUkTFAgS4iIjIGKNBFRETGAAW6iIjIGKBAFxERGQMU6CJRwsy+aWarzOwqM/viEN9b4K+G9ZqZvX2Q/c43f7W3QfZZYmbvGcrPH21mts/M8oOuQySaKNBFosfZwIvAecBzQ3zvBcBm59xS59zfhlnHEry5BkQkhijQRQJmZv9lZpuAs4B1wMeBH5nZV46z73Qze8pfZ/lJM5tqZkvwlnK80sxeN7OUY95zsXnrh78KvK/f9uVmts5v1a81s7n+bIlfB671P+va4+13nLqKzOw5/z1benoJzOxHZrbevPW3b+23/z6/R+J1//UzzOxxM3vz/7d3PyE27nEcx98fY+qSf8mfZEGkxixcEWVzm4WS2CiRuF0sZHOTWNpZKAu6kejebiOilCjNhjD5tzALMymUuvdur5Q0i8vG1+L7O85xzDlcpWOePq/6Nc/ze37znN+cU+d7nt955vuVtLeM6SvnHFDW3D4t6ZP3LEk7JD0s5zqjrDvfJam/zOWxpP1f9eKYjSedzsDj5uYWkMH8BNAN3G8z7hrwS9neDVwt2zuBk2OM/4Gs8LSEzGB1iXqmumnAxLK9Frg81rlajWt6nAOULItAFzC1bM9s6BsElpX9f6jX3D5OZn6bCswG/i39fcAbMotZF1lucnPD788ClpbnpLv0nyJTZ64kS1XW5jej06+xm9u3brVk/GbWWSuAEaCHrDnfyhrqV9nnyCvzdnrIYhzPASSdB/aUY9OBs5KWkOl9u1uc40vGDQF/Kgv5XI2I4dK/RdIeslLaPKCXDN5Qr5nwGJgSEaPAqKS3kmaUYw8j4q8y94tkKuJa3W7IrxpWAkOZBptJZJGMa8AiSSeAAeB6m+fIrBIc0M06qCyX95MVll4Ck7Nbw8CaiPjvGz78YeB2RGxS1rof/NpxEXFH0k/ABqBf0jHgLnAQWBURryT1kysGNW/Lz3cN27X92ntTc27q5n0BZyPik5sIJf0IrAP2AlvIFQ2zyvJ36GYdFBHDEbGcLA7UC9wC1kXE8hbB/AFZsQ1gOxk023kGLJS0uOxvazg2nXqZxp0N/aPk8vfnxn0gaQG5VP478Ae54jCNrFn+WtJcYP1n5jqW1cpKiBOArcC9puM3gc2S5pR5zJS0oNwBPyEiLgOHynzMKs0B3azDJM0GXkXEO6AnIp60Gf4rsKvcRPczsK/duSPiDbnEPlBuinvRcPgocETSIz5erbsN9NZuimszrlEfMFLGbAV+i4gR4BH5oeICcL/dXFsYIqtPPQX+Bq40/X1PyIB9vTwnN8il/fnAYFnpOA/8r38DNBuPXG3NzL5LkvqAgxGxsdNzMRsPfIVuZmZWAb5CNzMzqwBfoZuZmVWAA7qZmVkFOKCbmZlVgAO6mZlZBTigm5mZVYADupmZWQW8B3BOZUYqYufVAAAAAElFTkSuQmCC\n" + }, + "metadata": { + "needs_background": "light" + } + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "def plot_progressive_loss(obj_list, alias, result_interval=1,):\n", + " 
\"\"\"Show real-time progressive validation loss\n", + " \"\"\"\n", + " avg_list = [sum(obj_list[:i]) / i for i in range(1, len(obj_list))]\n", + " total_obs = len(avg_list)\n", + " warm_starting_point = 10 #0\n", + " plt.plot(range(warm_starting_point, len(avg_list)), avg_list[warm_starting_point:], label = alias)\n", + " plt.xlabel('# of data samples',)\n", + " plt.ylabel('Progressive validation loss')\n", + " plt.yscale('log')\n", + " plt.legend(loc='upper right')\n", + "plt.figure(figsize=(8, 6))\n", + "plot_progressive_loss(loss_list_vanilla, 'VanillaVW')\n", + "plot_progressive_loss(loss_list_autovw_ni, 'AutoVW:NI')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### AutoVW which tunes both namespace interactions and learning rate\n", + "Create and run an AutoVW instance which tunes both namespace interactions and learning rate." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": "Seed namespaces (singletons and interactions): ['e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nNo low-cost init config given to the search algorithm.For cost-frugal search, consider providing init values for cost-related hps via 'init_config'.\nCreated challengers from champion ||0.5\nNew challenger size 39, ['|dh|0.5', '|ci|0.5', '|bd|0.5', '|bh|0.5', '|ei|0.5', '|ch|0.5', '|bg|0.5', '|bc|0.5', '|cd|0.5', '|ag|0.5', '|eh|0.5', '|hi|0.5', '|dg|0.5', '|fi|0.5', '|ad|0.5', '|cf|0.5', '|ce|0.5', '|be|0.5', '|ab|0.5', '|ah|0.5', '|fh|0.5', '|di|0.5', '|gi|0.5', '|bf|0.5', '|de|0.5', '|ac|0.5', '|ai|0.5', '|df|0.5', '|cg|0.5', '|ae|0.5', '|fg|0.5', '|ef|0.5', '|eg|0.5', '|gh|0.5', '|af|0.5', '|bi|0.5', '||0.05358867312681484', '||1.0', '||0.5']\nOnline learning for 10000 steps...\nSeed namespaces (singletons and interactions): ['e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nNo low-cost init config given to the search algorithm.For cost-frugal search, consider providing init values for cost-related hps via 'init_config'.\nCreated challengers from champion ||1.0\nNew challenger size 38, ['|bf|1.0', '|ab|1.0', '|fg|1.0', '|bg|1.0', '|ad|1.0', '|fi|1.0', '|be|1.0', '|gi|1.0', '|df|1.0', '|de|1.0', '|cg|1.0', '|hi|1.0', '|di|1.0', '|ei|1.0', '|ai|1.0', '|bc|1.0', '|af|1.0', '|ef|1.0', '|ag|1.0', '|dh|1.0', '|fh|1.0', '|cd|1.0', '|dg|1.0', '|gh|1.0', '|ah|1.0', '|eg|1.0', '|ci|1.0', '|ch|1.0', '|eh|1.0', '|ac|1.0', '|ce|1.0', '|bi|1.0', '|bd|1.0', '|ae|1.0', '|cf|1.0', '|bh|1.0', '||0.10717734625362937', '||0.3273795141019504']\nSeed namespaces (singletons and interactions): ['de', 'e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nNo low-cost init config given to the search algorithm.For cost-frugal search, consider providing init values for cost-related hps via 'init_config'.\nCreated challengers from champion |de|1.0\nNew challenger size 45, ['|cf_de|1.0', '|ci_de|1.0', '|cd_de|1.0', '|ac_de|1.0', '|de_dh|1.0', '|ab_de|1.0', '|de_ef|1.0', '|ce_de|1.0', '|de_hi|1.0', '|bg_de|1.0', '|de_fi|1.0', '|ah_de|1.0', '|de_dg|1.0', '|de_fg|1.0', '|ai_de|1.0', '|de_gh|1.0', '|bh_de|1.0', '|ch_de|1.0', '|de|1.0', '|af_de|1.0', '|de_deg|1.0', '|de_eh|1.0', '|de_eg|1.0', '|de_di|1.0', '|de_ei|1.0', '|ag_de|1.0', '|ae_de|1.0', '|de_deh|1.0', '|be_de|1.0', '|de_fh|1.0', '|cg_de|1.0', '|bf_de|1.0', '|bi_de|1.0', '|ad_de|1.0', '|ade_de|1.0', '|de_def|1.0', '|bde_de|1.0', '|cde_de|1.0', '|de_df|1.0', '|bc_de|1.0', '|de_dei|1.0', '|bd_de|1.0', '|de_gi|1.0', 
'|de|0.10717734625362937', '|de|0.3273795141019504']\nFinal progressive validation loss of autovw_nilr: 6.271218842008241\n" + } + ], + "source": [ + "from flaml.tune import loguniform\n", + "''' create another AutoVW instance for tuning namespace interactions and learning rate'''\n", + "# set up the search space and init config\n", + "search_space_nilr = {'interactions': AutoVW.AUTO_STRING, 'learning_rate': loguniform(lower=2e-10, upper=1.0)}\n", + "init_config_nilr = {'interactions': set(), 'learning_rate': 0.5}\n", + "# create an AutoVW instance\n", + "autovw_nilr = AutoVW(max_live_model_num=5, search_space=search_space_nilr, init_config=init_config_nilr)\n", + "\n", + "# online learning with AutoVW\n", + "loss_list_autovw_nilr = online_learning_loop(max_iter_num, vw_examples, autovw_nilr)\n", + "print('Final progressive validation loss of autovw_nilr:', sum(loss_list_autovw_nilr)/len(loss_list_autovw_nilr))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Online performance comparison between vanilla VW and two AutoVW instances\n", + "Compare the online progressive validation loss from the vanilla VW and two AutoVW instances." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": "
", + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfQAAAFzCAYAAADIY/vqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8GearUAAAgAElEQVR4nOzdd3ic5Z3u8e8zo967ZEu25W6DO8a4gMGm2BQDCQRiApuEBDbZZckhZSHnJKTsnt0kSwjJJjmBDUnYZNembGimgwEDNsUFdxs3SZZVrN6lac/54x3JMpZtydJ4NNL9uS5d0rzzzswPkXDr6cZai4iIiEQ2V7gLEBERkf5ToIuIiAwBCnQREZEhQIEuIiIyBCjQRUREhgAFuoiIyBAQFe4C+iMrK8sWFhaGuwwREZGzYtOmTdXW2uyenovoQC8sLGTjxo3hLkNEROSsMMYUn+w5dbmLiIgMAQp0ERGRIUCBLiIiMgRE9Bi6iIj0jtfrpbS0lPb29nCXIr0QFxdHQUEB0dHRvX6NAl1EZBgoLS0lOTmZwsJCjDHhLkdOwVpLTU0NpaWljB07ttevU5e7iMgw0N7eTmZmpsI8AhhjyMzM7HNvigJdRGSYUJhHjjP5d6VAFxGRkFuyZAmvvPLKcdceeughvv71r/fpfZ577jl+8pOfAPDDH/6QBx54AIAvfelLPPXUUyd93WOPPcbKlSuPu1ZdXU12djbPPvss119/fdf1f/3Xf2XChAldj59//nmuvfbaPtUZDgp0EREJuZUrV7J69erjrq1evfqEkD2da6+9lvvuu6/Pn/+Zz3yG1157jdbW1q5rTz31FCtWrGDhwoW8//77Xdc3bNhASkoKR48eBWD9+vUsXLiwz595tinQRUQk5G688UZeeOEFPB4PAEVFRZSVlbFq1Srmzp3Lueeeyw9+8IOu+wsLC/nBD37AnDlzmD59Onv27AHgT3/6E3fdddcpP+vHP/4x559/PtOmTePOO+/EWktKSgoXX3wxzz//fNd9nX9QZGdnk5KSwv79+wE4cuQIN9xwA+vXrwecQF+0aNGA/j5CQbPcRUSGmR89v5NdZY0D+p7njEzhByvOPenzGRkZzJs3j5deeonrrruO1atXc9NNN/G///f/JiMjA7/fz6WXXsq2bduYMWMGAFlZWWzevJnf/va3PPDAA/z+97/vVS133XUX999/PwC33XYba9asYcWKFaxcuZL/+q//4uabb6asrIxPPvmEpUuXArBo0SLWr1+P3+9n4sSJzJ8/n1deeYVrrrmGrVu3cv755/fzNxR6aqEHvf7Bkzzx2q/CXYaIyJDVvdu9s3X8xBNPMGfOHGbPns3OnTvZtWtX1/2f/exnATjvvPMoKirq9ee8+eabXHDBBUyfPp21a9eyc+dOAK6++mree+89GhsbeeKJJ7jhhhtwu90ALFy4kPXr17N+/XoWLFjAvHnz+OCDD9iyZQtTpkwhLi5ugH4LoaMWetATWx/ikKuBm7g73KWIiITUqVrSoXTddddxzz33sHnzZlpbW8nIyOCBBx7go48+Ij09nS996UvHLdWKjY0FwO124/P5evUZ7e3t/N3f/R0bN25k1KhR/PCHP+x6z/j4eJYvX87TTz/N6tWrefDBB7tet2jRIv793/8dv9/PHXfcQXJyMu3t7bz11lsRMX4OaqF3cWGw2HCXISIyZCUlJbFkyRJuv/12Vq5cSWNjI4mJiaSmplJZWclLL73U78/oDO+srCyam5tPmPm+cuVKHnzwQSorK1mwYEHX9alTp1JWVsa7777L7NmzAZg1axa/+93vImL8HBTo3RgCWqIpIhJSK1euZOvWraxcuZKZM2cye/ZspkyZwi233DIgwZmWlsYdd9zBtGnTWLZs2Qlj35dffjllZWXcfPPNx631NsZwwQUXkJmZ2bXd6oIFCzh48GDEtNCNtZHbKp07d64dqPPQ/+6Ri9nlruatr+wckPcTERlMdu/ezdSpU8NdhvRBT//OjDGbrLVze7pfLfQglzEEwl2EiIjIGVKgBxl1uYuISARToAe5cGlKnIiIRCwFepBBXe4iIhK5FOhBxrjU5S4iIhFLgR7kwhBAiS4iIpFJgR5kjEtd7iIiIfbMM89gjOk6bOVUHnrooeNOR+vJl7/8ZR5++OETPuPKK6/knnvu4aGHHuq6vmz
ZMr761a92Pf7Wt7513G5x4BzJmpCQ0HXSGjgb4vT082CjQA9yoS53EZFQW7VqFRdeeCGrVq067b29CfRTHcvaeeAKQCAQoLq6umtfdzj5sahZWVn8/Oc/780/zqCiQA9SC11EJLSam5t59913efTRR7tC+K233uKaa67puueuu+7iT3/6E7/61a8oKytjyZIlLFmyBHD+GJg+fTrTpk3j3nvvBeDSSy9lz549lJeXA9DS0sLrr7/O9ddfz8KFC9mwYQMAO3fuZNq0aSQnJ1NXV0dHRwe7d+9mzpw5J9R5++238/jjj1NbWxvS38dA0+EsQS7jImDURBeRYeCl+6Bi+8C+Z950uPInp7zl2WefZfny5UyaNInMzEw2bdp00nvvvvtuHnzwQd58802ysrIoKyvj3nvvZdOmTaSnp3PFFVfwzDPPcP3113PDDTfwxBNP8I1vfIPnn3+eSy65hJSUFFJSUoiKiqKkpKTrFLUjR46wYcMGUlNTmT59OjExMdx///3MnTuXa6+9FnC61W+//XZ++ctf8qMf/WhAf02hpBZ6kAn+Knw+b5grEREZmlatWsXnP/95AD7/+c/3qtu900cffcQll1xCdnY2UVFRfOELX2DdunVAz8eydvr0sagLFizoety5d/yPf/zjrjDvdPfdd/PYY4/R1NTUr3/ms0kt9CBXMNA9Pg9RUdFhrkZEJIRO05IOhdraWtauXcv27dsxxuD3+zHGcN111xEIHBvw7H58am8tXLiQ8vJytm7dyvr1648bU+8cR9++fTvTpk1j1KhR/PznPyclJYUvf/nLJ33PtLQ0brnlFn7zm9/0uZ5wUQs9yOVyfhV+v1roIiID7amnnuK2226juLiYoqIiDh8+zNixYwkEAuzatYuOjg7q6+t54403ul6TnJzc1UKeN28eb7/9NtXV1fj9flatWsXFF18MOCel3XzzzXzxi1/kyiuvJC4urus9Fi5cyJo1a8jIyMDtdpORkUF9fT0bNmw47Slq3/zmN3n44Yd7fRZ7uCnQg9TlLiISOqtWreIzn/nMcdduuOEGVq9ezU033cS0adO46aabus4iB7jzzjtZvnw5S5YsYcSIEfzkJz9hyZIlzJw5k/POO4/rrruu697ux7J2N336dKqrq5k/f/5x11JTU8nKygLg/vvv57nnnjuh5qysLD7zmc/Q0dExIL+DUNPxqUE/+s+VPGV3sPbaV8hOHzkg7ykiMljo+NTIo+NTz5Axzq/Cqxa6iIhEIAV6kAs3AH6/P8yViIiI9J0CPaizhe7ze8JciYiISN8p0IPcXV3uCnQREYk8CvQgl3G63G1AXe4iIhJ5FOhBXZPi1OUuIiIRSIEe5HY5LfSAWugiIiEznI5PveSSS/j00uq33nqL1NRUZs2axZQpU/j2t7/d6/c7HQV6F20sIyISakPx+NQ//elP/PCHP+z1/RdddBEff/wxW7ZsYc2aNbz33nu9fu2pKNCDOlvoPm39KiISEjo+9Xjx8fHMmjWLI0eODMj76XCWoM7DWfw2MvbsFRE5Uz/98KfsqT19l3dfTMmYwr3z7j3lPTo+9Xh1dXXs27ePxYsXD8j7qYUe5HI7f9v4/Qp0EZFQGErHp9bU1DBr1ixmzZrF/fffz+9+97uux9u3n/qs+XfeeYeZM2eSn5/PsmXLyMvL6/Xv4VTUQg9ymWCga1KciAxxp2tJh8JQOz41MzOTjz/+GHDG0IuKino9jn7RRRexZs0aDh06xPz587npppuYNWtW7/+BT0It9CCXMYCOTxURCQUdn3qisWPHct999/HTn/50QN5PgR7kdqmFLiISKsP1+NSrr76agoICCgoK+NznPnfC81/72tdYt24dRUVFZ/wZnXR8atAfnv8Rv6h9in+beB/LF35hQN5TRGSw0PGpkUfHp54hlys4yz2gLncREYk8CvQgt4kG1OUuIiKRSYEe1NlCD2jZmoiIRCAFelDXpDirFrqIDE2RPGdquDmTf1cK9KDO09a0sYyIDEVxcXHU1NQo1COAtZaamprjlt/1hjaWCXK7nDH0gLZ+FZEhqKCggNLSUqqqqsJdivRCXFwcBQUFfXrNoAl0Y8z1wNVACvCotfbVs/n5XcenqstdRIag6Ohoxo4dG+4yJIRC2uVujPmDMeaoMWbHp64vN8bsNcbsN8bcB2CtfcZaewfwNeDmUNbVE1fnGLpfgS4iIpEn1GPofwKWd79gjHEDvwGuBM4BVhpjzul2y/eCz59VUcHDWdRCFxGRSBTSQLfWrgM+faDsPGC/tfagtdYDrAauM46fAi9ZazeHsq6euIzT5a516CIiEonCMcs9Hzjc7XFp8No/AJcBNxpjvnayFxtj7jTGbDTGbBzIyR2dy9bUQhcRkUg0aCbFWWt/BfyqF/c9AjwCzl7uA/X5rq5A1yx3ERGJPOFooR8BRnV7XBC8FladY+j+bufyioiIRIpwBPpHwERjzFhjTAzweeDEc+vOsii3sw7dqstdREQiUKiXra0CNgCTjTGlxpivWGt9wF3AK8Bu4Alr7c5Q1tEbLlfnpDh1uYuISOQJ6Ri6tXblSa6/CLwYys/uqyhXZwtdXe4iIhJ5tJd7kEuz3EVEJIIp0IOiojoDXS10ERGJPAr0ILXQRUQkkkVkoBtjVhhjHmloaBiw94wOznIPaKc4ERGJQBEZ6Nba5621d6ampg7Ye7o7Ax11uYuISOSJyEAPhajgsjWtQxcRkUikQA+KjooBIGAHbDdZERGRs0aBHmSCXe6lrQfDXImIiEjfKdCDOifFvRtbH+ZKRERE+k6BHhQT7HIXERGJRAr0IJfbHe4SREREzpgCPSg6KjbcJYiIiJwxBXpQlCuk59SIiIiElAI9KMqtQBcRkcgVkYEeiq1fNYYuIiKRLCIDPRRbv3bn83lD8r4iIiKhEpGBHmot7U3hLkFERKRPFOg9aGtvCXcJIiIifaJA70FbhwJdREQiiwK9B+0KdBERiTAK9B60tTeHuwQREZE+UaD3oN3bGu4SRERE+kSB3oMOjwJdREQiiwK9B+0KdBERiTAK9G7uy70NAI+3LcyViIiI9I0CvZtodxwAHd72MFciIiLSNxEZ6KHYyx0gNiYBAI9PLXQREYksERnoodrLPTY6HgCPr2NA31dERCTUIjLQQyU22uly9/rV5S4iIpFFgd5NXGwiAF6/J8yViIiI9I0CvZvYGCfQPT610EVEJLIo0LuJD06Ka/Pq+FQREYksCvRuEuKSAVjt3xzmSkRERPpGgd5N5xi6iIhIpFGgd5MQp0AXEZHIdNpAN8b8zBiTYoyJNsa8YYypMsbcejaKO9sSg13uIiIikaY3LfQrrLWNwDVAETAB+E4oiwoXl9sd7hJERETOSG8CPSr4/WrgSWvtwO63OsgYa8NdgoiISJ/1JtDXGGP2AOcBbxhjsoEhu1D7Cv8oYgIKdRERiSynDXRr7X3AQmCutdYLtADXhbqwcIl2xeJxGXw+b7hLERER6bXeTIr7HOC11vqNMd8D/gKMDHllp64pJKetAcS4YgFoaqkf8P
cWEREJld50uX/fWttkjLkQuAx4FPh/oS3r1EJ12hpATPBM9PrmmgF/bxERkVDpTaD7g9+vBh6x1r4AxISupPCKjXKOUG1qVQtdREQiR28C/Ygx5mHgZuBFY0xsL18XkWLdTqC3tCnQRUQkcvQmmG8CXgGWWWvrgQyG6Dp0gNjgAS3NrUN6dZ6IiAwxvZnl3gocAJYZY+4Ccqy1r4a8sjCJi3a2f/3HPf8U5kpERER6rzez3L8B/BeQE/z6izHmH0JdWLj4/D7nuzFhrkRERKT3etPl/hXgAmvt/dba+4H5wB2hLSt8rrjgb8JdgoiISJ/1JtANx2a6E/x5yDZfC3IKw12CiIhIn0Wd/hb+CHxgjHk6+Ph6nLXoQ9aijjQ+cdeGuwwREZFeO22gW2sfNMa8BVwYvPRla+2WkFYVZrEmhvYh2wchIiJD0UkD3RiT0e1hUfCr6zlr7ZBtwsa44mhzGWwggHEN2SX3IiIyhJyqhb4JsBwbL+88gswEfx4XwrrCKs4dj88YWtqaSEoc+O1lRUREBtpJA91aO/ZsFjKYxEUlgh9qGioV6CIiEhHUn9yDuOgkAGobj4a5EhERkd5RoPcgIToZgMaW6jBXIiIi0jsK9B4kxKUACnQREYkcvVmHjjHGDeR2v99aWxKqonpRzwpgxYQJE0Ly/snx6QA06cQ1ERGJEL3Zy/0fgErgNeCF4NeaENd1Stba5621d6amhmbCWmegt7Q3huT9RUREBlpvWujfACZba2tCXcxgkZKYCUCbR0eoiohIZOjNGPphYFglW2pKtvND08HwFiIiItJLvWmhHwTeMsa8AHR0XrTWPhiyqsIsKzUXgOSGIb3DrYiIDCG9aaGX4IyfxwDJ3b6GrPRkp4X+ZkJ8mCsRERHpnd4czvIjAGNMUvBxc6iLCje32/m1xATcYa5ERESkd3ozy32aMWYLsBPYaYzZZIw5N/SlhdckTxxtLgW6iIhEht50uT8CfNNaO8ZaOwb4FvAfoS0r/OJMDG3Gnv5GERGRQaA3gZ5orX2z84G19i0gMWQVDRLxJpZWlwJdREQiQ28C/aAx5vvGmMLg1/dwZr4PafHuBJpdhqff2RzuUkRERE6rN4F+O5AN/DX4lR28NqS5A9E0u1xc/8aScJciIiJyWr2Z5V4H3H0WahlU0vwe/G7D+vg4FoW7GBERkdM4aaAbYx6y1v4vY8zzwAmDydbaa0NaWZi1RMcC8LW8HLaHuRYREZHTOVUL/c/B7w+cjUIGm4zJ58Mn+8JdhoiISK+cdAzdWrsp+OMsa+3b3b+AWWenvPBZNm4ZAC5rsVaz3UVEZHDrzaS4L/Zw7UsDXMegMyd3DosoINvvp7mlJdzliIiInNKpxtBXArcAY40xz3V7KhmoDXVhg0FydAoNAReNtUdJTkoKdzkiIiIndaox9PVAOZAF/Lzb9SZgWyiLGixSYjNo97qoqSklf/S4cJcjIiJyUicNdGttMVAMLDh75Qwu6YnZ0AzVdcXhLkVEROSUenM4y3xjzEfGmGZjjMcY4zfGNJ6N4k5R0wpjzCMNDQ0h/ZzM5DwA6prKQvo5IiIi/dWbSXG/BlYC+4B44KvAb0JZ1OlYa5+31t6Zmpoa0s/JSx8FQPnB9SH9HBERkf7qTaBjrd0PuK21fmvtH4HloS1rcBiZNRaAid4PwlyJiIjIqZ1261eg1RgTA3xsjPkZzkS5Xv0hEOnSk3IAeDdhNMvCXIuIiMip9CaYbwPcwF1ACzAKuCGURQ0W6bHpAGyPbuNQtdaii4jI4HXaQLfWFltr26y1jdbaH1lrvxnsgh/yot3RpAWiyPV7WfLAW+EuR0RE5KROtbHMdno4lKWTtXZGSCoaZLL9ibS4W/hrzP3A1eEuR0REpEenGkO/Jvj974PfOw9ruZVTBP1QkxYwlLtdzHENi04JERGJUKc6nKU4uLnM5dbaf7TWbg9+3QtccfZKDK/RSemURkfzeHIS+yqbwl2OiIhIj3ozKc4YYxZ1e7Cwl68bErInLwXgn7MyuPwX68JcjYiISM96s2ztK8AfjDGpgAHqgNtDWtUg4jOm6+dE2sJYiYiIyMn1Zpb7JmvtTGAmMMNaO8tauzn0pQ0Ot51zW9fPL8Z8N4yViIiInNypZrnfaq39izHmm5+6DoC19sEQ1zYoZMRl8I2CZfyy9BWy3VV4/QGi3cNmxEFERCLEqZIpMfg9+SRfw0ZuwXwA/ikrg4NV2mBGREQGn1Mdn/pw8PuPzl45g1NuymgA1iQlsuqhtyn6yTWneYWIiMjZdaou91+d6oXW2rsHvpzBaUL6hK6fLzB78PqvUre7iIgMKqea5b7prFUxyGXEZZAbnUKlt5HHY/+JGx9ZyFNfXxjuskRERLqcqsv9sbNZyGB3S8Gl/OLQ07QYw8bi2nCXIyIicpzTrkM3xmQD9wLnAHGd1621S0NY16CTmzEJDkFllJt7op/h2M64IiIi4debgeD/AnYDY4EfAUXARyGsaVDKyZoMwE8z0vmG+0n8gWGznb2IiESA3gR6prX2UcBrrX3bWns7MKxa53DsbPT1CfF02Gj2VDSGuSIREZFjehPo3uD3cmPM1caY2UBGCGsalDpnuqcHDG24ufpX74S5IhERkWN6E+j/HNzH/VvAt4HfA/eEtKpBakbWDOpclrtHppBDPW/uORrukkRERIDeBfoH1toGa+0Oa+0Sa+151trnQl7ZIFTb7sxu3xIXx3eiHufLfxp2UwlERGSQ6k2gv2eMedUY8xVjTHrIKxrEvr/g+wCM9nr5XNQ6xmUnnuYVIiIiZ0dvTlubBHwPOBfYZIxZY4y5NeSVDUILRy7k85M/T63bjQWubnk63CWJiIgAvWuhY6390Fr7TWAeUAsM201nRqeMptnlos7l4m7/n1n+0Do+OFgT7rJERGSYO22gG2NSjDFfNMa8BKwHynGCPWyMMSuMMY80NDSc9c8ekzIGgGeSE9llx7CnoombH3mfI/VtZ70WERGRTr1poW8FZgE/ttZOstbea60N6z7v1trnrbV3pqamnvXPnpDmLF/7RUY6Y0wl4Gww8/lHNpz1WkRERDqddutXYJy1VtuiBY1MGtn1c310B2M8lRTbPA7XtvH0llKmjkhhSl5KGCsUEZHhqDeT4hTmnzIxfSIA/5KVztux3+Q296sA3PP4VpY/pA1nRETk7NOh3mfg+vHXA9BmnF/fP0X/iVg8Xc8vf2gdpXWt4ShNRESGKQX6GbjtnNtYOmopVTFdh8/xSPSDXT/vqWjiL++X0NLhC0d5IiIyDPVmlvskY8wbxpgdwcczjDHfC31pg5cxhikZUyhzQZsxAFzs3nbcPb97+wDn/uAVLn/wbepbPT29jYiIyIDpTQv9P4DvEjykxVq7Dfh8KIuKBGNTx2KxfHPB52iITSaQPILNd+TxnyvHH3ffvqPNfP/ZnWGqUkREhoveBHqCtfbDT10b9n3J80fMB+Dd8vf56ugxuJrKyfjzUha/+TmKfnI12cmxXfc+v7WMwvte4EfP76S8QevVRURk4PVm2Vq1MWY8w
QXXxpgbcTaXGdbS4tK6ft7ja6TO5SI9EID6EvC08szfL6KysZ2fvbyH9w86h7r88b0i/vheEQBjsxL5mwVjmJSbzLyxGUS7NZ1BRETOnDndqjRjzDjgEWAhUAccAr5grS0OfXmnNnfuXLtx48awff4fdvyBX2z6BQBTOzw8UVZx7MmvroWC8wD4+l828dKOip7e4jhJsVHkpsTy2TkFTMtPpc3jY+aoNEakxoekfhERiSzGmE3W2rk9PteLQHdba/3GmETAZa1tCkWRZyLcgW6t5ecbf85ju5yt7bcnzYftTzhPTv8c3PB7APwBS1l9G7UtHl7cUc7Dbx/s0+fMK8zgpzfOYGyWTncTERnO+hvoJcDLwOPA2sG00Uy4Ax2cUJ/959kYY9h862bMH6+CkvXOk8kj4EsvQOb4E14TsLB2z1GKa1rYUlLPqIwE9h9t5vXdlSf9rIsmZvGdZZOZUZB20ntERGTo6m+gJwDX4MxsnwOsAVZba98d6EL7ajAEOsDDWx/m1x//GoD/O/8HXLtvPWx89NgNtz0N45f26r1aPT58AUtrh5/MpBgOVDVzz+Nb2V3eeNx9f/7KPAozE8lNiSMmyoXHFyDabTDBZXQiIjL09CvQP/VG6cAvccbQ3QNU3xkbLIFe2lTKlX+9suvx9i9uhwfPhcbSYzf9sH8nwx2pb+Pep7bx7v7qU96XGOPmf102iQsnZjEpNxm3SwEvIjJU9DvQjTEXAzcDy4GNwOPW2v8Z0CrPwGAJdIDpj03v+vm68dfxg8m3En3gTXj5vmM33fkWjJzdr8/p8Pn5t5f38vt3D/Xq/uXn5nHx5Gwum5p73FI6ERGJPP3tci8CtgBPAM9Za1sGvMIzNJgCfW3JWn61+VccaDgAwLfnfpsvnvtFOPwhPHr5sRtHzoGvvg6u/ndweHwBfIEA+yqbSY6LIjE2ivf2V/OfG4r5+HB9j6+5/Jxc/u9nppGTHNfj8yIiMnj1N9BTrLWNp7wpTAZToHfqbKlnxWex9nNrnTHtl78L7//2+BsnXgGfeRgSMkJWS5vHzy9e/4Q3dldyoOr4v8NmFKSyfFoe+WnxzCxI43BdK83tPibmJjEhJzlkNYmIyJk7o0A3xvyjtfZnxphf9fS8tfbuAazxjAzGQP+w/EO+8upXuh4/d/1zjE0dC+Xb4OP/gg9+d/wLcqfB9b+FETNDWpe1ln1Hm3lhWzm/fGPfae+fPTqN7yybzPyxmbg0Di8iMiicaaCvsNY+b4z5Yk/PW2sfG8Aaz8hgDHSAsuYylv3Psq7HT614iskZk50HRzbB6luhqezEF97wKEy/MeT1tXv9bCqu46+bj7CzrIE9Fc7WAsvOzWVTcT3VzR0nvGbRhEyum5nPsnPzSE2IDnmNIiJyooGc5e4CkgZLF/xgDXSAe9fdy4uHXgSc7vc3b3rz+Btaa+EPy6F674kvnrgMlv0LpBZA9Nkf6/b4Aqw/UM2Tm0p5YduJu/ymxEWxeFI2ozMScLsM545MYfbodHJTnFoDAUttq4dPKps4UtdGWkIMUS7D0aZ2AhYyEmPISY7FF7CMz04iIzHmbP8jiohEpP6Oof838DXAD3wEpAC/tNb+20AX2leDOdB9AR87a3Zy64u3ApAam8prN75GfFQP27juXgPF6+H935z43EXfhsXfCUuwd/L4AmwuqWPr4Xq2ltbz4vaet7FNiYtiRGo8eyvPbDPBafkpzChI47zR6cwfn8nI1DitqxcR6aa/gf6xtXaWMeYLOBvL3AdsstbOGPhS+2YwB3qnJz95kh9v+CzarRMAACAASURBVHHX4+evf57C1MKeb26pgQ3/Du/+4sTn/uZZGHdJKErssw6fn11ljby8s4LxWUnUtXrYUlLPyzuPD/rs5FgumpjFhJwkGtq8JMZEEeU2lNW3UZCewOoPS3C7DB2+ADXNHqLchqb2Ywf5FWYmMC0/ldmj07l8ai6jMuIV8CIyrPU30HcCs4D/Bn5trX3bGLPVWhvaWVy9EAmBDvBPG/6JJz55ouvxv1z4L6wYv+LULzqyGeqK4KkvH7s27Qa44p8hZWRoCu0nnz/Ax4frGZedRGp89BltatPq8fHuvmo2HKxh6+F6NpccW36XnxbP5efksmLmSOaMTlO4i8iw099Avxu4F9gKXA2MBv5irb1ooAvtq0gJdID1R9bzt6//7XHX1t28jvS49FO/0FrY8md47h+OXbv0frjwmzAMAq2lw8fOskY2HKjh9d2V7CpvxB+wznK7UalMyk1mSl4KE3KSGJeVqBn5IjKkDdikuG5vGGWt9Z3+ztCKpEDv9N13vsuag2u6Ht97/r3ces6tp3+hpxUeWwFHuv3zzvoCLLwbsiaBa3icp17X4uH5bWWs3XOUDw/V0urxdz2XlRTL0inZLJ+Wx3mjMzQbX0SGnP620L8B/BFoAn4PzAbus9a+OtCF9lUkBjrAH3f8kQc3Pdj1OCMugz9f+WdGp4w+/YtL3oc/LDv587c9DeOWDIvWeyBgqWruYEtJPVVN7aw/UMNbe6to8zohHx/t5pyRKcwZncasUenMH5dBZpK2vxWRyNXfQN9qrZ1pjFkG/C3wfeDP1to5A19q30RqoAM0e5q57KnLaPEe28FtTs4cHl32KFGuqFO/OBCA3c/C2z+Do7t6vmfajbD8J5CUPYBVD34NbV5e21XJoepmSmrb2FhUS3lDe9fz88dlsHRKDueNSWdyXgqHqlooSI8nLSFaY/IiMuj1N9C3WWtnGGN+CbxlrX3aGLPFWtu/U0YGQCQHeqd1pev4+zf+/rhrD13yEJeOubR3b2AtNJRCUzlsexw++v3xz+ef5yx7G3cJRPewZG4YqG3xsPVwPe/ur+aFbeVUNLafcE9eShxXzxjB1BEpLBifSXZSLDFRw2MYQ0QiR38D/Y9APjAWmAm4cYL9vIEutK+GQqADtHpbqW6r5uqnr+66dvW4q/n23G+TFZ/V9zdsOOJsMbv+U7v2XnwfXHhPWNe0h5u1lp1ljewub2RvRRMTc5Ooa/WyubiON/YcxR9w/v+QHBvFBeMyOGeEs2nO/HGZxMeE/cRgERnm+hvoLpxlawettfXGmEwg31q7beBL7ZuhEujd3b32bt48fGxXuV8u+SVLRy89szfz+2DHU/Ds30Og2xzG2bfCNQ+BW5PGumts9/Lyjgp2lzdS1+Lho6I6jtS3dT1/7sgUMpNiuWxqDrNHpTM5L1mteBE5q/ob6Ab4AjDOWvtjY8xoIM9a++HAl9o3QzHQAV4vfp173rqn6/EtU27huxd8t39v2lgOz90Fh94Bf3Cv9slXw7TPwpSrh213/KlYa6lr9bKlpI739tewo6yBDw/Vdj3vdhnOGZHCBWMzKMxKZHx2EueMSNHsehEJmf4G+v8DAsBSa+1UY0w68Kq19vyBL7VvhmqgA7T52nj50Mvcv/5+ABaMWMDDlz/c/4lbPg+8+C3Y/J89Pz9+KVzyXRg1r3+fM0RZazlY3cKuskY2Fdfx8eF6th9p6OqqB2c8/pyRKUzPT2XW6DSm5CWTl6Jt
bEWk//ob6JuttXO6T4TTTnFnT317PRc9fmwPnydXPMmUjCn9f+O2etizBt76CTQcPvH5mGT47MMw6cphs8b9TDW0eimubeFAVTN7K5rZV9nEJ0ebOFx7rLs+OTaKsdmJpCXEMGtUGosnZjF1RAqJsadZ0SAi0k1/A/0DYCHwUTDYs3Fa6Jrlfpb4A37uWnsX7x55F4DvXfA9bp5y88B+iLXg63BCfs090NHtQL3UUXDlz6DwQohLGdjPHcKONrazsbiOw7WtbD/SwNGmDsrq2yirbyNgIdptGJ+dFNyvPo3x2UmMz04iKylGrXkR6VF/A/0LwM04B7M8BtwIfM9a++RAF9pXwyXQwenqveO1O/ig/AMALh9zOf+86J9JiE4IzQfWFTtbzq771KF68+6E874MOVOHxeY1odDQ6uW9A9V8VFTLzrJGdh5poKXbjncjUuOYkpfMpLxkzhudzoLxmSTHaVxeRPoR6MEZ7vOBWuBSwABvWGt3h6LQvhpOgd7p1aJX+dbb3+p6fO34a7l79t3kJuaG5gMDAagvhvcegr0vQ3PwRLXkEZA7DQrmwtRrIXM8RGkXtjMRCFhKals5VN3CnoomtpXWc6i6hb2VTXT+37MwM4Fz81OZmpfMjII0Zo1OI0UhLzLs9LeFPig2kenOGLMCWDFhwoQ79u3bF+5yzrpWbyt3vHYH26qOXzl48+SbuWvWXaTFpYXuwyu2w9s/hd3PH389NgUu+FuY+xUIeKFmP7TWQmoBxKVCxnhnmZxa9b3W7vXzUVEtWw/Xs+NIIzvLG7rG5Y1xJt+NzkhgSl4yk/NSGJedSEF6PCNT43VIjcgQ1d9AfwDYAPzVnslJLiE0HFvo3XX4O/j5xp+zas+q467/bPHPuGLMFbhdIdwIJeCHQ+sgJhF2/A/seg6ayk7/upFzIDUfsqeAcTtr4tNGha7OIaah1cuWw3VsKalnT0UjZfXtfFLZRIcv0HVPtNuQGh/N5DznJLrJecksGJdJQbrOkxeJdP0N9CYgEfAB7Tjd7tZaG/bZUcM90DtZa9lXv4+vvfY1qtqqAEiLTePhyx/mnMxzzk4RgQBs+DV88jJU7wOX2+mOj0mGvS843wNeaG8EX9vxr03IgsRsZ8ncmIUw+Urn9dIr1lqKaoIT7xrbqWruoK7Fw86yRvYfbe4K++zkWOYEJ99NyElibFYi43OS1HUvEkEG/PjUwUKBfqLixmJuf/l2jrYdBZwx9v9zwf8J3eS5M9HeAI1lUPoRVO6Cg29C1Z5jzydkOifGzf4CjL1Y4d4P/oBl39EmPjxUy5aSejYV11FS29r1vMvA2KxEpoxIYVZBGgvGZzI5L5lot5YqigxG/W2h93SqWgNQHO4z0RXoJ7e7Zjd3vXFXV7Dfds5t3DPnHqIH63avgQA0lMCH/wHlW6HoHed6dCKMX+J0zU+4TNvVDoAOn5+9FU0cqWtjT0UTu8qdve1L65yek9goF1PykpmYm8yc0emMz05kSp52wBMZDPob6O/jLFnbHrw0HdgBpAJfD+e56Ar0UwvYAA9tfog/7vgjAHHuOH572W+Zmzt38I+ltjc63ffbnoD9rznXYpJhzt/AvDsgY2x46xuCyurbeP9gDTuONPJJZRM7yhqob/UCziS8ybnJ5KbEMWVEMuOzk5hZkEZhVgKxUepBETlb+hvofwW+b63dGXx8DvBj4B9xJsrNGuB6e02B3jut3lZueeEWDjQcAODWqbfynfO/g8tESLdqRzPse8WZeLdnjXPQzKTlMHq+8z1nargrHJKstZR2tuLLGtlYXEtti4e9FU34glvdxkW7usbkC9LjyU6K7Zpxn5kYQ5S67kUGVH8DfYe1dlpP14wxHyvQI8euml389MOfsvnoZqZmTOX26bdzxZgrIifYAeqD3fK7n4O6IudaYjaMugDOuc750nr4kPL4ApTUtrL9iLOcbm9FEwermilvbKf7f05i3C5GZyZQmJlAclw0US5DRlIMqfHRjEyNJzMphszEWLKSYshQ+Iv0Sn8D/XGcjWVWBy/dDGQBtwHvhvOQFgV63wVsgP/Y9h/8+uNfd13r3JwmJyFn8HfFd1e2BQ6+BSUfQMl6Z7JdQpYz3j72IsifC/EhXJMvxwkELFXNHewJBnxZfRv7jjZzpK6N6uYOYqPc1LZ48PgDPb4+KymW0RnxTM5LITMxhqykGHJT4hiZFs/ItHhtiStC/wM9Hvg74MLgpfeA3+IsYUuw1jYPYK19okA/c0eaj3DvunvZWrX1uOs3TbqJe+fdS4w7JkyVnSFrndnyG37jhHzA56xzH73AWQY35SrIGBfuKoc9f8DS4vFxtLGd8oZ2yuvbaWjz0tTh40hdG4eqmzlY3dI1dt9dSlwUozMTyEuJIy0hhvSEaEZnJpKTHEteShzjc5KIjXJphr4Maf1etmaMiQEmAxbYa6098f9tYaBA7792Xzur96zm46qPWVe6Dm/AS2psKvfPv58rCq8Id3lnpnPM/cBa2PcaNFc613POdVrvMz8PCRnhrVFOKRCw1LZ6qGhop7SulbL6dg5Vt3C4rpWy+jbqWr3Ut3rw+o//75fLQHy0m4m5ycEd9JyviTnJauHLkNDfFvolOIeyFOFsKjMK+KK1dt3Altl3CvSB5fF7eOHgCzyw8QEaPY0UphTy3Qu+y8KRC8NdWv+UfewE+86/wtFdYFxQcD5Mvso5YS5zPBReBMkh2g9fQsIfsFQ3d1DV1EFpXRv7KptobPfi8QXYW9nE3oom6rq19OOj3YzKiGfqiBTyUuJITYimMDORMZkJFGYm6ihbiQj9DfRNwC3W2r3Bx5OAVdba8wa80j5SoIdGQ0cDP1j/A94oeQOAwpRC7pt3HwtHLoz8Fk7FDucUuQNvQvXeY9eN25kxP3m5s6lNSr7OgY9w1lqqmpwx/f1HmzlS38bBqmZ2lTdS3ezBHzj+v33ZybGMzUxkbFYiozLiGZft7KI3JjOBkWnxuLU/vgwC/Q30bdbaGae7Fg4K9NAqaSzhq69+lfKWcsDZTva7877LVeOuCnNlA6SuyDlUpmY/7HkBPv5vaHE24iElH879jLOZzegFEB0X1lJlYFlrafH4Ka5pobjGOemuqLqFopoWDlW3UN3sOe7+KJdhyohkpo1M5ZyRKUzOTWZEajz56Qp6Obv6G+h/BPzAX4KXvgC4rbW3D2iVZ0CBfnY0e5r59y3/ztP7n6bN10ZGXAZ3z76bz078bOS32Lvze2Hvi3Bkk3OqXNF74O9wtqIdv9Q5WGb0BTBilrajHeLaPH4OVDVT2+LhSH0bRTUt7DjSwM6yxhMm7MVHu4mLdhHldpGXEkdOcizxMW7io90kxUXhMoaAtcRFu6lv9dDhC+AJfrV6/CTFRpGeGE1Ns4eGNi+x0W5cBjITYxmdkRA8VAei3S6SYqPwByyx0S7SE2LISY4FA1mJsTphb5job6DHAn/PsVnu7wC/tdZ2DGiVZ0CBfnZ5/V7+e89/89uPf0urz9kP/NzMc1k5ZSVXjbuKaNcQ2xq0rc6ZMb/rOWcr2hbn4Bv
iM2DqNU7X/ITLIC7s5xTJWWKtpaKxnV1ljZQ3tFPd3EGrx09zhw+vL0BFYzs1zR7avX7avH4a2rz4/JaYKBcdPj8BC2nx0cREuY6bkV/X6sEXsOQmxxEX7aLV46ep3UdFY3uv6opyGdISoslKiiUxNor0hGgSY6OIjXKRn5ZAUlwUozMSiHYbCtITyE2JJcrlIj5Gf5hGmjMOdGOMG9hprZ0SquL6Q4EeHq3eVh7Z9giP7ni061pqbCr3zbuPq8ZeFVkb1fRWIAA1+5wjYw++5Xx5msEVBWMXB1vvC5wT5rT2XXpgrSVg6VMXfbvXz+HaVtq8fnwBS4c3gMXS5vHj9VsqG9vx+gOU1rXR2OalorGdDl+Alg5f8DlLc8fJj9xwuwy5ybEUpCeQluDMF/D6LVlJMUwJ7viXGBtFSly0wn+Q6G8L/VngH6y1JaEorj8U6OHlD/gJEOCZ/c/w+22/p6yljGhXNDOzZzIyaSTzR8xn4ciFZMZnhrvUgef3OZvZ7PgrFL8H1Z8EnzCQO80J+bGLofBCiE0Ka6kyvDW2e2n3+impacUCJTWtFNe2gnU2Amps83GgqhmvP8DhujY8vp43/klLiCYjMYaCdGcYIDHGTUJMFAkxbhJi3KTERzMmM5Fx2Yk6kjeE+hvo64DZwIdAS+d1a+21A1nkmVCgDx5ev5c/7/4zD216CMuJ/5talL+ISwou4ZJRl5CXmBeGCkOsrQ4Of+SE/KF3nBPjAl6nBZ87DcZdDFmTIXsyjJytMXgZlAIBS4cvgDcQYP/RZg5WtVDR0Ia1UN7YTn2rh5LaVsrr22n1OMMKPckKbvGbnhBDZlIMeSlxTMxNZly2s4ogOylWW/2eof4G+sU9XbfWvj0AtfWLAn1wOtRwiP31+9lZvZOa9hqe2f/MCffcOvVWpmRM4ZJRl5AamxqGKkOso8kJ9gNrnQl2RzY5AQ8Qn+6MvU9cBuMugaTscFYqcsYCAUub10+rx09Dm4cDVS0crGrhYFUzzR0+6lu9XRMLu3f9u4yzTDAzMZak2Cgm5CaRnxZPQXo86QkxjEyLIyU+mrhoN8mxUUNr8m0/nVGgG2PigK8BE3COTn003Oeff5oCPXJUtlSyoXwD26u288QnT3Rdj3ZFc/2E61k6eikXjLhg6E2s69TeANX7nXH4g285G920Vjvr30fOcrrm8+dC/hxnyZz+AyZDiLWWsoZ2DlU5SwMrg1v/1rU4M/v3VzX3uN0vEJzY5ywRTEuIod3rx20MWckx5CTHkZ0cS3ZSLGkJ0eSmOI/jooduD9iZBvrjgBdnVvuVQLG19hshq/IMKNAjV0VLBav3rGbL0S1sProZcCbWLR21lFum3sKUjEE5D3PgBAJQthn2vgRF78KRjc7+8+DsOV94odM1P26Jzn6XYaG5w0dRdQutHj/lDW1UNXUQsJbqZg9H6toorWuloc1LlNuFy0BVU8dxOwF2Fx/tJis5hlHpCeSmxJGZGEN2ciwj05xT/vLT4slNiYvI4D/TQN9urZ0e/DkK+NBaOyd0ZfadAn1oqG6r5p3Sd3jh4At8UPEBALNzZrOscBmfnfhZ4qPiw1zhWeBtd06Pq9gG+1+H0o+ccXmAtNHOVrVjFzsBnz4mvLWKDBIeX4Dq5g6ONnXQ0OalvL6NmhYP9a0eKhs7OFzXSlVTBzXNnh7H+7OSYshLjSMn2Qn99MQY0hKcsf/0BOePgJzkWHJT4oiJGhxj/mca6Ju7B/inHw8GCvShp6athic+eYLn9j9HaXMp8VHxnJ93Pt+Z+x0KUwvDXd7ZY60zc/6Tl6F4PRz+4FjAZ02CCZc7m9yMnA2po9RFL3Iaje1eKhraqW7qoLS+jaON7RyubaOyqZ2jjR3UtniobfX0OMvfGEiKjSInOZac5DhyUpyQz0mOJTMphuykOPJSY8lKiiU1PjqkY/5nGuh+js1qN0A80Br82Vprw76bhgJ96LLWsrFyI0/ufZKXil7CYLii8Aqun3A980fMJ8o1zA7SsBaq9sKBN5zx96J3j02ySx7pnP+eMxXypjtr4nWanEifWetM8qtr9VLX4qGqyTn8p6yhjboWD0ebOoJfzh8BHT2Ef4zbRVZSDGnBGf4Lx2fx9UvGD1iN/T4+dbBSoA8PVa1VPLbzMf667680eZsYkTiCFeNXcOPEGxmRNCLc5YWHtx0qdzpj78XvOQHfWnPs+YQsyJ4ChYucmfQF54N7iE44FAkDay2N7T5qgl3+lY3tVDd7ONrUTlVTBw2tXqpbPMwelcYPrz13wD5XgS5DQqu3lXeOvMOTe5/kg4oPcBkX80fM5+bJN3NR/kVED+fACgSguQIqd0HldmdGfeV2KN8GWIhOdMJ90nIn3LMnQ1RsuKsWkT5SoMuQc7D+IE9+8iSvFL1CVVsV8VHxXD7mcpYVLuOi/Iu0brVTewMcfBsOvQ3734C6Q851dyyMmOmMwY+9CMYsUje9SARQoMuQ5fV72VC+gVeKXuHFgy/isz7GpY5jccFiLhtzGTOzZ4a7xMHDWmcWfflWp7u+fJszs97XhnNk1yTnPPixF8OoC7RlrcggpECXYcHj9/DCwRd4Zv8zbKvahs/6mJk9k8vHXM41464ZmnvK95e3Hco/dlrxRe84M+qtH4wLsqfC+CWQfx6MmgepBeGuVmTYU6DLsNPQ0cAz+5/h2QPPsq9uHzGuGBbmL+Rvzvkb5ubOVZf8yXQ0weEPnWVyxeudn/3Bk5LTRjsT7fJmOCfLjVkIMQnhrVdkmFGgy7C2t3Yvf933V14uepna9loKUwr53KTPsWL8CtLj0sNd3uDm9zrd9MUbnO75yh1Qvc9pxbtjnNZ759eImZA+FlyDYwMOkaFIgS4CtPvaeXb/s6zeu5r99fuJccUwJnUMM7JmsGTUEi7MvxC3TkE7PU8rlGxw9qQv2eCMxXe24mOSYcQMpxWfPdnZxjZ3GiRquENkICjQRT5lZ81Ont73NAcbDrKlcgs+66MgqYCbJt/EteOv1Xh7X/g6oGqPM9mufKsT8BXbg5PtgmKSIDYFsiY4XfdZkyBjvPNz+hiIG4In7omEgAJd5BS8AS9vlLzBqt2rug6KmZMzh+snXM9lYy4jOSY5zBVGoIAfGo9A1SdON33NfvC2Qe0BaCiFlqrj70/IhMwJzk53Bec7YZ97rsboRT5FgS7SSwfqD7Dm4BpeLXqVkqYSYlwxXDzqYq4ovILF+YtJiFbADIjWWqg9BPXFTtg3ljmt/Irt4GkO3mQgMQvSC53u+5xzIfccpzs/Lk1j9TIsKdBF+shay47qHbxw6AVeOvQSte21RLmiuGz0ZVwz7hoW5S8afvvJnw1+n3NmfO1BOLIZGg5DXbFzUE1b7bH7jMuZgJczNRj4U5yvtFHOtrdu/buRoUmBLtIP/oCfLUe38NKhl3jx0Is0e5vJjMvkqnFXccWYK5iRPQOXUWsxpKyFlmqo2Opsb9te74R85U5oOHJsUh44s++zp0D+HKc1XzDXad0r5GUIUKCLDBBvwMu7pe/yzP5nWHdkHb6Aj5yEHC4bfR
mXj7mcOblzFO5nm9/ndN2Xb4XmSqf7vmIblH3sBD+AK8pp0Y+Y4Wx3m5gDyXnODPyEDB0/KxFDgS4SArXttbx35D1eOvQSG8o34Av4GJE4ggUjF7C4YDELRy4kPio+3GUOX9Y6QV+60Rmbr9nvzMBvKDn+vugEZzJe9hQn4POmQdoYSBmpoJdBR4EuEmLNnmbeLn2bl4teZlPlJpo8TSRHJ3PpmEuZkzOHpaOXkhqrpVmDQku1M8u+qSLYZV/qHENbsx/a6o7dF53gzLbPHOd8H3UBjJwFSbkKegkbBbrIWeQNeNlYsZGn9z3N++XvU9dRh8u4mJY5jdk5s7l41MWcl3ueuuYHG2uDs+13Q10R1BxwQr7mANSXQMDr3JeQ6bTkc6c5s++Tco8tr0sbDSn5OnteQkaBLhIm1lq2VW/jlaJX2Fy5md21uwnYADnxOSwbu4zlhcuZnjVde8sPdt42Z+vbiu3OV+UOOLobfO093GycTXRiEp3x+egEZww/KQc8LeD3gMvt7Ljn9zh/EKSMdGbxe1ohOt45qz55BGSOh8yJzh8MscnOcr3OpZPRcWf1VyCDgwJdZJA4bty9bAM+6yM/KZ9LR1/K/BHzmZUzSxvZRIrOzXNaqqCtHgi28BtKnXPoO5qgtcYJcWudCXu+dmdXPGudbvvoBGfdfVOF855xKU74t9U7E/oCvpN/fkKm0xsQk+TstpeY7fQQxCY7fyCkjXbOvU/O0xDBEKJAFxmEatpqeOvwW7xa/CobKzbiCXhwGzczsmdw6ehLuTD/QsanjQ93mRIuAb8zqa9qrzO2722F9kbnjwJfhxP4dUXOHw61B48f/+/OuJzWfsY4p8cgvRDi052egYQsSM13dulzxyj4I4ACXWSQa/e1s6lyE5sqN7GudB176/YCMCFtAhflX8T5eedzft75xEWpm1VOIuB3NuLxtDq9AbUHoKPZ6TForoCag+BpcnoD/J4TXx8V77TqE7OcFn72lOD2u4lOT0B6oTNUIGGlQBeJMCWNJawrXcdrxa+xtWorfusnISqBxQWLWV64nIX5WhInZyjgh+ajzlBA7QFnWKDmgDNPoKEEGsuhqdwZTujOHeu06jPHOzv0pRY4cwXSRjmtf4DkkdrAJ8QU6CIRrMXbwsaKjbxe8jpvHn6Tho4G4qPimZk9k/Pzzmd2zmymZU1TwMvAam90lvXVFzst+up9TtBXf+L87G098TVR8U7Y50w9Ntkvc4LzlZKv/fcHgAJdZIjwBXx8WPEhbx9+mw8rPmR//X4AokwUUzKmMD17OnNy5jArZxa5CbmaPS+hYa0zZt/R6Izj1x4EG3Ba+pU7nRUArdXOtU5R8ZAT7MZPyXd27ssYBxljnQl9+t9qryjQRYaoho4GtlZtZcvRLWw5uoUd1TvoCO5rnpOQwwV5F3Bh/oUsLlhMUkxSmKuVYSUQCI7d73e+qvdD5XY4uid4fG637IlJCgZ84bGgT8x2ZvKnj4GkPLXugxToIsOE1+9lZ81OdlTvYMvRLXxY8SH1HfXEumNZMGIBV4+/mnl588iIywh3qTKc+TqczXpqD0HdIaeF3/lzXdGJk/ai4p2QTy90gt0d64zhZ4x1rqUXQkrBsBi/V6CLDFP+gL9rY5vXil7jaNtRAEYkjmBWziwWjVzExQUXkxaXFuZKRYICfmc9f+MRZ5Z+3SEn7GsPOmEf8Drr8xuOHNu9D5z1+2ljnOBPLXBa9uljIXWUE/zx6UOiW1+BLiL4A34+qvyIvbV72Va1jc1HN1PdVk2UiWLByAVcNe4qlo5aSkLnTmQig1nnxj51RcFx/EPOrP3aQ85Svbba4++PSw228ruN3WdNcr7iI+cPWgW6iJzAWsuuml28UvwKLx16iYqWCuLccSwuWMzlYy7nwvwLNe4ukau9AeoPO1373bv1aw8616z/2L1xaU6LPm1MsGVfCGmFzs+powbVNrsKdBE5pYAN8PHRj3np0Eu8Wvwqte21xLhiWJS/iGWFy1hcsFhb0srQ4fc6oV69z1mGV1fkLM+rK3auByeWdkkecSzsj/te6CzNO4sb7ijQjAxOCgAAFENJREFURaTX/AE/W6u28lrxa7xa9CpH244S5YpiXt48lo5ayiWjLiE3MTfcZYqERiDg7LTXFfCf+t5YevxyPFe0s8Ne+hjne9poSB3tdOmn5Dvr8Qdwhr4CXUTOSMAG2Fq1lbUla3nz8JsUNxYDMC1zGktHL+WKwisYkzImzFWKnEV+rzNGX198bGZ+fcmx1n1r9fH3T7wCvvDkgH28Al1E+s1ay6GGQ6w9vJa1JWvZXr0dgEnpk7hszGVcOPJCzsk8B7f2+5bhzNPijN3XFTmt+aRcmLpiwN5egS4iA66ipYJXi17ljZI32HJ0CxZLemw6i/L/f3v3Hlzlcd5x/PtISIAECAnpCCRim4u4SEJIAmPucZ00duwkTjue2GmakKSpJ+1MJ3Wb6cSTTtO0k0lvkyZO2qSp4zqXxonrpGluncT1DRCIm+4gDAaMjW5HEkLmKiS0/eNdHQ4EZAsJvUdHv8/MO3rffd/zas9qOQ+7757dDWwq3MSGwg1kTc0KO5siSUUBXURuqpMXTlLdWs22lm1UtVTR09fDFJvC7XNvZ13BOtbMXcOynGVqvYuM0oQI6Ga2EPgskOWce+CtvEYBXSTxXBq8RFN3E88df46tJ7ZypPcIADPTZ7Jm7hrWF6xnQ+EGCmcUhpxTkYkntIBuZk8A7wGizrnSuPR7gK8AqcDjzrm/izv3jAK6SPLoPNfJnvY97Grfxc7WnbSdbQPgtlm3saFwAxsKNrB67mqtFifyFoQZ0DcDZ4DvDAV0M0sFDgG/DZwA9gAfdM4d8OcV0EWS1NDAuqrWKqpaq9jbvpe+S32kp6RTmV/J2+e/nbtuuYuCGQVhZ1UkIYXa5W5mtwE/jwvo64C/ds7d7Y8fBXDOfdEfDxvQzexh4GGAW265ZdXx48dvav5F5Oa5MHCBmo4aqlqr2N6ynaO9RwFYlLWIzfM3s3n+ZlbkrWBq6tSQcyqSGBItoD8A3OOc+4Q//jBwB/A54AsELffHhwL8cNRCF0kux984zouvv8j2lu3sbd/LgBtgWuo07ph3B5vnb2ZT4SbmzZgXdjZFQjNcQE+Yteacc93AJ8POh4iE59ZZt7KlZAtbSrZw+uJpdrfvZnfbbl468RIvnXgJgMWzF7Np/iY2FW6iPFJOWkpayLkWSQwJ1+U+Emqhi0wOzjmO9h5le8t2tp3Yxr7oPgYGB5iRNoN1BevYWLiRjYUbiWREws6qyE2VaC30PUCRmS0AWoCHgN8LIR8iMkGYGYtmL2LR7EVsKdnC2f6zVLdVs+3ENra3bOfZ488CUJRdxMaCILhXRCpIS1XrXSaPmz3K/SngTiAX6AA+55z7lpndC3yZ4GtrTzjnvnAj91cLXUSccxw+dZiqliqqWqpirffMtEzWF6yPPXufM31O2FkVGbUJMbHMjVBAF5Grne0/y662XWxr2cbWE1uJnotiGGV5ZawrWMfaeWspyyvTs3eZk
BTQRWRScs7xcs/LvPD6C2w7sY393fsZdINkTMmgIr+CdfOC5+8LsxZiZmFnV+RNKaCLiAC9fb3sad9DdVs1u9p28eobrwKQn5HPxsKNrC9Yz+q5q8mZlhNuRkWuQwFdROQaWs+0UtVaxY6WHVS3VXOm/wwAS7OXxhaVqcyvJDMtM+ScigSSLqCb2XuB9y5evPgPDx8+HHZ2RCQJ9A/2s79rf6wFXxutpX+wn1RLpTS3lPUF61lfsJ7S3FKmpCTMFB4yySRdQB+iFrqI3CznB85T31nP7rbdVLdV09TVhMMxM30ma+etDVaNK9igmetkXCmgi4iMUm9fLzvbdrKjZQc7WnfQca4DgJxpOazIXcGS7CWUR8opj5QzK31WyLmVZKWALiIyhoZmrqtuq6a5u5nGrkZee+M1BtwARjAJTmWkkrK8MpZkL6Eou0jd9DImEm2mOBGRCS1+5roh5/rP0dTVRG20ltpoLb889kuePvQ0QPA1uUgFxXOKuWXWLSzPWc6i2YsU5GVMqTaJiIyBjLQM1sxbw5p5awC4NHiJY73HONRziJpoDXvb91LVWhW7fmrqVBZmLaQ0t5TySDlLs5cqyMuoqMtdRGSc9F3qo/VMKwe6D7C/ez+HTh5if/f+2Nflpk+ZzvKc5ZTmlrIidwWV+ZVacEauoGfoIiIJ6tLgJY72HuVQzyGauppo7GqkubuZi4MXAViUtYjiOcWU5ZVRlldG0ewiLToziSmgi4hMIP2D/Rw6eYjd7bvZ2bqT5pPNnOo7BcCUlCmUzCmhMr+SVZFVLMtZRn5mfsg5lvGigC4iMoE552g920p9tJ6DPQep7ailqbuJgcEBAApnFLIqfxWr81ezOn8182fO19z0SUoBXUQkyVwYuEBjVyMHTx6kpqOGfR376OnrASCSEaEiUkHR7CJWz13NitwVpKemh5xjGQtJF9A19auIyJUG3SBHTx1lX8c+9nbspSZaQ/RcFAhG1JfMKWFZzjIq8iuoyKtQN/0ElXQBfYha6CIi19fb1xsL8LUdtRzpPcL5gfNA0IovzyunIlJBZX4lS7OXkpqSGnKO5c0ooIuISGywXW20lobOBuo762k92wpAZlom5ZFyVkVWUZlfSWluKVNTp4acY7maZooTERHSUtIoyS2hJLckltZ+tp19Hfuo6aihJlrDY7WPAZCekk5pbilF2UWU5payOn81hTMKNdgugamFLiIiMacunKImWhML8M0nm2Oj6XOn57Iyb2VsRP2S7CXqph9n6nIXEZEbMjTYbk/HHho7G6mJ1tBypgW43IqvzK+kIlKhlebGgQK6iIiMmfaz7exp38OB7gPUd9bT3N0cW2lucfZiKiNBgK+MVGq9+DGmgC4iIjfN0EpzNdEaaqO11HfWc7b/LADzMudREalgVf4qKiOVLJy9kBRLCTnHE5cCuoiIjJtLg5diq8zt69hHbbSWrvNdAGRNzYq13isiFSyfs1yj6UdAAV1ERELjnOP1069fMdju+BvHgWBu+mXZy1gZWUl5XjnlkXLmZs4NOceJSwFdREQSStf5Luo762Pfhz/QfeA3Jr1ZmbeS8kg5y3OWa4U5TwFdREQSWv9gP4d6DlEfraeus4766OVJb9JT0inJLaE8r5ziOcUUZRexIGvBpHwWn3QBXXO5i4gkv+i5KPWd9bEgf6D7AP2D/QDMTJtJWV5ZrKu+LK+MzLTMkHN88yVdQB+iFrqIyORx8dJFjvUe4+Wel2NB/nDPYRyOFEuhaHYR5ZHLXfXzZyTfMrIK6CIikpROXzxNY1cjddG62DP5M/1nAMiZlhMbaFceCbrrJ/qIes3lLiIiSWlm+kzWF6xnfcF6IPjK3JHeI7EAXxet4/nXnweCEfXFOcVXjKiPZETCzP6YUgtdRESSWvf5bho6G6jrrKMuWsf+7v30XeoDoCCzgJWRlbFu+iXZS0hLSdwR9epyFxER8fov9XPw5MGgBd9ZR220lui5KABTU6dSPKeYFbkrKMsroyy3jLmZcxPmWbwCuoiIyDDaz7ZTG62lsauRxs5Gmk82x1rxudNzKcstY0XeClbmraRkTgkZaRmh5FMBXUREZAT6LwXfi2/oaqChs4HGrsbY7HYplsLi2YuvaMWP1xz1CugiIiKjdOrCKRq7GmnoaqCxM/h5+uJpADLTMimdU8qKvBWU5pZSlltGXkbemOdBAV1ERGSMDbpBjr9xPAjynUFL/nDPYQbcAAD5Gfnct/A+Hln1yJj9Tn1tTUREZIylWAoLshawIGsB71v0PgAuDFzg4MmDNHU10dDVwLTUaeOWHwV0ERGRMTJtyrTYRDbjbfLNbC8iIpKEFNBFRESSwIQM6Gb2XjP7Zm9vb9hZERERSQgTMqA7537mnHs4Kysr7KyIiIgkhAkZ0EVERORKCugiIiJJQAFdREQkCSigi4iIJAEFdBERkSSggC4iIpIEFNBFRESSgAK6iIhIElBAFxERSQITej10M+sEjo/hLXOBrjG832SkMhw9leHoqQzHhspx9Ma6DG91zuVd68SEDuhjzcz2Xm/heHlrVIajpzIcPZXh2FA5jt54lqG63EVERJKAArqIiEgSUEC/0jfDzkASUBmOnspw9FSGY0PlOHrjVoZ6hi4iIpIE1EIXERFJAgrogJndY2Yvm9krZvaZsPOTSMzsbWb2gpkdMLP9ZvYpn55jZs+a2WH/M9unm5k95suywcwq4+61xV9/2My2hPWewmJmqWZWa2Y/98cLzGyXL6sfmlm6T5/qj1/x52+Lu8ejPv1lM7s7nHcSHjObbWbPmNlBM2s2s3WqiyNjZo/4f8tNZvaUmU1TXRyemT1hZlEza4pLG7N6Z2arzKzRv+YxM7MbyqhzblJvQCpwBFgIpAP1QHHY+UqUDZgHVPr9mcAhoBj4B+AzPv0zwN/7/XuB/wUMWAvs8uk5wFH/M9vvZ4f9/sa5LP8M+D7wc3/8NPCQ3/8G8Ed+/4+Bb/j9h4Af+v1iXz+nAgt8vU0N+32Ncxl+G/iE308HZqsujqj8CoFjwPS4OvhR1cU3LbfNQCXQFJc2ZvUO2O2vNf/ad99IPtVChzXAK865o865i8APgPtDzlPCcM61Oedq/P5poJngQ+F+gg9X/M/3+/37ge+4QDUw28zmAXcDzzrnTjrneoBngXvG8a2EyszmA/cBj/tjA+4CnvGXXF2GQ2X7DPAOf/39wA+cc33OuWPAKwT1d1IwsyyCD9ZvATjnLjrnTqG6OFJTgOlmNgXIANpQXRyWc24rcPKq5DGpd/7cLOdctQui+3fi7jUiCuhBcHo97viET5Or+O62CmAXkO+ca/On2oF8v3+98pzs5fxl4C+AQX88BzjlnBvwx/HlESsrf77XXz/Zy3AB0An8h3908biZZaK6+JY551qAfwJeIwjkvcA+VBdvxFjVu0K/f3X6iCmgy1tiZjOAHwF/6px7I/6c/1+lvi5xHWb2HiDqnNsXdl4muCkE3Z5fd85VAGcJujpjVBeH55/z3k/wn6MCIJPJ1TtxUyRKvVNAhxbgbXHH832aeGaWRhDM
/9M592Of3OG7ivA/oz79euU5mct5A/A+M3uV4JHOXcBXCLripvhr4ssjVlb+fBbQzeQuQwhaLiecc7v88TMEAV518a17J3DMOdfpnOsHfkxQP1UXR26s6l2L3786fcQU0GEPUORHeaYTDPz4ach5Shj+edm3gGbn3JfiTv0UGBqluQX4n7j0j/iRnmuBXt8t9SvgXWaW7VsJ7/JpSc8596hzbr5z7jaC+vW8c+5DwAvAA/6yq8twqGwf8Nc7n/6QH3m8ACgiGEwzKTjn2oHXzWypT3oHcADVxZF4DVhrZhn+3/ZQGaoujtyY1Dt/7g0zW+v/Jh+Ju9fIhD16MBE2glGJhwhGan427Pwk0gZsJOhKagDq/HYvwXO054DDwP8BOf56A/7Fl2UjsDruXh8nGDzzCvCxsN9bSOV5J5dHuS8k+BB8BfgvYKpPn+aPX/HnF8a9/rO+bF/mBkfCTuQNKAf2+vr4E4LRwqqLIyvDzwMHgSbguwQj1VUXhy+zpwjGHPQT9BT9wVjWO2C1/3scAb6Gn/RtpJtmihMREUkC6nIXERFJAgroIiIiSUABXUREJAkooIuIiCQBBXQREZEkoIAukiDM7Itm9ltm9n4ze3SEr83zq2HVmtmmYa670/xqb8NcU25m947k9483M3vVzHLDzodIIlFAF0kcdwDVwNuBrSN87TuARudchXNu2yjzUU4w14CITCAK6CIhM7N/NLMG4HZgJ/AJ4Otm9lfXuPY2M3ver7P8nJndYmblBEs53m9mdWY2/arX3GPB+uE1wO/Gpa8xs52+Vb/DzJb62RL/BnjQ3+vBa113jXzNM7Ot/jVNQ70EZvZ1M9trwfrbn4+7/lXfI1Hnz1ea2a/M7IiZfdJfc6e/5y8sWHP7G2b2G59ZZvb7Zrbb3+vfLFh3PtXMnvR5aTSzR27ojyMykYQ9A482bdocBMH8q0AaUDXMdT8Dtvj9jwM/8fsfBb52jeunEazwVEQwg9XTXJ6pbhYwxe+/E/jRte51veuu+j1/jp9lEUgFZvr9nLi0F4Eyf/wql9fc/meCmd9mAnlAh0+/E7hAMItZKsFykw/EvT4XWO7LJM2n/yvB1JmrCJaqHMrf7LD/xtq03extaDJ+EQlXJVAPLCNYc/561nG5lf1dgpb5cJYRLMZxGMDMvgc87M9lAd82syKC6X3TrnOPt3LdHuAJCxby+Ylzrs6nf8DMHiZYKW0eUEwQvOHymgmNwAzn3GngtJn1mdlsf263c+6oz/tTBFMRD63bDcGjhlXAnmAabKYTLJLxM2ChmX0V+AXw62HKSCQpKKCLhMh3lz9JsMJSF5ARJFsdsM45d/4m/vq/BV5wzv2OBWvdv3ij1znntprZZuA+4Ekz+xKwDfg0cLtzrsfMniToMRjS538Oxu0PHQ99Nl09N/XVxwZ82zn3G4MIzWwlcDfwSeADBD0aIklLz9BFQuScq3POlRMsDlQMPA/c7Zwrv04w30GwYhvAhwiC5nAOAreZ2SJ//MG4c1lcXqbxo3Hppwm6v9/suhgzu5Wgq/zfgccJehxmEaxZ3mtm+cC73ySv17LGgpUQU4AHge1XnX8OeMDMIj4fOWZ2qx8Bn+Kc+xHwlz4/IklNAV0kZGaWB/Q45waBZc65A8Nc/ifAx/wgug8Dnxru3s65CwRd7L/wg+Kicaf/AfiimdVyZW/dC0Dx0KC4Ya6LdydQ7695EPiKc64eqCX4T8X3garh8nodewhWn2oGjgH/fdX7O0AQsH/ty+RZgq79QuBF39PxPWBEXwMUmYi02pqIJCQzuxP4tHPuPWHnRWQiUAtdREQkCaiFLiIikgTUQhcREUkCCugiIiJJQAFdREQkCSigi4iIJAEFdBERkSSggC4iIpIE/h9jNniEIFadYQAAAABJRU5ErkJggg==\n" + }, + "metadata": { + "needs_background": "light" + } + } + ], + "source": [ + "plt.figure(figsize=(8, 6))\n", + "plot_progressive_loss(loss_list_vanilla, 'VanillaVW')\n", + "plot_progressive_loss(loss_list_autovw_ni, 'AutoVW:NI')\n", + "plot_progressive_loss(loss_list_autovw_nilr, 'AutoVW:NI+LR')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### AutoVW based on customized VW arguments\n", + "You can easily create an AutoVW instance based on customized VW arguments (For now only arguments that are compatible with supervised regression task are well supported). The customized arguments can be passed to AutoVW through init_config and search space." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": "Seed namespaces (singletons and interactions): ['e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nCreated challengers from champion |supervised||classic\nNew challenger size 37, ['|supervised|gi|classic', '|supervised|eh|classic', '|supervised|ad|classic', '|supervised|gh|classic', '|supervised|bc|classic', '|supervised|bd|classic', '|supervised|ae|classic', '|supervised|dg|classic', '|supervised|ei|classic', '|supervised|df|classic', '|supervised|fh|classic', '|supervised|ac|classic', '|supervised|ab|classic', '|supervised|cg|classic', '|supervised|hi|classic', '|supervised|fg|classic', '|supervised|bi|classic', '|supervised|be|classic', '|supervised|de|classic', '|supervised|ci|classic', '|supervised|fi|classic', '|supervised|cd|classic', '|supervised|af|classic', '|supervised|ce|classic', '|supervised|di|classic', '|supervised|bf|classic', '|supervised|ai|classic', '|supervised|bh|classic', '|supervised|ag|classic', '|supervised|bg|classic', '|supervised|eg|classic', '|supervised|ah|classic', '|supervised|cf|classic', '|supervised|dh|classic', '|supervised|ef|classic', '|supervised|ch|classic', '|supervised||classic']\nOnline learning for 10000 steps...\nSeed namespaces (singletons and interactions): ['cf', 'e', 'g', 'b', 'd', 'i', 'h', 'a', 'f', 'c']\nCreated challengers from champion |supervised|cf|classic\nNew challenger size 43, ['|supervised|bg_cf|classic', '|supervised|cf_dg|classic', '|supervised|ab_cf|classic', '|supervised|bh_cf|classic', '|supervised|cf_eg|classic', '|supervised|cf_ef|classic', '|supervised|be_cf|classic', '|supervised|cf_di|classic', '|supervised|cf_ci|classic', '|supervised|bd_cf|classic', '|supervised|cf_fi|classic', '|supervised|bf_cf|classic', '|supervised|ah_cf|classic', '|supervised|ac_cf|classic', '|supervised|ce_cf|classic', '|supervised|cf|classic', '|supervised|cf_cfg|classic', '|supervised|cf_gi|classic', '|supervised|ag_cf|classic', '|supervised|ae_cf|classic', '|supervised|cf_fg|classic', '|supervised|cf_hi|classic', '|supervised|cf_df|classic', '|supervised|cef_cf|classic', '|supervised|cdf_cf|classic', '|supervised|cd_cf|classic', '|supervised|bc_cf|classic', '|supervised|cf_gh|classic', '|supervised|cf_cg|classic', '|supervised|cf_ch|classic', '|supervised|bcf_cf|classic', '|supervised|af_cf|classic', '|supervised|cf_ei|classic', '|supervised|ai_cf|classic', '|supervised|cf_dh|classic', '|supervised|ad_cf|classic', '|supervised|cf_de|classic', '|supervised|cf_fh|classic', '|supervised|cf_eh|classic', '|supervised|acf_cf|classic', '|supervised|bi_cf|classic', '|supervised|cf_cfi|classic', '|supervised|cf_cfh|classic']\nAverage final loss of the AutoVW (tuning namespaces) based on customized vw arguments: 9.606119226635231\n" + } + ], + "source": [ + "''' create an AutoVW instance with ustomized VW arguments'''\n", + "# parse the customized VW arguments\n", + "fixed_vw_hp_config = {'alg': 'supervised', 'loss_function': 'classic'}\n", + "search_space = fixed_vw_hp_config.copy()\n", + "search_space.update({'interactions': AutoVW.AUTO_STRING})\n", + "\n", + "autovw_custom = AutoVW(max_live_model_num=5, search_space=search_space) \n", + "loss_list_custom = online_learning_loop(max_iter_num, vw_examples, autovw_custom)\n", + "print('Average final loss of the AutoVW (tuning namespaces) based on customized vw arguments:', sum(loss_list_custom)/len(loss_list_custom))\n" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "name": "python3", + "display_name": "Python 3", + "metadata": { + "interpreter": { + "hash": "0cfea3304185a9579d09e0953576b57c8581e46e6ebc6dfeb681bc5a511f7544" + } + } + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13-final" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/setup.py b/setup.py index ba8c14dd54..5567f162c6 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ setuptools.setup( "jupyter", "matplotlib==3.2.0", "rgf-python", + "vowpalwabbit", ], "test": [ "flake8>=3.8.4", @@ -48,6 +49,8 @@ setuptools.setup( "xgboost<1.3", "rgf-python", "optuna==2.3.0", + "vowpalwabbit", + "openml", ], "blendsearch": [ "optuna==2.3.0" @@ -62,6 +65,9 @@ setuptools.setup( "nni": [ "nni", ], + "vw": [ + "vowpalwabbit", + ] }, classifiers=[ "Programming Language :: Python :: 3", diff --git a/test/test_autovw.py b/test/test_autovw.py new file mode 100644 index 0000000000..bc7524e227 --- /dev/null +++ b/test/test_autovw.py @@ -0,0 +1,372 @@ +import unittest + +import numpy as np +import scipy.sparse + +import pandas as pd +from sklearn.metrics import mean_squared_error, mean_absolute_error +import time +import logging +from flaml.tune import loguniform, polynomial_expansion_set +from vowpalwabbit import pyvw +from flaml import AutoVW +import string +import os +import openml +VW_DS_DIR = 'test/data/' +NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase) +logger = logging.getLogger(__name__) + + +def oml_to_vw_w_grouping(X, y, ds_dir, fname, orginal_dim, group_num, + grouping_method='sequential'): + # split all_indexes into # group_num of groups + max_size_per_group = int(np.ceil(orginal_dim / float(group_num))) + # sequential grouping + if grouping_method == 'sequential': + group_indexes = [] # lists of lists + for i in range(group_num): + indexes = [ind for ind in range(i * max_size_per_group, + min((i + 1) * max_size_per_group, orginal_dim))] + if len(indexes) > 0: + group_indexes.append(indexes) + print(group_indexes) + else: + NotImplementedError + if group_indexes: + if not os.path.exists(ds_dir): + os.makedirs(ds_dir) + with open(os.path.join(ds_dir, fname), 'w') as f: + if isinstance(X, pd.DataFrame): + raise NotImplementedError + elif isinstance(X, np.ndarray): + for i in range(len(X)): + NS_content = [] + for zz in range(len(group_indexes)): + ns_features = ' '.join('{}:{:.6f}'.format(ind, X[i][ind] + ) for ind in group_indexes[zz]) + NS_content.append(ns_features) + ns_line = '{} |{}'.format(str(y[i]), '|'.join( + '{} {}'.format(NS_LIST[j], NS_content[j] + ) for j in range(len(group_indexes)))) + f.write(ns_line) + f.write('\n') + elif isinstance(X, scipy.sparse.csr_matrix): + print('NotImplementedError for sparse data') + NotImplementedError + + +def save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression): + """ convert openml dataset to vw example and save to file + """ + print('is_regression', is_regression) + if is_regression: + fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0) + print('dataset size', X.shape[0], X.shape[1]) + print('saving data', did, ds_dir, fname) + dim = X.shape[1] + oml_to_vw_w_grouping(X, y, ds_dir, fname, dim, group_num=max_ns_num) + else: + 
NotImplementedError + + +def shuffle_data(X, y, seed): + try: + n = len(X) + except ValueError: + n = X.getnnz() + + perm = np.random.RandomState(seed=seed).permutation(n) + X_shuf = X[perm, :] + y_shuf = y[perm] + return X_shuf, y_shuf + + +def get_oml_to_vw(did, max_ns_num, ds_dir=VW_DS_DIR): + success = False + print('-----getting oml dataset-------', did) + ds = openml.datasets.get_dataset(did) + target_attribute = ds.default_target_attribute + # if target_attribute is None and did in OML_target_attribute_dict: + # target_attribute = OML_target_attribute_dict[did] + + print('target=ds.default_target_attribute', target_attribute) + data = ds.get_data(target=target_attribute, dataset_format='array') + X, y = data[0], data[1] # return X: pd DataFrame, y: pd series + import scipy + if scipy.sparse.issparse(X): + X = scipy.sparse.csr_matrix.toarray(X) + print('is sparse matrix') + if data and isinstance(X, np.ndarray): + print('-----converting oml to vw and and saving oml dataset-------') + save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression=True) + success = True + else: + print('---failed to convert/save oml dataset to vw!!!----') + try: + X, y = data[0], data[1] # return X: pd DataFrame, y: pd series + if data and isinstance(X, np.ndarray): + print('-----converting oml to vw and and saving oml dataset-------') + save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression=True) + success = True + else: + print('---failed to convert/save oml dataset to vw!!!----') + except ValueError: + print('-------------failed to get oml dataset!!!', did) + return success + + +def load_vw_dataset(did, ds_dir, is_regression, max_ns_num): + import os + if is_regression: + # the second field specifies the largest number of namespaces using. + fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0) + vw_dataset_file = os.path.join(ds_dir, fname) + # if file does not exist, generate and save the datasets + if not os.path.exists(vw_dataset_file) or os.stat(vw_dataset_file).st_size < 1000: + get_oml_to_vw(did, max_ns_num) + print(ds_dir, vw_dataset_file) + if not os.path.exists(ds_dir): + os.makedirs(ds_dir) + with open(os.path.join(ds_dir, fname), 'r') as f: + vw_content = f.read().splitlines() + print(type(vw_content), len(vw_content)) + return vw_content + + +def get_data(iter_num=None, dataset_id=None, vw_format=True, + max_ns_num=10, shuffle=False, use_log=True, dataset_type='regression'): + logging.info('generating data') + LOG_TRANSFORMATION_THRESHOLD = 100 + # get data from simulation + import random + vw_examples = None + data_id = int(dataset_id) + # loading oml dataset + # data = OpenML2VWData(data_id, max_ns_num, dataset_type) + # Y = data.Y + if vw_format: + # vw_examples = data.vw_examples + vw_examples = load_vw_dataset(did=data_id, ds_dir=VW_DS_DIR, is_regression=True, + max_ns_num=max_ns_num) + Y = [] + for i, e in enumerate(vw_examples): + Y.append(float(e.split('|')[0])) + logger.debug('first data %s', vw_examples[0]) + # do data shuffling or log transformation for oml data when needed + if shuffle: + random.seed(54321) + random.shuffle(vw_examples) + + # do log transformation + unique_y = set(Y) + min_y = min(unique_y) + max_y = max(unique_y) + if use_log and max((max_y - min_y), max_y) >= LOG_TRANSFORMATION_THRESHOLD: + log_vw_examples = [] + for v in vw_examples: + org_y = v.split('|')[0] + y = float(v.split('|')[0]) + # shift y to ensure all y are positive + if min_y <= 0: + y = y + abs(min_y) + 1 + log_y = np.log(y) + log_vw = v.replace(org_y + '|', str(log_y) + ' |') + 
log_vw_examples.append(log_vw) + logger.info('log_vw_examples %s', log_vw_examples[0:2]) + if log_vw_examples: + return log_vw_examples + return vw_examples, Y + + +class VowpalWabbitNamesspaceTuningProblem: + + def __init__(self, max_iter_num, dataset_id, ns_num, **kwargs): + use_log = kwargs.get('use_log', True), + shuffle = kwargs.get('shuffle', False) + vw_format = kwargs.get('vw_format', True) + print('dataset_id', dataset_id) + self.vw_examples, self.Y = get_data(max_iter_num, dataset_id=dataset_id, + vw_format=vw_format, max_ns_num=ns_num, + shuffle=shuffle, use_log=use_log + ) + self.max_iter_num = min(max_iter_num, len(self.Y)) + self._problem_info = {'max_iter_num': self.max_iter_num, + 'dataset_id': dataset_id, + 'ns_num': ns_num, + } + self._problem_info.update(kwargs) + self._fixed_hp_config = kwargs.get('fixed_hp_config', {}) + self.namespace_feature_dim = AutoVW.get_ns_feature_dim_from_vw_example(self.vw_examples[0]) + self._raw_namespaces = list(self.namespace_feature_dim.keys()) + self._setup_search() + + def _setup_search(self): + self._search_space = self._fixed_hp_config.copy() + self._init_config = self._fixed_hp_config.copy() + search_space = {'interactions': + polynomial_expansion_set( + init_monomials=set(self._raw_namespaces), + highest_poly_order=len(self._raw_namespaces), + allow_self_inter=False), + } + init_config = {'interactions': set()} + self._search_space.update(search_space) + self._init_config.update(init_config) + logger.info('search space %s %s %s', self._search_space, self._init_config, self._fixed_hp_config) + + @property + def init_config(self): + return self._init_config + + @property + def search_space(self): + return self._search_space + + +class VowpalWabbitNamesspaceLRTuningProblem(VowpalWabbitNamesspaceTuningProblem): + + def __init__(self, max_iter_num, dataset_id, ns_num, **kwargs): + super().__init__(max_iter_num, dataset_id, ns_num, **kwargs) + self._setup_search() + + def _setup_search(self): + self._search_space = self._fixed_hp_config.copy() + self._init_config = self._fixed_hp_config.copy() + search_space = {'interactions': + polynomial_expansion_set( + init_monomials=set(self._raw_namespaces), + highest_poly_order=len(self._raw_namespaces), + allow_self_inter=False), + 'learning_rate': loguniform(lower=2e-10, upper=1.0) + } + init_config = {'interactions': set(), 'learning_rate': 0.5} + self._search_space.update(search_space) + self._init_config.update(init_config) + logger.info('search space %s %s %s', self._search_space, self._init_config, self._fixed_hp_config) + + +def get_y_from_vw_example(vw_example): + """ get y from a vw_example. this works for regression dataset + """ + return float(vw_example.split('|')[0]) + + +def get_loss(y_pred, y_true, loss_func='squared'): + if 'squared' in loss_func: + loss = mean_squared_error([y_pred], [y_true]) + elif 'absolute' in loss_func: + loss = mean_absolute_error([y_pred], [y_true]) + else: + loss = None + raise NotImplementedError + return loss + + +def online_learning_loop(iter_num, vw_examples, vw_alg, loss_func, method_name=''): + """Implements the online learning loop. + Args: + iter_num (int): The total number of iterations + vw_examples (list): A list of vw examples + alg (alg instance): An algorithm instance has the following functions: + - alg.learn(example) + - alg.predict(example) + loss_func (str): loss function + Outputs: + cumulative_loss_list (list): the list of cumulative loss from each iteration. + It is returned for the convenience of visualization. 
+ """ + print('rerunning exp....', len(vw_examples), iter_num) + loss_list = [] + y_predict_list = [] + for i in range(iter_num): + vw_x = vw_examples[i] + y_true = get_y_from_vw_example(vw_x) + # predict step + y_pred = vw_alg.predict(vw_x) + # learn step + vw_alg.learn(vw_x) + # calculate one step loss + loss = get_loss(y_pred, y_true, loss_func) + loss_list.append(loss) + y_predict_list.append([y_pred, y_true]) + + return loss_list + + +def get_vw_tuning_problem(tuning_hp='NamesapceInteraction'): + online_vw_exp_setting = {"max_live_model_num": 5, + "fixed_hp_config": {'alg': 'supervised', 'loss_function': 'squared'}, + "ns_num": 10, + "max_iter_num": 10000, + } + + # construct openml problem setting based on basic experiment setting + vw_oml_problem_args = {"max_iter_num": online_vw_exp_setting['max_iter_num'], + "dataset_id": '42183', + "ns_num": online_vw_exp_setting['ns_num'], + "fixed_hp_config": online_vw_exp_setting['fixed_hp_config'], + } + if tuning_hp == 'NamesapceInteraction': + vw_online_aml_problem = VowpalWabbitNamesspaceTuningProblem(**vw_oml_problem_args) + elif tuning_hp == 'NamesapceInteraction+LearningRate': + vw_online_aml_problem = VowpalWabbitNamesspaceLRTuningProblem(**vw_oml_problem_args) + else: + NotImplementedError + + return vw_oml_problem_args, vw_online_aml_problem + + +class TestAutoVW(unittest.TestCase): + + def test_vw_oml_problem_and_vanilla_vw(self): + vw_oml_problem_args, vw_online_aml_problem = get_vw_tuning_problem() + vanilla_vw = pyvw.vw(**vw_oml_problem_args["fixed_hp_config"]) + cumulative_loss_list = online_learning_loop(vw_online_aml_problem.max_iter_num, + vw_online_aml_problem.vw_examples, + vanilla_vw, + loss_func=vw_oml_problem_args["fixed_hp_config"].get("loss_function", "squared"), + ) + print('final average loss:', sum(cumulative_loss_list) / len(cumulative_loss_list)) + + def test_supervised_vw_tune_namespace(self): + # basic experiment setting + vw_oml_problem_args, vw_online_aml_problem = get_vw_tuning_problem() + autovw = AutoVW(max_live_model_num=5, + search_space=vw_online_aml_problem.search_space, + init_config=vw_online_aml_problem.init_config, + min_resource_lease='auto', + random_seed=2345) + + cumulative_loss_list = online_learning_loop(vw_online_aml_problem.max_iter_num, + vw_online_aml_problem.vw_examples, + autovw, + loss_func=vw_oml_problem_args["fixed_hp_config"].get("loss_function", "squared"), + ) + print('final average loss:', sum(cumulative_loss_list) / len(cumulative_loss_list)) + + def test_supervised_vw_tune_namespace_learningrate(self): + # basic experiment setting + vw_oml_problem_args, vw_online_aml_problem = get_vw_tuning_problem(tuning_hp='NamesapceInteraction+LearningRate') + autovw = AutoVW(max_live_model_num=5, + search_space=vw_online_aml_problem.search_space, + init_config=vw_online_aml_problem.init_config, + min_resource_lease='auto', + random_seed=2345) + + cumulative_loss_list = online_learning_loop(vw_online_aml_problem.max_iter_num, + vw_online_aml_problem.vw_examples, + autovw, + loss_func=vw_oml_problem_args["fixed_hp_config"].get("loss_function", "squared"), + ) + print('final average loss:', sum(cumulative_loss_list) / len(cumulative_loss_list)) + + def test_bandit_vw_tune_namespace(self): + pass + + def test_bandit_vw_tune_namespace_learningrate(self): + pass + + +if __name__ == "__main__": + unittest.main()