mirror of https://github.com/microsoft/autogen.git
support latest xgboost version (#599)
* support latest xgboost version
* Update test_classification.py
* Update: there exist problems when installing xgboost 1.6.1 on Python 3.6
* cleanup
* xgboost version
* remove time_budget_s in test
* remove redundancy
* stop support of Python 3.6

Co-authored-by: zsk <shaokunzhang529@gmail.com>
Co-authored-by: Qingyun Wu <qingyun.wu@psu.edu>
parent c5272ad377 · commit c45741a67b
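The functional core of the diff below is a pair of version gates in FLAML's XGBoost estimator wrappers: since xgboost 1.6.0, callbacks can no longer be passed to fit(), so they are set on the estimator instead and popped again after training to keep the fitted model picklable; the "auto" tree_method is likewise only forced for sparse input on older xgboost. The following is a minimal, self-contained sketch of the callback gate, not FLAML's actual code — the toy data and the bare TrainingCallback are placeholders:

```python
# Standalone sketch of the version-gated callback handling this commit adds
# around XGBoost's scikit-learn estimators (assumption: toy data and a no-op
# TrainingCallback stand in for FLAML's real callbacks and training set).
import numpy as np
import xgboost as xgb

X = np.random.rand(20, 3)
y = np.array([0, 1] * 10)
callbacks = [xgb.callback.TrainingCallback()]

if xgb.__version__ >= "1.6.0":  # same string comparison the diff uses
    # since xgboost>=1.6.0, callbacks can't be passed in fit();
    # hand them to the constructor instead
    model = xgb.XGBClassifier(n_estimators=5, callbacks=callbacks)
    model.fit(X, y)
    # drop the callbacks again so the fitted model can be pickled
    model.set_params(callbacks=None)
else:
    model = xgb.XGBClassifier(n_estimators=5)
    model.fit(X, y, callbacks=callbacks)
```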
@@ -16,7 +16,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest, windows-2019]
-        python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]
 
     steps:
     - uses: actions/checkout@v2
@@ -1,7 +1,7 @@
 [![PyPI version](https://badge.fury.io/py/FLAML.svg)](https://badge.fury.io/py/FLAML)
 ![Conda version](https://img.shields.io/conda/vn/conda-forge/flaml)
 [![Build](https://github.com/microsoft/FLAML/actions/workflows/python-package.yml/badge.svg)](https://github.com/microsoft/FLAML/actions/workflows/python-package.yml)
-![Python Version](https://img.shields.io/badge/3.6%20%7C%203.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue)
+![Python Version](https://img.shields.io/badge/3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue)
 [![Downloads](https://pepy.tech/badge/flaml)](https://pepy.tech/project/flaml)
 [![Join the chat at https://gitter.im/FLAMLer/community](https://badges.gitter.im/FLAMLer/community.svg)](https://gitter.im/FLAMLer/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 
@@ -27,7 +27,7 @@ FLAML has a .NET implementation as well from [ML.NET Model Builder](https://dotn
 
 ## Installation
 
-FLAML requires **Python version >= 3.6**. It can be installed from pip:
+FLAML requires **Python version >= 3.7**. It can be installed from pip:
 
 ```bash
 pip install flaml
@@ -1,5 +1,5 @@
 # !
-# * Copyright (c) Microsoft Corporation. All rights reserved.
+# * Copyright (c) FLAML authors. All rights reserved.
 # * Licensed under the MIT License. See LICENSE file in the
 # * project root for license information.
 import time
@@ -39,7 +39,7 @@ from .model import (
     TransformersEstimator,
     TransformersEstimatorModelSelection,
 )
-from .data import CLASSIFICATION, group_counts, TS_FORECAST, TS_VALUE_COL
+from .data import CLASSIFICATION, group_counts, TS_FORECAST
 import logging
 
 logger = logging.getLogger(__name__)
@@ -155,7 +155,8 @@ class BaseEstimator:
         X_train = self._preprocess(X_train)
         model = self.estimator_class(**self.params)
         if logger.level == logging.DEBUG:
-            logger.debug(f"flaml.model - {model} fit started")
+            # xgboost 1.6 doesn't display all the params in the model str
+            logger.debug(f"flaml.model - {model} fit started with params {self.params}")
         model.fit(X_train, y_train, **kwargs)
         if logger.level == logging.DEBUG:
             logger.debug(f"flaml.model - {model} fit finished")
@@ -1111,12 +1112,23 @@ class LGBMEstimator(BaseEstimator):
                 kwargs.pop("callbacks")
             else:
                 callbacks = self._callbacks(start_time, deadline)
+            if isinstance(self, XGBoostSklearnEstimator):
+                from xgboost import __version__
+
+                if __version__ >= "1.6.0":
+                    # since xgboost>=1.6.0, callbacks can't be passed in fit()
+                    self.params["callbacks"] = callbacks
+                    callbacks = None
             self._fit(
                 X_train,
                 y_train,
                 callbacks=callbacks,
                 **kwargs,
             )
+            if callbacks is None:
+                # for xgboost>=1.6.0, pop callbacks to enable pickle
+                callbacks = self.params.pop("callbacks")
+                self._model.set_params(callbacks=callbacks[:-1])
             best_iteration = (
                 self._model.get_booster().best_iteration
                 if isinstance(self, XGBoostSklearnEstimator)
@@ -1233,7 +1245,9 @@ class XGBoostEstimator(SKLearnEstimator):
         start_time = time.time()
         deadline = start_time + budget if budget else np.inf
         if issparse(X_train):
-            self.params["tree_method"] = "auto"
+            if xgb.__version__ < "1.6.0":
+                # "auto" fails for sparse input since xgboost 1.6.0
+                self.params["tree_method"] = "auto"
         else:
             X_train = self._preprocess(X_train)
         if "sample_weight" in kwargs:
@@ -1337,9 +1351,11 @@ class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
             self.estimator_class = xgb.XGBRanker
         elif task in CLASSIFICATION:
             self.estimator_class = xgb.XGBClassifier
+        self._xgb_version = xgb.__version__
 
     def fit(self, X_train, y_train, budget=None, **kwargs):
-        if issparse(X_train):
+        if issparse(X_train) and self._xgb_version < "1.6.0":
+            # "auto" fails for sparse input since xgboost 1.6.0
             self.params["tree_method"] = "auto"
         if kwargs.get("gpu_per_trial"):
             self.params["tree_method"] = "gpu_hist"
@@ -1 +1 @@
-__version__ = "1.0.7"
+__version__ = "1.0.8"
@@ -26,7 +26,7 @@
 "\n",
 "In this notebook, we use one real data example (binary classification) to showcase how to use FLAML library.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `notebook` option:\n",
 "```bash\n",
 "pip install flaml[notebook]\n",
 "```"
@@ -27,7 +27,7 @@
 "\n",
 "In this notebook, we demonstrate how to use FLAML library to tune hyperparameters of LightGBM with a regression example.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `notebook` option:\n",
 "```bash\n",
 "pip install flaml[notebook]\n",
 "```"
@@ -26,7 +26,7 @@
 "\n",
 "In this notebook, we demonstrate how to use the FLAML library to fine tune an NLP language model with hyperparameter search. We have tested this notebook on a server with 4 NVidia V100 GPU (32GB) and 400GB CPU Ram.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `nlp,ray,notebook` and `blendsearch` option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `nlp,ray,notebook` and `blendsearch` option:\n",
 "```bash\n",
 "pip install flaml[nlp,ray,notebook,blendsearch];\n",
 "```"
@@ -21,7 +21,7 @@
 "\n",
 "In this notebook, we demonstrate how to use FLAML library for time series forecasting tasks: univariate time series forecasting (only time), multivariate time series forecasting (with exogneous variables) and forecasting discrete values.\n",
 "\n",
-"FLAML requires Python>=3.6. To run this notebook example, please install flaml with the notebook and forecast option:\n"
+"FLAML requires Python>=3.7. To run this notebook example, please install flaml with the notebook and forecast option:\n"
 ]
 },
 {
@@ -27,7 +27,7 @@
 "\n",
 "In this notebook, we demonstrate how to use FLAML library to tune hyperparameters of XGBoost with a regression example.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `notebook` option:\n",
 "```bash\n",
 "pip install flaml[notebook]\n",
 "```"
@@ -8,7 +8,7 @@
 }
 },
 "source": [
-"Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
+"Copyright (c) Microsoft Corporation. All rights reserved. \n",
 "\n",
 "Licensed under the MIT License.\n",
 "\n",
@@ -22,7 +22,7 @@
 "\n",
 "*ChaCha for online AutoML. Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. To appear in ICML 2021.*\n",
 "\n",
-"AutoVW is implemented in FLAML. FLAML requires `Python>=3.6`. To run this notebook example, please install:"
+"AutoVW is implemented in FLAML. FLAML requires `Python>=3.7`. To run this notebook example, please install:"
 ]
 },
 {
@@ -8,7 +8,7 @@
 }
 },
 "source": [
-"Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved. \n",
+"Copyright (c) Microsoft Corporation. All rights reserved. \n",
 "\n",
 "Licensed under the MIT License.\n",
 "\n",
@@ -27,7 +27,7 @@
 "\n",
 "In this notebook, we use one real data example (binary classification) to showcase how to use FLAML library together with AzureML.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the [azureml] option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the [azureml] option:\n",
 "```bash\n",
 "pip install flaml[azureml]\n",
 "```"
@@ -39,7 +39,7 @@
 "\n",
 "In this notebook, we use one real data example (binary classification) to showcase how to use FLAML library.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` option:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `notebook` option:\n",
 "```bash\n",
 "pip install flaml[notebook]\n",
 "```"
@@ -4,7 +4,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Copyright (c) 2020-2021. All rights reserved.\n",
+"Copyright (c). All rights reserved.\n",
 "\n",
 "Licensed under the MIT License.\n",
 "\n",
@@ -22,7 +22,7 @@
 "\n",
 "*Running this notebook takes about one hour.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml with the `notebook` and `nlp` options:\n",
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml with the `notebook` and `nlp` options:\n",
 "\n",
 "```bash\n",
 "pip install flaml[nlp]==0.7.1 # in higher version of flaml, the API for nlp tasks changed\n",
@@ -364,10 +364,10 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[2m\u001B[36m(pid=50964)\u001B[0m {'eval_loss': 0.5942569971084595, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10434782608695652}\n",
-"\u001B[2m\u001B[36m(pid=50964)\u001B[0m {'eval_loss': 0.5942569971084595, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10434782608695652}\n",
-"\u001B[2m\u001B[36m(pid=50948)\u001B[0m {'eval_loss': 0.649192214012146, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.2}\n",
-"\u001B[2m\u001B[36m(pid=50948)\u001B[0m {'eval_loss': 0.649192214012146, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.2}\n"
+"\u001b[2m\u001b[36m(pid=50964)\u001b[0m {'eval_loss': 0.5942569971084595, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10434782608695652}\n",
+"\u001b[2m\u001b[36m(pid=50964)\u001b[0m {'eval_loss': 0.5942569971084595, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10434782608695652}\n",
+"\u001b[2m\u001b[36m(pid=50948)\u001b[0m {'eval_loss': 0.649192214012146, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.2}\n",
+"\u001b[2m\u001b[36m(pid=50948)\u001b[0m {'eval_loss': 0.649192214012146, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.2}\n"
 ]
 },
 {
@@ -485,12 +485,12 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[2m\u001B[36m(pid=54411)\u001B[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=54411)\u001B[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=54411)\u001B[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=54417)\u001B[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=54417)\u001B[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=54417)\u001B[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n"
+"\u001b[2m\u001b[36m(pid=54411)\u001b[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=54411)\u001b[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=54411)\u001b[0m {'eval_loss': 0.624100387096405, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=54417)\u001b[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=54417)\u001b[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=54417)\u001b[0m {'eval_loss': 0.5938675999641418, 'eval_accuracy': 0.7156862745098039, 'eval_f1': 0.8258258258258258, 'epoch': 0.5}\n"
 ]
 },
 {
@@ -590,18 +590,18 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[2m\u001B[36m(pid=57835)\u001B[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
-"\u001B[2m\u001B[36m(pid=57835)\u001B[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
-"\u001B[2m\u001B[36m(pid=57835)\u001B[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
-"\u001B[2m\u001B[36m(pid=57835)\u001B[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
-"\u001B[2m\u001B[36m(pid=57836)\u001B[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
-"\u001B[2m\u001B[36m(pid=57836)\u001B[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
-"\u001B[2m\u001B[36m(pid=57836)\u001B[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
-"\u001B[2m\u001B[36m(pid=57836)\u001B[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
-"\u001B[2m\u001B[36m(pid=57839)\u001B[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=57839)\u001B[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=57839)\u001B[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=57839)\u001B[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n"
+"\u001b[2m\u001b[36m(pid=57835)\u001b[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
+"\u001b[2m\u001b[36m(pid=57835)\u001b[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
+"\u001b[2m\u001b[36m(pid=57835)\u001b[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
+"\u001b[2m\u001b[36m(pid=57835)\u001b[0m {'eval_loss': 0.5822290778160095, 'eval_accuracy': 0.7058823529411765, 'eval_f1': 0.8181818181818181, 'epoch': 0.5043478260869565}\n",
+"\u001b[2m\u001b[36m(pid=57836)\u001b[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
+"\u001b[2m\u001b[36m(pid=57836)\u001b[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
+"\u001b[2m\u001b[36m(pid=57836)\u001b[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
+"\u001b[2m\u001b[36m(pid=57836)\u001b[0m {'eval_loss': 0.6087244749069214, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.10344827586206896}\n",
+"\u001b[2m\u001b[36m(pid=57839)\u001b[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=57839)\u001b[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=57839)\u001b[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=57839)\u001b[0m {'eval_loss': 0.5486209392547607, 'eval_accuracy': 0.7034313725490197, 'eval_f1': 0.8141321044546851, 'epoch': 0.5}\n"
 ]
 },
 {
@@ -701,21 +701,21 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"\u001B[2m\u001B[36m(pid=61251)\u001B[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=61251)\u001B[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=61251)\u001B[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=61251)\u001B[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=61251)\u001B[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
-"\u001B[2m\u001B[36m(pid=61255)\u001B[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
-"\u001B[2m\u001B[36m(pid=61255)\u001B[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
-"\u001B[2m\u001B[36m(pid=61255)\u001B[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
-"\u001B[2m\u001B[36m(pid=61255)\u001B[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
-"\u001B[2m\u001B[36m(pid=61255)\u001B[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
-"\u001B[2m\u001B[36m(pid=61236)\u001B[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
-"\u001B[2m\u001B[36m(pid=61236)\u001B[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
-"\u001B[2m\u001B[36m(pid=61236)\u001B[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
-"\u001B[2m\u001B[36m(pid=61236)\u001B[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
-"\u001B[2m\u001B[36m(pid=61236)\u001B[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n"
+"\u001b[2m\u001b[36m(pid=61251)\u001b[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=61251)\u001b[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=61251)\u001b[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=61251)\u001b[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=61251)\u001b[0m {'eval_loss': 0.6236899495124817, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.5}\n",
+"\u001b[2m\u001b[36m(pid=61255)\u001b[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
+"\u001b[2m\u001b[36m(pid=61255)\u001b[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
+"\u001b[2m\u001b[36m(pid=61255)\u001b[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
+"\u001b[2m\u001b[36m(pid=61255)\u001b[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
+"\u001b[2m\u001b[36m(pid=61255)\u001b[0m {'eval_loss': 0.6249027848243713, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.3}\n",
+"\u001b[2m\u001b[36m(pid=61236)\u001b[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
+"\u001b[2m\u001b[36m(pid=61236)\u001b[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
+"\u001b[2m\u001b[36m(pid=61236)\u001b[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
+"\u001b[2m\u001b[36m(pid=61236)\u001b[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n",
+"\u001b[2m\u001b[36m(pid=61236)\u001b[0m {'eval_loss': 0.6138392686843872, 'eval_accuracy': 0.6838235294117647, 'eval_f1': 0.8122270742358079, 'epoch': 0.20689655172413793}\n"
 ]
 },
 {
@@ -806,4 +806,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
+}
@@ -19,7 +19,7 @@
 "\n",
 "In this notebook, we demonstrate a basic use case of zero-shot AutoML with FLAML.\n",
 "\n",
-"FLAML requires `Python>=3.6`. To run this notebook example, please install flaml and openml:"
+"FLAML requires `Python>=3.7`. To run this notebook example, please install flaml and openml:"
 ]
 },
 {
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ __version__ = version["__version__"]
 install_requires = [
     "NumPy>=1.17.0rc1",
     "lightgbm>=2.3.1",
-    "xgboost>=0.90,<=1.3.3",
+    "xgboost>=0.90",
     "scipy>=1.4.1",
     "pandas>=1.1.4",
     "scikit-learn>=0.24",
@@ -110,5 +110,5 @@ setuptools.setup(
         "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
     ],
-    python_requires=">=3.6",
+    python_requires=">=3.7",
 )
@@ -85,6 +85,22 @@ class TestClassification(unittest.TestCase):
         )
         y = pd.Series([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
 
+        automl = AutoML()
+        automl_settings = {
+            "time_budget": 3,
+            "task": "classification",
+            "n_jobs": 1,
+            "estimator_list": ["xgboost", "catboost", "kneighbor"],
+            "eval_method": "cv",
+            "n_splits": 3,
+            "metric": "accuracy",
+            "log_training_metric": True,
+            # "verbose": 4,
+            "ensemble": True,
+        }
+        automl.fit(X, y, **automl_settings)
+        del automl
+
         automl = AutoML()
         automl_settings = {
             "time_budget": 6,
@@ -99,6 +115,7 @@ class TestClassification(unittest.TestCase):
             "ensemble": True,
         }
         automl.fit(X, y, **automl_settings)
+        del automl
 
         automl = AutoML()
         try:
@@ -121,21 +138,7 @@ class TestClassification(unittest.TestCase):
                 "n_concurrent_trials": n_concurrent_trials,
             }
             automl.fit(X, y, **automl_settings)
 
-        automl = AutoML()
-        automl_settings = {
-            "time_budget": 3,
-            "task": "classification",
-            "n_jobs": 1,
-            "estimator_list": ["xgboost", "catboost", "kneighbor"],
-            "eval_method": "cv",
-            "n_splits": 3,
-            "metric": "accuracy",
-            "log_training_metric": True,
-            # "verbose": 4,
-            "ensemble": True,
-        }
-        automl.fit(X, y, **automl_settings)
-        del automl
 
         automl = AutoML()
         automl_settings = {
@@ -151,6 +154,7 @@ class TestClassification(unittest.TestCase):
             "ensemble": True,
         }
         automl.fit(X, y, **automl_settings)
+        del automl
 
     def test_binary(self):
        automl_experiment = AutoML()
@@ -208,7 +212,7 @@ class TestClassification(unittest.TestCase):
         _ = automl_experiment.predict(fake_df)
 
     def test_sparse_matrix_xgboost(self):
-        automl_experiment = AutoML()
+        automl = AutoML()
         automl_settings = {
             "time_budget": 3,
             "metric": "ap",
@@ -223,15 +227,28 @@
         import xgboost as xgb
 
         callback = xgb.callback.TrainingCallback()
-        automl_experiment.fit(
+        automl.fit(
             X_train=X_train, y_train=y_train, callbacks=[callback], **automl_settings
         )
-        print(automl_experiment.predict(X_train))
-        print(automl_experiment.model)
-        print(automl_experiment.config_history)
-        print(automl_experiment.best_model_for_estimator("xgboost"))
-        print(automl_experiment.best_iteration)
-        print(automl_experiment.best_estimator)
+        print(automl.predict(X_train))
+        print(automl.model)
+        print(automl.config_history)
+        print(automl.best_model_for_estimator("xgboost"))
+        print(automl.best_iteration)
+        print(automl.best_estimator)
+
+        # test an old version of xgboost
+        import subprocess
+        import sys
+
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "install", "xgboost==1.3.3", "--user"]
+        )
+        automl = AutoML()
+        automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "install", "-U", "xgboost", "--user"]
+        )
 
     def test_ray_classification(self):
         X, y = load_breast_cancer(return_X_y=True)
@@ -353,5 +370,5 @@ class TestClassification(unittest.TestCase):
 
 
 if __name__ == "__main__":
-    unittest.main()
+    test = TestClassification()
+    test.test_preprocess()
@@ -24,9 +24,7 @@ def _easy_objective(use_raytune, config):
     return
 
 
-def test_tune(
-    smoke_test=True, externally_setup_searcher=False, use_ray=False, use_raytune=False
-):
+def test_tune(externally_setup_searcher=False, use_ray=False, use_raytune=False):
     from flaml import tune
     from flaml.searcher.blendsearch import BlendSearch
 
@@ -95,7 +93,7 @@ def test_tune(
         metric="mean_loss",
         mode="min",
         num_samples=10,
-        time_budget_s=5,
+        # time_budget_s=5,
         use_ray=use_ray,
         config=search_space,
     )
@@ -107,14 +105,14 @@ def test_tune(
 
 
 def test_reproducibility():
-    best_config_1 = test_tune(smoke_test=True)
-    best_config_2 = test_tune(smoke_test=True)
+    best_config_1 = test_tune()
+    best_config_2 = test_tune()
     print(best_config_1)
     print(best_config_2)
     assert best_config_1 == best_config_2, "flaml.tune not reproducible"
 
-    best_config_1 = test_tune(smoke_test=True, externally_setup_searcher=True)
-    best_config_2 = test_tune(smoke_test=True, externally_setup_searcher=True)
+    best_config_1 = test_tune(externally_setup_searcher=True)
+    best_config_2 = test_tune(externally_setup_searcher=True)
     print(best_config_1)
     print(best_config_2)
     assert (
@@ -1,6 +1,6 @@
 # Installation
 
-FLAML requires **Python version >= 3.6**. It can be installed from pip:
+FLAML requires **Python version >= 3.7**. It can be installed from pip:
 
 ```bash
 pip install flaml