example update (#281)

* example update

* bump version to 0.7.2

* notebook update
Chi Wang 2021-11-12 22:29:33 -08:00 committed by GitHub
parent 92ebd1f7f9
commit 59083fbdcb
3 changed files with 77 additions and 79 deletions

Changed file 1 of 3:

@@ -88,7 +88,7 @@ automl_settings = {
     "time_budget": 10, # in seconds
     "metric": 'accuracy',
     "task": 'classification',
-    "log_file_name": "test/iris.log",
+    "log_file_name": "iris.log",
 }
 X_train, y_train = load_iris(return_X_y=True)
 # Train with labeled input data
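
For context, a self-contained version of the classification quickstart edited in this hunk might look like the sketch below; only the settings dict and the predict/estimator calls appear in the diff, so the imports and the `AutoML()` construction are assumed from FLAML's usual usage pattern.

```python
# A minimal sketch, assuming flaml and scikit-learn are installed.
# Only automl_settings and the predict/estimator lines mirror the diff above;
# the rest is assumed boilerplate.
from flaml import AutoML
from sklearn.datasets import load_iris

automl = AutoML()
automl_settings = {
    "time_budget": 10,  # in seconds
    "metric": 'accuracy',
    "task": 'classification',
    "log_file_name": "iris.log",  # now relative to the working directory
}
X_train, y_train = load_iris(return_X_y=True)
# Train with labeled input data
automl.fit(X_train=X_train, y_train=y_train, **automl_settings)
# Predict and inspect the best model found within the budget
print(automl.predict(X_train))
print(automl.model.estimator)
```
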
@@ -112,7 +112,7 @@ automl_settings = {
     "time_budget": 10, # in seconds
     "metric": 'r2',
     "task": 'regression',
-    "log_file_name": "test/california.log",
+    "log_file_name": "california.log",
 }
 X_train, y_train = fetch_california_housing(return_X_y=True)
 # Train with labeled input data
@@ -124,7 +124,7 @@ print(automl.predict(X_train))
 print(automl.model.estimator)
 ```
-* Time series forecasting.
+* A basic time series forecasting example.
 ```python
 # pip install flaml[ts_forecast]
@@ -137,7 +137,7 @@ automl.fit(X_train=X_train[:72], # a single column of timestamp
            y_train=y_train, # value for each timestamp
            period=12, # time horizon to forecast, e.g., 12 months
            task='ts_forecast', time_budget=15, # time budget in seconds
-           log_file_name="test/ts_forecast.log",
+           log_file_name="ts_forecast.log",
 )
 print(automl.predict(X_train[72:]))
 ```
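
The forecasting hunk only shows the `fit` call, so a runnable version has to supply its own data. A minimal sketch is below; the synthetic monthly series and the pandas timestamp column are illustrative assumptions, while the `fit`/`predict` calls mirror the example edited above.

```python
# A minimal sketch, assuming flaml[ts_forecast] is installed.
# The synthetic monthly series is illustrative and not part of the diff.
import numpy as np
import pandas as pd
from flaml import AutoML

# 84 monthly timestamps: 72 for training, 12 to forecast
X_train = pd.DataFrame({"timestamp": pd.date_range("2014-01-01", periods=84, freq="MS")})
y_train = pd.Series(np.sin(np.arange(84) / 12 * 2 * np.pi) + 0.01 * np.arange(84))

automl = AutoML()
automl.fit(X_train=X_train[:72],  # a single column of timestamp
           y_train=y_train[:72],  # value for each timestamp
           period=12,  # time horizon to forecast, e.g., 12 months
           task='ts_forecast', time_budget=15,  # time budget in seconds
           log_file_name="ts_forecast.log",
           )
print(automl.predict(X_train[72:]))
```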

Changed file 2 of 3:

@@ -1 +1 @@
-__version__ = "0.7.1"
+__version__ = "0.7.2"
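
After upgrading, the new release can be confirmed from Python; this assumes the package re-exports `__version__` from its version module, as recent releases do.

```python
# Quick check of the installed release (after `pip install -U flaml`)
import flaml
print(flaml.__version__)  # expected: 0.7.2
```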

Changed file 3 of 3:

@@ -2,35 +2,37 @@
  "cells": [
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# Pytorch model tuning example on CIFAR10\n",
     "This notebook uses flaml to tune a pytorch model on CIFAR10. It is modified based on [this example](https://docs.ray.io/en/master/tune/examples/cifar10_pytorch.html).\n",
     "\n",
     "**Requirements.** This notebook requires:"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "source": [
-    "!pip install torchvision flaml[blendsearch,ray];"
-   ],
-   "outputs": [],
    "metadata": {
     "tags": []
-   }
+   },
+   "outputs": [],
+   "source": [
+    "!pip install torchvision flaml[blendsearch,ray];"
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Network Specification"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "import torch\n",
     "import torch.nn as nn\n",
@@ -60,20 +62,20 @@
     "        x = F.relu(self.fc2(x))\n",
     "        x = self.fc3(x)\n",
     "        return x"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Data"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "def load_data(data_dir=\"data\"):\n",
     "    transform = transforms.Compose([\n",
@@ -88,20 +90,20 @@
     "        root=data_dir, train=False, download=True, transform=transform)\n",
     "\n",
     "    return trainset, testset"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Training"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "from ray import tune\n",
     "\n",
@@ -199,20 +201,20 @@
     "\n",
     "        tune.report(loss=(val_loss / val_steps), accuracy=correct / total)\n",
     "    print(\"Finished Training\")"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Test Accuracy"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "def _test_accuracy(net, device=\"cpu\"):\n",
     "    trainset, testset = load_data()\n",
@@ -232,41 +234,41 @@
     "            correct += (predicted == labels).sum().item()\n",
     "\n",
     "    return correct / total"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Hyperparameter Optimization"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "import numpy as np\n",
     "import flaml\n",
-    "import ray\n",
+    "import os\n",
     "\n",
     "data_dir = os.path.abspath(\"data\")\n",
     "load_data(data_dir) # Download data for all trials before starting the run"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "### Search space"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "max_num_epoch = 100\n",
     "config = {\n",
@@ -276,32 +278,32 @@
     "    \"num_epochs\": tune.loguniform(1, max_num_epoch),\n",
     "    \"batch_size\": tune.randint(1, 5) # log transformed with base 2\n",
     "}"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "time_budget_s = 600 # time budget in seconds\n",
     "gpus_per_trial = 0.5 # number of gpus for each trial; 0.5 means two training jobs can share one gpu\n",
     "num_samples = 500 # maximal number of trials\n",
     "np.random.seed(7654321)"
-   ],
-   "outputs": [],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "### Launch the tuning"
-   ],
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
    "source": [
     "import time\n",
     "start_time = time.time()\n",
@@ -318,16 +320,29 @@
     "    local_dir='logs/',\n",
     "    num_samples=num_samples,\n",
     "    time_budget_s=time_budget_s,\n",
-    "    use_ray=True)\n",
-    "\n",
-    "ray.shutdown()"
-   ],
-   "outputs": [],
-   "metadata": {}
+    "    use_ray=True)\n"
+   ]
   },
   {
    "cell_type": "code",
    "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "#trials=44\n",
+      "time=1193.913584947586\n",
+      "Best trial config: {'l1': 8, 'l2': 8, 'lr': 0.0008818671030627281, 'num_epochs': 55.9513429004283, 'batch_size': 3}\n",
+      "Best trial final validation loss: 1.0694482081472874\n",
+      "Best trial final validation accuracy: 0.6389\n",
+      "Files already downloaded and verified\n",
+      "Files already downloaded and verified\n",
+      "Best trial test set accuracy: 0.6294\n"
+     ]
+    }
+   ],
    "source": [
     "print(f\"#trials={len(result.trials)}\")\n",
     "print(f\"time={time.time()-start_time}\")\n",
@@ -354,30 +369,16 @@
     "\n",
     "test_acc = _test_accuracy(best_trained_model, device)\n",
     "print(\"Best trial test set accuracy: {}\".format(test_acc))"
-   ],
-   "outputs": [
-    {
-     "output_type": "stream",
-     "name": "stdout",
-     "text": [
-      "#trials=44\n",
-      "time=1193.913584947586\n",
-      "Best trial config: {'l1': 8, 'l2': 8, 'lr': 0.0008818671030627281, 'num_epochs': 55.9513429004283, 'batch_size': 3}\n",
-      "Best trial final validation loss: 1.0694482081472874\n",
-      "Best trial final validation accuracy: 0.6389\n",
-      "Files already downloaded and verified\n",
-      "Files already downloaded and verified\n",
-      "Best trial test set accuracy: 0.6294\n"
-     ]
-    }
-   ],
-   "metadata": {}
+   ]
   }
  ],
  "metadata": {
+  "interpreter": {
+   "hash": "f7771e6a3915580179405189f5aa4eb9047494cbe4e005b29b851351b54902f6"
+  },
   "kernelspec": {
-   "name": "python3",
-   "display_name": "Python 3.8.10 64-bit ('venv': venv)"
+   "display_name": "Python 3.8.10 64-bit ('venv': venv)",
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -389,15 +390,12 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.10"
+   "version": "3.7.12"
   },
   "metadata": {
    "interpreter": {
     "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
    }
-  },
-  "interpreter": {
-   "hash": "f7771e6a3915580179405189f5aa4eb9047494cbe4e005b29b851351b54902f6"
-  }
+  }
  },
 "nbformat": 4,