Noctus contribute (#961)

* indentation space #884 changing indentation of python code blocks

* indentation space #884 changing indentation of python code blocks 2
Ayesh Vininda 2023-04-03 07:20:44 +05:30 committed by GitHub
parent 45641000c0
commit 5616ce0a54
1 changed file with 34 additions and 34 deletions

@@ -144,36 +144,36 @@ import rgf
class MyRegularizedGreedyForest(SKLearnEstimator):
    def __init__(self, task="binary", **config):
        super().__init__(task, **config)

        if task in CLASSIFICATION:
            from rgf.sklearn import RGFClassifier

            self.estimator_class = RGFClassifier
        else:
            from rgf.sklearn import RGFRegressor

            self.estimator_class = RGFRegressor

    @classmethod
    def search_space(cls, data_size, task):
        space = {
            "max_leaf": {
                "domain": tune.lograndint(lower=4, upper=data_size),
                "low_cost_init_value": 4,
            },
            "n_iter": {
                "domain": tune.lograndint(lower=1, upper=data_size),
                "low_cost_init_value": 1,
            },
            "learning_rate": {"domain": tune.loguniform(lower=0.01, upper=20.0)},
            "min_samples_leaf": {
                "domain": tune.lograndint(lower=1, upper=20),
                "init_value": 20,
            },
        }
        return space
```
In the constructor, we set `self.estimator_class` to `RGFClassifier` or `RGFRegressor` according to the task type. If the estimator you want to tune does not have a scikit-learn style `fit()` and `predict()` API, you can override the `fit()` and `predict()` functions of `flaml.model.BaseEstimator`, as [XGBoostEstimator](../reference/automl/model#xgboostestimator-objects) does. Importantly, we also add the `task="binary"` parameter to the signature of `__init__` so that it doesn't get grouped together with the `**config` kwargs that determine the parameters with which the underlying estimator (`self.estimator_class`) is constructed. If your estimator doesn't use one of the parameters it is passed (for example, some regressors in `scikit-learn` don't use the `n_jobs` parameter), it is enough to add `n_jobs=None` to the signature so that it is captured there and ignored by the `**config` dict.
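As a minimal sketch of that last point (the `MyKernelRidge` wrapper and the choice of `KernelRidge` are illustrative assumptions, not part of this diff), absorbing `n_jobs` in the signature keeps it out of `**config`:

```python
from flaml.automl.model import SKLearnEstimator
from sklearn.kernel_ridge import KernelRidge  # KernelRidge has no n_jobs parameter


class MyKernelRidge(SKLearnEstimator):
    """Hypothetical wrapper for an estimator that does not accept n_jobs."""

    def __init__(self, task="regression", n_jobs=None, **config):
        # n_jobs is captured by the explicit parameter above, so it never
        # lands in **config and is not forwarded to KernelRidge.
        super().__init__(task, **config)
        self.estimator_class = KernelRidge
```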
@@ -208,18 +208,18 @@ from flaml.automl.model import XGBoostEstimator
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess


class MyXGB1(XGBoostEstimator):
    """XGBoostEstimator with logregobj as the objective function"""

    def __init__(self, **config):
        super().__init__(objective=logregobj, **config)
```
We override the constructor and set the training objective to the custom function `logregobj`; the hyperparameters and their search ranges do not change. A registration sketch follows below. For another example,
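Here is that registration sketch, hedged: `add_learner` is FLAML's `AutoML` API for plugging in a custom estimator, while `X_train`/`y_train` and the time budget are placeholder assumptions, not values from this diff.

```python
from flaml import AutoML

automl = AutoML()
# Register the custom estimator under a name usable in estimator_list.
automl.add_learner(learner_name="my_xgb1", learner_class=MyXGB1)
# X_train / y_train are assumed to be loaded already.
automl.fit(
    X_train,
    y_train,
    task="binary",
    estimator_list=["my_xgb1"],
    time_budget=30,
)
```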