Tuning
from ngboost import NGBClassifier, NGBRegressor
from ngboost.distns import k_categorical, Normal
from ngboost.scores import LogScore

from sklearn.datasets import load_breast_cancer, load_boston
from sklearn.model_selection import train_test_split

# note: load_boston was deprecated and later removed in scikit-learn 1.2,
# so this example assumes an older scikit-learn version
X, Y = load_boston(return_X_y=True)
X_reg_train, X_reg_test, Y_reg_train, Y_reg_test = train_test_split(X, Y, test_size=0.2)

X, y = load_breast_cancer(return_X_y=True)
y[0:15] = 2  # artificially make this a 3-class problem instead of a 2-class problem
X_cls_train, X_cls_test, Y_cls_train, Y_cls_test = train_test_split(X, y, test_size=0.2)

Staged Prediction

All fit NGBoost objects support staged prediction.

ngb_cls = NGBClassifier(Dist=k_categorical(3), Score=LogScore, n_estimators=500, verbose=False).fit(X_cls_train, Y_cls_train)

For instance, to get the predictions on the first 5 examples after fitting 415 base learners, use:

preds = ngb_cls.staged_predict(X_cls_test)
preds[415][0:5]
array([0, 0, 1, 1, 1])
pred_dists = ngb_cls.staged_pred_dist(X_cls_test)
pred_dists[415][0:5].params
{'p0': array([0.99074995, 0.91368635, 0.00517919, 0.00517919, 0.00517919]),
 'p1': array([0.00860966, 0.03267806, 0.99450359, 0.99450359, 0.99450359]),
 'p2': array([0.00064039, 0.05363559, 0.00031722, 0.00031722, 0.00031722])}
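
Because staged_predict returns one prediction array per boosting stage, you can also compute a metric at every stage yourself. A minimal sketch (the misclassification-rate metric and variable names are purely illustrative):

import numpy as np

stage_errors = [np.mean(p != Y_cls_test) for p in ngb_cls.staged_predict(X_cls_test)]  # error rate per stage
best_stage = int(np.argmin(stage_errors))  # stage with the lowest held-out error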

This is useful in conjunction with tracking errors on a validation set, which you can do by passing the X_val and Y_val arguments to fit() and then inspecting the .best_val_loss_itr instance attribute.

ngb = NGBRegressor()
ngb.fit(X_reg_train, Y_reg_train, X_val=X_reg_test, Y_val=Y_reg_test) # use a validation set instead of test set here in your own work
print(ngb.best_val_loss_itr)
best_preds = ngb.predict(X_reg_test, max_iter=ngb.best_val_loss_itr)
[iter 0] loss=3.6556 val_loss=3.5575 scale=0.5000 norm=3.4142
[iter 100] loss=3.1118 val_loss=3.1284 scale=1.0000 norm=3.9174
[iter 200] loss=2.4839 val_loss=2.6398 scale=2.0000 norm=4.0907
[iter 300] loss=2.0183 val_loss=2.7162 scale=1.0000 norm=1.5637
[iter 400] loss=1.8111 val_loss=3.1315 scale=1.0000 norm=1.3983
244
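
If you want the full predictive distribution at the best iteration rather than just point predictions, a sketch, assuming pred_dist accepts the same max_iter argument that predict does (the NLL computation follows the usual NGBoost evaluation pattern):

best_dists = ngb.pred_dist(X_reg_test, max_iter=ngb.best_val_loss_itr)  # distributions at the best iteration
best_NLL = -best_dists.logpdf(Y_reg_test).mean()  # negative log-likelihood at the best iteration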

Early Stopping

NGBoost also features early stopping. If an integer early_stopping_rounds and a validation set (X_val, Y_val) are passed to fit(), the algorithm will stop running after the validation loss has increased for early_stopping_rounds consecutive iterations.

_ = NGBRegressor().fit(X_reg_train, Y_reg_train, X_val=X_reg_test, Y_val=Y_reg_test, early_stopping_rounds=2)
[iter 0] loss=3.6556 val_loss=3.5575 scale=0.5000 norm=3.4142
[iter 100] loss=3.1118 val_loss=3.1292 scale=1.0000 norm=3.9174
[iter 200] loss=2.4839 val_loss=2.6422 scale=2.0000 norm=4.0907
== Early stopping achieved.
== Best iteration / VAL234 (val_loss=2.5693)

Sample weights for the validation set may be passed to fit() using the val_sample_weight argument.
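
For example, a sketch with uniform validation weights (illustrative only; in practice the weights would encode something like recency or example importance):

import numpy as np

w_val = np.ones(len(Y_reg_test))  # illustrative: equal weight on every validation example
_ = NGBRegressor(verbose=False).fit(
    X_reg_train, Y_reg_train,
    X_val=X_reg_test, Y_val=Y_reg_test,
    val_sample_weight=w_val,
    early_stopping_rounds=2,
)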

Using sklearn Model Selection

sklearn methods are compatible with NGBoost.

from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeRegressor

b1 = DecisionTreeRegressor(criterion='friedman_mse', max_depth=2)
b2 = DecisionTreeRegressor(criterion='friedman_mse', max_depth=4)

param_grid = {
    'minibatch_frac': [1.0, 0.5],
    'Base': [b1, b2]
}

ngb = NGBRegressor(Dist=Normal, verbose=False)

grid_search = GridSearchCV(ngb, param_grid=param_grid, cv=5)
grid_search.fit(X_reg_train, Y_reg_train)
print(grid_search.best_params_)
/usr/local/lib/python3.7/site-packages/sklearn/model_selection/_search.py:814: DeprecationWarning: The default of the `iid` parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.
  DeprecationWarning)
{'Base': DecisionTreeRegressor(criterion='friedman_mse', max_depth=4, max_features=None,
                      max_leaf_nodes=None, min_impurity_decrease=0.0,
                      min_impurity_split=None, min_samples_leaf=1,
                      min_samples_split=2, min_weight_fraction_leaf=0.0,
                      presort=False, random_state=None, splitter='best'), 'minibatch_frac': 1.0}
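
Since GridSearchCV refits the winning configuration on the full training set by default (refit=True), the tuned model is available directly. A short sketch:

best_ngb = grid_search.best_estimator_
best_point_preds = best_ngb.predict(X_reg_test)  # point predictions from the tuned model
best_dists = best_ngb.pred_dist(X_reg_test)      # full predictive distributions, if needed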