Tracking callbacks

! [ -e /content ] && pip install -Uqq fastai  # upgrade fastai on colab
from __future__ import annotations
from fastai.basics import *
from fastai.callback.progress import *
from fastai.callback.fp16 import MixedPrecision
from nbdev.showdoc import *
from fastai.test_utils import *

Callbacks that make decisions depending on how a monitored metric/loss behaves.

TerminateOnNaNCallback -

class TerminateOnNaNCallback(Callback):
    "A `Callback` that terminates training if loss is NaN."
    order=-9
    def after_batch(self):
        "Test if `last_loss` is NaN and interrupts training."
        if torch.isinf(self.loss) or torch.isnan(self.loss): raise CancelFitException
learn = synth_learner()
learn.fit(10, lr=100, cbs=TerminateOnNaNCallback())
epoch train_loss valid_loss time
assert len(learn.recorder.losses) < 10 * len(learn.dls.train)
for l in learn.recorder.losses:
    assert not torch.isinf(l) and not torch.isnan(l) 

TrackerCallback -

class TrackerCallback(Callback):
    "A `Callback` that keeps track of the best value in `monitor`."
    order,remove_on_fetch,_only_train_loop = 60,True,True
    def __init__(self, 
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        if comp is None: comp = np.less if 'loss' in monitor or 'error' in monitor else np.greater
        if comp == np.less: min_delta *= -1
        self.monitor,self.comp,self.min_delta,self.reset_on_fit,self.best= monitor,comp,min_delta,reset_on_fit,None

    def before_fit(self):
        "Prepare the monitored value"
        self.run = not hasattr(self, "lr_finder") and not hasattr(self, "gather_preds")
        if self.reset_on_fit or self.best is None: self.best = float('inf') if self.comp == np.less else -float('inf')
        assert self.monitor in self.recorder.metric_names[1:]
        self.idx = list(self.recorder.metric_names[1:]).index(self.monitor)

    def after_epoch(self):
        "Compare the last value to the best up to now"
        val = self.recorder.values[-1][self.idx]
        if self.comp(val - self.min_delta, self.best): self.best,self.new_best = val,True
        else: self.new_best = False

    def after_fit(self): self.run=True

When implementing a `Callback` whose behavior depends on the best value of a metric or loss, subclass this `Callback` and use its `best` (best value so far) and `new_best` (there was a new best value this epoch) attributes. If you want to maintain `best` over subsequent calls to `fit` (e.g., `Learner.fit_one_cycle`), set `reset_on_fit` to `False`.

`comp` is the comparison operator used to determine if a value is better than another (defaults to `np.less` if the name passed in `monitor` contains 'loss' or 'error', `np.greater` otherwise) and `min_delta` is an optional float that requires a new value to go over the current best (depending on `comp`) by at least that amount.
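For instance, here is a minimal sketch of such a subclass (the name `LogBestCallback` is hypothetical, not part of fastai): it simply calls `super().after_epoch()` to let `TrackerCallback` update `best`, then checks `new_best`.

class LogBestCallback(TrackerCallback):
    "A sketch: report whenever the monitored value reaches a new best."
    order = TrackerCallback.order+1  # run after `TrackerCallback` has done its bookkeeping
    def after_epoch(self):
        super().after_epoch()  # updates `self.best` and `self.new_best`
        if self.new_best: print(f'New best {self.monitor}: {self.best}')

learn = synth_learner(n_trn=2)
learn.fit(2, cbs=LogBestCallback(monitor='valid_loss'))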

class FakeRecords(Callback):
    order=51
    def __init__(self, monitor, values): self.monitor,self.values = monitor,values
        
    def before_fit(self):   self.idx = list(self.recorder.metric_names[1:]).index(self.monitor)
    def after_epoch(self): self.recorder.values[-1][self.idx] = self.values[self.epoch]
        
class TestTracker(Callback):
    order=61
    def before_fit(self): self.bests,self.news = [],[]
    def after_epoch(self): 
        self.bests.append(self.tracker.best)
        self.news.append(self.tracker.new_best)
learn = synth_learner(n_trn=2, cbs=TestTracker())
cbs=[TrackerCallback(monitor='valid_loss'), FakeRecords('valid_loss', [0.2,0.1])]
with learn.no_logging(): learn.fit(2, cbs=cbs)
test_eq(learn.test_tracker.bests, [0.2, 0.1])
test_eq(learn.test_tracker.news,  [True,True])

# With a min_delta
cbs=[TrackerCallback(monitor='valid_loss', min_delta=0.15), FakeRecords('valid_loss', [0.2,0.1])]
with learn.no_logging(): learn.fit(2, cbs=cbs)
test_eq(learn.test_tracker.bests, [0.2, 0.2])
test_eq(learn.test_tracker.news,  [True,False])
# By default metrics have to increase at every epoch.
def tst_metric(out,targ): return F.mse_loss(out,targ)
learn = synth_learner(n_trn=2, cbs=TestTracker(), metrics=tst_metric)
cbs=[TrackerCallback(monitor='tst_metric'), FakeRecords('tst_metric', [0.2,0.1])]
with learn.no_logging(): learn.fit(2, cbs=cbs)
test_eq(learn.test_tracker.bests, [0.2, 0.2])
test_eq(learn.test_tracker.news,  [True,False])

# This can be overwritten by passing `comp=np.less`.
learn = synth_learner(n_trn=2, cbs=TestTracker(), metrics=tst_metric)
cbs=[TrackerCallback(monitor='tst_metric', comp=np.less), FakeRecords('tst_metric', [0.2,0.1])]
with learn.no_logging(): learn.fit(2, cbs=cbs)
test_eq(learn.test_tracker.bests, [0.2, 0.1])
test_eq(learn.test_tracker.news,  [True,True])
#hide
#Setting reset_on_fit=False will maintain the "best" value over subsequent calls to fit
learn = synth_learner(n_val=2, cbs=TrackerCallback(monitor='tst_metric', reset_on_fit=False), metrics=tst_metric)
tracker_cb = learn.cbs.filter(lambda cb: isinstance(cb, TrackerCallback))[0]
with learn.no_logging(): learn.fit(1)
first_best = tracker_cb.best
with learn.no_logging(): learn.fit(1)
test_eq(tracker_cb.best, first_best)
# A tracker callback is not run during an lr_find
from fastai.callback.schedule import *
learn = synth_learner(n_trn=2, cbs=TrackerCallback(monitor='tst_metric'), metrics=tst_metric)
learn.lr_find(num_it=15, show_plot=False)
assert not hasattr(learn, 'new_best')

EarlyStoppingCallback -

class EarlyStoppingCallback(TrackerCallback):
    "A `TrackerCallback` that terminates training when monitored quantity stops improving."
    order=TrackerCallback.order+3
    def __init__(self, 
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        patience=1, # number of epochs to wait when training has not improved model.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        self.patience = patience

    def before_fit(self): self.wait = 0; super().before_fit()
    def after_epoch(self):
        "Compare the value monitored to its best score and maybe stop training."
        super().after_epoch()
        if self.new_best: self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                print(f'No improvement since epoch {self.epoch-self.wait}: early stopping')
                raise CancelFitException()

`comp` is the comparison operator used to determine if a value is better than another (defaults to `np.less` if the name passed in `monitor` contains 'loss' or 'error', `np.greater` otherwise) and `min_delta` is an optional float that requires a new value to go over the current best (depending on `comp`) by at least that amount. `patience` is the number of epochs you're willing to wait without improvement.

learn = synth_learner(n_trn=2, metrics=F.mse_loss)
learn.fit(n_epoch=200, lr=1e-7, cbs=EarlyStoppingCallback(monitor='mse_loss', min_delta=0.1, patience=2))
epoch train_loss valid_loss mse_loss time
0 20.437918 26.406773 26.406773 00:00
1 20.418514 26.406715 26.406715 00:00
2 20.410892 26.406639 26.406639 00:00
No improvement since epoch 0: early stopping
learn.validate()
(#2) [26.406639099121094,26.406639099121094]
learn = synth_learner(n_trn=2)
learn.fit(n_epoch=200, lr=1e-7, cbs=EarlyStoppingCallback(monitor='valid_loss', min_delta=0.1, patience=2))
epoch train_loss valid_loss time
0 13.408870 19.617222 00:00
1 13.403553 19.617184 00:00
2 13.403143 19.617126 00:00
No improvement since epoch 0: early stopping
test_eq(len(learn.recorder.values), 3)

SaveModelCallback -

class SaveModelCallback(TrackerCallback):
    "A `TrackerCallback` that saves the model's best during training and loads it at the end."
    order = TrackerCallback.order+1
    def __init__(self, 
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        fname='model', # model name to be used when saving model.
        every_epoch=False, # if true, save model after every epoch; else save only when model is better than existing best.
        at_end=False, # if true, save model when training ends; else load best model if there is only one saved model.
        with_opt=False, # if true, save optimizer state (if any available) when saving model. 
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        assert not (every_epoch and at_end), "every_epoch and at_end cannot both be set to True"
        # keep track of file path for loggers
        self.last_saved_path = None
        store_attr('fname,every_epoch,at_end,with_opt')

    def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)

    def after_epoch(self):
        "Compare the value monitored to its best score and save if best."
        if self.every_epoch:
            if (self.epoch%self.every_epoch) == 0: self._save(f'{self.fname}_{self.epoch}')
        else: #every improvement
            super().after_epoch()
            if self.new_best:
                print(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.')
                self._save(f'{self.fname}')

    def after_fit(self, **kwargs):
        "Load the best model."
        if self.at_end: self._save(f'{self.fname}')
        elif not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt)

`comp` is the comparison operator used to determine if a value is better than another (defaults to `np.less` if the name passed in `monitor` contains 'loss' or 'error', `np.greater` otherwise) and `min_delta` is an optional float that requires a new value to go over the current best (depending on `comp`) by at least that amount. The model will be saved in `learn.path/learn.model_dir/name.pth`, maybe every epoch if `every_epoch` is `True`, every nth epoch if an integer is passed to `every_epoch`, or at each improvement of the monitored quantity.

learn = synth_learner(n_trn=2, path=Path.cwd()/'tmp')
learn.fit(n_epoch=2, cbs=SaveModelCallback())
assert (Path.cwd()/'tmp/models/model.pth').exists()
epoch train_loss valid_loss time
0 19.453270 12.539286 00:00
1 19.248507 12.123456 00:00
Better model found at epoch 0 with valid_loss value: 12.539285659790039.
Better model found at epoch 1 with valid_loss value: 12.123456001281738.
learn = synth_learner(n_trn=2, path=Path.cwd()/'tmp')
learn.fit(n_epoch=2, cbs=SaveModelCallback(fname='end',at_end=True))
assert (Path.cwd()/'tmp/models/end.pth').exists()
epoch train_loss valid_loss time
0 5.197007 5.579152 00:00
1 5.154862 5.445522 00:00
Better model found at epoch 0 with valid_loss value: 5.5791521072387695.
Better model found at epoch 1 with valid_loss value: 5.445522308349609.
learn.fit(n_epoch=2, cbs=SaveModelCallback(every_epoch=True))
for i in range(2): assert (Path.cwd()/f'tmp/models/model_{i}.pth').exists()
shutil.rmtree(Path.cwd()/'tmp')
epoch train_loss valid_loss time
0 4.982775 5.264440 00:00
1 4.887252 5.038480 00:00
learn.fit(n_epoch=4, cbs=SaveModelCallback(every_epoch=2))
for i in range(4): 
    if not i%2: assert (Path.cwd()/f'tmp/models/model_{i}.pth').exists()
    else:       assert not (Path.cwd()/f'tmp/models/model_{i}.pth').exists()
shutil.rmtree(Path.cwd()/'tmp')
epoch train_loss valid_loss time
0 4.578584 4.781651 00:00
1 4.454868 4.507101 00:00
2 4.322047 4.232390 00:00
3 4.186467 3.957614 00:00
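Because `every_epoch=n` writes numbered checkpoints following the `{fname}_{epoch}` pattern used by `_save`, a specific one can be restored afterwards with `Learner.load`. A minimal sketch under the same setup as above:

learn = synth_learner(n_trn=2, path=Path.cwd()/'tmp')
learn.fit(n_epoch=4, cbs=SaveModelCallback(every_epoch=2))
learn = learn.load('model_2', with_opt=False)  # restore the checkpoint saved at epoch 2
shutil.rmtree(Path.cwd()/'tmp')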

ReduceLROnPlateau -

class ReduceLROnPlateau(TrackerCallback):
    "A `TrackerCallback` that reduces learning rate when a metric has stopped improving."
    order=TrackerCallback.order+2
    def __init__(self, 
        monitor='valid_loss', # value (usually loss or metric) being monitored.
        comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
        min_delta=0., # minimum delta between the last monitor value and the best monitor value.
        patience=1, # number of epochs to wait when training has not improved model.
        factor=10., # the denominator to divide the learning rate by, when reducing it.
        min_lr=0, # the minimum learning rate allowed; learning rate cannot be reduced below this minimum.
        reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
    ):
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        self.patience,self.factor,self.min_lr = patience,factor,min_lr

    def before_fit(self): self.wait = 0; super().before_fit()
    def after_epoch(self):
        "Compare the value monitored to its best score and reduce LR by `factor` if no improvement."
        super().after_epoch()
        if self.new_best: self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                old_lr = self.opt.hypers[-1]['lr']
                for h in self.opt.hypers: h['lr'] = max(h['lr'] / self.factor, self.min_lr)
                self.wait = 0
                if self.opt.hypers[-1]["lr"] < old_lr:
                    print(f'Epoch {self.epoch}: reducing lr to {self.opt.hypers[-1]["lr"]}')
learn = synth_learner(n_trn=2)
learn.fit(n_epoch=4, lr=1e-7, cbs=ReduceLROnPlateau(monitor='valid_loss', min_delta=0.1, patience=2))
epoch train_loss valid_loss time
0 6.122743 7.348515 00:00
1 6.119377 7.348499 00:00
2 6.125790 7.348477 00:00
3 6.131386 7.348475 00:00
Epoch 2: reducing lr to 1e-08
test_eq(learn.opt.hypers[-1]['lr'], 1e-8)
learn = synth_learner(n_trn=2)
learn.fit(n_epoch=6, lr=5e-8, cbs=ReduceLROnPlateau(monitor='valid_loss', min_delta=0.1, patience=2, min_lr=1e-8))
epoch train_loss valid_loss time
0 16.747515 15.265999 00:00
1 16.725756 15.265974 00:00
2 16.735016 15.265943 00:00
3 16.733360 15.265934 00:00
4 16.733513 15.265925 00:00
5 16.730352 15.265915 00:00
Epoch 2: reducing lr to 1e-08
test_eq(learn.opt.hypers[-1]['lr'], 1e-8)

All three derived `TrackerCallback`s (`SaveModelCallback`, `ReduceLROnPlateau`, and `EarlyStoppingCallback`) have adjusted orders so that they can run alongside each other without interference. That order is as follows:

Note

In parentheses is the actual `Callback` order number.

  1. TrackerCallback (60)
  2. SaveModelCallback (61)
  3. ReduceLROnPlateau (62)
  4. EarlyStoppingCallback (63)
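Thanks to this ordering, the three can be combined in a single call to `fit`; here is a minimal sketch (the hyperparameter values are illustrative only):

learn = synth_learner(n_trn=2, path=Path.cwd()/'tmp')
learn.fit(n_epoch=20, lr=1e-7, cbs=[
    SaveModelCallback(monitor='valid_loss'),                                 # order 61: checkpoint each new best
    ReduceLROnPlateau(monitor='valid_loss', min_delta=0.1, patience=2),      # order 62: lower the lr on plateaus
    EarlyStoppingCallback(monitor='valid_loss', min_delta=0.1, patience=4),  # order 63: stop when training stalls
])
shutil.rmtree(Path.cwd()/'tmp')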

Export -

from nbdev import nbdev_export
nbdev_export()
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 36_text.models.qrnn.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 71_callback.tensorboard.ipynb.
Converted 72_callback.neptune.ipynb.
Converted 73_callback.captum.ipynb.
Converted 74_callback.azureml.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.