Wandb

! [ -e /content ] && pip install -Uqq fastai  # upgrade fastai on Colab
from __future__ import annotations
from fastai.basics import *
from fastai.callback.progress import *
from fastai.text.data import TensorText
from fastai.tabular.all import TabularDataLoaders, Tabular
from fastai.callback.hook import total_params
from fastai.callback.tracker import SaveModelCallback
from nbdev.showdoc import *

Integration with Weights & Biases

First of all, you need to install wandb with:

pip install wandb

Then create a free account and run

wandb login

in your terminal. Follow the link to get the API token you will need to paste, and you're all set!
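If you need to authenticate without the interactive prompt (on a CI machine, for example), a minimal sketch follows; `WANDB_API_KEY` and `wandb.login()` are standard wandb mechanisms, and the token value shown is a placeholder:

import os
import wandb

# Non-interactive authentication: wandb reads the key from the
# WANDB_API_KEY environment variable (set it in your shell or CI secrets)
os.environ.setdefault('WANDB_API_KEY', '<your-api-token>')  # placeholder, not a real token
wandb.login()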

import wandb
class WandbCallback(Callback):
    "Saves model topology, losses & metrics"
    remove_on_fetch,order = True,Recorder.order+1
    # Record if watch has been called previously (even in another instance)
    _wandb_watch_called = False

    def __init__(self,
                 log:str=None, # What to log (can be `gradients`, `parameters`, `all` or None)
                 log_preds:bool=True, # Whether to log model predictions on a `wandb.Table`
                 log_preds_every_epoch:bool=False, # Whether to log predictions every epoch or at the end
                 log_model:bool=False, # Whether to save the model checkpoint to a `wandb.Artifact`
                 model_name:str=None, # The name of the `model_name` to save, overrides `SaveModelCallback`
                 log_dataset:bool=False, # Whether to log the dataset to a `wandb.Artifact`
                 dataset_name:str=None, # A name to log the dataset with
                 valid_dl:TfmdDL=None, # If `log_preds=True`, then the samples will be drawn from `valid_dl`
                 n_preds:int=36, # How many samples to log predictions
                 seed:int=12345, # The seed of the samples drawn
                 reorder=True):
        store_attr()
    
    def after_create(self):
        # log model
        if self.log_model:
            if not hasattr(self, 'save_model'):
                # does not use SaveModelCallback
                self.learn.add_cb(SaveModelCallback(fname=ifnone(self.model_name, 'model')))
            else:
                # override SaveModelCallback
                if self.model_name is not None:
                    self.save_model.fname = self.model_name
            
    def before_fit(self):
        "Call watch method to log model topology, gradients & weights"
        # Check if wandb.init has been called
        if wandb.run is None:
            raise ValueError('You must call wandb.init() before WandbCallback()')
        # W&B log step
        self._wandb_step = wandb.run.step - 1  # -1 except if the run has previously logged data (incremented at each batch)
        self._wandb_epoch = 0 if not(wandb.run.step) else math.ceil(wandb.run.summary['epoch']) # continue to next epoch
        
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
        if not self.run: return

        # Log config parameters
        log_config = self.learn.gather_args()
        _format_config(log_config)
        try:
            wandb.config.update(log_config, allow_val_change=True)
        except Exception as e:
            print(f'WandbCallback could not log config parameters -> {e}')

        if not WandbCallback._wandb_watch_called:
            WandbCallback._wandb_watch_called = True
            # Logs model topology and optionally gradients and weights
            if self.log is not None:
                wandb.watch(self.learn.model, log=self.log)

        # Log dataset
        assert isinstance(self.log_dataset, (str, Path, bool)), 'log_dataset must be a path or a boolean'
        if self.log_dataset is True:
            if Path(self.dls.path) == Path('.'):
                print('WandbCallback could not retrieve the dataset path, please provide it explicitly to "log_dataset"')
                self.log_dataset = False
            else:
                self.log_dataset = self.dls.path
        if self.log_dataset:
            self.log_dataset = Path(self.log_dataset)
            assert self.log_dataset.is_dir(), f'log_dataset must be a valid directory: {self.log_dataset}'
            metadata = {'path relative to learner': os.path.relpath(self.log_dataset, self.learn.path)}
            log_dataset(path=self.log_dataset, name=self.dataset_name, metadata=metadata)

        if self.log_preds:
            try:
                if not self.valid_dl:
                    # Initializes the batch watched
                    wandbRandom = random.Random(self.seed)  # For reproducibility
                    self.n_preds = min(self.n_preds, len(self.dls.valid_ds))
                    idxs = wandbRandom.sample(range(len(self.dls.valid_ds)), self.n_preds)
                    if isinstance(self.dls,  TabularDataLoaders):
                        test_items = getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[idxs]
                        self.valid_dl = self.dls.test_dl(test_items, with_labels=True, process=False)
                    else:
                        test_items = [getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[i] for i in idxs]
                        self.valid_dl = self.dls.test_dl(test_items, with_labels=True)
                self.learn.add_cb(FetchPredsCallback(dl=self.valid_dl, with_input=True, with_decoded=True, reorder=self.reorder))
            except Exception as e:
                self.log_preds = False
                print(f'WandbCallback was not able to prepare a DataLoader for logging prediction samples -> {e}')
        
    def before_batch(self): 
        self.ti_batch = time.perf_counter()
        
    def after_batch(self):
        "Log hyper-parameters and training loss"
        if self.training:
            batch_time = time.perf_counter() - self.ti_batch
            self._wandb_step += 1
            self._wandb_epoch += 1/self.n_iter
            hypers = {f'{k}_{i}':v for i,h in enumerate(self.opt.hypers) for k,v in h.items()}
            wandb.log({'epoch': self._wandb_epoch, 'train_loss': self.smooth_loss, 'raw_loss': self.loss, **hypers}, step=self._wandb_step)
            wandb.log({'train_samples_per_sec': len(self.xb[0]) / batch_time}, step=self._wandb_step)
            
    def log_predictions(self):
        try:
            inp,preds,targs,out = self.learn.fetch_preds.preds
            b = tuplify(inp) + tuplify(targs)
            x,y,its,outs = self.valid_dl.show_results(b, out, show=False, max_n=self.n_preds)
            wandb.log(wandb_process(x, y, its, outs, preds), step=self._wandb_step)
        except Exception as e:
            self.log_preds = False
            self.remove_cb(FetchPredsCallback)
            print(f'WandbCallback was not able to get prediction samples -> {e}')
    

    def after_epoch(self):
        "Log validation loss and custom metrics & log prediction samples"
        # Correct any epoch rounding error and overwrite value
        self._wandb_epoch = round(self._wandb_epoch)
        if self.log_preds and self.log_preds_every_epoch:
            self.log_predictions()
        wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
        wandb.log({n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}, step=self._wandb_step)
    
    def after_fit(self):
        if self.log_preds and not self.log_preds_every_epoch:
            self.log_predictions()
        if self.log_model:
            if self.save_model.last_saved_path is None:
                print('WandbCallback could not retrieve a model to upload')
            else:
                metadata = {n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}
                log_model(self.save_model.last_saved_path, name=self.save_model.fname, metadata=metadata)                
        self.run = True
        if self.log_preds: self.remove_cb(FetchPredsCallback)
        
        wandb.log({})  # ensure sync of last step
        self._wandb_step += 1
        
show_doc(WandbCallback)

WandbCallback

 WandbCallback (log:'str'=None, log_preds:'bool'=True,
                log_preds_every_epoch:'bool'=False,
                log_model:'bool'=False, model_name:'str'=None,
                log_dataset:'bool'=False, dataset_name:'str'=None,
                valid_dl:'TfmdDL'=None, n_preds:'int'=36,
                seed:'int'=12345, reorder=True)

Saves model topology, losses & metrics

| | Type | Default | Details |
|---|---|---|---|
| log | str | None | What to log (can be `gradients`, `parameters`, `all` or None) |
| log_preds | bool | True | Whether to log model predictions on a `wandb.Table` |
| log_preds_every_epoch | bool | False | Whether to log predictions every epoch or at the end |
| log_model | bool | False | Whether to save the model checkpoint to a `wandb.Artifact` |
| model_name | str | None | The name of the `model_name` to save, overrides `SaveModelCallback` |
| log_dataset | bool | False | Whether to log the dataset to a `wandb.Artifact` |
| dataset_name | str | None | A name to log the dataset with |
| valid_dl | TfmdDL | None | If `log_preds=True`, then the samples will be drawn from `valid_dl` |
| n_preds | int | 36 | How many samples to log predictions |
| seed | int | 12345 | The seed of the samples drawn |
| reorder | bool | True | |

Optionally logs weights and/or gradients depending on `log` (can be `gradients`, `parameters`, `all` or None). If `log_preds=True`, prediction samples are drawn from `valid_dl` or, if it is not provided, from a random sample of the validation set (determined by `seed`); `n_preds` predictions are logged in that case.
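As a minimal sketch of these options (the project name and parameter values are illustrative, and an existing `learn` is assumed):

import wandb
from fastai.callback.wandb import WandbCallback

wandb.init(project='my-project')  # placeholder project name
# Log gradients, plus 16 reproducibly-sampled validation predictions
learn.fit_one_cycle(1, cbs=WandbCallback(log='gradients', log_preds=True, n_preds=16, seed=42))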

If used in combination with `SaveModelCallback`, the best model is saved as well (this can be deactivated by setting `log_model=False`).
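A sketch of that combination, again assuming an existing `learn` (the checkpoint name `run-best` is arbitrary):

from fastai.callback.tracker import SaveModelCallback
from fastai.callback.wandb import WandbCallback

# The best checkpoint found by SaveModelCallback is uploaded as a wandb.Artifact
learn.fit_one_cycle(2, cbs=[SaveModelCallback(fname='run-best'),
                            WandbCallback(log_model=True)])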

Datasets can also be tracked:

* if `log_dataset` is `True`, the tracked folder is retrieved from `learn.dls.path`
* `log_dataset` can explicitly be set to the folder to track
* the name of the dataset can explicitly be given through `dataset_name`, otherwise it is set to the folder name
* note: the subfolder "models" is always ignored (see the sketch after this list)
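For example, a sketch tracking an explicit folder under an explicit name (the `data` path and the dataset name are illustrative):

# Track the dataset folder as a wandb.Artifact while training
learn.fit(1, cbs=WandbCallback(log_dataset='data', dataset_name='my_dataset'))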

For custom scenarios, you can also manually use the `log_dataset` and `log_model` functions to log your own datasets and models respectively.

@patch
def gather_args(self:Learner):
    "Gather config parameters accessible to the learner"
    # args stored by `store_attr`
    cb_args = {f'{cb}':getattr(cb,'__stored_args__',True) for cb in self.cbs}
    args = {'Learner':self, **cb_args}
    # input dimensions
    try:
        n_inp = self.dls.train.n_inp
        args['n_inp'] = n_inp
        xb = self.dls.valid.one_batch()[:n_inp]
        args.update({f'input {n+1} dim {i+1}':d for n in range(n_inp) for i,d in enumerate(list(detuplify(xb[n]).shape))})
    except: print('Could not gather input dimensions')
    # other useful information
    with ignore_exceptions():
        args['batch size'] = self.dls.bs
        args['batch per epoch'] = len(self.dls.train)
        args['model parameters'] = total_params(self.model)[0]
        args['device'] = self.dls.device.type
        args['frozen'] = bool(self.opt.frozen_idx)
        args['frozen idx'] = self.opt.frozen_idx
        args['dataset.tfms'] = f'{self.dls.dataset.tfms}'
        args['dls.after_item'] = f'{self.dls.after_item}'
        args['dls.before_batch'] = f'{self.dls.before_batch}'
        args['dls.after_batch'] = f'{self.dls.after_batch}'
    return args
show_doc(Learner.gather_args)

Learner.gather_args

 Learner.gather_args ()

Gather config parameters accessible to the learner
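`WandbCallback` calls this method internally to populate `wandb.config`, but you can also inspect the result yourself. A minimal sketch, assuming an existing `learn` (the keys shown are set by `gather_args` but may be absent if gathering failed, hence `.get`):

# Inspect the config that WandbCallback would log
args = learn.gather_args()
print(args.get('batch size'), args.get('model parameters'))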

def _make_plt(img):
    "Make plot to image resolution"
    # from https://stackoverflow.com/a/13714915
    my_dpi = 100
    fig = plt.figure(frameon=False, dpi=my_dpi)
    h, w = img.shape[:2]
    fig.set_size_inches(w / my_dpi, h / my_dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    return fig, ax
def _format_config_value(v):
    if isinstance(v, list):
        return [_format_config_value(item) for item in v]
    elif hasattr(v, '__stored_args__'):
        return {**_format_config(v.__stored_args__), '_name': v}
    return v
def _format_config(config):
    "Format config parameters before logging them"
    for k,v in config.items():
        if isinstance(v, dict):
            config[k] = _format_config(v)
        else:
            config[k] = _format_config_value(v)
    return config
def _format_metadata(metadata):
    "Format metadata associated to artifacts"
    for k,v in metadata.items(): metadata[k] = str(v)
def log_dataset(path, name=None, metadata={}, description='raw dataset'):
    "Log dataset folder"
    # Check if wandb.init has been called in case datasets are logged manually
    if wandb.run is None:
        raise ValueError('You must call wandb.init() before log_dataset()')
    path = Path(path)
    if not path.is_dir():
        raise ValueError(f'path must be a valid directory: {path}')
    name = ifnone(name, path.name)
    _format_metadata(metadata)
    artifact_dataset = wandb.Artifact(name=name, type='dataset', metadata=metadata, description=description)
    # log everything except "models" folder
    for p in path.ls():
        if p.is_dir():
            if p.name != 'models': artifact_dataset.add_dir(str(p.resolve()), name=p.name)
        else: artifact_dataset.add_file(str(p.resolve()))
    wandb.run.use_artifact(artifact_dataset)
show_doc(log_dataset)

log_dataset

 log_dataset (path, name=None, metadata={}, description='raw dataset')

Log dataset folder
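For manual dataset logging, a minimal sketch (the path, name, and metadata are illustrative; the path must be an existing directory):

import wandb
from fastai.callback.wandb import log_dataset

wandb.init(anonymous='allow')
log_dataset('path/to/data', name='my_dataset', metadata={'source': 'example'})
wandb.finish()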

def log_model(path, name=None, metadata={}, description='trained model'):
    "Log model file"
    if wandb.run is None:
        raise ValueError('You must call wandb.init() before log_model()')
    path = Path(path)
    if not path.is_file():
        raise ValueError(f'path must be a valid file: {path}')
    name = ifnone(name, f'run-{wandb.run.id}-model')
    _format_metadata(metadata)    
    artifact_model = wandb.Artifact(name=name, type='model', metadata=metadata, description=description)
    with artifact_model.new_file(str(Path(name).with_suffix(".pth")), mode='wb') as fa:
        fa.write(path.read_bytes())
    wandb.run.log_artifact(artifact_model)
show_doc(log_model)

log_model

 log_model (path, name=None, metadata={}, description='trained model')

Log model file
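Similarly, a minimal sketch of manual model logging (the path, name, and metadata are illustrative; the path must point to an existing file):

import wandb
from fastai.callback.wandb import log_model

wandb.init(anonymous='allow')
log_model('models/model.pth', name='my_model', metadata={'valid_loss': 0.28})
wandb.finish()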

@typedispatch
def wandb_process(x:TensorImage, y, samples, outs, preds):
    "Process `sample` and `out` depending on the type of `x/y`"
    res_input, res_pred, res_label = [],[],[]
    for s,o in zip(samples, outs):
        img = s[0].permute(1,2,0)
        res_input.append(wandb.Image(img, caption='Input_data'))
        for t, capt, res in ((o[0], "Prediction", res_pred), (s[1], "Ground_Truth", res_label)):
            fig, ax = _make_plt(img)
            # Superimpose label or prediction on input image
            ax = img.show(ctx=ax)
            ax = t.show(ctx=ax)
            res.append(wandb.Image(fig, caption=capt))
            plt.close(fig)
    return {"Inputs":res_input, "Predictions":res_pred, "Ground_Truth":res_label}
def _unlist(l):
    "get element of lists of lenght 1"
    if isinstance(l, (list, tuple)):
        if len(l) == 1: return l[0]
    else: return l
@typedispatch
def wandb_process(x:TensorImage, y:TensorCategory|TensorMultiCategory, samples, outs, preds):
    table = wandb.Table(columns=["Input image", "Ground_Truth", "Predictions"])
    for (image, label), pred_label in zip(samples,outs):
        table.add_data(wandb.Image(image.permute(1,2,0)), label, _unlist(pred_label))
    return {"Prediction_Samples": table}
@typedispatch
def wandb_process(x:TensorImage, y:TensorMask, samples, outs, preds):
    res = []
    codes = getattr(outs[0][0], 'codes', None)
    if codes is not None:
        class_labels = [{'name': name, 'id': id}  for id, name in enumerate(codes)] 
    else:
        class_labels = [{'name': i, 'id': i} for i in range(preds.shape[1])]
    table = wandb.Table(columns=["Input Image", "Ground_Truth", "Predictions"])
    for (image, label), pred_label in zip(samples, outs):
        img = image.permute(1,2,0)
        table.add_data(wandb.Image(img),
                       wandb.Image(img, masks={"Ground_Truth": {'mask_data': label.numpy().astype(np.uint8)}}, classes=class_labels), 
                       wandb.Image(img, masks={"Prediction":   {'mask_data': pred_label[0].numpy().astype(np.uint8)}}, classes=class_labels) 
                      )
    return {"Prediction_Samples": table}
@typedispatch
def wandb_process(x:TensorText, y:TensorCategory|TensorMultiCategory, samples, outs, preds):
    data = [[s[0], s[1], o[0]] for s,o in zip(samples,outs)]
    return {"Prediction_Samples": wandb.Table(data=data, columns=["Text", "Target", "Prediction"])}
@typedispatch
def wandb_process(x:Tabular, y:Tabular, samples, outs, preds):
    df = x.all_cols
    for n in x.y_names: df[n+'_pred'] = y[n].values
    return {"Prediction_Samples": wandb.Table(dataframe=df)}

Example of use:

Once your `Learner` is defined, and before calling `fit` or `fit_one_cycle`, you need to initialize wandb:

import wandb
wandb.init()

If you want to use Weights & Biases without an account, you can call `wandb.init(anonymous='allow')`.

Then you add the callback to your `Learner` or to a call of a fit method, potentially alongside `SaveModelCallback` if you want to save the best model:

from fastai.callback.wandb import *

# To log only during one training phase
learn.fit(..., cbs=WandbCallback())

# To log continuously for all training phases
learn = learner(..., cbs=WandbCallback())

Datasets and models can be tracked through the callback or directly via the `log_dataset` and `log_model` functions.

For more details, refer to the W&B documentation.

#|slow
from fastai.vision.all import *
import tempfile
#|slow
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
tds = Datasets(items, [PILImageBW.create, [parent_label, Categorize()]], splits=GrandparentSplitter()(items))
dls = tds.dataloaders(after_item=[ToTensor(), IntToFloatTensor()])

os.environ['WANDB_MODE'] = 'dryrun' # run offline
with tempfile.TemporaryDirectory() as wandb_local_dir:
    wandb.init(anonymous='allow', dir=wandb_local_dir)
    learn = vision_learner(dls, resnet18, loss_func=CrossEntropyLossFlat(), cbs=WandbCallback(log_model=False))
    learn.fit(1)

    # add more data from a new learner on the same run
    learn = vision_learner(dls, resnet18, loss_func=CrossEntropyLossFlat(), cbs=WandbCallback(log_model=False))
    learn.fit(1, lr=slice(0.005))
    
    # save model
    learn = vision_learner(dls, resnet18, loss_func=CrossEntropyLossFlat(), cbs=WandbCallback(log_model=True))
    learn.fit(1, lr=slice(0.005))
    
    # save model, overriding the checkpoint name
    learn = vision_learner(dls, resnet18, path=wandb_local_dir, loss_func=CrossEntropyLossFlat(), cbs=[WandbCallback(log_model=True, model_name="good_name"), SaveModelCallback(fname="bad_name")])
    learn.fit(1, lr=slice(0.005))
    assert (Path(wandb_local_dir)/"models/good_name.pth").exists(), "No model file found"
    
    # finish writing files to the temporary folder
    wandb.finish()
Tracking run with wandb version 0.12.17
W&B syncing is set to `offline` in this directory.
Run `wandb online` or set WANDB_MODE=online to enable cloud syncing.
epoch train_loss valid_loss time
0 0.707184 0.196000 00:02
epoch train_loss valid_loss time
0 0.707796 0.522665 00:02
epoch train_loss valid_loss time
0 0.599319 0.323519 00:02
Better model found at epoch 0 with valid_loss value: 0.32351893186569214.
epoch train_loss valid_loss time
0 0.854327 0.279063 00:02
Better model found at epoch 0 with valid_loss value: 0.279062956571579.
Waiting for W&B process to finish... (success).

Run history:


epoch ▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
eps_1 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
eps_2 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ██████████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_1 ██████████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_2 ▁▁▁▁▁▁▁▁▁▁██████████████████████████████
mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
mom_1 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
mom_2 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
raw_loss █▄▃▃▁▁▂▄▂▄▇▄▃▂▄▁▃▃▃▂▆▄▄▂▃▃▁▁▂▁█▄▃▂▃▂▂▇▃▂
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_1 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_2 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
train_loss ▇▅▄▃▃▂▂▂▂▂▇▅▄▃▃▂▂▂▂▂▅▄▄▃▃▂▂▁▁▁█▅▄▃▃▂▂▃▂▃
train_samples_per_sec ▃▇███▇▇███▃▄▇███▇▇▇█▃▇▇▅▆▆▆███▁▇█▇▇▇▆▆██
valid_loss ▁█▄▃
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
wd_1 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
wd_2 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁

Run summary:


epoch 4
eps_0 1e-05
eps_1 1e-05
eps_2 1e-05
lr_0 0.0005
lr_1 0.0005
lr_2 0.005
mom_0 0.9
mom_1 0.9
mom_2 0.9
raw_loss 0.47733
sqr_mom_0 0.99
sqr_mom_1 0.99
sqr_mom_2 0.99
train_loss 0.85433
train_samples_per_sec 3029.923
valid_loss 0.27906
wd_0 0.01
wd_1 0.01
wd_2 0.01

You can sync this run to the cloud by running:
wandb sync /tmp/tmpcbvaf5w6/wandb/offline-run-20220609_163612-2jh9cxyp
Find logs at: /tmp/tmpcbvaf5w6/wandb/offline-run-20220609_163612-2jh9cxyp/logs
_all_ = ['wandb_process']

Export -

from nbdev import *
nbdev_export()
Converted 00_torch_core.ipynb.
Converted 01_layers.ipynb.
Converted 01a_losses.ipynb.
Converted 02_data.load.ipynb.
Converted 03_data.core.ipynb.
Converted 04_data.external.ipynb.
Converted 05_data.transforms.ipynb.
Converted 06_data.block.ipynb.
Converted 07_vision.core.ipynb.
Converted 08_vision.data.ipynb.
Converted 09_vision.augment.ipynb.
Converted 09b_vision.utils.ipynb.
Converted 09c_vision.widgets.ipynb.
Converted 10_tutorial.pets.ipynb.
Converted 10b_tutorial.albumentations.ipynb.
Converted 11_vision.models.xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_callback.core.ipynb.
Converted 13a_learner.ipynb.
Converted 13b_metrics.ipynb.
Converted 14_callback.schedule.ipynb.
Converted 14a_callback.data.ipynb.
Converted 15_callback.hook.ipynb.
Converted 15a_vision.models.unet.ipynb.
Converted 16_callback.progress.ipynb.
Converted 17_callback.tracker.ipynb.
Converted 18_callback.fp16.ipynb.
Converted 18a_callback.training.ipynb.
Converted 18b_callback.preds.ipynb.
Converted 19_callback.mixup.ipynb.
Converted 20_interpret.ipynb.
Converted 20a_distributed.ipynb.
Converted 20b_tutorial.distributed.ipynb.
Converted 21_vision.learner.ipynb.
Converted 22_tutorial.imagenette.ipynb.
Converted 23_tutorial.vision.ipynb.
Converted 24_tutorial.image_sequence.ipynb.
Converted 24_tutorial.siamese.ipynb.
Converted 24_vision.gan.ipynb.
Converted 30_text.core.ipynb.
Converted 31_text.data.ipynb.
Converted 32_text.models.awdlstm.ipynb.
Converted 33_text.models.core.ipynb.
Converted 34_callback.rnn.ipynb.
Converted 35_tutorial.wikitext.ipynb.
Converted 37_text.learner.ipynb.
Converted 38_tutorial.text.ipynb.
Converted 39_tutorial.transformers.ipynb.
Converted 40_tabular.core.ipynb.
Converted 41_tabular.data.ipynb.
Converted 42_tabular.model.ipynb.
Converted 43_tabular.learner.ipynb.
Converted 44_tutorial.tabular.ipynb.
Converted 45_collab.ipynb.
Converted 46_tutorial.collab.ipynb.
Converted 50_tutorial.datablock.ipynb.
Converted 60_medical.imaging.ipynb.
Converted 61_tutorial.medical_imaging.ipynb.
Converted 65_medical.text.ipynb.
Converted 70_callback.wandb.ipynb.
Converted 70a_callback.tensorboard.ipynb.
Converted 70b_callback.neptune.ipynb.
Converted 70c_callback.captum.ipynb.
Converted 70d_callback.comet.ipynb.
Converted 74_huggingface.ipynb.
Converted 97_test_utils.ipynb.
Converted 99_pytorch_doc.ipynb.
Converted dev-setup.ipynb.
Converted app_examples.ipynb.
Converted camvid.ipynb.
Converted distributed_app_examples.ipynb.
Converted migrating_catalyst.ipynb.
Converted migrating_ignite.ipynb.
Converted migrating_lightning.ipynb.
Converted migrating_pytorch.ipynb.
Converted migrating_pytorch_verbose.ipynb.
Converted ulmfit.ipynb.
Converted index.ipynb.
Converted quick_start.ipynb.
Converted tutorial.ipynb.