! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
Vision learner
from __future__ import annotations
from packaging.version import parse
from fastai.basics import *
from fastai.vision.core import *
from fastai.vision.data import *
from fastai.vision.augment import *
from fastai.vision import models
import torchvision
try: import timm
except ModuleNotFoundError: pass
from nbdev.showdoc import *
All the functions necessary to build a `Learner` suited for transfer learning in computer vision
The most important functions of this module are `vision_learner` and `unet_learner`. They will help you define a `Learner` using a pretrained model. See the vision tutorial for examples of use.
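As a quick orientation, here is a minimal usage sketch (not part of the original notebook; it assumes an `ImageDataLoaders` object `dls` and the standard fastai `error_rate` metric, built as in the vision tutorial):

# hypothetical quick-start, commented out since `dls` is not defined at this point
# learn = vision_learner(dls, models.resnet18, metrics=error_rate)
# learn.fine_tune(1)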
Cut a pretrained model
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
By default, the fastai library cuts a pretrained model at the pooling layer. This function helps detect it.
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
def _get_first_layer(m):
"Access first layer of a model"
    c,p,n = m,None,None  # child, parent, name
    for n in next(m.named_parameters())[0].split('.')[:-1]:
        p,c = c,getattr(c,n)
    return c,p,n
def _load_pretrained_weights(new_layer, previous_layer):
"Load pretrained weights based on number of input channels"
    n_in = getattr(new_layer, 'in_channels')
    if n_in==1:
        # we take the sum of the channel weights
        new_layer.weight.data = previous_layer.weight.data.sum(dim=1, keepdim=True)
    elif n_in==2:
        # we take the first two channels + 50%
        new_layer.weight.data = previous_layer.weight.data[:,:2] * 1.5
    else:
        # keep the 3 channel weights and set the others to zero
        new_layer.weight.data[:,:3] = previous_layer.weight.data
        new_layer.weight.data[:,3:].zero_()
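As a quick illustration (an added check, not one of the original tests), adapting a pretrained 3-channel convolution to a single input channel sums the channel weights:

# sketch: the 3 pretrained channel kernels collapse into 1 by summation
old = nn.Conv2d(3, 8, 3)
new = nn.Conv2d(1, 8, 3)
_load_pretrained_weights(new, old)
test_eq(new.weight.shape, (8, 1, 3, 3))
test_close(new.weight.data, old.weight.data.sum(dim=1, keepdim=True))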
def _update_first_layer(model, n_in, pretrained):
"Change first layer based on number of input channels"
if n_in == 3: return
    first_layer, parent, name = _get_first_layer(model)
    assert isinstance(first_layer, nn.Conv2d), f'Change of input channels only supported with Conv2d, found {first_layer.__class__.__name__}'
    assert getattr(first_layer, 'in_channels') == 3, f'Unexpected number of input channels, found {getattr(first_layer, "in_channels")} while expecting 3'
    params = {attr:getattr(first_layer, attr) for attr in 'out_channels kernel_size stride padding dilation groups padding_mode'.split()}
    params['bias'] = getattr(first_layer, 'bias') is not None
    params['in_channels'] = n_in
    new_layer = nn.Conv2d(**params)
    if pretrained:
        _load_pretrained_weights(new_layer, first_layer)
    setattr(parent, name, new_layer)
def cut_model(model, cut):
"Cut an instantiated model"
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
raise NameError("cut must be either integer or a function")
def create_body(model, n_in=3, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
    _update_first_layer(model, n_in, pretrained)
    if cut is None:
        ll = list(enumerate(model.children()))
        cut = next(i for i,o in reversed(ll) if has_pool_type(o))
    return cut_model(model, cut)
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. By default, the model is cut right before the last layer that contains some pooling.
def tst(): return nn.Sequential(nn.Conv2d(3,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst())
test_eq(len(m), 2)

m = create_body(tst(), cut=3)
test_eq(len(m), 3)

m = create_body(tst(), cut=noop)
test_eq(len(m), 4)

for n in range(1,5):
    m = create_body(tst(), n_in=n)
    test_eq(_get_first_layer(m)[0].in_channels, n)
Head and model
def create_head(nf, n_out, lin_ftrs=None, ps=0.5, pool=True, concat_pool=True, first_bn=True, bn_final=False,
                lin_first=False, y_range=None):
    "Model head that takes `nf` features, runs through `lin_ftrs`, and out `n_out` classes."
    if pool and concat_pool: nf *= 2
    lin_ftrs = [nf, 512, n_out] if lin_ftrs is None else [nf] + lin_ftrs + [n_out]
    bns = [first_bn] + [True]*len(lin_ftrs[1:])
    ps = L(ps)
    if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
    actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
    layers = []
    if pool:
        pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
        layers += [pool, Flatten()]
    if lin_first: layers.append(nn.Dropout(ps.pop(0)))
    for ni,no,bn,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], bns, ps, actns):
        layers += LinBnDrop(ni, no, bn=bn, p=p, act=actn, lin_first=lin_first)
    if lin_first: layers.append(nn.Linear(lin_ftrs[-2], n_out))
    if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
    if y_range is not None: layers.append(SigmoidRange(*y_range))
    return nn.Sequential(*layers)
The head begins with fastai's `AdaptiveConcatPool2d` if `concat_pool=True`, otherwise with a traditional average pooling. It is then followed by a `Flatten` layer before going through blocks of `BatchNorm`, `Dropout` and `Linear` layers (if `lin_first=True`, the order is `Linear`, `BatchNorm`, `Dropout`).

Those blocks start at `nf`, then go through every element of `lin_ftrs` (defaults to `[512]`) and end at `n_out`. `ps` is a list of probabilities used for the dropouts (if you only pass one value, it will use half that value for the intermediate dropouts, then the value itself as many times as needed).

If `first_bn=True`, a `BatchNorm` is added just after the pooling operations. If `bn_final=True`, a final `BatchNorm` layer is added. If `y_range` is passed, the function adds a `SigmoidRange` mapping to that range.
tst = create_head(5, 10)
tst
Sequential(
(0): AdaptiveConcatPool2d(
(ap): AdaptiveAvgPool2d(output_size=1)
(mp): AdaptiveMaxPool2d(output_size=1)
)
(1): fastai.layers.Flatten(full=False)
(2): BatchNorm1d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): Dropout(p=0.25, inplace=False)
(4): Linear(in_features=10, out_features=512, bias=False)
(5): ReLU(inplace=True)
(6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(7): Dropout(p=0.5, inplace=False)
(8): Linear(in_features=512, out_features=10, bias=False)
)
mods = list(tst.children())
test_eq(len(mods), 9)
assert isinstance(mods[2], nn.BatchNorm1d)
assert isinstance(mods[-1], nn.Linear)
tst = create_head(5, 10, lin_first=True)
mods = list(tst.children())
test_eq(len(mods), 8)
assert isinstance(mods[2], nn.Dropout)
tst = create_head(5, 10, first_bn=False)
mods = list(tst.children())
test_eq(len(mods), 8)
assert isinstance(mods[2], nn.Dropout)
tst = create_head(5, 10, concat_pool=True)
modes = list(tst.children())
test_eq(modes[4].in_features, 10)

tst = create_head(5, 10, concat_pool=False)
modes = list(tst.children())
test_eq(modes[4].in_features, 5)
from fastai.callback.hook import num_features_model
# TODO: refactor, i.e. something like this?
# class ModelSplitter():
# def __init__(self, idx): self.idx = idx
# def split(self, m): return L(m[:self.idx], m[self.idx:]).map(params)
# def __call__(self,): return {'cut':self.idx, 'split':self.split}
def default_split(m):
"Default split of a model between body and head"
return L(m[0], m[1:]).map(params)
To do transfer learning, you need to pass a `splitter` to `Learner`. This should be a function taking the model and returning a collection of parameter groups, e.g. a list of lists of parameters.
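For instance (an added sketch using `default_split` defined above), a two-part body/head model yields two parameter groups:

m = nn.Sequential(nn.Sequential(nn.Conv2d(3,4,3), nn.BatchNorm2d(4)), nn.Linear(4,2))
# one group for the body, one for the head
test_eq(len(default_split(m)), 2)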
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta    = {'cut':None, 'split':default_split}
_xresnet_meta    = {'cut':-4, 'split':_xresnet_split, 'stats':imagenet_stats}
_resnet_meta     = {'cut':-2, 'split':_resnet_split, 'stats':imagenet_stats, 'weights':'DEFAULT'}
_squeezenet_meta = {'cut':-1, 'split':_squeezenet_split, 'stats':imagenet_stats, 'weights':'DEFAULT'}
_densenet_meta   = {'cut':-1, 'split':_densenet_split, 'stats':imagenet_stats, 'weights':'DEFAULT'}
_vgg_meta        = {'cut':-2, 'split':_vgg_split, 'stats':imagenet_stats, 'weights':'DEFAULT'}
_alexnet_meta    = {'cut':-2, 'split':_alexnet_split, 'stats':imagenet_stats, 'weights':'DEFAULT'}
model_meta = {
    models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
    models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
    models.xresnet.xresnet152:{**_xresnet_meta},

    models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
    models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
    models.resnet152:{**_resnet_meta},

    models.squeezenet1_0:{**_squeezenet_meta},
    models.squeezenet1_1:{**_squeezenet_meta},

    models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
    models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
    models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
    models.alexnet:{**_alexnet_meta}}
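`create_vision_model` and friends look up this metadata per architecture, falling back to `_default_meta` for unknown architectures (an added check for illustration):

test_eq(model_meta[models.resnet18]['cut'], -2)
# an architecture with no entry falls back to the default metadata
test_eq(model_meta.get(nn.Linear, _default_meta)['split'], default_split)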
def add_head(body, nf, n_out, init=nn.init.kaiming_normal_, head=None, concat_pool=True, pool=True,
             lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None):
    "Add a head to a vision body"
    if head is None:
        head = create_head(nf, n_out, concat_pool=concat_pool, pool=pool,
                           lin_ftrs=lin_ftrs, ps=ps, first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range)
    model = nn.Sequential(body, head)
    if init is not None: apply_init(model[1], init)
    return model
def create_vision_model(arch, n_out, pretrained=True, weights=None, cut=None, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
                        concat_pool=True, pool=True, lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None):
    "Create custom vision architecture"
    meta = model_meta.get(arch, _default_meta)
    if parse(torchvision.__version__) >= parse('0.13') and 'weights' in meta:
        if weights is not None and not pretrained:
            warn(f'{pretrained=} but `weights` are set {weights=}. To randomly initialize set `pretrained=False` & `weights=None`')
        model = arch(weights=meta['weights'] if (weights is None and pretrained) else weights)
    else:
        model = arch(pretrained=pretrained)
    body = create_body(model, n_in, pretrained, ifnone(cut, meta['cut']))
    nf = num_features_model(nn.Sequential(*body.children())) if custom_head is None else None
    return add_head(body, nf, n_out, init=init, head=custom_head, concat_pool=concat_pool, pool=pool,
                    lin_ftrs=lin_ftrs, ps=ps, first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range)
show_doc(create_vision_model)
create_vision_model
create_vision_model (arch, n_out, pretrained=True, weights=None, cut=None, n_in=3, init=<function kaiming_normal_>, custom_head=None, concat_pool=True, pool=True, lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None)
Create custom vision architecture
The model is cut according to `cut`, and it may be `pretrained`, in which case the proper set of weights is downloaded then loaded. `init` is applied to the head of the model, which is either created by `create_head` (with `lin_ftrs`, `ps`, `concat_pool`, `bn_final`, `lin_first` and `y_range`) or is `custom_head`.
tst = create_vision_model(models.resnet18, 10, True)
tst = create_vision_model(models.resnet18, 10, True, n_in=1)
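As an added check, the first convolution of the `n_in=1` model has indeed been rebuilt with a single input channel:

test_eq(_get_first_layer(tst)[0].in_channels, 1)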
class TimmBody(nn.Module):
def __init__(self, model, pretrained:bool=True, cut=None, n_in:int=3):
super().__init__()
self.needs_pool = model.default_cfg.get('pool_size', None) is not None
self.model = model if cut is None else cut_model(model, cut)
def forward(self,x): return self.model.forward_features(x) if self.needs_pool else self.model(x)
def create_timm_model(arch, n_out, cut=None, pretrained=True, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
                      concat_pool=True, pool=True, lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None, **kwargs):
    "Create custom architecture using `arch`, `n_in` and `n_out` from the `timm` library"
    model = timm.create_model(arch, pretrained=pretrained, num_classes=0, in_chans=n_in, **kwargs)
    body = TimmBody(model, pretrained, None, n_in)
    nf = body.model.num_features
    res = add_head(body, nf, n_out, init=init, head=custom_head, concat_pool=concat_pool, pool=body.needs_pool,
                   lin_ftrs=lin_ftrs, ps=ps, first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range)
    return res,model.default_cfg
# make sure that timm models can be scripted:
tst, _ = create_timm_model('resnet34', 1)
scripted = torch.jit.script(tst)
assert scripted, "model could not be converted to TorchScript"
Learner convenience functions
def _add_norm(dls, meta, pretrained, n_in=3):
    if not pretrained: return
    stats = meta.get('stats')
    if stats is None: return
    if n_in != len(stats[0]): return
    if not dls.after_batch.fs.filter(risinstance(Normalize)):
        dls.add_tfms([Normalize.from_stats(*stats)],'after_batch')
path = untar_data(URLs.PETS)
dls = ImageDataLoaders.from_name_re(path, get_image_files(path/"images"), r'^(.*)_\d+.jpg$', item_tfms=Resize(224))
for _ in range(5): _add_norm(dls, model_meta[models.resnet34], True)
test_eq(len(dls.after_batch.fs), 2)
def _timm_norm(dls, cfg, pretrained, n_in=3):
    if not pretrained: return
    if n_in != len(cfg['mean']): return
    if not dls.after_batch.fs.filter(risinstance(Normalize)):
        tfm = Normalize.from_stats(cfg['mean'],cfg['std'])
        dls.add_tfms([tfm],'after_batch')
@delegates(create_vision_model)
def vision_learner(dls, arch, normalize=True, n_out=None, pretrained=True, weights=None,
                   # learner args
                   loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
                   model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
                   # model & head args
                   cut=None, init=nn.init.kaiming_normal_, custom_head=None, concat_pool=True, pool=True,
                   lin_ftrs=None, ps=0.5, first_bn=True, bn_final=False, lin_first=False, y_range=None, **kwargs):
    "Build a vision learner from `dls` and `arch`"
    if n_out is None: n_out = get_c(dls)
    assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
    meta = model_meta.get(arch, _default_meta)
    model_args = dict(init=init, custom_head=custom_head, concat_pool=concat_pool, pool=pool, lin_ftrs=lin_ftrs, ps=ps,
                      first_bn=first_bn, bn_final=bn_final, lin_first=lin_first, y_range=y_range, **kwargs)
    n_in = kwargs['n_in'] if 'n_in' in kwargs else 3
    if isinstance(arch, str):
        model,cfg = create_timm_model(arch, n_out, default_split, pretrained, **model_args)
        if normalize: _timm_norm(dls, cfg, pretrained, n_in)
    else:
        if normalize: _add_norm(dls, meta, pretrained, n_in)
        model = create_vision_model(arch, n_out, pretrained=pretrained, weights=weights, **model_args)

    splitter = ifnone(splitter, meta['split'])
    learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
                    metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn, moms=moms)
    if pretrained: learn.freeze()
    # keep track of args for loggers
    store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
    return learn
The model is built from `arch` using the number of final activations inferred from `dls` if possible (otherwise, pass a value to `n_out`). It might be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`).

If `normalize` and `pretrained` are `True`, this function adds a `Normalization` transform to the `dls` (if there is not one already) using the statistics of the pretrained model. That way, you won't ever forget to normalize your data in transfer learning.

All other arguments are passed to `Learner`.
Starting with version 0.13, TorchVision supports multiple pretrained weights for the same model architecture. The `vision_learner` defaults of `pretrained=True, weights=None` will use the architecture's default weights, which are currently IMAGENET1K_V2. If you are using an older version of TorchVision or are creating a timm model, setting `weights` will have no effect.
from torchvision.models import ResNet50_Weights

# legacy weights with accuracy 76.130%
vision_learner(models.resnet50, pretrained=True, weights=ResNet50_Weights.IMAGENET1K_V1, ...)

# new weights with accuracy 80.858%. Strings are also supported.
vision_learner(models.resnet50, pretrained=True, weights='IMAGENET1K_V2', ...)

# best available weights (currently an alias for IMAGENET1K_V2)
# default weights if vision_learner weights isn't set
vision_learner(models.resnet50, pretrained=True, weights=ResNet50_Weights.DEFAULT, ...)

# no weights - random initialization
vision_learner(models.resnet50, pretrained=False, weights=None, ...)
The example above shows how to use the new TorchVision 0.13 multi-weight API with `vision_learner`.
path = untar_data(URLs.PETS)
fnames = get_image_files(path/"images")
pat = r'^(.*)_\d+.jpg$'
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
learn = vision_learner(dls, models.resnet18, loss_func=CrossEntropyLossFlat(), ps=0.25)
if parse(torchvision.__version__) >= parse('0.13'):
    from torchvision.models import ResNet34_Weights
    weights = ResNet34_Weights.IMAGENET1K_V1
else:
    weights = None

learn = vision_learner(dls, models.resnet34, weights=weights, loss_func=CrossEntropyLossFlat(), ps=0.25, concat_pool=False)
test_ne(learn.cbs, None)

test_eq(to_cpu(dls.after_batch[1].mean[0].squeeze()), tensor(imagenet_stats[0]))
test_eq(to_cpu(dls.valid.after_batch[1].mean[0].squeeze()), tensor(imagenet_stats[0]))
If you pass a `str` to `arch`, a timm model will be created:
dls = ImageDataLoaders.from_name_re(path, fnames, pat, item_tfms=Resize(224))
learn = vision_learner(dls, 'convnext_tiny', loss_func=CrossEntropyLossFlat(), ps=0.25)
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, weights=None, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
    meta = model_meta.get(arch, _default_meta)
    if parse(torchvision.__version__) >= parse('0.13') and 'weights' in meta:
        if weights is not None and not pretrained:
            warn(f'{pretrained=} but `weights` are set {weights=}. To randomly initialize set `pretrained=False` & `weights=None`')
        model = arch(weights=meta['weights'] if (weights is None and pretrained) else weights)
    else:
        model = arch(pretrained=pretrained)
    body = create_body(model, n_in, pretrained, ifnone(cut, meta['cut']))
    model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
    return model
show_doc(create_unet_model)
create_unet_model
create_unet_model (arch, n_out, img_size, pretrained=True, weights=None, cut=None, n_in=3, blur=False, blur_final=True, self_attention=False, y_range=None, last_cross=True, bottle=False, act_cls=<class 'torch.nn.modules.activation.ReLU'>, init=<function kaiming_normal_>, norm_type=None)
Create custom unet architecture
tst = create_unet_model(models.resnet18, 10, (24,24), True, n_in=1)
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, weights=None, config=None,
                 # learner args
                 loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
                 model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95), **kwargs):
    "Build a unet learner from `dls` and `arch`"
    if config:
        warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
        kwargs = {**config, **kwargs}

    meta = model_meta.get(arch, _default_meta)
    n_in = kwargs['n_in'] if 'n_in' in kwargs else 3
    if normalize: _add_norm(dls, meta, pretrained, n_in)

    n_out = ifnone(n_out, get_c(dls))
    assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
    img_size = dls.one_batch()[0].shape[-2:]
    assert img_size, "image size could not be inferred from data"
    model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, weights=weights, **kwargs)

    splitter = ifnone(splitter, meta['split'])
    learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
                    metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
                    moms=moms)
    if pretrained: learn.freeze()
    # keep track of args for loggers
    store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
    return learn
The model is built from `arch` using the number of final filters inferred from `dls` if possible (otherwise, pass a value to `n_out`). It might be `pretrained`, and the architecture is cut and split using the default metadata of the model architecture (this can be customized by passing a `cut` or a `splitter`).

If `normalize` and `pretrained` are `True`, this function adds a `Normalization` transform to the `dls` (if there is not one already) using the statistics of the pretrained model. That way, you won't ever forget to normalize your data in transfer learning.

All other arguments are passed to `Learner`.
`unet_learner` also supports TorchVision's new multi-weight API via `weights`. See `vision_learner` for more details.
path = untar_data(URLs.CAMVID_TINY)
fnames = get_image_files(path/'images')
def label_func(x): return path/'labels'/f'{x.stem}_P{x.suffix}'
codes = np.loadtxt(path/'codes.txt', dtype=str)
dls = SegmentationDataLoaders.from_label_func(path, fnames, label_func, codes=codes)
learn = unet_learner(dls, models.resnet34, loss_func=CrossEntropyLossFlat(axis=1), y_range=(0,1))
test_ne(learn.cbs, None)
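As with `vision_learner`, TorchVision multi-weights can be passed through on torchvision >= 0.13 (a commented sketch mirroring the earlier example):

# from torchvision.models import ResNet34_Weights
# learn = unet_learner(dls, models.resnet34, weights=ResNet34_Weights.IMAGENET1K_V1,
#                      loss_func=CrossEntropyLossFlat(axis=1))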
def create_cnn_model(*args, **kwargs):
"Deprecated name for `create_vision_model` -- do not use"
"`create_cnn_model` has been renamed to `create_vision_model` -- please update your code")
warn(return create_vision_model(*args, **kwargs)
def cnn_learner(*args, **kwargs):
"Deprecated name for `vision_learner` -- do not use"
"`cnn_learner` has been renamed to `vision_learner` -- please update your code")
warn(return vision_learner(*args, **kwargs)
Show functions -
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
    ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
    return ctxs
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
for i in range(2):
        ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
            for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
@typedispatch
def show_results(x:TensorImage, y:TensorMask|TensorPoint|TensorBBox, samples, outs, ctxs=None, max_n=6,
                 nrows=None, ncols=1, figsize=None, **kwargs):
    if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize, double=True,
                                     title='Target/Prediction')
    for i in range(2):
        ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
    for o in [samples,outs]:
        ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
    return ctxs
@typedispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
        ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
    ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
    return ctxs
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
    axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize, title='Prediction/Actual/Loss/Probability')
    for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
        s[0].show(ctx=ax, **kwargs)
        ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
    axs = get_grid(len(samples), nrows=nrows, ncols=ncols, figsize=figsize)
    for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
    rows = get_empty_df(len(samples))
    outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
    for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
        rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
    display_df(pd.DataFrame(rows))
@typedispatch
def plot_top_losses(x:TensorImage, y:TensorMask, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
    axes = get_grid(len(samples)*3, nrows=len(samples), ncols=3, figsize=figsize, flatten=False, title="Input | Target | Prediction")
    if axes.ndim == 1: axes = (axes,)
    titles = ["input", "target", "pred"]
    for axs,s,o,l in zip(axes, samples, outs, losses):
        imgs = (s[0], s[1], o[0])
        for ax,im,title in zip(axs, imgs, titles):
            if title=="pred": title += f"; loss = {l.item():.4f}"
            im.show(ctx=ax, **kwargs)
            ax.set_title(title)
Export -
from nbdev import nbdev_export
nbdev_export()