! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on Colab
Data block
from __future__ import annotations
from fastai.torch_basics import *
from fastai.data.core import *
from fastai.data.load import *
from fastai.data.external import *
from fastai.data.transforms import *
from nbdev.showdoc import *
High level API to quickly get your data in a `DataLoaders`.
TransformBlock -
class TransformBlock():
    "A basic wrapper that links defaults transforms for the data block API"
    def __init__(self,
        type_tfms:list=None, # One or more `Transform`s
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
        dl_type:TfmdDL=None, # Task specific `TfmdDL`, defaults to `TfmdDL`
        dls_kwargs:dict=None, # Additional arguments to be passed to `DataLoaders`
    ):
        self.type_tfms  =            L(type_tfms)
        self.item_tfms  = ToTensor + L(item_tfms)
        self.batch_tfms =            L(batch_tfms)
        self.dl_type,self.dls_kwargs = dl_type,({} if dls_kwargs is None else dls_kwargs)
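For a concrete sense of how these pieces fit together, this is (up to type annotations) how fastai's own `ImageBlock` in `fastai.vision.data` links a type transform to a batch transform (`PILImage` comes from the vision submodules imported later in this notebook):

def ImageBlock(cls=PILImage):
    "A `TransformBlock` for images of `cls`"
    return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)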
def CategoryBlock(
    vocab:MutableSequence|pd.Series=None, # List of unique class names
    sort:bool=True, # Sort the classes alphabetically
    add_na:bool=False, # Add `#na#` to `vocab`
):
    "`TransformBlock` for single-label categorical targets"
    return TransformBlock(type_tfms=Categorize(vocab=vocab, sort=sort, add_na=add_na))
def MultiCategoryBlock(
    encoded:bool=False, # Whether the data comes in one-hot encoded format
    vocab:MutableSequence|pd.Series=None, # List of unique class names
    add_na:bool=False, # Add `#na#` to `vocab`
):
    "`TransformBlock` for multi-label categorical targets"
    tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
    return TransformBlock(type_tfms=tfm)
def RegressionBlock(
    n_out:int=None, # Number of output values
):
    "`TransformBlock` for float targets"
    return TransformBlock(type_tfms=RegressionSetup(c=n_out))
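As a quick sanity check (illustrative only, with a made-up two-class vocabulary), we can inspect what `CategoryBlock` wires up:

cb = CategoryBlock(vocab=['cat', 'dog'])
test_eq(cb.type_tfms[0].__class__, Categorize) # the type transform is a `Categorize` instance
test_eq(cb.item_tfms[0], ToTensor)             # `ToTensor` is always linked as an item transform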
General API
from inspect import isfunction,ismethod
def _merge_grouper(o):
if isinstance(o, LambdaType): return id(o)
elif isinstance(o, type): return o
elif (isfunction(o) or ismethod(o)): return o.__qualname__
return o.__class__
def _merge_tfms(*tfms):
"Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
    g = groupby(concat(*tfms), _merge_grouper)
    return L(v[-1] for k,v in g.items()).map(instantiate)
def _zip(x): return L(x).zip()
from fastai.vision.core import *
from fastai.vision.data import *
tfms = _merge_tfms([Categorize, MultiCategorize, Categorize(['dog', 'cat'])], Categorize(['a', 'b']))
#If there are several instantiated versions, the last one is kept.
test_eq(len(tfms), 2)
test_eq(tfms[1].__class__, MultiCategorize)
test_eq(tfms[0].__class__, Categorize)
test_eq(tfms[0].vocab, ['a', 'b'])
tfms = _merge_tfms([PILImage.create, PILImage.show])
#Check that methods are properly separated
test_eq(len(tfms), 2)

tfms = _merge_tfms([show_image, set_trace])
#Check that functions are properly separated
test_eq(len(tfms), 2)
_f = lambda x: 0
test_eq(len(_merge_tfms([_f,lambda x: 1])), 2)
test_eq(len(_merge_tfms([_f,_f])), 1)
@docs
@funcs_kwargs
class DataBlock():
    "Generic container to quickly build `Datasets` and `DataLoaders`."
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
    _msg = "If you wanted to compose several transforms in your getter don't forget to wrap them in a `Pipeline`."
    def __init__(self,
        blocks:list=None, # One or more `TransformBlock`s
        dl_type:TfmdDL=None, # Task specific `TfmdDL`, defaults to `block`'s dl_type or `TfmdDL`
        getters:list=None, # Getter functions applied to results of `get_items`
        n_inp:int=None, # Number of inputs
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
        **kwargs,
    ):
        blocks = L(self.blocks if blocks is None else blocks)
        blocks = L(b() if callable(b) else b for b in blocks)
        self.type_tfms = blocks.attrgot('type_tfms', L())
        self.default_item_tfms  = _merge_tfms(*blocks.attrgot('item_tfms',  L()))
        self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
        for b in blocks:
            if getattr(b, 'dl_type', None) is not None: self.dl_type = b.dl_type
        if dl_type is not None: self.dl_type = dl_type
        self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
        self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))

        self.n_inp = ifnone(n_inp, max(1, len(blocks)-1))
        self.getters = ifnone(getters, [noop]*len(self.type_tfms))
        if self.get_x:
            if len(L(self.get_x)) != self.n_inp:
                raise ValueError(f'get_x contains {len(L(self.get_x))} functions, but must contain {self.n_inp} (one for each input)\n{self._msg}')
            self.getters[:self.n_inp] = L(self.get_x)
        if self.get_y:
            n_targs = len(self.getters) - self.n_inp
            if len(L(self.get_y)) != n_targs:
                raise ValueError(f'get_y contains {len(L(self.get_y))} functions, but must contain {n_targs} (one for each target)\n{self._msg}')
            self.getters[self.n_inp:] = L(self.get_y)

        if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
        self.new(item_tfms, batch_tfms)

    def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(
        lambda g,tt: (g.fs if isinstance(g, Pipeline) else L(g)) + tt)

    def new(self,
        item_tfms:list=None, # `ItemTransform`s, applied on an item
        batch_tfms:list=None, # `Transform`s or `RandTransform`s, applied by batch
    ):
        self.item_tfms  = _merge_tfms(self.default_item_tfms,  item_tfms)
        self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
        return self

    @classmethod
    def from_columns(cls,
        blocks:list=None, # One or more `TransformBlock`s
        getters:list=None, # Getter functions applied to results of `get_items`
        get_items:callable=None, # A function to get items
        **kwargs,
    ):
        if getters is None: getters = L(ItemGetter(i) for i in range(2 if blocks is None else len(L(blocks))))
        get_items = _zip if get_items is None else compose(get_items, _zip)
        return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)

    def datasets(self,
        source, # The data source
        verbose:bool=False, # Show verbose messages
    ) -> Datasets:
        self.source = source                     ; pv(f"Collecting items from {source}", verbose)
        items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
        splits = (self.splitter or RandomSplitter())(items)
        pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
        return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)

    def dataloaders(self,
        source, # The data source
        path:str='.', # Data source and default `Learner` path
        verbose:bool=False, # Show verbose messages
        **kwargs
    ) -> DataLoaders:
        dsets = self.datasets(source, verbose=verbose)
        kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
        return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)

    _docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
                 datasets="Create a `Datasets` object from `source`",
                 dataloaders="Create a `DataLoaders` object from `source`")
To build a `DataBlock` you need to give the library four things: the types of your input/labels, and at least two functions: `get_items` and `splitter`. You may also need to include `get_x` and `get_y`, or a more generic list of `getters` that are applied to the results of `get_items`.
`splitter` is a callable which, when called with `items`, returns a tuple of iterables representing the indices of the training and validation data.
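For instance, a minimal hand-rolled splitter might look like the sketch below (illustrative only; in practice you would normally use a built-in one such as `RandomSplitter` or `GrandparentSplitter`):

def last_n_splitter(n=2):
    "Illustrative splitter: hold out the last `n` items for validation"
    def _inner(items):
        cut = len(items) - n
        return list(range(cut)), list(range(cut, len(items)))
    return _inner

test_eq(last_n_splitter(2)(['a','b','c','d','e']), ([0,1,2], [3,4]))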
Once those are provided, you automatically get a `Datasets` or a `DataLoaders`:
show_doc(DataBlock.datasets)
dblock = DataBlock()
show_doc(dblock.dataloaders, name="DataBlock.dataloaders")
DataBlock.dataloaders [source]

DataBlock.dataloaders(source, path:str='.', verbose:bool=False, bs=64, shuffle=False, num_workers=None, do_setup=True, pin_memory=False, timeout=0, batch_size=None, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, wif=None, before_iter=None, after_item=None, before_batch=None, after_batch=None, after_iter=None, create_batches=None, create_item=None, create_batch=None, retain=None, get_idxs=None, sample=None, shuffle_fn=None, do_batch=None)

Create a `DataLoaders` object from `source`

| | Type | Default | Details |
|---|---|---|---|
| **`source`** | | | The data source |
| **`path`** | `str` | `'.'` | Data source and default `Learner` path |
| **`verbose`** | `bool` | `False` | Show verbose messages |
| | | **Valid Keyword Arguments** | |
| **`bs`** | `int` | `64` | Argument passed to `TfmdDL.__init__` |
| **`shuffle`** | `bool` | `False` | Argument passed to `TfmdDL.__init__` |

All remaining keyword arguments (`num_workers`, `do_setup`, `pin_memory`, `timeout`, `batch_size`, `drop_last`, `indexed`, `n`, `device`, `persistent_workers`, `wif`, `before_iter`, `after_item`, `before_batch`, `after_batch`, `after_iter`, `create_batches`, `create_item`, `create_batch`, `retain`, `get_idxs`, `sample`, `shuffle_fn`, `do_batch`) are likewise passed through to `TfmdDL.__init__`.

**Returns**: `DataLoaders`
You can create a `DataBlock` by passing functions:
mnist = DataBlock(blocks = (ImageBlock(cls=PILImageBW),CategoryBlock),
                  get_items = get_image_files,
                  splitter = GrandparentSplitter(),
                  get_y = parent_label)
Each type comes with default transforms that will be applied:

- at the base level to create items in a tuple (usually input, target) from the base elements (like filenames)
- at the item level of the datasets
- at the batch level

They are called respectively type transforms, item transforms, batch transforms. In the case of MNIST here, the type transforms are the method to create a `PILImageBW` (for the input) and the `Categorize` transform (for the target), the item transform is `ToTensor` and the batch transforms are `Cuda` and `IntToFloatTensor`. You can add any other transforms by passing them in `DataBlock.datasets` or `DataBlock.dataloaders`.
test_eq(mnist.type_tfms[0], [PILImageBW.create])
test_eq(mnist.type_tfms[1].map(type), [Categorize])
test_eq(mnist.default_item_tfms.map(type), [ToTensor])
test_eq(mnist.default_batch_tfms.map(type), [IntToFloatTensor])
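As mentioned above, extra transforms can be layered on top of these defaults. A minimal sketch, assuming `Resize` from `fastai.vision.augment`, that adds an item transform through `DataBlock.new` (note that `new` updates the block in place and returns it):

from fastai.vision.augment import Resize

mnist32 = mnist.new(item_tfms=Resize(32))                # merged with the default ToTensor
test_eq(mnist32.item_tfms.map(type), [ToTensor, Resize]) # the merge keeps first-seen order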
dsets = mnist.datasets(untar_data(URLs.MNIST_TINY))
test_eq(dsets.vocab, ['3', '7'])
x,y = dsets.train[0]
test_eq(x.size,(28,28))
show_at(dsets.train, 0, cmap='Greys', figsize=(2,2));
test_fail(lambda: DataBlock(wrong_kwarg=42, wrong_kwarg2='foo'))
We can pass any number of blocks to `DataBlock`; we can then define which are the input blocks and which are the target blocks by changing `n_inp`. For example, defining `n_inp=2` will consider the first two blocks passed as inputs and the others as targets.
mnist = DataBlock((ImageBlock, ImageBlock, CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(),
                  get_y=parent_label)
dsets = mnist.datasets(untar_data(URLs.MNIST_TINY))
test_eq(mnist.n_inp, 2)
test_eq(len(dsets.train[0]), 3)
test_fail(lambda: DataBlock((ImageBlock, ImageBlock, CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(),
                            get_y=[parent_label, noop],
                            n_inp=2), msg='get_y contains 2 functions, but must contain 1 (one for each output)')
mnist = DataBlock((ImageBlock, ImageBlock, CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(),
                  n_inp=1,
                  get_y=[noop, Pipeline([noop, parent_label])])
dsets = mnist.datasets(untar_data(URLs.MNIST_TINY))
test_eq(len(dsets.train[0]), 3)
Debugging
def _short_repr(x):
if isinstance(x, tuple): return f'({", ".join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{", ".join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {"x".join([str(d) for d in x.shape])}'
test_eq(_short_repr(TensorImage(torch.randn(40,56))), 'TensorImage of size 40x56')
test_eq(_short_repr(TensorCategory([1,2,3])), 'TensorCategory([1, 2, 3])')
test_eq(_short_repr((TensorImage(torch.randn(40,56)), TensorImage(torch.randn(32,20)))),
        '(TensorImage of size 40x56, TensorImage of size 32x20)')
def _apply_pipeline(p, x):
print(f" {p}\n starting from\n {_short_repr(x)}")
for f in p.fs:
        name = f.name
        try:
            x = f(x)
            if name != "noop": print(f"    applying {name} gives\n      {_short_repr(x)}")
except Exception as e:
print(f" applying {name} failed.")
raise e
return x
from fastai.data.load import _collate_types
def _find_fail_collate(s):
    s = L(*s)
    for x in s[0]:
        if not isinstance(x, _collate_types): return f"{type(x).__name__} is not collatable"
    for i in range_of(s[0]):
        try: _ = default_collate(s.itemgot(i))
        except:
            shapes = [getattr(o[i], 'shape', None) for o in s]
            return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(s) for s in shapes])}"
@patch
def summary(self:DataBlock,
    source, # The data source
    bs:int=4, # The batch size
    show_batch:bool=False, # Call `show_batch` after the summary
    **kwargs, # Additional keyword arguments to `show_batch`
):
    "Steps through the transform pipeline for one batch, and optionally calls `show_batch(**kwargs)` on the transient `Dataloaders`."
    print(f"Setting-up type transforms pipelines")
    dsets = self.datasets(source, verbose=True)
    print("\nBuilding one sample")
    for tl in dsets.train.tls:
        _apply_pipeline(tl.tfms, get_first(dsets.train.items))
    print(f"\nFinal sample: {dsets.train[0]}\n\n")

    dls = self.dataloaders(source, bs=bs, verbose=True)
    print("\nBuilding one batch")
    if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
        print("Applying item_tfms to the first sample:")
        s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
        print(f"\nAdding the next {bs-1} samples")
        s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
    else:
        print("No item_tfms to apply")
        s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]

    if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying before_batch to the list of samples")
        s = _apply_pipeline(dls.train.before_batch, s)
    else: print("\nNo before_batch transform to apply")

    print("\nCollating items in a batch")
    try:
        b = dls.train.create_batch(s)
        b = retain_types(b, s[0] if is_listy(s) else s)
    except Exception as e:
        print("Error! It's not possible to collate your items in a batch")
        why = _find_fail_collate(s)
        print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
        raise e

    if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying batch_tfms to the batch built")
        b = to_device(b, dls.device)
        b = _apply_pipeline(dls.train.after_batch, b)
    else: print("\nNo batch_tfms to apply")

    if show_batch: dls.show_batch(**kwargs)
show_doc(DataBlock.summary)
DataBlock.summary [source]

DataBlock.summary(source, bs:int=4, show_batch:bool=False, **kwargs)

Steps through the transform pipeline for one batch, and optionally calls `show_batch(**kwargs)` on the transient `Dataloaders`.

| | Type | Default | Details |
|---|---|---|---|
| **`source`** | | | The data source |
| **`bs`** | `int` | `4` | The batch size |
| **`show_batch`** | `bool` | `False` | Call `show_batch` after the summary |
| **`kwargs`** | | | Additional keyword arguments to `show_batch` |
Besides stepping through the transformations, `summary()` provides a shortcut `dls.show_batch(...)` to see the data. E.g.

pets.summary(path/"images", bs=8, show_batch=True, unique=True,...)

is a shortcut for:

pets.summary(path/"images", bs=8)
dls = pets.dataloaders(path/"images", bs=8)
dls.show_batch(unique=True,...) # See different tfms applied to the same image
Export -
from nbdev import nbdev_export
nbdev_export()