#|hide
#|skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#|default_exp tabular.data
#|export
from __future__ import annotations
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.tabular.core import *
#|hide
from nbdev.showdoc import *
Helper functions to get data in a `DataLoaders` for the tabular application, and the higher-level class `TabularDataLoaders`.
The main class to get your data ready for model training is `TabularDataLoaders` and its factory methods. Check out the tabular tutorial for examples of use.
#|export
class TabularDataLoaders(DataLoaders):
    "Basic wrapper around several `DataLoader`s with factory methods for tabular data"
    @classmethod
    @delegates(Tabular.dataloaders, but=["dl_type", "dl_kwargs"])
    def from_df(cls,
        df:pd.DataFrame,
        path:(str,Path)='.', # Location of `df`, defaults to current working directory
        procs:list=None, # List of `TabularProc`s
        cat_names:list=None, # Column names pertaining to categorical variables
        cont_names:list=None, # Column names pertaining to continuous variables
        y_names:list=None, # Names of the dependent variables
        y_block:TransformBlock=None, # `TransformBlock` to use for the target(s)
        valid_idx:list=None, # List of indices to use for the validation set, defaults to a random split
        **kwargs
    ):
        "Create `TabularDataLoaders` from `df` in `path` using `procs`"
        if cat_names is None: cat_names = []
        if cont_names is None:
            # Infer continuous columns as every column not already claimed as a
            # categorical or target column. Iterate `df.columns` (not a bare set
            # difference) so the inferred order is deterministic and follows `df`.
            reserved = set(L(cat_names)) | set(L(y_names))
            cont_names = [c for c in df.columns if c not in reserved]
        splits = RandomSplitter()(df) if valid_idx is None else IndexSplitter(valid_idx)(df)
        to = TabularPandas(df, procs, cat_names, cont_names, y_names, splits=splits, y_block=y_block)
        return to.dataloaders(path=path, **kwargs)

    @classmethod
    def from_csv(cls,
        csv:(str,Path,io.BufferedReader), # A csv of training data
        skipinitialspace:bool=True, # Skip spaces after delimiter
        **kwargs
    ):
        "Create `TabularDataLoaders` from `csv` file in `path` using `procs`"
        # Thin wrapper: read the csv with pandas, then defer to `from_df`.
        return cls.from_df(pd.read_csv(csv, skipinitialspace=skipinitialspace), **kwargs)

    @delegates(TabDataLoader.__init__)
    def test_dl(self,
        test_items, # Items to create new test `TabDataLoader` formatted the same as the training data
        rm_type_tfms=None, # Number of `Transform`s to be removed from `procs`
        process:bool=True, # Apply validation `TabularProc`s to `test_items` immediately
        inplace:bool=False, # Keep separate copy of original `test_items` in memory if `False`
        **kwargs
    ):
        "Create test `TabDataLoader` from `test_items` using validation `procs`"
        # Build a new `Tabular` object sharing the training set's procs/metadata...
        to = self.train_ds.new(test_items, inplace=inplace)
        # ...apply those (already-fitted) procs to the new items on request...
        if process: to.process()
        # ...and wrap it in a loader configured like the validation one.
        return self.valid.new(to, **kwargs)
# Register `TabularDataLoaders` as the `DataLoaders` subclass produced by `Tabular.dataloaders`.
Tabular._dbunch_type = TabularDataLoaders
# Re-apply `delegates` so `from_csv`'s signature also exposes `from_df`'s keyword arguments
# (it forwards `**kwargs` straight to `from_df`).
TabularDataLoaders.from_csv = delegates(to=TabularDataLoaders.from_df)(TabularDataLoaders.from_csv)
This class should not be used directly; one of the factory methods should be preferred instead. All those factory methods accept as arguments:

- `cat_names`: the names of the categorical variables
- `cont_names`: the names of the continuous variables
- `y_names`: the names of the dependent variables
- `y_block`: the `TransformBlock` to use for the target
- `valid_idx`: the indices to use for the validation set (defaults to a random split otherwise)
- `bs`: the batch size
- `val_bs`: the batch size for the validation `DataLoader` (defaults to `bs`)
- `shuffle_train`: whether we shuffle the training `DataLoader` or not
- `n`: overrides the number of elements in the dataset
- `device`: the PyTorch device to use (defaults to `default_device()`)

show_doc(TabularDataLoaders.from_df)
TabularDataLoaders.from_df
[source]
TabularDataLoaders.from_df
(df
:DataFrame
,path
:str | Path
='.'
,procs
:list
=None
,cat_names
:list
=None
,cont_names
:list
=None
,y_names
:list
=None
,y_block
:TransformBlock
=None
,valid_idx
:list
=None
,bs
=64
,shuffle_train
=None
,shuffle
=True
,val_shuffle
=False
,n
=None
,device
=None
,drop_last
=None
,val_bs
=None
)
Create TabularDataLoaders
from df
in path
using procs
Let's have a look on an example with the adult dataset:
# Download (and cache) the adult sample dataset; `skipinitialspace=True` trims the
# stray space after each comma in the raw csv so labels like " Private" become "Private".
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv', skipinitialspace=True)
df.head()
age | workclass | fnlwgt | education | education-num | marital-status | occupation | relationship | race | sex | capital-gain | capital-loss | hours-per-week | native-country | salary | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 49 | Private | 101320 | Assoc-acdm | 12.0 | Married-civ-spouse | NaN | Wife | White | Female | 0 | 1902 | 40 | United-States | >=50k |
1 | 44 | Private | 236746 | Masters | 14.0 | Divorced | Exec-managerial | Not-in-family | White | Male | 10520 | 0 | 45 | United-States | >=50k |
2 | 38 | Private | 96185 | HS-grad | NaN | Divorced | NaN | Unmarried | Black | Female | 0 | 0 | 32 | United-States | <50k |
3 | 38 | Self-emp-inc | 112847 | Prof-school | 15.0 | Married-civ-spouse | Prof-specialty | Husband | Asian-Pac-Islander | Male | 0 | 0 | 40 | United-States | >=50k |
4 | 42 | Self-emp-not-inc | 82297 | 7th-8th | NaN | Married-civ-spouse | Other-service | Wife | Black | Female | 0 | 0 | 50 | United-States | <50k |
# Declare which columns are categorical vs continuous, and the preprocessing steps.
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
# Build the `DataLoaders`, holding out rows 800-999 as the validation set.
dls = TabularDataLoaders.from_df(df, path, procs=procs, cat_names=cat_names, cont_names=cont_names,
y_names="salary", valid_idx=list(range(800,1000)), bs=64)
dls.show_batch()
workclass | education | marital-status | occupation | relationship | race | education-num_na | age | fnlwgt | education-num | salary | |
---|---|---|---|---|---|---|---|---|---|---|---|
0 | Private | HS-grad | Married-civ-spouse | Adm-clerical | Husband | White | False | 24.0 | 121312.998272 | 9.0 | <50k |
1 | Private | HS-grad | Never-married | Other-service | Not-in-family | White | False | 19.0 | 198320.000325 | 9.0 | <50k |
2 | Private | Bachelors | Married-civ-spouse | Sales | Husband | White | False | 66.0 | 169803.999308 | 13.0 | >=50k |
3 | Private | HS-grad | Divorced | Adm-clerical | Unmarried | White | False | 40.0 | 799280.980929 | 9.0 | <50k |
4 | Local-gov | 10th | Never-married | Other-service | Own-child | White | False | 18.0 | 55658.003629 | 6.0 | <50k |
5 | Private | HS-grad | Never-married | Handlers-cleaners | Other-relative | White | False | 30.0 | 375827.003847 | 9.0 | <50k |
6 | Private | Some-college | Never-married | Handlers-cleaners | Own-child | White | False | 20.0 | 173723.999335 | 10.0 | <50k |
7 | ? | Some-college | Never-married | ? | Own-child | White | False | 21.0 | 107800.997986 | 10.0 | <50k |
8 | Private | HS-grad | Never-married | Handlers-cleaners | Own-child | White | False | 19.0 | 263338.000072 | 9.0 | <50k |
9 | Private | Some-college | Married-civ-spouse | Tech-support | Husband | White | False | 35.0 | 194590.999986 | 10.0 | <50k |
show_doc(TabularDataLoaders.from_csv)
TabularDataLoaders.from_csv
[source]
TabularDataLoaders.from_csv
(csv
:str | Path | io.BufferedReader
,skipinitialspace
:bool
=True
,path
:str | Path
='.'
,procs
:list
=None
,cat_names
:list
=None
,cont_names
:list
=None
,y_names
:list
=None
,y_block
:TransformBlock
=None
,valid_idx
:list
=None
,bs
=64
,shuffle_train
=None
,shuffle
=True
,val_shuffle
=False
,n
=None
,device
=None
,drop_last
=None
,val_bs
=None
)
Create TabularDataLoaders
from csv
file in path
using procs
# Same setup as above, but reading the csv directly via the `from_csv` factory method.
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, procs=procs, cat_names=cat_names, cont_names=cont_names,
y_names="salary", valid_idx=list(range(800,1000)), bs=64)
show_doc(TabularDataLoaders.test_dl)
TabularDataLoaders.test_dl
[source]
TabularDataLoaders.test_dl
(test_items
,rm_type_tfms
=None
,process
:bool
=True
,inplace
:bool
=False
,bs
=16
,shuffle
=False
,after_batch
=None
,num_workers
=0
,verbose
=False
,do_setup
=True
,pin_memory
=False
,timeout
=0
,batch_size
=None
,drop_last
=False
,indexed
=None
,n
=None
,device
=None
,persistent_workers
=False
,wif
=None
,before_iter
=None
,after_item
=None
,before_batch
=None
,after_iter
=None
,create_batches
=None
,create_item
=None
,create_batch
=None
,retain
=None
,get_idxs
=None
,sample
=None
,shuffle_fn
=None
,do_batch
=None
)
Create test TabDataLoader
from test_items
using validation procs
External structured data files can contain unexpected spaces, e.g. after a comma. We can see that in the first row of adult.csv: `"49, Private,101320, ..."`. Often trimming is needed. Pandas has a convenient parameter `skipinitialspace` that is exposed by `TabularDataLoaders.from_csv()`. Otherwise, category labels used for inference later, such as `workclass`:`Private`, will be wrongly categorized to 0 or `"#na#"` if the training label was read as `" Private"`. Let's test this feature.
# Single inference row with clean (untrimmed-space-free) labels. If the training
# labels had been read with leading spaces (e.g. " Private"), these clean values
# would be miscategorized to 0 / "#na#".
test_data = {
    'age': [49],
    'workclass': ['Private'],
    'fnlwgt': [101320],
    'education': ['Assoc-acdm'],
    'education-num': [12.0],
    'marital-status': ['Married-civ-spouse'],
    'occupation': [''],
    'relationship': ['Wife'],
    'race': ['White'],
}
test_df = pd.DataFrame(test_data)  # renamed from `input`, which shadowed the builtin
tdl = dls.test_dl(test_df)
# 'Private' must not be encoded as category 0 (the unknown/#na# slot)
test_ne(0, tdl.dataset.iloc[0]['workclass'])
#|hide
# Export this notebook's `#|export` cells into the fastai library modules.
from nbdev.export import notebook2script
notebook2script()
Converted 00_torch_core.ipynb. Converted 01_layers.ipynb. Converted 01a_losses.ipynb. Converted 02_data.load.ipynb. Converted 03_data.core.ipynb. Converted 04_data.external.ipynb. Converted 05_data.transforms.ipynb. Converted 06_data.block.ipynb. Converted 07_vision.core.ipynb. Converted 08_vision.data.ipynb. Converted 09_vision.augment.ipynb. Converted 09b_vision.utils.ipynb. Converted 09c_vision.widgets.ipynb. Converted 10_tutorial.pets.ipynb. Converted 10b_tutorial.albumentations.ipynb. Converted 11_vision.models.xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_callback.core.ipynb. Converted 13a_learner.ipynb. Converted 13b_metrics.ipynb. Converted 14_callback.schedule.ipynb. Converted 14a_callback.data.ipynb. Converted 15_callback.hook.ipynb. Converted 15a_vision.models.unet.ipynb. Converted 16_callback.progress.ipynb. Converted 17_callback.tracker.ipynb. Converted 18_callback.fp16.ipynb. Converted 18a_callback.training.ipynb. Converted 18b_callback.preds.ipynb. Converted 19_callback.mixup.ipynb. Converted 20_interpret.ipynb. Converted 20a_distributed.ipynb. Converted 21_vision.learner.ipynb. Converted 22_tutorial.imagenette.ipynb. Converted 23_tutorial.vision.ipynb. Converted 24_tutorial.image_sequence.ipynb. Converted 24_tutorial.siamese.ipynb. Converted 24_vision.gan.ipynb. Converted 30_text.core.ipynb. Converted 31_text.data.ipynb. Converted 32_text.models.awdlstm.ipynb. Converted 33_text.models.core.ipynb. Converted 34_callback.rnn.ipynb. Converted 35_tutorial.wikitext.ipynb. Converted 37_text.learner.ipynb. Converted 38_tutorial.text.ipynb. Converted 39_tutorial.transformers.ipynb. Converted 40_tabular.core.ipynb. Converted 41_tabular.data.ipynb. Converted 42_tabular.model.ipynb. Converted 43_tabular.learner.ipynb. Converted 44_tutorial.tabular.ipynb. Converted 45_collab.ipynb. Converted 46_tutorial.collab.ipynb. Converted 50_tutorial.datablock.ipynb. Converted 60_medical.imaging.ipynb. Converted 61_tutorial.medical_imaging.ipynb. 
Converted 65_medical.text.ipynb. Converted 70_callback.wandb.ipynb. Converted 71_callback.tensorboard.ipynb. Converted 72_callback.neptune.ipynb. Converted 73_callback.captum.ipynb. Converted 74_callback.azureml.ipynb. Converted 97_test_utils.ipynb. Converted 99_pytorch_doc.ipynb. Converted dev-setup.ipynb. Converted app_examples.ipynb. Converted camvid.ipynb. Converted migrating_catalyst.ipynb. Converted migrating_ignite.ipynb. Converted migrating_lightning.ipynb. Converted migrating_pytorch.ipynb. Converted migrating_pytorch_verbose.ipynb. Converted ulmfit.ipynb. Converted index.ipynb. Converted index_original.ipynb. Converted quick_start.ipynb. Converted tutorial.ipynb.