#!/usr/bin/env python
# coding: utf-8

# # Computer vision data

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')
from fastai.gen_doc.nbdoc import *
from fastai import *
from fastai.vision import *

# This module contains the classes that define datasets handling [`Image`](/vision.image.html#Image) objects and their transformations. As usual, we'll start with a quick overview before we get into the detailed API docs.

# ## Quickly get your data ready for training

# To get you started as easily as possible, fastai provides two helper functions to create a [`DataBunch`](/basic_data.html#DataBunch) object that you can directly use for training a classifier. To demonstrate them, you'll first need to download and untar the file by executing the following cell. This will create a data folder containing an MNIST subset in `data/mnist_sample`.

# In[2]:

path = untar_data(URLs.MNIST_SAMPLE); path

# There are a number of ways to create an [`ImageDataBunch`](/vision.data.html#ImageDataBunch). One common approach is to use *Imagenet-style folders* (see further down the page for details) with [`ImageDataBunch.from_folder`](/vision.data.html#ImageDataBunch.from_folder):

# In[3]:

tfms = get_transforms(do_flip=False)
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24)

# Here the datasets will be automatically created in the structure of *Imagenet-style folders*. The parameters specified are:
# - the transforms to apply to the images in `ds_tfms` (here with `do_flip=False` because we don't want to flip numbers),
# - the target `size` of our pictures (here 24).
#
# As with all [`DataBunch`](/basic_data.html#DataBunch) usage, a `train_dl` and a `valid_dl` are created that are of the type PyTorch [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
#
# If you want to have a look at a few images inside a batch, you can use [`ImageDataBunch.show_batch`](/vision.data.html#ImageDataBunch.show_batch). The `rows` argument is the number of rows and columns to display.

# In[ ]:

data.show_batch(rows=3, figsize=(5,5))

# The second way to define the data for a classifier requires a structure like this:
# ```
# path\
#   train\
#   test\
#   labels.csv
# ```
# where the `labels.csv` file defines the label(s) of each image in the training set. This is the format you will need to use when each image can have multiple labels. It also works with single labels:

# In[ ]:

pd.read_csv(path/'labels.csv').head()

# You can then use [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv):

# In[ ]:

data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=28)

# In[ ]:

data.show_batch(rows=3, figsize=(5,5))

# An example of multiclassification can be downloaded with the following cell. It's a sample of the [planet dataset](https://www.google.com/search?q=kaggle+planet&rlz=1C1CHBF_enFR786FR786&oq=kaggle+planet&aqs=chrome..69i57j0.1563j0j7&sourceid=chrome&ie=UTF-8).

# In[ ]:

planet = untar_data(URLs.PLANET_SAMPLE)

# If we open the labels file, we see that each image has one or more tags, separated by a space.

# In[ ]:

df = pd.read_csv(planet/'labels.csv')
df.head()

# In[ ]:

data = ImageDataBunch.from_csv(planet, folder='train', size=128, suffix='.jpg',
    ds_tfms=get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.))
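# To make the multi-label format concrete, here's a quick check with plain pandas (a minimal sketch, assuming the sample's columns are named `image_name` and `tags` like the full Kaggle dataset): splitting the space-separated `tags` column gives the list of labels for each image.

# In[ ]:

# Split the space-separated `tags` column into per-image label lists.
# The column name is an assumption based on the full Kaggle planet dataset.
df['tags'].str.split(' ').head()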
# The `show_batch` method will then show all the labels that correspond to each image.

# In[ ]:

data.show_batch(rows=3, figsize=(10,8), ds_type=DatasetType.Valid)

# You can find more ways to build an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) without the factory methods in [`data_block`](/data_block.html#data_block).

# In[2]:

show_doc(ImageDataBunch, doc_string=False)

# ### Factory methods

# In[3]:

show_doc(ImageDataBunch.create, arg_comments={
    'bs': 'Desired batch size for the dataloaders',
    'num_workers': 'The number of processes to launch for data collection',
    'ds_tfms': 'Tuple of two lists of transforms (the first for the training set, the second for the validation and test sets)',
    'size': 'Target size for those transforms',
    'tfms': 'List of transforms to be applied at the batch level (like normalization)',
    'device': 'The device on which to put the batches'
})

# You don't often need to call this directly yourself; instead, you'll normally use one of the convenience wrappers below. However, these wrappers all accept `kwargs` that are passed to this method, so you can pass any of the above parameters as well.
#
# If you quickly want to get an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) and train a model, you should process your data to have it in one of the formats the following functions handle.

# In[4]:

show_doc(ImageDataBunch.from_folder)

# "*Imagenet-style*" datasets look something like this (note that the test folder is optional):
#
# ```
# path\
#   train\
#     clas1\
#     clas2\
#     ...
#   valid\
#     clas1\
#     clas2\
#     ...
#   test\
# ```
#
# For example:

# In[ ]:

data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24)

# Note that this (and all the factory methods in this section) passes any `kwargs` to [`ImageDataBunch.create`](/vision.data.html#ImageDataBunch.create).

# In[5]:

show_doc(ImageDataBunch.from_csv)

# Create an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) from `path` by splitting the data in `folder`, labelled in the file `csv_labels`, into a training and a validation set. Use `valid_pct` to indicate the percentage of the total images to use for the validation set. An optional `test` folder contains unlabelled data, and `suffix` is an optional suffix to add to the filenames in `csv_labels` (such as '.jpg').
# For example:

# In[ ]:

data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=24);

# In[6]:

show_doc(ImageDataBunch.from_df)

# Same as [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv), but passing in a `DataFrame` instead of a csv file. For example:

# In[ ]:

df = pd.read_csv(path/'labels.csv', header='infer')
df.head()

# In[ ]:

data = ImageDataBunch.from_df(path, df, ds_tfms=tfms, size=24)
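# Since all of these factory methods forward their extra `kwargs` to [`ImageDataBunch.create`](/vision.data.html#ImageDataBunch.create), loader-level parameters such as `bs` and `num_workers` (documented above) can be set from any of them. A minimal sketch (the values here are arbitrary):

# In[ ]:

# `bs` and `num_workers` are forwarded through `kwargs` to `ImageDataBunch.create`.
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=24, bs=16, num_workers=2)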
# Different datasets are labeled in many different ways. The following methods can help extract the labels from the dataset in a wide variety of situations. The way they are built in fastai is constructive: there are methods which do a lot for you but apply in specific circumstances, and there are methods which do less for you but give you more flexibility.
#
# In this case the hierarchy is:
#
# 1. [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re): gets the labels from the filenames using a regular expression
# 2. [`ImageDataBunch.from_name_func`](/vision.data.html#ImageDataBunch.from_name_func): gets the labels from the filenames using any function
# 3. [`ImageDataBunch.from_lists`](/vision.data.html#ImageDataBunch.from_lists): the labels need to be provided as an input in a list

# In[7]:

show_doc(ImageDataBunch.from_name_re)

# Creates an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) from `fnames`, calling a regular expression (containing one *re group*) on the file names to get the labels, putting aside `valid_pct` for the validation set. In the same way as [`ImageDataBunch.from_csv`](/vision.data.html#ImageDataBunch.from_csv), an optional `test` folder contains unlabelled data.
#
# Our previously created dataframe contains the labels in the filenames, so we can leverage it to test this new method. [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re) needs the exact path of each file, so we will append the data path to each filename before creating our [`ImageDataBunch`](/vision.data.html#ImageDataBunch) object.

# In[ ]:

fn_paths = [path/name for name in df['name']]; fn_paths[:2]

# In[ ]:

pat = r"/(\d)/\d+\.png$"
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=24)

# In[ ]:

data.classes

# In[8]:

show_doc(ImageDataBunch.from_name_func)

# Works in the same way as [`ImageDataBunch.from_name_re`](/vision.data.html#ImageDataBunch.from_name_re), but instead of a regular expression it expects a function that will determine how to extract the labels from the filenames. (Note that `from_name_re` uses this function in its implementation.)
#
# To test it we could build a function with our previous regex. Let's try another, similar approach to show that the labels can be obtained in a different way.

# In[ ]:

def get_labels(file_path): return '3' if '/3/' in str(file_path) else '7'
data = ImageDataBunch.from_name_func(path, fn_paths, label_func=get_labels, ds_tfms=tfms, size=24)
data.classes

# In[9]:

show_doc(ImageDataBunch.from_lists)

# The most flexible factory function; pass in a list of `labels` that correspond to each of the filenames in `fnames`.
#
# To show an example, we have to build the labels list outside our [`ImageDataBunch`](/vision.data.html#ImageDataBunch) object and give it as an argument when we call `from_lists`. Let's use our previously created function to build our labels list.

# In[ ]:

labels_ls = list(map(get_labels, fn_paths))
data = ImageDataBunch.from_lists(path, fn_paths, labels=labels_ls, ds_tfms=tfms, size=24)
data.classes

# ### Methods

# In[10]:

show_doc(ImageDataBunch.show_batch)

# Create a `rows` by `rows` grid of images from dataset `ds_type` for a `figsize` figure. This function works for all types of computer vision data (see [`data_block`](/data_block.html#data_block) for more examples).
#
# Once you have your [`ImageDataBunch`](/vision.data.html#ImageDataBunch), you can have a quick look at your data by using this:

# In[ ]:

data.show_batch(rows=3, figsize=(6,6))

# In[11]:

show_doc(ImageDataBunch.labels_to_csv)

# Save the file names and labels of your data in a csv file at `dest`.

# In the next two methods we will use a new dataset, CIFAR. This is because the second method will get the statistics of our dataset, and we want to be able to show different statistics per channel. If we were to use MNIST, these statistics would be the same for every channel. White pixels are [255,255,255] and black pixels are [0,0,0] (or, in normalized form, [1,1,1] and [0,0,0]), so there is no variance between channels.
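# A quick sanity check of that claim (a minimal sketch, assuming your fastai version provides `DataBunch.one_batch`): on the MNIST sample, the per-channel means of a batch should be (nearly) identical.

# In[ ]:

# Grab one batch from the MNIST databunch built above and compare channel means.
x, y = data.one_batch()
[x[:, c].mean().item() for c in range(x.size(1))]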
# In[ ]:

path = untar_data(URLs.CIFAR); path

# In[12]:

show_doc(channel_view)

# In[ ]:

data = ImageDataBunch.from_folder(path, ds_tfms=tfms, valid='test', size=24)

# In[ ]:

def channel_view(x:Tensor)->Tensor:
    "Make channel the first axis of `x` and flatten remaining axes"
    return x.transpose(0,1).contiguous().view(x.shape[1],-1)

# This function takes a tensor and flattens all dimensions except the channels, which it keeps as the first axis. It is used to feed [`ImageDataBunch.batch_stats`](/vision.data.html#ImageDataBunch.batch_stats) so that it can get the pixel statistics of a whole batch.
#
# Let's take as an example the dimensions of our MNIST batches: 128, 3, 24, 24.

# In[ ]:

t = torch.Tensor(128, 3, 24, 24)

# In[ ]:

t.size()

# In[ ]:

tensor = channel_view(t)

# In[ ]:

tensor.size()

# In[13]:

show_doc(ImageDataBunch.batch_stats)

# Gets the statistics of each channel of a batch of data. If no functions are specified, the default statistics are the mean and the standard deviation.

# In[ ]:

data.batch_stats()

# In[14]:

show_doc(ImageDataBunch.normalize)

# Adds the normalize transform to the set of transforms associated with the data. In the fastai library we have `imagenet_stats`, `cifar_stats` and `mnist_stats`, so we can add normalization easily with any of these datasets. Let's see an example with our dataset of choice: CIFAR.

# In[ ]:

data.normalize(cifar_stats)

# In[ ]:

data.batch_stats()

# ### Other functions

# In[15]:

show_doc(show_image_batch, arg_comments={
    'dl': 'A dataloader from which to show a sample',
    'classes': 'List of classes (for the labels)',
    'rows': 'Will make a square of `rows` by `rows` images',
    'figsize': 'Size of the figure shown'
})

# This is a functional version of [`ImageDataBunch.show_batch`](/vision.data.html#ImageDataBunch.show_batch).

# ## Data normalization

# You may also want to normalize your data, which can be done with the following functions.

# In[16]:

show_doc(normalize)

# In[17]:

show_doc(denormalize)

# In[18]:

show_doc(normalize_funcs, doc_string=False)

# Create [`normalize`](/vision.data.html#normalize) and [`denormalize`](/vision.data.html#denormalize) functions using `mean` and `std`. `device` will store them on the device specified. `do_y` determines whether the target should also be normalized.
# On MNIST the mean and std are 0.1307 and 0.3081 respectively (values commonly quoted online). If you're using a pretrained model, you'll need to use the normalization that was used to train the model. The imagenet norm and denorm functions are stored as constants inside the library, named `imagenet_norm` and `imagenet_denorm`. If you're training a model on CIFAR-10, you can also use `cifar_norm` and `cifar_denorm`.
#
# You may sometimes see warnings about *clipping input data* when plotting normalized data. That's because even though the data is automatically denormalized when plotting, floating point errors may push some values slightly outside the correct range. You can safely ignore these warnings in this case.

# In[ ]:

data = ImageDataBunch.from_folder(untar_data(URLs.MNIST_SAMPLE), ds_tfms=tfms, size=24)
data.normalize()
data.show_batch(rows=3, figsize=(6,6))

# ## Datasets

# Depending on the task you are tackling, you'll need one of the following fastai datasets.

# In[19]:

show_doc(ImageClassificationDataset, title_level=3)

# This is the basic dataset for image classification: `fns` are the filenames of the images and `labels` the corresponding labels. Optionally, `classes` contains a name for each possible label.
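# Following that description, here is a minimal sketch of building the dataset directly (assuming the constructor takes parallel `fns` and `labels` lists; we reuse `fn_paths` and `labels_ls` from the MNIST examples earlier):

# In[ ]:

# Build an ImageClassificationDataset from parallel lists of filenames and labels.
ds = ImageClassificationDataset(fn_paths, labels_ls)
ds.classes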
# In[20]:

show_doc(ImageClassificationDataset.from_folder)

# Create an [`ImageClassificationDataset`](/vision.data.html#ImageClassificationDataset) automatically from a `folder`. If `classes` is None, it will be set to the names of the directories in `folder`. `check_ext` forces the function to only keep filenames with image extensions.

# In[21]:

show_doc(ImageClassificationDataset.from_single_folder, doc_string=False)

# Typically used to define a test set. Labels all the images in `folder` with `classes[0]`. `check_ext` forces the function to only keep filenames with image extensions.

# In[22]:

show_doc(ImageMultiDataset, doc_string=False, title_level=3)

# This is the basic dataset for image classification with multiple labels: `fns` are the filenames of the images and `labels` the corresponding labels (there may be more than one for each image). Optionally, `classes` contains a name for each possible label.

# In[23]:

show_doc(ImageMultiDataset.from_folder, doc_string=False)

# Create an [`ImageMultiDataset`](/vision.data.html#ImageMultiDataset) automatically in `path` from a `folder` and `fns`. If `classes` is None, it will be set to the names of the different `labels` seen. You can split the images in this `folder` into a train/valid dataset if `valid_pct` is non-zero. `check_ext` forces the function to only keep filenames with image extensions.

# In[24]:

show_doc(ImageMultiDataset.from_single_folder, doc_string=False)

# Typically used to define a test set. Labels all the images in `folder` with `classes[0]`. `check_ext` forces the function to only keep filenames with image extensions.

# To help scan a folder for these [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) objects, we use the following helper function:

# In[25]:

show_doc(get_image_files, doc_string=False)

# Return the list of files in `c` that are images. `check_ext` will filter to keep only the files with image extensions.

# In[26]:

show_doc(ImageMultiDataset.get_labels)

# Gets the labels of a batch of images of your choice. Pass in the batch number/index and it will return the labels for each of the examples in that batch.

# In[27]:

show_doc(ImageMultiDataset.encode)

# In[28]:

show_doc(SegmentationDataset, doc_string=False, title_level=3)

# This is the basic dataset for image segmentation: `x` contains the filenames of the images and `y` those of the masks.

# In[29]:

show_doc(ObjectDetectDataset, doc_string=False, title_level=3)

# This is the basic dataset for object detection: `x_fns` contains the filenames of the images, and `labelled_bbs` the corresponding bounding boxes with their labels. `classes` contains the list of classes.

# In[30]:

show_doc(ObjectDetectDataset.from_json)

# This factory method uses the following helper function.

# In[31]:

show_doc(get_annotations)

# Finally, to apply transformations to an [`Image`](/vision.image.html#Image) in a [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset), we use this last class.

# In[32]:

show_doc(DatasetTfm, doc_string=False, title_level=3)

# A dataset that applies the list of transforms `tfms` to every item drawn. If `tfms` should be applied to the targets as well, `tfm_y` should be True. `kwargs` will be passed to [`apply_tfms`](/vision.image.html#apply_tfms) internally.

# Then this last function automates the process of creating a [`DatasetTfm`](/vision.data.html#DatasetTfm):

# In[33]:

show_doc(transform_datasets, doc_string=False)

# Create train, valid and maybe test [`DatasetTfm`](/vision.data.html#DatasetTfm)s from `train_ds`, `valid_ds` and maybe `test_ds`, using `tfms`, which should be a tuple containing the transforms for the training set, then those for the validation and test sets.
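# To make this concrete, here's a minimal sketch (assuming `transform_datasets` returns the wrapped datasets in train/valid order when no `test_ds` is passed): we rebuild raw datasets from the MNIST sample and wrap them with `tfms`.

# In[ ]:

# Build untransformed datasets, then wrap each one in a DatasetTfm via transform_datasets.
mnist = untar_data(URLs.MNIST_SAMPLE)
train_ds = ImageClassificationDataset.from_folder(mnist/'train')
valid_ds = ImageClassificationDataset.from_folder(mnist/'valid')
tfmd_train, tfmd_valid = transform_datasets(train_ds, valid_ds, tfms=tfms, size=24)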
# In[34]:

show_doc(ImageClassificationBase, title_level=3, doc_string=False)

# The base class for computer vision datasets. Maps `classes` to indexes via `class2idx`.

# ## Links with the data block API

# The vision application adds a few methods to the data block API to implement data augmentation or to create an [`ImageDataBunch`](/vision.data.html#ImageDataBunch).

# In[35]:

show_doc(SplitDatasetsImage, title_level=3)

# In[36]:

show_doc(SplitDatasetsImage.transform)

# In[37]:

show_doc(SplitDatasetsImage.databunch)

# To change the default `extensions` in [`InputList.from_folder`](/data_block.html#InputList.from_folder) to image extensions, we subclass it as follows:

# In[38]:

show_doc(ImageFileList, title_level=3, doc_string=False)

# In[39]:

show_doc(ImageFileList.from_folder)

# ## Undocumented Methods - Methods moved below this line will intentionally be hidden

# ## New Methods - Please document or move to the undocumented section