#!/usr/bin/env python
# coding: utf-8

# # Image Segmentation using ResNet-34 and Kaggle Carvana Data

# Let's start by doing it the really simple way. We are going to use the Kaggle [Carvana](https://www.kaggle.com/c/carvana-image-masking-challenge) competition data, which you can download with the Kaggle API as usual.

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

# In[2]:

from my_scripts.ntfy.notification_callback import *

notif_cb = NotificationCallback()

# Import libraries

# In[3]:

from fastai.conv_learner import *
from fastai.dataset import *
from pathlib import Path
import json

# torch.cuda.set_device(0)

# Important to enable the next line.
# For image size 128 training, an epoch took 1 min 33 s instead of 2 min 11 s without it.
torch.backends.cudnn.benchmark = True

# ## Data

# ### Download dataset

# First, install the [official Kaggle API](https://github.com/Kaggle/kaggle-api).

# In[ ]:

get_ipython().system('pip install kaggle')

# In[5]:

get_ipython().system('kaggle --help')

# In[7]:

get_ipython().system('kaggle competitions files -c carvana-image-masking-challenge')

# In[10]:

get_ipython().run_line_magic('mkdir', 'data/carvana')

# In[12]:

get_ipython().system('kaggle competitions download -c carvana-image-masking-challenge -f metadata.csv.zip -p data/carvana')

# In[14]:

get_ipython().system('kaggle competitions download -c carvana-image-masking-challenge -f train_masks.zip -p data/carvana')
get_ipython().system('kaggle competitions download -c carvana-image-masking-challenge -f train_masks.csv.zip -p data/carvana')
get_ipython().system('kaggle competitions download -c carvana-image-masking-challenge -f sample_submission.csv -p data/carvana')
get_ipython().system('kaggle competitions download -c carvana-image-masking-challenge -f train.zip -p data/carvana')

# In[18]:

get_ipython().system('unzip data/carvana/metadata.csv.zip -d data/carvana/')
get_ipython().system('unzip data/carvana/sample_submission.csv.zip -d data/carvana/')
get_ipython().system('unzip data/carvana/train_masks.csv.zip -d data/carvana/')
get_ipython().system('unzip data/carvana/train_masks.zip -d data/carvana/')
get_ipython().system('unzip data/carvana/train.zip -d data/carvana/')

# In[20]:

get_ipython().run_line_magic('rm', 'data/carvana/metadata.csv.zip data/carvana/sample_submission.csv.zip data/carvana/train_masks.csv.zip data/carvana/train_masks.zip data/carvana/train.zip')

# In[21]:

get_ipython().run_line_magic('ls', '-lah data/carvana/')

# ### Setup

# Define directory and file paths:

# In[22]:

PATH = Path('data/carvana')
list(PATH.iterdir())

# In[5]:

MASKS_FN = 'train_masks.csv'
META_FN = 'metadata.csv'
TRAIN_DN = 'train'
MASKS_DN = 'train_masks'

# #### View data

# I don't really use the CSV files for much other than getting the list of images from them:

# In[6]:

masks_csv = pd.read_csv(PATH / MASKS_FN)
masks_csv.head()

# In[7]:

meta_csv = pd.read_csv(PATH / META_FN)
meta_csv.head()

# In[8]:

def show_img(im, figsize=None, ax=None, alpha=None):
    if not ax: fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha)
    ax.set_axis_off()
    return ax

# In[9]:

CAR_ID = '00087a6bd4dc'

# In[10]:

list((PATH / TRAIN_DN).iterdir())[:5]

# In[11]:

Image.open(PATH / TRAIN_DN / f'{CAR_ID}_01.jpg').resize((300, 200))

# In[12]:

list((PATH / MASKS_DN).iterdir())[:5]

# In[13]:

Image.open(PATH / MASKS_DN / f'{CAR_ID}_01_mask.gif').resize((300, 200))
# Each image name after the car ID has a 01, 02, etc. suffix. I've printed out all 16 of them for one car, and as you can see, those numbers are the 16 orientations of one car. I don't think anybody in this competition actually used this orientation information; I believe they all just treated the images separately.

# In[14]:

ims = [open_image(PATH / TRAIN_DN / f'{CAR_ID}_{i+1:02d}.jpg') for i in range(16)]

# In[15]:

fig, axes = plt.subplots(4, 4, figsize=(9, 6))
for i, ax in enumerate(axes.flat):
    show_img(ims[i], ax=ax)
plt.tight_layout(pad=0.1)

# ### Resize and convert

# These images are pretty big (over 1000 by 1000 in size), and just opening the JPEGs and resizing them is slow, so I preprocessed them all. Also, OpenCV can't handle GIF files, so I converted them.
#
# The steps: convert the GIFs into PNGs, i.e. just open each one up with PIL and save it as a PNG. As per usual for this kind of stuff, I do it with a ThreadPoolExecutor so I can take advantage of parallel processing. Then create separate directories `train-128` and `train_masks-128` which contain the 128 by 128 resized versions.
#
# _Tip: this is the kind of stuff that keeps you sane if you do it early in the process. So any time you get a new dataset, seriously think about creating a smaller version to make life fast. Any time you find yourself waiting on your computer, try to think of a way to create a smaller version._

# Convert train masks to PNG images:

# In[37]:

(PATH / 'train_masks_png').mkdir(exist_ok=True)

# In[59]:

def convert_img(fn):
    fn = fn.name
    Image.open(PATH / 'train_masks' / fn).save(PATH / 'train_masks_png' / f'{fn[:-4]}.png')

# In[60]:

files = list((PATH / 'train_masks').iterdir())
with ThreadPoolExecutor(8) as e:
    e.map(convert_img, files)

# Resize train masks PNG images to 128 by 128:

# In[61]:

(PATH / 'train_masks-128').mkdir(exist_ok=True)

# In[62]:

def resize_mask(fn):
    Image.open(fn).resize((128, 128)).save((fn.parent.parent) / 'train_masks-128' / fn.name)

# In[63]:

get_ipython().run_cell_magic('time', '', "files = list((PATH / 'train_masks_png').iterdir())\nwith ThreadPoolExecutor(8) as e:\n    e.map(resize_mask, files)\n")

# Resize train images to 128 by 128:

# In[64]:

(PATH / 'train-128').mkdir(exist_ok=True)

# In[65]:

def resize_img(fn):
    Image.open(fn).resize((128, 128)).save((fn.parent.parent) / 'train-128' / fn.name)

# In[66]:

get_ipython().run_cell_magic('time', '', "files = list((PATH / 'train').iterdir())\nwith ThreadPoolExecutor(8) as e:\n    e.map(resize_img, files)\n")

# ## Dataset

# In[16]:

TRAIN_DN = 'train-128'
MASKS_DN = 'train_masks-128'
sz = 128
bs = 64

# In[17]:

ims = [open_image(PATH / TRAIN_DN / f'{CAR_ID}_{i+1:02d}.jpg') for i in range(16)]
im_masks = [open_image(PATH / MASKS_DN / f'{CAR_ID}_{i+1:02d}_mask.png') for i in range(16)]

# So here is a **cool trick**: if you use the same axis object (`ax`) to plot an image twice, and the second time you pass `alpha` (which, as you might know, means transparency in the computer vision world), then you can plot the mask over the top of the photo. So here is a nice way to see all the masks on top of the photos for all of the cars in one group.
# In[18]:

fig, axes = plt.subplots(4, 4, figsize=(12, 10))
for i, ax in enumerate(axes.flat):
    ax = show_img(ims[i], ax=ax)
    show_img(im_masks[i][..., 0], ax=ax, alpha=0.5)
plt.tight_layout(pad=0.1)

# In[19]:

class MatchedFilesDataset(FilesDataset):
    def __init__(self, fnames, y, transform, path):
        self.y = y
        assert(len(fnames) == len(y))
        super().__init__(fnames, transform, path)
    def get_y(self, i):
        return open_image(os.path.join(self.path, self.y[i]))
    def get_c(self):
        return 0

# In[20]:

# train-128/00087a6bd4dc_01.jpg -> train_masks-128/00087a6bd4dc_01_mask.png
# train-128/00087a6bd4dc_02.jpg -> train_masks-128/00087a6bd4dc_02_mask.png
# ...                              ...
# train-128/00087a6bd4dc_16.jpg -> train_masks-128/00087a6bd4dc_16_mask.png
x_names = np.array([Path(TRAIN_DN) / o for o in masks_csv['img']])
y_names = np.array([Path(MASKS_DN) / f'{o[:-4]}_mask.png' for o in masks_csv['img']])

# In[21]:

len(x_names) // 16 // 5 * 16  # a fifth of the cars, rounded down to whole cars, times 16 images each

# So we use a contiguous block of car IDs for the validation set, and since each car comes as a set of 16 images, we make sure the cutoff is evenly divisible by 16. That way our validation set contains different car IDs from our training set. This is the kind of thing you've got to be careful of.

# In[22]:

val_idxs = list(range(1008))
((val_x, trn_x), (val_y, trn_y)) = split_by_idx(val_idxs, x_names, y_names)
len(val_x), len(trn_x)

# Data augmentations
#
# TL;DR: you should use `tfm_y=TfmType.CLASS` when your target is a mask.
#
# Here we are going to use transform type classification (`TfmType.CLASS`). It's basically the same as transform type pixel (`TfmType.PIXEL`), but if you think about it, with the pixel version, if we rotate a little bit then we probably want to average the pixels in between; for a classification target we obviously don't, so we use nearest neighbour instead. So there's a slight difference there. Also, for classification, lighting and normalization don't get applied to the dependent variable.

# In[23]:

aug_tfms = [RandomRotate(4, tfm_y=TfmType.CLASS),
            RandomFlip(tfm_y=TfmType.CLASS),
            RandomLighting(0.05, 0.05)]

# They are already square images, so we don't have to do any cropping.

# In[24]:

tfms = tfms_from_model(resnet34, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS, aug_tfms=aug_tfms)
datasets = ImageData.get_ds(MatchedFilesDataset, (trn_x, trn_y), (val_x, val_y), tfms, path=PATH)
md = ImageData(PATH, datasets, bs, num_workers=8, classes=None)

# In[25]:

denorm = md.trn_ds.denorm
x, y = next(iter(md.aug_dl))
x = denorm(x)

# So here you can see different versions of the augmented images: they are moving around a bit, rotating a bit, and so forth.

# In[26]:

fig, axes = plt.subplots(5, 6, figsize=(12, 10))
for i, ax in enumerate(axes.flat):
    ax = show_img(x[i], ax=ax)
    show_img(y[i], ax=ax, alpha=0.5)
plt.tight_layout(pad=0.1)

# ## Model

# Given that we want something that knows what cars look like, we probably want to start with a pre-trained ImageNet network, so we are going to start with ResNet-34. With `ConvnetBuilder`, we can grab our ResNet-34 and add a custom head. The custom head is going to be something that upsamples a bunch of times, and we are going to do things really dumb for now: just a ConvTranspose2d, batch norm, ReLU block, repeated.
#
# At the very end, we have a single filter. That's going to give us something which is batch size by 1 by 128 by 128, but we want something which is batch size by 128 by 128, so we have to remove that unit axis; I've got a lambda layer here for that.
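
# To make the upsampling arithmetic concrete before we look at the actual layers, here is a rough shape walk-through (just a sketch; the real head is defined in the next few cells). ResNet-34 downsamples by a factor of 32, so a 128 by 128 input reaches the custom head as a batch size by 512 by 4 by 4 feature map, and each stride-2 transposed convolution then doubles the spatial size until we are back at 128 by 128 with a single channel.

# In[ ]:

# Plain arithmetic only, no layers instantiated: track the spatial size through the head.
size = 128 // 32                        # ResNet-34 backbone output: 4 x 4 for a 128 x 128 input
for nout in [256, 256, 256, 256, 1]:    # four upsampling blocks, then the final ConvTranspose2d
    size *= 2                           # each stride-2 ConvTranspose2d doubles height and width
    print(f'{nout:3d} x {size:3d} x {size:3d}')
# 256 x 8 x 8, 256 x 16 x 16, 256 x 32 x 32, 256 x 64 x 64, 1 x 128 x 128;
# a final Lambda layer then drops the unit channel axis: (bs, 1, 128, 128) -> (bs, 128, 128).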
# In[27]:

class Empty(nn.Module):
    def forward(self, x):
        return x

# In[28]:

models = ConvnetBuilder(resnet34, 0, 0, 0, custom_head=Empty())
learn = ConvLearner(md, models)
learn.summary()

# In[29]:

class StdUpsample(nn.Module):
    def __init__(self, nin, nout):
        super().__init__()
        self.conv = nn.ConvTranspose2d(nin, nout, 2, stride=2)
        self.bn = nn.BatchNorm2d(nout)

    def forward(self, x):
        return self.bn(F.relu(self.conv(x)))

# _Note: the Lambda layer is actually part of the fastai library, not part of PyTorch._

# In[30]:

flatten_channel = Lambda(lambda x: x[:, 0])

# So this is our custom head.

# In[31]:

simple_up = nn.Sequential(
    nn.ReLU(),
    StdUpsample(512, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    nn.ConvTranspose2d(256, 1, 2, stride=2),
    flatten_channel
)

# So we have a ResNet-34 that downsamples and then a really simple custom head that very quickly upsamples again, and hopefully that will do something. We are going to use accuracy with a threshold of 0.5 as the metric we print out.

# In[32]:

models = ConvnetBuilder(resnet34, 0, 0, 0, custom_head=simple_up)
learn = ConvLearner(md, models)
learn.opt_fn = optim.Adam
learn.crit = nn.BCEWithLogitsLoss()
learn.metrics = [accuracy_thresh(0.5)]

# In[33]:

learn.summary()

# ## Train

# As usual, run LR find and train it for a while.

# In[34]:

get_ipython().run_line_magic('time', 'learn.lr_find()')
learn.sched.plot()

# In[36]:

lr = 4e-2

# In[37]:

get_ipython().run_line_magic('time', 'learn.fit(lr, 1, cycle_len=5, use_clr=(20, 5), callbacks=[notif_cb])')

# In[38]:

learn.sched.plot_loss()

# **Analysis of the result**
#
# After a few epochs, we're 96 percent accurate. Is that good?
#
# It depends. What's it for? Carvana wanted this because they wanted to be able to take their car images, cut them out, and paste them onto whatever background. To do that, you need a really good mask. You don't want to leave the rearview mirrors behind, have one wheel missing, or include a little bit of background or something. So you would need something very good. Only having 96% of the pixels correct doesn't sound great, but we won't really know until we look at it. So let's look at it.

# In[39]:

learn.save('tmp')

# In[40]:

learn.load('tmp')

# In[41]:

py, ay = learn.predict_with_targs()

# In[42]:

ay.shape, py.shape

# The correct version that we want to cut out:

# In[43]:

show_img(ay[0])

# Below is the 96% accurate version. When you look at it, you realize: "oh yeah, getting 96% of the pixels accurate is actually easy, because all the outside bit is not car, all the inside bit is car, and the really interesting bit is the edge." So we need to do better.

# In[44]:

show_img(py[0] > 0)

# Let's unfreeze, because all we've done so far is train the custom head. Let's do more.

# In[45]:

learn.unfreeze()

# In[46]:

learn.bn_freeze(True)

# In[47]:

# array of learning rates
lrs = np.array([lr / 100, lr / 10, lr]) / 4

# With cuDNN benchmark enabled.

# In[48]:

get_ipython().run_line_magic('time', 'learn.fit(lrs, 1, cycle_len=20, use_clr=(20, 10), callbacks=[notif_cb])')

# With cuDNN benchmark disabled.

# In[171]:

get_ipython().run_line_magic('time', 'learn.fit(lrs, 1, cycle_len=20, use_clr=(20, 10), callbacks=[notif_cb])')

# After a bit more, we've got 99.3%. Is that good? I don't know. Let's take a look.

# In[172]:

learn.save('0')

# In[50]:

x, y = next(iter(md.val_dl))     # x is the car images, 64 of them in a batch
py = to_np(learn.model(V(x)))    # py is the predicted masks, 64 of them in a batch
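
# Pixel accuracy flatters us here. The competition itself is scored by the mean Dice coefficient, which is far less forgiving, because the easy background pixels contribute nothing. As a rough sanity check (just a sketch, not the official Kaggle scorer), we can compute the per-image Dice on this validation batch, assuming `py` holds the raw logits and `y` the 0/1 target masks:

# In[ ]:

# Dice = 2 * |intersection| / (|prediction| + |target|), computed per image then averaged.
pred = (py > 0).astype(np.float64)          # threshold the logits at 0 (sigmoid 0.5)
targ = to_np(y).astype(np.float64)
inter = (pred * targ).sum(axis=(1, 2))      # per-image intersection
denom = pred.sum(axis=(1, 2)) + targ.sum(axis=(1, 2))
dice = (2 * inter + 1e-6) / (denom + 1e-6)  # epsilon guards against empty masks
print(dice.mean())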
# In[62]:

ax = show_img(denorm(x)[0])              # denorm(x)[0] is one (at index 0) denormalized car image
show_img(py[0] > 0, ax=ax, alpha=0.5)    # py[0] is one (at index 0) predicted mask

# Actually, no. It's totally missed the rearview mirror on the left, missed a lot of the one on the right, and clearly got an edge wrong at the bottom. These things are totally going to matter when we try to cut the car out, so it's still not good enough.

# In[63]:

ax = show_img(denorm(x)[0])
show_img(y[0], ax=ax, alpha=0.5)

# ## 512x512

# Let's try upscaling. The nice thing is that when we upscale to 512 by 512 (make sure you decrease the batch size, because you'll run out of memory otherwise), there's quite a lot more information for the model to go on, so our accuracy increases to 99.4% and things keep getting better.

# In[64]:

TRAIN_DN = 'train'
MASKS_DN = 'train_masks_png'
sz = 512
bs = 16

# In[65]:

x_names = np.array([Path(TRAIN_DN) / o for o in masks_csv['img']])
y_names = np.array([Path(MASKS_DN) / f'{o[:-4]}_mask.png' for o in masks_csv['img']])

# In[66]:

((val_x, trn_x), (val_y, trn_y)) = split_by_idx(val_idxs, x_names, y_names)
len(val_x), len(trn_x)

# In[67]:

tfms = tfms_from_model(resnet34, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS, aug_tfms=aug_tfms)
datasets = ImageData.get_ds(MatchedFilesDataset, (trn_x, trn_y), (val_x, val_y), tfms, path=PATH)
md = ImageData(PATH, datasets, bs, num_workers=8, classes=None)

# In[68]:

denorm = md.trn_ds.denorm
x, y = next(iter(md.aug_dl))
x = denorm(x)

# Here's the ground truth.

# In[69]:

fig, axes = plt.subplots(4, 4, figsize=(10, 10))
for i, ax in enumerate(axes.flat):
    ax = show_img(x[i], ax=ax)
    show_img(y[i], ax=ax, alpha=0.5)
plt.tight_layout(pad=0.1)

# In[70]:

simple_up = nn.Sequential(
    nn.ReLU(),
    StdUpsample(512, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    nn.ConvTranspose2d(256, 1, 2, stride=2),
    flatten_channel
)

# In[71]:

models = ConvnetBuilder(resnet34, 0, 0, 0, custom_head=simple_up)
learn = ConvLearner(md, models)
learn.opt_fn = optim.Adam
learn.crit = nn.BCEWithLogitsLoss()
learn.metrics = [accuracy_thresh(0.5)]

# In[87]:

learn.load('0')

# In[185]:

learn.lr_find()
learn.sched.plot()

# In[73]:

lr = 4e-2

# In[187]:

get_ipython().run_line_magic('time', 'learn.fit(lr, 1, cycle_len=5, use_clr=(20, 5), callbacks=[notif_cb])')

# In[188]:

learn.sched.plot_loss()

# In[189]:

learn.save('tmp')

# In[89]:

learn.load('tmp')

# In[74]:

learn.unfreeze()
learn.bn_freeze(True)

# In[75]:

lrs = np.array([lr / 100, lr / 10, lr]) / 4

# In[193]:

get_ipython().run_line_magic('time', 'learn.fit(lrs, 1, cycle_len=8, use_clr=(20, 8), callbacks=[notif_cb])')

# In[194]:

learn.save('512')

# In[77]:

x, y = next(iter(md.val_dl))
py = to_np(learn.model(V(x)))

# The 99.7% version:

# In[196]:

ax = show_img(denorm(x)[0])
show_img(py[0] > 0, ax=ax, alpha=0.5)

# The correct version that we want to cut out:

# In[79]:

ax = show_img(denorm(x)[0])
show_img(y[0], ax=ax, alpha=0.5)

# Things keep getting better, but we've still got quite a few little black blocky bits, so let's go to 1024 by 1024.

# ## 1024x1024

# So let's go to 1024 by 1024, batch size down to 4. This is pretty high resolution now. Train a bit more, and accuracy goes to 99.6%, then 99.8%!
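
# Note that nothing about the architecture has to change when we change `sz`: the network is fully convolutional, so no weight shape depends on the input resolution. That's why the weights saved at one size load cleanly at another, and why the head's five stride-2 transposed convolutions (32x upsampling) exactly undo the backbone's 32x downsampling at any input size. A tiny arithmetic sketch of that:

# In[ ]:

# Predicted mask size at each training resolution: backbone divides by 32, head multiplies by 2**5 = 32.
for sz_ in (128, 512, 1024):
    feat = sz_ // 32      # spatial size of the backbone's final feature map
    out = feat * 2 ** 5   # five stride-2 transposed convolutions double it five times
    print(f'{sz_}: feature map {feat}x{feat} -> predicted mask {out}x{out}')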
# In[80]:

sz = 1024
bs = 4

# In[81]:

tfms = tfms_from_model(resnet34, sz, crop_type=CropType.NO, tfm_y=TfmType.CLASS, aug_tfms=aug_tfms)
datasets = ImageData.get_ds(MatchedFilesDataset, (trn_x, trn_y), (val_x, val_y), tfms, path=PATH)
md = ImageData(PATH, datasets, bs, num_workers=8, classes=None)

# In[82]:

denorm = md.trn_ds.denorm
x, y = next(iter(md.aug_dl))
x = denorm(x)
y = to_np(y)

# In[83]:

fig, axes = plt.subplots(2, 2, figsize=(8, 8))
for i, ax in enumerate(axes.flat):
    show_img(x[i], ax=ax)
    show_img(y[i], ax=ax, alpha=0.5)
plt.tight_layout(pad=0.1)

# In[84]:

simple_up = nn.Sequential(
    nn.ReLU(),
    StdUpsample(512, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    StdUpsample(256, 256),
    nn.ConvTranspose2d(256, 1, 2, stride=2),
    flatten_channel,
)

# In[85]:

models = ConvnetBuilder(resnet34, 0, 0, 0, custom_head=simple_up)
learn = ConvLearner(md, models)
learn.opt_fn = optim.Adam
learn.crit = nn.BCEWithLogitsLoss()
learn.metrics = [accuracy_thresh(0.5)]

# In[86]:

learn.load('512')

# In[87]:

get_ipython().run_cell_magic('time', '', 'learn.lr_find()\nlearn.sched.plot()\n')

# In[90]:

lr = 4e-2

# In[91]:

learn.fit(lr, 1, cycle_len=2, use_clr=(20, 4), callbacks=[notif_cb])

# In[92]:

learn.save('tmp')

# In[93]:

learn.load('tmp')

# In[94]:

learn.unfreeze()
learn.bn_freeze(True)

# In[95]:

lrs = np.array([lr / 100, lr / 10, lr]) / 8

# In[ ]:

# Resume training from the point where we stopped the training at epoch 15 previously.

# In[97]:

get_ipython().run_line_magic('time', 'learn.fit(lrs, 1, cycle_len=25, use_clr=(20, 10), callbacks=[notif_cb])')

# In[98]:

learn.save('1024-cyc-len-25-clr-20-10')

# In[99]:

learn.sched.plot_loss()

# In[100]:

learn.sched.plot_lr()

# In[101]:

x, y = next(iter(md.val_dl))
py = to_np(learn.model(V(x)))

# Plot the mask over the top of the photo.

# In[102]:

ax = show_img(denorm(x)[0])
show_img(py[0] > 0, ax=ax, alpha=0.5)

# In[103]:

ax = show_img(denorm(x)[0])
show_img(y[0], ax=ax, alpha=0.5)

# Plot just the mask.

# In[104]:

show_img(py[0] > 0)

# In[105]:

show_img(y[0])

# In[ ]:

# In[ ]:

learn.fit(lrs, 1, cycle_len=40, use_clr=(20, 10), callbacks=[notif_cb])

# _Notice the training metrics stop in the middle of epoch 7, because I had to shut down my computer as I was heading out. Fret not: I send the training metrics as IFTTT notifications to my mobile, and here they are:_
#
# ```bash
# epoch    trn_loss   val_loss   mask_acc
#     0    0.005643   0.006067   0.997681
#     1    0.0054     0.006054   0.997656
#     2    0.005204   0.005574   0.997634
#     3    0.005528   0.00666    0.997664
#     4    0.006686   0.007286   0.997451
#     5    0.006337   0.006863   0.997468
#     6    0.005324   0.006193   0.997629
#     7    0.005142   0.005845   0.997886
#     8    0.005111   0.005904   0.997774
#     9    0.005092   0.005472   0.997967
#    10    0.004898   0.005497   0.997801
#    11    0.010694   0.007598   0.997152
#    12    0.004787   0.005513   0.997856
#    13    0.004599   0.005142   0.997935
#    14    0.004655   0.005915   0.997716
#    15    0.004294   0.005211   0.998055
# [0.0052112686, 0.99805523208291195]
# ```
#
# It took me ~37 minutes (2195.4 s) to train one epoch on a K80 GPU, roughly 1.99 s/iteration, and GPU memory usage peaked at ~10 GB. The full training would take something like 25 hours, so it's kind of slow. I stopped training after epoch 15, which took about 10 hours.

# In[213]:

learn.save('1024-cyc-len-40-clr-20-10-epoch-15')

# In[96]:

learn.load('1024-cyc-len-40-clr-20-10-epoch-15')

# In[218]:

learn.sched.plot_loss()

# In[219]:

learn.sched.plot_lr()

# In[214]:

# Boom! Out-Of-Memory.
# x, y = next(iter(md.val_dl))
# py = to_np(learn.model(V(x)))

# The issue here is that GPU memory is not being released after training is over.
# To [resolve this](http://forums.fast.ai/t/gpu-memory-not-being-freed-after-training-is-over/10265/4?u=cedric), follow these steps:
#
# 1. Delete some variables
# 2. Use `torch.cuda.empty_cache()`
#
# It works! I was able to free some memory. The memory consumption dropped from 11.4 GB to 3.8 GB.

# In[110]:

del learn

# In[113]:

torch.cuda.empty_cache()

# Create our `learner` object again:

# In[227]:

# Lines copied from the previous cell
learn = ConvLearner(md, models)
learn.opt_fn = optim.Adam
learn.crit = nn.BCEWithLogitsLoss()
learn.metrics = [accuracy_thresh(0.5)]

# In[228]:

# Load model weights
learn.load('1024-cyc-len-40-clr-20-10-epoch-15')

# In[125]:

# Resume by re-running the cell that gave us the error
x, y = next(iter(md.val_dl))
preds = learn.model(V(x))
py = to_np(preds)

# In[126]:

# DEBUG
type(x), type(y), type(V(x)), type(preds), type(py), x.shape, y.shape, V(x).shape, preds.shape, py.shape

# In[127]:

ax = show_img(denorm(x)[0])
# show_img(py[0][0] > 0, ax=ax, alpha=0.5)  # this line gives an error; fixed in the next line
show_img(py[0] > 0, ax=ax, alpha=0.5)

# In[130]:

ax = show_img(denorm(x)[0])
# show_img(y[0, ..., -1], ax=ax, alpha=0.5)  # this line gives the error "Invalid dimensions for image data"; fixed in the next line
show_img(y[0], ax=ax, alpha=0.5)

# In[132]:

# show_img(py[0][0] > 0)  # this line gives the error "Invalid dimensions for image data"; fixed in the next line
show_img(py[0] > 0)

# In[133]:

# show_img(y[0, ..., -1])  # this line gives an error; fixed in the next line
show_img(y[0])

# Now if we look at the masks, they are actually not bad; that's looking pretty good. So can we do better? And the answer is yes, we can.