"""End-to-end fastai v2 vision examples.

Four mini-pipelines, each following the same pattern
(DataBlock -> databunch -> learner -> fit -> inspect):

1. Pets        - single-label image classification (Oxford-IIIT Pet)
2. Planet      - multi-label classification from a CSV of tags
3. CamVid      - semantic segmentation with a U-Net
4. BIWI        - head-center point regression

NOTE(review): this uses the pre-release ``local`` package layout of
fastai v2; the public API later moved to ``fastai.*``.
"""

from local.test import *
from local.basics import *
from local.callback.all import *
from local.vision.all import *

# ---------------------------------------------------------------------------
# 1. Pets: single-label classification, labelled from the filename
# ---------------------------------------------------------------------------
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
                 get_items=get_image_files,
                 splitter=RandomSplitter(),
                 # Class name is everything before the trailing _<index>.jpg
                 get_y=RegexLabeller(pat=r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images",
                        # Crop aggressively per-item, then augment per-batch
                        item_tfms=RandomResizedCrop(460, min_scale=0.75), bs=32,
                        batch_tfms=[*aug_transforms(size=299, max_warp=0),
                                    Normalize(*imagenet_stats)])
len(dbunch.train_ds.items)
dbunch.show_batch(max_n=9)

from torchvision.models import resnet34, resnet50
#from local.vision.models.xresnet import xresnet50

opt_func = partial(Adam, lr=slice(3e-3), wd=0.01, eps=1e-8)
#Or use Ranger
#def opt_func(p, lr=slice(3e-3)): return Lookahead(RAdam(p, lr=lr, mom=0.95, wd=0.01))

# Mixed-precision transfer learning: train the head, then unfreeze and
# fine-tune the whole network with discriminative learning rates.
learn = cnn_learner(dbunch, resnet50, opt_func=opt_func, metrics=error_rate,
                    config=cnn_config(ps=0.33)).to_fp16()
learn.fit_one_cycle(1)
#learn.fit_one_cycle(8, slice(3e-3))
learn.unfreeze()
# learn.fit_one_cycle(4, slice(1e-5, 1e-3))
learn.fit_one_cycle(1, slice(1e-5, 1e-3))
learn.predict(dbunch.train_ds.items[0])
learn.show_results(max_n=9)
interp = Interpretation.from_learner(learn)
interp.plot_top_losses(9, figsize=(15,10))

# ---------------------------------------------------------------------------
# 2. Planet: multi-label classification; items are rows of labels.csv
# ---------------------------------------------------------------------------
planet_source = untar_data(URLs.PLANET_TINY)
df = pd.read_csv(planet_source/"labels.csv")
planet = DataBlock(blocks=(ImageBlock, MultiCategoryBlock),
                   # Row layout: x[0] = image id, x[1] = space-separated tags
                   get_x=lambda x: planet_source/"train"/f'{x[0]}.jpg',
                   splitter=RandomSplitter(),
                   get_y=lambda x: x[1].split(' '))
dbunch = planet.databunch(df.values,
                          batch_tfms=aug_transforms(flip_vert=True, max_lighting=0.1,
                                                    max_zoom=1.05, max_warp=0.))
dbunch.show_batch(max_n=9, figsize=(12,9))
learn = cnn_learner(dbunch, resnet34, opt_func=opt_func, metrics=accuracy_multi)
learn.fit_one_cycle(1)
learn.predict(planet_source/f'train/train_10030.jpg', rm_type_tfms=1)
learn.show_results(max_n=9)
interp = Interpretation.from_learner(learn)
interp.plot_top_losses(9)

# ---------------------------------------------------------------------------
# 3. CamVid: semantic segmentation; target is a PILMask per image
# ---------------------------------------------------------------------------
# Hoist the (cached) dataset path so get_y does not re-resolve it per item.
camvid_source = untar_data(URLs.CAMVID_TINY)
camvid = DataBlock(blocks=(ImageBlock, ImageBlock(cls=PILMask)),
                   get_items=get_image_files,
                   splitter=RandomSplitter(),
                   # Mask for image <stem><suffix> lives at labels/<stem>_P<suffix>
                   get_y=lambda o: camvid_source/'labels'/f'{o.stem}_P{o.suffix}')
dbunch = camvid.databunch(camvid_source/"images", batch_tfms=aug_transforms())
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: Find a way to pass the classes properly
dbunch.vocab = np.loadtxt(camvid_source/'codes.txt', dtype=str)
learn = unet_learner(dbunch, resnet34, opt_func=opt_func, config=unet_config())
learn.fit_one_cycle(1, 1e-3)
# Use the below to get somewhat reasonable results - but takes a bit longer
# learn.fit_one_cycle(8, 3e-3)
learn.predict(dbunch.train_ds.items[0])
learn.show_results(max_n=4, figsize=(15,5))

# ---------------------------------------------------------------------------
# 4. BIWI: head-center point regression; targets come from a pickled dict
# ---------------------------------------------------------------------------
path = untar_data(URLs.BIWI_SAMPLE)
fn2ctr = (path/'centers.pkl').load()
biwi = DataBlock(blocks=(ImageBlock, PointBlock),
                 get_items=get_image_files,
                 # flip(0) swaps (row, col) -> (x, y) point ordering
                 get_y=lambda o: fn2ctr[o.name].flip(0),
                 splitter=RandomSplitter())
dbunch = biwi.databunch(path,
                        batch_tfms=[*aug_transforms(size=(120,160)),
                                    Normalize(*imagenet_stats)])
dbunch.show_batch(max_n=9, vmin=1, vmax=30)
#TODO: look for attrs in after_item
dbunch.c = dbunch.after_item.c
dbunch.train_ds.loss_func = MSELossFlat()
learn = cnn_learner(dbunch, resnet34, opt_func=opt_func)
learn.fit_one_cycle(3, 1e-3)
learn.predict(dbunch.train_ds.items[0])
learn.show_results(max_n=4)