# Cloning the Repository
!git clone https://github.com/pranjaldatta/DenseDepth-Pytorch.git
# Getting the data
!python DenseDepth-Pytorch/densedepth/download_data.py
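# Optional sanity check: the training command further down expects the dataset
# at data/nyu_depth.zip, so confirm the download landed there (this assumes
# download_data.py writes into data/)
!ls -lh data/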
# Mounting Google Drive
from google.colab import drive
drive.mount('/gdrive')
!mkdir -p /gdrive/My\ Drive/colabdrive/work/densedepth/checkpoints
# Installing training dependencies
!pip install tensorboardX
# Prefer an NVIDIA T4 or P100 GPU for favourable training times
# Libraries/code for monitoring the memory footprint
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil psutil humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# NOTE: Colab provides at most one GPU, and its availability isn't guaranteed
gpu = GPUs[0]
def printm():
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: {} | Proc size: {}".format(
        humanize.naturalsize(psutil.virtual_memory().available),
        humanize.naturalsize(process.memory_info().rss)))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(
        gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))
printm()
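# printm() can be re-run between cells (e.g. after each training epoch)
# to track how host RAM and GPU memory usage evolve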
!nvidia-smi
# Training
!python DenseDepth-Pytorch/densedepth/train.py --epochs 10 \
    --data "data/nyu_depth.zip" \
    --batch 4 \
    --save "<path to save checkpoints in (prefer Drive if using Colab)>" \
    --device "cuda" \
    --checkpoint "<path to checkpoint from which to resume training>"
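# As a concrete example, the run below writes checkpoints to the Drive folder
# created earlier; it assumes a fresh run, i.e. that --checkpoint may be
# omitted when not resuming:
!python DenseDepth-Pytorch/densedepth/train.py --epochs 10 \
    --data "data/nyu_depth.zip" \
    --batch 4 \
    --save "/gdrive/My Drive/colabdrive/work/densedepth/checkpoints/" \
    --device "cuda"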
# Testing
!python DenseDepth-Pytorch/densedepth/test.py --checkpoint "<path to load weights from>" \
    --device "cuda" \
    --data "DenseDepth-Pytorch/examples/"
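# For example, loading weights from the Drive checkpoints folder
# ("densedepth.pth" here is a placeholder filename, not one this walkthrough creates):
!python DenseDepth-Pytorch/densedepth/test.py \
    --checkpoint "/gdrive/My Drive/colabdrive/work/densedepth/checkpoints/densedepth.pth" \
    --device "cuda" \
    --data "DenseDepth-Pytorch/examples/"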