#!/usr/bin/env python
# coding: utf-8

# In[2]:

from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
    'theme': 'serif',
    'transition': 'zoom',
    'start_slideshow_at': 'selected',
})
from IPython.display import IFrame


# # dammit!
# 
# ### a simple transcriptome annotator
# 
# --------------------------------------
# 
# #### Camille Scott
# 
# #### November 18, 2015
# 
# #### DIB Lab Meeting

# # TL;DR
# 
# dammit is a simple de novo transcriptome annotator. It was born out of the
# observations that annotation is mundane and annoying, that all the individual pieces
# of the process already exist, and that the existing solutions are overly complicated
# or rely on crappy non-free software.
# 
# Science shouldn't suck for the sake of sucking, so dammit attempts
# to make this sucky part of the process suck a little less.

# # Motivations
# 
# * Annotation is a major component of the sea lamprey project
# * Many of the pieces of dammit were already implemented
# * No easy-to-use solutions
# * No solutions with good licensing

# ## What should a good annotator even look like?
# 
# * It should be easy to install and upgrade
# * It should only use Free software
# * It should make use of standard databases
# * It should output in reasonable formats
# * It should be relatively fast
# 
# and of course,
# 
# * it should try to be correct!

# Without further ado...

# In[37]:

IFrame('http://www.camillescott.org/dammit', width=800, height=400)


# ### The Obligatory Flowchart
# 
# ![The Workflow](flow.svg)

# ## Software Used
# 
# * TransDecoder
# * BUSCO
# * HMMER
# * Infernal
# * LAST
# * crb-blast (for now)
# * pydoit (under the hood)

# #### All of these are Free Software, as in freedom and beer

# #### Just as important, they're all *accessible*
# 
# * they're maintained, and
# * relatively easy to install
# 
# The exception is BUSCO, which is hosted on a dodgy lab website and distributed as a tarball. I intend to remember this ;)

# ---
# 
# ## Databases
# 
# * Pfam-A
# * Rfam
# * OrthoDB
# * BUSCO databases
# * Uniref90
# * User-supplied protein databases
# 
# The last one is important, and sometimes ignored.
# 
# ---

# ---
# 
# # Conditional Reciprocal Best LAST
# 
# Building off Richard and co.'s work on Conditional Reciprocal Best BLAST, I've implemented a new version with Python and LAST -- CRBL. The original lives here: https://github.com/cboursnell/crb-blast
# 
# ## Why??
# 
# * BLAST is too slooooooow
# * Ruby is yet another dependency for users to install
# * With Python and scikit-learn, I have the freedom to toy with models (and learn stuff)
# 
# ---

# ---
# ## And why does speed matter?

# ---
# ![deluge](https://upload.wikimedia.org/wikipedia/commons/0/0d/Great_Wave_off_Kanagawa2.jpg)

# ---
# 
# And, of course, some of these databases are BIG. Doing `blastx` and `tblastn` between a reasonably sized transcriptome and Uniref90 is not an experience you want to have.
# 
# #### i.e., practical concerns.
# 
# ---

# ---
# 
# ### A brief intro to CRBB
# 
# * Reciprocal Best Hits (RBH) is a standard method for ortholog detection
# * Transcriptomes have multiple transcript isoforms, which confounds RBH
# * CRBB uses machine learning to get at this problem
# 
# ---

# ---
# #### RBH in the presence of isoforms
# 
# ![RBH](RBH.svg)
# 
# ---

# ---
# 
# CRBB attempts to associate those isoforms with appropriate annotations by learning an appropriate e-value cutoff for different transcript lengths.
# 
# ![CRBB](CRBB.png)
# 
# *from http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.1004365#s5*
# 
# ---
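# How does CRBB learn that cutoff? Roughly: treat the reciprocal best hits as
# trusted examples, summarize how e-value scales with transcript length, and
# then keep any other hit that clears the learned cutoff for its length.
# The cell below is only a toy sketch of that idea -- synthetic data, made-up
# names, and plain scikit-learn standing in for the real crb-blast/dammit code.

# In[ ]:

# Toy illustration of a CRBB-style length-dependent cutoff;
# NOT dammit's actual implementation.
import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(42)

# Fake RBH training data: transcript lengths and alignment e-values, where
# longer transcripts tend to get stronger (smaller) e-values.
lengths = rng.randint(200, 3000, size=500)
evalues = 10.0 ** -(lengths / 100.0 + rng.normal(0, 5, size=500))
scores = -np.log10(evalues + 1e-300)            # work in -log10(e-value) space

# Sliding window over transcript length; keep the centroid score per window.
order = np.argsort(lengths)
window = 50
centers, centroids = [], []
for start in range(0, len(order) - window, window // 2):
    idx = order[start:start + window]
    centers.append(lengths[idx].mean())
    centroids.append(scores[idx].mean())

# Fit a simple model: cutoff score as a function of transcript length.
cutoff_model = LinearRegression()
cutoff_model.fit(np.array(centers).reshape(-1, 1), np.array(centroids))

def accept_hit(length, evalue):
    """Keep a non-RBH hit if it beats the learned cutoff for its length (hypothetical helper)."""
    cutoff = cutoff_model.predict(np.array([[length]]))[0]
    return -np.log10(evalue + 1e-300) >= cutoff

accept_hit(1500, 1e-30), accept_hit(1500, 1e-5)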
# ---
# 
# ### CRBL
# 
# For CRBL, instead of fitting a linear model, we train a classifier:
# 
# * SVM
# * Naive Bayes
# 
# One limitation is that LAST has no equivalent to `tblastn`. So, we find the RBHs using the TransDecoder ORFs, and then apply the model to the hits from the translated transcriptome against the database.
# 
# ---

# ### Challenges
# 
# It's very difficult to establish an appropriate decision boundary!
# 
# http://edhar.genomecenter.ucdavis.edu/~camille/dammit/pom.500.fa.x.pep.fa.crbl.model.fitted.pdf
# 
# ![sigh](http://edhar.genomecenter.ucdavis.edu/~camille/dammit/sigh.svg)

# ---
# 
# We need to play with the data preparation to make the boundary stand out more. CRBB did this by first running a sliding window over the transcript lengths, taking the centroid e-value in each window, and then fitting its model to the resulting points.
# 
# ---

# ## dammit as a library
# 
# dammit is a standard Python package, which means it can be used as a library as well.

# In[29]:

from dammit.model import CRBL
from dammit.parsers import maf_to_df_iter
import pandas as pd

maf_df = pd.concat([group for group in maf_to_df_iter('pom.500.fa.dammit/pep.fa.x.pom.500.fa.transdecoder.pep.maf')])
maf_df.sort_values(by='q_name').head(n=5)


# In[30]:

CRBL.best_hits(maf_df)
maf_df.head(n=4)


# ### The code is even relatively documented...

# In[36]:

print(maf_to_df_iter.__doc__)


# See the documentation for more: http://www.camillescott.org/dammit/py-modindex.html

# ## Future Work
# 
# * Shoring up the CRBL implementation
# * Finishing output formatting -- still need to do the FASTA output
# * Some slight rearrangement of the command-line interface

# ## Acknowledgements
# 
# Thanks to Titus for advice, input, and PRs; Chris Hamm and Matt MacManes for filing issues; Michael for advice; Richard for his nifty method; and the terrible state of the bioinformatics industry for inspiring me.