In this tutorial we create and join several datasets (ligand–polymer interactions from the PDB, SIFTS chain-to-UniProt mappings, MyVariant.info annotations, and G2S genome-to-structure mappings) to locate pathogenic missense mutations in ligand binding sites and visualize them in 3D.
from pyspark.sql import SparkSession
from pyspark.sql.functions import concat_ws
from mmtfPyspark.datasets import g2sDataset, pdbjMineDataset, myVariantDataset
from mmtfPyspark.filters import ContainsGroup
from mmtfPyspark.interactions import InteractionFilter, InteractionFingerprinter
from mmtfPyspark.io import mmtfReader
from ipywidgets import interact, IntSlider
import py3Dmol
spark = SparkSession.builder.appName("Solution-1").getOrCreate()
path = "../resources/mmtf_full_sample"
pdb = mmtfReader.read_sequence_file(path)
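read_sequence_file returns a pair RDD keyed by PDB ID, so standard Spark RDD operations work for a quick sanity check (optional sketch):
# Optional sanity check: number of structures read and a few of their PDB IDs.
print(pdb.count())
print(pdb.keys().take(5))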
First, we filter the structures to those that contain the ligand STI (the PDB chemical component ID for imatinib).
pdb = pdb.filter(ContainsGroup("STI"))
interactionFilter = InteractionFilter()
interactionFilter.set_distance_cutoff(4.5)         # atoms within 4.5 A count as interacting
interactionFilter.set_min_interactions(1)          # keep chains with at least one contact
interactionFilter.set_query_groups(True, ["STI"])  # restrict the query to the STI ligand
interactions = InteractionFingerprinter.get_ligand_polymer_interactions(pdb, interactionFilter).cache()
interactions.toPandas().head(5)
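Before collecting to pandas, it can help to see which columns the fingerprinter produced; printSchema() prints the schema without pulling any rows to the driver, so check the printed names rather than assuming them:
# Show the column names and types of the interactions DataFrame.
# This is metadata only; no Spark job over the data is triggered.
interactions.printSchema()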
uniprotQuery = "SELECT * FROM sifts.pdb_chain_uniprot"
uniprot = pdbjMineDataset.get_dataset(uniprotQuery)
uniprot.toPandas().head(5)
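The SIFTS table has more columns than this tutorial needs; a narrowing select keeps just the mapping used below (a minimal sketch; uniprot_slim is an illustrative name, and structureChainId and sp_primary are the two columns referenced later):
# Keep only the PDB chain identifier and the primary UniProt accession.
uniprot_slim = uniprot.select("structureChainId", "sp_primary")
uniprot_slim.toPandas().head(5)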
intersection = uniprot.join(interactions, uniprot.structureChainId == interactions.structureChainId).cache()
intersection.toPandas()
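Note that an equi-join written this way keeps both copies of structureChainId, one from each input. If the duplicate column is unwanted, drop one side after the join (sketch of the standard PySpark pattern):
# Same join, but dropping the redundant copy of the join key.
intersection = uniprot.join(
    interactions, uniprot.structureChainId == interactions.structureChainId
).drop(interactions.structureChainId).cache()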
uniprot_ids = intersection.select("sp_primary").distinct().rdd.flatMap(lambda x: x).collect()
uniprot_ids
Here we use myVariantDataset to retrieve variant annotations for these UniProt IDs from the MyVariant.info web services, using its Lucene-style query syntax.
query = "clinvar.rcv.clinical_significance:pathogenic"
variants = myVariantDataset.get_variations(uniprot_ids, query).cache()
variants.toPandas()
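Because the query string follows MyVariant.info's Lucene-style syntax, clauses can be combined with boolean operators; the example below is illustrative only (the _exists_ filter and field paths are taken from the MyVariant.info documentation):
# Hypothetical narrower query: pathogenic ClinVar variants that also have a dbSNP record.
narrower_query = "clinvar.rcv.clinical_significance:pathogenic AND _exists_:dbsnp"
# variants = myVariantDataset.get_variations(uniprot_ids, narrower_query).cache()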
variant_ids = variants.select("variationId").distinct().rdd.flatMap(lambda x: x).collect()
variant_ids
Here we use g2sDataset to map the genomic variant positions onto PDB residues using the G2S (Genome to Structure) web services.
positions = g2sDataset.get_position_dataset(variant_ids).cache()
positions.toPandas()
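G2S may not place every variant onto a solved structure; if the table contains unmapped rows, a defensive filter keeps the joins below clean (optional sketch; whether such rows occur depends on the variants queried):
# Optional: drop rows G2S could not map to a residue position.
positions = positions.filter(positions.pdbPosition.isNotNull())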
positions = positions.withColumn("structureChainId", concat_ws(".", positions.structureId, positions.chainId))
positions.toPandas().head(5)
positions = positions.join(interactions, positions.structureChainId == interactions.structureChainId)
positions.toPandas()
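A quick aggregation summarizes how the mapped variant sites distribute across structures (sketch using standard DataFrame operations):
# Count mapped pathogenic variant positions per PDB structure.
positions.groupBy("structureId").count().toPandas()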
pdb_ids = positions.select("structureId").rdd.flatMap(lambda x: x).collect()
chain_ids = positions.select("chainId").rdd.flatMap(lambda x: x).collect()
group_numbers = positions.select("pdbPosition").rdd.flatMap(lambda x: x).collect()
var_ids = positions.select("variationId").rdd.flatMap(lambda x: x).collect()
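Each collect() above runs as a separate Spark job; an equivalent single pass collects the four columns once and splits them on the driver (alternative sketch):
# Single collect, then unzip into the four lists used by the viewer below.
rows = positions.select("structureId", "chainId", "pdbPosition", "variationId").collect()
pdb_ids = [r.structureId for r in rows]
chain_ids = [r.chainId for r in rows]
group_numbers = [r.pdbPosition for r in rows]
var_ids = [r.variationId for r in rows]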
def view_mutation_site(pdb_ids, chain_ids, groups, var_ids, ligand_id, distance=4.5):
    def view3d(i=0):
        print(f"PDB: {pdb_ids[i]}, chain: {chain_ids[i]}, group: {groups[i]}, variation: {var_ids[i]}")
        # Selections for the mutated residue and for everything within
        # 'distance' Angstroms of it ('byres' expands to whole residues).
        mutation = {'resi': groups[i], 'chain': chain_ids[i]}
        neighbors = {'resi': groups[i], 'chain': chain_ids[i], 'byres': 'true', 'expand': distance}
        viewer = py3Dmol.view(query='pdb:' + pdb_ids[i])
        viewer.setStyle(neighbors, {'stick': {'colorscheme': 'orangeCarbon', 'radius': 0.1}})
        viewer.setStyle(mutation, {'stick': {'colorscheme': 'redCarbon'}})
        viewer.addResLabels(mutation)
        viewer.setStyle({'resn': ligand_id}, {'sphere': {'colorscheme': 'greenCarbon'}})
        viewer.zoomTo(neighbors)
        return viewer.show()

    s_widget = IntSlider(min=0, max=len(pdb_ids)-1, description='Structure', continuous_update=False)
    return interact(view3d, i=s_widget)
%%javascript
// Disable output scrolling so the py3Dmol viewer is fully visible.
IPython.OutputArea.prototype._should_scroll = function(lines) {return false;}
view_mutation_site(pdb_ids, chain_ids, group_numbers, var_ids, 'STI', 8.0);
spark.stop()