#!/usr/bin/env python
# coding: utf-8

# # Title: msticpy - Base64 Decoder
# ## Description:
# This module allows you to extract base64 encoded content from a string or from columns of a Pandas DataFrame.
# The library returns the following information:
# - decoded string (if decodable to utf-8 or utf-16)
# - hashes of the decoded segment (MD5, SHA1, SHA256)
# - string of printable byte values (e.g. for submission to a disassembler)
# - the detected decoded file type (limited)
#
# If the results of the decoding contain further encoded strings, these will be decoded recursively. If the encoded string appears to be a zip, gzip or tar archive, the contents will be decompressed after decoding. In the case of zip and tar, the contents of the archive will also be checked for base64 encoded content and decoded/decompressed if possible.
#
# You must have msticpy installed to run this notebook:
# ```
# %pip install --upgrade msticpy
# ```
#
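# The hashes listed above are taken over the raw decoded bytes of each matched
# segment. As a rough standalone illustration (standard library only, with a
# made-up example payload - not part of msticpy), equivalent digests could be
# computed like this:
# ```
# import base64
# import hashlib
#
# segment = base64.b64decode("aGVsbG8gd29ybGQ=")  # example payload only
# print(hashlib.md5(segment).hexdigest())
# print(hashlib.sha1(segment).hexdigest())
# print(hashlib.sha256(segment).hexdigest())
# ```
#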

# Table of Contents

#
# In[1]:


# Imports
import sys
MIN_REQ_PYTHON = (3, 6)
if sys.version_info < MIN_REQ_PYTHON:
    print('Check the Kernel->Change Kernel menu and ensure that Python 3.6')
    print('or later is selected as the active kernel.')
    sys.exit("Python %s.%s or later is required.\n" % MIN_REQ_PYTHON)

from IPython.display import display
import pandas as pd

# Import Base64 module
import msticpy
msticpy.init_notebook(globals())
from msticpy.transform import base64unpack


# In[2]:


# Load test data
process_tree = pd.read_csv(
    'data/process_tree.csv',
    parse_dates=["TimeGenerated"],
    infer_datetime_format=True,
)
process_tree[['CommandLine']].head()


# [Contents](#contents)
# ## Decoding a Base64 String
#
# Base64 decode an input string.
#
# ```
# Base64 decode an input string.
#
# Parameters
# ----------
# input_string : str, optional
#     single string to decode (the default is None)
# trace : bool, optional
#     Show additional status (the default is None)
#
# Returns
# -------
# Tuple[str, Optional[List[BinaryRecord]]]
#     Decoded string and additional metadata
#
# Notes
# -----
# Items that decode to utf-8 or utf-16 strings will be returned as decoded
# strings replaced in the original string. If the encoded string is a
# known binary type it will identify the file type and return the hashes
# of the file. If any binary types are known archives (zip, tar, gzip) it
# will unpack the contents of the archive.
# For any binary it will return the decoded file as a byte array, and as a
# printable list of byte values. If the input is a string the function
# returns:
#
# - decoded string: this is the input string with any decoded sections
#   replaced by the results of the decoding
# ```

# In[3]:


# get a commandline from our data set
cmdline = process_tree['CommandLine'].loc[39]
cmdline


# In[4]:


# Decode the string
base64_dec_str = base64unpack.unpack(input_string=cmdline)

# Print decoded string
print(base64_dec_str)


# [Contents](#toc)
# ## Using a DataFrame as Input
# You can use base64unpack.unpack_df() to pass a DataFrame as an argument.
# Use the ```column``` parameter to specify which column to process.
#
# In the case of DataFrame input, the output DataFrame contains these additional columns:
# - src_index - the index of the row in the input DataFrame from which the data came.
# - full_decoded_string - the full decoded string with any decoded replacements. This is only really useful for top-level items, since nested items will only show the 'full' string representing the child fragment.
#
# ```
# Base64 decode strings taken from a pandas dataframe.
#
# Parameters
# ----------
# data : pd.DataFrame
#     dataframe containing column to decode
# column : str
#     Name of dataframe text column
# trace : bool, optional
#     Show additional status (the default is None)
#
# Returns
# -------
# pd.DataFrame
#     Decoded string and additional metadata in dataframe
# ```
#
# ### Notes
#
# Items that decode to utf-8 or utf-16 strings will be returned as decoded
# strings replaced in the original string. If the encoded string is a
# known binary type it will identify the file type and return the hashes
# of the file. If any binary types are known archives (zip, tar, gzip) it
# will unpack the contents of the archive.
# For any binary it will return the decoded file as a byte array, and as a
# printable list of byte values.

# In[5]:


# specify the data and column parameters
dec_df = base64unpack.unpack_df(data=process_tree, column='CommandLine')
dec_df
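# A quick way to narrow this output down to rows that actually produced a
# decoded string is sketched below (illustrative only; the `src_index`,
# `decoded_string` and `sha256` output columns are described in the next section):
# ```
# decoded_rows = dec_df[dec_df['decoded_string'].notna() & (dec_df['decoded_string'] != '')]
# decoded_rows[['src_index', 'decoded_string', 'sha256']]
# ```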
# [Contents](#contents)
# ## Interpreting the DataFrame Output
# For simple strings the Base64 decoded output is straightforward. However, for nested encodings this can get a little complex and difficult to represent in a tabular format.
#
# **Columns**
# - reference - The index of the row item in dotted notation, in depth.seq pairs (e.g. 1.2.2.3 would be the 3rd item at depth 3 that is a child of the 2nd item found at depth 1). This may not always be an accurate notation - it is mainly used to allow you to associate an individual row with the reference value contained in the full_decoded_string column of the topmost item.
# - original_string - the original string before decoding.
# - file_name - filename, if any (only if this is an item in a zip or tar file).
# - file_type - a guess at the file type (this is currently elementary and only includes a few file types).
# - input_bytes - the decoded bytes as a Python bytes string.
# - decoded_string - the decoded string if it can be decoded as a UTF-8 or UTF-16 string. Note: binary sequences may often successfully decode as UTF-16 strings but, in these cases, the decodings are meaningless.
# - encoding_type - encoding type (UTF-8 or UTF-16) if a decoding was possible, otherwise 'binary'.
# - file_hashes - collection of file hashes for any decoded item.
# - md5 - md5 hash as a separate column.
# - sha1 - sha1 hash as a separate column.
# - sha256 - sha256 hash as a separate column.
# - printable_bytes - printable version of input_bytes as a string of \xNN values.
#
# [Contents](#contents)
# ### SourceIndex column allows you to merge the results with the input DataFrame
# Where an input row has multiple decoded elements (e.g. a nested encoding or a zip or other archive file), the output of this merge will result in duplicate rows from the input (one per element match). The DataFrame index from the source is preserved in the `src_index` column.
#
# Note: you may need to force the type of the `src_index` column to be the same type as the original DataFrame index in order to merge. In the example below we are matching with the default numeric index, so we force the type to be numeric. In cases where you are using an index of a different dtype, you will need to convert the `src_index` column (dtype=object) to match the type of your index column.

# In[6]:


# Set the type of the SourceIndex column.
dec_df['SourceIndex'] = pd.to_numeric(dec_df['src_index'])
merged_df = (process_tree
             .merge(right=dec_df, how='left', left_index=True, right_on='SourceIndex')
             .drop(columns=['Unnamed: 0'])
             .set_index('SourceIndex'))

# Show the result of the merge (only those rows that have a value in original_string)
merged_df.dropna(subset=['original_string'])

# Note: the output of unpack_df() may have multiple rows (for nested encodings).
# In this case the merged DataFrame will have duplicate rows from the source.


# [Contents](#contents)
# ## Decoding Nested Base64/Archives
# The module will try to follow nested encodings. It uses the following algorithm:
# 1. Search for a pattern in the input that looks like a Base64 encoded string.
# 2. If it is not a known undecodable string, try to decode the matched pattern.
#    - If the base64 string matches a known archive type (zip, tar, gzip), also decompress or unpack it.
#    - For multi-item archives (zip, tar), process each contained item recursively (i.e. go to step 1 with the child item as input).
#    - For anything that decodes to a UTF-8 or UTF-16 string, replace the input pattern with the decoded string.
#    - Recurse over the resultant output (i.e. submit the decoded/replaced string to step 1).
# 3. If decoding fails, add the string to the list of undecodable strings (this prevents infinite looping over something that looks like a base64 string but isn't).
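# As a side note on step 3: a candidate that merely looks like base64 can still
# fail to decode, which is what the undecodable list guards against. A rough
# standalone illustration (standard library only, with a made-up input string):
# ```
# import base64
# import binascii
#
# try:
#     base64.b64decode("NotReallyBase64Data!!", validate=True)
# except binascii.Error as err:
#     print("not valid base64:", err)
# ```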
# In[8]:


encoded_cmd = '''
powershell.exe -nop -w hidden -encodedcommand UEsDBBQAAAAIAGBXkk3LfdszdwAAAIoAAAAJAAAAUGVEbGwuZGxss6v+sj/A0diA
UXmufa/PFcYNcRwX7I/wMC4oZAjgUJyzTEgqrdHbfuWyy/OCExqUGJkZGBoYoEDi
QPO3P4wJuqsQgGvVKimphoUIIa1Fgr9OMLyoZ0z4y37gP2vDfxDp8J/RjWEzs4NG
+8TMMoYTCouZGRSShAFQSwMEFAAAAAAAYYJrThx8YzUhAAAAIQAAAAwAAABiNjRp
bnppcC5mb29CYXNlNjQgZW5jb2RlZCBzdHJpbmcgaW4gemlwIGZpbGVQSwMEFAAA
AAAAi4JrTvMfsJUaAAAAGgAAABIAAABQbGFpblRleHRJblppcC5kbGxVbmVuY29k
ZWQgdGV4dCBmaWxlIGluIHppcFBLAQIUABQAAAAIAGBXkk3LfdszdwAAAIoAAAAJ
AAAAAAAAAAAAIAAAAAAAAABQZURsbC5kbGxQSwECFAAUAAAAAABhgmtOHHxjNSEA
AAAhAAAADAAAAAAAAAABACAAAACeAAAAYjY0aW56aXAuZm9vUEsBAhQAFAAAAAAA
i4JrTvMfsJUaAAAAGgAAABIAAAAAAAAAAQAgAAAA6QAAAFBsYWluVGV4dEluWmlw
LmRsbFBLBQYAAAAAAwADALEAAAAzAQAAAAA='''

import re

dec_string, dec_df = base64unpack.unpack(input_string=encoded_cmd)
print(dec_string.replace('\\n', '\n'))
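# The returned DataFrame can then be used to walk the nested and unpacked items,
# for example (a minimal sketch using the `reference`, `file_name`, `file_type`
# and `decoded_string` columns described above):
# ```
# dec_df[['reference', 'file_name', 'file_type', 'decoded_string']]
# ```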