#!/usr/bin/env python
# coding: utf-8

# # ICE Contractor Analysis
# By [Leon Yin](www.leonyin.org) Last Updated 2018-12-06
#
# View this notebook in [NBViewer](https://nbviewer.jupyter.org/github/yinleon/ice-money/blob/master/ice_money.ipynb) or [Github](https://www.github.com/yinleon/ice-money/blob/master/ice_money.ipynb).
# The output is available publicly on [Google Drive](http://bit.ly/ice-contractors) and [Github](https://raw.githubusercontent.com/yinleon/ice-money/master/ice_data/data_out/ice_prime_contractors_aggregated_clean.csv).
#
# Following my analysis of [federal contracts](https://nbviewer.jupyter.org/github/yinleon/us-spending/blob/master/1_analysis_methods.ipynb) to the private prison companies CoreCivic and GEO Group, this notebook analyzes publicly available contracts from the Immigration and Customs Enforcement Agency (ICE). This analysis uses data downloaded from the USASpending.gov [dashboard](https://www.usaspending.gov/#/search/1da7f3124d80741313219fd741632538); specifically, I filtered `Funding Agency` with "U.S. Immigration and Customs Enforcement (ICE) | Sub-Agency".

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')

import glob
import datetime
import string
from collections import Counter

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from fuzzywuzzy import fuzz

# In[2]:

# data downloaded from USAspending.gov
input_file_pattern = 'ice_data/data_in/all_prime_awards_subawards_*/*'

# In[3]:

files = glob.glob(input_file_pattern)
files

# Let's just look at prime contracts today.

# In[4]:

# the prime awards file from the listing above
df_prime_contracts = pd.read_csv(files[1])

# Something to note here: filtering on the awarding sub-agency versus the funding sub-agency selects different numbers of rows, so the choice of filter matters! (We verify these counts with a quick snippet after the glossary below.)

# In[5]:

{
    'awarding_sub_agency_name' : 'U.S. IMMIGRATION AND CUSTOMS ENFORCEMENT',  # N = 15056
    'funding_sub_agency_name'  : 'U.S. IMMIGRATION AND CUSTOMS ENFORCEMENT',  # N = 17145
}

# In[6]:

# here we're keeping only contracts awarded by ICE
df_prime_contracts = df_prime_contracts[
    df_prime_contracts['awarding_sub_agency_name'] == 'U.S. IMMIGRATION AND CUSTOMS ENFORCEMENT'
]
len(df_prime_contracts)

# In[7]:

# there are a lot of columns!
prime_cols = df_prime_contracts.columns
num_cols = len(prime_cols)
print(f"There are {num_cols} columns in this file!\nHere are some of the column names:")
prime_cols.values.tolist()[:20]

# That is a lot of columns; here are the definitions of the columns we'll be using:
# ## Glossary of fields used in this Analysis with Definitions from USASpending:
# `recipient_duns`
# The unique identification number for the ultimate parent of an awardee or recipient. Currently the identifier is the 9-digit number maintained by Dun & Bradstreet as the global parent DUNS® number. # # # # `potential_total_value_of_award`
# The total amount that could be obligated on a contract. This total includes the base plus options amount. For example, if a recipient is awarded 10M on a base contract with 3 option years at 1M each, the potential award amount is 13M. # # `current_total_value_of_award`
# The amount of money that the government has promised (obligated) to pay a recipient for a contract. This means the base amount and any exercised options. # # `period_of_performance_start_date`
# The date that the award begins. # # `period_of_performance_potential_end_date`
# For procurement, the date on which, for the award referred to by the action being reported if all potential pre-determined or pre-negotiated options were exercised, awardee effort is completed or the award is otherwise ended. Administrative actions related to this award may continue to occur after this date. This date does not apply to procurement indefinite delivery vehicles under which definitive orders may be awarded. # # `funding_sub_agency_name`
# A Funding Agency pays for the majority of funds for an award out of its budget. Typically, the Funding Agency is the same as the Awarding Agency. In some cases, one agency will administer an award (Awarding Agency) and another agency will pay for it (Funding Agency). # # `awarding_sub_agency_name`
# The Awarding Sub Agency is the sub agency that issues and administers the award. For example, the Internal Revenue Service (IRS) is a sub agency of the Department of the Treasury. # #
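# Before we move on, the awarding/funding distinction above explains the differing row counts noted earlier. A minimal sketch to reproduce them (assuming `files[1]` is still the prime awards CSV):

# In[ ]:

# re-read the raw file and count rows tagged with ICE as the awarding
# vs. the funding sub-agency; the two filters select different rows
ICE = 'U.S. IMMIGRATION AND CUSTOMS ENFORCEMENT'
raw = pd.read_csv(files[1])
print((raw['awarding_sub_agency_name'] == ICE).sum())  # N = 15056 in the 2018-12-06 download
print((raw['funding_sub_agency_name'] == ICE).sum())   # N = 17145 in the 2018-12-06 download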
#
# Thanks for providing that glossary, USASpending. Now let's do some minor data wrangling and get some summary statistics.

# In[8]:

# convert columns to datetime
date_cols = ['period_of_performance_start_date',
             'period_of_performance_potential_end_date']
for col in date_cols:
    df_prime_contracts[col] = pd.to_datetime(df_prime_contracts[col])

# In[9]:

# aggregate contract values; each award can span several transaction rows,
# so deduplicate on the award ID before summing
total_spend = df_prime_contracts.drop_duplicates(subset=['award_id_piid']) \
                                .current_total_value_of_award.sum()
potential_spend = df_prime_contracts.drop_duplicates(subset=['award_id_piid']) \
                                    .potential_total_value_of_award.sum()
at_stake = potential_spend - total_spend

num_contractors = len(df_prime_contracts.recipient_duns.unique())
first_contract = df_prime_contracts.period_of_performance_start_date.min().strftime('%Y-%m-%d')

print("ICE has invested ${:,.2f} in contracts with {} Prime contractors "
      "since {}.\nICE has ${:,.2f} in potential payouts with active contracts.".format(
      total_spend, num_contractors, first_contract, at_stake))

# Let's sort this data by the contract end date.

# In[10]:

df_prime_contracts.sort_values(by=['period_of_performance_potential_end_date'],
                               ascending=False, inplace=True)

# There are typos in the recipient names, so let's use their DUNS IDs for aggregate analysis. A tiny illustration of the fuzzy name matching we'll use follows.
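# To make the name-typo problem concrete, here is how fuzzywuzzy's `token_sort_ratio` scores two spellings (the spellings below are made up for illustration, not taken from the data):

# In[ ]:

# token_sort_ratio lowercases, strips punctuation, and sorts tokens before
# comparing, so word order and punctuation differences barely matter
print(fuzz.token_sort_ratio('THE GEO GROUP, INC.', 'GEO GROUP INC'))  # high score: likely the same company
print(fuzz.token_sort_ratio('THE GEO GROUP, INC.', 'CORECIVIC INC'))  # low score: different companies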
# We can create a `lookup_table` containing the latest name and contact info.

# In[11]:

lookup_cols = [
    'recipient_parent_duns',
    'recipient_parent_name',
    'recipient_address_line_1',
    'recipient_address_line_2',
    'recipient_city_name',
    'recipient_state_code',
    'recipient_state_name',
    'recipient_zip_4_code',
    'recipient_congressional_district',
    'recipient_phone_number',
    'recipient_fax_number'
]

# In[12]:

lookup_table = df_prime_contracts.drop_duplicates(
    subset = ['recipient_parent_duns'],
    keep = 'first'
)[lookup_cols]

# There are contractors with similar names (and different DUNS); we can find similar names using fuzzywuzzy and replace them for aggregation.

# In[13]:

from fuzzywuzzy import fuzz

# In[14]:

look_up_names = lookup_table[~lookup_table.recipient_parent_name.isnull()].recipient_parent_name.unique()
look_up_names.sort()

# Here we will compare all names and create a standardization dictionary (`switch_name`) to replace similar names.

# In[15]:

window_size = 10  # compare against up to 10 neighboring names on each side (in sorted order)
threshold = 90    # how similar is similar enough?

skip = []
switch_name = dict()

for i, comp in enumerate(look_up_names):
    if comp not in skip:
        # max() keeps the left slice bound non-negative; otherwise the
        # first few names would silently get an empty left window
        window = np.concatenate((look_up_names[max(0, i-window_size): i],
                                 look_up_names[i+1: i+window_size]))
        for to_search in window:
            similarity_score = fuzz.token_sort_ratio(comp, to_search)
            if similarity_score >= threshold:
                switch_name[comp] = to_search
                skip += [to_search]

# Let's swap out the fuzzy names!

# In[16]:

df_prime_contracts.recipient_parent_name.replace(switch_name, inplace = True)

# In[17]:

lookup_table.recipient_parent_name.replace(switch_name, inplace = True)
lookup_table.drop_duplicates(subset = ['recipient_parent_name'],
                             keep = 'first', inplace = True)

# Now let's get how much each company has been awarded, how much they could potentially be awarded, how many awards they've been granted, and when their contracts start and end. This code is chained and kind of messy: each cell groups by the recipient parent name and applies an aggregation function (the first also sorts by the aggregated total).

# In[18]:

contract_amounts = df_prime_contracts.groupby('recipient_parent_name') \
                                     .current_total_value_of_award \
                                     .sum() \
                                     .sort_values(ascending=False)

# In[19]:

contract_potential = df_prime_contracts.groupby('recipient_parent_name') \
                                       .potential_total_value_of_award \
                                       .sum()

# In[20]:

num_awards = df_prime_contracts.groupby('recipient_parent_name') \
                               .award_id_piid.nunique()

# In[21]:

award_ids = df_prime_contracts.groupby('recipient_parent_name') \
                              .award_id_piid \
                              .unique().str.join(', ')

# In[22]:

contract_enddate = df_prime_contracts[~df_prime_contracts.period_of_performance_potential_end_date.isnull()] \
                       .groupby('recipient_parent_name') \
                       .period_of_performance_potential_end_date.max()

# In[23]:

contract_startdate = df_prime_contracts[~df_prime_contracts.period_of_performance_start_date.isnull()] \
                         .groupby('recipient_parent_name') \
                         .period_of_performance_start_date \
                         .min()

# For context, let's see what kinds of services each of these companies provides by counting the most frequent words in the service descriptions per contractor.
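# Before defining the real helper, here is the tokenize-filter-count idea in miniature (a sketch; the example sentence is made up):

# In[ ]:

# Counter tallies tokens; NLTK's stopword list filters out filler words
demo_sw = stopwords.words('english')
demo_counts = Counter(w for w in 'detention of the aliens and detention services'.split()
                      if w not in demo_sw)
demo_counts.most_common(2)  # [('detention', 2), ...]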
# In[24]:

def count_words(row, counter, sw):
    '''
    Splits sentences, casts words to lowercase, and counts
    all words that aren't in stopwords (sw).
    '''
    for c in string.punctuation:
        row = row.replace(c, " ")
    counter.update([w for w in row.lower().split() if w not in sw])

# In[25]:

sw = stopwords.words('english')  # NLTK corpus names are lowercase

# In[26]:

top_words = []
for company_, df_ in df_prime_contracts.groupby('recipient_parent_name'):
    word_counter_ = Counter()
    # dropna() skips missing descriptions, which would break str.replace
    df_['product_or_service_code_description'].dropna().apply(count_words,
                                                              args = (word_counter_, sw,))

    # get top 5 words
    top_words_ = ', '.join([w for w, c in word_counter_.most_common(5)])

    # create a dictionary, and add it to the list
    d_ = dict(top_words_from_serv_desc = top_words_,
              recipient_parent_name = company_)
    top_words.append(d_)

top_words_per_service_description = pd.DataFrame(top_words)

# We can join them all together using this craziness below:

# In[27]:

output = (
    contract_amounts.reset_index()
    .merge(
        num_awards.reset_index(),
        on='recipient_parent_name',
    )
    .merge(
        contract_potential.reset_index(),
        on='recipient_parent_name'
    )
    .merge(
        contract_enddate.reset_index(),
        on='recipient_parent_name'
    )
    .merge(
        contract_startdate.reset_index(),
        on='recipient_parent_name'
    )
    .merge(
        top_words_per_service_description,
        on='recipient_parent_name'
    )
    .merge(
        award_ids.reset_index(),
        on='recipient_parent_name'
    )
    .merge(
        lookup_table,
        on='recipient_parent_name'
    )
)

# The last merge above adds the `lookup_table`, containing contact metadata for each recipient.
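# As an aside, the per-recipient aggregates (everything except `top_words_per_service_description` and the `lookup_table`) share the same groupby index, so the first several merges could also be written as one concat. A sketch, equivalent assuming the indexes align:

# In[ ]:

# each Series is indexed by recipient_parent_name, so concat aligns them;
# the two award_id_piid Series are renamed to avoid duplicate column names
combined = pd.concat(
    [contract_amounts,
     num_awards.rename('number_of_prime_awards'),
     contract_potential,
     contract_enddate,
     contract_startdate,
     award_ids.rename('award_id_piids')],
    axis=1).reset_index()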
# Let's change some of the aggregate column names to be more descriptive:

# In[28]:

remapping = {
    'current_total_value_of_award' : 'current_total_value_of_awards_USD',
    'potential_total_value_of_award' : 'potential_total_value_of_awards_USD',
    'period_of_performance_potential_end_date' : 'last_contract_end_date',
    'period_of_performance_start_date' : 'first_contract_start_date',
    'award_id_piid_x' : 'number_of_prime_awards',
    'award_id_piid_y' : 'award_id_piids'
}

# In[29]:

# rename the mapped columns and leave the rest unchanged
output.columns = [remapping.get(c, c) for c in output.columns]

# In[30]:

# convert datetimes
for col in ['last_contract_end_date', 'first_contract_start_date']:
    output[col] = output[col].dt.strftime('%Y-%m-%d')

# In[31]:

# calculate how much money is on the line for each contractor
output['remaining_total_value_of_awards_USD'] = output.potential_total_value_of_awards_USD - output.current_total_value_of_awards_USD

# Let's timestamp when this file was made.

# In[32]:

output['analysis_date'] = datetime.datetime.now()                # when was this file made?
output['raw_data_download_date'] = datetime.datetime(2018,12,6)  # when was the raw data downloaded?

# And lastly, let's re-order the columns so they're readable.

# In[33]:

order_cols = [
    'recipient_parent_name',
    'recipient_parent_duns',
    'top_words_from_serv_desc',
    'number_of_prime_awards',
    'current_total_value_of_awards_USD',
    'potential_total_value_of_awards_USD',
    'remaining_total_value_of_awards_USD',
    'award_id_piids',
    'first_contract_start_date',
    'last_contract_end_date',
    'recipient_address_line_1',
    'recipient_address_line_2',
    'recipient_city_name',
    'recipient_state_code',
    'recipient_state_name',
    'recipient_zip_4_code',
    'recipient_congressional_district',
    'recipient_phone_number',
    'recipient_fax_number',
    'analysis_date',
    'raw_data_download_date',
]

# make these human readable
new_cols = [' '.join(c.split('_')).title() for c in order_cols]

# Here is the output:

# In[34]:

out_order = output[order_cols]
out_order.columns = new_cols

# In[35]:

out_order.head(50)

# There are still a lot of duplicates; future work will be fuzzy matching these accounts. (A quick sketch for spotting them follows.)
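# As a sketch of that future work, adjacent names in sorted order with high similarity scores are good merge candidates (the 80 threshold here is a guess):

# In[ ]:

# flag near-duplicate recipient names that survived the first fuzzy pass
names = sorted(out_order['Recipient Parent Name'].dropna().unique())
for a, b in zip(names, names[1:]):
    if fuzz.token_sort_ratio(a, b) >= 80:
        print(a, '<->', b)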
# Let's save this as a CSV.

# In[36]:

outfile = 'ice_data/data_out/ice_prime_contractors_aggregated_clean.csv'
out_order.to_csv(outfile, index=False)

# In[37]:

import math

millnames = ['',' K',' M',' B',' T']

def millify(n):
    '''Abbreviates a number, e.g. millify(13000000) -> "13.00 M".'''
    if not np.isnan(n):
        n = float(n)
        millidx = max(0, min(len(millnames)-1,
                             int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
        return '{:.2f}{}'.format(n / 10**(3 * millidx), millnames[millidx])
    return np.nan

# In[38]:

# to make it more human, smh...
for col in ['Current Total Value Of Awards Usd', 'Potential Total Value Of Awards Usd']:
    out_order[col] = out_order[col].apply(millify)

# In[39]:

outfile = 'ice_data/data_out/ice_prime_contractors_aggregated_clean_numbers.csv'
out_order.to_csv(outfile, index=False)

# ## Plotting Top Contractors
# We can plot the top-paid contractors from past awards, and the contractors who have the most valuable current projects.

# In[40]:

ax = output.sort_values(by=['current_total_value_of_awards_USD'], ascending=False) \
           .head(30).plot(x='recipient_parent_name',
                          y='current_total_value_of_awards_USD',
                          color='orange',
                          kind='barh',
                          logx=True,
                          figsize=(8,12),
                          title="30 most valuable ICE contractors since 2006",
                          legend=False)

# annotate each bar
for rect in ax.patches:
    width = rect.get_width()
    ax.text(1.2*rect.get_width(), rect.get_y() + 0.5 * rect.get_height(),
            '$' + millify(width),
            ha='center', va='center')

# remove border
for spine in plt.gca().spines.values():
    spine.set_visible(False)

# remove axis titles
ax.set_xlabel("")
ax.set_ylabel("")

# remove ticks
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.get_xaxis().set_ticks([])

ax.invert_yaxis();

# In[41]:

ax = output.sort_values(by=['remaining_total_value_of_awards_USD'], ascending=False) \
           .head(30).plot(x='recipient_parent_name',
                          y='remaining_total_value_of_awards_USD',
                          color='orange',
                          kind='barh',
                          logx=True,
                          figsize=(8,12),
                          title="30 most valuable ICE contractors in progress",
                          legend=False)

# annotate each bar
for rect in ax.patches:
    width = rect.get_width()
    ax.text(1.32*rect.get_width(), rect.get_y() + 0.5 * rect.get_height(),
            '$' + millify(width),
            ha='center', va='center')

# remove border
for spine in plt.gca().spines.values():
    spine.set_visible(False)

# remove axis titles
ax.set_xlabel("")
ax.set_ylabel("")

# remove ticks
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.get_xaxis().set_ticks([])

ax.invert_yaxis();
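# If you want the charts alongside the CSVs, each figure can be saved from its Axes (a sketch; the output path is hypothetical):

# In[ ]:

# save the most recent chart next to the CSV outputs
ax.get_figure().savefig('ice_data/data_out/ice_top_contractors.png',
                        bbox_inches='tight', dpi=150)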