#!/usr/bin/env python
# coding: utf-8

# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---

# In[ ]:

import pandas as pd
import numpy as np
from scipy.stats import ttest_ind

# # Assignment 4 - Hypothesis Testing
# This assignment requires more individual learning than previous assignments - you are encouraged to check out the [pandas documentation](http://pandas.pydata.org/pandas-docs/stable/) to find functions or methods you might not have used yet, or to ask questions on [Stack Overflow](http://stackoverflow.com/) and tag them as pandas and python related. And of course, the discussion forums are open for interaction with your peers and the course staff.
#
# Definitions:
# * A _quarter_ is a specific three-month period: Q1 is January through March, Q2 is April through June, Q3 is July through September, and Q4 is October through December.
# * A _recession_ is defined as starting with two consecutive quarters of GDP decline, and ending with two consecutive quarters of GDP growth.
# * A _recession bottom_ is the quarter within a recession which had the lowest GDP.
# * A _university town_ is a city which has a high percentage of university students compared to the total population of the city.
#
# **Hypothesis**: University towns have their mean housing prices less affected by recessions. Run a t-test to compare the ratio of the mean price of houses in university towns the quarter before the recession starts compared to the recession bottom. (`price_ratio=quarter_before_recession/recession_bottom`)
#
# The following data files are available for this assignment:
# * From the [Zillow research data site](http://www.zillow.com/research/data/) there is housing data for the United States. In particular the datafile for [all homes at a city level](http://files.zillowstatic.com/research/public/City/City_Zhvi_AllHomes.csv), ```City_Zhvi_AllHomes.csv```, has median home sale prices at a fine-grained level.
# * From the Wikipedia page on college towns there is a list of [university towns in the United States](https://en.wikipedia.org/wiki/List_of_college_towns#College_towns_in_the_United_States) which has been copied and pasted into the file ```university_towns.txt```.
# * From the Bureau of Economic Analysis, US Department of Commerce, the [GDP over time](http://www.bea.gov/national/index.htm#gdp) of the United States in current dollars (use the chained value in 2009 dollars), in quarterly intervals, is in the file ```gdplev.xls```. For this assignment, only look at GDP data from the first quarter of 2000 onward.
#
# Each function in this assignment below is worth 10%, with the exception of ```run_ttest()```, which is worth 50%.
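# To make the definitions above concrete, the cell below walks through them on a tiny synthetic GDP series. The numbers are invented purely for illustration; they are not the assignment data (`gdplev.xls` is processed later in the notebook).

# In[ ]:

# A minimal sketch of the recession definitions on made-up quarterly GDP
# values (illustrative only; not the assignment data).
import pandas as pd

toy = pd.Series([100, 102, 101, 99, 97, 98, 100, 103],
                index=['2005q1', '2005q2', '2005q3', '2005q4',
                       '2006q1', '2006q2', '2006q3', '2006q4'])
delta = toy.diff()
# a recession starts at the first of two consecutive quarters of decline
starts = [q for i, q in enumerate(toy.index[:-1])
          if delta.iloc[i] < 0 and delta.iloc[i + 1] < 0]
print('recession start:', starts[0])                              # 2005q3
# it ends with two consecutive quarters of growth (2006q2 and 2006q3 here),
# and the bottom is the lowest-GDP quarter inside the recession
print('recession bottom:', toy.loc[starts[0]:'2006q3'].idxmin())  # 2006q1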
# In[ ]:

# Use this dictionary to map two-letter state acronyms to state names
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada',
          'WY': 'Wyoming', 'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland',
          'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon', 'MT': 'Montana',
          'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia',
          'VT': 'Vermont', 'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine',
          'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin', 'MI': 'Michigan',
          'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam',
          'MS': 'Mississippi', 'PR': 'Puerto Rico', 'NC': 'North Carolina',
          'TX': 'Texas', 'SD': 'South Dakota', 'MP': 'Northern Mariana Islands',
          'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut', 'WV': 'West Virginia',
          'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York',
          'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California',
          'CO': 'Colorado', 'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico',
          'RI': 'Rhode Island', 'MN': 'Minnesota', 'VI': 'Virgin Islands',
          'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia',
          'ND': 'North Dakota', 'VA': 'Virginia'}

# In[ ]:

import pandas as pd
import numpy as np
import re
from scipy import stats

def get_list_of_university_towns():
    '''Returns a DataFrame of towns and the states they are in from the
    university_towns.txt list. The format of the DataFrame is:
    DataFrame( [ ["Michigan", "Ann Arbor"], ["Michigan", "Ypsilanti"] ],
    columns=["State", "RegionName"] )'''
    f = open('university_towns.txt').readlines()
    # each state-name line ends in "[edit]"
    states = [line.split('[edit]')[0] for line in f if '[edit]' in line]
    f2 = open('university_towns.txt').read()
    # split on the state-name lines to get the block of towns for each state
    town_groups = re.split(r'[A-Z]\w*\s?\w*\[edit\]', f2)[1:]
    school = []
    for state in range(len(states)):
        for town in town_groups[state].split('\n'):
            if (town != '') and (town != '\n'):
                # drop the trailing " (university ...)" annotation
                school.append({'RegionName': town.split(' (')[0],
                               'State': states[state]})
    university_towns = pd.DataFrame(school)
    university_towns = university_towns.sort_values(by=['State']).reset_index()
    return university_towns[['State', 'RegionName']]

get_list_of_university_towns().head(n=10)

# In[ ]:

gdp = pd.read_excel('gdplev.xls', skiprows=5)
gdp = gdp.drop(gdp.index[0:2])
gdp = gdp.reset_index()
gdp = gdp.drop(['index', 'Unnamed: 3', 'Unnamed: 7'], axis=1)
gdp = gdp.rename(columns={'Unnamed: 0': 'Year', 'Unnamed: 4': 'Quarterly'})
gdp = gdp.drop(gdp.index[0:212])   # keep only quarters from 2000q1 onward
gdp = gdp.reset_index()
col_to_keep = ['Quarterly', 'GDP in billions of current dollars.1',
               'GDP in billions of chained 2009 dollars.1']
gdp = gdp[col_to_keep]
gdp['deltaGdp'] = gdp['GDP in billions of chained 2009 dollars.1'].diff()
gdp.head(n=10)

# In[ ]:

def get_recession_start():
    '''Returns the year and quarter of the recession start time as a
    string value in a format such as 2005q3'''
    rec_years = []
    for i in range(len(gdp) - 1):   # stop one row early so i+1 stays in bounds
        if (gdp.loc[i, 'deltaGdp'] < 0) and (gdp.loc[i + 1, 'deltaGdp'] < 0):
            if gdp.loc[i, 'Quarterly'] not in rec_years:
                rec_years.append(gdp.loc[i, 'Quarterly'])
                rec_years.append(gdp.loc[i + 1, 'Quarterly'])
    return rec_years[0]

get_recession_start()
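# A quick sanity check of the result (illustrative, not part of the graded functions): the detected start quarter and the quarter after it should both show a negative `deltaGdp`, per the recession definition above.

# In[ ]:

# Show the detected start quarter and the one after it; both deltaGdp
# values should be negative for a valid recession start.
start = get_recession_start()
i = gdp.index[gdp['Quarterly'] == start][0]
gdp.loc[i:i + 1, ['Quarterly', 'deltaGdp']]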
# In[ ]:

def get_recession_end():
    '''Returns the year and quarter of the recession end time as a
    string value in a format such as 2005q3'''
    rec_years = []
    for i in range(len(gdp) - 1):   # stop one row early so i+1 stays in bounds
        if (gdp.loc[i, 'deltaGdp'] < 0) and (gdp.loc[i + 1, 'deltaGdp'] < 0):
            if gdp.loc[i, 'Quarterly'] not in rec_years:
                rec_years.append(gdp.loc[i, 'Quarterly'])
                rec_years.append(i)
                rec_years.append(gdp.loc[i + 1, 'Quarterly'])
                rec_years.append(i + 1)
    # rec_years[-1] is the row index of the last quarter of consecutive
    # decline; in this data the two quarters after it both show growth,
    # so the recession ends two rows later
    return gdp.loc[rec_years[-1] + 2, 'Quarterly']

get_recession_end()

# In[ ]:

def get_recession_bottom():
    '''Returns the year and quarter of the recession bottom time as a
    string value in a format such as 2005q3'''
    rec_years = []
    for i in range(len(gdp) - 1):   # stop one row early so i+1 stays in bounds
        if (gdp.loc[i, 'deltaGdp'] < 0) and (gdp.loc[i + 1, 'deltaGdp'] < 0):
            if gdp.loc[i, 'Quarterly'] not in rec_years:
                rec_years.append(gdp.loc[i, 'Quarterly'])
                rec_years.append(i)
                rec_years.append(gdp.loc[i + 1, 'Quarterly'])
                rec_years.append(i + 1)
    rec_start = rec_years[1]    # row index of the first recession quarter
    rec_end = rec_years[-1]     # row index of the last quarter of decline
    rec_bot = gdp.loc[rec_start:rec_end,
                      ['GDP in billions of chained 2009 dollars.1', 'Quarterly']]
    bottom_gdp = min(rec_bot['GDP in billions of chained 2009 dollars.1'])
    k = (rec_bot.where(rec_bot['GDP in billions of chained 2009 dollars.1'] == bottom_gdp)
                .dropna().reset_index())
    return k.loc[0, 'Quarterly']

get_recession_bottom()
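# For reference, the bottom can also be read off more directly with `idxmin()`. The cell below is an equivalent sketch of the lookup done inside `get_recession_bottom()`, reusing the start and end quarters found above; it is not a replacement for the graded function.

# In[ ]:

# Equivalent, more direct lookup of the recession bottom (a sketch that
# reuses get_recession_start() / get_recession_end() from above).
chained = 'GDP in billions of chained 2009 dollars.1'
start_i = gdp.index[gdp['Quarterly'] == get_recession_start()][0]
end_i = gdp.index[gdp['Quarterly'] == get_recession_end()][0]
rec = gdp.loc[start_i:end_i]
rec.loc[rec[chained].idxmin(), 'Quarterly']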
# In[ ]:

def convert_housing_data_to_quarters():
    '''Converts the housing data to quarters and returns it as mean
    values in a dataframe. This dataframe should be a dataframe with
    columns for 2000q1 through 2016q3, and should have a multi-index
    in the shape of ["State","RegionName"].

    Note: Quarters are defined in the assignment description, they are
    not arbitrary three month periods.

    The resulting dataframe should have 67 columns, and 10,730 rows.
    '''
    housing = pd.read_csv('City_Zhvi_AllHomes.csv')
    housing3 = housing.set_index(['State', 'RegionName']).loc[:, '2000-01':]
    # A row-wise loop like the one below accomplishes the same averaging as
    # the datetime/resample approach used here, but much more slowly:
    #
    #     def quarter_rows(row):
    #         for i in range(0, len(row), 3):
    #             row.replace(row[i], np.mean(row[i:i+3]), inplace=True)
    #         return row
    #     housing = housing.apply(quarter_rows, axis=1)
    housing3.columns = pd.to_datetime(housing3.columns).to_period('M')
    housing3 = housing3.resample('q', axis=1).mean()
    # map the two-letter state acronyms in the index to full state names,
    # using the states dictionary defined at the top of the notebook
    housing3 = housing3.rename(index=states)
    return housing3

convert_housing_data_to_quarters().head()
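# The monthly-to-quarterly step used inside `convert_housing_data_to_quarters()` can be seen in isolation below, on a one-row frame of made-up prices (illustrative only).

# In[ ]:

# Minimal sketch of the to_period/resample step on made-up monthly prices.
demo = pd.DataFrame([[100.0, 110.0, 120.0, 130.0, 140.0, 150.0]],
                    index=['Sometown'],
                    columns=['2000-01', '2000-02', '2000-03',
                             '2000-04', '2000-05', '2000-06'])
demo.columns = pd.to_datetime(demo.columns).to_period('M')
demo.resample('q', axis=1).mean()   # 2000Q1 -> 110.0, 2000Q2 -> 140.0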
# In[ ]:

def run_ttest():
    '''First creates new data showing the decline or growth of housing prices
    between the recession start and the recession bottom. Then runs a ttest
    comparing the university town values to the non-university town values,
    returning whether the null hypothesis (that the two groups are the same)
    can be rejected, along with the p-value of the test.

    Returns the tuple (different, p, better) where different=True if the
    t-test is True at a p<0.01 (we reject the null hypothesis), or
    different=False if otherwise (we cannot reject the null hypothesis). The
    variable p should be equal to the exact p value returned from
    scipy.stats.ttest_ind(). The value for better should be either
    "university town" or "non-university town" depending on which has a lower
    mean price ratio (which is equivalent to a reduced market loss).'''
    univ_towns = get_list_of_university_towns()
    univ_towns['univ_town'] = 'university town'   # mark university towns
    hous_data = convert_housing_data_to_quarters()
    # convert the column headings from PeriodIndex to strings such as '2008Q2'
    hous_data.columns = hous_data.columns.map(str)
    # 2008Q2 is the quarter before the recession start found by
    # get_recession_start() (2008q3); 2009Q2 is the recession bottom found
    # by get_recession_bottom()
    hous_data['change'] = hous_data['2008Q2'] - hous_data['2009Q2']
    hous_data['mean_pr'] = hous_data['2008Q2'].div(hous_data['2009Q2'])
    all_towns = pd.merge(hous_data, univ_towns, how='outer',
                         right_on=['State', 'RegionName'], left_index=True)
    all_towns = all_towns.set_index(['State', 'RegionName'])
    all_towns.loc[all_towns.univ_town != 'university town',
                  'univ_town'] = 'non-university town'
    uni = all_towns[all_towns['univ_town'] == 'university town'].dropna()
    non_uni = all_towns[all_towns['univ_town'] == 'non-university town'].dropna()
    t, p = stats.ttest_ind(non_uni['mean_pr'], uni['mean_pr'])
    # a positive t statistic means non-university towns have the higher
    # (worse) mean price ratio, so university towns fared better
    better = 'university town' if t > 0 else 'non-university town'
    different = p < 0.01
    return different, p, better

run_ttest()
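# For intuition on the values returned above, the cell below runs `ttest_ind` on two synthetic samples (random data, purely illustrative) and reads the result the same way `run_ttest()` does.

# In[ ]:

# Illustrative only: how the (statistic, p-value) pair from ttest_ind is read.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
a = rng.normal(loc=1.05, scale=0.05, size=200)   # sample with the higher mean
b = rng.normal(loc=1.00, scale=0.05, size=200)   # sample with the lower mean
t, p = stats.ttest_ind(a, b)
# a small p (< 0.01) means we reject the hypothesis that the means are equal;
# the sign of t tells which sample mean is larger (positive: a's is larger)
print(t, p, p < 0.01)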