#!/usr/bin/env python
# coding: utf-8

# In[1]:

# Import modules and set options
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Connect to the database to import data for the four test domains and demographic information:

# In[2]:

from redcap import Project
api_url = 'https://redcap.vanderbilt.edu/api/'
api_key = open("/Users/fonnescj/Dropbox/Collaborations/LSL-DR/api_token.txt").read()

lsl_dr_project = Project(api_url, api_key)

# In[3]:

metadata = lsl_dr_project.export_metadata()

# In[4]:

# for i,j in zip(lsl_dr_project.field_names,
#                lsl_dr_project.field_labels):
#     print('{0}: \t{1}'.format(i,j))

# Import each database from REDCap:

# In[5]:

articulation_fields = ['study_id','redcap_event_name', 'age_test_aaps','aaps_ss','age_test_gf2','gf2_ss']
articulation = lsl_dr_project.export_records(fields=articulation_fields, format='df',
                                             df_kwargs={'index_col':None})

# In[6]:

records = lsl_dr_project.export_records(fields=articulation_fields)

# In[7]:

print(records[0]['study_id'])

# In[8]:

expressive_fields = ['study_id','redcap_event_name','age_test_eowpvt','eowpvt_ss','age_test_evt','evt_ss']
expressive = lsl_dr_project.export_records(fields=expressive_fields, format='df',
                                           df_kwargs={'index_col':None, 'na_values':[999, 9999]})

# In[9]:

receptive_fields = ['study_id','redcap_event_name','age_test_ppvt','ppvt_ss','age_test_rowpvt','rowpvt_ss']
receptive = lsl_dr_project.export_records(fields=receptive_fields, format='df',
                                          df_kwargs={'index_col':None, 'na_values':[999, 9999]})

# In[10]:

language_fields = ['study_id','redcap_event_name','pls_ac_ss','pls_ec_ss','pls_choice','age_test_pls',
                   'owls_lc_ss','owls_oe_ss','age_test_owls',
                   'celfp_rl_ss','celfp_el_ss','age_test_celp',
                   'celf_elss','celf_rlss','age_test_celf']
language_raw = lsl_dr_project.export_records(fields=language_fields, format='df',
                                             df_kwargs={'index_col':None, 'na_values':[999, 9999]})

# In[11]:

demographic_fields = ['study_id','redcap_event_name','redcap_data_access_group','academic_year',
                      'hl','prim_lang','mother_ed','father_ed','premature_age','synd_cause','age_disenrolled',
                      'race','onset_1','age_int','age','age_amp','age_ci','age_ci_2','degree_hl_ad','type_hl_ad',
                      'tech_ad','degree_hl_as','type_hl_as','tech_as','etiology','etiology_2','sib','gender',
                      'time','ad_250','as_250','ae','ad_500','as_500','fam_age','family_inv','demo_ses',
                      'school_lunch','medicaid','hearing_changes','slc_fo','sle_fo','a_fo','funct_out_age',
                      'att_days_hr','att_days_sch','att_days_st2_417']
demographic_raw = lsl_dr_project.export_records(fields=demographic_fields, format='df',
                                                df_kwargs={'index_col':None, 'na_values':[888, 999, 9999]})

# In[12]:

demographic_raw[demographic_raw.study_id=='1147-2010-0064']

# ## Attendance information

# Several fields in the demographic data have missing values.
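# A quick per-column count makes the extent of the missingness concrete (a minimal sketch; it assumes only the `demographic_raw` frame exported above):

# In[ ]:

# Number of missing values in each demographic field, largest first
demographic_raw.isnull().sum().sort_values(ascending=False).head(10)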
# In[13]:

demographic_raw.head()

# We can fill missing values forward from the previous observation (by `study_id`):

# In[14]:

demographic = demographic_raw.sort_values(by='redcap_event_name').groupby('study_id').transform(
    lambda recs: recs.fillna(method='ffill'))
demographic["study_id"] = demographic_raw.sort_values(by='redcap_event_name').study_id

# Spot check to make sure this worked:

# In[15]:

demographic[demographic.study_id=='1147-2010-0064']

# Demographic data without missing values:

# In[16]:

demographic.head()

# ## Cleaning language dataset
#
# 5 language measures:
#
# - 3 versions of CELF
# - PLS
#     - pls_ac_rs: PLS: Auditory Comprehension Raw Score
#     - pls_ac_ss: PLS: Auditory Comprehension Standard Score
#     - pls_ec_rs: PLS: Expressive Communication Raw Score
#     - pls_ec_ss: PLS: Expressive Communication Standard Score
#     - pls_tl_rs: PLS: Total Language Score Raw Score Total
#     - pls_tl_ss: PLS: Total Language Score Standard Score
# - OWLS
#     - age_test_owls: Age at time of testing (OWLS)
#     - owls_lc_rs: OWLS: Listening Comprehension Raw Score
#     - owls_lc_ss: OWLS: Listening Comprehension Standard Score
#     - owls_oe_rs: OWLS: Oral Expression Raw Score
#     - owls_oe_ss: OWLS: Oral Expression Standard Score
#     - owls_oc_sss: OWLS: Oral Composite Sum of Listening Comprehension and Oral Expression Standard Scores
#     - owls_oc_ss: OWLS: Oral Composite Standard Score
#     - owls_wes_trs: OWLS: Written Expression Scale Total Raw Score
#     - owls_wes_as: OWLS: Written Expression Scale Ability Score
#     - owls_wes_ss: OWLS: Written Expression Scale Standard Score
#     - owsl_lc: OWLS: Written Expression Scale Language Composite (Sum of written expression age-based standard score, listening comprehension standard score and oral expression standard score)
#     - owls_lcss: OWLS: Language Composite Standard Score

# In[17]:

# Test type
language_raw["test_name"] = None
language_raw["test_type"] = None
language_raw["score"] = None
CELP = language_raw.age_test_celp.notnull()
CELF = language_raw.age_test_celf.notnull()
PLS = language_raw.age_test_pls.notnull()
OWLS = language_raw.age_test_owls.notnull()

language_raw['age_test'] = None
language_raw.loc[CELP, 'age_test'] = language_raw.age_test_celp
language_raw.loc[CELF, 'age_test'] = language_raw.age_test_celf
language_raw.loc[PLS, 'age_test'] = language_raw.age_test_pls
language_raw.loc[OWLS, 'age_test'] = language_raw.age_test_owls

language1 = language_raw[CELP | CELF | PLS | OWLS].copy()
language2 = language1.copy()

language1["test_type"] = "receptive"
language1.loc[CELP, "test_name"] = "CELF-P2"
language1.loc[CELF, "test_name"] = "CELF-4"
language1.loc[PLS, "test_name"] = "PLS"
language1.loc[OWLS, "test_name"] = "OWLS"
language1.loc[CELP, "score"] = language1.celfp_rl_ss
language1.loc[CELF, "score"] = language1.celf_rlss
language1.loc[PLS, "score"] = language1.pls_ac_ss
language1.loc[OWLS, "score"] = language1.owls_lc_ss

language2["test_type"] = "expressive"
language2.loc[CELP, "test_name"] = "CELF-P2"
language2.loc[CELF, "test_name"] = "CELF-4"
language2.loc[PLS, "test_name"] = "PLS"
language2.loc[OWLS, "test_name"] = "OWLS"
language2.loc[CELP, "score"] = language2.celfp_el_ss
language2.loc[CELF, "score"] = language2.celf_elss
language2.loc[PLS, "score"] = language2.pls_ec_ss
language2.loc[OWLS, "score"] = language2.owls_oe_ss

language = pd.concat([language1, language2])
language = language[language.score.notnull()]
print(pd.crosstab(language.test_name, language.test_type))
print("There are {0} null values for score".format(sum(language["score"].isnull())))

# A `school` variable was added, which is the first four characters of the `study_id`:

# In[18]:

language["school"] = language.study_id.str.slice(0,4)

# In[19]:

language = language[["study_id", "redcap_event_name", "score", "test_type", "test_name", "school", "age_test"]]
language["domain"] = "Language"
language.head()

# ## Cleaning articulation dataset
#
# We converted the articulation dataset into a "long" format:

# In[20]:

# Test type
articulation["test_type"] = None
ARIZ = articulation.aaps_ss.notnull()
GF = articulation.gf2_ss.notnull()
articulation = articulation[ARIZ | GF]
articulation.loc[(ARIZ & GF), "test_type"] = "Arizona and Goldman"
articulation.loc[(ARIZ & ~GF), "test_type"] = "Arizona"
articulation.loc[(~ARIZ & GF), "test_type"] = "Goldman"
print(articulation.test_type.value_counts())
print("There are {0} null values for test_type".format(sum(articulation["test_type"].isnull())))

# Test score (Arizona if both)
articulation["score"] = articulation.aaps_ss
articulation.loc[(~ARIZ & GF), "score"] = articulation.gf2_ss[~ARIZ & GF]

# A `school` variable was added, which is the first four characters of the `study_id`:

# In[21]:

articulation["school"] = articulation.study_id.str.slice(0,4)

# The age was taken to be the Arizona age if both test types are present:

# In[22]:

articulation["age_test"] = articulation.age_test_aaps
articulation.loc[articulation.age_test.isnull(), 'age_test'] = articulation.age_test_gf2[articulation.age_test.isnull()]
print(articulation.age_test.describe())

# Finally, we dropped unwanted columns and added a domain identification column for merging:

# In[23]:

articulation = articulation.drop(["age_test_aaps", "age_test_gf2", "aaps_ss", "gf2_ss"], axis=1)
articulation["domain"] = "Articulation"
articulation.head()

# ## Cleaning demographic dataset
#
# We renamed the `gender` field to `male`; the exclusion of rows with missing gender is left commented out:

# In[24]:

# Retain only subset of columns
#demographic = demographic[demographic.gender.notnull()]
demographic = demographic.rename(columns={'gender':'male'})

# Due to sample size considerations, we reduced the primary language variable to English (0) and non-English (1):

# In[25]:

demographic["non_english"] = None
demographic.loc[demographic.prim_lang.notnull(), 'non_english'] = demographic.prim_lang[demographic.prim_lang.notnull()] > 0
print(demographic.non_english.value_counts())
print("There are {0} null values for non_english".format(sum(demographic.non_english.isnull())))

# Mother's education (`mother_ed`) and father's education (`father_ed`) were both recoded to:
#
# * 0=no high school diploma
# * 1=high school
# * 2=undergraduate
# * 3=graduate
#
# Category 6 (unknown) was recoded as missing (a compact dict-based version is sketched below).
# In[26]:

demographic = demographic.rename(columns={"mother_ed":"_mother_ed"})
demographic["mother_ed"] = demographic._mother_ed.copy()
demographic.loc[demographic._mother_ed==1, 'mother_ed'] = 0
demographic.loc[(demographic._mother_ed==2) | (demographic._mother_ed==3), 'mother_ed'] = 1
demographic.loc[demographic._mother_ed==4, 'mother_ed'] = 2
demographic.loc[demographic._mother_ed==5, 'mother_ed'] = 3
demographic.loc[demographic._mother_ed==6, 'mother_ed'] = None
print("_mother_ed:")
print(demographic._mother_ed.value_counts())
print("mother_ed:")
print(demographic.mother_ed.value_counts())
print("\nThere are {0} null values for mother_ed".format(sum(demographic.mother_ed.isnull())))

# Secondary diagnosis:

# In[27]:

demographic['secondary_diagnosis'] = demographic.etiology==0
# Suspected or unknown treated as missing
demographic.loc[demographic.etiology > 1, 'secondary_diagnosis'] = None

# In[28]:

demographic.secondary_diagnosis.value_counts()

# In[29]:

demographic.secondary_diagnosis.mean()

# Premature status was recoded to the number of weeks premature (full term = 0 weeks); category 9 (unknown) was treated as missing. Here, premature indicates <36 weeks.

# In[30]:

demographic['premature_weeks'] = demographic.premature_age.copy()
demographic.loc[demographic.premature_age==9, 'premature_weeks'] = None
demographic.premature_weeks = abs(demographic.premature_weeks - 8) * 2
print("There are {0} null values for premature_weeks".format(sum(demographic.premature_weeks.isnull())))

# In[31]:

demographic.premature_weeks.value_counts()

# Recode implant technology variables for each ear to one of four categories (None, Baha, Hearing aid, Cochlear implant):

# In[32]:

tech_cats = ["None", "Baha", "Hearing aid", "Cochlear"]

demographic["tech_right"] = demographic.tech_ad.copy()
demographic.loc[demographic.tech_right==6, 'tech_right'] = 0
demographic.loc[demographic.tech_right==4, 'tech_right'] = 1
demographic.loc[demographic.tech_right==5, 'tech_right'] = 1
demographic.loc[demographic.tech_right==3, 'tech_right'] = 2
demographic.loc[demographic.tech_right==7, 'tech_right'] = 3
demographic.tech_right = np.abs(demographic.tech_right - 3)

demographic["tech_left"] = demographic.tech_as.copy()
demographic.loc[demographic.tech_left==6, 'tech_left'] = 0
demographic.loc[demographic.tech_left==4, 'tech_left'] = 1
demographic.loc[demographic.tech_left==5, 'tech_left'] = 1
demographic.loc[demographic.tech_left==3, 'tech_left'] = 2
demographic.loc[demographic.tech_left==7, 'tech_left'] = 3
demographic.tech_left = np.abs(demographic.tech_left - 3)

# Substitute valid missing values for hearing loss:

# In[33]:

demographic.loc[demographic.type_hl_ad==5, 'type_hl_ad'] = None
demographic.loc[demographic.type_hl_as==5, 'type_hl_as'] = None

# Create `degree_hl`, which is the maximum level of hearing loss in either ear:

# In[34]:

demographic["degree_hl"] = np.maximum(demographic.degree_hl_ad, demographic.degree_hl_as)

# Create a compound indicator variable for each technology (Baha, Hearing aid, Cochlear implant), with a reusable helper sketched after this list:
#
# * 0=none
# * 1=one ear
# * 2=both ears
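# The next cell repeats the same one-ear/both-ears logic three times, once per device. A factored-out helper would look like the following (a minimal sketch, not part of the original pipeline; `ear_code` is the device's value in the recoded `tech_right`/`tech_left` columns):

# In[ ]:

def ear_indicator(df, ear_code):
    # 0 = neither ear, 1 = one ear, 2 = both ears; missing when both ears are unknown
    count = (df.tech_right == ear_code).astype(int) + (df.tech_left == ear_code).astype(int)
    count = count.astype(object)
    count[df.tech_right.isnull() & df.tech_left.isnull()] = None
    return count

# e.g. ear_indicator(demographic, 1) reproduces the `baha` column built below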
# In[35]: demographic["baha"] = 0 demographic.baha = demographic.baha.astype(object) demographic.loc[(demographic.tech_right==1) | (demographic.tech_left==1), 'baha'] = 1 demographic.loc[(demographic.tech_right==1) & (demographic.tech_left==1), 'baha'] = 2 demographic.loc[(demographic.tech_right.isnull()) & (demographic.tech_left.isnull()), 'baha'] = None print("baha:") print(demographic.drop_duplicates(subset='study_id').baha.value_counts()) print("There are {0} null values for baha".format(sum(demographic.baha.isnull()))) demographic["hearing_aid"] = 0 demographic.hearing_aid = demographic.hearing_aid.astype(object) demographic.loc[(demographic.tech_right==2) | (demographic.tech_left==2), 'hearing_aid'] = 1 demographic.loc[(demographic.tech_right==2) & (demographic.tech_left==2), 'hearing_aid'] = 2 demographic.loc[(demographic.tech_right.isnull()) & (demographic.tech_right.isnull()), 'hearing_aid'] = None print("\nhearing_aid:") print(demographic.drop_duplicates(subset='study_id').hearing_aid.value_counts()) print("There are {0} null values for hearing_aid".format(sum(demographic.hearing_aid.isnull()))) demographic["cochlear"] = 0 demographic.cochlear = demographic.cochlear.astype(object) demographic.loc[(demographic.tech_right==3) | (demographic.tech_left==3), 'cochlear'] = 1 demographic.loc[(demographic.tech_right==3) & (demographic.tech_left==3), 'cochlear'] = 2 demographic.loc[(demographic.tech_right.isnull()) & (demographic.tech_left.isnull()), 'cochlear'] = None print("\ncochlear:") print(demographic.drop_duplicates(subset='study_id').cochlear.value_counts()) print("There are {0} null values for cochlear".format(sum(demographic.cochlear.isnull()))) print(len(demographic)) # Identify bilateral and bimodal individuals: # In[36]: demographic["bilateral_ci"] = demographic.cochlear==2 demographic["bilateral_ha"] = demographic.hearing_aid==2 demographic["bimodal"] = (demographic.cochlear==1) & (demographic.hearing_aid==1) # In[37]: demographic.bilateral_ci.sum(), demographic.bilateral_ha.sum(), demographic.bimodal.sum() # In[175]: demographic.drop_duplicates(subset='study_id')[['bilateral_ci', 'bilateral_ha', 'bimodal']].sum() # Create variable that identifies bilateral (0), bilateral HA left (1), bilateral HA right (2) # In[38]: demographic['tech'] = 0 demographic.loc[(demographic.bimodal) & (demographic.tech_left==2), 'tech'] = 1 demographic.loc[(demographic.bimodal) & (demographic.tech_right==2), 'tech'] = 2 print("There are {0} null values for tech".format(sum(demographic.tech.isnull()))) # In[39]: demographic["implant_category"] = None demographic.loc[(demographic.cochlear==1) & (demographic.hearing_aid==0) & (demographic.baha==0), 'implant_category'] = 0 demographic.loc[(demographic.cochlear==0) & (demographic.hearing_aid==1) & (demographic.baha==0), 'implant_category'] = 1 demographic.loc[(demographic.cochlear==0) & (demographic.hearing_aid==0) & (demographic.baha==1), 'implant_category'] = 2 demographic.loc[(demographic.cochlear==2) & (demographic.hearing_aid==0) & (demographic.baha==0), 'implant_category'] = 3 demographic.loc[(demographic.cochlear==1) & (demographic.hearing_aid==1) & (demographic.baha==0), 'implant_category'] = 4 demographic.loc[(demographic.cochlear==1) & (demographic.hearing_aid==0) & (demographic.baha==1), 'implant_category'] = 5 demographic.loc[(demographic.cochlear==0) & (demographic.hearing_aid==2) & (demographic.baha==0), 'implant_category'] = 6 demographic.loc[(demographic.cochlear==0) & (demographic.hearing_aid==1) & (demographic.baha==1), 
'implant_category'] = 7 demographic.loc[(demographic.cochlear==0) & (demographic.hearing_aid==0) & (demographic.baha==2), 'implant_category'] = 8 demographic.implant_category.value_counts() # **Age when hearing loss diagnosed** Data are entered inconsistently here, so we have to go in and replace non-numeric values. # In[40]: demographic.onset_1.unique() # In[41]: # Don't need this anymore # demographic['age_diag'] = demographic.onset_1.replace({'birth': 0, 'R- Birth L-16mo': 0, 'birth - 3': 0, 'at birth': 0, 'NBHS': 0, # 'at Birth': 0, '1-2': 1.5, '2-3': 2.5, '0-3': 1.5}).astype(float) demographic['age_diag'] = demographic.onset_1 # Number of null values for `age_diag` # In[42]: demographic.age_diag.isnull().sum() # In[43]: demographic['sex'] = demographic.male.replace({0:'Female', 1:'Male'}) # In[44]: import seaborn as sb unique_students = demographic.dropna(subset=['sex']).groupby('study_id').first() # ag = sb.factorplot("sex", data=unique_students, # palette="PuBuGn_d", kind='count') # ag.set_xticklabels(['Female ({})'.format((unique_students.male==0).sum()), # 'Male ({})'.format((unique_students.male==1).sum())]) # ag.set_xlabels('') # Child has another diagnosed disability # In[45]: demographic['known_synd'] = (demographic.synd_cause == 0) # Unknown or suspected demographic.loc[demographic.synd_cause > 1, 'known_synd'] = None # In[46]: # If either known syndrome or secondary diagnosis demographic['synd_or_disab'] = demographic.apply(lambda x: x['secondary_diagnosis'] or x['known_synd'], axis=1) # Missing sibling counts were properly encoded as `None` (missing). # In[47]: demographic.loc[demographic.sib==4, 'sib'] = None # We reduced the number of race categories, pooling those that were neither caucasian, black, hispanic or asian to "other", due to small sample sizes for these categories. Category 7 (unknown) was recoded as missing. 
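# The pooling in the next cell can also be written without chained assignments, by masking the unknown category and clipping the remaining codes (a sketch only; `pooled` is a throwaway name and is not used downstream):

# In[ ]:

# Codes 4-6 collapse to 4 ("Other"); 7 (unknown) becomes missing
pooled = demographic.race.where(demographic.race != 7).clip(upper=4)
pooled.value_counts()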
# In[48]: races = ["Caucasian", "Black or African American", "Hispanic or Latino", "Asian", "Other"] demographic = demographic.rename(columns={"race":"_race"}) demographic["race"] = demographic._race.copy() demographic.loc[demographic.race==7, 'race'] = None demographic.loc[demographic.race>3, 'race'] = 4 print("_race:") print(demographic._race.value_counts()) print("race:") print(demographic.race.value_counts()) print("There are {0} null values for race".format(sum(demographic.race.isnull()))) # Replace with recoded column # Recode implant technology variables # In[49]: tech_cats = ["None", "Baha", "Hearing aid", "Cochlear", "Other"] demographic["tech_right"] = demographic.tech_ad.copy() demographic.loc[demographic.tech_right==6, 'tech_right'] = 0 demographic.loc[demographic.tech_right==4, 'tech_right'] = 1 demographic.loc[demographic.tech_right==5, 'tech_right'] = 1 demographic.loc[demographic.tech_right==3, 'tech_right'] = 2 demographic.loc[demographic.tech_right==7, 'tech_right'] = 3 demographic.loc[demographic.tech_right==8, 'tech_right'] = 3 demographic.loc[demographic.tech_right==9, 'tech_right'] = 4 demographic.tech_right = np.abs(demographic.tech_right - 3) demographic["tech_left"] = demographic.tech_as.copy() demographic.loc[demographic.tech_left==6, 'tech_left'] = 0 demographic.loc[demographic.tech_left==4, 'tech_left'] = 1 demographic.loc[demographic.tech_left==5, 'tech_left'] = 1 demographic.loc[demographic.tech_left==3, 'tech_left'] = 2 demographic.loc[demographic.tech_left==7, 'tech_left'] = 3 demographic.loc[demographic.tech_left==8, 'tech_left'] = 3 demographic.loc[demographic.tech_left==9, 'tech_left'] = 4 demographic.tech_left = np.abs(demographic.tech_left - 3) # In[50]: # Don't need this anymore # demographic['age_amp'] = demographic.age_amp.replace({'22 mo': 22, 'unknown': np.nan, 'none': np.nan, # 'n/a unilateral loss': np.nan, 'not amplified yet': np.nan, # 'not amplified': np.nan, 'n/a': np.nan, '4 months-6 months': 5, # '6 weeks': 0.5, '13-18': 15.5, '0-3': 1.5, '24-36': 30}).astype(float) # In[51]: demographic['academic_year'] = demographic.academic_year.replace( {'12013-2014': '2013-2014', '2010 - 20111': '2010 - 2011', '2020-2011': '2010-2011', '2012-20013': '2012-2013', '0000-0000': np.nan}) # In[53]: demographic.age_amp.hist() # ## Cleaning expressive vocabulary dataset # # We converted the expressive vocabulary dataset to "long" format: # In[54]: # Test type expressive["test_type"] = None EOWPVT = expressive.eowpvt_ss.notnull() EVT = expressive.evt_ss.notnull() expressive = expressive[EOWPVT | EVT] expressive.loc[EOWPVT & EVT, "test_type"] = "EOWPVT and EVT" expressive.loc[EOWPVT & ~EVT, "test_type"] = "EOWPVT" expressive.loc[~EOWPVT & EVT, "test_type"] = "EVT" print("There are {0} null values for test_type".format(sum(expressive["test_type"].isnull()))) expressive["score"] = expressive.eowpvt_ss expressive.loc[~EOWPVT & EVT, "score"] = expressive.evt_ss[~EOWPVT & EVT] # In[55]: expressive.test_type.value_counts() # A `school` variable was added, which is the first four columns of the `study_id`: # In[56]: expressive["school"] = expressive.study_id.str.slice(0,4) # The age was taken to be the EOWPVT age if there are both test types: # In[57]: expressive["age_test"] = expressive.age_test_eowpvt expressive.loc[expressive.age_test.isnull(), 'age_test'] = expressive.age_test_evt[expressive.age_test.isnull()] # Finally, we dropped unwanted columns and added a domain identification column for merging: # In[58]: expressive = expressive[["study_id", 
"redcap_event_name", "score", "test_type", "school", "age_test"]] expressive["domain"] = "Expressive Vocabulary" expressive.head() # ## Cleaning receptive vocabulary dataset # We converted the receptive vocabulary data table to "long" format: # In[59]: receptive.columns # In[60]: # Test type receptive["test_type"] = None PPVT = receptive.ppvt_ss.notnull() ROWPVT = receptive.rowpvt_ss.notnull() receptive = receptive[PPVT | ROWPVT] receptive.loc[PPVT & ROWPVT, "test_type"] = "PPVT and ROWPVT" receptive.loc[PPVT & ~ROWPVT, "test_type"] = "PPVT" receptive.loc[~PPVT & ROWPVT, "test_type"] = "ROWPVT" print("There are {0} null values for test_type".format(sum(receptive["test_type"].isnull()))) receptive["score"] = receptive.ppvt_ss receptive.loc[~PPVT & ROWPVT, "score"] = receptive.rowpvt_ss[~PPVT & ROWPVT] # A `school` variable was added, which is the first four columns of the `study_id`: # In[61]: receptive["school"] = receptive.study_id.str.slice(0,4) # The age was taken to be the PPVT age if there are both test types: # In[62]: receptive["age_test"] = receptive.age_test_ppvt receptive.loc[receptive.age_test.isnull(), 'age_test'] = receptive.age_test_rowpvt[receptive.age_test.isnull()] # In[63]: print("There are {0} null values for age_test".format(sum(receptive.age_test.isnull()))) # Finally, we dropped unwanted columns and added a domain identification column for merging: # In[64]: receptive = receptive[["study_id", "redcap_event_name", "score", "test_type", "school", "age_test"]] receptive["domain"] = "Receptive Vocabulary" receptive.head() # In[65]: receptive.study_id.unique().shape # ## Merge datasets # # The four datasets were mereged into a single table. First, we concatenate the test scores data: # In[66]: test_scores = pd.concat([articulation, expressive, receptive, language]) # Then we perform a merge between the demographic data and the test scores data: # In[184]: lsl_dr = pd.merge(demographic, test_scores, on=["study_id", "redcap_event_name"], how='left') # In[185]: lsl_dr.tail() # In[186]: lsl_dr['academic_year_start'] = lsl_dr.academic_year.apply(lambda x: str(x).strip()[:4]) lsl_dr.academic_year_start.value_counts() # In[190]: current_year_only = True if current_year_only: lsl_dr = lsl_dr[lsl_dr.academic_year_start=='2013'] # In[192]: expressive_scores = lsl_dr[(lsl_dr.domain=='Expressive Vocabulary') & (lsl_dr.score>=20)].score expressive_scores.hist(bins=25) plt.xlabel('Standard scores'); plt.ylabel('Frequency'); # In[193]: expressive_lang_scores = lsl_dr[(lsl_dr.domain=='Language') & (lsl_dr.test_type=='expressive')].score expressive_lang_scores.hist(bins=25) plt.xlabel('Standard scores'); plt.ylabel('Frequency'); # Export dataset # In[194]: if current_year_only: lsl_dr.to_csv('lsl_dr_current_year.csv') else: lsl_dr.to_csv('lsl_dr.csv') # In[195]: lsl_dr.shape # In[196]: lsl_dr.study_id.unique().shape # In[197]: demographic.study_id.unique().shape # Convert score to floating-point number # In[198]: lsl_dr.score = lsl_dr.score.astype(float) # ### Plots of Demographic Data # In[199]: plot_color = "#64AAE8" # In[200]: def plot_demo_data(series, labels=None, color=plot_color, rot=0, label_offset=20, xlim=None, ylim=None, title=None, **kwargs): ax = kwargs.get('ax') if ax is None: f, ax = plt.subplots() counts = series.value_counts().sort_index(1) counts.plot(kind='bar', grid=False, rot=rot, color=color, **kwargs) if xlim is None: ax.set_xlim(-0.5, len(counts)-0.5) if ylim is not None: ax.set_ylim(*ylim) ax.set_ylabel('Count') if labels is not None: ax.set_xticklabels(labels) if 
title: ax.set_title(title) for i,x in enumerate(counts): ax.annotate('%i' % x, (i, x + label_offset)) # plt.gca().tight_layout() # In[201]: unique_students = demographic.drop_duplicates('study_id') # In[202]: unique_students.shape # In[203]: unique_students.age.describe() # In[204]: plot_demo_data(unique_students.male, ('Female', 'Male'), label_offset=20, ylim=(0, 2600), color=plot_color) # In[205]: plot_demo_data(unique_students.prim_lang, ('English', 'Spanish', 'Chinese', 'French', 'German', 'Tagalong', 'Other'), rot=70, color=plot_color) # In[206]: unique_students.prim_lang.count() # In[207]: plot_demo_data(unique_students.sib, ('1', '2', '3', '4+'), color=plot_color) # In[208]: unique_students.sib.count() # In[209]: amp_ages = ["Birth-3 months", "4 months - 6 months", "7 months - 9 months", "10 months- 12 months", "13 months - 18 months", "19 months - 24 months", "2 years 1 day - 3 years", "3 years 1 day - 4 years", "4 years 1 day - 5 years", "5 years 1 day - 6 years", "6 years"] demographic.loc[demographic.age_amp==11, 'age_amp'] = None print("There are {0} null values for age_amp".format(sum(demographic.age_amp.isnull()))) # In[210]: age_classes = pd.Series(pd.cut(unique_students.sort('age_amp').age_amp.dropna(), [-1,3,6,9,12,18,24,36,48,60,72,1000], labels=amp_ages)) # In[211]: age_amp_counts = age_classes.value_counts()[amp_ages] age_amp_counts.plot(kind='bar', grid=False, rot=90, color=plot_color) plt.xlim(-0.5, len(age_amp_counts)-0.5) plt.ylabel('Count') for i,x in enumerate(age_amp_counts): plt.annotate('%i' % x, (i, x + 10)) # In[212]: age_amp_counts.sum() # In[213]: unique_students.age_amp.max() # In[214]: (unique_students.age_amp/12.).hist(bins=16, grid=False, color=plot_color) plt.ylabel('Count') plt.xlabel('Age at amplification') # In[215]: plot_demo_data(unique_students.tech_left, tech_cats, rot=90, color=plot_color) # In[216]: plot_demo_data(unique_students.tech_right, tech_cats, rot=90, color=plot_color, ylim=(0, 2500)) # In[217]: f, axes = plt.subplots(2, 1) plot_demo_data(unique_students.tech_right, [""]*len(tech_cats), rot=90, ax=axes[0], title='Right ear', color=plot_color, ylim=(0, 2500)) plot_demo_data(unique_students.tech_left, tech_cats, rot=90, ax=axes[1], title='Left ear', color=plot_color) # In[218]: unique_students.tech_right.count() # In[219]: unique_students.tech_left.count() # In[220]: degree_hl_cats = 'Normal (0-14)', 'Slight (15-25)', 'Mild (26-40)', \ 'Moderate (41-55)', 'Moderately Severe (56-70)', 'Severe (71-90)', 'Profound (90+)' plot_demo_data(unique_students.degree_hl_ad, degree_hl_cats, rot=90, color=plot_color) # In[221]: _, axes = plt.subplots(2, 1) plot_demo_data(unique_students.degree_hl_ad, [""]*7, rot=90, color=plot_color, ax=axes[0], title='Right ear') plot_demo_data(unique_students.degree_hl_as, degree_hl_cats, rot=90, color=plot_color, ylim=(0,2000), ax=axes[1], title='Left ear'); # In[222]: unique_students.degree_hl_as.count() # In[223]: type_hl_cats = 'Sensorineural', 'Conductive', 'Mixed', 'Neural', 'Normal', 'Unknown' plot_demo_data(unique_students.type_hl_ad, type_hl_cats, rot=90, color=plot_color) # In[224]: plot_demo_data(unique_students.type_hl_as, type_hl_cats, rot=90, color=plot_color) # In[225]: unique_students.type_hl_ad.count() # In[226]: unique_students.type_hl_as.count() # In[227]: type_hl_cats = 'Sensorineural', 'Conductive', 'Mixed', 'Neural', 'Normal', 'Unknown' f, axes = plt.subplots(2,1) plot_demo_data(unique_students.type_hl_ad, [""]*len(type_hl_cats), rot=90, title='Right ear', ax=axes[0], color=plot_color) 
plot_demo_data(unique_students.type_hl_as, type_hl_cats, rot=90,
               title='Left ear', ax=axes[1], color=plot_color)

# In[228]:

demographic[demographic.study_id=='1147-2010-0064']

# In[229]:

receptive[receptive.study_id=='1147-2010-0064']

# In[230]:

lsl_dr[lsl_dr.study_id=='1147-2010-0064']

# In[231]:

unique_students.type_hl_ad.count()

# In[232]:

receptive[receptive.domain=="Receptive Vocabulary"].study_id.unique().shape

# In[233]:

demographic.study_id.unique().shape

# In[234]:

receptive.study_id.unique().shape

# In[235]:

lsl_dr[lsl_dr.domain=="Receptive Vocabulary"].study_id.unique().shape

# In[236]:

receptive_ids = receptive.study_id.unique()

# In[237]:

demographic_ids = demographic.study_id.unique()

# In[238]:

[s for s in receptive_ids if s not in demographic_ids]

# Summarize scores by age at testing (in whole years, capped at 11) for a given domain:

# In[239]:

def score_summary(domain, test_type=None):
    subset = lsl_dr[lsl_dr.domain==domain].copy()
    if test_type is not None:
        subset = subset[subset.test_type==test_type]
    subset['age_test'] = (subset.age_test/12).dropna().astype(int)
    subset.loc[subset.age_test > 11, 'age_test'] = 11
    subset = subset[subset.age_test > 1]
    byage = subset.groupby('age_test')
    n = byage.study_id.count()
    mean = byage.score.mean()
    sd = byage.score.std()
    min = byage.score.min()
    max = byage.score.max()
    summary = pd.DataFrame({'Sample Size':n, 'Mean':mean,
                            'SD':sd, 'Min':min, 'Max':max})
    return summary[['Sample Size','Mean','SD','Min','Max']]

# In[240]:

receptive_summary = score_summary("Receptive Vocabulary")
receptive_summary

# In[241]:

receptive_summary.describe()

# In[242]:

receptive_summary['Sample Size'].sum()

# In[243]:

receptive_summary["Sample Size"].plot(kind='bar', grid=False, color=plot_color)
for i, x in enumerate(receptive_summary["Sample Size"]):
    plt.annotate('%i' % x, (i, x+10), va="bottom", ha="center")
plt.ylabel('Count')
plt.xlabel('Age')
plt.xlim(-0.5, 9.5)

# In[244]:

expressive_summary = score_summary("Expressive Vocabulary")
expressive_summary

# In[245]:

expressive_summary['Sample Size'].sum()

# In[272]:

expressive_summary["Sample Size"].plot(kind='bar', grid=False, color=plot_color)
for i, x in enumerate(expressive_summary["Sample Size"]):
    plt.annotate('%i' % x, (i, x+10), va="bottom", ha="center")
plt.ylabel('Count')
plt.xlabel('Age')
plt.xlim(-0.5, 9.5)
if current_year_only:
    plt.ylim(0, 400)
else:
    plt.ylim(0, 1400)

# In[247]:

articulation_summary = score_summary("Articulation")
articulation_summary

# In[248]:

articulation_summary['Sample Size'].sum()

# In[249]:

sample_size = articulation_summary["Sample Size"]
sample_size.plot(kind='bar', grid=False, color=plot_color)
for i, x in enumerate(articulation_summary["Sample Size"]):
    plt.annotate('%i' % x, (i, x+10), va="bottom", ha="center")
plt.ylabel('Count')
plt.xlabel('Age')
plt.ylim(0, sample_size.max()+50)
plt.xlim(-0.5, 9.5)

# Language scores:

# In[250]:

lsl_dr.domain.unique()

# In[251]:

lsl_dr.test_type.unique()

# In[252]:

receptive_language_summary = score_summary("Language", "receptive")
receptive_language_summary

# In[253]:

receptive_language_summary['Sample Size'].sum()

# In[254]:

sample_size = receptive_language_summary["Sample Size"]
sample_size.plot(kind='bar', grid=False, color=plot_color)
for i, x in enumerate(receptive_language_summary["Sample Size"]):
    plt.annotate('%i' % x, (i, x+10), va="bottom", ha="center")
plt.ylabel('Count')
plt.xlabel('Age')
plt.ylim(0, sample_size.max()+50)
plt.xlim(-0.5, 9.5)

# In[255]:

expressive_language_summary = score_summary("Language", "expressive")
expressive_language_summary

# In[256]:

expressive_language_summary['Sample Size'].sum()

# In[257]:
sample_size = expressive_language_summary["Sample Size"]
sample_size.plot(kind='bar', grid=False, color=plot_color)
for i, x in enumerate(expressive_language_summary["Sample Size"]):
    plt.annotate('%i' % x, (i, x+10), va="bottom", ha="center")
plt.ylabel('Count')
plt.xlabel('Age')
plt.ylim(0, sample_size.max()+50)
plt.xlim(-0.5, 9.5)

# In[258]:

(unique_students.age/12.).hist(grid=False, bins=int(np.sqrt(unique_students.shape[0])))
plt.ylabel('Count')
plt.xlabel('Age at enrollment')

# In[259]:

(unique_students.age/12.).describe()

# Compute the change in each student's functional outcome rating between the earliest and latest assessment:

# In[260]:

def calc_difference(x, col='a_fo', jitter=True):
    # Requires at least two observations and no missing ratings or ages
    if (len(x)<2 or x[col].isnull().sum() or x['funct_out_age'].isnull().sum()):
        return None
    diff = x[col][x.funct_out_age.idxmax()] - x[col][x.funct_out_age.idxmin()]
    if jitter:
        diff += np.random.normal(scale=0.05)
    if (x.funct_out_age.max() - x.funct_out_age.min()) > 1000:
        print(x['funct_out_age'])
    return({'difference':diff, 'months': x.funct_out_age.max() - x.funct_out_age.min()})

# In[261]:

audition = pd.DataFrame(demographic.groupby('study_id').apply(calc_difference).dropna().values.tolist())

# In[262]:

plt.figure(figsize=(10,6))
plt.scatter(audition.months, audition.difference, alpha=0.5)
plt.xlabel('Months between earliest and latest rating')
plt.ylabel('Progress (levels)')
plt.title('Audition')

# In[263]:

slc = pd.DataFrame(demographic.groupby('study_id').apply(calc_difference, col='slc_fo').dropna().values.tolist())

# In[264]:

plt.figure(figsize=(10,6))
plt.scatter(slc.months, slc.difference, alpha=0.5)
plt.xlabel('Months between earliest and latest rating')
plt.ylabel('Progress (levels)')
plt.title('Spoken language (comprehension)')

# In[265]:

sle = pd.DataFrame(demographic.groupby('study_id').apply(calc_difference, col='sle_fo').dropna().values.tolist())

# In[266]:

plt.figure(figsize=(10,6))
plt.scatter(sle.months, sle.difference, alpha=0.5)
plt.xlabel('Months between earliest and latest rating')
plt.ylabel('Progress (levels)')
plt.title('Spoken language (expression)')

# In[267]:

lsl_dr.degree_hl.dropna().value_counts()

# In[268]:

ax = lsl_dr.degree_hl.hist(bins=7)

# In[269]:

diff = (lsl_dr['age'] - lsl_dr['age_int'])
diff[diff>0].hist(bins=50)

# In[270]:

(lsl_dr.age_int<6).mean()

# In[271]:

(lsl_dr.age<6).mean()
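# As a final sanity check, the exported file can be read back to confirm the round trip (a minimal sketch; the filename follows the `current_year_only` branch of the export cell above):

# In[ ]:

check = pd.read_csv('lsl_dr_current_year.csv' if current_year_only else 'lsl_dr.csv', index_col=0)
assert check.shape[0] == lsl_dr.shape[0]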