#!/usr/bin/env python
# coding: utf-8

# # Uterine fibroids follow-up treatment meta-analysis
#
# Our goal is to estimate the probabilities of requiring one of a suite of candidate follow-up treatments following randomization to a given initial treatment for uterine fibroids. Specifically, we are interested in estimating:
#
# $$Pr(I_2 | I_1 = i, T = t)$$
#
# where $I_1$ is an initial intervention, which takes specific values $i = 1, 2, \ldots, K$ for each of $K$ candidate intervention types, $I_2$ is the followup intervention, which may also take any of the same values of $i$, and $T$ is followup time in months, which will generally be either 6 or 12 months.
#
# Our current set of candidate interventions includes:
#
# - Myomectomy
# - Hysterectomy
# - Ablation
# - Uterine artery embolization (UAE)
# - Magnetic resonance imaging-guided high-intensity focused ultrasound (MRIgFUS)
# - Ablation +/- hysteroscopic myomectomy
# - No intervention
#
# Rather than model each conditional probability independently, we will instead model the outcomes for a treatment arm as a multinomial random variable. That is,
#
# $$\{X_{I_2}\} \sim \text{Multinomial}(N_{I_1=i}, \{\pi_i\})$$
#
# where $\{X_{I_2}\}$ is the vector of outcomes corresponding to each of the possible followup interventions listed above, $N_{I_1=i}$ is the number of women randomized to initial intervention $i$, and $\{\pi_i\}$ is a vector of conditional transition probabilities corresponding to $Pr(I_2 | I_1 = i, T = t)$, as specified above. The multinomial distribution is a multivariate generalization of the categorical distribution, which is what the above simplifies to when modeling the outcome for a single patient. The multivariate formulation allows us to model study-arm-specific outcomes, incorporating covariates that are specific to that arm or study.
#
# The quantities of interest are the vectors of transition probabilities $\{\pi_i\}$ corresponding to each of the initial candidate interventions. A naive approach to modeling these is to assign a vague Dirichlet prior distribution to each set, and perform Bayesian inference using the multinomial likelihood, with which the Dirichlet is conjugate, to yield posterior estimates for each probability. However, there may be additional information with which to model these probabilities, including:
#
# - followup time for each study
# - arm-specific demographic covariates (e.g. race, mean age)
# - study-specific random effects
#
# Hence, a given transition probability $\pi_{ijk}$, the probability of transitioning from initial intervention $i$ to followup intervention $j$ in study $k$, may be modeled as:
#
# $$\text{logit}(\pi_{ijk}) = \theta_{ij} + X_k \beta_{ij} + \epsilon_k$$
#
# where $\theta_{ij}$ is a baseline transition probability (on the logit scale), $X_k$ is a matrix of study(-arm)-specific covariates, $\beta_{ij}$ are the corresponding coefficients, and $\epsilon_k$ is a mean-zero random effect for study $k$. We will initially consider (1) follow-up time and (2) mean/median age as covariates.
#
# An attractive benefit of using Bayesian inference to estimate this model is that it is easy to generate predictions from the model, via the posterior predictive distribution. For example, we could estimate the distribution of the expected proportion of women requiring a particular followup intervention; this estimate would factor in both the residual uncertainty in the transition probability estimates and the sampling uncertainty of the intervention.
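# As a point of reference before adding covariates, a minimal sketch of the naive conjugate approach described above (a vague Dirichlet prior with a multinomial likelihood for a single study arm) might look like the following. The outcome counts and arm size used here are hypothetical values for illustration only; they are not taken from the study data.

# In[ ]:

# Illustrative sketch only: one study arm modeled with a vague Dirichlet prior
# and a conjugate multinomial likelihood. The counts below are hypothetical.
import numpy as np
import pymc3 as pm

example_counts = np.array([3, 1, 2, 0, 0, 0, 44])  # hypothetical counts for the 7 followup categories
example_n = example_counts.sum()                   # size of the (hypothetical) arm

with pm.Model() as naive_model:
    # Vague symmetric Dirichlet prior over the transition probabilities
    p = pm.Dirichlet('p', a=np.ones(len(example_counts)))
    # Multinomial likelihood for the arm-level outcome counts
    pm.Multinomial('outcomes', n=example_n, p=p, observed=example_counts)
    naive_trace = pm.sample(2000)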
# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')

import numpy as np
import pandas as pd
import pymc3 as pm
import seaborn as sns
sns.set()


# Import data from worksheets in the Excel spreadsheet.

# In[2]:

data_file = 'UF Subsequent Interventions Data_Master_updated.xlsx'


# In[3]:

missing = ['NA', 'NR', 'ND', '?', 'null']

misc_data = pd.read_excel('data/' + data_file, sheetname='MISC (SP)', na_values=missing)
misc_data = misc_data[~misc_data['baseline_n'].isnull()].drop('notes', axis=1)
rows, cols = misc_data.shape
print('Occlusion rows={0}, columns={1}, missing={2}'.format(rows, cols, misc_data.isnull().sum().sum()))

med_vs_iac_data = pd.read_excel('data/' + data_file, sheetname='Med vs IAC JW', na_values=missing)
med_vs_iac_data = med_vs_iac_data[~med_vs_iac_data['trial_arm'].isnull()].drop('notes', axis=1)
rows, cols = med_vs_iac_data.shape
print('Med vs IAC rows={0}, columns={1}, missing={2}'.format(rows, cols, med_vs_iac_data.isnull().sum().sum()))

med_vs_med_data = pd.read_excel('data/' + data_file, sheetname='Med vs Med DVE', na_values=missing)
med_vs_med_data = med_vs_med_data[~med_vs_med_data['baseline_n'].isnull()].drop('notes', axis=1)
rows, cols = med_vs_med_data.shape
print('Med vs Med rows={0}, columns={1}, missing={2}'.format(rows, cols, med_vs_med_data.isnull().sum().sum()))

uae_data = pd.read_excel('data/' + data_file, sheetname='UAE SK')
uae_data = uae_data[~uae_data['baseline_n'].isnull()].drop('notes', axis=1)
rows, cols = uae_data.shape
print('UAE rows={0}, columns={1}, missing={2}'.format(rows, cols, uae_data.isnull().sum().sum()))

datasets = [misc_data, med_vs_iac_data, med_vs_med_data, uae_data]


# In[4]:

unique_interventions = set(np.concatenate([d.intervention.values for d in datasets]))


# Use the following lookup table to create "intervention category" field in each dataset.
# In[5]:

# %load intervention_lookup.py
intervention_lookup = {
    'Ablation': 'ablation',
    'Ablation+/- hysteroscopic myomectomy': 'ablation',
    'Asoprisnil 10 mg': 'med_manage',
    'Asoprisnil 25 mg': 'med_manage',
    'Asoprisnil 5 mg': 'med_manage',
    'CD20 (Ulipristal)': 'med_manage',
    'CDB10 (Ulipristal)': 'med_manage',
    'Hysterectomy': 'hysterectomy',
    'LBCUV': 'uae',
    'LP + GnRH agonist plus raloxifene': 'med_manage',
    'LP + placebo': 'med_manage',
    'LPA+ MPA / LPA+placebo': 'med_manage',
    'LPA+ placebo / LPA+MPA': 'med_manage',
    'LUNA plus LBCUV': 'ablation',
    'Myomectomy': 'myomectomy',
    'No treatment': 'control',
    'No treatment (control)': 'control',
    'Placebo': 'control',
    'Raloxifene, 180mg/day': 'med_manage',
    'SC implant of 3.6 goserelin + placebo (3 months) then tibolone 2.5 mg daily (3 months)': 'med_manage',
    'SC implant of 3.6 goserelin + placebo (6 months)': 'med_manage',
    'SC implant of 3.6 goserelin + tibolone 2.5 mg daily (6 months)': 'med_manage',
    'Surgery': 'DROP',
    'Tibolone': 'med_manage',
    'UAE': 'uae',
    'UAE only': 'uae',
    'UAE plus goserelin acetate depot': 'uae',
    'buserelin + goserelin': 'med_manage',
    'buserelin, intranasal': 'med_manage',
    'cabergoline': 'med_manage',
    'diphereline': 'med_manage',
    'gestrinone, 2.5mg': 'med_manage',
    'gestrinone, 2.5mg oral + gestrinone, 5mg oral + gestrinone, 5mg vaginal': 'med_manage',
    'gestrinone, 5mg': 'med_manage',
    'gestrinone, 5mg vaginal': 'med_manage',
    'goserelin, subcutaneous': 'med_manage',
    'healthy controls': 'control',
    'hormone replacement therapy, transdermal': 'DROP',
    'hysterectomy or myomectomy': 'DROP',
    'letrozole, 2.5mg': 'med_manage',
    'leuprolide': 'med_manage',
    'leuprolide acetate depot (11.25 mg q 3 months) + Placebo': 'med_manage',
    'leuprolide acetate depot (11.25 mg q 3 months) + tibolone 2.5 mg/d orally': 'med_manage',
    'leuprolide acetate depot (3.75 mg/28 d) + placebo (B)': 'med_manage',
    'leuprolide plus (tibolone 2.5 mg daily) (A)': 'med_manage',
    'leuprolide plus MPA': 'med_manage',
    'leuprolide plus estrogen-progestin': 'med_manage',
    'leuprolide plus placebo': 'med_manage',
    'leuprolide plus progestin': 'med_manage',
    'leuprolide plus raloxifene 60 mg daily': 'med_manage',
    'leuprolide, 1.88mg': 'med_manage',
    'leuprolide, 3.75mg': 'med_manage',
    'mifepristone, 10mg': 'med_manage',
    'mifepristone, 10mg + mifepristone, 5mg': 'med_manage',
    'mifepristone, 2.5mg': 'med_manage',
    'mifepristone, 5mg': 'med_manage',
    'placebo': 'control',
    'raloxifene 180 mg daily': 'med_manage',
    'raloxifene 60 mg daily': 'med_manage',
    'tamoxifen 20 mg daily': 'med_manage',
    'tibolone': 'med_manage',
    'tibolone, 2.5mg': 'med_manage',
    'transdermal estrogen replacement therapy': 'med_manage',
    'triptorelin, 100ug': 'med_manage',
    'triptorelin, 100ug + triptorelin, 20ug + triptorelin, 5ug': 'med_manage',
    'triptorelin, 20ug': 'med_manage',
    'triptorelin, 3.6mg/mo': 'med_manage',
    'triptorelin, 5ug': 'med_manage',
    'ulipristal acetate followed by placebo': 'med_manage',
    'ulipristal acetate followed by progestin': 'med_manage',
    'ulipristal, 10mg': 'med_manage',
    'ulipristal, 5mg': 'med_manage',
    'HIFU': 'MRgFUS',
    'HIFU with CEUS': 'MRgFUS',
    'LUAO': 'uae',
    'UAE plus PVA': 'uae',
    'UAE plus TAG': 'uae',
    'UAE with PVA': 'uae',
    'UAE with PVA particles, large': 'uae',
    'UAE with PVA particles, small': 'uae',
    'UAE with SPA': 'uae',
    'UAE with SPVA': 'uae',
    'UAE with TAG': 'uae',
    'UAE with TAG microspheres': 'uae',
    'myomectomy': 'myomectomy',
    'myomectomy with vasopressin': 'myomectomy',
    'myomectomy, abdominal': 'myomectomy',
    'myomectomy, laparoscopic': 'myomectomy',
    'myomectomy, loop ligation with vasopressin': 'myomectomy',
    'myomectomy, minilaparotomic': 'myomectomy'}


# Assign intervention **categories** to each arm

# In[6]:

datasets = [d.assign(intervention_cat=d.intervention.replace(intervention_lookup)) for d in datasets]


# In[7]:

intervention_categories = set(intervention_lookup.values())
intervention_categories


# Import demographic information

# In[8]:

demographics = pd.read_excel('data/' + data_file, sheetname='ALL_DEMO_DATA', na_values=missing)
demographics.columns


# Extract columns of interest

# In[9]:

age_data = demographics.loc[demographics.Demo_Category=='Age',
                            ['study_id', 'New Grouping', 'BL Mean', 'BL SD']]


# Clean arm labels

# In[10]:

age_data = age_data.assign(arm=age_data['New Grouping'].str.replace(':','')).drop('New Grouping', axis=1)


# In[11]:

age_data.arm.unique()


# Concatenate all datasets

# In[12]:

all_data = pd.concat(datasets)


# Clean up study arm field

# In[13]:

all_arm = all_data.trial_arm.str.replace(':','').str.replace(' ', '').str.replace('Group', 'G')
all_data = all_data.assign(arm=all_arm).drop('trial_arm', axis=1)


# In[14]:

all_data.arm.unique()


# Clean up study ID field. Currently contains non-numeric entries. Will strip out the first study ID from the compound labels, as this is the parent study ID.

# In[15]:

all_data.study_id.unique()


# In[16]:

str_mask = all_data.study_id.str.isnumeric()==False
all_data.loc[str_mask, 'study_id'] = all_data.study_id[str_mask].apply(lambda x: x[:x.find('_')])
all_data.study_id = all_data.study_id.astype(int)


# In[17]:

all_data.study_id.unique()


# Here is what the data look like after concatenating the datasets.

# In[18]:

all_data.head()


# In[19]:

all_data.groupby('intervention_cat')['study_id'].count()


# Merge age data with outcomes

# In[20]:

all_data_merged = pd.merge(all_data, age_data, on=['study_id', 'arm'])


# For now, drop arms with no reported followup time (we may want to impute these):

# In[21]:

all_data_merged = all_data_merged.dropna(subset=['followup_interval'])


# Parse followup intervals that are ranges, creating `fup_min` and `fup_max` fields.
# In[22]:

# Default bounds for followup time: zero to one month beyond the largest numeric followup interval
dataset = all_data_merged.assign(fup_min=0,
                                 fup_max=all_data.followup_interval.convert_objects(convert_numeric=True).max()+1)

# Rows whose followup interval is stored as text (ranges such as "6 to 12")
range_index = dataset.followup_interval.str.contains('to').notnull()
range_vals = dataset[range_index].followup_interval.apply(lambda x: x.split(' '))
dataset.loc[range_index, ['fup_min']] = range_vals.apply(lambda x: float(x[0]))
dataset.loc[range_index, ['fup_max']] = range_vals.apply(lambda x: float(x[-1]))
# Flag ranged intervals with a sentinel value so they can be masked and imputed in the model
dataset.loc[range_index, ['followup_interval']] = -999
dataset['followup_interval'] = dataset.followup_interval.astype(float)


# In[23]:

dataset.head()


# Fill missing values

# In[24]:

# If followup n was not reported, assume no loss to followup from baseline
dataset.loc[dataset.followup_n.isnull(), 'followup_n'] = dataset.loc[dataset.followup_n.isnull(), 'baseline_n']


# In[25]:

# If the "no treatment" count is missing, infer it as the remainder of the arm
dataset.loc[dataset.no_treatment.isnull(), 'no_treatment'] = (dataset.followup_n
    - dataset[['hysterectomy', 'myomectomy', 'uae', 'MRIgFUS', 'ablation', 'iud']].sum(1)[dataset.no_treatment.isnull()])


# In[26]:

dataset.followup_interval.unique()


# In[27]:

crossover_studies = 7155, 3324, 414, 95, 7139, 6903, 3721, 3181, 4858, 4960, 4258, 4789, 2006, 2318


# In[28]:

uae_data = dataset[dataset.intervention_cat=='uae']


# In[29]:

uae_data.columns


# In[30]:

studies = uae_data.study_id.unique()
studies


# In[31]:

study_index = np.array([np.argwhere(studies==i).squeeze() for i in uae_data.study_id])


# In[32]:

from numpy.ma import masked_values

# Mask the -999 sentinel so that ranged followup intervals are imputed in the model
followup_masked = masked_values(uae_data.followup_interval.values, -999)
followup_min, followup_max = uae_data[['fup_min', 'fup_max']].values.T
outcomes = uae_data[['hysterectomy', 'myomectomy', 'uae', 'MRIgFUS', 'ablation', 'iud', 'no_treatment']].values
followup_n = uae_data.followup_n.values
age = uae_data['BL Mean'].values
# Center age at 40
age_centered = age - 40
study_id = uae_data.study_id.values


# In[33]:

n_studies = len(set(study_id))
n_studies


# In[34]:

n_outcomes = 7
arms = len(outcomes)


# Instantiate models

# In[35]:

ablation_model = pm.Model()
hysterectomy_model = pm.Model()
med_manage_model = pm.Model()
myomectomy_model = pm.Model()
uae_model = pm.Model()


# In[36]:

models = [ablation_model, hysterectomy_model, med_manage_model, myomectomy_model, uae_model]


# In[37]:

import theano.tensor as T

SumTo1 = pm.transforms.SumTo1()
inverse_logit = pm.transforms.inverse_logit


def specify_model(model, intervention):

    # Arm-level data for the requested initial intervention
    intervention_data = dataset[dataset.intervention_cat==intervention]

    followup_masked = masked_values(intervention_data.followup_interval.values, -999)
    followup_min, followup_max = intervention_data[['fup_min', 'fup_max']].values.T
    outcomes = intervention_data[['hysterectomy', 'myomectomy', 'uae', 'MRIgFUS', 'ablation', 'iud', 'no_treatment']].values
    followup_n = intervention_data.followup_n.values
    age = intervention_data['BL Mean'].values
    # Center age at 40
    age_centered = age - 40
    study_id = intervention_data.study_id.values

    # Study indexing for this intervention (rather than relying on the UAE-specific globals above)
    studies = intervention_data.study_id.unique()
    study_index = np.array([np.argwhere(studies==i).squeeze() for i in intervention_data.study_id])
    n_studies = len(studies)
    arms = len(outcomes)

    with model:

        # Impute followup times for arms reported only as a range
        followup_time = pm.Uniform('followup_time', followup_min, followup_max,
                                   shape=len(followup_min), observed=followup_masked)

        # Baseline effect for each outcome; vague priors specified via precision (tau)
        μ = pm.Normal('μ', mu=0, tau=0.01, shape=n_outcomes, testval=[-2.]*n_outcomes)
        # Followup time coefficients
        β_fup = pm.Normal('β_fup', mu=0, tau=0.01, shape=n_outcomes, testval=np.zeros(n_outcomes))
        # Age coefficients
        β_age = pm.Normal('β_age', mu=0, tau=0.01, shape=n_outcomes, testval=np.zeros(n_outcomes))

        # Study random effects
        τ = pm.Exponential('τ', 0.1, testval=1)
        ϵ = pm.Normal('ϵ', mu=0, tau=τ, shape=n_studies, testval=np.zeros(n_studies))

        # Arm-specific Dirichlet concentration parameters (exponentiated linear predictor)
        θ = [T.exp(μ + β_fup*followup_time[i] + β_age*age_centered[i] + ϵ[study_index[i]])
             for i in range(arms)]

        # Arm-specific transition probabilities
        π = [pm.Dirichlet('π_%i' % i, t, shape=n_outcomes, testval=np.ones(n_outcomes))
             for i, t in enumerate(θ)]

        # Multinomial data likelihood
        likelihood = [pm.Multinomial('likelihood_%i' % i, followup_n[i], π[i], observed=outcomes[i])
                      for i in range(arms)]

        # Predicted transition probabilities at 6 and 12 months (age 40), and at 6 months and age 50
        p_6 = pm.Dirichlet('p_6', T.exp(μ + β_fup*6), shape=n_outcomes)
        p_12 = pm.Dirichlet('p_12', T.exp(μ + β_fup*12), shape=n_outcomes)
        p_6_50 = pm.Dirichlet('p_6_50', T.exp(μ + β_fup*6 + β_age*10), shape=n_outcomes)

    return model


# In[38]:

uae_model = specify_model(uae_model, 'uae')


# In[39]:

with uae_model:
    trace_uae = pm.sample(5000)
    # Alternatively, sample with Metropolis steps:
    # trace_uae = pm.sample(20000, step=pm.Metropolis())


# ### Model output

# In[50]:

# Outcome labels for plotting (defined before the forest plots that use them)
plot_labels = dataset.columns[5:12]


# In[44]:

pm.forestplot(trace_uae, vars=['μ'])


# Follow-up time effect size estimates. Positive values indicate a higher probability of the event with increased follow-up time.

# In[54]:

pm.forestplot(trace_uae, vars=['β_fup'], ylabels=plot_labels)


# Age effect size estimates. Positive values suggest a higher probability of the event with each year above age 40.

# In[55]:

pm.forestplot(trace_uae, vars=['β_age'], ylabels=plot_labels)


# Estimated probabilities of follow-up interventions for 6-month followup and age 40.

# In[51]:

pm.forestplot(trace_uae, vars=['p_6'], ylabels=plot_labels)


# In[58]:

pm.summary(trace_uae, vars=['p_6'])


# Estimated probabilities of follow-up interventions for 12-month followup and age 40.

# In[52]:

pm.forestplot(trace_uae, vars=['p_12'], ylabels=plot_labels)


# In[57]:

pm.summary(trace_uae, vars=['p_12'])


# Estimated probabilities of follow-up interventions for 6-month followup and age 50.

# In[53]:

pm.forestplot(trace_uae, vars=['p_6_50'], ylabels=plot_labels)


# In[56]:

pm.summary(trace_uae, vars=['p_6_50'])
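# As noted in the introduction, posterior predictive quantities are straightforward to generate from the fitted model. The sketch below draws multinomial outcome counts for a hypothetical new arm of 100 women at 6-month followup and age 40, using the posterior draws of `p_6`; the arm size of 100 is an arbitrary illustrative choice, not a value taken from the data.

# In[ ]:

# Posterior predictive sketch (illustrative only): predicted outcome counts for a
# hypothetical arm of 100 women, based on the posterior draws of the 6-month probabilities.
new_arm_n = 100  # hypothetical arm size

p_6_samples = trace_uae['p_6']
# Renormalize each draw to guard against floating-point drift before multinomial sampling
predicted_counts = np.array([np.random.multinomial(new_arm_n, p / p.sum()) for p in p_6_samples])

# Distribution of the predicted proportion requiring each followup intervention,
# reflecting both parameter uncertainty and multinomial sampling variability
predicted_proportions = predicted_counts / new_arm_n
pd.DataFrame(predicted_proportions, columns=plot_labels).describe()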