#!/usr/bin/env python
# coding: utf-8

# # Weather Maximum Temperature Prediction

# In[2]:

# Pandas is used for data manipulation
import pandas as pd

# Read in data as a pandas dataframe and display the first 5 rows
features = pd.read_csv('data/temps.csv')
features.head(5)


# In[9]:

features_list = list(features.columns)
features_list


# In[11]:

features_list.index('temp_1')


# # Data Size

# In[4]:

print('The shape of our features is:', features.shape)


# In[5]:

# Descriptive statistics for each column
features.describe()


# No anomalous values, so the data can be used as-is.

# In[6]:

# Use datetime for dealing with dates
import datetime

# Get years, months, and days
years = features['year']
months = features['month']
days = features['day']

# List and then convert to datetime objects
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]


# In[7]:

dates[:5]


# # Data Visualization

# In[8]:

# Import matplotlib for plotting and use the magic command for Jupyter Notebooks
import matplotlib.pyplot as plt

get_ipython().run_line_magic('matplotlib', 'inline')

# Set the style
plt.style.use('fivethirtyeight')


# In[9]:

# Set up the plotting layout
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
fig.autofmt_xdate(rotation=45)

# Actual max temperature measurement
ax1.plot(dates, features['actual'])
ax1.set_xlabel(''); ax1.set_ylabel('Temperature'); ax1.set_title('Max Temp')

# Temperature from 1 day ago
ax2.plot(dates, features['temp_1'])
ax2.set_xlabel(''); ax2.set_ylabel('Temperature'); ax2.set_title('Previous Max Temp')

# Temperature from 2 days ago
ax3.plot(dates, features['temp_2'])
ax3.set_xlabel('Date'); ax3.set_ylabel('Temperature'); ax3.set_title('Two Days Prior Max Temp')

# Friend estimate
ax4.plot(dates, features['friend'])
ax4.set_xlabel('Date'); ax4.set_ylabel('Temperature'); ax4.set_title('Friend Estimate')

plt.tight_layout(pad=2)


# # Data Preprocessing
#
# ### One-Hot Encoding
#
# Original data:
#
# | week |
# |------|
# | Mon  |
# | Tue  |
# | Wed  |
# | Thu  |
# | Fri  |
#
# After encoding:
#
# | Mon | Tue | Wed | Thu | Fri |
# |-----|-----|-----|-----|-----|
# | 1   | 0   | 0   | 0   | 0   |
# | 0   | 1   | 0   | 0   | 0   |
# | 0   | 0   | 1   | 0   | 0   |
# | 0   | 0   | 0   | 1   | 0   |
# | 0   | 0   | 0   | 0   | 1   |
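# As a quick illustration (this toy dataframe is not part of the original data), applying
# `pd.get_dummies` to a standalone `week` column reproduces the table above, except that each
# indicator column is prefixed with the original column name (`week_Mon`, `week_Tue`, ...) and the
# columns come out in alphabetical order. The real features are encoded with the same call in the
# next cell.

# In[ ]:

# Toy example only: see what get_dummies does to a single categorical column
toy = pd.DataFrame({'week': ['Mon', 'Tue', 'Wed', 'Thu', 'Fri']})
pd.get_dummies(toy).astype(int)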
# In[10]:

# One-hot encode categorical features
features = pd.get_dummies(features)
features.head(5)


# In[11]:

print('Shape of features after one-hot encoding:', features.shape)


# ### Labels and Data Format Conversion

# In[12]:

# Use numpy to convert to arrays
import numpy as np

# Labels are the values we want to predict
labels = np.array(features['actual'])

# Remove the labels from the features
# axis 1 refers to the columns
features = features.drop('actual', axis=1)

# Saving feature names for later use
feature_list = list(features.columns)

# Convert to numpy array
features = np.array(features)


# ### Training and Test Sets

# In[13]:

# Using Scikit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split

# Split the data into training and testing sets
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.25, random_state=42)


# In[14]:

print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)


# # Building a Baseline Random Forest Model

# In[16]:

# Import the model we are using
from sklearn.ensemble import RandomForestRegressor

# Instantiate the model with 1000 trees
rf = RandomForestRegressor(n_estimators=1000, random_state=42)

# Train the model on the training data
rf.fit(train_features, train_labels);


# # Testing

# In[17]:

# Use the forest's predict method on the test data
predictions = rf.predict(test_features)

# Calculate the absolute errors
errors = abs(predictions - test_labels)

# Print out the mean absolute error (MAE)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')


# # MAPE Metric
#
# MAPE (mean absolute percentage error)
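# The cell below computes MAPE as the mean absolute error relative to the true value, and reports
# "accuracy" as its complement:
#
# $$\text{MAPE} = \frac{100\%}{n}\sum_{i=1}^{n}\frac{\lvert y_i - \hat{y}_i \rvert}{y_i}, \qquad \text{accuracy} = 100\% - \text{MAPE}$$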
# In[18]:

# Calculate the mean absolute percentage error (MAPE)
mape = 100 * (errors / test_labels)

# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')


# # Visualizing the Trees

# In[23]:

# Instantiate a forest with its main hyperparameters written out explicitly
# ('squared_error' is the current name of the criterion formerly called 'mse')
rf_new = RandomForestRegressor(n_estimators=100, criterion='squared_error', max_depth=None,
                               min_samples_split=2, min_samples_leaf=1)


# # Interpret Model Results
# ## Visualizing a Single Decision Tree

# In[24]:

# Import tools needed for visualization
from sklearn.tree import export_graphviz
import pydot  # pip install pydot

# Pull out one tree from the forest
tree = rf.estimators_[5]

# Export the tree to a dot file
export_graphviz(tree, out_file='tree.dot', feature_names=feature_list, rounded=True, precision=1)

# Use the dot file to create a graph
(graph, ) = pydot.graph_from_dot_file('tree.dot')

# Write the graph to a png file
graph.write_png('tree.png');

# ![Decision Tree](tree.png)

# In[25]:

print('The depth of this tree is:', tree.tree_.max_depth)


# That tree is far too deep to read, so let's build a smaller one...

# In[26]:

# Limit the depth of the trees to 3 levels
rf_small = RandomForestRegressor(n_estimators=10, max_depth=3, random_state=42)
rf_small.fit(train_features, train_labels)

# Extract one small tree
tree_small = rf_small.estimators_[5]

# Save the tree as a png image
export_graphviz(tree_small, out_file='small_tree.dot', feature_names=feature_list, rounded=True, precision=1)

(graph, ) = pydot.graph_from_dot_file('small_tree.dot')

graph.write_png('small_tree.png');

# ![Small Decision Tree](small_tree.png)

# ### Annotated Version of Tree
# ![Annotated Decision Tree](images/small_tree_annotated.PNG)

# ## Feature Importances

# In[27]:

# Get numerical feature importances
importances = list(rf.feature_importances_)

# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]

# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)

# Print out the features and importances
[print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];


# ### Trying Again with Only the Most Important Features

# In[28]:

# New random forest with only the two most important variables
rf_most_important = RandomForestRegressor(n_estimators=1000, random_state=42)

# Extract the two most important features
important_indices = [feature_list.index('temp_1'), feature_list.index('average')]
train_important = train_features[:, important_indices]
test_important = test_features[:, important_indices]

# Train the random forest
rf_most_important.fit(train_important, train_labels)

# Make predictions and determine the error
predictions = rf_most_important.predict(test_important)
errors = abs(predictions - test_labels)

# Display the performance metrics
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')

mape = np.mean(100 * (errors / test_labels))
accuracy = 100 - mape

print('Accuracy:', round(accuracy, 2), '%.')


# In[22]:

# List of x locations for plotting
x_values = list(range(len(importances)))

# Make a bar chart of the importances
plt.bar(x_values, importances)

# Tick labels for the x axis
plt.xticks(x_values, feature_list, rotation='vertical')

# Axis labels and title
plt.ylabel('Importance'); plt.xlabel('Variable'); plt.title('Variable Importances');


# ### Differences Between Predicted and Actual Values

# In[23]:

# Dates of the training values
months = features[:, feature_list.index('month')]
days = features[:, feature_list.index('day')]
years = features[:, feature_list.index('year')]

# List and then convert to datetime objects
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]

# Dataframe with true values and dates
true_data = pd.DataFrame(data={'date': dates, 'actual': labels})

# Dates of the predictions
months = test_features[:, feature_list.index('month')]
days = test_features[:, feature_list.index('day')]
years = test_features[:, feature_list.index('year')]

# Column of dates
test_dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]

# Convert to datetime objects
test_dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in test_dates]

# Dataframe with predictions and dates
predictions_data = pd.DataFrame(data={'date': test_dates, 'prediction': predictions})


# In[24]:

# Plot the actual values
plt.plot(true_data['date'], true_data['actual'], 'b-', label='actual')

# Plot the predicted values
plt.plot(predictions_data['date'], predictions_data['prediction'], 'ro', label='prediction')
plt.xticks(rotation=60);
plt.legend()

# Graph labels
plt.xlabel('Date'); plt.ylabel('Maximum Temperature (F)'); plt.title('Actual and Predicted Values');