#!/usr/bin/env python
# coding: utf-8

# # Taxi demand prediction in New York City

# ![alt text](https://drive.google.com/file/d/1ARGbEJsYkJhWb825VwcXNeN_hOe2yhR5/view)

# In[ ]:

from google.colab import drive
drive.mount('/content/drive')

# In[ ]:

# Importing libraries
# pip3 install graphviz
# pip3 install dask
# pip3 install toolz
# pip3 install cloudpickle
# https://www.youtube.com/watch?v=ieW3G7ZzRZ0
# https://github.com/dask/dask-tutorial
# please do go through this python notebook: https://github.com/dask/dask-tutorial/blob/master/07_dataframe.ipynb
import dask.dataframe as dd   # similar to pandas
import pandas as pd           # pandas to create small dataframes

# pip3 install folium
# if this doesn't work, refer to install_folium.JPG in the drive
import folium   # OpenStreetMap

# unix time: https://www.unixtimestamp.com/
import datetime   # convert to unix time
import time       # convert to unix time

# if numpy is not installed already: pip3 install numpy
import numpy as np   # arithmetic operations on arrays

# matplotlib: used to plot graphs
import matplotlib
# matplotlib.use('nbagg'): this backend makes plots interactive (zoom in and zoom out)
matplotlib.use('nbagg')
import matplotlib.pylab as plt
import seaborn as sns               # plots
from matplotlib import rcParams    # size of plots

get_ipython().system('pip3 install gpxpy')
# this library is used to calculate the straight-line distance between two (lat, lon) pairs in miles
import gpxpy.geo   # get the haversine distance

from sklearn.cluster import MiniBatchKMeans, KMeans   # clustering
import math
import pickle
import os

# download mingw: https://mingw-w64.org/doku.php/download/mingw-builds
# install it on your system and keep the path, mingw_path = 'installed path'
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-5.3.0-posix-seh-rt_v4-rev0\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']

# to install xgboost: pip3 install xgboost
# if that doesn't work, check install_xgboost.JPG
import xgboost as xgb

get_ipython().run_line_magic('matplotlib', 'inline')

# to install sklearn: pip install -U scikit-learn
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error

import warnings
warnings.filterwarnings("ignore")

# # Data Information
#

# Get the data from: http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml (2016 data)
# The data used in the attached datasets were collected and provided to the NYC Taxi and Limousine Commission (TLC).
#

# ## Information on taxis:
#
# ### Yellow Taxi: Yellow Medallion Taxicabs
#
# These are the famous NYC yellow taxis that provide transportation exclusively through street hails. The number of taxicabs is limited by a finite number of medallions issued by the TLC. You access this mode of transportation by standing in the street and hailing an available taxi with your hand. The pickups are not pre-arranged.
#
# ### For Hire Vehicles (FHVs)
#
# FHV transportation is accessed by a pre-arrangement with a dispatcher or limo company. These FHVs are not permitted to pick up passengers via street hails, as those rides are not considered pre-arranged.
#
# ### Green Taxi: Street Hail Livery (SHL)
#
# The SHL program allows livery vehicle owners to license and outfit their vehicles with green borough taxi branding, meters, credit card machines, and ultimately the right to accept street hails in addition to pre-arranged rides.
#
# Credits: Quora
#
# ### Footnote:
# In the given notebook we consider only the yellow taxis, for the time period between Jan–Mar 2015 and Jan–Mar 2016.

# # Data Collection
# We have collected all yellow taxi trip data from Jan 2015 to Dec 2016 (we will be using only the Jan–Mar months of 2015 and 2016).
#
# | file name | file size | number of records | number of features |
# |---|---|---|---|
# | yellow_tripdata_2016-01 | 1.59 GB | 10906858 | 19 |
# | yellow_tripdata_2016-02 | 1.66 GB | 11382049 | 19 |
# | yellow_tripdata_2016-03 | 1.78 GB | 12210952 | 19 |
# | yellow_tripdata_2016-04 | 1.74 GB | 11934338 | 19 |
# | yellow_tripdata_2016-05 | 1.73 GB | 11836853 | 19 |
# | yellow_tripdata_2016-06 | 1.62 GB | 11135470 | 19 |
# | yellow_tripdata_2016-07 | 884 MB | 10294080 | 17 |
# | yellow_tripdata_2016-08 | 854 MB | 9942263 | 17 |
# | yellow_tripdata_2016-09 | 870 MB | 10116018 | 17 |
# | yellow_tripdata_2016-10 | 933 MB | 10854626 | 17 |
# | yellow_tripdata_2016-11 | 868 MB | 10102128 | 17 |
# | yellow_tripdata_2016-12 | 897 MB | 10449408 | 17 |
# | yellow_tripdata_2015-01 | 1.84 GB | 12748986 | 19 |
# | yellow_tripdata_2015-02 | 1.81 GB | 12450521 | 19 |
# | yellow_tripdata_2015-03 | 1.94 GB | 13351609 | 19 |
# | yellow_tripdata_2015-04 | 1.90 GB | 13071789 | 19 |
# | yellow_tripdata_2015-05 | 1.91 GB | 13158262 | 19 |
# | yellow_tripdata_2015-06 | 1.79 GB | 12324935 | 19 |
# | yellow_tripdata_2015-07 | 1.68 GB | 11562783 | 19 |
# | yellow_tripdata_2015-08 | 1.62 GB | 11130304 | 19 |
# | yellow_tripdata_2015-09 | 1.63 GB | 11225063 | 19 |
# | yellow_tripdata_2015-10 | 1.79 GB | 12315488 | 19 |
# | yellow_tripdata_2015-11 | 1.65 GB | 11312676 | 19 |
# | yellow_tripdata_2015-12 | 1.67 GB | 11460573 | 19 |
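#
# These record counts can be re-checked lazily with dask, without loading a whole file into memory; a minimal sketch, assuming the monthly CSVs sit at the drive path used later in this notebook:

# In[ ]:

# len() is what actually triggers the dask computation here
month_check = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2016-01.csv')
print(len(month_check), "records,", len(month_check.columns), "features")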
# In[ ]:

# Looking at the features
# dask dataframe: https://github.com/dask/dask-tutorial/blob/master/07_dataframe.ipynb
month = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2015-01.csv')
print(month.columns)

# In[ ]:

# However, unlike pandas, operations on dask.dataframes don't trigger immediate computation;
# instead they add key-value pairs to an underlying Dask graph. Recall that in the diagram below,
# circles are operations and rectangles are results.
# to see the visualization you need to install graphviz
# pip3 install graphviz; if this doesn't work, please check install_graphviz.jpg in the drive
month.visualize()

# In[ ]:

month.fare_amount.sum().visualize()

# ## Features in the dataset:
# | Field Name | Description |
# |---|---|
# | VendorID | A code indicating the TPEP provider that provided the record. 1 = Creative Mobile Technologies; 2 = VeriFone Inc. |
# | tpep_pickup_datetime | The date and time when the meter was engaged. |
# | tpep_dropoff_datetime | The date and time when the meter was disengaged. |
# | Passenger_count | The number of passengers in the vehicle. This is a driver-entered value. |
# | Trip_distance | The elapsed trip distance in miles reported by the taximeter. |
# | Pickup_longitude | Longitude where the meter was engaged. |
# | Pickup_latitude | Latitude where the meter was engaged. |
# | RateCodeID | The final rate code in effect at the end of the trip. 1 = Standard rate; 2 = JFK; 3 = Newark; 4 = Nassau or Westchester; 5 = Negotiated fare; 6 = Group ride |
# | Store_and_fwd_flag | This flag indicates whether the trip record was held in vehicle memory before being sent to the vendor ("store and forward") because the vehicle did not have a connection to the server. Y = store and forward trip; N = not a store and forward trip |
# | Dropoff_longitude | Longitude where the meter was disengaged. |
# | Dropoff_latitude | Latitude where the meter was disengaged. |
# | Payment_type | A numeric code signifying how the passenger paid for the trip. 1 = Credit card; 2 = Cash; 3 = No charge; 4 = Dispute; 5 = Unknown; 6 = Voided trip |
# | Fare_amount | The time-and-distance fare calculated by the meter. |
# | Extra | Miscellaneous extras and surcharges. Currently, this only includes the $0.50 and $1 rush hour and overnight charges. |
# | MTA_tax | $0.50 MTA tax that is automatically triggered based on the metered rate in use. |
# | Improvement_surcharge | $0.30 improvement surcharge assessed on trips at the flag drop. The improvement surcharge began being levied in 2015. |
# | Tip_amount | Tip amount. This field is automatically populated for credit card tips; cash tips are not included. |
# | Tolls_amount | Total amount of all tolls paid in the trip. |
# | Total_amount | The total amount charged to passengers. Does not include cash tips. |
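#
# Only a handful of these fields are used downstream. As a side note (a sketch, not part of the original pipeline), the read can be restricted to just those columns, which reduces memory pressure:

# In[ ]:

# usecols is forwarded by dask to the underlying pandas CSV reader
needed_cols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'passenger_count',
               'trip_distance', 'pickup_longitude', 'pickup_latitude',
               'dropoff_longitude', 'dropoff_latitude', 'total_amount']
month_small = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2015-01.csv',
                          usecols=needed_cols)
print(month_small.columns)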
# # ML Problem Formulation
#
# **Time-series forecasting and Regression**
#
# - To find the number of pickups, given location coordinates (latitude and longitude) and time, in the query region and surrounding regions.
#
# To solve the above, we will be using the data collected in Jan–Mar 2015 to predict the pickups in Jan–Mar 2016.

# # Performance metrics
# 1. Mean Absolute Percentage Error (MAPE).
# 2. Mean Squared Error (MSE).

# ## Data Cleaning
#
# In this section we do univariate analysis and remove outlier/illegitimate values which may have been caused by some error.

# In[ ]:

# the table below shows a few data points along with all our features
month.head(5)

# ### 1. Pickup Latitude and Pickup Longitude

# It is inferred from the source https://www.flickr.com/places/info/2459115 that New York is bounded by the location coordinates (lat, long) (40.5774, -74.15) and (40.9176, -73.7004); hence any coordinates not within this bounding box are discarded, as we are only concerned with pickups which originate within New York.

# In[ ]:

# Plotting pickup coordinates which are outside the bounding box of New York
# we collect all the points outside the bounding box of New York City into outlier_locations
outlier_locations = month[((month.pickup_longitude <= -74.15) | (month.pickup_latitude <= 40.5774) |
                           (month.pickup_longitude >= -73.7004) | (month.pickup_latitude >= 40.9176))]

# creating a map with a base location
# read more about folium here: http://folium.readthedocs.io/en/latest/quickstart.html
# note: you don't need to remember any of these; you don't need in-depth knowledge of these maps and plots
map_osm = folium.Map(location=[40.734695, -73.990372], tiles='Stamen Toner')

# we spot only the first 10,000 outliers on the map; plotting all the outliers would take more time
sample_locations = outlier_locations.head(10000)
for i, j in sample_locations.iterrows():
    if int(j['pickup_latitude']) != 0:
        folium.Marker(list((j['pickup_latitude'], j['pickup_longitude']))).add_to(map_osm)
map_osm

# Observation: as you can see above, there are some points just outside the boundary, but there are a few that are in South America, Mexico, or Canada.

# ### 2. Dropoff Latitude & Dropoff Longitude

# It is inferred from the source https://www.flickr.com/places/info/2459115 that New York is bounded by the location coordinates (lat, long) (40.5774, -74.15) and (40.9176, -73.7004); hence any coordinates not within this bounding box are discarded, as we are only concerned with dropoffs which are within New York.

# In[ ]:

# Plotting dropoff coordinates which are outside the bounding box of New York
# we collect all the points outside the bounding box of New York City into outlier_locations
outlier_locations = month[((month.dropoff_longitude <= -74.15) | (month.dropoff_latitude <= 40.5774) |
                           (month.dropoff_longitude >= -73.7004) | (month.dropoff_latitude >= 40.9176))]

# creating a map with a base location
map_osm = folium.Map(location=[40.734695, -73.990372], tiles='Stamen Toner')

# we spot only the first 10,000 outliers on the map
sample_locations = outlier_locations.head(10000)
for i, j in sample_locations.iterrows():
    if int(j['pickup_latitude']) != 0:
        folium.Marker(list((j['dropoff_latitude'], j['dropoff_longitude']))).add_to(map_osm)
map_osm

# Observation: the observations here are similar to those obtained while analysing pickup latitude and longitude.

# ### 3. Trip Durations:

# According to NYC Taxi & Limousine Commission regulations, the maximum allowed trip duration in a 24-hour interval is 12 hours.
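#
# The next cell converts each timestamp string one at a time with time.mktime. A vectorized pandas alternative is sketched here for comparison; it assumes the same "YYYY-MM-DD HH:MM:SS" format, and note that it treats the timestamps as naive UTC, whereas time.mktime uses the machine's local timezone:

# In[ ]:

# sketch only (not used by the pipeline below): parse a whole column at once;
# the int64 view of datetime64[ns] is nanoseconds since the epoch
sample_times = pd.Series(['2015-01-01 00:11:22', '2015-01-01 00:25:00'])
unix_seconds = pd.to_datetime(sample_times).astype('int64') // 10**9
print(unix_seconds.values)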

# In[ ]:

# The timestamps are converted to unix time so as to get duration (trip time) and speed; pickup times in unix are also used while binning.

# in our data we have time in the format "YYYY-MM-DD HH:MM:SS"; we convert this string to Python time format and then into a unix timestamp
# https://stackoverflow.com/a/27914405
def convert_to_unix(s):
    return time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timetuple())


# we return a dataframe which contains the columns
# 1.  'passenger_count'   : self explanatory
# 2.  'trip_distance'     : self explanatory
# 3.  'pickup_longitude'  : self explanatory
# 4.  'pickup_latitude'   : self explanatory
# 5.  'dropoff_longitude' : self explanatory
# 6.  'dropoff_latitude'  : self explanatory
# 7.  'total_amount'      : total fare that was paid
# 8.  'trip_times'        : duration of each trip
# 9.  'pickup_times'      : pickup time converted into unix time
# 10. 'Speed'             : velocity of each trip
def return_with_trip_times(month):
    duration = month[['tpep_pickup_datetime','tpep_dropoff_datetime']].compute()
    # pickups and dropoffs to unix time
    duration_pickup = [convert_to_unix(x) for x in duration['tpep_pickup_datetime'].values]
    duration_drop = [convert_to_unix(x) for x in duration['tpep_dropoff_datetime'].values]
    # calculate duration of trips in minutes
    durations = (np.array(duration_drop) - np.array(duration_pickup))/float(60)

    # append durations of trips and speed in miles/hr to a new dataframe
    new_frame = month[['passenger_count','trip_distance','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude','total_amount']].compute()
    new_frame['trip_times'] = durations
    new_frame['pickup_times'] = duration_pickup
    new_frame['Speed'] = 60*(new_frame['trip_distance']/new_frame['trip_times'])

    return new_frame

# print(frame_with_durations.head())
#  passenger_count  trip_distance  pickup_longitude  pickup_latitude  dropoff_longitude  dropoff_latitude  total_amount  trip_times  pickup_times  Speed
#  1                1.59           -73.993896        40.750111        -73.974785         40.750618         17.05         18.050000   1.421329e+09  5.285319
#  1                3.30           -74.001648        40.724243        -73.994415         40.759109         17.80         19.833333   1.420902e+09  9.983193
#  1                1.80           -73.963341        40.802788        -73.951820         40.824413         10.80         10.050000   1.420902e+09  10.746269
#  1                0.50           -74.009087        40.713818        -74.004326         40.719986         4.80          1.866667    1.420902e+09  16.071429
#  1                3.00           -73.971176        40.762428        -74.004181         40.742653         16.30         19.316667   1.420902e+09  9.318378
frame_with_durations = return_with_trip_times(month)

# In[ ]:

# the skewed box plot shows us the presence of outliers
sns.boxplot(y="trip_times", data=frame_with_durations)
plt.show()

# In[ ]:

# calculating the 0th-100th percentile values to find the correct percentile value for removal of outliers
for i in range(0,100,10):
    var = frame_with_durations["trip_times"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# looking further from the 90th percentile onwards
for i in range(90,100):
    var = frame_with_durations["trip_times"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# removing data based on our analysis and TLC regulations
frame_with_durations_modified = frame_with_durations[(frame_with_durations.trip_times > 1) & (frame_with_durations.trip_times < 720)]

# In[ ]:

# box plot after removal of outliers
sns.boxplot(y="trip_times", data=frame_with_durations_modified)
plt.show()
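# As a side note, each of these percentile sweeps (here and in the later sections) can be computed in a single call; a compact sketch on the same frame:

# In[ ]:

# np.percentile accepts a list of percentiles and avoids the manual sort-and-index
print(np.percentile(frame_with_durations["trip_times"].values, list(range(0, 101, 10))))
print(np.percentile(frame_with_durations["trip_times"].values, list(range(90, 101))))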
# In[ ]:

# pdf of trip times after removing the outliers
sns.FacetGrid(frame_with_durations_modified, size=6) \
   .map(sns.kdeplot, "trip_times") \
   .add_legend()
plt.show()

# In[ ]:

# converting the values to log values to check for log-normality
import math
frame_with_durations_modified['log_times'] = [math.log(i) for i in frame_with_durations_modified['trip_times'].values]

# In[ ]:

# pdf of log values
sns.FacetGrid(frame_with_durations_modified, size=6) \
   .map(sns.kdeplot, "log_times") \
   .add_legend()
plt.show()

# In[ ]:

import scipy.stats
# Q-Q plot for checking if trip_times is log-normal
scipy.stats.probplot(frame_with_durations_modified['log_times'].values, plot=plt)
plt.show()

# ### 4. Speed

# In[ ]:

# check for any outliers in the data after the trip duration outliers were removed
# box plot for speeds, with outliers
frame_with_durations_modified['Speed'] = 60*(frame_with_durations_modified['trip_distance']/frame_with_durations_modified['trip_times'])
sns.boxplot(y="Speed", data=frame_with_durations_modified)
plt.show()

# In[ ]:

# calculating speed values at the percentiles 0,10,20,30,40,50,60,70,80,90,100
for i in range(0,100,10):
    var = frame_with_durations_modified["Speed"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating speed values at the percentiles 90,91,92,93,94,95,96,97,98,99,100
for i in range(90,100):
    var = frame_with_durations_modified["Speed"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating speed values at the percentiles 99.0,99.1,...,99.9,100
for i in np.arange(0.0, 1.0, 0.1):
    var = frame_with_durations_modified["Speed"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(99+i, var[int(len(var)*(float(99+i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# removing further outliers based on the 99.9th percentile value
frame_with_durations_modified = frame_with_durations[(frame_with_durations.Speed > 0) & (frame_with_durations.Speed < 45.31)]

# In[ ]:

# average speed of cabs in New York
sum(frame_with_durations_modified["Speed"]) / float(len(frame_with_durations_modified["Speed"]))

# The average speed in New York is 12.45 miles/hr, so a cab driver can travel 2 miles per 10 min on average.

# ### 5. Trip Distance

# In[ ]:

# up to now we have removed the outliers based on trip durations and cab speeds;
# let's check whether there are any outliers in trip distances
# box plot showing outliers in trip_distance values
sns.boxplot(y="trip_distance", data=frame_with_durations_modified)
plt.show()

# In[ ]:

# calculating trip distance values at the percentiles 0,10,20,30,40,50,60,70,80,90,100
for i in range(0,100,10):
    var = frame_with_durations_modified["trip_distance"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating trip distance values at the percentiles 90,91,92,93,94,95,96,97,98,99,100
for i in range(90,100):
    var = frame_with_durations_modified["trip_distance"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating trip distance values at the percentiles 99.0,99.1,...,99.9,100
for i in np.arange(0.0, 1.0, 0.1):
    var = frame_with_durations_modified["trip_distance"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(99+i, var[int(len(var)*(float(99+i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# removing further outliers based on the 99.9th percentile value
frame_with_durations_modified = frame_with_durations[(frame_with_durations.trip_distance > 0) & (frame_with_durations.trip_distance < 23)]

# In[ ]:

# box plot after removal of outliers
sns.boxplot(y="trip_distance", data=frame_with_durations_modified)
plt.show()

# ### 6. Total Fare

# In[ ]:

# up to now we have removed the outliers based on trip durations, cab speeds, and trip distances;
# let's check whether there are any outliers based on total_amount
# box plot showing outliers in fare
sns.boxplot(y="total_amount", data=frame_with_durations_modified)
plt.show()

# In[ ]:

# calculating total fare amount values at the percentiles 0,10,20,30,40,50,60,70,80,90,100
for i in range(0,100,10):
    var = frame_with_durations_modified["total_amount"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating total fare amount values at the percentiles 90,91,92,93,94,95,96,97,98,99,100
for i in range(90,100):
    var = frame_with_durations_modified["total_amount"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(i, var[int(len(var)*(float(i)/100))]))
print("100 percentile value is ", var[-1])

# In[ ]:

# calculating total fare amount values at the percentiles 99.0,99.1,...,99.9,100
for i in np.arange(0.0, 1.0, 0.1):
    var = frame_with_durations_modified["total_amount"].values
    var = np.sort(var, axis=None)
    print("{} percentile value is {}".format(99+i, var[int(len(var)*(float(99+i)/100))]))
print("100 percentile value is ", var[-1])

# Observation: since even the 99.9th percentile value does not look like an outlier (there is not much difference between the 99.8th and 99.9th percentile values), we move on to graphical analysis.

# In[ ]:

# the plot below shows the (sorted) fare values; we look for a sharp increase, to remove those values as outliers
# plot the fare amounts excluding the last two values in the sorted data
plt.plot(var[:-2])
plt.show()

# In[ ]:

# a very sharp increase in fare values can be seen
# plotting the last three total fare values, where we can observe the sharp increase
plt.plot(var[-3:])
plt.show()

# In[ ]:

# now, looking at the values excluding the last two points, we again find a drastic increase at around a fare value of 1000;
# we plot the last 50 values excluding the last two values
plt.plot(var[-50:-2])
plt.show()

# ## Remove all outliers/erroneous points

# In[ ]:

# removing all outliers based on our univariate analysis above
def remove_outliers(new_frame):

    a = new_frame.shape[0]
    print ("Number of pickup records = ", a)

    temp_frame = new_frame[((new_frame.dropoff_longitude >= -74.15) & (new_frame.dropoff_longitude <= -73.7004) &
                            (new_frame.dropoff_latitude >= 40.5774) & (new_frame.dropoff_latitude <= 40.9176)) &
                           ((new_frame.pickup_longitude >= -74.15) & (new_frame.pickup_latitude >= 40.5774) &
                            (new_frame.pickup_longitude <= -73.7004) & (new_frame.pickup_latitude <= 40.9176))]
    b = temp_frame.shape[0]
    print ("Number of outlier coordinates lying outside NY boundaries:", (a-b))

    temp_frame = new_frame[(new_frame.trip_times > 0) & (new_frame.trip_times < 720)]
    c = temp_frame.shape[0]
    print ("Number of outliers from trip times analysis:", (a-c))

    temp_frame = new_frame[(new_frame.trip_distance > 0) & (new_frame.trip_distance < 23)]
    d = temp_frame.shape[0]
    print ("Number of outliers from trip distance analysis:", (a-d))

    # note: this diagnostic count uses a looser 65 mph bound, while the filter below applies the 45.31 cutoff
    temp_frame = new_frame[(new_frame.Speed <= 65) & (new_frame.Speed >= 0)]
    e = temp_frame.shape[0]
    print ("Number of outliers from speed analysis:", (a-e))

    temp_frame = new_frame[(new_frame.total_amount < 1000) & (new_frame.total_amount > 0)]
    f = temp_frame.shape[0]
    print ("Number of outliers from fare analysis:", (a-f))

    new_frame = new_frame[((new_frame.dropoff_longitude >= -74.15) & (new_frame.dropoff_longitude <= -73.7004) &
                           (new_frame.dropoff_latitude >= 40.5774) & (new_frame.dropoff_latitude <= 40.9176)) &
                          ((new_frame.pickup_longitude >= -74.15) & (new_frame.pickup_latitude >= 40.5774) &
                           (new_frame.pickup_longitude <= -73.7004) & (new_frame.pickup_latitude <= 40.9176))]
    new_frame = new_frame[(new_frame.trip_times > 0) & (new_frame.trip_times < 720)]
    new_frame = new_frame[(new_frame.trip_distance > 0) & (new_frame.trip_distance < 23)]
    new_frame = new_frame[(new_frame.Speed < 45.31) & (new_frame.Speed > 0)]
    new_frame = new_frame[(new_frame.total_amount < 1000) & (new_frame.total_amount > 0)]

    print ("Total outliers removed", a - new_frame.shape[0])
    print ("---")
    return new_frame

# In[ ]:

print ("Removing outliers in the month of Jan-2015")
print ("----")
frame_with_durations_outliers_removed = remove_outliers(frame_with_durations)
print("fraction of data points that remain after removing outliers", float(len(frame_with_durations_outliers_removed))/len(frame_with_durations))

# # Data Preparation

# ## Clustering/Segmentation

# In[ ]:

# trying different cluster sizes to choose the right K in K-means
coords = frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']].values
neighbours = []

def find_min_distance(cluster_centers, cluster_len):
    nice_points = 0
    wrong_points = 0
    less2 = []
    more2 = []
    min_dist = 1000
    for i in range(0, cluster_len):
        nice_points = 0
        wrong_points = 0
        for j in range(0, cluster_len):
            if j != i:
                # straight-line (haversine) distance between two cluster centers, converted from metres to miles
                distance = gpxpy.geo.haversine_distance(cluster_centers[i][0], cluster_centers[i][1], cluster_centers[j][0], cluster_centers[j][1])
                min_dist = min(min_dist, distance/(1.60934*1000))
                if (distance/(1.60934*1000)) <= 2:
                    nice_points += 1
                else:
                    wrong_points += 1
        less2.append(nice_points)
        more2.append(wrong_points)
    neighbours.append(less2)
    print ("On choosing a cluster size of ", cluster_len,
           "\nAvg. Number of Clusters within the vicinity (i.e. inter-cluster distance < 2):", np.ceil(sum(less2)/len(less2)),
           "\nAvg. Number of Clusters outside the vicinity (i.e. inter-cluster distance > 2):", np.ceil(sum(more2)/len(more2)),
           "\nMin inter-cluster distance = ", min_dist, "\n---")

def find_clusters(increment):
    kmeans = MiniBatchKMeans(n_clusters=increment, batch_size=10000, random_state=42).fit(coords)
    frame_with_durations_outliers_removed['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']])
    cluster_centers = kmeans.cluster_centers_
    cluster_len = len(cluster_centers)
    return cluster_centers, cluster_len

# we need to choose the number of clusters so that many cluster regions lie close to some cluster center,
# while making sure the minimum inter-cluster distance does not become too small
for increment in range(10, 100, 10):
    cluster_centers, cluster_len = find_clusters(increment)
    find_min_distance(cluster_centers, cluster_len)

# ### Inference:
# - The main objective was to find an optimal minimum distance between clusters (which roughly corresponds to the radius of a cluster); the cluster count that achieves it is 40.

# In[ ]:

# if you check the 50-cluster case, you can observe that there are two clusters only 0.3 miles apart from each other,
# so we choose 40 clusters for solving the problem at hand

# getting 40 clusters using k-means
kmeans = MiniBatchKMeans(n_clusters=40, batch_size=10000, random_state=0).fit(coords)
frame_with_durations_outliers_removed['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']])

# ### Plotting the cluster centers:

# In[ ]:

# Plotting the cluster centers on OSM
cluster_centers = kmeans.cluster_centers_
cluster_len = len(cluster_centers)
map_osm = folium.Map(location=[40.734695, -73.990372], tiles='Stamen Toner')
for i in range(cluster_len):
    folium.Marker(list((cluster_centers[i][0], cluster_centers[i][1])),
                  popup=(str(cluster_centers[i][0])+str(cluster_centers[i][1]))).add_to(map_osm)
map_osm

# ### Plotting the clusters:

# In[ ]:

# Visualising the clusters on a map
def plot_clusters(frame):
    city_long_border = (-74.03, -73.75)
    city_lat_border = (40.63, 40.85)
    fig, ax = plt.subplots(ncols=1, nrows=1)
    ax.scatter(frame.pickup_longitude.values[:100000], frame.pickup_latitude.values[:100000], s=10, lw=0,
               c=frame.pickup_cluster.values[:100000], cmap='tab20', alpha=0.2)
    ax.set_xlim(city_long_border)
    ax.set_ylim(city_lat_border)
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    plt.show()

plot_clusters(frame_with_durations_outliers_removed)

# ## Time-binning

# In[ ]:

# Refer: https://www.unixtimestamp.com/
# 1420070400 : 2015-01-01 00:00:00
# 1422748800 : 2015-02-01 00:00:00
# 1425168000 : 2015-03-01 00:00:00
# 1427846400 : 2015-04-01 00:00:00
# 1430438400 : 2015-05-01 00:00:00
# 1433116800 : 2015-06-01 00:00:00
# 1451606400 : 2016-01-01 00:00:00
# 1454284800 : 2016-02-01 00:00:00
# 1456790400 : 2016-03-01 00:00:00
# 1459468800 : 2016-04-01 00:00:00
# 1462060800 : 2016-05-01 00:00:00
# 1464739200 : 2016-06-01 00:00:00

def add_pickup_bins(frame, month, year):
    unix_pickup_times = [i for i in frame['pickup_times'].values]
    unix_times = [[1420070400,1422748800,1425168000,1427846400,1430438400,1433116800],
                  [1451606400,1454284800,1456790400,1459468800,1462060800,1464739200]]
    start_pickup_unix = unix_times[year-2015][month-1]
    # https://www.timeanddate.com/time/zones/est
    # (int((i-start_pickup_unix)/600)+33): our unix times are in GMT, so we convert them to EST
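    # a note on the "+33": 33 bins x 10 min = 5.5 hours, which does not match EST's 5-hour
    # offset from GMT; time.mktime above used the local timezone of the machine this notebook
    # was authored on (a 5.5 h shift suggests UTC+5:30), so if you rerun the pipeline in a
    # different timezone this constant would need to be recomputed.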
    tenminutewise_binned_unix_pickup_times = [(int((i-start_pickup_unix)/600)+33) for i in unix_pickup_times]
    frame['pickup_bins'] = np.array(tenminutewise_binned_unix_pickup_times)
    return frame

# In[ ]:

# clustering, making pickup bins, and grouping by pickup cluster and pickup bins
frame_with_durations_outliers_removed['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']])
jan_2015_frame = add_pickup_bins(frame_with_durations_outliers_removed,1,2015)
jan_2015_groupby = jan_2015_frame[['pickup_cluster','pickup_bins','trip_distance']].groupby(['pickup_cluster','pickup_bins']).count()

# In[ ]:

# we add two more columns: 'pickup_cluster' (the cluster it belongs to)
# and 'pickup_bins' (the 10-min interval the trip belongs to)
jan_2015_frame.head()

# In[ ]:

# here trip_distance represents the number of pickups that happened in that particular 10-min interval
# this dataframe has two indices
# primary index: pickup_cluster (cluster number)
# secondary index: pickup_bins (we divide the whole month's time into 10-min intervals: 24*31*60/10 = 4464 bins)
jan_2015_groupby.head()

# In[ ]:

# up to now we cleaned and prepared the data for Jan 2015;
# now we do the same operations for the months Jan, Feb, and March of 2016:
# 1. get the dataframe which includes only the required columns
# 2. add trip times, speed, and the unix timestamp of the pickup time
# 3. remove the outliers based on trip_times, speed, trip_distance, total_amount
# 4. add pickup_cluster to each data point
# 5. add pickup_bin (index of the 10-min interval to which the trip belongs)
# 6. group the data by 'pickup_cluster' and 'pickup_bin'

# Data preparation for the months of Jan, Feb and March 2016
def datapreparation(month, kmeans, month_no, year_no):

    print ("Return with trip times..")
    frame_with_durations = return_with_trip_times(month)

    print ("Remove outliers..")
    frame_with_durations_outliers_removed = remove_outliers(frame_with_durations)

    print ("Estimating clusters..")
    frame_with_durations_outliers_removed['pickup_cluster'] = kmeans.predict(frame_with_durations_outliers_removed[['pickup_latitude', 'pickup_longitude']])

    print ("Final groupby..")
    final_updated_frame = add_pickup_bins(frame_with_durations_outliers_removed,month_no,year_no)
    final_groupby_frame = final_updated_frame[['pickup_cluster','pickup_bins','trip_distance']].groupby(['pickup_cluster','pickup_bins']).count()

    return final_updated_frame, final_groupby_frame

month_jan_2016 = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2016-01.csv')
month_feb_2016 = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2016-02.csv')
month_mar_2016 = dd.read_csv('drive/My Drive/NYTaxi/Data_Notebooks/yellow_tripdata_2016-03.csv')

jan_2016_frame, jan_2016_groupby = datapreparation(month_jan_2016,kmeans,1,2016)
feb_2016_frame, feb_2016_groupby = datapreparation(month_feb_2016,kmeans,2,2016)
mar_2016_frame, mar_2016_groupby = datapreparation(month_mar_2016,kmeans,3,2016)

# ### Saving data

# In[ ]:

# https://www.geeksforgeeks.org/understanding-python-pickling-example/
import pickle

# it's important to use binary mode
pickle_file = open('drive/My Drive/NYTaxi/3_month_data_2016.pkl', 'wb')

# source, destination
pickle.dump(jan_2016_frame, pickle_file)
pickle.dump(jan_2016_groupby, pickle_file)
pickle.dump(feb_2016_frame, pickle_file)
pickle.dump(feb_2016_groupby, pickle_file)
pickle.dump(mar_2016_frame, pickle_file)
pickle.dump(mar_2016_groupby, pickle_file)
pickle_file.close()

# In[ ]:

pickle_file = open('drive/My Drive/NYTaxi/3_month_data_2016.pkl', 'rb')
jan_2016_frame = pickle.load(pickle_file)
jan_2016_groupby = pickle.load(pickle_file)
feb_2016_frame = pickle.load(pickle_file)
feb_2016_groupby = pickle.load(pickle_file)
mar_2016_frame = pickle.load(pickle_file)
mar_2016_groupby = pickle.load(pickle_file)
pickle_file.close()

# ## Smoothing

# In[ ]:

# Gets the unique bins where pickup values are present, for each region
# for each cluster region we collect all the indices of the 10-min intervals in which pickups happened
# we observed that there are some pickup bins that don't have any pickups
def return_unq_pickup_bins(frame):
    values = []
    for i in range(0,40):
        new = frame[frame['pickup_cluster'] == i]
        list_unq = list(set(new['pickup_bins']))
        list_unq.sort()
        values.append(list_unq)
    return values

# In[ ]:

# for every month we get all indices of 10-min intervals in which at least one pickup happened

#jan
jan_2015_unique = return_unq_pickup_bins(jan_2015_frame)
jan_2016_unique = return_unq_pickup_bins(jan_2016_frame)
#feb
feb_2016_unique = return_unq_pickup_bins(feb_2016_frame)
#march
mar_2016_unique = return_unq_pickup_bins(mar_2016_frame)

# In[ ]:

# for each cluster, the number of 10-min intervals with 0 pickups
for i in range(40):
    print("for the",i,"th cluster, number of 10-min intervals with zero pickups: ",4464 - len(set(jan_2015_unique[i])))
    print('-'*60)

# There are two ways to fill up these values: filling the missing bins with zeros, or smoothing by distributing counts across the missing bins. Both are implemented below.

# In[ ]:

# Fills a value of zero for every bin where no pickup data is present
# count_values: the number of pickups that happened in each region for each 10-min interval;
# there won't be any value if there were no pickups.
# values: the list of unique bins (per region)
# for every 10-min interval (pickup_bin) we check whether it is in our unique bins;
# if it is, we append count_values[index] to the smoothed data,
# if not, we append 0 to the smoothed data
# we finally return the smoothed data
def fill_missing(count_values, values):
    smoothed_regions = []
    ind = 0
    for r in range(0,40):
        smoothed_bins = []
        for i in range(4464):
            if i in values[r]:
                smoothed_bins.append(count_values[ind])
                ind += 1
            else:
                smoothed_bins.append(0)
        smoothed_regions.extend(smoothed_bins)
    return smoothed_regions

# In[ ]:

# Fills in missing bins with smoothed values
# count_values: the number of pickups that happened in each region for each 10-min interval;
# there won't be any value if there were no pickups.
# values: the list of unique bins (per region)
# for every 10-min interval (pickup_bin) we check whether it is in our unique bins;
# if it is, we append count_values[index] to the smoothed data,
# if not, we append a smoothed value (calculated based on the method discussed in the markdown cell below)
# we finally return the smoothed data
def smoothing(count_values, values):
    smoothed_regions = []   # stores the final smoothed values of each region
    ind = 0
    repeat = 0
    smoothed_value = 0
    for r in range(0,40):
        smoothed_bins = []   # stores the final smoothed values of this region
        repeat = 0
        for i in range(4464):
            if repeat != 0:   # prevents iteration for a value which is already visited/resolved
                repeat -= 1
                continue
            if i in values[r]:   # checks if the pickup bin exists
                smoothed_bins.append(count_values[ind])   # appends the value of the pickup bin if it exists
            else:
                if i != 0:
                    right_hand_limit = 0
                    for j in range(i,4464):
                        if j not in values[r]:   # searches for the next bin that has a pickup value (the right limit)
                            continue
                        else:
                            right_hand_limit = j
                            break
                    if right_hand_limit == 0:
                        # Case 1: the last value or last few values are missing, hence we have no right limit here
                        smoothed_value = count_values[ind-1]*1.0/((4463-i)+2)*1.0
                        for j in range(i,4464):
                            smoothed_bins.append(math.ceil(smoothed_value))
                        smoothed_bins[i-1] = math.ceil(smoothed_value)
                        repeat = (4463-i)
                        ind -= 1
                    else:
                        # Case 2: missing values between two known values
                        smoothed_value = (count_values[ind-1]+count_values[ind])*1.0/((right_hand_limit-i)+2)*1.0
                        for j in range(i,right_hand_limit+1):
                            smoothed_bins.append(math.ceil(smoothed_value))
                        smoothed_bins[i-1] = math.ceil(smoothed_value)
                        repeat = (right_hand_limit-i)
                else:
                    # Case 3: the first value or first few values are missing, hence we have no left limit here
                    right_hand_limit = 0
                    for j in range(i,4464):
                        if j not in values[r]:
                            continue
                        else:
                            right_hand_limit = j
                            break
                    smoothed_value = count_values[ind]*1.0/((right_hand_limit-i)+1)*1.0
                    for j in range(i,right_hand_limit+1):
                        smoothed_bins.append(math.ceil(smoothed_value))
                    repeat = (right_hand_limit-i)
            ind += 1
        smoothed_regions.extend(smoothed_bins)
    return smoothed_regions

# In[ ]:

# Filling missing values of Jan 2015 with 0
# here, in the jan_2015_groupby dataframe, trip_distance represents the number of pickups that happened
jan_2015_fill = fill_missing(jan_2015_groupby['trip_distance'].values,jan_2015_unique)
# Smoothing missing values of Jan 2015
jan_2015_smooth = smoothing(jan_2015_groupby['trip_distance'].values,jan_2015_unique)

# In[ ]:

# number of 10-min indices for jan 2015   = 24*31*60/10 = 4464
# number of 10-min indices for jan 2016   = 24*31*60/10 = 4464
# number of 10-min indices for feb 2016   = 24*29*60/10 = 4176
# number of 10-min indices for march 2016 = 24*31*60/10 = 4464
# for each cluster we have 4464 values, therefore 40*4464 = 178560 (the length of jan_2015_fill)
print("number of 10-min intervals among all the clusters ", len(jan_2015_fill))

# In[ ]:

# Smoothing vs Filling
# sample plot that shows the two variations of filling missing values
# we have taken the number of pickups for cluster region 2
plt.figure(figsize=(10,5))
plt.plot(jan_2015_fill[4464:8920], label="zero filled values")
plt.plot(jan_2015_smooth[4464:8920], label="filled with avg values")
plt.legend()
plt.show()

# In[ ]:

# Why do we choose these methods, and which method is used for which data?
# Answer: consider data from some month in 2015, where the bins starting Jan 1st look like 10 _ _ _ 20, i.e.
# 10 pickups happened in the 1st 10-min interval, no pickups were recorded for the 2nd, 3rd, and 4th intervals,
# and 20 pickups happened in the 5th 10-min interval.
# the fill_missing method replaces these values as 10, 0, 0, 0, 20,
# whereas the smoothing method replaces them as 6, 6, 6, 6, 6; note that the total number of pickups in the
# first 50 min (30) is the same in both cases. However, smoothing looks at future pickup values,
# which might cause data leakage.
# So we use smoothing for the Jan 2015 data, since it acts only as our training input,
# and the simple fill_missing method for the 2016 data.

# In[ ]:

# Jan 2015 data is smoothed; Jan, Feb & March 2016 missing values are filled with zero
jan_2015_smooth = smoothing(jan_2015_groupby['trip_distance'].values,jan_2015_unique)
jan_2016_smooth = fill_missing(jan_2016_groupby['trip_distance'].values,jan_2016_unique)
feb_2016_smooth = fill_missing(feb_2016_groupby['trip_distance'].values,feb_2016_unique)
mar_2016_smooth = fill_missing(mar_2016_groupby['trip_distance'].values,mar_2016_unique)

# Making a list of all the values of pickup data in every bin for a period of 3 months, stored region-wise
regions_cum = []

# a = [1,2,3]
# b = [2,3,4]
# a+b = [1, 2, 3, 2, 3, 4]

# number of 10-min indices for jan 2016   = 24*31*60/10 = 4464
# number of 10-min indices for feb 2016   = 24*29*60/10 = 4176
# number of 10-min indices for march 2016 = 24*31*60/10 = 4464

# regions_cum will contain 40 lists; each list contains 4464+4176+4464 values, representing the number of pickups
# that happened over the three months of 2016
for i in range(0,40):
    regions_cum.append(jan_2016_smooth[4464*i:4464*(i+1)]+feb_2016_smooth[4176*i:4176*(i+1)]+mar_2016_smooth[4464*i:4464*(i+1)])

# print(len(regions_cum))    # 40
# print(len(regions_cum[0])) # 13104

# ## Time series and Fourier Transforms

# In[ ]:

def uniqueish_color():
    """There're better ways to generate unique colors, but this isn't awful."""
    return plt.cm.gist_ncar(np.random.random())

first_x = list(range(0,4464))
second_x = list(range(4464,8640))
third_x = list(range(8640,13104))
for i in range(40):
    plt.figure(figsize=(10,4))
    plt.plot(first_x, regions_cum[i][:4464], color=uniqueish_color(), label='2016 Jan month data')
    plt.plot(second_x, regions_cum[i][4464:8640], color=uniqueish_color(), label='2016 Feb month data')
    plt.plot(third_x, regions_cum[i][8640:], color=uniqueish_color(), label='2016 March month data')
    plt.legend()
    plt.show()

# In[ ]:

# getting peaks: https://blog.ytotech.com/2015/11/01/findpeaks-in-python/
# read more about the fft function: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fft.html
Y = np.fft.fft(np.array(jan_2016_smooth)[0:4460])
# read more about fftfreq: https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftfreq.html
freq = np.fft.fftfreq(4460, 1)
n = len(freq)
plt.figure()
plt.plot(freq[:int(n/2)], np.abs(Y)[:int(n/2)])
plt.xlabel("Frequency")
plt.ylabel("Amplitude")
plt.show()

# In[ ]:

# Preparing the dataframe with x(i) values as the Jan 2015 data and y(i) values as the Jan 2016 data
ratios_jan = pd.DataFrame()
ratios_jan['Given'] = jan_2015_smooth
ratios_jan['Prediction'] = jan_2016_smooth
ratios_jan['Ratios'] = ratios_jan['Prediction']*1.0/ratios_jan['Given']*1.0

# ## Modelling: Baseline Models
#
# Now we get into modelling in order to forecast the pickup densities for the months of Jan, Feb and March of 2016, for which we use multiple models with two variations:
# 1. Using ratios of the 2016 data to the 2015 data, i.e. $\begin{align} R_{t} = P^{2016}_{t} / P^{2015}_{t} \end{align}$
# 2. Using previously known values of the 2016 data itself to predict the future values

# ### Simple Moving Averages
# The first model used is the Moving Averages model, which uses the previous n values in order to predict the next value, as the sketch below illustrates.
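# A minimal toy illustration of the idea (separate from the notebook's own pipeline below), assuming only numpy:

# In[ ]:

# each prediction for step t is the mean of the previous n values (fewer at the start)
toy_series = np.array([10, 12, 9, 14, 13, 11, 15], dtype=float)
n = 3
toy_preds = [toy_series[max(0, t - n):t].mean() for t in range(1, len(toy_series))]
print(np.round(toy_preds, 2))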
# Using ratio values: $\begin{align}R_{t} = ( R_{t-1} + R_{t-2} + R_{t-3} .... R_{t-n} )/n \end{align}$

# In[ ]:

def MA_R_Predictions(ratios, month):
    predicted_ratio = (ratios['Ratios'].values)[0]
    error = []
    predicted_values = []
    window_size = 3
    predicted_ratio_values = []
    for i in range(0,4464*40):
        if i % 4464 == 0:
            predicted_ratio_values.append(0)
            predicted_values.append(0)
            error.append(0)
            continue
        predicted_ratio_values.append(predicted_ratio)
        predicted_values.append(int(((ratios['Given'].values)[i])*predicted_ratio))
        error.append(abs((math.pow(int(((ratios['Given'].values)[i])*predicted_ratio)-(ratios['Prediction'].values)[i],1))))
        if i+1 >= window_size:
            predicted_ratio = sum((ratios['Ratios'].values)[(i+1)-window_size:(i+1)])/window_size
        else:
            predicted_ratio = sum((ratios['Ratios'].values)[0:(i+1)])/(i+1)

    ratios['MA_R_Predicted'] = predicted_values
    ratios['MA_R_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# For the above, the hyperparameter is the window size (n), which is tuned manually; a window size of 3 was found to be optimal for getting the best results using moving averages of the previous ratio values, therefore we get $\begin{align}R_{t} = ( R_{t-1} + R_{t-2} + R_{t-3})/3 \end{align}$

# Next we use the moving averages of the 2016 values themselves to predict the future value, using $\begin{align}P_{t} = ( P_{t-1} + P_{t-2} + P_{t-3} .... P_{t-n} )/n \end{align}$

# In[ ]:

def MA_P_Predictions(ratios, month):
    predicted_value = (ratios['Prediction'].values)[0]
    error = []
    predicted_values = []
    window_size = 1
    for i in range(0,4464*40):
        predicted_values.append(predicted_value)
        error.append(abs((math.pow(predicted_value-(ratios['Prediction'].values)[i],1))))
        if i+1 >= window_size:
            predicted_value = int(sum((ratios['Prediction'].values)[(i+1)-window_size:(i+1)])/window_size)
        else:
            predicted_value = int(sum((ratios['Prediction'].values)[0:(i+1)])/(i+1))

    ratios['MA_P_Predicted'] = predicted_values
    ratios['MA_P_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# For the above, the hyperparameter is the window size (n), which is tuned manually; a window size of 1 was found to be optimal for getting the best results using moving averages of the previous 2016 values, therefore we get $\begin{align}P_{t} = P_{t-1} \end{align}$

# ### Weighted Moving Averages
# The Moving Averages model gave equal importance to all the values in the window, but we know intuitively that the future is more likely to be similar to the latest values and less similar to the older ones. Weighted moving averages turn this intuition into a mathematical relationship, giving the highest weight to the latest previous value and decreasing weights to the subsequent older ones.
# Weighted moving averages using ratio values: $\begin{align}R_{t} = ( N*R_{t-1} + (N-1)*R_{t-2} + (N-2)*R_{t-3} .... 1*R_{t-n} )/(N*(N+1)/2) \end{align}$

# In[ ]:

def WA_R_Predictions(ratios, month):
    predicted_ratio = (ratios['Ratios'].values)[0]
    error = []
    predicted_values = []
    window_size = 5
    predicted_ratio_values = []
    for i in range(0,4464*40):
        if i % 4464 == 0:
            predicted_ratio_values.append(0)
            predicted_values.append(0)
            error.append(0)
            continue
        predicted_ratio_values.append(predicted_ratio)
        predicted_values.append(int(((ratios['Given'].values)[i])*predicted_ratio))
        error.append(abs((math.pow(int(((ratios['Given'].values)[i])*predicted_ratio)-(ratios['Prediction'].values)[i],1))))
        if i+1 >= window_size:
            sum_values = 0
            sum_of_coeff = 0
            for j in range(window_size,0,-1):
                sum_values += j*(ratios['Ratios'].values)[i-window_size+j]
                sum_of_coeff += j
            predicted_ratio = sum_values/sum_of_coeff
        else:
            sum_values = 0
            sum_of_coeff = 0
            for j in range(i+1,0,-1):
                sum_values += j*(ratios['Ratios'].values)[j-1]
                sum_of_coeff += j
            predicted_ratio = sum_values/sum_of_coeff

    ratios['WA_R_Predicted'] = predicted_values
    ratios['WA_R_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# For the above, the hyperparameter is the window size (n), which is tuned manually; a window size of 5 was found to be optimal for getting the best results using weighted moving averages of the previous ratio values, therefore we get $\begin{align} R_{t} = ( 5*R_{t-1} + 4*R_{t-2} + 3*R_{t-3} + 2*R_{t-4} + R_{t-5} )/15 \end{align}$

# Weighted moving averages using previous 2016 values: $\begin{align}P_{t} = ( N*P_{t-1} + (N-1)*P_{t-2} + (N-2)*P_{t-3} .... 1*P_{t-n} )/(N*(N+1)/2) \end{align}$

# In[ ]:

def WA_P_Predictions(ratios, month):
    predicted_value = (ratios['Prediction'].values)[0]
    error = []
    predicted_values = []
    window_size = 2
    for i in range(0,4464*40):
        predicted_values.append(predicted_value)
        error.append(abs((math.pow(predicted_value-(ratios['Prediction'].values)[i],1))))
        if i+1 >= window_size:
            sum_values = 0
            sum_of_coeff = 0
            for j in range(window_size,0,-1):
                sum_values += j*(ratios['Prediction'].values)[i-window_size+j]
                sum_of_coeff += j
            predicted_value = int(sum_values/sum_of_coeff)
        else:
            sum_values = 0
            sum_of_coeff = 0
            for j in range(i+1,0,-1):
                sum_values += j*(ratios['Prediction'].values)[j-1]
                sum_of_coeff += j
            predicted_value = int(sum_values/sum_of_coeff)

    ratios['WA_P_Predicted'] = predicted_values
    ratios['WA_P_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# For the above, the hyperparameter is the window size (n), which is tuned manually; a window size of 2 was found to be optimal for getting the best results using weighted moving averages of the previous 2016 values, therefore we get $\begin{align} P_{t} = ( 2*P_{t-1} + P_{t-2} )/3 \end{align}$

# ### Exponential Weighted Moving Averages
# https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
# Through weighted averages we have satisfied the intuition of giving higher weights to the latest values and decreasing weights to the subsequent older ones, but we still do not know which weighting scheme is correct, as there are infinitely many ways to assign weights in a non-increasing order, on top of tuning the window-size hyperparameter. To simplify this process we use exponential moving averages, which are a more principled way of assigning weights while at the same time implicitly choosing a window size.
#
# In exponential moving averages we use a single hyperparameter alpha $\begin{align}(\alpha)\end{align}$, a value between 0 and 1; based on the value of alpha, the weights and the window size are configured.
# For example, if $\begin{align}\alpha=0.9\end{align}$ then the number of days on which the value of the current iteration is based is approximately $\begin{align}1/(1-\alpha)=10\end{align}$, i.e. we consider values from 10 days prior before we predict the value for the current iteration. The weights are assigned using $\begin{align}2/(N+1)=0.18\end{align}$, where N is the number of prior values being considered; hence the latest value is assigned a weight of 0.18, which keeps decreasing exponentially for the subsequent values.

# $\begin{align}R^{'}_{t} = \alpha*R_{t-1} + (1-\alpha)*R^{'}_{t-1} \end{align}$

# In[ ]:

def EA_R1_Predictions(ratios, month):
    predicted_ratio = (ratios['Ratios'].values)[0]
    alpha = 0.6
    error = []
    predicted_values = []
    predicted_ratio_values = []
    for i in range(0,4464*40):
        if i % 4464 == 0:
            predicted_ratio_values.append(0)
            predicted_values.append(0)
            error.append(0)
            continue
        predicted_ratio_values.append(predicted_ratio)
        predicted_values.append(int(((ratios['Given'].values)[i])*predicted_ratio))
        error.append(abs((math.pow(int(((ratios['Given'].values)[i])*predicted_ratio)-(ratios['Prediction'].values)[i],1))))
        predicted_ratio = (alpha*predicted_ratio) + (1-alpha)*((ratios['Ratios'].values)[i])

    ratios['EA_R1_Predicted'] = predicted_values
    ratios['EA_R1_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# $\begin{align}P^{'}_{t} = \alpha*P_{t-1} + (1-\alpha)*P^{'}_{t-1} \end{align}$

# In[ ]:

def EA_P1_Predictions(ratios, month):
    predicted_value = (ratios['Prediction'].values)[0]
    alpha = 0.3
    error = []
    predicted_values = []
    for i in range(0,4464*40):
        if i % 4464 == 0:
            predicted_values.append(0)
            error.append(0)
            continue
        predicted_values.append(predicted_value)
        error.append(abs((math.pow(predicted_value-(ratios['Prediction'].values)[i],1))))
        predicted_value = int((alpha*predicted_value) + (1-alpha)*((ratios['Prediction'].values)[i]))

    ratios['EA_P1_Predicted'] = predicted_values
    ratios['EA_P1_Error'] = error
    mape_err = (sum(error)/len(error))/(sum(ratios['Prediction'].values)/len(ratios['Prediction'].values))
    mse_err = sum([e**2 for e in error])/len(error)
    return ratios, mape_err, mse_err

# In[ ]:

mean_err = [0]*10
median_err = [0]*10
ratios_jan, mean_err[0], median_err[0] = MA_R_Predictions(ratios_jan,'jan')
ratios_jan, mean_err[1], median_err[1] = MA_P_Predictions(ratios_jan,'jan')
ratios_jan, mean_err[2], median_err[2] = WA_R_Predictions(ratios_jan,'jan')
ratios_jan, mean_err[3], median_err[3] = WA_P_Predictions(ratios_jan,'jan')
ratios_jan, mean_err[4], median_err[4] = EA_R1_Predictions(ratios_jan,'jan')
ratios_jan, mean_err[5], median_err[5] = EA_P1_Predictions(ratios_jan,'jan')

# ## Comparison between baseline models
# We have chosen MAPE (Mean Absolute Percentage Error) as our error metric for comparing models, so that we know, on average, how good our model's predictions are; MSE (Mean Squared Error) is also used so that we have a clearer understanding of how well our forecasting model handles outliers, making sure there is not much of an error margin between our prediction and the actual value.

# In[ ]:

print ("Error Metric Matrix (Forecasting Methods) - MAPE & MSE")
print ("--------------------------------------------------------------------------------------------------------")
print ("Moving Averages (Ratios) -                  MAPE: ",mean_err[0],"   MSE: ",median_err[0])
print ("Moving Averages (2016 Values) -             MAPE: ",mean_err[1],"   MSE: ",median_err[1])
print ("--------------------------------------------------------------------------------------------------------")
print ("Weighted Moving Averages (Ratios) -         MAPE: ",mean_err[2],"   MSE: ",median_err[2])
print ("Weighted Moving Averages (2016 Values) -    MAPE: ",mean_err[3],"   MSE: ",median_err[3])
print ("--------------------------------------------------------------------------------------------------------")
print ("Exponential Moving Averages (Ratios) -      MAPE: ",mean_err[4],"   MSE: ",median_err[4])
print ("Exponential Moving Averages (2016 Values) - MAPE: ",mean_err[5],"   MSE: ",median_err[5])

# Please note: the above comparisons are made using Jan 2015 and Jan 2016 only.

# From the above matrix it is inferred that the best forecasting model for our prediction would be
# $\begin{align}P^{'}_{t} = \alpha*P_{t-1} + (1-\alpha)*P^{'}_{t-1} \end{align}$, i.e. exponential moving averages using the 2016 values.

# ## Regression Models

# ### Train-Test Split
# Before we start predictions using the tree-based regression models, we take 3 months of 2016 pickup data and split it such that for every region we have 70% of the data in train and 30% in test, ordered date-wise for every region.

# In[ ]:

# Preparing data to be split into train and test; the below prepares the data in cumulative form, which will later be split into train and test
# number of 10-min indices for jan 2016   = 24*31*60/10 = 4464
# number of 10-min indices for feb 2016   = 24*29*60/10 = 4176
# number of 10-min indices for march 2016 = 24*31*60/10 = 4464
# regions_cum contains 40 lists; each list contains 4464+4176+4464 = 13104 values, the number of pickups
# that happened over the three months of 2016
# print(len(regions_cum))    # 40
# print(len(regions_cum[0])) # 13104

# we take the number of pickups that happened in the last 5 10-min intervals
number_of_time_stamps = 5

# output variable: a list of lists, which will contain 13099 pickup counts for each cluster
output = []

# tsne_lat will contain the latitude of the cluster center, repeated 13104-5 = 13099 times, for every cluster
# Ex: [[cent_lat 13099 times], [cent_lat 13099 times], ... 40 lists]
tsne_lat = []

# tsne_lon will contain the longitude of the cluster center, repeated 13099 times, for every cluster
# Ex: [[cent_long 13099 times], [cent_long 13099 times], ... 40 lists]
tsne_lon = []

# we code each day: sunday = 0, monday = 1, tue = 2, wed = 3, thur = 4, fri = 5, sat = 6
# for every cluster we add 13099 values; each value represents the day of the week that pickup bin belongs to
tsne_weekday = []

# a numpy array of shape (523960, 5); each row corresponds to one entry in our data
# the first row is [f0,f1,f2,f3,f4], where fi = number of pickups that happened in the (i+1)-th 10-min interval (bin);
# the second row is [f1,f2,f3,f4,f5], the third row is [f2,f3,f4,f5,f6]
# and so on...
tsne_feature = [] tsne_feature = [0]*number_of_time_stamps for i in range(0,40): tsne_lat.append([kmeans.cluster_centers_[i][0]]*13099) tsne_lon.append([kmeans.cluster_centers_[i][1]]*13099) # jan 1st 2016 is thursday, so we start our day from 4: "(int(k/144))%7+4" # our prediction start from 5th 10min intravel since we need to have number of pickups that are happened in last 5 pickup bins tsne_weekday.append([int(((int(k/144))%7+4)%7) for k in range(5,4464+4176+4464)]) # regions_cum is a list of lists [[x1,x2,x3..x13104], [x1,x2,x3..x13104], [x1,x2,x3..x13104], [x1,x2,x3..x13104], [x1,x2,x3..x13104], .. 40 lsits] tsne_feature = np.vstack((tsne_feature, [regions_cum[i][r:r+number_of_time_stamps] for r in range(0,len(regions_cum[i])-number_of_time_stamps)])) output.append(regions_cum[i][5:]) tsne_feature = tsne_feature[1:] # In[ ]: len(tsne_lat[0])*len(tsne_lat) == tsne_feature.shape[0] == len(tsne_weekday)*len(tsne_weekday[0]) == 40*13099 == len(output)*len(output[0]) # In[ ]: # Getting the predictions of exponential moving averages to be used as a feature in cumulative form # upto now we computed 8 features for every data point that starts from 50th min of the day # 1. cluster center lattitude # 2. cluster center longitude # 3. day of the week # 4. f_t_1: number of pickups that are happened previous t-1th 10min intravel # 5. f_t_2: number of pickups that are happened previous t-2th 10min intravel # 6. f_t_3: number of pickups that are happened previous t-3th 10min intravel # 7. f_t_4: number of pickups that are happened previous t-4th 10min intravel # 8. f_t_5: number of pickups that are happened previous t-5th 10min intravel # from the baseline models we said the exponential weighted moving avarage gives us the best error # we will try to add the same exponential weighted moving avarage at t as a feature to our data # exponential weighted moving avarage => p'(t) = alpha*p'(t-1) + (1-alpha)*P(t-1) alpha=0.3 # it is a temporary array that store exponential weighted moving avarage for each 10min intravel, # for each cluster it will get reset # for every cluster it contains 13104 values predicted_values=[] # it is similar like tsne_lat # it is list of lists # predict_list is a list of lists [[x5,x6,x7..x13104], [x5,x6,x7..x13104], [x5,x6,x7..x13104], [x5,x6,x7..x13104], [x5,x6,x7..x13104], .. 
# In[ ]:

# Getting the predictions of exponential moving averages, to be used as a feature, in cumulative form

# so far we have computed 8 features for every data point that starts from the 50th minute of the day:
# 1. cluster center latitude
# 2. cluster center longitude
# 3. day of the week
# 4. f_t_1: number of pickups that happened in the previous (t-1)-th 10-min interval
# 5. f_t_2: number of pickups that happened in the previous (t-2)-th 10-min interval
# 6. f_t_3: number of pickups that happened in the previous (t-3)-th 10-min interval
# 7. f_t_4: number of pickups that happened in the previous (t-4)-th 10-min interval
# 8. f_t_5: number of pickups that happened in the previous (t-5)-th 10-min interval

# from the baseline models we saw that the exponential weighted moving average gives the best error,
# so we add the exponential weighted moving average at time t as a feature to our data
# exponential weighted moving average => p'(t) = alpha*p'(t-1) + (1-alpha)*P(t-1)
alpha = 0.3

# temporary list that stores the exponential weighted moving average for each 10-min interval;
# it is reset for each cluster and contains 13104 values per cluster
predicted_values = []

# similar to tsne_lat, predict_list is a list of lists
# [[x5,x6,x7..x13104], [x5,x6,x7..x13104], ... 40 lists]
predict_list = []
tsne_flat_exp_avg = []
for r in range(0,40):
    for i in range(0,13104):
        if i==0:
            predicted_value = regions_cum[r][0]
            predicted_values.append(0)
            continue
        predicted_values.append(predicted_value)
        predicted_value = int((alpha*predicted_value) + (1-alpha)*(regions_cum[r][i]))
    predict_list.append(predicted_values[5:])
    predicted_values = []


# In[ ]:

# Fourier features: https://www.youtube.com/watch?v=FjmwwDHT98c
# for every region we take the FFT of its 13104-bin time series and keep the
# top-5 amplitudes (skipping the largest, the DC component) and their frequencies as features
amplitudes = []
frequencies = []
size = 13104
for i in range(40):
    amp = np.abs((np.fft.fft(regions_cum[i][0:size]))[:int(size/2)])
    freq = np.abs((np.fft.fftfreq(size, 1))[:int(size/2)])
    amp_ind = np.argsort(-amp)[1:]
    amp_top = []
    freq_top = []
    for j in range(0,5):
        amp_top.append(amp[amp_ind[j]])
        freq_top.append(freq[amp_ind[j]])
    amplitudes.extend([amp_top]*13099)
    frequencies.extend([freq_top]*13099)
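# As a quick sanity check of the Fourier-feature idea, the sketch below (synthetic data, illustrative only) shows that the largest non-DC FFT component recovers a known periodicity:

# In[ ]:

# a signal with period 144 bins (one day of 10-min bins) should have its
# strongest non-DC FFT component at frequency 1/144
n = 4464  # one month of 10-min bins
t = np.arange(n)
signal = 100 + 30*np.sin(2*np.pi*t/144)
amp = np.abs(np.fft.fft(signal))[:n//2]
freq = np.fft.fftfreq(n, 1)[:n//2]
top = np.argsort(-amp)[1]  # index 0 of the sort is the DC component, skip it
print(freq[top], 1/144)    # both ~0.00694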
# In[ ]:

# train, test split : 70% : 30%
# Before we start predicting with the tree-based regression models, we take 3 months of 2016 pickup data
# and split it such that, for every region, 70% of the data goes to train and 30% to test,
# ordered date-wise within every region
print("size of train data :", int(13099*0.7))
print("size of test data :", int(13099*0.3))


# In[ ]:

train_frequencies = [frequencies[i*13099:(13099*i+9169)] for i in range(0,40)]
test_frequencies = [frequencies[(13099*(i))+9169:13099*(i+1)] for i in range(0,40)]


# In[ ]:

train_amplitudes = [amplitudes[i*13099:(13099*i+9169)] for i in range(0,40)]
test_amplitudes = [amplitudes[(13099*(i))+9169:13099*(i+1)] for i in range(0,40)]


# In[ ]:

# extracting the first 9169 timestamp values, i.e. 70% of 13099 (total timestamps), for our training data
train_features = [tsne_feature[i*13099:(13099*i+9169)] for i in range(0,40)]
test_features = [tsne_feature[(13099*(i))+9169:13099*(i+1)] for i in range(0,40)]


# In[ ]:

print("Number of data clusters", len(train_features), "Number of data points in train data", len(train_features[0]), "Each data point contains", len(train_features[0][0]), "features")
print("Number of data clusters", len(test_features), "Number of data points in test data", len(test_features[0]), "Each data point contains", len(test_features[0][0]), "features")


# In[ ]:

# extracting the first 9169 timestamp values, i.e. 70% of 13099 (total timestamps), for our training data
tsne_train_flat_lat = [i[:9169] for i in tsne_lat]
tsne_train_flat_lon = [i[:9169] for i in tsne_lon]
tsne_train_flat_weekday = [i[:9169] for i in tsne_weekday]
tsne_train_flat_output = [i[:9169] for i in output]
tsne_train_flat_exp_avg = [i[:9169] for i in predict_list]


# In[ ]:

# extracting the remaining timestamp values, i.e. 30% of 13099 (total timestamps), for our test data
tsne_test_flat_lat = [i[9169:] for i in tsne_lat]
tsne_test_flat_lon = [i[9169:] for i in tsne_lon]
tsne_test_flat_weekday = [i[9169:] for i in tsne_weekday]
tsne_test_flat_output = [i[9169:] for i in output]
tsne_test_flat_exp_avg = [i[9169:] for i in predict_list]


# In[ ]:

# the above variables contain values as lists of lists (one list of values per region);
# here we merge each of them into one flat list
train_new_features = []
test_new_features = []
train_frequency = []
test_frequency = []
train_amplitude = []
test_amplitude = []
for i in range(0,40):
    train_new_features.extend(train_features[i])
    test_new_features.extend(test_features[i])
    train_frequency.extend(train_frequencies[i])
    test_frequency.extend(test_frequencies[i])
    train_amplitude.extend(train_amplitudes[i])
    test_amplitude.extend(test_amplitudes[i])

train_new_features_amp_freq = np.hstack((train_new_features, train_frequency, train_amplitude))
test_new_features_amp_freq = np.hstack((test_new_features, test_frequency, test_amplitude))


# In[ ]:

# converting lists of lists into a single list, i.e. flatten
# a = [[1,2,3,4],[4,6,7,8]]
# print(sum(a,[])) # [1, 2, 3, 4, 4, 6, 7, 8]
tsne_train_lat = sum(tsne_train_flat_lat, [])
tsne_train_lon = sum(tsne_train_flat_lon, [])
tsne_train_weekday = sum(tsne_train_flat_weekday, [])
tsne_train_output = sum(tsne_train_flat_output, [])
tsne_train_exp_avg = sum(tsne_train_flat_exp_avg, [])


# In[ ]:

# converting lists of lists into a single list, i.e. flatten
tsne_test_lat = sum(tsne_test_flat_lat, [])
tsne_test_lon = sum(tsne_test_flat_lon, [])
tsne_test_weekday = sum(tsne_test_flat_weekday, [])
tsne_test_output = sum(tsne_test_flat_output, [])
tsne_test_exp_avg = sum(tsne_test_flat_exp_avg, [])


# In[ ]:

# Preparing the data frame for our train data
columns = ['ft_5','ft_4','ft_3','ft_2','ft_1','f_1','f_2','f_3','f_4','f_5','a_1','a_2','a_3','a_4','a_5']
df_train = pd.DataFrame(data=train_new_features_amp_freq, columns=columns)
df_train['lat'] = tsne_train_lat
df_train['lon'] = tsne_train_lon
df_train['weekday'] = tsne_train_weekday
df_train['exp_avg'] = tsne_train_exp_avg
print(df_train.shape)


# In[ ]:

# Preparing the data frame for our test data
df_test = pd.DataFrame(data=test_new_features_amp_freq, columns=columns)
df_test['lat'] = tsne_test_lat
df_test['lon'] = tsne_test_lon
df_test['weekday'] = tsne_test_weekday
df_test['exp_avg'] = tsne_test_exp_avg
print(df_test.shape)


# In[ ]:

df_test.head()
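# A quick sanity check on the resulting frames (illustrative assertions based on the sizes derived above: 40 regions x 9169 train / 3930 test timestamps, 15 window+Fourier features plus lat, lon, weekday, exp_avg):

# In[ ]:

assert df_train.shape == (40*9169, 19)  # (366760, 19)
assert df_test.shape == (40*3930, 19)   # (157200, 19)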
# ### Using Linear Regression

# In[ ]:

# find more about the LinearRegression estimator here: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
# -------------------------
# default parameters
# sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)

# some of the methods of LinearRegression()
# fit(X, y[, sample_weight])   Fit linear model.
# get_params([deep])           Get parameters for this estimator.
# predict(X)                   Predict using the linear model.
# score(X, y[, sample_weight]) Returns the coefficient of determination R^2 of the prediction.
# set_params(**params)         Set the parameters of this estimator.
# -----------------------
# video link: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/geometric-intuition-1-2-copy-8/
# -----------------------

from sklearn.linear_model import LinearRegression
lr_reg = LinearRegression().fit(df_train, tsne_train_output)

y_pred = lr_reg.predict(df_test)
lr_test_predictions = [round(value) for value in y_pred]
y_pred = lr_reg.predict(df_train)
lr_train_predictions = [round(value) for value in y_pred]


# ### Using Random Forest Regressor

# In[ ]:

# Training a hyper-parameter tuned random forest regressor on our train data
# find more about the RandomForestRegressor estimator here: http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# -------------------------
# default parameters
# sklearn.ensemble.RandomForestRegressor(n_estimators=10, criterion='mse', max_depth=None, min_samples_split=2,
# min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
# min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False)

# some of the methods of RandomForestRegressor()
# apply(X)                     Apply trees in the forest to X, return leaf indices.
# decision_path(X)             Return the decision path in the forest.
# fit(X, y[, sample_weight])   Build a forest of trees from the training set (X, y).
# get_params([deep])           Get parameters for this estimator.
# predict(X)                   Predict regression target for X.
# score(X, y[, sample_weight]) Returns the coefficient of determination R^2 of the prediction.
# -----------------------
# video link1: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/regression-using-decision-trees-2/
# video link2: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/what-are-ensembles/
# -----------------------

regr1 = RandomForestRegressor(max_features='sqrt', min_samples_leaf=4, min_samples_split=3, n_estimators=40, n_jobs=-1)
regr1.fit(df_train, tsne_train_output)


# In[ ]:

# Predicting on train and test data using our trained random forest model
# the model regr1 is already hyper-parameter tuned; the parameters above were found using grid search
y_pred = regr1.predict(df_test)
rndf_test_predictions = [round(value) for value in y_pred]
y_pred = regr1.predict(df_train)
rndf_train_predictions = [round(value) for value in y_pred]


# In[ ]:

# feature importances from the trained random forest
print(df_train.columns)
print(regr1.feature_importances_)
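# The raw importance array printed above is easier to read when paired with the column names; a small optional helper:

# In[ ]:

# Optional: pair feature names with their random-forest importances, sorted descending.
for name, imp in sorted(zip(df_train.columns, regr1.feature_importances_), key=lambda x: -x[1]):
    print("%-10s %.4f" % (name, imp))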
# ### Using XgBoost Regressor

# In[ ]:

# Training a hyper-parameter tuned Xg-Boost regressor on our train data
# find more about the XGBRegressor estimator here: http://xgboost.readthedocs.io/en/latest/python/python_api.html?#module-xgboost.sklearn
# -------------------------
# default parameters
# xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective='reg:linear',
# booster='gbtree', n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, colsample_bytree=1,
# colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, seed=None,
# missing=None, **kwargs)

# some of the methods of XGBRegressor()
# fit(X, y, sample_weight=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True, xgb_model=None)
# get_params([deep])  Get parameters for this estimator.
# predict(data, output_margin=False, ntree_limit=0) : Predict with data. NOTE: This function is not thread safe.
# get_score(importance_type='weight') -> get the feature importance
# -----------------------
# video link1: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/regression-using-decision-trees-2/
# video link2: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/what-are-ensembles/
# -----------------------

x_model = xgb.XGBRegressor(
    learning_rate=0.1,
    n_estimators=1000,
    max_depth=3,
    min_child_weight=3,
    gamma=0,
    subsample=0.8,
    reg_alpha=200, reg_lambda=200,
    colsample_bytree=0.8, nthread=4)
x_model.fit(df_train, tsne_train_output)


# In[ ]:

# predicting with our trained Xg-Boost regressor
# the model x_model is already hyper-parameter tuned; the parameters above were found using grid search
y_pred = x_model.predict(df_test)
xgb_test_predictions = [round(value) for value in y_pred]
y_pred = x_model.predict(df_train)
xgb_train_predictions = [round(value) for value in y_pred]


# In[ ]:

# feature importances
x_model.get_booster().get_score(importance_type='weight')


# ### Calculating the error metric values for various models

# In[ ]:

train_mape = []
test_mape = []

train_mape.append((mean_absolute_error(tsne_train_output, df_train['ft_1'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, df_train['exp_avg'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, rndf_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, xgb_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, lr_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))

test_mape.append((mean_absolute_error(tsne_test_output, df_test['ft_1'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, df_test['exp_avg'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, rndf_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, xgb_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, lr_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))


# ### Error Metric Matrix

# In[ ]:

print("Error Metric Matrix (Tree Based Regression Methods) - MAPE")
print("--------------------------------------------------------------------------------------------------------")
print("Baseline Model -                   Train: ", train_mape[0], " Test: ", test_mape[0])
print("Exponential Averages Forecasting - Train: ", train_mape[1], " Test: ", test_mape[1])
print("Linear Regression -                Train: ", train_mape[4], " Test: ", test_mape[4])
print("Random Forest Regression -         Train: ", train_mape[2], " Test: ", test_mape[2])
print("XgBoost Regression -               Train: ", train_mape[3], " Test: ", test_mape[3])
print("--------------------------------------------------------------------------------------------------------")
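# For clarity: the "MAPE" reported above is the mean absolute error normalized by the mean of the true pickup counts (a weighted MAPE, which avoids division by zero for bins with no pickups). A small helper expressing the same computation:

# In[ ]:

# MAPE variant used in this notebook: MAE divided by the mean of the true values.
def mape(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    return mean_absolute_error(y_true, y_pred) / y_true.mean()

# e.g. mape(tsne_test_output, xgb_test_predictions) reproduces test_mape[3]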
("--------------------------------------------------------------------------------------------------------") # # Assignments # # # In[ ]: ''' Task 1: Incorporate Fourier features as features into Regression models and measure MAPE.
# ### Models after adding Fourier transform features

# In[ ]:

# find more about the LinearRegression estimator here: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
# -------------------------
# default parameters
# sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)

# some of the methods of LinearRegression()
# fit(X, y[, sample_weight])   Fit linear model.
# get_params([deep])           Get parameters for this estimator.
# predict(X)                   Predict using the linear model.
# score(X, y[, sample_weight]) Returns the coefficient of determination R^2 of the prediction.
# set_params(**params)         Set the parameters of this estimator.
# -----------------------
# video link: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/geometric-intuition-1-2-copy-8/
# -----------------------

from sklearn.linear_model import LinearRegression
lr_reg = LinearRegression().fit(df_train, tsne_train_output)

y_pred = lr_reg.predict(df_test)
lr_test_predictions = [round(value) for value in y_pred]
y_pred = lr_reg.predict(df_train)
lr_train_predictions = [round(value) for value in y_pred]


# ### Using Random Forest Regressor

# In[ ]:

# Training a hyper-parameter tuned random forest regressor on our train data
# find more about the RandomForestRegressor estimator here: http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# -------------------------
# default parameters
# sklearn.ensemble.RandomForestRegressor(n_estimators=10, criterion='mse', max_depth=None, min_samples_split=2,
# min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
# min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False)

# some of the methods of RandomForestRegressor()
# apply(X)                     Apply trees in the forest to X, return leaf indices.
# decision_path(X)             Return the decision path in the forest.
# fit(X, y[, sample_weight])   Build a forest of trees from the training set (X, y).
# get_params([deep])           Get parameters for this estimator.
# predict(X)                   Predict regression target for X.
# score(X, y[, sample_weight]) Returns the coefficient of determination R^2 of the prediction.
# -----------------------
# video link1: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/regression-using-decision-trees-2/
# video link2: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/what-are-ensembles/
# -----------------------

regr1 = RandomForestRegressor(max_features='sqrt', min_samples_leaf=4, min_samples_split=3, n_estimators=40, n_jobs=-1)
regr1.fit(df_train, tsne_train_output)


# In[ ]:

# Predicting on train and test data using our trained random forest model
# the model regr1 is already hyper-parameter tuned; the parameters above were found using grid search
y_pred = regr1.predict(df_test)
rndf_test_predictions = [round(value) for value in y_pred]
y_pred = regr1.predict(df_train)
rndf_train_predictions = [round(value) for value in y_pred]


# In[ ]:

# feature importances from the trained random forest
print(df_train.columns)
print(regr1.feature_importances_)


# ### Using XgBoost Regressor

# In[ ]:

# Training a hyper-parameter tuned Xg-Boost regressor on our train data
# find more about the XGBRegressor estimator here: http://xgboost.readthedocs.io/en/latest/python/python_api.html?#module-xgboost.sklearn
# -------------------------
# default parameters
# xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective='reg:linear',
# booster='gbtree', n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, colsample_bytree=1,
# colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, seed=None,
# missing=None, **kwargs)

# some of the methods of XGBRegressor()
# fit(X, y, sample_weight=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True, xgb_model=None)
# get_params([deep])  Get parameters for this estimator.
# predict(data, output_margin=False, ntree_limit=0) : Predict with data. NOTE: This function is not thread safe.
# get_score(importance_type='weight') -> get the feature importance
# -----------------------
# video link1: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/regression-using-decision-trees-2/
# video link2: https://www.appliedaicourse.com/course/applied-ai-course-online/lessons/what-are-ensembles/
# -----------------------

xgb_model = xgb.XGBRegressor(
    learning_rate=0.1,
    n_estimators=1000,
    max_depth=3,
    min_child_weight=3,
    gamma=0,
    subsample=0.8,
    reg_alpha=200, reg_lambda=200,
    colsample_bytree=0.8, nthread=4)
xgb_model.fit(df_train, tsne_train_output)


# In[ ]:

# predicting with our trained Xg-Boost regressor
# the model xgb_model is already hyper-parameter tuned; the parameters above were found using grid search
y_pred = xgb_model.predict(df_test)
xgb_test_predictions = [round(value) for value in y_pred]
y_pred = xgb_model.predict(df_train)
xgb_train_predictions = [round(value) for value in y_pred]


# In[ ]:

# feature importances
xgb_model.get_booster().get_score(importance_type='weight')


# ### Calculating the error metric values for various models

# In[ ]:

train_mape = []
test_mape = []

train_mape.append((mean_absolute_error(tsne_train_output, df_train['ft_1'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, df_train['exp_avg'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, rndf_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, xgb_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, lr_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))

test_mape.append((mean_absolute_error(tsne_test_output, df_test['ft_1'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, df_test['exp_avg'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, rndf_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, xgb_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, lr_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))


# ### Error Metric Matrix

# In[ ]:

print("Error Metric Matrix (Tree Based Regression Methods) - MAPE")
print("--------------------------------------------------------------------------------------------------------")
print("Baseline Model -                   Train: ", train_mape[0], " Test: ", test_mape[0])
print("Exponential Averages Forecasting - Train: ", train_mape[1], " Test: ", test_mape[1])
print("Linear Regression -                Train: ", train_mape[4], " Test: ", test_mape[4])
print("Random Forest Regression -         Train: ", train_mape[2], " Test: ", test_mape[2])
print("XgBoost Regression -               Train: ", train_mape[3], " Test: ", test_mape[3])
print ("--------------------------------------------------------------------------------------------------------") # ## Observations: # # 1. We have used fourier features in train data . # 2. We could not get good results , which are very similar without using these features . # # ### Triple Exponential Smoothing (Holt's-Winter) # Exponential smoothing is a time series forecasting method for univariate data. # # This method is based on three smoothing equations stationary components, trend and seasonal. # # alpha= data smoothing factor ( 0= len(series): # we are forecasting m = i - len(series) + 1 result.append((smooth + m*trend) + seasonals[i%slen]) else: val = series[i] last_smooth, smooth = smooth, alpha*(val-seasonals[i%slen]) + (1-alpha)*(smooth+trend) trend = beta * (smooth-last_smooth) + (1-beta)*trend seasonals[i%slen] = gamma*(val-smooth) + (1-gamma)*seasonals[i%slen] result.append(smooth+trend+seasonals[i%slen]) return result alpha = 0.2 beta = 0.15 gamma = 0.2 season_len = 24 predict_values_triple =[] predict_list_triple = [] for r in range(0,40): predict_values_triple = triple_exponential_smoothing(regions_cum[r][0:13104], season_len, alpha, beta, gamma, 0) predict_list_triple.append(predict_values_triple[5:]) # In[ ]: tsne_train_flat_triple_exp = [i[:9169] for i in predict_list_triple] tsne_test_flat_triple_exp = [i[9169:] for i in predict_list_triple] # In[ ]: tsne_train_triple_exp_feat = sum(tsne_train_flat_triple_exp,[]) tsne_test_triple_exp_feat = sum(tsne_test_flat_triple_exp,[]) # In[ ]: df_train['triple_exp'] = tsne_train_triple_exp_feat df_test['triple_exp'] = tsne_test_triple_exp_feat # In[ ]: df_test.head() # In[ ]: from sklearn.linear_model import LinearRegression from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import MinMaxScaler from sklearn.linear_model import SGDRegressor params = {'fit_intercept':[True, False], 'normalize':[True, False]} model = LinearRegression(n_jobs = -1) lr_reg = GridSearchCV(model, params, scoring = 'neg_mean_absolute_error', cv = 3) lr_reg.fit(df_train, tsne_train_output) print("Best Estimators",lr_reg.best_params_) y_pred = lr_reg.predict(df_test) lr_test_predictions = [round(value) for value in y_pred] y_pred = lr_reg.predict(df_train) lr_train_predictions = [round(value) for value in y_pred] # ### Random Forest Regressor # In[ ]: # Training a hyper-parameter tuned random forest regressor on our train data # find more about LinearRegression function here http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html # ------------------------- # default paramters # sklearn.ensemble.RandomForestRegressor(n_estimators=10, criterion=’mse’, max_depth=None, min_samples_split=2, # min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=’auto’, max_leaf_nodes=None, min_impurity_decrease=0.0, # min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1, random_state=None, verbose=0, warm_start=False) # some of methods of RandomForestRegressor() # apply(X) Apply trees in the forest to X, return leaf indices. # decision_path(X) Return the decision path in the forest # fit(X, y[, sample_weight]) Build a forest of trees from the training set (X, y). # get_params([deep]) Get parameters for this estimator. # predict(X) Predict regression target for X. # score(X, y[, sample_weight]) Returns the coefficient of determination R^2 of the prediction. 
# In[ ]:

# Hyper-parameter tuning for Linear Regression using grid search
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV

params = {'fit_intercept': [True, False], 'normalize': [True, False]}
model = LinearRegression(n_jobs=-1)
lr_reg = GridSearchCV(model, params, scoring='neg_mean_absolute_error', cv=3)
lr_reg.fit(df_train, tsne_train_output)
print("Best Estimators", lr_reg.best_params_)

y_pred = lr_reg.predict(df_test)
lr_test_predictions = [round(value) for value in y_pred]
y_pred = lr_reg.predict(df_train)
lr_train_predictions = [round(value) for value in y_pred]


# ### Random Forest Regressor

# In[ ]:

# Hyper-parameter tuning for the random forest regressor using grid search
# find more about the RandomForestRegressor estimator here: http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
model = RandomForestRegressor(n_jobs=-1)
params = {'max_depth': [3,4,5,6],
          'min_samples_split': [2,3,4,5,7],
          'max_features': ['sqrt', 'log2'],
          'min_samples_leaf': [1,5,10,100]}
regr1 = GridSearchCV(model, params, scoring='neg_mean_absolute_error', cv=None)
regr1.fit(df_train, tsne_train_output)
print("Best Estimators", regr1.best_params_)

y_pred = regr1.predict(df_test)
rndf_test_predictions = [round(value) for value in y_pred]
y_pred = regr1.predict(df_train)
rndf_train_predictions = [round(value) for value in y_pred]


# ### XgBoost Regressor

# In[ ]:

# Hyper-parameter tuning for the Xg-Boost regressor using grid search
from xgboost import XGBRegressor

model = XGBRegressor(n_jobs=-1)
params = {'subsample': [0.7, 0.8, 0.9],
          'min_child_weight': [3, 5],
          'learning_rate': [0.01, 0.1, 1],
          'n_estimators': [100, 150, 200],
          'reg_lambda': [200, 300, 400],
          'max_depth': [3, 4, 5]}
xgb_model = GridSearchCV(model, params, scoring='neg_mean_absolute_error', cv=None)
xgb_model.fit(df_train, tsne_train_output)
print("Best Estimators: ", xgb_model.best_params_)

# predicting with our tuned Xg-Boost regressor
y_pred = xgb_model.predict(df_test)
xgb_test_predictions = [round(value) for value in y_pred]
y_pred = xgb_model.predict(df_train)
xgb_train_predictions = [round(value) for value in y_pred]


# ### Feature Importance

# In[ ]:

print(df_test.columns)
xgb_model.best_estimator_.feature_importances_


# ### Calculating the error metric values for various models

# In[ ]:

train_mape = []
test_mape = []

train_mape.append((mean_absolute_error(tsne_train_output, df_train['ft_1'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, df_train['exp_avg'].values))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, rndf_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, xgb_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))
train_mape.append((mean_absolute_error(tsne_train_output, lr_train_predictions))/(sum(tsne_train_output)/len(tsne_train_output)))

test_mape.append((mean_absolute_error(tsne_test_output, df_test['ft_1'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, df_test['exp_avg'].values))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, rndf_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, xgb_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))
test_mape.append((mean_absolute_error(tsne_test_output, lr_test_predictions))/(sum(tsne_test_output)/len(tsne_test_output)))


# ### Error Metric Matrix

# In[ ]:

print("Error Metric Matrix (Tree Based Regression Methods) - MAPE")
print("--------------------------------------------------------------------------------------------------------")
print("Baseline Model -                   Train: ", train_mape[0], " Test: ", test_mape[0])
print("Exponential Averages Forecasting - Train: ", train_mape[1], " Test: ", test_mape[1])
print("Linear Regression -                Train: ", train_mape[4], " Test: ", test_mape[4])
print("Random Forest Regression -         Train: ", train_mape[2], " Test: ", test_mape[2])
("--------------------------------------------------------------------------------------------------------") print ("|XgBoost Regression - Train: ",train_mape[3]," Test:",test_mape[3],"|") print ("--------------------------------------------------------------------------------------------------------") # ## Conclusion: # # 1. We have used fourier features and triple exp. smoothing for getting better results than previous models . # 2. After hyper parameter tuning we could achieve test MAPE score less than 12 for two models . # 3. Linear Regression and XGBoost after tuning gives MAPE < 12. # 4. XGBoost is best model with MAP test score as 10