#!/usr/bin/env python
# coding: utf-8

# # Exploring Ebay Car Sales Data
#
# In this project, we'll work with a dataset of used cars from eBay Kleinanzeigen, a classifieds section of the German eBay website.
#
# The dataset was originally scraped and uploaded to [Kaggle](https://www.kaggle.com/orgesleka/used-cars-database/data). The version of the dataset we are working with is a sample of 50,000 data points that was prepared by Dataquest, including simulating a less-cleaned version of the data.
#
# The data dictionary provided with the data is as follows:
#
# - dateCrawled - When this ad was first crawled. All field values are taken from this date.
# - name - Name of the car.
# - seller - Whether the seller is private or a dealer.
# - offerType - The type of listing.
# - price - The price on the ad to sell the car.
# - abtest - Whether the listing is included in an A/B test.
# - vehicleType - The vehicle type.
# - yearOfRegistration - The year in which the car was first registered.
# - gearbox - The transmission type.
# - powerPS - The power of the car in PS.
# - model - The car model name.
# - kilometer - How many kilometers the car has driven.
# - monthOfRegistration - The month in which the car was first registered.
# - fuelType - What type of fuel the car uses.
# - brand - The brand of the car.
# - notRepairedDamage - If the car has damage which is not yet repaired.
# - dateCreated - The date on which the eBay listing was created.
# - nrOfPictures - The number of pictures in the ad.
# - postalCode - The postal code for the location of the vehicle.
# - lastSeenOnline - When the crawler last saw this ad online.
#
# The aim of this project is to clean the data and analyze the included used car listings.

# In[1]:

import pandas as pd
import numpy as np

autos = pd.read_csv('autos.csv', encoding='Latin-1')

# ## Dataset Description

# In[2]:

autos.info()
autos.head()

# This information shows that the dataset contains 20 columns, most of which are strings. Some columns have null values, but none have more than ~20% null values.
#
# ### Fixing the column names

# In[3]:

print(autos.columns)

# In[4]:

new_col = ['date_crawled', 'name', 'seller', 'offer_type', 'price', 'abtest',
           'vehicle_type', 'registration_year', 'gearbox', 'power_ps', 'model',
           'odometer', 'registration_month', 'fuel_type', 'brand',
           'unrepaired_damage', 'ad_created', 'nr_of_pictures', 'postal_code',
           'last_seen']
autos.columns = new_col
autos.head(3)

# We have converted the column names from camelcase to snakecase and reworded some of them, based on the data dictionary, to be more descriptive.
#
# ## Data Exploration and Cleaning
# Now, we present some basic data exploration to determine what other cleaning tasks need to be done.

# In[5]:

autos.describe(include='all')

# These results show that the seller, offer_type and nr_of_pictures columns contain mostly a single value, so they will be dropped.
#
# The gearbox and unrepaired_damage columns need more investigation: each has only two distinct values, one of which accounts for most of the dataset, and both contain missing values.
#
# On the other hand, the price and odometer columns hold numeric values stored as text. We will convert these values to a numeric type.
#
# #### Dropping columns with mostly one value
# Let's drop the seller, offer_type and nr_of_pictures columns because they have mostly one value; the sketch below confirms this first.
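# Before dropping, a quick check (a minimal sketch; it only re-inspects columns
# already summarized by describe above) shows how dominant the single value is
# in each candidate column:

# In[ ]:

for col in ["seller", "offer_type", "nr_of_pictures"]:
    # Normalized counts give the share of each distinct value, including NaN.
    print(autos[col].value_counts(normalize=True, dropna=False), "\n")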
# In[6]:

# Assign the result back so the columns are actually removed.
autos = autos.drop(["offer_type", "seller", "nr_of_pictures"], axis=1)
autos.head(5)

# #### Cleaning the price and odometer columns
# Let's clean and convert price and odometer.

# In[7]:

# regex=False treats '$' and ',' as literal characters, not regex patterns.
autos["price"] = autos["price"].str.replace('$', '', regex=False)
autos["price"] = autos["price"].str.replace(',', '', regex=False)
autos["price"] = autos["price"].astype(int)

# In[8]:

autos["odometer"] = autos["odometer"].str.replace('km', '', regex=False)
autos["odometer"] = autos["odometer"].str.replace(',', '', regex=False)
autos["odometer"] = autos["odometer"].astype(int)
autos.rename({"odometer": "odometer_km"}, axis=1, inplace=True)

# We continue exploring the data, specifically looking for values that don't look right. We'll start by analyzing the odometer_km and price columns using minimum and maximum values, and look for any values that seem unrealistically high or low (outliers) that we might want to remove.

# In[9]:

print("Shape odometer_km", autos["odometer_km"].unique().shape)
print("Describe the odometer_km:")
print(autos["odometer_km"].describe())
autos["odometer_km"].value_counts()

# In[10]:

print("Shape price", autos["price"].unique().shape)
print("Describe the price:")
print(autos["price"].describe())
autos_price_sorted = autos["price"].value_counts().sort_index(ascending=True)
print(autos_price_sorted.head(20))
print(autos_price_sorted.tail(20))

# The values in odometer_km seem plausible. However, we will remove the rows with a price equal to 0 and those with values higher than 350,000, since the values in the price column jump from 350,000 straight to 999,999.

# In[11]:

autos = autos[autos['price'].between(1, 350000)]
autos.describe()

# The new statistical description of this column shows an average price of almost 5,900, with most values between 1,200 and 7,490.
#
# ### Exploring date columns
# First, we will look at how the values in the three string columns are formatted. These columns all represent full timestamp values:

# In[12]:

autos[['date_crawled', 'ad_created', 'last_seen']][0:5]

# To understand the date range, we can extract just the first 10 characters, which represent the date values. Then, we calculate the distribution of values in these columns.
# #### Exploring the date_crawled column

# In[13]:

date_crawled = autos['date_crawled'].str[:10]
date_crawled.value_counts(normalize=True, dropna=False).sort_index(ascending=True)

# The results show that the distribution of values in the date_crawled column is fairly balanced across most dates. However, on the last three dates the number of ads crawled drops relative to the rest, with those days each accounting for less than 1.5%. Note that there are also three exceptional dates in March where the percentage is below 1.5%.
#
# #### Exploring the ad_created column

# In[14]:

ad_created = autos['ad_created'].str[:10]
value_counts_ad_created = ad_created.value_counts(normalize=True, dropna=False).sort_index(ascending=True)
print(value_counts_ad_created.head(10))
print(value_counts_ad_created.tail(35))
value_counts_ad_created.describe()

# The earliest dates in the dataset fall at the end of 2015, when the share of ads created per date was far below 1%, often just a single ad. At the beginning of March 2016, however, this number increased, reaching about 3% until almost the beginning of April.
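# The string slicing above works because the timestamps share a fixed format.
# An alternative is to parse them as datetimes (a minimal sketch; assumes the
# 'YYYY-MM-DD HH:MM:SS' layout seen in the output of In[12]):

# In[ ]:

# errors="coerce" turns any malformed timestamp into NaT instead of raising.
crawled_dates = pd.to_datetime(autos['date_crawled'], errors='coerce').dt.date
crawled_dates.value_counts(normalize=True, dropna=False).sort_index()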
# #### Exploring the last_seen column

# In[15]:

last_seen = autos['last_seen'].str[:10]
value_counts_last_seen = last_seen.value_counts(normalize=True, dropna=False).sort_index(ascending=True)
print(value_counts_last_seen)
value_counts_last_seen.describe()

# The results show that the distribution of values in the last_seen column is fairly balanced across most dates, with values around one to two percent. However, these values rise sharply on the last three dates, each accounting for more than 12%; this spike likely reflects the end of the crawling period rather than a surge in sales.
#
# #### Exploring the registration_year column

# In[16]:

autos["registration_year"].describe()

# This description shows that the registration_year column contains odd values, since the minimum is 1000 and the maximum is 9999.
#
# Let's count the number of listings with cars that fall outside the 1900 - 2016 interval and see if it's safe to remove those rows entirely, or if we need more custom logic.

# In[17]:

autos_year_1900 = autos[autos['registration_year'] < 1900]
print("Number of autos with a registration year less than 1900:", autos_year_1900.shape[0])
autos_year_2016 = autos[autos['registration_year'] > 2016]
print("Number of autos with a registration year higher than 2016:", autos_year_2016.shape[0])
print("\nValue counts of registration years before 1940:")
print(autos.loc[(autos["registration_year"] < 1940), "registration_year"].value_counts().sort_index(ascending=True).head(10))

# The number of autos with a registration year less than 1900 is 5. These rows will be removed since their registration year is invalid.
#
# Now, let's analyse the autos with a registration year of 1910, which is the next year stored after 1900.

# In[18]:

autos[autos["registration_year"] == 1910]

# There are five rows with 1910 in the registration_year column. Most of them have missing values in the vehicle_type, gearbox, fuel_type, unrepaired_damage and model columns. Moreover, the registration_month column contains 0 for all these rows, which is an invalid value. The price column also holds odd values, ranging between 1 and 599. Finally, it is very unlikely that a car registered in 1910 would be sold on eBay. For these reasons, these rows will be removed.
#
# Let's analyse the autos with a registration year between 1927 and 1940, which could plausibly appear on eBay as genuine vintage listings.

# In[19]:

autos_1927_1940 = autos[autos["registration_year"].between(1927, 1940)]
percent_missing = autos_1927_1940.isnull().sum() * 100 / len(autos_1927_1940)
print("Percent missing for the autos with registration year between 1927-1940: \n", percent_missing)
autos_1927_1940.describe()

# The results show that the prices in these rows are quite plausible and that they have few missing values in almost all columns.
#
# Therefore, the lowest acceptable value for the registration_year column will be 1927 and the highest will be 2016: a car can't be first registered after the listing was seen, so any vehicle with a registration year above 2016 is definitely inaccurate.

# In[20]:

autos = autos[autos['registration_year'].between(1927, 2016)]
value_counts_autos_reg_year = autos['registration_year'].value_counts(normalize=True).sort_index(ascending=True)
print(value_counts_autos_reg_year.head(30))
print(value_counts_autos_reg_year.tail(20))
value_counts_autos_reg_year.describe()

# We can see that only a small share of autos have registration years in the early decades of the 1900s; after the 1960s the percentage increases, and the highest percentages (3-6%) appear from the end of the 1990s until 2011.
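# A more compact way to see the same pattern is to bucket registration years
# by decade (a minimal sketch; the decade buckets are introduced here purely
# for illustration):

# In[ ]:

# Integer-divide each year by 10 and scale back up, so e.g. 1998 -> 1990.
decade = (autos['registration_year'] // 10) * 10
decade.value_counts(normalize=True).sort_index()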
# ### Exploring variations across different car brands
#
# When working with data on cars, it's natural to explore variations across different car brands. We will explore the unique values in the brand column and decide which brands we want to aggregate by.

# In[21]:

value_counts_brand = autos['brand'].value_counts(normalize=True)
print(value_counts_brand.head(10))
autos['brand'].describe()

# For further analysis, we select the brands that account for more than 5% of the total listings. The description of the brand column shows that it has 40 unique values, the most frequent of which is volkswagen.

# In[22]:

brands_selected = value_counts_brand[value_counts_brand > 0.05]
brand_mean_prices = {}
brands_mean_mileage = {}
for b in brands_selected.index.values:
    prices_brand = autos.loc[autos["brand"] == b, "price"]
    mileage_brand = autos.loc[autos["brand"] == b, "odometer_km"]
    # Mean = sum / count, rounded to two decimals.
    brand_mean_prices[b] = round(prices_brand.sum() / len(prices_brand), 2)
    brands_mean_mileage[b] = round(mileage_brand.sum() / len(mileage_brand), 2)

bm_price_series = pd.Series(brand_mean_prices)
bm_mileage_series = pd.Series(brands_mean_mileage)
df_brands = pd.DataFrame(bm_price_series, columns=['mean_price'])
df_brands['mean_mileage_km'] = bm_mileage_series
df_brands

# The above results show that the most expensive brands are Audi, BMW and Mercedes Benz, while the least expensive are Ford and Opel; Volkswagen sits in between. The average mileage is quite similar across all the brands, although BMW and Mercedes Benz show the highest values in this column.
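# The same table can be built without an explicit loop using groupby (a minimal
# sketch of an equivalent, more idiomatic aggregation; it should match
# df_brands above up to rounding):

# In[ ]:

df_brands_alt = (autos[autos['brand'].isin(brands_selected.index)]
                 .groupby('brand')[['price', 'odometer_km']]
                 .mean()
                 .round(2)
                 .rename(columns={'price': 'mean_price',
                                  'odometer_km': 'mean_mileage_km'}))
df_brands_alt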