Benchmark for querying the MovieLens dataset using pandas and bcolz

In [1]:
import os.path
import numpy as np
import pandas as pd
import bcolz
from time import time

Import the CSV files into pandas dataframes

In [2]:
# Files where the data is (using the 10M dataset here)
dset = 'ml-10m'
ftags = os.path.join(dset, 'tags.dat')
fdata = os.path.join(dset, 'ratings.dat.gz')
fitem = os.path.join(dset, 'movies.dat')

zdset = 'ml-10m.blz'
pdset = 'ml-10m.h5'
In [3]:
# Global settings for bcolz and pandas (for experimenting, modify some of the lines below)
bcolz.defaults.cparams['cname'] = 'blosclz'
bcolz.defaults.cparams['clevel'] = 1
#bcolz.defaults.eval_vm = "numexpr"
#bcolz.blosc_set_nthreads(4)
#bcolz.numexpr.set_num_threads(4)
# from pandas.computation import expressions as expr
# expr.set_use_numexpr(True)
# expr.set_numexpr_threads(1)
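
As an aside, compression parameters do not have to be set through the global defaults; they can also be passed per container. A minimal sketch (the cname and clevel values here are illustrative, not the ones benchmarked):

# a small carray with its own compression settings
a = bcolz.carray(np.arange(1000000, dtype='i8'),
                 cparams=bcolz.cparams(cname='lz4', clevel=5))
print(a.nbytes, a.cbytes)  # uncompressed vs. compressed bytes
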
In [4]:
# Load the ratings in a pandas dataframe
t0 = time()
# pass in column names for each CSV (these files are assumed to use ';' as
# the field separator; the raw MovieLens .dat files use '::', which pandas'
# C parser does not support)
t_cols = ['user_id', 'movie_id', 'tag', 'unix_timestamp']
tags = pd.read_csv(ftags, sep=';', names=t_cols)
#print("Info for tags:", tags.info())

r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(fdata, sep=';', names=r_cols, compression='gzip')

m_cols = ['movie_id', 'title', 'genres']
movies = pd.read_csv(fitem, sep=';', names=m_cols,
                     dtype={'title': "S100", 'genres': "S100"})
print("Time for parsing the data: %.2f" % (time()-t0,)) 

t0 = time()
# create one merged DataFrame
movie_ratings = pd.merge(movies, ratings)
# You probably need more than 8 GB of RAM for the next merge
#lens = pd.merge(movie_ratings, tags, on='user_id')
lens = movie_ratings
print("Time for dataframe merges: %.2f" % (time()-t0,)) 
print("Info for movie_ratings:", movie_ratings.info())
print("Info for lens:", lens.info())
Time for parsing the data: 5.33
Time for dataframe merges: 1.36
<class 'pandas.core.frame.DataFrame'>
Int64Index: 10000054 entries, 0 to 10000053
Data columns (total 6 columns):
movie_id          int64
title             object
genres            object
user_id           int64
rating            float64
unix_timestamp    int64
dtypes: float64(1), int64(3), object(2)
('Info for movie_ratings:', None)
<class 'pandas.core.frame.DataFrame'>
Int64Index: 10000054 entries, 0 to 10000053
Data columns (total 6 columns):
movie_id          int64
title             object
genres            object
user_id           int64
rating            float64
unix_timestamp    int64
dtypes: float64(1), int64(3), object(2)
('Info for lens:', None)

Compute the size of the dataframe, taking into account the true object overhead

In [5]:
# Size of the dataframe in-memory
rsize = lens.values.nbytes / 2**20.
print("Size for regular values in lens dataframe: %.1f MB" % (rsize,))
Size for regular values in lens dataframe: 457.8 MB
In [6]:
# Compute the overhead of the objects in the dataframe
# (.blocks is an old pandas accessor; recent versions expose
# DataFrame.memory_usage(deep=True) instead)
dfo = lens.blocks['object']
import sys
overhead = sum(sys.getsizeof(o) for o in dfo.values.flat) / 2**20.
print("Overhead of objects: %.1f MB" % (overhead ,))
Overhead of objects: 1115.1 MB
In [7]:
# Total size:
dfsize = rsize + overhead
print("Total size for the dataframe: %.1f MB" % (dfsize,))
Total size for the dataframe: 1572.8 MB
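
For reference, recent pandas versions can report a comparable figure directly; a sketch, assuming a pandas version that supports the deep flag:

# per-column sizes in bytes, object overhead included
deep = lens.memory_usage(deep=True)
print("Total (deep) size of lens: %.1f MB" % (deep.sum() / 2**20.,))
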

Querying with pandas

In [8]:
# Simple query
t0 = time()
#result = lens[lens['title'] == 'Tom and Huck (1995)']
result = lens.query("title == 'Tom and Huck (1995)'")
dfsmplqtime = time() - t0
print("time (and length) for simple query with pandas: %.2f (%d)" %
      (dfsmplqtime, len(result)))
#print repr(result)
time (and length) for simple query with pandas: 0.56 (899)
In [9]:
# Complex query
t0 = time()
#result = lens[(lens['title'] == 'Tom and Huck (1995)') & (lens['rating'] == 5)]['user_id']
result = lens.query("(title == 'Tom and Huck (1995)') & (rating == 5)")['user_id']
dfcmplxqtime = time() - t0
print("time (and length) for complex query with pandas: %.2f (%d)" %
      (dfcmplxqtime, len(result)))
time (and length) for complex query with pandas: 0.57 (74)
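
DataFrame.query() also accepts an engine argument, so numexpr's contribution can be isolated; a sketch (engine='python' falls back to plain Python evaluation):

t0 = time()
result = lens.query("(title == 'Tom and Huck (1995)') & (rating == 5)",
                    engine='python')['user_id']
print("time for the complex query with the python engine: %.2f" % (time()-t0,))
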

Build a ctable out of the dataframe

In [10]:
# Get a bcolz ctable out of the lens dataframe (using compression by default)
t0 = time()
zlens = bcolz.ctable.fromdataframe(lens)
print("time for ctable conversion: %.2f" % (time()-t0))
#print repr(zlens)
time for ctable conversion: 2.89
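
The conversion also works the other way around: ctable.todataframe() rebuilds a pandas dataframe, optionally for a subset of columns. A sketch (not timed here):

# back to pandas, keeping only two columns
df2 = zlens.todataframe(columns=['movie_id', 'rating'])
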
In [11]:
# Get rid of the dataframe to cleanup memory
del lens
In [12]:
# Size for the compressed ctable
zsize = zlens.cbytes / 2**20.
print("Total size for the ctable: %.1f MB" % (zsize,))
# Compression ratio
print("Compression ratio the ctable: %.1fx" % (zlens.nbytes / float(zlens.cbytes),))
# Reduction in size compared with the dataframe
print("Storage savings compared with a dataframe: %.1fx" % (dfsize / zsize,))
Total size for the ctable: 90.6 MB
Compression ratio for the ctable: 20.2x
Storage savings compared with a dataframe: 17.4x

Query the ctable

In [13]:
# Simple query
t0 = time()
result = zlens["title == 'Tom and Huck (1995)'"]
zsmplqtime = time() - t0
print("time (and length) for simple query with bcolz: %.2f (%d)" %
      (zsmplqtime, len(result)))
#print repr(result)
time (and length) for simple query with bcolz: 0.22 (899)
In [14]:
# Complex query
t0 = time()
#result = zlens["(title == 'Tom and Huck (1995)') & (rating == 5)"]['user_id']
result = [r.user_id for r in zlens.where(
    "(title == 'Tom and Huck (1995)') & (rating == 5)", outcols=['user_id'])]
zcmplxqtime = time() - t0
print("time (and length) for complex query with bcolz: %.2f (%d)" %
      (zcmplxqtime, len(result)))
#print(repr(result))
time (and length) for complex query with bcolz: 0.26 (74)
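
where() also takes limit and skip arguments, which is handy when only part of the hits is needed; a sketch:

# fetch just the first 10 matching user_ids
first10 = [r.user_id for r in zlens.where(
    "(title == 'Tom and Huck (1995)') & (rating == 5)",
    outcols=['user_id'], limit=10)]
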

Testing an uncompressed ctable

In [15]:
# Size for the uncompressed ctable
nzlens = zlens.copy(cparams=bcolz.cparams(clevel=0))
nzsize = nzlens.cbytes / 2**20.
print("Total size for the ctable: %.1f MB" % (nzsize,))
# Compression ratio
print("Compression ratio the ctable: %.1fx" % (nzlens.nbytes / float(nzlens.cbytes),))
# Reduction in size compared with the dataframe
print("Storage savings compared with a dataframe: %.1fx" % (dfsize / nzsize,))
Total size for the ctable: 1833.2 MB
Compression ratio for the ctable: 1.0x
Storage savings compared with a dataframe: 0.9x
In [16]:
# Time for a simple query with the uncompressed ctable
t0 = time()
result = nzlens["title == 'Tom and Huck (1995)'"]
nzsmplqtime = time() - t0
print("time (and length) for simple query with bcolz: %.2f (%d)" %
      (nzsmplqtime, len(result)))
#print repr(result)
time (and length) for simple query with uncompressed bcolz: 0.28 (899)
In [17]:
# Time for a complex query with the uncompressed ctable
t0 = time()
#result = nzlens["(title == 'Tom and Huck (1995)') & (rating == 5)"]['user_id']
result = [r.user_id for r in nzlens.where(
    "(title == 'Tom and Huck (1995)') & (rating == 5)", outcols=['user_id'])]
nzcmplxqtime = time() - t0
print("time (and length) for complex query with bcolz: %.2f (%d)" %
      (nzcmplxqtime, len(result)))
time (and length) for complex query with uncompressed bcolz: 0.32 (74)
In [18]:
# Get rid of previous dataset to clean up memory
del nzlens

Testing a disk-based ctable

In [19]:
# Size for a disk-based ctable
fname = "ctable-10m.bcolz"
import shutil
if os.path.exists(fname):
    shutil.rmtree(fname)
dblens = zlens.copy(rootdir=fname)
# "du -sh "ctable-10m.bcolz"
dbsize = dblens.cbytes / 2**20.
print("Total size for the ctable: %.1f MB" % (dbsize,))
# Compression ratio
print("Compression ratio for the ctable: %.1fx" % (dblens.nbytes / float(dblens.cbytes),))
# Reduction in size compared with the dataframe
print("Storage savings compared with a dataframe: %.1fx" % (dfsize / dbsize,))
Total size for the ctable: 90.6 MB
Compression ratio for the ctable: 20.2x
Storage savings compared with a dataframe: 17.4x
In [20]:
!du -sh "ctable-10m.bcolz"
90M	ctable-10m.bcolz
In [21]:
# Time for a simple query with the on-disk ctable
t0 = time()
result = dblens["title == 'Tom and Huck (1995)'"]
dbsmplqtime = time() - t0
print("time (and length) for simple query with on-disk bcolz: %.2f (%d)" %
      (dbsmplqtime, len(result)))
#print repr(result)
time (and length) for simple query with on-disk bcolz: 0.33 (899)
In [22]:
# Time for a complex query with the on-disk ctable
t0 = time()
#result = dblens["(title == 'Tom and Huck (1995)') & (rating == 5)"]['user_id']
result = [r.user_id for r in dblens.where(
    "(title == 'Tom and Huck (1995)') & (rating == 5)", outcols=['user_id'])]
dbcmplxqtime = time() - t0
print("time (and length) for complex query with on-disk bcolz: %.2f (%d)" %
      (dbcmplxqtime, len(result)))
time (and length) for complex query with on-disk bcolz: 0.38 (74)
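
Because the ctable is persisted, it can be reopened in a later session without redoing the copy; a sketch using bcolz.open():

# lazily map the on-disk ctable (no data is loaded until queried)
dblens2 = bcolz.open("ctable-10m.bcolz")
print(len(dblens2), "%.1f MB" % (dblens2.cbytes / 2**20.,))
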

Plots

In [23]:
%matplotlib inline
# Sizes
index = ["pandas", "bcolz (memory, nocompr)", "bcolz (memory, compr)", "bcolz (disk, compr)"]
#index = ["bcolz (disk)", "bcolz (compr)", "bcolz (nocompr)", "pandas"]
df = pd.DataFrame({'size (MB)': [dfsize, nzsize, zsize, dbsize]}, index=index)
pd.options.display.mpl_style = 'default'
df.plot(kind='barh', figsize=(12,7), fontsize=16, title="Size of the datasets")
Out[23]:
<matplotlib.axes.AxesSubplot at 0x7f43b30cb9d0>
In [24]:
# Simple query times
df = pd.DataFrame({'time (sec)': [dfsmplqtime, nzsmplqtime, zsmplqtime, dbsmplqtime]}, index=index)
pd.options.display.mpl_style = 'default'
df.plot(kind='barh', figsize=(12,5), fontsize=16, title="Simple query times for MovieLens 10M")
Out[24]:
<matplotlib.axes.AxesSubplot at 0x7f43b30cb290>
In [25]:
# Complex query times
df = pd.DataFrame({'time (sec)': [dfcmplxqtime, nzcmplxqtime, zcmplxqtime, dbcmplxqtime]}, index=index)
pd.options.display.mpl_style = 'default'
df.plot(kind='barh', figsize=(12,5), fontsize=16, title="Complex query times for MovieLens 10M")
Out[25]:
<matplotlib.axes.AxesSubplot at 0x7f43b2294410>
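
The numbers behind the three plots can also be gathered in a single frame for quick inspection; a sketch reusing the variables computed above:

summary = pd.DataFrame(
    {'size (MB)': [dfsize, nzsize, zsize, dbsize],
     'simple query (s)': [dfsmplqtime, nzsmplqtime, zsmplqtime, dbsmplqtime],
     'complex query (s)': [dfcmplxqtime, nzcmplxqtime, zcmplxqtime, dbcmplxqtime]},
    index=index)
print(summary)
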
In [ ]: