Import necessary dependencies and settings

In [1]:
import pandas as pd
import numpy as np
import re
import nltk
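If the NLTK stopword list is not already available locally, it can be fetched once with nltk.download (a one-time setup step, not part of the original run):

nltk.download('stopwords')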

Sample corpus of text documents

In [2]:
corpus = ['The sky is blue and beautiful.',
          'Love this blue and beautiful sky!',
          'The quick brown fox jumps over the lazy dog.',
          'The brown fox is quick and the blue dog is lazy!',
          'The sky is very blue and the sky is very beautiful today',
          'The dog is lazy but the brown fox is quick!'    
]
labels = ['weather', 'weather', 'animals', 'animals', 'weather', 'animals']
corpus = np.array(corpus)
corpus_df = pd.DataFrame({'Document': corpus, 
                          'Category': labels})
corpus_df = corpus_df[['Document', 'Category']]
corpus_df
Out[2]:
Document Category
0 The sky is blue and beautiful. weather
1 Love this blue and beautiful sky! weather
2 The quick brown fox jumps over the lazy dog. animals
3 The brown fox is quick and the blue dog is lazy! animals
4 The sky is very blue and the sky is very beaut... weather
5 The dog is lazy but the brown fox is quick! animals

Simple text pre-processing

In [3]:
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('english')

def normalize_document(doc):
    # lowercase and remove special characters/extra whitespace
    # note: flags must be passed by keyword; the positional slot is 'count'
    doc = re.sub(r'[^a-zA-Z0-9\s]', '', doc, flags=re.I)
    doc = doc.lower()
    doc = doc.strip()
    # tokenize document
    tokens = wpt.tokenize(doc)
    # filter stopwords out of document
    filtered_tokens = [token for token in tokens if token not in stop_words]
    # re-create document from filtered tokens
    doc = ' '.join(filtered_tokens)
    return doc

normalize_corpus = np.vectorize(normalize_document)
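np.vectorize is only a convenience wrapper here: it applies normalize_document element-wise over the array rather than providing any real vectorization. An equivalent formulation (a sketch, not part of the original notebook) is a plain list comprehension:

# equivalent to calling normalize_corpus(corpus) below
norm_corpus = np.array([normalize_document(doc) for doc in corpus])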
In [4]:
norm_corpus = normalize_corpus(corpus)
norm_corpus
Out[4]:
array(['sky blue beautiful', 'love blue beautiful sky',
       'quick brown fox jumps lazy dog', 'brown fox quick blue dog lazy',
       'sky blue sky beautiful today', 'dog lazy brown fox quick'],
      dtype='<U30')

Bag of Words Model

In [5]:
from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(min_df=0., max_df=1.)
cv_matrix = cv.fit_transform(norm_corpus)
cv_matrix = cv_matrix.toarray()
cv_matrix
Out[5]:
array([[1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
       [1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
       [0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0],
       [0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0],
       [1, 1, 0, 0, 0, 0, 0, 0, 0, 2, 1],
       [0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0]], dtype=int64)
In [6]:
vocab = cv.get_feature_names()
pd.DataFrame(cv_matrix, columns=vocab)
Out[6]:
   beautiful  blue  brown  dog  fox  jumps  lazy  love  quick  sky  today
0          1     1      0    0    0      0     0     0      0    1      0
1          1     1      0    0    0      0     0     1      0    1      0
2          0     0      1    1    1      1     1     0      1    0      0
3          0     1      1    1    1      0     1     0      1    0      0
4          1     1      0    0    0      0     0     0      0    2      1
5          0     0      1    1    1      0     1     0      1    0      0
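Each cell above is simply the raw count of a term in a document. For example, 'sky' occurs twice in the fifth normalized document, which a quick check confirms (a small sanity check, not part of the original run):

# 'sky blue sky beautiful today' contains 'sky' twice, matching the 2 in row 4
norm_corpus[4].split().count('sky')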

Bag of N-Grams Model

In [7]:
bv = CountVectorizer(ngram_range=(2,2))
bv_matrix = bv.fit_transform(norm_corpus)
bv_matrix = bv_matrix.toarray()
vocab = bv.get_feature_names()
pd.DataFrame(bv_matrix, columns=vocab)
Out[7]:
   beautiful sky  beautiful today  blue beautiful  blue dog  blue sky  brown fox  dog lazy  fox jumps  fox quick  jumps lazy  lazy brown  lazy dog  love blue  quick blue  quick brown  sky beautiful  sky blue
0              0                0               1         0         0          0         0          0          0           0           0         0          0           0            0              0         1
1              1                0               1         0         0          0         0          0          0           0           0         0          1           0            0              0         0
2              0                0               0         0         0          1         0          1          0           1           0         1          0           0            1              0         0
3              0                0               0         1         0          1         1          0          1           0           0         0          0           1            0              0         0
4              0                1               0         0         1          0         0          0          0           0           0         0          0           0            0              1         1
5              0                0               0         0         0          1         1          0          1           0           1         0          0           0            0              0         0
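The ngram_range parameter controls which n-gram sizes are extracted; setting it to (1, 2), for instance, would yield a single matrix containing both unigrams and bigrams (a hedged variation not shown in the original run; ubv is just an illustrative name):

# combined unigram + bigram features
ubv = CountVectorizer(ngram_range=(1, 2))
ubv_matrix = ubv.fit_transform(norm_corpus).toarray()
pd.DataFrame(ubv_matrix, columns=ubv.get_feature_names())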

TF-IDF Model

In [8]:
from sklearn.feature_extraction.text import TfidfVectorizer

tv = TfidfVectorizer(min_df=0., max_df=1., use_idf=True)
tv_matrix = tv.fit_transform(norm_corpus)
tv_matrix = tv_matrix.toarray()

vocab = tv.get_feature_names()
pd.DataFrame(np.round(tv_matrix, 2), columns=vocab)
Out[8]:
   beautiful  blue  brown   dog   fox  jumps  lazy  love  quick   sky  today
0       0.60  0.52   0.00  0.00  0.00   0.00  0.00  0.00   0.00  0.60   0.00
1       0.46  0.39   0.00  0.00  0.00   0.00  0.00  0.66   0.00  0.46   0.00
2       0.00  0.00   0.38  0.38  0.38   0.54  0.38  0.00   0.38  0.00   0.00
3       0.00  0.36   0.42  0.42  0.42   0.00  0.42  0.00   0.42  0.00   0.00
4       0.36  0.31   0.00  0.00  0.00   0.00  0.00  0.00   0.00  0.72   0.52
5       0.00  0.00   0.45  0.45  0.45   0.00  0.45  0.00   0.45  0.00   0.00
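With its default settings (smooth_idf=True, norm='l2'), TfidfVectorizer multiplies each term count by a smoothed inverse document frequency and then L2-normalizes every row. The weights for the first document can be reproduced by hand (a minimal sketch of the formula, not part of the original notebook):

# tf-idf for document 0 ('sky blue beautiful'):
# idf(t) = ln((1 + n_docs) / (1 + df(t))) + 1, followed by l2 row normalization
n_docs = len(norm_corpus)
terms = norm_corpus[0].split()
raw = [terms.count(t) * (np.log((1 + n_docs) / (1 + sum(t in d.split() for d in norm_corpus))) + 1)
       for t in terms]
dict(zip(terms, np.round(np.array(raw) / np.linalg.norm(raw), 2)))
# roughly {'sky': 0.6, 'blue': 0.52, 'beautiful': 0.6}, matching row 0 above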

Document Similarity

In [9]:
from sklearn.metrics.pairwise import cosine_similarity

similarity_matrix = cosine_similarity(tv_matrix)
similarity_df = pd.DataFrame(similarity_matrix)
similarity_df
Out[9]:
          0         1         2         3         4         5
0  1.000000  0.753128  0.000000  0.185447  0.807539  0.000000
1  0.753128  1.000000  0.000000  0.139665  0.608181  0.000000
2  0.000000  0.000000  1.000000  0.784362  0.000000  0.839987
3  0.185447  0.139665  0.784362  1.000000  0.109653  0.933779
4  0.807539  0.608181  0.000000  0.109653  1.000000  0.000000
5  0.000000  0.000000  0.839987  0.933779  0.000000  1.000000
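Because the TF-IDF rows are already L2-normalized, the cosine similarity between two documents reduces to a plain dot product (a quick check, not in the original run):

# similarity of documents 0 and 1 as a dot product
np.dot(tv_matrix[0], tv_matrix[1])   # ~0.753, matching the matrix above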

Clustering documents using similarity features

In [10]:
from sklearn.cluster import KMeans

km = KMeans(n_clusters=2)
km.fit_transform(similarity_df)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
Out[10]:
Document Category ClusterLabel
0 The sky is blue and beautiful. weather 1
1 Love this blue and beautiful sky! weather 1
2 The quick brown fox jumps over the lazy dog. animals 0
3 The brown fox is quick and the blue dog is lazy! animals 0
4 The sky is very blue and the sky is very beaut... weather 1
5 The dog is lazy but the brown fox is quick! animals 0
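Note that the numeric cluster labels are arbitrary and can swap between runs of KMeans; passing a fixed random_state (not done in the original run) makes the assignment reproducible:

km = KMeans(n_clusters=2, random_state=0)   # reproducible label assignment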

Topic models

In [11]:
from sklearn.decomposition import LatentDirichletAllocation

lda = LatentDirichletAllocation(n_topics=2, max_iter=100, random_state=42)
dt_matrix = lda.fit_transform(tv_matrix)
features = pd.DataFrame(dt_matrix, columns=['T1', 'T2'])
features
Out[11]:
T1 T2
0 0.190615 0.809385
1 0.176860 0.823140
2 0.846148 0.153852
3 0.815229 0.184771
4 0.180563 0.819437
5 0.839140 0.160860

Show topics and their weights

In [12]:
tt_matrix = lda.components_
for topic_weights in tt_matrix:
    topic = [(token, weight) for token, weight in zip(vocab, topic_weights)]
    topic = sorted(topic, key=lambda x: -x[1])
    topic = [item for item in topic if item[1] > 0.6]
    print(topic)
    print()
[('fox', 1.7265536238698524), ('quick', 1.7264910761871224), ('dog', 1.7264019823624879), ('brown', 1.7263774760262807), ('lazy', 1.7263567668213813), ('jumps', 1.0326450363521607), ('blue', 0.7770158513472083)]

[('sky', 2.263185143458752), ('beautiful', 1.9057084998062579), ('blue', 1.7954559705805626), ('love', 1.1476805311187976), ('today', 1.0064979209198706)]
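Each document can also be assigned to its dominant topic straight from the document-topic matrix (a small follow-up, not in the original run); from the weights in Out[11], documents 2, 3 and 5 fall under the first topic and the rest under the second:

dt_matrix.argmax(axis=1)   # expected: array([1, 1, 0, 0, 1, 0]) given the weights above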

Clustering documents using topic model features

In [13]:
km = KMeans(n_clusters=2)
km.fit_transform(features)
cluster_labels = km.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
Out[13]:
Document Category ClusterLabel
0 The sky is blue and beautiful. weather 0
1 Love this blue and beautiful sky! weather 0
2 The quick brown fox jumps over the lazy dog. animals 1
3 The brown fox is quick and the blue dog is lazy! animals 1
4 The sky is very blue and the sky is very beaut... weather 0
5 The dog is lazy but the brown fox is quick! animals 1

Word Embeddings

In [14]:
from gensim.models import word2vec

wpt = nltk.WordPunctTokenizer()
tokenized_corpus = [wpt.tokenize(document) for document in norm_corpus]

# Set values for various parameters
feature_size = 10    # Word vector dimensionality  
window_context = 10          # Context window size                                                                                    
min_word_count = 1   # Minimum word count                        
sample = 1e-3   # Downsample setting for frequent words

w2v_model = word2vec.Word2Vec(tokenized_corpus, size=feature_size, 
                          window=window_context, min_count = min_word_count,
                          sample=sample)
In [15]:
w2v_model.wv['sky']
Out[15]:
array([ 0.01000661, -0.02096199,  0.03674392, -0.03230235,  0.02149896,
       -0.01101904,  0.01973963,  0.00615971, -0.01040678, -0.00668737], dtype=float32)
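The trained model also supports nearest-neighbour queries through most_similar, although with such a tiny corpus and only 10-dimensional vectors the neighbours are close to random (an illustrative call, not part of the original run):

w2v_model.wv.most_similar('sky', topn=3)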
In [16]:
def average_word_vectors(words, model, vocabulary, num_features):
    
    feature_vector = np.zeros((num_features,),dtype="float64")
    nwords = 0.
    
    for word in words:
        if word in vocabulary: 
            nwords = nwords + 1.
            # look the vector up on the .wv attribute rather than the model itself
            feature_vector = np.add(feature_vector, model.wv[word])
    
    if nwords:
        feature_vector = np.divide(feature_vector, nwords)
        
    return feature_vector
    
   
def averaged_word_vectorizer(corpus, model, num_features):
    vocabulary = set(model.wv.index2word)
    features = [average_word_vectors(tokenized_sentence, model, vocabulary, num_features)
                    for tokenized_sentence in corpus]
    return np.array(features)
In [17]:
w2v_feature_array = averaged_word_vectorizer(corpus=tokenized_corpus, model=w2v_model,
                                             num_features=feature_size)
pd.DataFrame(w2v_feature_array)
Out[17]:
           0          1          2          3          4          5          6          7          8          9
0  -0.010540  -0.015367   0.005373  -0.020741   0.030717  -0.022407  -0.001724   0.004722   0.026881   0.011909
1  -0.017797  -0.013693  -0.003599  -0.015436   0.022831  -0.017905   0.010470   0.001540   0.025658   0.016208
2  -0.020869  -0.018273  -0.019681  -0.004124  -0.010980   0.001654  -0.001310   0.003395   0.003760   0.010851
3  -0.017561  -0.017866  -0.016438  -0.007601  -0.005687  -0.008843  -0.002385   0.001444   0.005643   0.012638
4   0.002371  -0.006731   0.017480  -0.014220   0.022088  -0.014882   0.003067   0.002605   0.021167   0.006461
5  -0.018306  -0.012056  -0.015671  -0.011617  -0.011667  -0.005490   0.005404  -0.003512  -0.003198   0.013306
In [18]:
from sklearn.cluster import AffinityPropagation

ap = AffinityPropagation()
ap.fit(w2v_feature_array)
cluster_labels = ap.labels_
cluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])
pd.concat([corpus_df, cluster_labels], axis=1)
Out[18]:
Document Category ClusterLabel
0 The sky is blue and beautiful. weather 0
1 Love this blue and beautiful sky! weather 0
2 The quick brown fox jumps over the lazy dog. animals 1
3 The brown fox is quick and the blue dog is lazy! animals 1
4 The sky is very blue and the sky is very beaut... weather 0
5 The dog is lazy but the brown fox is quick! animals 1
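A quick cross-tabulation of the predicted clusters against the known categories confirms the alignment seen above (a small follow-up check; result_df is just an illustrative name for the concatenated frame):

result_df = pd.concat([corpus_df, cluster_labels], axis=1)
pd.crosstab(result_df['Category'], result_df['ClusterLabel'])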