#!/usr/bin/env python
# coding: utf-8

# # Text Classification Using Topic Modelling

# Topic modelling is a good introduction to NLP.
# Note that a lot of what follows is based on [this](https://towardsdatascience.com/nlp-extracting-the-main-topics-from-your-dataset-using-lda-in-minutes-21486f5aa925) article.
#
# My intention with this is to
# * learn topic modelling
# * learn spacy
#
# As you will see (in earlier commits) the spacy part didn't pan out. It worked OK, but the doc parsing was super slow for this volume. I'm not sure why that was, given I'd heard great things about it. I've tried to leave links on the previous commits to discussions around tuning spacy and some of the things I tried.

# In[1]:


import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups

import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer

np.random.seed(42)


# ## Load the dataset
#
# We'll use a dataset of news articles grouped into 20 newsgroup categories, but just use 7 of them for this example.
# I've tried to pick groups that should have decent separation.

# In[2]:


categories = [
    'comp.windows.x',
    'rec.autos',
    'rec.sport.baseball',
    'rec.sport.hockey',
    'sci.space',
    'soc.religion.christian',
    'talk.politics.guns'
]


# In[3]:


newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)


# Let's look at an example

# In[4]:


print(newsgroups_train.data[6])


# In[5]:


target_newsgroup = newsgroups_train.target_names[newsgroups_train.target[6]]
print('Group: {}'.format(target_newsgroup))


# In[6]:


print(newsgroups_train.filenames.shape, newsgroups_train.target.shape)


# This should normally be enough rows, though it is split over 7 categories, which may not leave enough per category.
# Let's see how heavy each category is.

# In[7]:


import collections
collections.Counter(newsgroups_train.target)


# I could map the keys to the category names, but you can see by eye that it is a really balanced dataset.

# ## Data Preprocessing

# We transform the data to optimise it so the ML algorithm receives the strongest signal.
#
# * Tokenization: split the text into sentences and the sentences into words. Lowercase the words and remove punctuation.
# * Remove words of 3 characters or fewer.
# * Remove stopwords such as "the", "is", "at", "which" and "on".
# * Lemmatize: words in third person are changed to first person, and verbs in past and future tenses are changed into the present tense.
# * Stemming: words are reduced to their root form.
#
# Lemmatizing is a mapping of the word to its base form, i.e. went -> go.
# Stemming is more of a function on the word, such as removing the 'ing' from the end.
# We lemmatize first and then stem, since the lemma may be a word with a totally different spelling (going -> go is similar, but went -> go is spelt completely differently).
# Stemming can often result in an 'invalid' word, such as argue -> argu, which the lemmatizer wouldn't accept.
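# To make the distinction concrete, here's a quick standalone check (not part of the pipeline) of the lemmatize-then-stem chain on a few words. The expected output is roughly went -> go -> go, going -> go -> go, argued -> argue -> argu.

# In[ ]:


# Illustration only: lemmatize each word to its base verb form, then stem it
demo_lemmatizer = WordNetLemmatizer()
demo_stemmer = SnowballStemmer('english')
for word in ['went', 'going', 'argued']:
    lemma = demo_lemmatizer.lemmatize(word, pos='v')
    print('{} -> {} -> {}'.format(word, lemma, demo_stemmer.stem(lemma)))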
# In[8]:


stemmer = SnowballStemmer('english')  # the Porter2 stemmer

def lemmatize_stemming(text):
    lemmatized = WordNetLemmatizer().lemmatize(text, pos='v')
    return stemmer.stem(lemmatized)

def preprocess(text):
    """Tokenise and lemmatize text, dropping stopwords and short tokens."""
    result = []
    for token in gensim.utils.simple_preprocess(text):
        if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
            result.append(lemmatize_stemming(token))
    return result


# Check the output of the preprocessing

# In[9]:


doc_sample = 'This disk has failed many times. I would like to get it replaced.'
proc = preprocess(doc_sample)
print(proc)


# Preprocess all the messages we have (in parallel)

# In[10]:


import multiprocessing

with multiprocessing.Pool() as pool:
    processed_docs = pool.map(preprocess, newsgroups_train.data)


# In[11]:


print(processed_docs[:2])


# ## Create Bag of words
#
# A gensim Dictionary is a mapping between words and their integer ids. It also tracks how many documents each word appears in across the training set, which lets us filter the vocabulary below.

# In[12]:


dictionary = gensim.corpora.Dictionary(processed_docs)


# In[13]:


for i in range(5):
    print(i, dictionary[i])


# Filter out tokens that appear in
# * fewer than 15 documents, or
# * more than 10% of documents
# * and after (1) and (2), keep only the 100,000 most frequent tokens

# In[14]:


dictionary.filter_extremes(no_below=15, no_above=0.1, keep_n=100000)


# Convert each document (a list of words) into the bag-of-words format:
# a list of (token_id, token_count) tuples.

# In[15]:


bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]


# In[16]:


# i.e. the 4th (token_id, token_count) pair of the 11th message (both zero-indexed),
# and the word that token_id maps back to
bow_doc_x = bow_corpus[10]
bow_word_x = 3
print('{} - {}'.format(
    bow_doc_x[bow_word_x],
    dictionary[bow_doc_x[bow_word_x][0]]
))


# ## Build the LDA Model
# (Latent Dirichlet Allocation)

# If observations are words collected into documents, LDA posits that each document is a mixture of a small number of topics and that each word's presence is attributable to one of the document's topics.

# * **alpha** and **eta** are hyperparameters that affect the sparsity of the document-topic (theta) and topic-word (lambda) distributions. We will leave these at their defaults for now (the default value is `1/num_topics`).
#     - Alpha is the per-document topic distribution.
#         * High alpha: every document has a mixture of all topics (documents appear similar to each other).
#         * Low alpha: every document has a mixture of very few topics.
#     - Eta is the per-topic word distribution.
#         * High eta: each topic has a mixture of most words (topics appear similar to each other).
#         * Low eta: each topic has a mixture of few words.

# In[17]:


lda_model = gensim.models.LdaMulticore(
    bow_corpus,
    num_topics=7,
    id2word=dictionary,
    passes=10,
    workers=4)


# ## Evaluate the model

# In[18]:


lda_model.show_topics()


# show_topics lists the topics LDA has found. You can see that the first is clearly about religion. The numbers show how much weight each word adds towards a document being part of a topic. (TODO: can you get negative numbers that count against a topic? For LDA the weights are word probabilities, so presumably not.)
#
# This is an unsupervised algorithm, so it never sees the target field we have.
# Baseball isn't separately represented; it looks like the language used is very similar to hockey's, so the two sports share a topic.
# However, it has found 2 categories for religion. I'm guessing Christian conversations have a decent split between talking about concrete entities like Jesus and the church, and other conversations around spirituality and life in general.
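# To make the topics easier to scan, you can print just the top few words per topic. A small sketch using the model trained above (topn=5 is an arbitrary choice):

# In[ ]:


# Print the five highest-weighted words for each of the 7 topics
for topic_id in range(lda_model.num_topics):
    print('Topic {}: {}'.format(topic_id, lda_model.print_topic(topic_id, topn=5)))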
# In[19]:


categories_map = {
    3: 'comp.windows.x',
    6: 'rec.autos',
    -1: 'rec.sport.baseball',  # placeholder: no topic maps to baseball, it merged with hockey
    1: 'rec.sport.hockey',
    5: 'sci.space',
    2: 'soc.religion.christian',
    0: 'soc.religion.christian',
    4: 'talk.politics.guns'
}


# Testing the model on an unseen document

# In[20]:


num = 2
unseen_document = newsgroups_test.data[num]
print(unseen_document)
print(newsgroups_test.target_names[newsgroups_test.target[num]])


# The document is from the soc.religion.christian group. Pushing it through the model gives estimates of which categories LDA thinks the document belongs to.

# In[21]:


# Data preprocessing step for the unseen document
bow_vector = dictionary.doc2bow(preprocess(unseen_document))
pred = sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1])
print(pred)


# In[22]:


print('predicts {} with a probability of {:.2f}%'.format(
    categories_map[pred[0][0]], pred[0][1] * 100))


# It correctly classifies the document as religion. It also shows a 20% chance of the document being about windows and 14% about hockey.
# This is multiclass classification, so rather than an x% chance of the doc falling into a single category, it is more that the document is covered by a number of topics, with religion being the most covered.

# ### Check Accuracy

# We have a test dataset we can use to check the accuracy. We preprocess the documents, pass them through the model and see how many predictions match the true topic.

# In[23]:


import multiprocessing

with multiprocessing.Pool() as pool:
    test_processed_docs = pool.map(preprocess, newsgroups_test.data)


# In[24]:


test_bow_corpus = [dictionary.doc2bow(doc) for doc in test_processed_docs]


# Get the predicted values from the model

# In[25]:


y_pred = []
for doc in test_bow_corpus:
    pred_all = sorted(lda_model[doc], key=lambda tup: -1 * tup[1])
    pred_cat = categories_map[pred_all[0][0]]
    y_pred.append(newsgroups_test.target_names.index(pred_cat))


# Get the ground truth values from the target field in the data

# In[26]:


y_true = newsgroups_test.target


# In[27]:


newsgroups_test.target_names


# Accuracy is the proportion of correct predictions the model makes:
#
# Accuracy = Number of correct predictions / Total number of predictions

# In[28]:


from sklearn.metrics import accuracy_score
accuracy_score(y_true, y_pred)


# Accuracy is fine for binary classification problems but less informative here: it only scores the single top topic and ignores the model's belief in the other topics a document covers.
# TODO: find a more appropriate classification measure

# Create a confusion matrix to show this. It is a grid with the predicted values on the horizontal axis and the true values on the vertical axis.

# In[29]:


# creating a confusion matrix (y_pred across, y_true down)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)


# In[30]:


cm


# You can see that the diagonal has the high numbers, where the prediction was correct.
# There are 174 messages where the model predicted hockey but the ground truth was cars. This could easily be crossover between cars and sports:
# people discussing cars in the hockey newsgroup.

# ### pyLDAvis ###
# Interactive topic model visualization
#
# This is a really good library for visualising the topics and how they interact with each other. It sits on top of the model and pulls out a ton of extra information.
# Note that running it freezes the Python kernel, as it launches a web server, so I've attached a screenshot instead.
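# If you would rather keep the visualisation inline, pyLDAvis can render inside the notebook instead of blocking on a local server. A sketch, left commented out like the cell below, assuming a pyLDAvis version where the gensim helper lives at pyLDAvis.gensim:

# In[ ]:


# import pyLDAvis
# import pyLDAvis.gensim
# prepared = pyLDAvis.gensim.prepare(lda_model, bow_corpus, dictionary)
# pyLDAvis.display(prepared)  # renders inline HTML rather than launching a server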
# In[31]:


# import pyLDAvis.gensim
# pyLDAvis.enable_notebook()
# prepared = pyLDAvis.gensim.prepare(lda_model, bow_corpus, dictionary)
# pyLDAvis.show(prepared)


# ![title](pyLDAvis_screenshot.png)

# * You can see topic 1, top left, is the sports topic; it is large because it contains both hockey and baseball in a single topic.
# * 2, 3 and 6 are cars, space and guns, with a margin of crossover.
# * 7 and 5 are the religion topics, with large crossover. I would have expected even more, though I guess at some point a large overlap just becomes seen as one large topic.
# * 4, windows, is out on its own.

# ### Conclusion

# This definitely needs some tuning, which I'll pick up in the future. Places for tuning are:
# * The preprocessing
#     * Do we need both stemming and lemmatization?
#     * Try a different stemmer. The SnowballStemmer is meant to strike a balance between being too aggressive and too passive.
# * The LDA hyperparameters
#     * Alpha and eta. The number of passes may also produce different results.
# * A different model
#     * I see non-negative matrix factorization mentioned a lot.
#     * RNNs are good at handling the abstract nature of language and they retain the sentence context. (Though the last time I used an RNN I got better results with a non-DL model.)
#
# Ultimately this is a multiclass classification problem and I would expect to get better results using supervised learning algorithms.

# I was discussing this with someone and they mentioned using bigrams to handle the context that gets lost when using a bag of words. I would like to look into this. Thinking about it later, I'm hoping that bigrams will give a sort of chain that the model can pick up on.
# i.e. now we have A, B, C.
# With bigrams we would have A, B, C plus AB and BC, which gives a sort of relationship from A to C.
# I'm not sure the model will pick that relationship up, but bigrams will definitely add a stronger signal for the model. One way to generate them is sketched below.
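# A minimal sketch of that idea using gensim's Phrases model. The min_count value is an arbitrary assumption, not something tuned, and note that the Phraser normally merges frequent pairs, so here the merged tokens are appended to keep the original unigrams too:

# In[ ]:


from gensim.models.phrases import Phrases, Phraser

# Learn frequent adjacent token pairs from the preprocessed training docs
bigram_model = Phraser(Phrases(processed_docs, min_count=15))

# Keep the unigrams and append any detected bigrams (merged tokens contain '_',
# which simple_preprocess never emits, so the check is safe here)
docs_with_bigrams = [
    doc + [token for token in bigram_model[doc] if '_' in token]
    for doc in processed_docs
]

# A new Dictionary and bow corpus built from docs_with_bigrams could then
# feed LdaMulticore exactly as before.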