from nltk.corpus import stopwords
import pprint as pp

stopset = set(stopwords.words('english'))
print type(stopset)
stopset.update(["ruby tuesday"])  # add a token
stopset.remove("own")             # remove a token

# single lang
print "--English stopset"
print stopset

# multi lang
print
print "--Multi language stopset"
langs = ['danish', 'dutch', 'english', 'french', 'german', 'italian',
         'norwegian', 'portuguese', 'russian', 'spanish', 'swedish']
stop_list = []
for lang in langs:
    stop_list.extend(stopwords.words(lang))
stop_words_set = set(stop_list)  # -- could save to disk --
print stop_words_set

# one document per line of the corpus file
with open('text_corpus.txt', 'r') as f:
    documents = []
    for line in f.readlines():
        documents.append(line.strip())
pp.pprint(documents)

from gensim import corpora, models, similarities
import logging

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO, filename="./log/topic-log")
logr = logging.getLogger("topic_model")
logr.info("#" * 15 + " started " + "#" * 15)

print "Dictionary (full text corpus):"
dictionary = corpora.Dictionary(line.lower().split() for line in open('text_corpus.txt'))
print dictionary
print (dictionary.token2id)
print

print "Dictionary (removed stopwords and once-ids):"
# ids of stopwords that actually occur in the corpus
stop_ids = [dictionary.token2id[stopword] for stopword in stop_words_set
            if stopword in dictionary.token2id]
# ids of tokens that appear in only one document
once_ids = [tokenid for tokenid, corpus_freq in dictionary.dfs.iteritems()
            if corpus_freq == 1]
# remove stop_ids + once_ids
dictionary.filter_tokens(bad_ids=stop_ids + once_ids, good_ids=None)
## consider: dictionary.filter_extremes(no_below=2)
dictionary.compactify()  # close the id gaps left by the removed tokens
print dictionary
print (dictionary.token2id)

import copy

print "Add documents to dictionary dynamically:"
print "doc to add = \"Pooh bear says, 'People say nothing is impossible, but I do nothing every day.'\""
print
# filter by the stopword strings themselves (stop_ids holds integer token ids, not words)
doc_tokens = [item for item in
              "Pooh bear says, 'People say nothing is impossible, but I do nothing every day.'".lower().split()
              if item not in stop_words_set]
print "doc tokenized =", doc_tokens
print
docs = [doc_tokens]
d = copy.deepcopy(dictionary)  # work on a copy so the filtered dictionary stays unchanged
d.add_documents(docs)
d.compactify()
print "#NOTE: since we were only splitting on space, the punctuation is included."
print
print d
print d.token2id
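
# A minimal optional sketch: the filter_extremes call mentioned in the comment above
# could replace the manual stop_ids/once_ids pruning, and the dictionary can be saved
# and reloaded from disk. The names d2/loaded and the filename 'corpus_dictionary.dict'
# are illustrative assumptions, not part of the pipeline above.
d2 = corpora.Dictionary(line.lower().split() for line in open('text_corpus.txt'))
d2.filter_extremes(no_below=2)  # drop once-only tokens; note the default no_above=0.5
                                # also drops tokens appearing in more than half the docs
d2.compactify()
d2.save('corpus_dictionary.dict')              # gensim's native binary format
loaded = corpora.Dictionary.load('corpus_dictionary.dict')
print loaded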
# convert each document to a bag-of-words vector: (token id, count) pairs
vector_corpus = []
with open('text_corpus.txt', 'r') as f:
    for line in f.readlines():
        vector_corpus.append(dictionary.doc2bow(line.lower().split()))
print "Vector corpus:"
pp.pprint(vector_corpus)
print dictionary

# save to disk in Matrix Market format, then read it back
corpora.MmCorpus.serialize('vector_corpus.mm', vector_corpus)
serialized_corpus = corpora.MmCorpus('vector_corpus.mm')
pp.pprint(list(serialized_corpus))

tfidf = models.TfidfModel(vector_corpus, normalize=False)  # trains the model
print tfidf
corpus_tfidf = tfidf[vector_corpus]
print (dictionary.token2id)
for doc in corpus_tfidf:
    print doc
# tfidf = (term frequency) * (inverse document frequency)
# classic form: (# of instances of word in a doc / # of words in that doc) * ln(# of total docs / # of docs containing the word)
# gensim's default weighting uses the raw term count and a base-2 log for the idf part
# the tfidf model can be used to convert any (uniq id, count per doc) vector to (uniq id, tfidf score)

from itertools import chain

number_of_clusters = 3
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary,
                      num_topics=number_of_clusters)  # initialize an LSI transformation
lda = models.ldamodel.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=number_of_clusters,
                               update_every=1, chunksize=10000, passes=1)
corpus_lsi = lsi[corpus_tfidf]  # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi
corpus_lda = lda[corpus_tfidf]
# for item in corpus_lsi:
#     print (item)

print "-" * 10 + "LDA" + "-" * 10
for t, item in enumerate(lda.print_topics(number_of_clusters)):
    print "topic#{0}: {1}".format(t, item)
print
for item in corpus_lda:
    print item
print
print

# Set the threshold to roughly 1/#clusters; to check that this is sane,
# we average all per-document topic scores (for LDA they sum to 1 per document):
scores = list(chain(*[[score for topic, score in doc] for doc in corpus_lda]))
threshold = sum(scores) / len(scores)
print "threshold:", threshold
print

# assumes every document gets a score for all three topics
cluster1 = [j for i, j in zip(corpus_lda, documents) if i[0][1] > threshold]
cluster2 = [j for i, j in zip(corpus_lda, documents) if i[1][1] > threshold]
cluster3 = [j for i, j in zip(corpus_lda, documents) if i[2][1] > threshold]
print "topic#0: {0}".format(cluster1)
print "topic#1: {0}".format(cluster2)
print "topic#2: {0}".format(cluster3)
print
print

print "-" * 10 + "LSI" + "-" * 10
for t, item in enumerate(lsi.print_topics(number_of_clusters)):
    print "topic#{0}: {1}".format(t, item)
print
for item in corpus_lsi:
    print item
# print lsi.show_topics()
# print lsi.print_topic(0, topn=1)
# save to disk
# print lsi.projection.s
# lsi.save('corpus_lsi.lsi')
# lsi = models.LsiModel.load('corpus_lsi.lsi')
print
print
# models.lsimodel.clip_spectrum(0.1, 4, discard=0.001)

# Same averaging trick for the LSI scores (these are projection weights, not probabilities):
scores = list(chain(*[[score for topic, score in doc] for doc in corpus_lsi]))
threshold = sum(scores) / len(scores)
print "threshold:", threshold
print

cluster1 = [j for i, j in zip(corpus_lsi, documents) if i[0][1] > threshold]
cluster2 = [j for i, j in zip(corpus_lsi, documents) if i[1][1] > threshold]
cluster3 = [j for i, j in zip(corpus_lsi, documents) if i[2][1] > threshold]
print "topic#0: {0}".format(cluster1)
print "topic#1: {0}".format(cluster2)
print "topic#2: {0}".format(cluster3)
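
# A minimal optional sketch: the `similarities` module imported earlier can index
# the LSI-projected corpus and rank documents against a query. Here the first
# corpus line is reused as the query, so it should rank itself first; the names
# index/query_doc/sims are illustrative assumptions.
index = similarities.MatrixSimilarity(lsi[corpus_tfidf])   # index every doc in LSI space

query_doc = documents[0]                                   # reuse the first corpus line as a demo query
query_bow = dictionary.doc2bow(query_doc.lower().split())  # tokens missing from the dictionary are ignored
query_lsi = lsi[tfidf[query_bow]]                          # same bow -> tfidf -> lsi pipeline as the corpus

sims = sorted(enumerate(index[query_lsi]), key=lambda x: -x[1])
for doc_id, score in sims:                                 # cosine similarity, best match first
    print "{0:.3f}  {1}".format(score, documents[doc_id])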
#play space