We get the cirrussearch dump of wikinews (a dump meant for Elasticsearch indexing).
# Language used below for NLTK sentence tokenization and stopword lists.
LANG="english"
%%bash
# Download the cirrussearch dump once; skip the fetch if the file already
# exists locally.  Variable expansions are quoted to survive any future
# filename containing spaces or glob characters (shellcheck SC2086).
fdate=20170327
fname="enwikinews-$fdate-cirrussearch-content.json.gz"
if [ ! -e "$fname" ]
then
    wget "https://dumps.wikimedia.org/other/cirrussearch/$fdate/$fname"
fi
# iterator
import gzip
import json
FDATE = 20170327
FNAME = "enwikinews-%s-cirrussearch-content.json.gz" % FDATE

def iter_texts(fpath=FNAME):
    """Yield the title and body text of every content page in the dump.

    The cirrussearch dump is JSON-lines: index header lines (which carry
    no "title" key) alternate with content lines; header lines are skipped.
    """
    with gzip.open(fpath, "rt") as dump:
        for raw_line in dump:
            record = json.loads(raw_line)
            if "title" not in record:
                continue
            yield record["title"]
            yield record["text"]
# also prepare nltk
import nltk
# Fetch the sentence-tokenizer models ("punkt") and the stopword lists used
# below; these calls are no-ops when the resources are already installed.
nltk.download("punkt")
nltk.download("stopwords")
[nltk_data] Downloading package punkt to /home/ubuntu/nltk_data... [nltk_data] Package punkt is already up-to-date! [nltk_data] Downloading package stopwords to /home/ubuntu/nltk_data... [nltk_data] Package stopwords is already up-to-date!
True
We arrange the corpus as required by gensim.
# make a custom tokenizer
import re
from nltk.tokenize import sent_tokenize
from nltk.tokenize import RegexpTokenizer
# Use a raw string for the regex: in a plain string '\w' and '\d' are
# invalid escape sequences and raise a DeprecationWarning on Python >= 3.6.
# First alternative matches word tokens (allowing inner hyphens), the
# second matches digit runs with comma thousands separators.
tokenizer = RegexpTokenizer(r'\w[\w-]*|\d[\d,]*')
# prepare a text
def prepare(txt):
# lower case
txt = txt.lower()
return [tokenizer.tokenize(sent)
for sent in sent_tokenize(txt, language=LANG)]
# we put all data in ram, it's not so much
# Flatten every document into per-sentence token lists in a single pass.
corpus = [sentence for text in iter_texts() for sentence in prepare(text)]
# how many sentences and words ?
words_count = sum(len(sentence) for sentence in corpus)
print("Corpus has %d words in %d sentences" % (words_count, len(corpus)))
Corpus has 1003521 words in 46159 sentences
The Phrases
model gives us the possibility of handling common terms, that is, words that appear many times in a text and are there only to link objects together.
While you could remove them, you may lose information, for "the president is in america" is not the same as "the president of america".
The common_terms parameter of Phrases can help you deal with them in a smarter way, keeping them around but preventing them from crushing the frequency statistics.
from gensim.models.phrases import Phrases
Using TensorFlow backend.
# which are the stop words we will use
from nltk.corpus import stopwords
# Bare expression: the notebook echoes the joined stopword list as output.
" ".join(stopwords.words(LANG))
'i me my myself we our ours ourselves you your yours yourself yourselves he him his himself she her hers herself it its itself they them their theirs themselves what which who whom this that these those am is are was were be been being have has had having do does did doing a an the and but if or because as until while of at by for with about against between into through during before after above below to from up down in out on off over under again further then once here there when where why how all any both each few more most other some such no nor not only own same so than too very s t can will just don should now d ll m o re ve y ain aren couldn didn doesn hadn hasn haven isn ma mightn mustn needn shan shouldn wasn weren won wouldn'
# a version of the corpus without stop words
# frozenset gives O(1) membership tests inside the filter below.
stop_words = frozenset(stopwords.words(LANG))

def stopwords_filter(txt):
    """Return the tokens of *txt* that are not English stopwords."""
    return [token for token in txt if token not in stop_words]

st_corpus = [stopwords_filter(sentence) for sentence in corpus]
# bigram std
# Baseline bigram model trained on the stopword-stripped corpus.
%time bigram = Phrases(st_corpus)
# bigram with common terms
# Same model on the full corpus, with stopwords declared as common terms so
# they can appear inside phrases without skewing the frequency statistics.
# NOTE(review): in gensim >= 4.0 this parameter was renamed to
# connector_words — confirm the installed gensim version.
%time bigram_ct = Phrases(corpus, common_terms=stopwords.words(LANG))
CPU times: user 1.33 s, sys: 16 ms, total: 1.34 s Wall time: 1.34 s CPU times: user 1.64 s, sys: 24 ms, total: 1.67 s Wall time: 1.67 s
What are (some of) the bigrams found thanks to common terms
# grams that have more than 2 terms, are those with common terms
# export_phrases yields (bytes_phrase, score) pairs; unpack them instead of
# indexing, and build the set with a set comprehension.
ct_ngrams = {(score, phrase.decode("utf-8"))
             for phrase, score in bigram_ct.export_phrases(corpus)
             if len(phrase.split()) > 2}
# sorted() accepts any iterable — wrapping the set in list() first is
# redundant (flake8-comprehensions C414).
ct_ngrams = sorted(ct_ngrams)
print(len(ct_ngrams), "grams with common terms found")
# highest scores
ct_ngrams[-20:]
510 grams with common terms found
[(5339.47619047619, 'borussia m gladbach'), (5460.194782608696, 'billboard in jakarta'), (5606.450000000001, 'christ of latter-day'), (5862.954248366013, 'skull and bones'), (6006.910714285714, 'preserved in amber'), (6129.452168746287, 'aisyah and doan'), (6158.114416475973, 'funded by your generous'), (6407.371428571429, 'restored as burkina'), (7081.831578947369, 'click on the donate'), (7234.129032258064, 'qatar of intervening'), (7377.621673923561, 'sinks in suva'), (8146.123931623933, 'lahm to hang'), (8163.0819009100105, 'istanbul s ataturk'), (8305.851851851852, 'derails in tabasco'), (9060.929292929293, 'poet of apostasy'), (9593.925133689841, 'creator of kinder'), (10512.09375, 'consulate in irbil'), (12176.904977375565, 'newsworthy and entertaining'), (15829.976470588235, 'santos over nepotism'), (16272.689342403628, 'hotness of bhut')]
# did we find any bigrams with the same first/last words but different
# stopwords in between?
import collections

# Group phrases by their (first, last) token pair; a group with several
# members means the same word pair was joined by different common terms.
by_terms = collections.defaultdict(set)
for ngram, score in bigram_ct.export_phrases(corpus):
    tokens = ngram.split()
    by_terms[(tokens[0], tokens[-1])].add(ngram)
for terms, variants in by_terms.items():
    if len(variants) > 1:
        print(b"-".join(terms).decode("utf-8"), " : ",
              [variant.decode("utf-8") for variant in variants])
location-united : ['location of the united', 'location of united'] magnitude-6 : ['magnitude 6', 'magnitude of 6'] tuition-fees : ['tuition and fees', 'tuition fees'] pleaded-guilty : ['pleaded not guilty', 'pleaded guilty'] found-guilty : ['found not guilty', 'found guilty'] france-germany : ['france germany', 'france and germany'] earlier-week : ['earlier this week', 'earlier in the week'] since-2003 : ['since 2003', 'since the 2003'] contact-admissions : ['contact the admissions', 'contact admissions'] created-text : ['created from text', 'created from the text'] external-inter-wiki : ['external and inter-wiki', 'external inter-wiki']