#!/usr/bin/env python
# coding: utf-8

# In[1]:

from itertools import chain
import nltk
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite

print(sklearn.__version__)

# # Let's use CoNLL 2002 data to build a NER system
#
# The CoNLL 2002 corpus is available in NLTK. We use the Spanish data.

# In[2]:

nltk.corpus.conll2002.fileids()

# In[3]:

get_ipython().run_cell_magic('time', '', "train_sents = list(nltk.corpus.conll2002.iob_sents('esp.train'))\ntest_sents = list(nltk.corpus.conll2002.iob_sents('esp.testb'))\n")

# Data format:

# In[4]:

train_sents[0]

# ## Features
#
# Next, define some features. In this example we use word identity, word suffix, word shape and word POS tag; some information from nearby words is used as well.
#
# This makes a simple baseline, but you can certainly add and remove features to get (much?) better results - experiment with it.

# In[5]:

def word2features(sent, i):
    word = sent[i][0]
    postag = sent[i][1]
    features = [
        'bias',
        'word.lower=' + word.lower(),
        'word[-3:]=' + word[-3:],
        'word[-2:]=' + word[-2:],
        'word.isupper=%s' % word.isupper(),
        'word.istitle=%s' % word.istitle(),
        'word.isdigit=%s' % word.isdigit(),
        'postag=' + postag,
        'postag[:2]=' + postag[:2],
    ]
    if i > 0:
        word1 = sent[i-1][0]
        postag1 = sent[i-1][1]
        features.extend([
            '-1:word.lower=' + word1.lower(),
            '-1:word.istitle=%s' % word1.istitle(),
            '-1:word.isupper=%s' % word1.isupper(),
            '-1:postag=' + postag1,
            '-1:postag[:2]=' + postag1[:2],
        ])
    else:
        features.append('BOS')

    if i < len(sent)-1:
        word1 = sent[i+1][0]
        postag1 = sent[i+1][1]
        features.extend([
            '+1:word.lower=' + word1.lower(),
            '+1:word.istitle=%s' % word1.istitle(),
            '+1:word.isupper=%s' % word1.isupper(),
            '+1:postag=' + postag1,
            '+1:postag[:2]=' + postag1[:2],
        ])
    else:
        features.append('EOS')

    return features

def sent2features(sent):
    return [word2features(sent, i) for i in range(len(sent))]

def sent2labels(sent):
    return [label for token, postag, label in sent]

def sent2tokens(sent):
    return [token for token, postag, label in sent]

# This is what word2features extracts:

# In[6]:

sent2features(train_sents[0])[0]

# Extract the features from the data:

# In[7]:

get_ipython().run_cell_magic('time', '', 'X_train = [sent2features(s) for s in train_sents]\ny_train = [sent2labels(s) for s in train_sents]\n\nX_test = [sent2features(s) for s in test_sents]\ny_test = [sent2labels(s) for s in test_sents]\n')
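# The feature set above is intentionally minimal. As a hedged, illustrative
# sketch (not part of the original notebook), here are a few extra features
# one might experiment with: a word prefix, a collapsed word-shape signature,
# and a digit flag. The extra_word_features helper below is hypothetical; its
# output could simply be appended to the list built inside word2features.

import re

def extra_word_features(word):
    # Map uppercase letters to 'X', lowercase to 'x', digits to 'd',
    # then collapse repeated characters: "McGregor" -> "XxXxxxxx" -> "XxXx".
    shape = re.sub(r'[A-ZÁÉÍÓÚÑÜ]', 'X', word)
    shape = re.sub(r'[a-záéíóúñü]', 'x', shape)
    shape = re.sub(r'\d', 'd', shape)
    shape = re.sub(r'(.)\1+', r'\1', shape)
    return [
        'word[:3]=' + word[:3],
        'word.shape=' + shape,
        'word.hasdigit=%s' % bool(re.search(r'\d', word)),
    ]

extra_word_features('McGregor')  # ['word[:3]=McG', 'word.shape=XxXx', 'word.hasdigit=False']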
# ## Train the model
#
# To train the model, we create a pycrfsuite.Trainer, load the training data, and call its train method.
# First, create the pycrfsuite.Trainer and load the training data into CRFsuite:

# In[8]:

get_ipython().run_cell_magic('time', '', 'trainer = pycrfsuite.Trainer(verbose=False)\n\nfor xseq, yseq in zip(X_train, y_train):\n    trainer.append(xseq, yseq)\n')

# Set training parameters. We will use the L-BFGS training algorithm (the default) with Elastic Net (L1 + L2) regularization.

# In[9]:

trainer.set_params({
    'c1': 1.0,   # coefficient for L1 penalty
    'c2': 1e-3,  # coefficient for L2 penalty
    'max_iterations': 50,  # stop earlier

    # include transitions that are possible, but not observed
    'feature.possible_transitions': True
})

# Possible parameters for the default training algorithm:

# In[10]:

trainer.params()

# Train the model:

# In[11]:

get_ipython().run_cell_magic('time', '', "trainer.train('conll2002-esp.crfsuite')\n")

# trainer.train saves the model to a file:

# In[12]:

get_ipython().system('ls -lh ./conll2002-esp.crfsuite')

# We can also get information about the final state of the model by looking at the trainer's logparser. If we had tagged our input data using the optional group argument of append, and had used the optional holdout argument during train, there would be information about the trainer's performance on the holdout set as well.

# In[13]:

trainer.logparser.last_iteration

# We can also get this information for every step using trainer.logparser.iterations:

# In[15]:

print(len(trainer.logparser.iterations), trainer.logparser.iterations[-1])

# ## Make predictions
#
# To use the trained model, create a pycrfsuite.Tagger, open the model, and use the "tag" method:

# In[13]:

tagger = pycrfsuite.Tagger()
tagger.open('conll2002-esp.crfsuite')

# Let's tag a sentence to see how it works:

# In[14]:

example_sent = test_sents[0]
print(' '.join(sent2tokens(example_sent)), end='\n\n')

print("Predicted:", ' '.join(tagger.tag(sent2features(example_sent))))
print("Correct:  ", ' '.join(sent2labels(example_sent)))

# ## Evaluate the model

# In[15]:

def bio_classification_report(y_true, y_pred):
    """
    Classification report for a list of BIO-encoded sequences.
    It computes token-level metrics and discards "O" labels.

    Note that it requires scikit-learn 0.15+ (or a version from github
    master) to calculate averages properly!
    """
    lb = LabelBinarizer()
    y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
    y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))

    tagset = set(lb.classes_) - {'O'}
    tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
    class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}

    return classification_report(
        y_true_combined,
        y_pred_combined,
        labels=[class_indices[cls] for cls in tagset],
        target_names=tagset,
    )

# Predict entity labels for all sentences in our testing set ('testb' Spanish data):

# In[16]:

get_ipython().run_cell_magic('time', '', 'y_pred = [tagger.tag(xseq) for xseq in X_test]\n')

# ...and check the result. Note this report is not comparable to the results in CoNLL 2002 papers because here we check per-token results (not per-entity). Per-entity numbers will be worse.

# In[17]:

print(bio_classification_report(y_test, y_pred))
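# The official CoNLL 2002 scoring (the conlleval script) is entity-level. As a
# hedged sketch of what such a score looks like, the hypothetical helpers
# bio_entities and entity_f1 below compute a strict exact-match entity F1;
# they are illustrative, not part of the original notebook or of pycrfsuite.

def bio_entities(labels):
    """Collect (start, end, type) spans from one BIO-encoded label sequence."""
    spans, start, etype = [], None, None
    for i, label in enumerate(labels):
        starts_new = label.startswith('B-') or (
            label.startswith('I-') and etype != label[2:])
        if (starts_new or label == 'O') and etype is not None:
            spans.append((start, i, etype))
            start = etype = None
        if starts_new:
            start, etype = i, label[2:]
    if etype is not None:
        spans.append((start, len(labels), etype))
    return spans

def entity_f1(y_true, y_pred):
    """Micro-averaged F1 over exact-match entity spans."""
    tp = fp = fn = 0
    for true_seq, pred_seq in zip(y_true, y_pred):
        true_spans = set(bio_entities(true_seq))
        pred_spans = set(bio_entities(pred_seq))
        tp += len(true_spans & pred_spans)
        fp += len(pred_spans - true_spans)
        fn += len(true_spans - pred_spans)
    if tp == 0:
        return 0.0
    precision, recall = tp / (tp + fp), tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

print("Entity-level F1: %0.4f" % entity_f1(y_test, y_pred))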
# ## Let's check what the classifier learned

# In[18]:

from collections import Counter
info = tagger.info()

def print_transitions(trans_features):
    for (label_from, label_to), weight in trans_features:
        print("%-6s -> %-7s %0.6f" % (label_from, label_to, weight))

print("Top likely transitions:")
print_transitions(Counter(info.transitions).most_common(15))

print("\nTop unlikely transitions:")
print_transitions(Counter(info.transitions).most_common()[-15:])

# We can see that, for example, it is very likely that the beginning of an organization name (B-ORG) will be followed by a token inside an organization name (I-ORG), but transitions to I-ORG from tokens with other labels are penalized.
# Also note the I-PER -> B-LOC transition: its positive weight means the model thinks a person name is often followed by a location.
#
# Check the state features:

# In[19]:

def print_state_features(state_features):
    for (attr, label), weight in state_features:
        print("%0.6f %-6s %s" % (weight, label, attr))

print("Top positive:")
print_state_features(Counter(info.state_features).most_common(20))

print("\nTop negative:")
print_state_features(Counter(info.state_features).most_common()[-20:])

# Some observations:
#
# * **8.743642 B-ORG word.lower=psoe-progresistas** - the model remembered the names of some entities; maybe it is overfit, maybe our features are not adequate, or maybe remembering is indeed helpful;
# * **5.195429 I-LOC -1:word.lower=calle** - "calle" means "street" in Spanish; the model learns that if the previous word was "calle", the token is likely part of a location;
# * **-3.529449 O word.isupper=True**, **-2.913103 O word.istitle=True** - UPPERCASED or TitleCased words are likely entities of some kind;
# * **-2.585756 O postag=NP** - proper nouns (NP is the proper-noun tag in the Spanish tagset) are often entities.

# ## What to do next
#
# 1. Load the 'testa' Spanish data.
# 2. Use it to develop better features and to find the best model parameters.
# 3. Apply the model to the 'testb' data again.
#
# The model in this notebook is just a starting point; you certainly can do better! A rough sketch of these three steps is given below.
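# As promised above, a minimal, hedged sketch of that workflow. The c1/c2 grid
# and the token-accuracy selection score are arbitrary illustrations, not
# values from the original notebook; the code assumes X_train, y_train and the
# sent2* helpers defined earlier are still in scope.

dev_sents = list(nltk.corpus.conll2002.iob_sents('esp.testa'))
X_dev = [sent2features(s) for s in dev_sents]
y_dev = [sent2labels(s) for s in dev_sents]

best_model, best_score = None, -1.0
for c1 in (0.1, 1.0):
    for c2 in (1e-3, 1e-1):
        trainer = pycrfsuite.Trainer(verbose=False)
        for xseq, yseq in zip(X_train, y_train):
            trainer.append(xseq, yseq)
        trainer.set_params({'c1': c1, 'c2': c2, 'max_iterations': 50,
                            'feature.possible_transitions': True})
        model = 'conll2002-esp-c1_%s-c2_%s.crfsuite' % (c1, c2)
        trainer.train(model)

        tagger = pycrfsuite.Tagger()
        tagger.open(model)
        y_dev_pred = [tagger.tag(xseq) for xseq in X_dev]
        tagger.close()

        # Token-level accuracy as a quick stand-in; an entity-level metric
        # (e.g. conlleval) would be a better model-selection criterion.
        correct = sum(p == t for ps, ts in zip(y_dev_pred, y_dev)
                             for p, t in zip(ps, ts))
        total = sum(len(ts) for ts in y_dev)
        score = correct / total
        if score > best_score:
            best_model, best_score = model, score

print(best_model, best_score)
# Finally, re-open best_model with pycrfsuite.Tagger and evaluate it on the
# 'esp.testb' data exactly as above.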