#!/usr/bin/env python
# coding: utf-8

# # spaCy Tutorial

# **(C) 2019-2024 by [Damir Cavar](http://damir.cavar.me/)**

# **Version:** 1.8, January 2024

# **Download:** This and various other Jupyter notebooks are available from my [GitHub repo](https://github.com/dcavar/python-tutorial-for-ipython).

# This is a tutorial related to the L665 course on Machine Learning for NLP focusing on Deep Learning, Spring 2018, and [L645 Advanced Natural Language Processing](http://damir.cavar.me/l645/) in Fall 2023 at Indiana University. The following tutorial assumes that you are using a newer distribution of [Python 3.x](https://python.org/) and [spaCy](https://spacy.io/) 3.5 or newer.

# ## Requirements
#
# The following code examples presuppose a running [Python 3.x](https://python.org/) environment with [Jupyter Lab](https://jupyter.org/) and [spaCy](https://spacy.io/) installed.
#
# To install [spaCy](https://spacy.io/), follow the instructions on the [Install spaCy page](https://spacy.io/usage).

# In[ ]:

get_ipython().system('pip install -U pip setuptools wheel')

# The following installation of spaCy is ideal for my environment, i.e., using a GPU and CUDA 12.x. See the [spaCy homepage](https://spacy.io/usage) for detailed installation instructions.

# In[ ]:

get_ipython().system("pip install -U 'spacy[cuda12x,transformers,lookups,ja]'")

# Once [spaCy](https://spacy.io/) is installed, install the language models using the following commands.
#
# For the small English model:
#
#     python -m spacy download en_core_web_sm
#
# For the medium English language model:
#
#     python -m spacy download en_core_web_md
#
# For the large English language model:
#
#     python -m spacy download en_core_web_lg
#
# For the small Spanish language model:
#
#     python -m spacy download es_core_news_sm

# In[ ]:

get_ipython().system('python -m spacy download en_core_web_sm')
get_ipython().system('python -m spacy download en_core_web_md')
get_ipython().system('python -m spacy download en_core_web_lg')
get_ipython().system('python -m spacy download es_core_news_sm')

# ## Introduction to spaCy

# Follow the instructions on the [spaCy homepage](https://spacy.io/usage/) about the installation of the module and the language models. Your local spaCy module is correctly installed if the following command is successful:

# In[1]:

import spacy

# We can load the small Spanish NLP pipeline in the following way:

# In[2]:

nlp = spacy.load("es_core_news_sm")

# ### Tokenization

# In[3]:

doc = nlp(u'Como estas? Estoy bien.')
for token in doc:
    print(token.text, token.lemma_)

# ### Part-of-Speech Tagging

# We can tokenize and part-of-speech tag the individual tokens using the following code:

# In[4]:

doc = nlp(u'Como estas? Estoy bien.')
for token in doc:
    print("\t".join( (token.text, str(token.idx), token.lemma_, token.pos_, token.tag_, token.dep_,
                      token.shape_, str(token.is_alpha), str(token.is_stop) )))

# The above output contains, for every token in a line, the token itself, its character offset, the lemma, the coarse Part-of-Speech tag, the fine-grained tag, the dependency label, the orthographic shape (upper- and lower-case characters as X or x respectively), a boolean indicating whether the token consists of alphabetic characters, and a boolean indicating whether it is a *stopword*.

# ### Dependency Parse

# Using the same approach as above for PoS-tags, we can print the Dependency Parse relations:

# In[5]:

for token in doc:
    print(token.text, token.dep_, token.head.text, token.head.pos_,
          [child for child in token.children])

# As specified in the code, each line represents one token: the token itself, its dependency relation, the head token it depends on, the part of speech of that head, and the list of the token's syntactic children.
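# The dependency tree can also be navigated programmatically via *Token.head*, *Token.children*, and *Doc.sents*. The following is a minimal sketch, reusing the *doc* object from the cell above: for every sentence it prints the syntactic root and, for every token, the chain of heads up to that root.

# In[ ]:

for sent in doc.sents:
    # the sentence root is the token that heads the dependency tree of the sentence
    print("root:", sent.root.text)
    for token in sent:
        # follow the head links from the token up to the root (the root is its own head)
        path = [token.text]
        t = token
        while t.head is not t:
            t = t.head
            path.append(t.text)
        print("  " + " -> ".join(path))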
# ### Named Entity Recognition

# Similarly to PoS-tags and Dependency Parse relations, we can print out Named Entity labels. We first load the large English pipeline:

# In[6]:

nlp = spacy.load("en_core_web_lg")

# In[8]:

text = "John Lee Hooker loves Ali Hassan Kuban when driving on the highway."
doc = nlp(text)
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)

# We can extend the input with some more entities:

# In[9]:

doc = nlp(u'Ali Hassan Kuban said that Apple Inc. from California will buy Google in May 2018.')

# The corresponding NE-labels are:

# In[10]:

for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)

# ### Pattern Matching in spaCy

# You can define patterns in [spaCy](https://spacy.io/) and generate a label (here *HelloWorld*) whenever there is a matching pattern in some text, using the [spaCy](https://spacy.io/) [Matcher](https://spacy.io/api/matcher) class. In the code below we print out the label, the offsets of the matching sub-string, and the actual match string in the text.

# In[ ]:

from spacy.matcher import Matcher

matcher = Matcher(nlp.vocab)
pattern = [{'LOWER': 'hello'}, {'IS_PUNCT': True}, {'LOWER': 'world'}]
matcher.add('HelloWorld', [pattern])

doc = nlp(u'Hello, world! Hello... world!')
matches = matcher(doc)
for match_id, start, end in matches:
    string_id = nlp.vocab.strings[match_id]  # Get string representation
    span = doc[start:end]  # The matched span
    print(match_id, string_id, start, end, span.text)
print("-" * 50)
doc = nlp(u'Hello, world! Hello world!')
matches = matcher(doc)
for match_id, start, end in matches:
    string_id = nlp.vocab.strings[match_id]  # Get string representation
    span = doc[start:end]  # The matched span
    print(match_id, string_id, start, end, span.text)

# ### What spaCy is Missing

# From a linguistic standpoint, when looking at the analytical output of the NLP pipeline in spaCy, some important components are missing:

# - Clause boundary detection
# - Anaphora resolution (partially solved by coreference add-ons)
# - Temporal reference resolution
# - ...

# There are add-on modules that provide annotations for additional linguistic levels, for example:
#
# - Constituent structure trees (scope relations over constituents and phrases)
# - Coreference analysis
#
# You can find various such add-ons in the [spaCy Universe](https://spacy.io/universe).

# #### Clause Boundary Detection

# Complex sentences consist of clauses. For precise processing of the semantic properties of natural language utterances we need to segment sentences into clauses. The following sentence:

# *The man said that the woman claimed that the child broke the toy.*

# can be broken into the following clauses:

# - Matrix clause: [ *the man said* ]
# - Embedded clause: [ *that the woman claimed* ]
# - Embedded clause: [ *that the child broke the toy* ]

# These clauses do not form an ordered list or flat sequence; they are in fact hierarchically organized. The matrix clause verb selects as its complement an embedded finite clause with the complementizer *that*. The embedded predicate *claimed* selects the same kind of clausal complement.

# We express this hierarchical relation in the form of embedding in tree representations:

# [ *the man said* [ *that the woman claimed* [ *that the child broke the toy* ] ] ]

# Or using a graphical representation in the form of a tree.
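# SpaCy has no notion of a clause, but as a rough illustration we can approximate this bracketing from the dependency parse: every token whose dependency label marks a clausal relation (for example *ccomp* for clausal complements) heads an embedded clause, and the span from its left edge to its right edge covers that clause. The following is a minimal, heuristic sketch along these lines, reusing the English pipeline loaded above; the set of clausal labels is my own choice, not a complete clause boundary detector.

# In[ ]:

doc = nlp(u'The man said that the woman claimed that the child broke the toy.')

# dependency labels that typically mark embedded clauses in the English models
CLAUSAL_DEPS = {"ccomp", "advcl", "relcl", "xcomp"}

for token in doc:
    if token.dep_ == "ROOT" or token.dep_ in CLAUSAL_DEPS:
        # the span between the left and right edge of a clausal head covers its clause;
        # embedded clauses are contained in the spans of their matrix clauses, mirroring the nesting
        clause = doc[token.left_edge.i : token.right_edge.i + 1]
        print(token.dep_, "->", clause.text)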
# The hierarchical relation of sub-clauses is relevant when it comes to semantics. The clause *John sold his car* can be interpreted as an assertion that describes an event with *John* as the agent and *the car* as the object of a *selling* event in the past. If the clause is embedded under a matrix clause that contains a sentential negation, the proposition is no longer asserted to be true: [ *Mary did not say that* [ *John sold his car* ] ]

# It is possible with additional effort to translate the Dependency Trees into clauses and reconstruct the clause hierarchy in a suitable form or data structure, as sketched above. SpaCy itself does not offer a direct data output of such relations.

# One problem still remains, and this is *clausal discontinuities*. None of the common NLP pipelines, spaCy included, can deal with discontinuities in any reasonable way. Discontinuities can be observed when syntactic structures are split over the clause or sentence, or when elements occur in a non-canonical position, as in the following example:

# *Which car did John claim that Mary took?*

# The embedded clause consists of the sequence [ *Mary took which car* ]. One part of the sequence appears dislocated and precedes the matrix clause in the above example. Simple Dependency Parsers cannot generate output that makes it easy to identify and reconstruct the relations of clausal elements in these structures.

# #### Constituent Structure Trees

# Dependency Parse trees are a simplification of the relations of elements in the clause. They ignore structural and hierarchical relations in a sentence or clause, as shown in the examples above. Instead, Dependency Parse trees show simple functional relations in the sense of sentential functions like *subject* or *object* of a verb.

# SpaCy does not output any kind of constituent structure or more detailed relational properties of phrases and more complex structural units in a sentence or clause.

# Since many semantic properties are defined or determined in terms of structural relations and hierarchies, that is *scope relations*, these properties are more complicated to reconstruct or map from the Dependency Parse trees.

# #### Anaphora Resolution

# SpaCy does not offer any anaphora resolution annotation. That is, the referent of a pronoun, as in the following examples, is not annotated in the resulting linguistic data structure:

# - *John saw **him**.*
# - *John said that **he** saw the house.*
# - *Tim sold **his** house. **He** moved to Paris.*
# - *John saw **himself** in the mirror.*

# Knowing the restrictions of pronominal binding (in English, for example), we can partially generate the potential or most likely anaphora-antecedent relations. This, however, is not part of the spaCy output.

# A further problem is that spaCy does not provide parse trees of the *constituent structure* and *clausal hierarchies*, which are crucial for the correct analysis of pronominal anaphoric relations.

# #### Coreference Analysis

# Some NLP pipelines are capable of providing coreference analyses for constituents in clauses. For example, the following sentences should be analyzed as talking about the same subject:

# *The CEO of Apple, Tim Cook, decided to apply for a job at Google. Cook said that he is not satisfied with the quality of the iPhones anymore. He prefers the Pixel 2.*

# The constituents [ *the CEO of Apple, Tim Cook* ] in the first sentence, [ *Cook* ] in the second sentence, and [ *he* ] in the third should all be tagged as referencing the same entity, that is, the one mentioned in the first sentence. SpaCy does not provide such a level of analysis or annotation.
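# While spaCy does not resolve these links itself, it does provide the raw material a coreference component would need. The following is a naive sketch, not a coreference resolver: it only collects candidate mentions from the example above, namely PERSON and ORG entities and all pronouns. Linking these mentions into coreference chains requires one of the add-on components mentioned above.

# In[ ]:

coref_text = ("The CEO of Apple, Tim Cook, decided to apply for a job at Google. "
              "Cook said that he is not satisfied with the quality of the iPhones anymore. "
              "He prefers the Pixel 2.")
coref_doc = nlp(coref_text)

# entity mentions that could serve as antecedents
for ent in coref_doc.ents:
    if ent.label_ in ("PERSON", "ORG"):
        print("entity mention:", ent.text, ent.label_)

# pronouns that would have to be linked to one of the antecedents
for token in coref_doc:
    if token.pos_ == "PRON":
        print("pronoun:", token.text)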
# #### Temporal Reference

# For various levels of analysis it is essential to identify the time references in a sentence or utterance, for example the time the utterance is made or the time the described event happened.

# Certain tenses are expressed as periphrastic constructions involving auxiliaries and main verbs. SpaCy does not provide the relevant information to identify these constructions and tenses.

# ## Using the Dependency Parse Visualizer

# In this section we look more closely at Dependency Parse trees.

# In[11]:

import spacy

# We can load the visualizer:

# In[12]:

from spacy import displacy

# Loading the English transformer-based NLP pipeline:

# In[21]:

nlp = spacy.load("en_core_web_trf")

# Process an input sentence:

# In[32]:

#doc = nlp(u'John said yesterday that Mary bought a new car for her older son.')
#doc = nlp(u"Dick ran and Jane danced yesterday.")
#doc = nlp(u"Tim Cook loves apples in the evening.")
#doc = nlp(u"Born in a small town, she took the midnight train going anywhere.")
#doc = nlp(u"John met Peter and Susan Paul.")
doc = nlp(u'The horse that was raced past the barn fell.')

# If you want to generate a visualization with code running outside of the Jupyter notebook, you could use the following code. Do not use it if you are running the notebook; use the *displacy.render* call two cells below instead.
#
# Visualizing the Dependency Parse tree can be achieved by running the following server code and opening a new tab at the URL [http://localhost:5000/](http://localhost:5000/). You can shut down the server by clicking on the stop button at the top of the notebook toolbar. (To launch the server, uncomment the following line and run the cell.)

# In[ ]:

# displacy.serve(doc, style='dep')

# Instead of serving the graph, one can render it directly into a Jupyter Notebook:

# In[33]:

displacy.render(doc, style='dep', jupyter=True, options={"distance": 120})

# In addition to the visualization of the Dependency Trees, we can visualize named entity annotations:

# In[26]:

text = """Apple decided to fire Tim Cook and hire somebody called John Doe as the new CEO. They also discussed a merger with Google. On the long run it seems more likely that Apple will merge with Amazon and Microsoft with Google. The companies will all relocate to Austin in Texas before the end of the century. John Doe bought a Porsche."""

# In[27]:

doc = nlp(text)
displacy.render(doc, style='ent', jupyter=True)

# ## Vectors

# To use vectors in spaCy, you might consider installing the larger models for the particular language. The common module and language packages only come with the small models. The larger models can be installed as described on the [spaCy vectors page](https://spacy.io/usage/vectors-similarity):
#
#     python -m spacy download en_core_web_lg
#
# The large model *en_core_web_lg* ships with a large table of word vectors (several hundred thousand unique vectors).

# Let us restart all necessary modules again, in particular spaCy:

# In[1]:

import spacy

# We will use the English NLP pipeline to process some word lists. Since the small models in spaCy only include context-sensitive tensors, we should use the downloaded large model for better word vectors. We load the large model as follows:

# In[2]:

nlp = spacy.load('en_core_web_lg')
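# As a quick check that the large model is loaded, we can inspect the vector table: *nlp.vocab.vectors.shape* gives the number of vector rows and their dimensionality. This is a minimal sketch added for illustration.

# In[ ]:

n_vectors, vector_dim = nlp.vocab.vectors.shape
print("number of vectors:", n_vectors, "dimensions:", vector_dim)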
# We can process a list of words with the pipeline using the *nlp* object:

# In[3]:

tokens = nlp(u'dog poodle beagle cat banana apple')

# As described in the spaCy chapter *[Word Vectors and Semantic Similarity](https://spacy.io/usage/vectors-similarity)*, the resulting *Doc*, *Span*, and *Token* objects provide a method *similarity()*, which returns the similarity between words:

# In[4]:

for token1 in tokens:
    # print(token1.vector)
    for token2 in tokens:
        print(token1, token2, token1.similarity(token2))

# We can access the *vectors* of these objects using the *vector* attribute:

# In[5]:

tokens = nlp(u'dog cat banana grungle')

for token in tokens:
    print(token.text, token.has_vector, token.vector_norm, token.is_oov)

# The attribute *has_vector* returns a boolean depending on whether the token has a vector in the model or not. The token *grungle* has no vector. It is also out-of-vocabulary (OOV), as the fourth column shows. Consequently, it also has a vector norm of $0$.

# The token vectors here have $300$ dimensions. We can print out the vector for a token:

# In[6]:

n = 0
print(tokens[n].text, len(tokens[n].vector), tokens[n].vector)

# Here is just another example of similarities for some famous words:

# In[7]:

tokens = nlp(u'queen king chef')

for token1 in tokens:
    for token2 in tokens:
        print(token1, token2, token1.similarity(token2))

# ### Similarities in Context

# In spaCy, the parsing, tagging, and NER models make use of vector representations of contexts that represent the *meaning of words*. Such a context *meaning representation* is an array of floats, i.e., a tensor, computed during the NLP pipeline processing. With this approach, words that have not been seen before can still be typed or classified. SpaCy uses a 4-layer convolutional network for the computation of these tensors. In this approach the tensors model a context of four words to the left and right of any given word.

# Let us use the example from the spaCy documentation and check the word *labrador*:

# In[8]:

tokens = nlp(u'labrador')

for token in tokens:
    print(token.text, token.has_vector, token.vector_norm, token.is_oov)

# We can now test it in context:

# In[9]:

doc1 = nlp(u"The labrador barked.")
doc2 = nlp(u"The labrador swam.")
doc3 = nlp(u"The people on Labrador are Canadians.")

dog = nlp(u"dog")

count = 0
for doc in [doc1, doc2, doc3]:
    lab = doc
    count += 1
    print(str(count) + ":", lab.similarity(dog))

# Using this strategy we can compute document or text similarities as well:

# In[10]:

docs = ( nlp(u"Paris is the largest city in France."),
         nlp(u"Vilnius is the capital of Lithuania."),
         nlp(u"An emu is a large bird.") )

for x in range(len(docs)):
    zset = set(range(len(docs)))
    zset.remove(x)
    for y in zset:
        print(x, y, docs[x].similarity(docs[y]))

# We can vary the word order in sentences and compare them:

# In[11]:

docs = [nlp(u"dog bites man"), nlp(u"man bites dog"),
        nlp(u"man dog bites"), nlp(u"cat eats mouse")]

for doc in docs:
    for other_doc in docs:
        print('"' + doc.text + '"', '"' + other_doc.text + '"', doc.similarity(other_doc))

# Note that the three sentences that differ only in word order receive identical similarity scores: *Doc.similarity* is based on the average of the token vectors, so word order does not affect it.
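# By default, *similarity()* computes the cosine similarity between these vectors. The following minimal sketch reproduces *Doc.similarity* with numpy, using the averaged document vectors:

# In[ ]:

import numpy

doc_a = nlp(u"dog bites man")
doc_b = nlp(u"cat eats mouse")

# Doc.vector is the average of the token vectors
v1, v2 = doc_a.vector, doc_b.vector

# cosine similarity computed manually
cosine = numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))

print("manual cosine:  ", cosine)
print("doc.similarity: ", doc_a.similarity(doc_b))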
# ### Custom Models

# #### Optimization

# In[20]:

nlp = spacy.load('en_core_web_lg')

# ## Training Models

# This example code for training an NER model is based on the [training example in spaCy](https://github.com/explosion/spaCy/blob/master/examples/training/train_ner.py).

# We will import some components from the *__future__* module. Read its [documentation here](https://docs.python.org/3/library/__future__.html). (This import is only needed for backwards compatibility with Python 2 and can be omitted in Python 3.)

# In[1]:

from __future__ import unicode_literals, print_function

# We import the [*random*](https://docs.python.org/3/library/random.html) module for pseudo-random number generation:

# In[2]:

import random

# We import the *Path* object from the [*pathlib*](https://docs.python.org/3/library/pathlib.html) module:

# In[3]:

from pathlib import Path

# We import *spaCy*:

# In[4]:

import spacy

# We also import the *minibatch* and *compounding* functions from *spacy.util*, and the *Example* class from *spacy.training.example*:

# In[5]:

from spacy.util import minibatch, compounding
from spacy.training.example import Example

# The training data is a list of example texts paired with their entity annotations (character offsets and labels):

# In[6]:

TRAIN_DATA = [
    ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
    ("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]

# We create a blank multi-language ('xx') model and add an NER component to its pipeline:

# In[7]:

nlp = spacy.blank("xx")  # create blank Language class
ner = nlp.add_pipe("ner", last=True)

# We add the named entity labels to the NER model:

# In[8]:

for _, annotations in TRAIN_DATA:
    for ent in annotations.get("entities"):
        ner.add_label(ent[2])

# Assuming that the model is empty and untrained, we reset and initialize the weights randomly using:

# In[9]:

nlp.begin_training()

# We would not do this if the model is supposed to be tuned or retrained on new data.

# We collect all pipe names in the model that are not NER-related, so that we can disable them during training:

# In[10]:

pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]

# We can now disable the other pipes and train just the NER component using 100 iterations:

# In[11]:

with nlp.disable_pipes(*other_pipes):  # only train NER
    for itn in range(100):
        random.shuffle(TRAIN_DATA)
        losses = {}
        # batch up the examples using spaCy's minibatch
        batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
        for batch in batches:
            for text, annotations in batch:
                print(text)
                print(annotations)
                doc = nlp.make_doc(text)
                example = Example.from_dict(doc, annotations)
                nlp.update([example],
                           drop=0.5,  # dropout - make it harder to memorise data
                           losses=losses,
                           )
        print("Losses", losses)

# We can test the trained model:

# In[12]:

for text, _ in TRAIN_DATA:
    doc = nlp(text)
    print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
    print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])

# We can define the output directory where the model will be saved as the *models* folder in the directory where the notebook is running:

# In[33]:

output_dir = Path("./models/")

# Save the model to the output directory:

# In[34]:

if not output_dir.exists():
    output_dir.mkdir()
nlp.to_disk(output_dir)

# To make sure everything worked out well, we can test the saved model:

# In[35]:

nlp2 = spacy.load(output_dir)
for text, _ in TRAIN_DATA:
    doc = nlp2(text)
    print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
    print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])

# **(C) 2021-2024 by [Damir Cavar](http://damir.cavar.me/) <>**