#!/usr/bin/env python
# coding: utf-8

# # Byte-Pair Encoding tokenization
#
# This material was adapted from the Hugging Face tutorial available here:
#
# https://huggingface.co/learn/nlp-course/chapter6/5?fw=pt
#
# BPE training starts by computing the unique set of words used in the corpus (after the normalization and pre-tokenization steps are completed), then building the vocabulary by taking all the symbols used to write those words. As a very simple example, let’s say our corpus uses these five words:

# In[21]:


corpus = ["hug", "pug", "pun", "bun", "hugs"]


# In[23]:


# The base vocabulary is every character used to write the words of the corpus.
vocab = {c for w in corpus for c in w}
print(vocab)


# After getting this base vocabulary, we add new tokens until the desired vocabulary size is reached by learning merges, which are rules to merge two elements of the existing vocabulary together into a new one. So, at the beginning these merges will create tokens with two characters, and then, as training progresses, longer subwords.
#
# At any step during the tokenizer training, the BPE algorithm will search for the most frequent pair of existing tokens (by “pair,” here we mean two consecutive tokens in a word). That most frequent pair is the one that will be merged, and we rinse and repeat for the next step.
#
# Going back to our previous example, let’s assume the words had the following frequencies:

# In[24]:


corpus = [("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)]


# "hug" was present 10 times in the corpus, "pug" 5 times, "pun" 12 times, "bun" 4 times, and "hugs" 5 times. We start the training by splitting each word into characters (the ones that form our initial vocabulary) so we can see each word as a list of tokens:

# In[4]:


# Note: adjacent string literals are concatenated by Python, so "h" "u" "g" == "hug";
# the spacing is only a visual reminder of the character-level splits.
corpus = [("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)]


# Then we look at pairs. The pair ("h", "u") is present in the words "hug" and "hugs", so 15 times total in the corpus. It’s not the most frequent pair, though: the most frequent is ("u", "g"), which is present in "hug", "pug", and "hugs", for a grand total of 20 times in the corpus.
#
# Thus, the first merge rule learned by the tokenizer is ("u", "g") -> "ug", which means that "ug" will be added to the vocabulary, and the pair should be merged in all the words of the corpus. At the end of this stage, the vocabulary and corpus look like this:

# In[26]:


vocab = ["b", "g", "h", "n", "p", "s", "u", "ug"]
corpus = [("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)]


# Now we have some pairs that result in a token longer than two characters: the pair ("h", "ug"), for instance (present 15 times in the corpus). The most frequent pair at this stage is ("u", "n"), however, present 16 times in the corpus, so the second merge rule learned is ("u", "n") -> "un". Adding that to the vocabulary and merging all existing occurrences leads us to:

# In[28]:


vocab = ["b", "g", "h", "n", "p", "s", "u", "ug", "un"]
corpus = [("h" "ug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("h" "ug" "s", 5)]


# Now the most frequent pair is ("h", "ug"), so we learn the merge rule ("h", "ug") -> "hug", which gives us our first three-letter token. After the merge, the corpus looks like this:

# In[29]:


vocab = ["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]
corpus = [("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)]


# And we continue like this until we reach the desired vocabulary size. In practice, the vocabulary size is controlled by the number of merges we perform: the final vocabulary is the base alphabet plus one new token per merge rule learned.
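# To make the pair counting above concrete, here is a minimal sketch of the tally: a small helper, `most_frequent_pair`, written only for illustration, that counts adjacent pairs weighted by word frequency. On the toy corpus it should report ("u", "g") with a count of 20.

# In[ ]:


from collections import Counter

def most_frequent_pair(word_freqs):
    """word_freqs: list of (list-of-symbols, count) tuples."""
    pair_counts = Counter()
    for symbols, count in word_freqs:
        # every adjacent pair in the word contributes the word's frequency
        for a, b in zip(symbols, symbols[1:]):
            pair_counts[(a, b)] += count
    return pair_counts.most_common(1)[0]

toy = [(["h", "u", "g"], 10), (["p", "u", "g"], 5), (["p", "u", "n"], 12),
       (["b", "u", "n"], 4), (["h", "u", "g", "s"], 5)]
print(most_frequent_pair(toy))  # expected: (('u', 'g'), 20)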
# ### Tokenization Algorithm
#
# Tokenization follows the training process closely, in the sense that new inputs are tokenized by applying the following steps:
#
# 1. Normalization
# 1. Pre-tokenization
# 1. Splitting the words into individual characters
# 1. Applying the merge rules learned in order on those splits
#
# Let’s take the example we used during training, with the three merge rules learned:
# ```
# ("u", "g") -> "ug"
# ("u", "n") -> "un"
# ("h", "ug") -> "hug"
# ```
# The word "bug" will be tokenized as ["b", "ug"]. "mug", however, will be tokenized as ["[UNK]", "ug"] since the letter "m" was not in the base vocabulary. Likewise, the word "thug" will be tokenized as ["[UNK]", "hug"]: the letter "t" is not in the base vocabulary, and applying the merge rules results first in "u" and "g" being merged and then "h" and "ug" being merged.

# **Question**: How do you think the word "unhug" will be tokenized?

# ### Implementing BPE for sub-word tokenization

# Install the Transformers, Datasets, and Evaluate libraries to run this notebook.

# In[9]:


get_ipython().system('pip install datasets evaluate transformers[sentencepiece]')


# First we need a corpus, so let’s create a simple one with a few sentences:

# In[31]:


corpus = [
    "This is a sample corpus.",
    "This corpus will be used to show how subword tokenization works.",
    "This section shows several tokenizer algorithms.",
    "Hopefully, you will be able to understand how they are trained and generate tokens.",
]


# Next, we need to pre-tokenize that corpus into words. Since we are replicating a BPE tokenizer (like GPT-2), we will use the gpt2 tokenizer for the pre-tokenization:

# In[32]:


from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")


# Then we compute the frequencies of each word in the corpus as we do the pre-tokenization:

# In[33]:


from collections import defaultdict

word_freqs = defaultdict(int)

for text in corpus:
    words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
    new_words = [word for word, offset in words_with_offsets]
    for word in new_words:
        word_freqs[word] += 1

print(word_freqs)


# The next step is to compute the base vocabulary, formed by all the characters used in the corpus:

# In[34]:


alphabet = []

for word in word_freqs.keys():
    for letter in word:
        if letter not in alphabet:
            alphabet.append(letter)
alphabet.sort()

print(alphabet)


# We also add the special tokens used by the model at the beginning of that vocabulary. In the case of GPT-2, the only special token is `<|endoftext|>`:

# In[35]:


vocab = ["<|endoftext|>"] + alphabet.copy()


# We now need to split each word into individual characters, to be able to start training:

# In[36]:


splits = {word: [c for c in word] for word in word_freqs.keys()}
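# Before training, it’s worth a quick look at what the byte-level pre-tokenizer gave us: every word that was preceded by a space carries a leading "Ġ" character. The cell below is a small sanity check (the offsets shown in the comment are indicative):

# In[ ]:


# Pre-tokenization of one sentence: words keep a "Ġ" marker for the space in front of them.
print(tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("This is a sample corpus."))
# e.g. [('This', (0, 4)), ('Ġis', (4, 7)), ('Ġa', (7, 9)), ('Ġsample', (9, 16)), ...]

# The character-level split of one of those words, which is where training starts:
print(splits["Ġcorpus"])  # ['Ġ', 'c', 'o', 'r', 'p', 'u', 's']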
# Now that we are ready for training, let’s write a function that computes the frequency of each pair. We’ll need to use this at each step of the training:

# In[37]:


def compute_pair_freqs(splits):
    pair_freqs = defaultdict(int)
    for word, freq in word_freqs.items():
        split = splits[word]
        if len(split) == 1:
            continue
        for i in range(len(split) - 1):
            pair = (split[i], split[i + 1])
            pair_freqs[pair] += freq
    return pair_freqs


# Let’s have a look at a part of this dictionary after the initial splits:

# In[38]:


pair_freqs = compute_pair_freqs(splits)

for i, key in enumerate(pair_freqs.keys()):
    print(f"{key}: {pair_freqs[key]}")
    if i >= 5:
        break


# Finding the most frequent pair only takes a quick loop:

# In[39]:


best_pair = ""
max_freq = None

for pair, freq in pair_freqs.items():
    if max_freq is None or max_freq < freq:
        best_pair = pair
        max_freq = freq

print(best_pair, max_freq)


# So the first merge to learn is ('Ġ', 't') -> 'Ġt', and we add 'Ġt' to the vocabulary:

# In[40]:


merges = {("Ġ", "t"): "Ġt"}
vocab.append("Ġt")


# To continue, we need to apply that merge in our splits dictionary. Let’s write another function for this:

# In[41]:


def merge_pair(a, b, splits):
    for word in word_freqs:
        split = splits[word]
        if len(split) == 1:
            continue

        i = 0
        while i < len(split) - 1:
            if split[i] == a and split[i + 1] == b:
                split = split[:i] + [a + b] + split[i + 2 :]
            else:
                i += 1
        splits[word] = split
    return splits


# And we can have a look at the result of the first merge:

# In[42]:


splits = merge_pair("Ġ", "t", splits)
print(splits["Ġtrained"])


# Now we have everything we need to loop until we have learned all the merges we want. Let’s aim for a vocab size of 50:

# In[43]:


vocab_size = 50

while len(vocab) < vocab_size:
    pair_freqs = compute_pair_freqs(splits)
    best_pair = ""
    max_freq = None
    for pair, freq in pair_freqs.items():
        if max_freq is None or max_freq < freq:
            best_pair = pair
            max_freq = freq
    splits = merge_pair(*best_pair, splits)
    merges[best_pair] = best_pair[0] + best_pair[1]
    vocab.append(best_pair[0] + best_pair[1])


# As a result, we’ve learned as many merge rules as it took to grow the initial vocabulary (the characters of the alphabet plus the `<|endoftext|>` special token) to 50 tokens:

# In[44]:


print(merges)


# In[45]:


print(vocab)


# To tokenize a new text, we pre-tokenize it, split it, then apply all the merge rules learned:

# In[46]:


def tokenize(text):
    pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text)
    pre_tokenized_text = [word for word, offset in pre_tokenize_result]
    splits = [[l for l in word] for word in pre_tokenized_text]
    for pair, merge in merges.items():
        for idx, split in enumerate(splits):
            i = 0
            while i < len(split) - 1:
                if split[i] == pair[0] and split[i + 1] == pair[1]:
                    split = split[:i] + [merge] + split[i + 2 :]
                else:
                    i += 1
            splits[idx] = split
    return sum(splits, [])


# We can try this on any text composed of characters in the alphabet:

# In[48]:


tokenize("This is not a token.")


# Our implementation has no handling for unknown characters, since we didn’t do anything about them: a character that never appeared in the training corpus will simply come out as its own out-of-vocabulary token. GPT-2 doesn’t actually need an unknown token (it’s impossible to get an unknown character when using byte-level BPE), but this could happen here because we did not include all the possible bytes in the initial vocabulary. This aspect of BPE is beyond the scope of this section, so we’ve left the details out.
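# As a rough illustration of that last point, one way to make out-of-vocabulary characters explicit is to map anything outside our learned vocabulary to an "[UNK]" placeholder before applying the merges. The `tokenize_with_unk` variant below is only a sketch of that idea, not something GPT-2 itself needs:

# In[ ]:


def tokenize_with_unk(text, unk_token="[UNK]"):
    # Same steps as tokenize(), except characters that are not in our learned
    # vocabulary are replaced by a placeholder token instead of passing through.
    pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text)
    pre_tokenized_text = [word for word, offset in pre_tokenize_result]
    splits = [[c if c in vocab else unk_token for c in word] for word in pre_tokenized_text]
    for pair, merge in merges.items():
        for idx, split in enumerate(splits):
            i = 0
            while i < len(split) - 1:
                if split[i] == pair[0] and split[i + 1] == pair[1]:
                    split = split[:i] + [merge] + split[i + 2 :]
                else:
                    i += 1
            splits[idx] = split
    return sum(splits, [])


print(tokenize_with_unk("This is not a token!"))  # "!" never appeared in our corpus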
# ### Training a transformers library tokenizer

# The `transformers` library can also train a brand-new tokenizer that uses the same pipeline as an existing one, directly from an iterator over batches of text. Here we wrap each sentence of our small corpus in its own batch:

# In[49]:


training_corpus = [[text] for text in corpus]
print(training_corpus)


# In[50]:


bpe_tokenizer = tokenizer.train_new_from_iterator(training_corpus, 275)  # target vocabulary size of 275
tokens = bpe_tokenizer.tokenize("This is not a token")
print(tokens)


# ## OpenAI vocab
#
# GPT-2’s 50K-token vocabulary was created with OpenAI’s byte-level variant of BPE sub-word tokenization (a fast open-source implementation is available as [tiktoken](https://github.com/openai/tiktoken)), and the vocabulary file is available here:
#
# https://huggingface.co/gpt2/blob/main/vocab.json

# In[51]:


import json

# Expects the vocab.json linked above to be saved locally as gpt_vocab.json.
gpt_vocab = None
with open('gpt_vocab.json', 'r') as f:
    gpt_vocab = json.load(f)

if gpt_vocab:
    print(gpt_vocab['<|endoftext|>'])
    try:
        print(gpt_vocab['Anoop'])
    except KeyError:
        print('Anoop does not exist')
    print(gpt_vocab['An'])
    print(gpt_vocab['oop'])


# ## End

# In[64]:


from IPython.core.display import HTML

def css_styling():
    styles = open("../css/notebook.css", "r").read()
    return HTML(styles)

css_styling()