The objective of this notebook is to help those who want to accelerate CPU inference time for Hugging Face task models (NER, QA, classification...).
source: https://discuss.pytorch.org/t/model-eval-vs-with-torch-no-grad/19615/2
model.eval()
will notify all your layers that you are in eval mode; that way, batchnorm or dropout layers will work in eval mode instead of training mode.
torch.no_grad() impacts the autograd engine and deactivates it. It will reduce memory usage and speed up computations, but you won't be able to backprop (which you don't want in an eval script).
import platform
platform.platform()
'Linux-5.4.104+-x86_64-with-Ubuntu-18.04-bionic'
from psutil import *
cpu_count(),cpu_stats()
(2, scpustats(ctx_switches=695139, interrupts=346474, soft_interrupts=359163, syscalls=0))
!cat /proc/cpuinfo
processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 79 model name : Intel(R) Xeon(R) CPU @ 2.20GHz stepping : 0 microcode : 0x1 cpu MHz : 2199.998 cache size : 56320 KB physical id : 0 siblings : 2 core id : 0 cpu cores : 1 apicid : 0 initial apicid : 0 fpu : yes fpu_exception : yes cpuid level : 13 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear arch_capabilities bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa bogomips : 4399.99 clflush size : 64 cache_alignment : 64 address sizes : 46 bits physical, 48 bits virtual power management: processor : 1 vendor_id : GenuineIntel cpu family : 6 model : 79 model name : Intel(R) Xeon(R) CPU @ 2.20GHz stepping : 0 microcode : 0x1 cpu MHz : 2199.998 cache size : 56320 KB physical id : 0 siblings : 2 core id : 0 cpu cores : 1 apicid : 1 initial apicid : 1 fpu : yes fpu_exception : yes cpuid level : 13 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear arch_capabilities bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa bogomips : 4399.99 clflush size : 64 cache_alignment : 64 address sizes : 46 bits 
physical, 48 bits virtual power management:
!df -h
Filesystem Size Used Avail Use% Mounted on overlay 108G 47G 62G 43% / tmpfs 64M 0 64M 0% /dev tmpfs 6.4G 0 6.4G 0% /sys/fs/cgroup shm 5.9G 0 5.9G 0% /dev/shm /dev/root 2.0G 1.2G 821M 59% /sbin/docker-init tmpfs 6.4G 28K 6.4G 1% /var/colab /dev/sda1 81G 51G 31G 63% /etc/hosts tmpfs 6.4G 0 6.4G 0% /proc/acpi tmpfs 6.4G 0 6.4G 0% /proc/scsi tmpfs 6.4G 0 6.4G 0% /sys/firmware
virtual_memory()
svmem(total=13622198272, available=12802056192, percent=6.0, used=543846400, free=10809655296, active=1039499264, inactive=1529974784, buffers=125227008, cached=2143469568, shared=1171456, slab=192012288)
%%capture
!pip install transformers
import transformers
import torch
import numpy as np

# Record library versions so the latency numbers below are reproducible.
for lib_name, lib in (("transformers", transformers), ("torch", torch), ("numpy", np)):
    print(f"{lib_name}: {lib.__version__}")
transformers: 4.11.3 torch: 1.9.0+cu111 numpy: 1.19.5
from time import perf_counter
def timer(f, *args):
    """Call ``f(*args)`` once and return the elapsed wall-clock time in milliseconds."""
    t0 = perf_counter()
    f(*args)
    elapsed_seconds = perf_counter() - t0
    return elapsed_seconds * 1000
# Portuguese BERT checkpoint fine-tuned on SQuAD v1.1 for extractive QA.
model_checkpoint = "pierreguillou/bert-base-cased-squad-v1.1-portuguese"
# model_checkpoint = "pierreguillou/bert-large-cased-squad-v1.1-portuguese"
# Benchmark fixture: the context paragraph every timed run tokenizes and queries.
context = r"""
A pandemia de COVID-19, também conhecida como pandemia de coronavírus, é uma pandemia em curso de COVID-19,
uma doença respiratória aguda causada pelo coronavírus da síndrome respiratória aguda grave 2 (SARS-CoV-2).
A doença foi identificada pela primeira vez em Wuhan, na província de Hubei, República Popular da China,
em 1 de dezembro de 2019, mas o primeiro caso foi reportado em 31 de dezembro do mesmo ano.
Acredita-se que o vírus tenha uma origem zoonótica, porque os primeiros casos confirmados
tinham principalmente ligações ao Mercado Atacadista de Frutos do Mar de Huanan, que também vendia animais vivos.
Em 11 de março de 2020, a Organização Mundial da Saúde declarou o surto uma pandemia. Até 8 de fevereiro de 2021,
pelo menos 105 743 102 casos da doença foram confirmados em pelo menos 191 países e territórios,
com cerca de 2 308 943 mortes e 58 851 440 pessoas curadas.
"""
# The question asked against the context (expected answer: "1 de dezembro de 2019").
question = "Quando começou a pandemia de Covid-19 no mundo?"
Before evaluating its inference time, let's check that our QA model is working well.
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# Load the fast (Rust-backed) tokenizer and the QA model from the Hub checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
# Switch dropout/batchnorm layers to eval mode; the trailing ';' suppresses notebook echo.
model.eval();
Downloading: 0%| | 0.00/494 [00:00<?, ?B/s]
Downloading: 0%| | 0.00/862 [00:00<?, ?B/s]
Downloading: 0%| | 0.00/205k [00:00<?, ?B/s]
Downloading: 0%| | 0.00/112 [00:00<?, ?B/s]
Downloading: 0%| | 0.00/413M [00:00<?, ?B/s]
# code source: https://huggingface.co/transformers/master/task_summary.html#extractive-question-answering
# tokenize inputs
inputs = tokenizer(question, context, add_special_tokens=True, return_tensors="pt")
# get outputs
outputs = model(**inputs)
answer_start_scores = outputs.start_logits
answer_end_scores = outputs.end_logits
# Get the most likely beginning of answer with the argmax of the score
answer_start = torch.argmax(answer_start_scores)
# Get the most likely end of answer with the argmax of the score
answer_end = torch.argmax(answer_end_scores) + 1
input_ids = inputs["input_ids"].tolist()[0]
answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
# print answer
print(f"Question: {question}")
print(f"Answer: {answer}")
Question: Quando começou a pandemia de Covid-19 no mundo? Answer: 1 de dezembro de 2019
That's the right answer!
We can use Pipeline, too.
from transformers import pipeline
# Build a QA pipeline straight from the checkpoint name (downloads model + tokenizer).
nlp = pipeline("question-answering", model=model_checkpoint)
# get result
# NOTE(review): newer transformers releases prefer keyword args
# (nlp(question=..., context=...)); positional works on this version.
result = nlp(question, context)
# print answer
print(f"Question: {question}")
print(f"Answer: {result['answer']} (score: {round(result['score'], 4)})")
Question: Quando começou a pandemia de Covid-19 no mundo? Answer: 1 de dezembro de 2019 (score: 0.713)
That's the right answer!
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# Reload the tokenizer and model fresh before the timing runs below.
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
model.eval();
# Average the tokenizer latency over repeated calls; result is in milliseconds.
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    inputs = tokenizer(question, context, add_special_tokens=True, return_tensors="pt")
    elapsed += perf_counter() - t0
mean_tokenizer = round(elapsed / num * 1000, 2)
print(f'average time: {mean_tokenizer} ms')
average time: 0.73 ms
# put model and inputs to cpu
model = model.to('cpu')
inputs = inputs.to('cpu')
# get mean time
# Benchmark the bare forward pass 100 times under no_grad and average it (ms).
with torch.no_grad():
    runs = [timer(model, inputs.input_ids, inputs.token_type_ids, inputs.attention_mask) for _ in range(100)]
    mean_time_cpu = round(np.mean(runs), 2)
print(f'average time: {mean_time_cpu} ms')
average time: 889.07 ms
Now, we can evaluate the time to get the answer.
# get outputs
with torch.no_grad():
    outputs = model(**inputs)
# Time only the post-processing: argmax over the logits plus decoding the span.
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    answer_start_scores = outputs.start_logits
    answer_end_scores = outputs.end_logits
    # most likely start / end of the answer span (end is exclusive)
    answer_start = torch.argmax(answer_start_scores)
    answer_end = torch.argmax(answer_end_scores) + 1
    input_ids = inputs["input_ids"].tolist()[0]
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
    elapsed += perf_counter() - t0
mean_time_cpu_answer = round(elapsed / num * 1000, 2)
print(f'average time: {mean_time_cpu_answer} ms')
average time: 1.01 ms
Then, we have the total time when the model is on the CPU:
# Total CPU latency = tokenization + forward pass + answer decoding.
total_cpu = round(sum((mean_tokenizer, mean_time_cpu, mean_time_cpu_answer)), 2)
print(f'time: {total_cpu} ms')
time: 890.81 ms
We can use Pipeline, too.
from transformers import pipeline
We have the total time when the model is on the CPU:
# put model and inputs to cpu
# Build the QA pipeline pinned to CPU (device=-1) and average end-to-end latency.
nlp = pipeline("question-answering", model=model_checkpoint, use_fast=True, device=-1)
# get mean time
with torch.no_grad():
    timings = [timer(nlp, question, context) for _ in range(100)]
    pipeline_mean_time_cpu = round(np.mean(timings), 2)
print(f'average time: {pipeline_mean_time_cpu} ms')
average time: 872.47 ms
import pandas as pd
# Summarize both CPU latency measurements in a one-column table.
latency_col = 'Latency on CPU (ms)'
df = pd.DataFrame(
    {latency_col: [mean_time_cpu, pipeline_mean_time_cpu]},
    index=pd.Index(['Without pipeline', 'With pipeline']),
    columns=pd.Index([latency_col]),
)
df
Latency on CPU (ms) | |
---|---|
Without pipeline | 889.07 |
With pipeline | 872.47 |
import numpy as np
import matplotlib.pyplot as plt
# Bar chart comparing the two CPU latency measurements side by side.
labels = ['CPU']
data = [mean_time_cpu, pipeline_mean_time_cpu]
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
X = np.arange(1)
for offset, value, color, label in (
    (-0.1, data[0], 'r', 'Without pipeline'),
    (0.1, data[1], 'g', 'With pipeline'),
):
    ax.bar(X + offset, value, color=color, width=0.2, label=label)
# axes and title
x = np.arange(len(labels))  # the label locations
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylabel('Latency (ms)')
ax.set_title('Inference latency of PyTorch model (CPU) in Google Colab')
leg = ax.legend();
Pipeline does not help improve latency on CPU.
ONNX Runtime helps accelerate PyTorch and TensorFlow models in production, on CPU or GPU. As an open source library built for performance and broad platform support, ONNX Runtime is used in products and services handling over 20 billion inferences each day.
You can use ONNX Runtime and Hugging Face Transformers together to improve the experience of training and deploying NLP models. Hugging Face has made it easy to run inference on Transformer models with ONNX Runtime via the transformers/convert_graph_to_onnx.py script, which generates a model that can be loaded by ONNX Runtime.
!python -m transformers.onnx --help
usage: Hugging Face ONNX Exporter tool [-h] -m MODEL [--feature {causal-lm,causal-lm-with-past,default,default-with-past,masked-lm,seq2seq-lm,seq2seq-lm-with-past,sequence-classification,sequence-classification-with-past,token-classification}] [--opset OPSET] [--atol ATOL] output positional arguments: output Path indicating where to store generated ONNX model. optional arguments: -h, --help show this help message and exit -m MODEL, --model MODEL Model's name of path on disk to load. --feature {causal-lm,causal-lm-with-past,default,default-with-past,masked-lm,seq2seq-lm,seq2seq-lm-with-past,sequence-classification,sequence-classification-with-past,token-classification} Export the model with some additional feature. --opset OPSET ONNX opset version to export the model with (default 12). --atol ATOL Absolute difference tolerence when validating the model.
SUPPORTED_PIPELINES = [
"feature-extraction",
"ner",
"sentiment-analysis",
"fill-mask",
"question-answering",
"text-generation",
"translation_en_to_fr",
"translation_en_to_de",
"translation_en_to_ro",
]
Get the file convert_graph_to_onnx.py
!wget https://raw.githubusercontent.com/huggingface/transformers/master/src/transformers/convert_graph_to_onnx.py
--2021-10-23 16:56:09-- https://raw.githubusercontent.com/huggingface/transformers/master/src/transformers/convert_graph_to_onnx.py Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ... Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 18640 (18K) [text/plain] Saving to: ‘convert_graph_to_onnx.py’ convert_graph_to_on 100%[===================>] 18.20K --.-KB/s in 0s 2021-10-23 16:56:10 (43.5 MB/s) - ‘convert_graph_to_onnx.py’ saved [18640/18640]
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)
# Average tokenizer latency for the ONNX path; ONNX Runtime consumes NumPy arrays.
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    # WARNING!!!!!!! return_tensors="np" and not return_tensors="pt"
    inputs = tokenizer(question, context, add_special_tokens=True, return_tensors="np")
    elapsed += perf_counter() - t0
onnx_mean_tokenizer = round(elapsed / num * 1000, 2)
print(f'average time: {onnx_mean_tokenizer} ms')
average time: 0.71 ms
%%capture
# onnxruntime cpu
!pip install onnx
!pip install onnxruntime
import onnxruntime as ort
ort.get_device()
'CPU'
import onnxruntime
print("onnxruntime:",onnxruntime.__version__)
onnxruntime: 1.9.0
model_checkpoint_onnx = 'onnx_cpu/' + model_checkpoint.replace('/','-') + '.onnx'
!python convert_graph_to_onnx.py \
--pipeline question-answering \
--model {model_checkpoint} \
--tokenizer {model_checkpoint} \
--framework pt \
--opset 11 \
--check-loading \
--use-external-format \
--quantize \
{model_checkpoint_onnx}
====== Converting model to ONNX ====== ONNX opset version set to: 11 Loading pipeline (model: pierreguillou/bert-base-cased-squad-v1.1-portuguese, tokenizer: pierreguillou/bert-base-cased-squad-v1.1-portuguese) Creating folder /content/onnx_cpu Using framework PyTorch: 1.9.0+cu111 Found input input_ids with shape: {0: 'batch', 1: 'sequence'} Found input token_type_ids with shape: {0: 'batch', 1: 'sequence'} Found input attention_mask with shape: {0: 'batch', 1: 'sequence'} Found output output_0 with shape: {0: 'batch', 1: 'sequence'} Found output output_1 with shape: {0: 'batch', 1: 'sequence'} Ensuring inputs are in correct order position_ids is not present in the generated input list. Generated inputs order: ['input_ids', 'attention_mask', 'token_type_ids'] ====== Optimizing ONNX model ====== 2021-10-23 16:56:33.317869392 [W:onnxruntime:, inference_session.cc:1419 Initialize] Serializing optimized model with Graph Optimization level greater than ORT_ENABLE_EXTENDED and the NchwcTransformer enabled. The generated model may contain hardware specific optimizations, and should only be used in the same environment the model was optimized in. Optimized model has been written at /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1.onnx: ✔ /!\ Optimized model contains hardware specific operators which might not be portable. /!\ As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint. This limitation will be removed in the next release of onnxruntime. WARNING:root:onnxruntime.quantization.quantize is deprecated. Please use quantize_static for static quantization, quantize_dynamic for dynamic quantization. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. 
No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. 
Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator FusedMatMul. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. Warning: Unsupported operator Gelu. No schema registered for this operator. Warning: Unsupported operator LayerNormalization. No schema registered for this operator. 
Quantized model has been written at /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1-quantized.onnx: ✔ ====== Check exported ONNX model(s) ====== Checking ONNX model loading from: /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1.1-portuguese.onnx ... Model /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1.1-portuguese.onnx correctly loaded: ✔ Checking ONNX model loading from: /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1.onnx ... Model /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1.onnx correctly loaded: ✔ Checking ONNX model loading from: /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1-quantized.onnx ... Model /content/onnx_cpu/pierreguillou-bert-base-cased-squad-v1-quantized.onnx correctly loaded: ✔
import onnxruntime as ort
# copy/paste the path to the file xxx.quantized.onnx
# NOTE(review): model_checkpoint_onnx points at the plain export, not the
# '-quantized.onnx' file the comment above mentions — confirm which model
# the reported latency is meant to measure.
ort_session = ort.InferenceSession("/content/" + model_checkpoint_onnx)
# Average the ONNX Runtime forward-pass latency over repeated runs (ms).
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    outputs = ort_session.run(None, dict(inputs))
    elapsed += perf_counter() - t0
onnx_mean_time_cpu = round(elapsed / num * 1000, 2)
print(f'average time: {onnx_mean_time_cpu} ms')
average time: 711.5 ms
Now, we can evaluate the time to get the answer.
# Time the ONNX post-processing; ort_session.run returns [start_logits, end_logits].
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    # code source: https://huggingface.co/transformers/master/task_summary.html#extractive-question-answering
    answer_start_scores = outputs[0]
    answer_end_scores = outputs[1]
    # most likely start / end of the answer span (end is exclusive)
    answer_start = np.argmax(answer_start_scores)
    answer_end = np.argmax(answer_end_scores) + 1
    input_ids = inputs["input_ids"].tolist()[0]
    answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
    elapsed += perf_counter() - t0
onnx_mean_time_cpu_answer = round(elapsed / num * 1000, 2)
print(f'average time: {onnx_mean_time_cpu_answer} ms')
average time: 0.07 ms
# Show the decoded answer from the ONNX outputs (should match the PyTorch answer).
print(f"Question: {question}")
print(f"Answer: {answer}")
Question: Quando começou a pandemia de Covid-19 no mundo? Answer: 1 de dezembro de 2019
Then, we have the total time when the model is on the CPU:
# Total ONNX CPU latency = tokenization + ONNX forward pass + answer decoding.
onnx_total_cpu = round(sum((onnx_mean_tokenizer, onnx_mean_time_cpu, onnx_mean_time_cpu_answer)), 2)
print(f'time: {onnx_total_cpu} ms')
time: 712.28 ms
import pandas as pd
# Compare plain PyTorch and ONNX Runtime forward-pass latencies in one table.
latency_col = 'Latency on CPU (ms)'
df = pd.DataFrame(
    {latency_col: [mean_time_cpu, onnx_mean_time_cpu]},
    index=pd.Index(['PyTorch (without pipeline)', 'ONNX Runtime']),
    columns=pd.Index([latency_col]),
)
df
Latency on CPU (ms) | |
---|---|
PyTorch (without pipeline) | 889.07 |
ONNX Runtime | 711.50 |
import numpy as np
import matplotlib.pyplot as plt
# Bar chart: PyTorch vs ONNX Runtime CPU latency.
labels = ['CPU']
data = [mean_time_cpu, onnx_mean_time_cpu]
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
X = np.arange(1)
for offset, value, color, label in (
    (-0.1, data[0], 'r', 'PyTorch (without pipeline)'),
    (0.1, data[1], 'g', 'ONNX Runtime'),
):
    ax.bar(X + offset, value, color=color, width=0.2, label=label)
# axes and title
x = np.arange(len(labels))  # the label locations
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylabel('Latency (ms)')
ax.set_title('Inference latency (CPU) in Google Colab')
leg = ax.legend();
(source: Configuration-based approach) Transformers v4.9.0 introduces a new package: transformers.onnx
. This package allows converting checkpoints to an ONNX graph by leveraging configuration objects. These configuration objects come ready made for a number of model architectures, and are made to be easily extendable to other architectures.
Ready-made configurations include the following models:
ALBERT
BART
BERT
DistilBERT
GPT Neo
LayoutLM
Longformer
mBART
OpenAI GPT-2
RoBERTa
T5
XLM-RoBERTa
Run transformers.onnx
(or the conversion script located at transformers/convert_graph_to_onnx.py). This script takes a few arguments such as the model to be exported and the framework you want to export from (PyTorch or TensorFlow).
WARNING: which arguments to use?
It will be exported under onnx/pierreguillou-bert-base-cased-squad-v1.1-portuguese.
%%time
model_checkpoint_onnx = 'onnx/' + model_checkpoint.replace('/','-')
!python -m transformers.onnx --model {model_checkpoint} {model_checkpoint_onnx}
Some weights of the model checkpoint at pierreguillou/bert-base-cased-squad-v1.1-portuguese were not used when initializing BertModel: ['qa_outputs.bias', 'qa_outputs.weight'] - This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of BertModel were not initialized from the model checkpoint at pierreguillou/bert-base-cased-squad-v1.1-portuguese and are newly initialized: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Using framework PyTorch: 1.9.0+cu111 Overriding 1 configuration item(s) - use_cache -> False Validating ONNX model... -[✓] ONNX model outputs' name match reference model ({'pooler_output', 'last_hidden_state'} - Validating ONNX Model output "last_hidden_state": -[✓] (2, 8, 768) matches (2, 8, 768) -[✓] all values close (atol: 0.0001) - Validating ONNX Model output "pooler_output": -[✓] (2, 768) matches (2, 768) -[✓] all values close (atol: 0.0001) All good, model saved at: onnx/pierreguillou-bert-base-cased-squad-v1.1-portuguese/model.onnx CPU times: user 104 ms, sys: 43.8 ms, total: 148 ms Wall time: 13.3 s
The outputs can be obtained by taking a look at the ONNX configuration of each model. For example, for BERT:
from transformers.models.bert import BertOnnxConfig, BertConfig
# Ready-made ONNX export configuration for BERT; its `outputs` mapping tells us
# which tensors the exported graph exposes (no QA head outputs here).
config = BertConfig()
onnx_config = BertOnnxConfig(config)
output_keys = list(onnx_config.outputs.keys())
# Notebook-style echo of the exported output names.
output_keys
['last_hidden_state', 'pooler_output']
WARNING: these outputs do not allow to get an answer!!!!!
We can not continue.
# import onnxruntime as ort
# ort_session = ort.InferenceSession('onnx/pierreguillou-bert-base-cased-squad-v1.1-portuguese/model.onnx')
According to Pytorch’s documentation: “TorchScript is a way to create serializable and optimizable models from PyTorch code”. Pytorch’s two modules JIT and TRACE allow the developer to export their model to be re-used in other programs, such as efficiency-oriented C++ programs.
Hugging Face provided an interface that allows the export of 🤗 Transformers models to TorchScript so that they can be reused in a different environment than a Pytorch-based python program.
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# torchscript=True makes the model traceable (it returns tuples instead of ModelOutput objects).
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True, torchscript=True)
model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint, torchscript=True)
model.eval();
# Peek at one embedding weight as a sanity check that the weights loaded.
model.bert.embeddings.word_embeddings.weight.data[0][0].item()
0.009311608970165253
import sys
# Size of the boxed Python float produced by .item() (24 bytes here) — this is NOT
# the in-tensor element size, so it says nothing about the model's precision.
sys.getsizeof(model.bert.embeddings.word_embeddings.weight.data[0][0].item())
24
# Average tokenizer latency for the TorchScript benchmark (milliseconds).
num = 100
elapsed = 0.0
for _ in range(num):
    t0 = perf_counter()
    inputs = tokenizer(question, context, add_special_tokens=True, return_tensors="pt")
    elapsed += perf_counter() - t0
torchscript_mean_tokenizer = round(elapsed / num * 1000, 2)
print(f'average time: {torchscript_mean_tokenizer} ms')
average time: 0.82 ms
# put model and inputs to cpu
model = model.to('cpu')
inputs = inputs.to('cpu')
# get mean time
with torch.no_grad():
    # Trace the model once (records the computation graph), then time the traced module.
    traced_model = torch.jit.trace(model, [inputs.input_ids, inputs.token_type_ids, inputs.attention_mask])
    # Round to 2 decimals and use the same print format as every other latency
    # cell, so the comparison table/chart below mixes consistent precisions.
    torchscript_mean_time_cpu = round(np.mean([timer(traced_model, inputs.input_ids, inputs.token_type_ids, inputs.attention_mask) for _ in range(100)]), 2)
print(f'average time: {torchscript_mean_time_cpu} ms')
849ms
import pandas as pd
# Compare plain PyTorch, ONNX Runtime and TorchScript CPU latencies in one table.
latency_col = 'Latency on CPU (ms)'
df = pd.DataFrame(
    {latency_col: [mean_time_cpu, onnx_mean_time_cpu, torchscript_mean_time_cpu]},
    index=pd.Index(['PyTorch (without pipeline)', 'ONNX Runtime', 'TorchScript']),
    columns=pd.Index([latency_col]),
)
df
Latency on CPU (ms) | |
---|---|
PyTorch (without pipeline) | 889.07 |
ONNX Runtime | 711.50 |
TorchScript | 849.00 |
import numpy as np
import matplotlib.pyplot as plt
# Three-way bar chart: PyTorch vs ONNX Runtime vs TorchScript CPU latency.
labels = ['CPU']
data = [mean_time_cpu, onnx_mean_time_cpu, torchscript_mean_time_cpu]
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
X = np.arange(1)
for offset, value, color, label in (
    (-0.2, data[0], 'r', 'PyTorch (without pipeline)'),
    (0.0, data[1], 'g', 'ONNX Runtime'),
    (0.2, data[2], 'b', 'TorchScript'),
):
    ax.bar(X + offset, value, color=color, width=0.2, label=label)
# axes and title
x = np.arange(len(labels))  # the label locations
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylabel('Latency (ms)')
ax.set_title('Inference latency (CPU) in Google Colab')
leg = ax.legend();