In [1]:
# Mount Google Drive (Colab-only) and cd into the project directory so that
# relative paths used below ('../data/...', '../vocab/...', '../model/...')
# resolve against the project layout on Drive.
from google.colab import drive
drive.mount('/content/gdrive')
import os
os.chdir('/content/gdrive/My Drive/finch/tensorflow2/text_classification/clue/main')
Mounted at /content/gdrive
In [2]:
!pip install transformers
Collecting transformers
  Downloading https://files.pythonhosted.org/packages/2c/4e/4f1ede0fd7a36278844a277f8d53c21f88f37f3754abf76a5d6224f76d4a/transformers-3.4.0-py3-none-any.whl (1.3MB)
     |████████████████████████████████| 1.3MB 4.5MB/s 
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers) (2.23.0)
Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers) (4.41.1)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers) (20.4)
Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers) (3.0.12)
Requirement already satisfied: dataclasses; python_version < "3.7" in /usr/local/lib/python3.6/dist-packages (from transformers) (0.7)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers) (1.18.5)
Requirement already satisfied: protobuf in /usr/local/lib/python3.6/dist-packages (from transformers) (3.12.4)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers) (2019.12.20)
Collecting tokenizers==0.9.2
  Downloading https://files.pythonhosted.org/packages/7c/a5/78be1a55b2ac8d6a956f0a211d372726e2b1dd2666bb537fea9b03abd62c/tokenizers-0.9.2-cp36-cp36m-manylinux1_x86_64.whl (2.9MB)
     |████████████████████████████████| 2.9MB 31.9MB/s 
Collecting sacremoses
  Downloading https://files.pythonhosted.org/packages/7d/34/09d19aff26edcc8eb2a01bed8e98f13a1537005d31e95233fd48216eed10/sacremoses-0.0.43.tar.gz (883kB)
     |████████████████████████████████| 890kB 61.4MB/s 
Collecting sentencepiece!=0.1.92
  Downloading https://files.pythonhosted.org/packages/e5/2d/6d4ca4bef9a67070fa1cac508606328329152b1df10bdf31fb6e4e727894/sentencepiece-0.1.94-cp36-cp36m-manylinux2014_x86_64.whl (1.1MB)
     |████████████████████████████████| 1.1MB 51.6MB/s 
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2020.6.20)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (3.0.4)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from packaging->transformers) (1.15.0)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers) (2.4.7)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf->transformers) (50.3.2)
Requirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (7.1.2)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (0.17.0)
Building wheels for collected packages: sacremoses
  Building wheel for sacremoses (setup.py) ... done
  Created wheel for sacremoses: filename=sacremoses-0.0.43-cp36-none-any.whl size=893257 sha256=9b0ba30bf03d454a74f2d3b89a2dbafecf6bb0c311139fc7e073dfba505d4f2b
  Stored in directory: /root/.cache/pip/wheels/29/3c/fd/7ce5c3f0666dab31a50123635e6fb5e19ceb42ce38d4e58f45
Successfully built sacremoses
Installing collected packages: tokenizers, sacremoses, sentencepiece, transformers
Successfully installed sacremoses-0.0.43 sentencepiece-0.1.94 tokenizers-0.9.2 transformers-3.4.0
In [3]:
from transformers import BertTokenizer, TFBertModel
from sklearn.metrics import classification_report
import os
import json
import time
import logging
import pprint
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

print("TensorFlow Version", tf.__version__)
# tf.test.is_gpu_available() is deprecated (this cell previously emitted a
# deprecation warning pointing at the replacement below).
print('GPU Enabled:', bool(tf.config.list_physical_devices('GPU')))
TensorFlow Version 2.3.0
WARNING:tensorflow:From <ipython-input-3-07fefb93fcab>:13: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.
GPU Enabled: True
In [4]:
def get_vocab(f_path):
  """Read a vocab file (one entry per line) into a {entry: line_index} dict.

  Args:
    f_path: path to a UTF-8 text file with one vocabulary entry per line.

  Returns:
    dict mapping each line (trailing whitespace stripped) to its 0-based
    line number.
  """
  word2idx = {}
  # Explicit UTF-8: the label/vocab files contain Chinese text and the
  # platform-default encoding is not guaranteed to decode them.
  with open(f_path, encoding='utf-8') as f:
    for i, line in enumerate(f):
      word2idx[line.rstrip()] = i
  return word2idx

# Hyper-parameters and paths for the fine-tuning run.
params = {
  'pretrain_path': 'bert-base-chinese',  # HuggingFace pretrained model id
  'train_path': '../data/train.txt',
  'test_path': '../data/test.txt',
  'batch_size': 16,
  'buffer_size': 31728,    # shuffle buffer size (presumably the training-set size — TODO confirm)
  'init_lr': 1e-5,         # lower bound of the cyclical learning-rate schedule
  'max_lr': 4e-5,          # upper bound of the cyclical learning-rate schedule
  'label_smooth': .2,      # label-smoothing factor for the softmax loss
  'n_epochs': 12,
  'num_patience': 5,       # early stopping: epochs without test-accuracy improvement
}

# Map each label name (one per line in label.txt) to an integer class id.
params['label2idx'] = get_vocab('../vocab/label.txt')

tokenizer = BertTokenizer.from_pretrained(params['pretrain_path'],
                                          lowercase = True,
                                          add_special_tokens = True)

In [5]:
# stream data from text files
def data_generator(f_path, params):
  with open(f_path) as f:
    print('Reading', f_path)
    for line in f:
      line = json.loads(line.rstrip())
      text, label = line['content'], line['label']
      text = list(text)
      text = ['[CLS]'] + text + ['[SEP]']
      text = tokenizer.convert_tokens_to_ids(text)
      seg = [0] * len(text)
      label = params['label2idx'][label]
      yield (text, seg), int(label)


def dataset(is_training, params):
  """Build a padded, batched tf.data pipeline over the train or test split.

  Args:
    is_training: when True, stream the training file and shuffle; otherwise
      stream the test file in order.
    params: dict with 'train_path'/'test_path', 'buffer_size', 'batch_size'.

  Returns:
    tf.data.Dataset of ((token_ids, segment_ids), label) batches, padded with
    0 on the inputs and -1 on the labels.
  """
  _shapes = (([None], [None]), ())
  _types = ((tf.int32, tf.int32), tf.int32)
  _pads = ((0, 0), -1)

  # The two branches only differed in the source file and the shuffle step,
  # so build one pipeline and shuffle conditionally.
  f_path = params['train_path'] if is_training else params['test_path']
  ds = tf.data.Dataset.from_generator(
    lambda: data_generator(f_path, params),
    output_shapes = _shapes,
    output_types = _types,)
  if is_training:
    ds = ds.shuffle(params['buffer_size'])
  ds = ds.padded_batch(params['batch_size'], _shapes, _pads)
  ds = ds.prefetch(tf.data.experimental.AUTOTUNE)

  return ds
In [6]:
# Sanity check: pull one example from the generator and inspect the token ids
# and segment ids (the printed sequence starts with 101 = [CLS] and ends with
# 102 = [SEP], confirming the special tokens were added and converted).
(text, seg), _ = next(data_generator(params['train_path'], params))
print(text)
print(seg)
Reading ../data/train.txt
[101, 112, 872, 4761, 6887, 1914, 840, 1914, 7353, 6818, 3300, 784, 720, 1408, 8043, 1506, 1506, 3300, 4788, 2357, 5456, 100, 4696, 4638, 741, 677, 1091, 4638, 872, 1420, 1521, 100, 872, 2157, 6929, 1779, 4788, 2357, 3221, 686, 4518, 677, 3297, 1920, 4638, 4788, 2357, 8024, 1506, 1506, 8024, 7745, 872, 4638, 1568, 2124, 3221, 6432, 2225, 1217, 2861, 4478, 4105, 2357, 3221, 686, 4518, 677, 3297, 1920, 4638, 4105, 2357, 1568, 100, 1506, 1506, 1506, 112, 112, 4268, 4268, 8024, 1961, 4638, 1928, 1355, 5456, 8013, 2769, 812, 1920, 2812, 7370, 3488, 2094, 6963, 6206, 5436, 677, 3341, 2769, 4692, 1168, 3312, 1928, 5361, 7027, 3300, 1928, 1355, 100, 671, 2137, 3221, 166, 166, 809, 1184, 1931, 1168, 4638, 8024, 872, 6432, 3221, 679, 3221, 8043, 138, 4495, 4567, 140, 102]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
In [7]:
class BertFinetune(tf.keras.Model):
  """BERT encoder with a small classification head on the pooled output.

  Pipeline: pooled [CLS] output -> dropout -> 300-unit swish layer ->
  dropout -> linear projection to len(label2idx) logits.
  """

  def __init__(self, params):
    super(BertFinetune, self).__init__()
    self.bert = TFBertModel.from_pretrained(params['pretrain_path'],
                                            trainable = True)
    self.drop_1 = tf.keras.layers.Dropout(.1)
    self.fc = tf.keras.layers.Dense(300, tf.nn.swish, name='down_stream/fc')
    self.drop_2 = tf.keras.layers.Dropout(.1)
    self.out = tf.keras.layers.Dense(len(params['label2idx']), name='down_stream/out')

  def call(self, bert_inputs, training):
    """Run [word_ids, mask, segment_ids] through BERT and the head; return logits."""
    inputs = [tf.cast(t, tf.int32) for t in bert_inputs]
    pooled = self.bert(inputs, training=training)[1]  # index 1 = pooled output
    hidden = self.fc(self.drop_1(pooled, training=training))
    return self.out(self.drop_2(hidden, training=training))
In [8]:
# Instantiate the model and build its weights with symbolic shapes for the
# three inputs passed at call time (word ids, mask, segment ids), each
# (batch, seq_len).
model = BertFinetune(params)
model.build([[None, None], [None, None], [None, None]])


Some layers from the model checkpoint at bert-base-chinese were not used when initializing TFBertModel: ['nsp___cls', 'mlm___cls']
- This IS expected if you are initializing TFBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).
- This IS NOT expected if you are initializing TFBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
All the layers of TFBertModel were initialized from the model checkpoint at bert-base-chinese.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFBertModel for predictions without further training.
In [9]:
# Triangular2 cyclical LR: oscillates between init_lr and max_lr with the
# amplitude halved each cycle; a full cycle spans 2 * step_size steps.
step_size = 2 * params['buffer_size'] // params['batch_size']
decay_lr = tfa.optimizers.Triangular2CyclicalLearningRate(
  initial_learning_rate = params['init_lr'],
  maximal_learning_rate = params['max_lr'],
  step_size = step_size,)
optim = tf.optimizers.Adam(params['init_lr'])
global_step = 0  # counts optimizer updates across all epochs

best_acc = .0  # best test accuracy so far — checkpoint criterion
count = 0      # epochs since last improvement — early-stopping counter

t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)

for _ in range(params['n_epochs']):
  # TRAINING
  for ((text, seg), labels) in dataset(is_training=True, params=params):
    with tf.GradientTape() as tape:
      # tf.sign(text) serves as the attention mask: padded positions are id 0,
      # real token ids are positive, so sign() yields a 0/1 mask.
      logits = model([text, tf.sign(text), seg], training=True)
      loss = tf.compat.v1.losses.softmax_cross_entropy(
        tf.one_hot(labels, len(params['label2idx']), dtype=tf.float32),
        logits = logits,
        label_smoothing = params['label_smooth'],)
      
    # Set this step's LR from the schedule before applying gradients.
    optim.lr.assign(decay_lr(global_step))
    grads = tape.gradient(loss, model.trainable_variables)
    grads, _ = tf.clip_by_global_norm(grads, 5.)  # clip for training stability
    optim.apply_gradients(zip(grads, model.trainable_variables))
    
    if global_step % 100 == 0:
      logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
          global_step, loss.numpy().item(), time.time()-t0, optim.lr.numpy().item()))
      t0 = time.time()
    global_step += 1
  
  # EVALUATION
  m = tf.keras.metrics.Accuracy()
  intent_true = []
  intent_pred = []

  for ((text, seg), labels) in dataset(is_training=False, params=params):
    logits = model([text, tf.sign(text), seg], training=False)
    y_intent = tf.argmax(logits, -1)
    m.update_state(y_true=labels, y_pred=y_intent)
    intent_true += labels.numpy().flatten().tolist()
    intent_pred += y_intent.numpy().flatten().tolist()

  acc = m.result().numpy()
  logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc))

  # Per-class precision/recall/F1 over the whole test split.
  logger.info('\n'+classification_report(y_true = intent_true,
                                         y_pred = intent_pred,
                                         labels = list(params['label2idx'].values()),
                                         target_names = list(params['label2idx'].keys()),
                                         digits=3))

  # Checkpoint on improvement; otherwise advance the early-stopping counter.
  if acc > best_acc:
    best_acc = acc
    model.save_weights('../model/bert_finetune')
    count = 0
  else:
    count += 1
  logger.info("Best Accuracy: {:.3f}".format(best_acc))

  if count == params['num_patience']:
    print(params['num_patience'], "times not improve the best result, therefore stop training")
    break
Reading ../data/train.txt
INFO:tensorflow:Step 0 | Loss: 1.9473 | Spent: 7.5 secs | LR: 0.000010
INFO:tensorflow:Step 100 | Loss: 1.9586 | Spent: 37.1 secs | LR: 0.000011
INFO:tensorflow:Step 200 | Loss: 1.5733 | Spent: 37.1 secs | LR: 0.000012
INFO:tensorflow:Step 300 | Loss: 1.4134 | Spent: 37.1 secs | LR: 0.000012
INFO:tensorflow:Step 400 | Loss: 1.5317 | Spent: 36.8 secs | LR: 0.000013
INFO:tensorflow:Step 500 | Loss: 1.4112 | Spent: 37.1 secs | LR: 0.000014
INFO:tensorflow:Step 600 | Loss: 1.4376 | Spent: 37.2 secs | LR: 0.000015
INFO:tensorflow:Step 700 | Loss: 1.2993 | Spent: 37.3 secs | LR: 0.000015
INFO:tensorflow:Step 800 | Loss: 1.2889 | Spent: 37.0 secs | LR: 0.000016
INFO:tensorflow:Step 900 | Loss: 1.0945 | Spent: 36.8 secs | LR: 0.000017
INFO:tensorflow:Step 1000 | Loss: 1.5279 | Spent: 36.5 secs | LR: 0.000018
INFO:tensorflow:Step 1100 | Loss: 1.5483 | Spent: 37.1 secs | LR: 0.000018
INFO:tensorflow:Step 1200 | Loss: 1.1676 | Spent: 36.9 secs | LR: 0.000019
INFO:tensorflow:Step 1300 | Loss: 1.3101 | Spent: 36.8 secs | LR: 0.000020
INFO:tensorflow:Step 1400 | Loss: 1.7779 | Spent: 36.5 secs | LR: 0.000021
INFO:tensorflow:Step 1500 | Loss: 1.3142 | Spent: 36.9 secs | LR: 0.000021
INFO:tensorflow:Step 1600 | Loss: 1.7055 | Spent: 37.1 secs | LR: 0.000022
INFO:tensorflow:Step 1700 | Loss: 1.4840 | Spent: 37.3 secs | LR: 0.000023
INFO:tensorflow:Step 1800 | Loss: 1.3933 | Spent: 36.8 secs | LR: 0.000024
INFO:tensorflow:Step 1900 | Loss: 1.4167 | Spent: 37.3 secs | LR: 0.000024
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.603
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.617     0.788     0.692      1448
   happiness      0.612     0.742     0.671       978
        like      0.628     0.433     0.512       453
       anger      0.469     0.257     0.332       447
        fear      0.471     0.358     0.407        67
    surprise      0.594     0.184     0.281       103
     disgust      0.586     0.361     0.447       471

    accuracy                          0.603      3967
   macro avg      0.568     0.446     0.477      3967
weighted avg      0.594     0.603     0.581      3967

INFO:tensorflow:Best Accuracy: 0.603
Reading ../data/train.txt
INFO:tensorflow:Step 2000 | Loss: 1.4271 | Spent: 72.1 secs | LR: 0.000025
INFO:tensorflow:Step 2100 | Loss: 1.2842 | Spent: 36.8 secs | LR: 0.000026
INFO:tensorflow:Step 2200 | Loss: 1.1932 | Spent: 37.0 secs | LR: 0.000027
INFO:tensorflow:Step 2300 | Loss: 1.4256 | Spent: 36.8 secs | LR: 0.000027
INFO:tensorflow:Step 2400 | Loss: 1.1900 | Spent: 37.2 secs | LR: 0.000028
INFO:tensorflow:Step 2500 | Loss: 1.5834 | Spent: 36.4 secs | LR: 0.000029
INFO:tensorflow:Step 2600 | Loss: 1.4049 | Spent: 36.9 secs | LR: 0.000030
INFO:tensorflow:Step 2700 | Loss: 1.1436 | Spent: 36.6 secs | LR: 0.000030
INFO:tensorflow:Step 2800 | Loss: 1.4951 | Spent: 37.3 secs | LR: 0.000031
INFO:tensorflow:Step 2900 | Loss: 1.5373 | Spent: 37.1 secs | LR: 0.000032
INFO:tensorflow:Step 3000 | Loss: 1.4432 | Spent: 36.7 secs | LR: 0.000033
INFO:tensorflow:Step 3100 | Loss: 1.1019 | Spent: 37.2 secs | LR: 0.000033
INFO:tensorflow:Step 3200 | Loss: 1.5572 | Spent: 37.4 secs | LR: 0.000034
INFO:tensorflow:Step 3300 | Loss: 1.4271 | Spent: 36.4 secs | LR: 0.000035
INFO:tensorflow:Step 3400 | Loss: 1.7480 | Spent: 36.8 secs | LR: 0.000036
INFO:tensorflow:Step 3500 | Loss: 1.5242 | Spent: 37.5 secs | LR: 0.000036
INFO:tensorflow:Step 3600 | Loss: 1.4127 | Spent: 36.9 secs | LR: 0.000037
INFO:tensorflow:Step 3700 | Loss: 1.6115 | Spent: 37.0 secs | LR: 0.000038
INFO:tensorflow:Step 3800 | Loss: 1.6928 | Spent: 36.9 secs | LR: 0.000039
INFO:tensorflow:Step 3900 | Loss: 1.3409 | Spent: 36.9 secs | LR: 0.000040
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.599
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.658     0.730     0.692      1448
   happiness      0.543     0.810     0.650       978
        like      0.609     0.457     0.522       453
       anger      0.514     0.246     0.333       447
        fear      0.700     0.313     0.433        67
    surprise      0.677     0.204     0.313       103
     disgust      0.580     0.355     0.440       471

    accuracy                          0.599      3967
   macro avg      0.612     0.445     0.483      3967
weighted avg      0.600     0.599     0.578      3967

INFO:tensorflow:Best Accuracy: 0.603
Reading ../data/train.txt
INFO:tensorflow:Step 4000 | Loss: 1.4086 | Spent: 68.4 secs | LR: 0.000040
INFO:tensorflow:Step 4100 | Loss: 1.1599 | Spent: 36.2 secs | LR: 0.000039
INFO:tensorflow:Step 4200 | Loss: 1.4546 | Spent: 37.0 secs | LR: 0.000038
INFO:tensorflow:Step 4300 | Loss: 1.4650 | Spent: 37.2 secs | LR: 0.000037
INFO:tensorflow:Step 4400 | Loss: 1.5272 | Spent: 36.7 secs | LR: 0.000037
INFO:tensorflow:Step 4500 | Loss: 1.2615 | Spent: 36.8 secs | LR: 0.000036
INFO:tensorflow:Step 4600 | Loss: 1.0522 | Spent: 36.9 secs | LR: 0.000035
INFO:tensorflow:Step 4700 | Loss: 1.0705 | Spent: 37.2 secs | LR: 0.000034
INFO:tensorflow:Step 4800 | Loss: 1.1493 | Spent: 36.9 secs | LR: 0.000034
INFO:tensorflow:Step 4900 | Loss: 1.4740 | Spent: 36.2 secs | LR: 0.000033
INFO:tensorflow:Step 5000 | Loss: 1.1759 | Spent: 36.6 secs | LR: 0.000032
INFO:tensorflow:Step 5100 | Loss: 1.1291 | Spent: 37.1 secs | LR: 0.000031
INFO:tensorflow:Step 5200 | Loss: 1.0017 | Spent: 36.6 secs | LR: 0.000031
INFO:tensorflow:Step 5300 | Loss: 1.4552 | Spent: 37.1 secs | LR: 0.000030
INFO:tensorflow:Step 5400 | Loss: 1.2616 | Spent: 37.2 secs | LR: 0.000029
INFO:tensorflow:Step 5500 | Loss: 1.1934 | Spent: 37.3 secs | LR: 0.000028
INFO:tensorflow:Step 5600 | Loss: 1.0608 | Spent: 37.5 secs | LR: 0.000028
INFO:tensorflow:Step 5700 | Loss: 1.1430 | Spent: 37.1 secs | LR: 0.000027
INFO:tensorflow:Step 5800 | Loss: 1.2614 | Spent: 37.3 secs | LR: 0.000026
INFO:tensorflow:Step 5900 | Loss: 1.6205 | Spent: 37.5 secs | LR: 0.000025
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.614
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.678     0.711     0.694      1448
   happiness      0.676     0.718     0.696       978
        like      0.542     0.585     0.563       453
       anger      0.439     0.391     0.414       447
        fear      0.590     0.343     0.434        67
    surprise      0.632     0.233     0.340       103
     disgust      0.488     0.461     0.474       471

    accuracy                          0.614      3967
   macro avg      0.578     0.492     0.516      3967
weighted avg      0.610     0.614     0.608      3967

INFO:tensorflow:Best Accuracy: 0.614
Reading ../data/train.txt
INFO:tensorflow:Step 6000 | Loss: 0.9686 | Spent: 71.6 secs | LR: 0.000025
INFO:tensorflow:Step 6100 | Loss: 0.9867 | Spent: 36.6 secs | LR: 0.000024
INFO:tensorflow:Step 6200 | Loss: 1.3597 | Spent: 36.7 secs | LR: 0.000023
INFO:tensorflow:Step 6300 | Loss: 1.0194 | Spent: 37.5 secs | LR: 0.000022
INFO:tensorflow:Step 6400 | Loss: 1.0725 | Spent: 37.6 secs | LR: 0.000022
INFO:tensorflow:Step 6500 | Loss: 1.3804 | Spent: 36.6 secs | LR: 0.000021
INFO:tensorflow:Step 6600 | Loss: 0.9228 | Spent: 37.5 secs | LR: 0.000020
INFO:tensorflow:Step 6700 | Loss: 1.3261 | Spent: 37.6 secs | LR: 0.000019
INFO:tensorflow:Step 6800 | Loss: 1.1698 | Spent: 37.0 secs | LR: 0.000019
INFO:tensorflow:Step 6900 | Loss: 1.3063 | Spent: 36.4 secs | LR: 0.000018
INFO:tensorflow:Step 7000 | Loss: 0.9127 | Spent: 37.0 secs | LR: 0.000017
INFO:tensorflow:Step 7100 | Loss: 1.1986 | Spent: 37.1 secs | LR: 0.000016
INFO:tensorflow:Step 7200 | Loss: 0.9023 | Spent: 37.1 secs | LR: 0.000016
INFO:tensorflow:Step 7300 | Loss: 1.2342 | Spent: 36.4 secs | LR: 0.000015
INFO:tensorflow:Step 7400 | Loss: 1.2517 | Spent: 37.1 secs | LR: 0.000014
INFO:tensorflow:Step 7500 | Loss: 0.9801 | Spent: 37.2 secs | LR: 0.000013
INFO:tensorflow:Step 7600 | Loss: 1.2822 | Spent: 36.7 secs | LR: 0.000013
INFO:tensorflow:Step 7700 | Loss: 1.0287 | Spent: 36.6 secs | LR: 0.000012
INFO:tensorflow:Step 7800 | Loss: 0.9768 | Spent: 37.2 secs | LR: 0.000011
INFO:tensorflow:Step 7900 | Loss: 1.1309 | Spent: 36.8 secs | LR: 0.000010
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.617
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.663     0.744     0.701      1448
   happiness      0.667     0.737     0.700       978
        like      0.569     0.528     0.548       453
       anger      0.442     0.338     0.383       447
        fear      0.606     0.299     0.400        67
    surprise      0.337     0.330     0.333       103
     disgust      0.560     0.435     0.490       471

    accuracy                          0.617      3967
   macro avg      0.549     0.487     0.508      3967
weighted avg      0.607     0.617     0.608      3967

INFO:tensorflow:Best Accuracy: 0.617
Reading ../data/train.txt
INFO:tensorflow:Step 8000 | Loss: 0.9553 | Spent: 71.1 secs | LR: 0.000010
INFO:tensorflow:Step 8100 | Loss: 0.7956 | Spent: 36.7 secs | LR: 0.000011
INFO:tensorflow:Step 8200 | Loss: 0.9688 | Spent: 37.2 secs | LR: 0.000011
INFO:tensorflow:Step 8300 | Loss: 1.1078 | Spent: 37.7 secs | LR: 0.000011
INFO:tensorflow:Step 8400 | Loss: 1.0921 | Spent: 37.7 secs | LR: 0.000012
INFO:tensorflow:Step 8500 | Loss: 1.0747 | Spent: 37.4 secs | LR: 0.000012
INFO:tensorflow:Step 8600 | Loss: 0.9179 | Spent: 37.9 secs | LR: 0.000013
INFO:tensorflow:Step 8700 | Loss: 1.2412 | Spent: 37.8 secs | LR: 0.000013
INFO:tensorflow:Step 8800 | Loss: 1.0199 | Spent: 37.7 secs | LR: 0.000013
INFO:tensorflow:Step 8900 | Loss: 1.1427 | Spent: 37.8 secs | LR: 0.000014
INFO:tensorflow:Step 9000 | Loss: 0.9892 | Spent: 37.8 secs | LR: 0.000014
INFO:tensorflow:Step 9100 | Loss: 1.2770 | Spent: 37.8 secs | LR: 0.000014
INFO:tensorflow:Step 9200 | Loss: 0.9902 | Spent: 39.0 secs | LR: 0.000015
INFO:tensorflow:Step 9300 | Loss: 0.8045 | Spent: 38.8 secs | LR: 0.000015
INFO:tensorflow:Step 9400 | Loss: 1.0973 | Spent: 38.5 secs | LR: 0.000016
INFO:tensorflow:Step 9500 | Loss: 1.0240 | Spent: 38.9 secs | LR: 0.000016
INFO:tensorflow:Step 9600 | Loss: 0.8575 | Spent: 38.9 secs | LR: 0.000016
INFO:tensorflow:Step 9700 | Loss: 0.8479 | Spent: 38.5 secs | LR: 0.000017
INFO:tensorflow:Step 9800 | Loss: 0.9444 | Spent: 38.1 secs | LR: 0.000017
INFO:tensorflow:Step 9900 | Loss: 1.4572 | Spent: 38.6 secs | LR: 0.000017
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.599
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.637     0.723     0.677      1448
   happiness      0.664     0.729     0.695       978
        like      0.606     0.472     0.531       453
       anger      0.422     0.338     0.375       447
        fear      0.564     0.328     0.415        67
    surprise      0.327     0.350     0.338       103
     disgust      0.496     0.410     0.449       471

    accuracy                          0.599      3967
   macro avg      0.531     0.479     0.497      3967
weighted avg      0.590     0.599     0.591      3967

INFO:tensorflow:Best Accuracy: 0.617
Reading ../data/train.txt
INFO:tensorflow:Step 10000 | Loss: 0.9980 | Spent: 71.6 secs | LR: 0.000018
INFO:tensorflow:Step 10100 | Loss: 0.9896 | Spent: 38.4 secs | LR: 0.000018
INFO:tensorflow:Step 10200 | Loss: 0.9471 | Spent: 38.2 secs | LR: 0.000019
INFO:tensorflow:Step 10300 | Loss: 1.0556 | Spent: 38.1 secs | LR: 0.000019
INFO:tensorflow:Step 10400 | Loss: 0.8405 | Spent: 38.3 secs | LR: 0.000019
INFO:tensorflow:Step 10500 | Loss: 0.9489 | Spent: 38.2 secs | LR: 0.000020
INFO:tensorflow:Step 10600 | Loss: 0.9527 | Spent: 38.3 secs | LR: 0.000020
INFO:tensorflow:Step 10700 | Loss: 0.8758 | Spent: 37.9 secs | LR: 0.000020
INFO:tensorflow:Step 10800 | Loss: 0.9225 | Spent: 38.4 secs | LR: 0.000021
INFO:tensorflow:Step 10900 | Loss: 1.1007 | Spent: 38.9 secs | LR: 0.000021
INFO:tensorflow:Step 11000 | Loss: 0.8838 | Spent: 38.5 secs | LR: 0.000022
INFO:tensorflow:Step 11100 | Loss: 1.0512 | Spent: 39.0 secs | LR: 0.000022
INFO:tensorflow:Step 11200 | Loss: 0.7795 | Spent: 38.5 secs | LR: 0.000022
INFO:tensorflow:Step 11300 | Loss: 0.9527 | Spent: 38.6 secs | LR: 0.000023
INFO:tensorflow:Step 11400 | Loss: 0.8633 | Spent: 38.2 secs | LR: 0.000023
INFO:tensorflow:Step 11500 | Loss: 0.7974 | Spent: 38.5 secs | LR: 0.000023
INFO:tensorflow:Step 11600 | Loss: 0.8772 | Spent: 38.8 secs | LR: 0.000024
INFO:tensorflow:Step 11700 | Loss: 1.0459 | Spent: 37.7 secs | LR: 0.000024
INFO:tensorflow:Step 11800 | Loss: 1.2666 | Spent: 38.0 secs | LR: 0.000025
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.590
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.679     0.651     0.665      1448
   happiness      0.656     0.728     0.690       978
        like      0.514     0.532     0.523       453
       anger      0.398     0.380     0.389       447
        fear      0.414     0.358     0.384        67
    surprise      0.562     0.262     0.358       103
     disgust      0.456     0.476     0.466       471

    accuracy                          0.590      3967
   macro avg      0.526     0.484     0.496      3967
weighted avg      0.589     0.590     0.587      3967

INFO:tensorflow:Best Accuracy: 0.617
Reading ../data/train.txt
INFO:tensorflow:Step 11900 | Loss: 0.8846 | Spent: 71.1 secs | LR: 0.000025
INFO:tensorflow:Step 12000 | Loss: 0.8562 | Spent: 38.6 secs | LR: 0.000025
INFO:tensorflow:Step 12100 | Loss: 0.9976 | Spent: 38.9 secs | LR: 0.000024
INFO:tensorflow:Step 12200 | Loss: 0.9156 | Spent: 38.6 secs | LR: 0.000024
INFO:tensorflow:Step 12300 | Loss: 0.8393 | Spent: 38.1 secs | LR: 0.000023
INFO:tensorflow:Step 12400 | Loss: 0.8486 | Spent: 38.3 secs | LR: 0.000023
INFO:tensorflow:Step 12500 | Loss: 1.1017 | Spent: 38.1 secs | LR: 0.000023
INFO:tensorflow:Step 12600 | Loss: 0.9382 | Spent: 38.2 secs | LR: 0.000022
INFO:tensorflow:Step 12700 | Loss: 0.8543 | Spent: 38.3 secs | LR: 0.000022
INFO:tensorflow:Step 12800 | Loss: 0.9903 | Spent: 37.6 secs | LR: 0.000022
INFO:tensorflow:Step 12900 | Loss: 0.9732 | Spent: 37.8 secs | LR: 0.000021
INFO:tensorflow:Step 13000 | Loss: 0.8583 | Spent: 37.9 secs | LR: 0.000021
INFO:tensorflow:Step 13100 | Loss: 1.0292 | Spent: 38.5 secs | LR: 0.000020
INFO:tensorflow:Step 13200 | Loss: 0.9933 | Spent: 38.7 secs | LR: 0.000020
INFO:tensorflow:Step 13300 | Loss: 1.1365 | Spent: 38.4 secs | LR: 0.000020
INFO:tensorflow:Step 13400 | Loss: 0.8004 | Spent: 38.4 secs | LR: 0.000019
INFO:tensorflow:Step 13500 | Loss: 0.7875 | Spent: 38.4 secs | LR: 0.000019
INFO:tensorflow:Step 13600 | Loss: 0.7817 | Spent: 38.8 secs | LR: 0.000019
INFO:tensorflow:Step 13700 | Loss: 1.1031 | Spent: 38.5 secs | LR: 0.000018
INFO:tensorflow:Step 13800 | Loss: 0.9359 | Spent: 38.6 secs | LR: 0.000018
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.596
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.646     0.731     0.686      1448
   happiness      0.712     0.682     0.697       978
        like      0.528     0.554     0.541       453
       anger      0.489     0.248     0.329       447
        fear      0.391     0.403     0.397        67
    surprise      0.235     0.340     0.278       103
     disgust      0.460     0.461     0.460       471

    accuracy                          0.596      3967
   macro avg      0.494     0.488     0.484      3967
weighted avg      0.594     0.596     0.589      3967

INFO:tensorflow:Best Accuracy: 0.617
Reading ../data/train.txt
INFO:tensorflow:Step 13900 | Loss: 0.7844 | Spent: 72.3 secs | LR: 0.000017
INFO:tensorflow:Step 14000 | Loss: 0.7736 | Spent: 38.1 secs | LR: 0.000017
INFO:tensorflow:Step 14100 | Loss: 0.7941 | Spent: 38.8 secs | LR: 0.000017
INFO:tensorflow:Step 14200 | Loss: 0.9319 | Spent: 38.3 secs | LR: 0.000016
INFO:tensorflow:Step 14300 | Loss: 0.8008 | Spent: 38.5 secs | LR: 0.000016
INFO:tensorflow:Step 14400 | Loss: 0.8172 | Spent: 38.9 secs | LR: 0.000016
INFO:tensorflow:Step 14500 | Loss: 1.1247 | Spent: 38.7 secs | LR: 0.000015
INFO:tensorflow:Step 14600 | Loss: 0.9359 | Spent: 38.6 secs | LR: 0.000015
INFO:tensorflow:Step 14700 | Loss: 0.8079 | Spent: 39.2 secs | LR: 0.000014
INFO:tensorflow:Step 14800 | Loss: 0.9339 | Spent: 39.0 secs | LR: 0.000014
INFO:tensorflow:Step 14900 | Loss: 0.8109 | Spent: 39.0 secs | LR: 0.000014
INFO:tensorflow:Step 15000 | Loss: 0.9960 | Spent: 39.3 secs | LR: 0.000013
INFO:tensorflow:Step 15100 | Loss: 0.7873 | Spent: 38.7 secs | LR: 0.000013
INFO:tensorflow:Step 15200 | Loss: 0.8254 | Spent: 38.8 secs | LR: 0.000013
INFO:tensorflow:Step 15300 | Loss: 0.7774 | Spent: 39.4 secs | LR: 0.000012
INFO:tensorflow:Step 15400 | Loss: 0.9404 | Spent: 39.1 secs | LR: 0.000012
INFO:tensorflow:Step 15500 | Loss: 1.0644 | Spent: 39.1 secs | LR: 0.000011
INFO:tensorflow:Step 15600 | Loss: 0.7835 | Spent: 38.9 secs | LR: 0.000011
INFO:tensorflow:Step 15700 | Loss: 0.7750 | Spent: 38.4 secs | LR: 0.000011
INFO:tensorflow:Step 15800 | Loss: 0.9432 | Spent: 38.4 secs | LR: 0.000010
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.595
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.665     0.681     0.673      1448
   happiness      0.693     0.713     0.703       978
        like      0.565     0.534     0.549       453
       anger      0.394     0.369     0.381       447
        fear      0.426     0.343     0.380        67
    surprise      0.330     0.320     0.325       103
     disgust      0.446     0.452     0.449       471

    accuracy                          0.595      3967
   macro avg      0.503     0.488     0.494      3967
weighted avg      0.591     0.595     0.593      3967

INFO:tensorflow:Best Accuracy: 0.617
Reading ../data/train.txt
INFO:tensorflow:Step 15900 | Loss: 0.7816 | Spent: 70.5 secs | LR: 0.000010
INFO:tensorflow:Step 16000 | Loss: 0.7732 | Spent: 38.0 secs | LR: 0.000010
INFO:tensorflow:Step 16100 | Loss: 1.0210 | Spent: 37.5 secs | LR: 0.000010
INFO:tensorflow:Step 16200 | Loss: 0.7755 | Spent: 37.1 secs | LR: 0.000011
INFO:tensorflow:Step 16300 | Loss: 0.9242 | Spent: 36.9 secs | LR: 0.000011
INFO:tensorflow:Step 16400 | Loss: 0.8829 | Spent: 37.5 secs | LR: 0.000011
INFO:tensorflow:Step 16500 | Loss: 0.7734 | Spent: 37.4 secs | LR: 0.000011
INFO:tensorflow:Step 16600 | Loss: 0.8016 | Spent: 37.2 secs | LR: 0.000011
INFO:tensorflow:Step 16700 | Loss: 0.7761 | Spent: 37.3 secs | LR: 0.000012
INFO:tensorflow:Step 16800 | Loss: 0.7782 | Spent: 37.4 secs | LR: 0.000012
INFO:tensorflow:Step 16900 | Loss: 0.7823 | Spent: 38.0 secs | LR: 0.000012
INFO:tensorflow:Step 17000 | Loss: 0.8804 | Spent: 37.2 secs | LR: 0.000012
INFO:tensorflow:Step 17100 | Loss: 0.7814 | Spent: 37.7 secs | LR: 0.000012
INFO:tensorflow:Step 17200 | Loss: 0.7997 | Spent: 37.3 secs | LR: 0.000013
INFO:tensorflow:Step 17300 | Loss: 1.0547 | Spent: 36.7 secs | LR: 0.000013
INFO:tensorflow:Step 17400 | Loss: 0.8736 | Spent: 38.0 secs | LR: 0.000013
INFO:tensorflow:Step 17500 | Loss: 0.7806 | Spent: 37.0 secs | LR: 0.000013
INFO:tensorflow:Step 17600 | Loss: 0.7967 | Spent: 37.6 secs | LR: 0.000013
INFO:tensorflow:Step 17700 | Loss: 1.0661 | Spent: 37.3 secs | LR: 0.000013
INFO:tensorflow:Step 17800 | Loss: 0.7786 | Spent: 37.0 secs | LR: 0.000014
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.579
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.649     0.674     0.661      1448
   happiness      0.683     0.673     0.678       978
        like      0.617     0.470     0.534       453
       anger      0.373     0.412     0.391       447
        fear      0.481     0.373     0.420        67
    surprise      0.287     0.301     0.294       103
     disgust      0.417     0.444     0.430       471

    accuracy                          0.579      3967
   macro avg      0.501     0.478     0.487      3967
weighted avg      0.583     0.579     0.579      3967

INFO:tensorflow:Best Accuracy: 0.617
5 times not improve the best result, therefore stop training