In [1]:
# Mount Google Drive so the project files (data, vocab, saved models) are
# reachable, then switch into the project's working directory.
from google.colab import drive
drive.mount('/content/gdrive')
import os
# All relative paths below ('../data', '../vocab', '../model') resolve from here.
os.chdir('/content/gdrive/My Drive/finch/tensorflow2/text_classification/clue/main')
Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount("/content/gdrive", force_remount=True).
In [2]:
!pip install transformers
Requirement already satisfied: transformers in /usr/local/lib/python3.6/dist-packages (3.4.0)
Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.6/dist-packages (from transformers) (4.41.1)
Requirement already satisfied: protobuf in /usr/local/lib/python3.6/dist-packages (from transformers) (3.12.4)
Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from transformers) (3.0.12)
Requirement already satisfied: tokenizers==0.9.2 in /usr/local/lib/python3.6/dist-packages (from transformers) (0.9.2)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from transformers) (1.18.5)
Requirement already satisfied: dataclasses; python_version < "3.7" in /usr/local/lib/python3.6/dist-packages (from transformers) (0.7)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers) (20.4)
Requirement already satisfied: sacremoses in /usr/local/lib/python3.6/dist-packages (from transformers) (0.0.43)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.6/dist-packages (from transformers) (2019.12.20)
Requirement already satisfied: sentencepiece!=0.1.92 in /usr/local/lib/python3.6/dist-packages (from transformers) (0.1.94)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers) (2.23.0)
Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.6/dist-packages (from protobuf->transformers) (1.15.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf->transformers) (50.3.2)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers) (2.4.7)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (0.17.0)
Requirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers) (7.1.2)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (3.0.4)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers) (2020.6.20)
In [3]:
from transformers import BertTokenizer, TFBertModel
from sklearn.metrics import classification_report
import os
import json
import time
import logging
import pprint
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

print("TensorFlow Version", tf.__version__)
# tf.test.is_gpu_available() is deprecated (it emits the warning visible in
# this cell's output); tf.config.list_physical_devices('GPU') is the
# documented replacement and is warning-free.
print('GPU Enabled:', bool(tf.config.list_physical_devices('GPU')))
TensorFlow Version 2.3.0
WARNING:tensorflow:From <ipython-input-3-07fefb93fcab>:13: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.
GPU Enabled: True
In [4]:
def get_vocab(f_path):
  """Read one label per line from `f_path` and map each label to its line index.

  Returns a dict {label_string: line_number} with 0-based line numbers;
  trailing whitespace (including the newline) is stripped from each label.
  """
  with open(f_path) as f:
    return {line.rstrip(): idx for idx, line in enumerate(f)}

# Hyper-parameters and file locations for the fine-tuning run.
params = {
  'pretrain_path': 'bert-base-chinese',  # HuggingFace checkpoint id
  'train_path': '../data/train.txt',
  'test_path': '../data/test.txt',
  'batch_size': 16,
  'buffer_size': 31728,  # shuffle buffer; presumably the training-set size — TODO confirm
  'init_lr': 1e-5,  # lower bound of the cyclical LR schedule (see training cell)
  'max_lr': 3e-5,   # upper bound of the cyclical LR schedule
  'label_smooth': .2,
  'n_epochs': 16,
  'num_patience': 7,  # early-stopping patience, in epochs without improvement
}

# Map each emotion label name (one per line in label.txt) to an integer class id.
params['label2idx'] = get_vocab('../vocab/label.txt')

tokenizer = BertTokenizer.from_pretrained(params['pretrain_path'],
                                          lowercase = True,
                                          add_special_tokens = True)
In [5]:
# stream data from text files
def data_generator(f_path, params):
  with open(f_path) as f:
    print('Reading', f_path)
    for line in f:
      line = json.loads(line.rstrip())
      text, label = line['content'], line['label']
      text = list(text)
      text = ['[CLS]'] + text + ['[SEP]']
      text = tokenizer.convert_tokens_to_ids(text)
      text = [idx for idx in text if idx != 100]
      seg = [0] * len(text)
      label = params['label2idx'][label]
      yield (text, seg), int(label)


def dataset(is_training, params):
  """Build a padded, batched tf.data pipeline over the train or test split.

  Args:
    is_training: if True, stream the train file and shuffle; else the test file.
    params: dict with 'train_path'/'test_path', 'buffer_size', 'batch_size'.

  Returns:
    A tf.data.Dataset of ((token_ids, segment_ids), label) batches, padded
    with 0 for token/segment ids and -1 for labels, with prefetching.
  """
  _shapes = (([None], [None]), ())
  _types = ((tf.int32, tf.int32), tf.int32)
  _pads = ((0, 0), -1)

  # The original duplicated the whole pipeline in both branches; only the
  # source file and the shuffle step actually differ, so share the rest.
  f_path = params['train_path'] if is_training else params['test_path']
  ds = tf.data.Dataset.from_generator(
    lambda: data_generator(f_path, params),
    output_shapes = _shapes,
    output_types = _types,)
  if is_training:
    ds = ds.shuffle(params['buffer_size'])
  ds = ds.padded_batch(params['batch_size'], _shapes, _pads)
  ds = ds.prefetch(tf.data.experimental.AUTOTUNE)

  return ds
In [6]:
# input stream ids check
(text, seg), _ = next(data_generator(params['train_path'], params))
print(text)
print(seg)
Reading ../data/train.txt
[101, 112, 872, 4761, 6887, 1914, 840, 1914, 7353, 6818, 3300, 784, 720, 1408, 8043, 1506, 1506, 3300, 4788, 2357, 5456, 4696, 4638, 741, 677, 1091, 4638, 872, 1420, 1521, 872, 2157, 6929, 1779, 4788, 2357, 3221, 686, 4518, 677, 3297, 1920, 4638, 4788, 2357, 8024, 1506, 1506, 8024, 7745, 872, 4638, 1568, 2124, 3221, 6432, 2225, 1217, 2861, 4478, 4105, 2357, 3221, 686, 4518, 677, 3297, 1920, 4638, 4105, 2357, 1568, 1506, 1506, 1506, 112, 112, 4268, 4268, 8024, 1961, 4638, 1928, 1355, 5456, 8013, 2769, 812, 1920, 2812, 7370, 3488, 2094, 6963, 6206, 5436, 677, 3341, 2769, 4692, 1168, 3312, 1928, 5361, 7027, 3300, 1928, 1355, 671, 2137, 3221, 166, 166, 809, 1184, 1931, 1168, 4638, 8024, 872, 6432, 3221, 679, 3221, 8043, 138, 4495, 4567, 140, 102]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
In [7]:
class BertFinetune(tf.keras.Model):
  """Pretrained Chinese BERT with a small dense classification head."""

  def __init__(self, params):
    super().__init__()
    # Warm-start the encoder from a further-pretrained checkpoint; by_name +
    # skip_mismatch keeps layers whose names/shapes don't line up untouched.
    self.bert = TFBertModel.from_pretrained(params['pretrain_path'], trainable=True)
    self.bert.load_weights('../model/bert_further_pretrain.h5', by_name=True, skip_mismatch=True)
    self.drop_1 = tf.keras.layers.Dropout(.1)
    self.fc = tf.keras.layers.Dense(300, tf.nn.swish, name='down_stream/fc')
    self.drop_2 = tf.keras.layers.Dropout(.1)
    self.out = tf.keras.layers.Dense(len(params['label2idx']), name='down_stream/out')

  def call(self, bert_inputs, training):
    """Map [token_ids, attention_mask, segment_ids] to per-class logits."""
    bert_inputs = [tf.cast(tensor, tf.int32) for tensor in bert_inputs]
    # Index 1 of the TFBertModel output tuple is the pooled representation.
    pooled = self.bert(bert_inputs, training=training)[1]
    hidden = self.fc(self.drop_1(pooled, training=training))
    return self.out(self.drop_2(hidden, training=training))
In [8]:
# Instantiate and build the model on three (batch, seq_len) int inputs
# (token ids, attention mask, segment ids), then print one encoder weight
# to visually confirm pretrained values were loaded (not random init).
model = BertFinetune(params)
model.build([[None, None], [None, None], [None, None]])
print(model.weights[5])
Some layers from the model checkpoint at bert-base-chinese were not used when initializing TFBertModel: ['nsp___cls', 'mlm___cls']
- This IS expected if you are initializing TFBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).
- This IS NOT expected if you are initializing TFBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
All the layers of TFBertModel were initialized from the model checkpoint at bert-base-chinese.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFBertModel for predictions without further training.
<tf.Variable 'tf_bert_model/bert/encoder/layer_._0/attention/self/query/kernel:0' shape=(768, 768) dtype=float32, numpy=
array([[ 1.17034979e-01,  1.03050740e-02,  3.92456539e-03, ...,
        -4.94852997e-02,  1.51344994e-02,  1.61471162e-02],
       [-1.14780711e-02, -1.14245294e-02, -5.72041189e-03, ...,
         2.50339527e-02, -9.23038423e-02, -3.46598551e-02],
       [ 7.27476738e-03, -1.93304732e-03,  3.19076665e-02, ...,
         3.57448182e-04, -4.74225469e-02,  8.60330183e-05],
       ...,
       [ 9.20681655e-03, -4.24182415e-03,  1.04002036e-01, ...,
         6.80211261e-02, -3.62900496e-02,  9.39155445e-02],
       [ 7.00696744e-03,  3.88552323e-02, -3.30189355e-02, ...,
         1.01353722e-02,  4.20274995e-02, -2.48862822e-02],
       [-8.36427808e-02,  1.15034226e-02, -3.14635271e-03, ...,
         7.24309906e-02, -1.34374751e-02,  1.20966239e-02]], dtype=float32)>
In [9]:
# Triangular2 cyclical learning rate: oscillates between init_lr and max_lr,
# halving the amplitude each cycle. step_size is the half-cycle length in
# steps (here ~4 epochs' worth of batches, assuming buffer_size ≈ dataset
# size — TODO confirm).
step_size = 4 * params['buffer_size'] // params['batch_size']
decay_lr = tfa.optimizers.Triangular2CyclicalLearningRate(
  initial_learning_rate = params['init_lr'],
  maximal_learning_rate = params['max_lr'],
  step_size = step_size,)
optim = tf.optimizers.Adam(params['init_lr'])
global_step = 0

best_acc = .0
count = 0  # epochs since the last test-accuracy improvement (early stopping)

t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)

for _ in range(params['n_epochs']):
  # TRAINING
  for ((text, seg), labels) in dataset(is_training=True, params=params):
    with tf.GradientTape() as tape:
      # Model inputs: token ids, attention mask (sign(ids) → 1 for real
      # tokens, 0 for padding), segment ids.
      logits = model([text, tf.sign(text), seg], training=True)
      # Label-smoothed softmax cross-entropy (TF1-compat loss op).
      loss = tf.compat.v1.losses.softmax_cross_entropy(
        tf.one_hot(labels, len(params['label2idx']), dtype=tf.float32),
        logits = logits,
        label_smoothing = params['label_smooth'],)
      
    # Set the LR for this step from the schedule, then take a
    # gradient-clipped Adam step.
    optim.lr.assign(decay_lr(global_step))
    grads = tape.gradient(loss, model.trainable_variables)
    grads, _ = tf.clip_by_global_norm(grads, 5.)
    optim.apply_gradients(zip(grads, model.trainable_variables))
    
    if global_step % 100 == 0:
      logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
        global_step, loss.numpy().item(), time.time()-t0, optim.lr.numpy().item()))
      t0 = time.time()
    global_step += 1
  
  # EVALUATION
  m = tf.keras.metrics.Accuracy()
  intent_true = []
  intent_pred = []

  for ((text, seg), labels) in dataset(is_training=False, params=params):
    logits = model([text, tf.sign(text), seg], training=False)
    y_intent = tf.argmax(logits, -1)
    m.update_state(y_true=labels, y_pred=y_intent)
    intent_true += labels.numpy().flatten().tolist()
    intent_pred += y_intent.numpy().flatten().tolist()

  acc = m.result().numpy()
  logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc))
  logger.info('\n'+classification_report(y_true = intent_true,
                                         y_pred = intent_pred,
                                         labels = list(params['label2idx'].values()),
                                         target_names = list(params['label2idx'].keys()),
                                         digits = 3))

  # Checkpoint only on improvement; otherwise tick the patience counter.
  if acc > best_acc:
    best_acc = acc
    model.save_weights('../model/bert_finetune')
    count = 0
  else:
    count += 1
  logger.info("Best Accuracy: {:.3f}".format(best_acc))

  if count == params['num_patience']:
    print(params['num_patience'], "times not improve the best result, therefore stop training")
    break
Reading ../data/train.txt
INFO:tensorflow:Step 0 | Loss: 2.4657 | Spent: 7.9 secs | LR: 0.000010
INFO:tensorflow:Step 100 | Loss: 1.6077 | Spent: 52.5 secs | LR: 0.000010
INFO:tensorflow:Step 200 | Loss: 1.6519 | Spent: 52.2 secs | LR: 0.000011
INFO:tensorflow:Step 300 | Loss: 1.5934 | Spent: 51.4 secs | LR: 0.000011
INFO:tensorflow:Step 400 | Loss: 1.6724 | Spent: 52.7 secs | LR: 0.000011
INFO:tensorflow:Step 500 | Loss: 1.5454 | Spent: 53.2 secs | LR: 0.000011
INFO:tensorflow:Step 600 | Loss: 1.3832 | Spent: 51.4 secs | LR: 0.000012
INFO:tensorflow:Step 700 | Loss: 1.9512 | Spent: 52.4 secs | LR: 0.000012
INFO:tensorflow:Step 800 | Loss: 1.4674 | Spent: 51.8 secs | LR: 0.000012
INFO:tensorflow:Step 900 | Loss: 1.6502 | Spent: 52.0 secs | LR: 0.000012
INFO:tensorflow:Step 1000 | Loss: 1.4109 | Spent: 51.3 secs | LR: 0.000013
INFO:tensorflow:Step 1100 | Loss: 1.3613 | Spent: 52.8 secs | LR: 0.000013
INFO:tensorflow:Step 1200 | Loss: 1.6511 | Spent: 51.8 secs | LR: 0.000013
INFO:tensorflow:Step 1300 | Loss: 1.6729 | Spent: 53.0 secs | LR: 0.000013
INFO:tensorflow:Step 1400 | Loss: 1.6051 | Spent: 52.0 secs | LR: 0.000014
INFO:tensorflow:Step 1500 | Loss: 1.6727 | Spent: 51.2 secs | LR: 0.000014
INFO:tensorflow:Step 1600 | Loss: 1.6040 | Spent: 52.6 secs | LR: 0.000014
INFO:tensorflow:Step 1700 | Loss: 1.3703 | Spent: 53.2 secs | LR: 0.000014
INFO:tensorflow:Step 1800 | Loss: 1.2714 | Spent: 50.1 secs | LR: 0.000015
INFO:tensorflow:Step 1900 | Loss: 1.8310 | Spent: 51.7 secs | LR: 0.000015
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.609
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.618     0.804     0.699      1448
   happiness      0.590     0.772     0.669       978
        like      0.612     0.415     0.495       453
       anger      0.530     0.275     0.362       447
        fear      0.686     0.358     0.471        67
    surprise      0.560     0.272     0.366       103
     disgust      0.754     0.287     0.415       471

    accuracy                          0.609      3967
   macro avg      0.621     0.455     0.497      3967
weighted avg      0.616     0.609     0.584      3967

INFO:tensorflow:Best Accuracy: 0.609
Reading ../data/train.txt
INFO:tensorflow:Step 2000 | Loss: 1.3704 | Spent: 98.7 secs | LR: 0.000015
INFO:tensorflow:Step 2100 | Loss: 1.2159 | Spent: 51.6 secs | LR: 0.000015
INFO:tensorflow:Step 2200 | Loss: 1.3923 | Spent: 51.9 secs | LR: 0.000016
INFO:tensorflow:Step 2300 | Loss: 1.4696 | Spent: 53.2 secs | LR: 0.000016
INFO:tensorflow:Step 2400 | Loss: 1.5440 | Spent: 52.7 secs | LR: 0.000016
INFO:tensorflow:Step 2500 | Loss: 1.1541 | Spent: 51.9 secs | LR: 0.000016
INFO:tensorflow:Step 2600 | Loss: 1.4194 | Spent: 52.9 secs | LR: 0.000017
INFO:tensorflow:Step 2700 | Loss: 1.2069 | Spent: 52.3 secs | LR: 0.000017
INFO:tensorflow:Step 2800 | Loss: 1.3124 | Spent: 51.4 secs | LR: 0.000017
INFO:tensorflow:Step 2900 | Loss: 1.6382 | Spent: 52.5 secs | LR: 0.000017
INFO:tensorflow:Step 3000 | Loss: 1.4531 | Spent: 52.4 secs | LR: 0.000018
INFO:tensorflow:Step 3100 | Loss: 1.1757 | Spent: 51.2 secs | LR: 0.000018
INFO:tensorflow:Step 3200 | Loss: 1.1861 | Spent: 52.6 secs | LR: 0.000018
INFO:tensorflow:Step 3300 | Loss: 1.2443 | Spent: 52.2 secs | LR: 0.000018
INFO:tensorflow:Step 3400 | Loss: 1.4613 | Spent: 51.6 secs | LR: 0.000019
INFO:tensorflow:Step 3500 | Loss: 1.3806 | Spent: 51.6 secs | LR: 0.000019
INFO:tensorflow:Step 3600 | Loss: 1.2805 | Spent: 51.5 secs | LR: 0.000019
INFO:tensorflow:Step 3700 | Loss: 1.4037 | Spent: 52.4 secs | LR: 0.000019
INFO:tensorflow:Step 3800 | Loss: 1.2319 | Spent: 52.1 secs | LR: 0.000020
INFO:tensorflow:Step 3900 | Loss: 1.1012 | Spent: 52.0 secs | LR: 0.000020
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.623
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.642     0.785     0.707      1448
   happiness      0.692     0.693     0.693       978
        like      0.620     0.494     0.550       453
       anger      0.475     0.376     0.419       447
        fear      0.500     0.358     0.417        67
    surprise      0.464     0.252     0.327       103
     disgust      0.535     0.452     0.490       471

    accuracy                          0.623      3967
   macro avg      0.561     0.487     0.515      3967
weighted avg      0.613     0.623     0.613      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 4000 | Loss: 1.2545 | Spent: 98.4 secs | LR: 0.000020
INFO:tensorflow:Step 4100 | Loss: 0.9937 | Spent: 52.6 secs | LR: 0.000020
INFO:tensorflow:Step 4200 | Loss: 1.2786 | Spent: 52.1 secs | LR: 0.000021
INFO:tensorflow:Step 4300 | Loss: 1.1370 | Spent: 52.0 secs | LR: 0.000021
INFO:tensorflow:Step 4400 | Loss: 1.5074 | Spent: 51.9 secs | LR: 0.000021
INFO:tensorflow:Step 4500 | Loss: 1.1501 | Spent: 51.5 secs | LR: 0.000021
INFO:tensorflow:Step 4600 | Loss: 1.3676 | Spent: 52.4 secs | LR: 0.000022
INFO:tensorflow:Step 4700 | Loss: 1.2441 | Spent: 51.2 secs | LR: 0.000022
INFO:tensorflow:Step 4800 | Loss: 1.3548 | Spent: 53.0 secs | LR: 0.000022
INFO:tensorflow:Step 4900 | Loss: 1.0256 | Spent: 52.1 secs | LR: 0.000022
INFO:tensorflow:Step 5000 | Loss: 1.3658 | Spent: 51.3 secs | LR: 0.000023
INFO:tensorflow:Step 5100 | Loss: 1.1033 | Spent: 52.7 secs | LR: 0.000023
INFO:tensorflow:Step 5200 | Loss: 1.4401 | Spent: 52.1 secs | LR: 0.000023
INFO:tensorflow:Step 5300 | Loss: 1.2112 | Spent: 51.2 secs | LR: 0.000023
INFO:tensorflow:Step 5400 | Loss: 1.0271 | Spent: 52.2 secs | LR: 0.000024
INFO:tensorflow:Step 5500 | Loss: 1.4420 | Spent: 51.4 secs | LR: 0.000024
INFO:tensorflow:Step 5600 | Loss: 1.1698 | Spent: 52.3 secs | LR: 0.000024
INFO:tensorflow:Step 5700 | Loss: 1.1475 | Spent: 52.2 secs | LR: 0.000024
INFO:tensorflow:Step 5800 | Loss: 1.2101 | Spent: 52.2 secs | LR: 0.000025
INFO:tensorflow:Step 5900 | Loss: 1.2013 | Spent: 52.1 secs | LR: 0.000025
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.618
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.630     0.789     0.701      1448
   happiness      0.690     0.679     0.685       978
        like      0.570     0.563     0.567       453
       anger      0.456     0.374     0.411       447
        fear      0.619     0.388     0.477        67
    surprise      0.449     0.301     0.360       103
     disgust      0.625     0.355     0.453       471

    accuracy                          0.618      3967
   macro avg      0.577     0.493     0.522      3967
weighted avg      0.613     0.618     0.607      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 6000 | Loss: 1.0648 | Spent: 95.6 secs | LR: 0.000025
INFO:tensorflow:Step 6100 | Loss: 1.4518 | Spent: 51.8 secs | LR: 0.000025
INFO:tensorflow:Step 6200 | Loss: 0.9293 | Spent: 50.1 secs | LR: 0.000026
INFO:tensorflow:Step 6300 | Loss: 1.0803 | Spent: 51.3 secs | LR: 0.000026
INFO:tensorflow:Step 6400 | Loss: 1.1286 | Spent: 53.0 secs | LR: 0.000026
INFO:tensorflow:Step 6500 | Loss: 0.9707 | Spent: 50.5 secs | LR: 0.000026
INFO:tensorflow:Step 6600 | Loss: 1.0772 | Spent: 51.6 secs | LR: 0.000027
INFO:tensorflow:Step 6700 | Loss: 1.1677 | Spent: 52.3 secs | LR: 0.000027
INFO:tensorflow:Step 6800 | Loss: 1.0315 | Spent: 51.8 secs | LR: 0.000027
INFO:tensorflow:Step 6900 | Loss: 1.4247 | Spent: 50.8 secs | LR: 0.000027
INFO:tensorflow:Step 7000 | Loss: 1.0438 | Spent: 52.9 secs | LR: 0.000028
INFO:tensorflow:Step 7100 | Loss: 1.3262 | Spent: 51.7 secs | LR: 0.000028
INFO:tensorflow:Step 7200 | Loss: 1.1865 | Spent: 51.7 secs | LR: 0.000028
INFO:tensorflow:Step 7300 | Loss: 1.2423 | Spent: 52.7 secs | LR: 0.000028
INFO:tensorflow:Step 7400 | Loss: 1.1526 | Spent: 52.2 secs | LR: 0.000029
INFO:tensorflow:Step 7500 | Loss: 1.4292 | Spent: 51.6 secs | LR: 0.000029
INFO:tensorflow:Step 7600 | Loss: 1.0350 | Spent: 52.7 secs | LR: 0.000029
INFO:tensorflow:Step 7700 | Loss: 1.4139 | Spent: 52.3 secs | LR: 0.000029
INFO:tensorflow:Step 7800 | Loss: 0.9457 | Spent: 51.1 secs | LR: 0.000030
INFO:tensorflow:Step 7900 | Loss: 1.2751 | Spent: 52.2 secs | LR: 0.000030
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.596
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.640     0.712     0.674      1448
   happiness      0.663     0.708     0.684       978
        like      0.527     0.554     0.540       453
       anger      0.457     0.275     0.344       447
        fear      0.641     0.373     0.472        67
    surprise      0.464     0.252     0.327       103
     disgust      0.456     0.456     0.456       471

    accuracy                          0.596      3967
   macro avg      0.550     0.476     0.500      3967
weighted avg      0.586     0.596     0.586      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 8000 | Loss: 0.8293 | Spent: 96.6 secs | LR: 0.000030
INFO:tensorflow:Step 8100 | Loss: 1.2590 | Spent: 50.5 secs | LR: 0.000030
INFO:tensorflow:Step 8200 | Loss: 0.9160 | Spent: 52.0 secs | LR: 0.000029
INFO:tensorflow:Step 8300 | Loss: 1.0591 | Spent: 52.1 secs | LR: 0.000029
INFO:tensorflow:Step 8400 | Loss: 1.1453 | Spent: 52.4 secs | LR: 0.000029
INFO:tensorflow:Step 8500 | Loss: 1.2780 | Spent: 52.4 secs | LR: 0.000029
INFO:tensorflow:Step 8600 | Loss: 1.0326 | Spent: 51.8 secs | LR: 0.000028
INFO:tensorflow:Step 8700 | Loss: 1.2354 | Spent: 51.6 secs | LR: 0.000028
INFO:tensorflow:Step 8800 | Loss: 1.0220 | Spent: 52.3 secs | LR: 0.000028
INFO:tensorflow:Step 8900 | Loss: 1.0607 | Spent: 51.8 secs | LR: 0.000028
INFO:tensorflow:Step 9000 | Loss: 1.1424 | Spent: 52.0 secs | LR: 0.000027
INFO:tensorflow:Step 9100 | Loss: 1.4936 | Spent: 51.0 secs | LR: 0.000027
INFO:tensorflow:Step 9200 | Loss: 1.1461 | Spent: 52.9 secs | LR: 0.000027
INFO:tensorflow:Step 9300 | Loss: 0.8976 | Spent: 51.8 secs | LR: 0.000027
INFO:tensorflow:Step 9400 | Loss: 0.9495 | Spent: 51.4 secs | LR: 0.000026
INFO:tensorflow:Step 9500 | Loss: 1.0473 | Spent: 53.0 secs | LR: 0.000026
INFO:tensorflow:Step 9600 | Loss: 1.1852 | Spent: 52.2 secs | LR: 0.000026
INFO:tensorflow:Step 9700 | Loss: 1.2617 | Spent: 51.8 secs | LR: 0.000026
INFO:tensorflow:Step 9800 | Loss: 0.8494 | Spent: 51.7 secs | LR: 0.000025
INFO:tensorflow:Step 9900 | Loss: 0.8234 | Spent: 52.9 secs | LR: 0.000025
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.594
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.653     0.706     0.678      1448
   happiness      0.696     0.651     0.673       978
        like      0.563     0.512     0.536       453
       anger      0.407     0.438     0.422       447
        fear      0.482     0.403     0.439        67
    surprise      0.411     0.291     0.341       103
     disgust      0.458     0.450     0.454       471

    accuracy                          0.594      3967
   macro avg      0.524     0.493     0.506      3967
weighted avg      0.593     0.594     0.592      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 10000 | Loss: 1.0128 | Spent: 95.1 secs | LR: 0.000025
INFO:tensorflow:Step 10100 | Loss: 0.9964 | Spent: 52.4 secs | LR: 0.000025
INFO:tensorflow:Step 10200 | Loss: 0.7933 | Spent: 53.1 secs | LR: 0.000024
INFO:tensorflow:Step 10300 | Loss: 1.0881 | Spent: 51.7 secs | LR: 0.000024
INFO:tensorflow:Step 10400 | Loss: 0.8109 | Spent: 51.2 secs | LR: 0.000024
INFO:tensorflow:Step 10500 | Loss: 0.8347 | Spent: 52.6 secs | LR: 0.000024
INFO:tensorflow:Step 10600 | Loss: 0.9485 | Spent: 52.4 secs | LR: 0.000023
INFO:tensorflow:Step 10700 | Loss: 0.8736 | Spent: 52.3 secs | LR: 0.000023
INFO:tensorflow:Step 10800 | Loss: 0.9428 | Spent: 50.9 secs | LR: 0.000023
INFO:tensorflow:Step 10900 | Loss: 0.7979 | Spent: 51.6 secs | LR: 0.000023
INFO:tensorflow:Step 11000 | Loss: 1.0312 | Spent: 52.9 secs | LR: 0.000022
INFO:tensorflow:Step 11100 | Loss: 1.0309 | Spent: 52.3 secs | LR: 0.000022
INFO:tensorflow:Step 11200 | Loss: 1.0386 | Spent: 52.0 secs | LR: 0.000022
INFO:tensorflow:Step 11300 | Loss: 1.1921 | Spent: 52.5 secs | LR: 0.000022
INFO:tensorflow:Step 11400 | Loss: 0.8878 | Spent: 51.9 secs | LR: 0.000021
INFO:tensorflow:Step 11500 | Loss: 0.9012 | Spent: 53.2 secs | LR: 0.000021
INFO:tensorflow:Step 11600 | Loss: 0.8559 | Spent: 53.2 secs | LR: 0.000021
INFO:tensorflow:Step 11700 | Loss: 0.8542 | Spent: 54.4 secs | LR: 0.000020
INFO:tensorflow:Step 11800 | Loss: 1.0043 | Spent: 52.4 secs | LR: 0.000020
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.600
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.638     0.740     0.685      1448
   happiness      0.703     0.657     0.679       978
        like      0.544     0.530     0.537       453
       anger      0.411     0.380     0.395       447
        fear      0.571     0.358     0.440        67
    surprise      0.383     0.301     0.337       103
     disgust      0.506     0.425     0.462       471

    accuracy                          0.600      3967
   macro avg      0.537     0.484     0.505      3967
weighted avg      0.594     0.600     0.594      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 11900 | Loss: 1.0174 | Spent: 97.7 secs | LR: 0.000020
INFO:tensorflow:Step 12000 | Loss: 0.8801 | Spent: 52.7 secs | LR: 0.000020
INFO:tensorflow:Step 12100 | Loss: 0.8958 | Spent: 53.7 secs | LR: 0.000019
INFO:tensorflow:Step 12200 | Loss: 0.9331 | Spent: 53.6 secs | LR: 0.000019
INFO:tensorflow:Step 12300 | Loss: 0.9086 | Spent: 52.9 secs | LR: 0.000019
INFO:tensorflow:Step 12400 | Loss: 0.7893 | Spent: 51.6 secs | LR: 0.000019
INFO:tensorflow:Step 12500 | Loss: 0.8025 | Spent: 54.2 secs | LR: 0.000018
INFO:tensorflow:Step 12600 | Loss: 1.1277 | Spent: 52.7 secs | LR: 0.000018
INFO:tensorflow:Step 12700 | Loss: 0.9814 | Spent: 52.6 secs | LR: 0.000018
INFO:tensorflow:Step 12800 | Loss: 0.8136 | Spent: 52.4 secs | LR: 0.000018
INFO:tensorflow:Step 12900 | Loss: 0.7849 | Spent: 52.3 secs | LR: 0.000017
INFO:tensorflow:Step 13000 | Loss: 0.7934 | Spent: 52.8 secs | LR: 0.000017
INFO:tensorflow:Step 13100 | Loss: 1.1223 | Spent: 52.1 secs | LR: 0.000017
INFO:tensorflow:Step 13200 | Loss: 0.7820 | Spent: 51.0 secs | LR: 0.000017
INFO:tensorflow:Step 13300 | Loss: 1.0456 | Spent: 53.7 secs | LR: 0.000016
INFO:tensorflow:Step 13400 | Loss: 0.7805 | Spent: 51.8 secs | LR: 0.000016
INFO:tensorflow:Step 13500 | Loss: 0.8692 | Spent: 52.1 secs | LR: 0.000016
INFO:tensorflow:Step 13600 | Loss: 0.7875 | Spent: 52.6 secs | LR: 0.000016
INFO:tensorflow:Step 13700 | Loss: 0.7970 | Spent: 52.3 secs | LR: 0.000015
INFO:tensorflow:Step 13800 | Loss: 0.8613 | Spent: 53.1 secs | LR: 0.000015
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.591
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.688     0.655     0.671      1448
   happiness      0.652     0.714     0.681       978
        like      0.554     0.512     0.532       453
       anger      0.382     0.438     0.408       447
        fear      0.551     0.403     0.466        67
    surprise      0.311     0.320     0.316       103
     disgust      0.486     0.446     0.465       471

    accuracy                          0.591      3967
   macro avg      0.518     0.498     0.506      3967
weighted avg      0.593     0.591     0.591      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 13900 | Loss: 0.7844 | Spent: 95.0 secs | LR: 0.000015
INFO:tensorflow:Step 14000 | Loss: 0.8876 | Spent: 52.3 secs | LR: 0.000015
INFO:tensorflow:Step 14100 | Loss: 0.9479 | Spent: 53.1 secs | LR: 0.000014
INFO:tensorflow:Step 14200 | Loss: 0.8110 | Spent: 53.9 secs | LR: 0.000014
INFO:tensorflow:Step 14300 | Loss: 0.8053 | Spent: 52.6 secs | LR: 0.000014
INFO:tensorflow:Step 14400 | Loss: 0.7739 | Spent: 51.4 secs | LR: 0.000014
INFO:tensorflow:Step 14500 | Loss: 0.9199 | Spent: 52.9 secs | LR: 0.000013
INFO:tensorflow:Step 14600 | Loss: 0.7794 | Spent: 52.5 secs | LR: 0.000013
INFO:tensorflow:Step 14700 | Loss: 0.7884 | Spent: 52.1 secs | LR: 0.000013
INFO:tensorflow:Step 14800 | Loss: 0.8832 | Spent: 51.3 secs | LR: 0.000013
INFO:tensorflow:Step 14900 | Loss: 0.9401 | Spent: 52.9 secs | LR: 0.000012
INFO:tensorflow:Step 15000 | Loss: 0.9802 | Spent: 52.2 secs | LR: 0.000012
INFO:tensorflow:Step 15100 | Loss: 0.9639 | Spent: 52.4 secs | LR: 0.000012
INFO:tensorflow:Step 15200 | Loss: 0.9615 | Spent: 52.6 secs | LR: 0.000012
INFO:tensorflow:Step 15300 | Loss: 0.7789 | Spent: 51.3 secs | LR: 0.000011
INFO:tensorflow:Step 15400 | Loss: 0.7816 | Spent: 52.1 secs | LR: 0.000011
INFO:tensorflow:Step 15500 | Loss: 1.0715 | Spent: 52.1 secs | LR: 0.000011
INFO:tensorflow:Step 15600 | Loss: 0.9279 | Spent: 52.8 secs | LR: 0.000011
INFO:tensorflow:Step 15700 | Loss: 0.8965 | Spent: 52.7 secs | LR: 0.000010
INFO:tensorflow:Step 15800 | Loss: 0.9517 | Spent: 51.8 secs | LR: 0.000010
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.598
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.667     0.698     0.682      1448
   happiness      0.695     0.679     0.687       978
        like      0.550     0.519     0.534       453
       anger      0.405     0.468     0.434       447
        fear      0.500     0.388     0.437        67
    surprise      0.312     0.330     0.321       103
     disgust      0.494     0.412     0.449       471

    accuracy                          0.598      3967
   macro avg      0.518     0.499     0.506      3967
weighted avg      0.599     0.598     0.597      3967

INFO:tensorflow:Best Accuracy: 0.623
Reading ../data/train.txt
INFO:tensorflow:Step 15900 | Loss: 0.8018 | Spent: 97.4 secs | LR: 0.000010
INFO:tensorflow:Step 16000 | Loss: 0.7772 | Spent: 53.2 secs | LR: 0.000010
INFO:tensorflow:Step 16100 | Loss: 0.8860 | Spent: 52.9 secs | LR: 0.000010
INFO:tensorflow:Step 16200 | Loss: 0.8194 | Spent: 52.5 secs | LR: 0.000010
INFO:tensorflow:Step 16300 | Loss: 0.9774 | Spent: 53.1 secs | LR: 0.000011
INFO:tensorflow:Step 16400 | Loss: 0.7771 | Spent: 52.5 secs | LR: 0.000011
INFO:tensorflow:Step 16500 | Loss: 0.7768 | Spent: 52.6 secs | LR: 0.000011
INFO:tensorflow:Step 16600 | Loss: 0.7751 | Spent: 51.3 secs | LR: 0.000011
INFO:tensorflow:Step 16700 | Loss: 0.8277 | Spent: 53.1 secs | LR: 0.000011
INFO:tensorflow:Step 16800 | Loss: 0.7716 | Spent: 51.4 secs | LR: 0.000011
INFO:tensorflow:Step 16900 | Loss: 0.8081 | Spent: 52.6 secs | LR: 0.000011
INFO:tensorflow:Step 17000 | Loss: 0.7719 | Spent: 52.7 secs | LR: 0.000011
INFO:tensorflow:Step 17100 | Loss: 1.0267 | Spent: 50.8 secs | LR: 0.000012
INFO:tensorflow:Step 17200 | Loss: 0.7776 | Spent: 53.0 secs | LR: 0.000012
INFO:tensorflow:Step 17300 | Loss: 0.7736 | Spent: 52.0 secs | LR: 0.000012
INFO:tensorflow:Step 17400 | Loss: 0.7784 | Spent: 52.4 secs | LR: 0.000012
INFO:tensorflow:Step 17500 | Loss: 0.7850 | Spent: 52.7 secs | LR: 0.000012
INFO:tensorflow:Step 17600 | Loss: 1.0499 | Spent: 51.8 secs | LR: 0.000012
INFO:tensorflow:Step 17700 | Loss: 0.7800 | Spent: 53.7 secs | LR: 0.000012
INFO:tensorflow:Step 17800 | Loss: 0.7763 | Spent: 52.5 secs | LR: 0.000012
Reading ../data/test.txt
INFO:tensorflow:Evaluation: Testing Accuracy: 0.594
INFO:tensorflow:
              precision    recall  f1-score   support

     sadness      0.633     0.742     0.684      1448
   happiness      0.663     0.687     0.675       978
        like      0.604     0.475     0.532       453
       anger      0.434     0.277     0.338       447
        fear      0.381     0.478     0.424        67
    surprise      0.316     0.301     0.308       103
     disgust      0.478     0.439     0.458       471

    accuracy                          0.594      3967
   macro avg      0.501     0.486     0.488      3967
weighted avg      0.584     0.594     0.584      3967

INFO:tensorflow:Best Accuracy: 0.623
7 times not improve the best result, therefore stop training