In [ ]:
# Download and unpack Elasticsearch 7.9.0, then hand the files to the daemon
# user so the server can later run as a non-root process.
!wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.0-linux-x86_64.tar.gz
!tar -xzf elasticsearch-7.9.0-linux-x86_64.tar.gz
!chown -R daemon:daemon elasticsearch-7.9.0
--2020-11-16 08:58:45--  https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.0-linux-x86_64.tar.gz
Resolving artifacts.elastic.co (artifacts.elastic.co)... 151.101.2.222, 151.101.66.222, 151.101.130.222, ...
Connecting to artifacts.elastic.co (artifacts.elastic.co)|151.101.2.222|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 319531145 (305M) [application/x-gzip]
Saving to: ‘elasticsearch-7.9.0-linux-x86_64.tar.gz’

elasticsearch-7.9.0 100%[===================>] 304.73M  21.4MB/s    in 12s     

2020-11-16 08:58:58 (25.5 MB/s) - ‘elasticsearch-7.9.0-linux-x86_64.tar.gz’ saved [319531145/319531145]

In [ ]:
# Python client for Elasticsearch and the Hugging Face tokenizer used below.
!pip install elasticsearch -q
!pip install transformers -q
     |████████████████████████████████| 327kB 14.1MB/s 
     |████████████████████████████████| 1.3MB 11.5MB/s 
     |████████████████████████████████| 890kB 16.5MB/s 
     |████████████████████████████████| 2.9MB 46.1MB/s 
     |████████████████████████████████| 1.1MB 41.1MB/s 
  Building wheel for sacremoses (setup.py) ... done
In [ ]:
from subprocess import Popen, PIPE, STDOUT
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from transformers import BertTokenizer

import os
import time
import pprint
import numpy as np
import tensorflow_hub as hub
In [ ]:
# Launch Elasticsearch as a background process. The server refuses to run as
# root, so preexec_fn switches to uid 1 (the daemon user chown'ed to above).
es_server = Popen(
  ['elasticsearch-7.9.0/bin/elasticsearch'],
  stdout = PIPE, stderr = STDOUT,
  preexec_fn = lambda: os.setuid(1))
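Popen returns immediately while Elasticsearch typically needs tens of seconds to boot, so the health check below can fail if run too early. A minimal readiness wait, sketched with the Python client (es_probe, the 60-second deadline, and the 2-second poll interval are placeholders, not part of the original notebook):
In [ ]:
# Poll until the node answers instead of sleeping a fixed amount of time.
es_probe = Elasticsearch()
deadline = time.time() + 60
while not es_probe.ping():
  if time.time() > deadline:
    raise RuntimeError('Elasticsearch did not start within 60 seconds')
  time.sleep(2)
print('Elasticsearch is up')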
In [ ]:
!curl -X GET "localhost:9200/"
{
  "name" : "a1b053ed1dd3",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "KnwBQ6MNSluGjjA8DiIVyw",
  "version" : {
    "number" : "7.9.0",
    "build_flavor" : "default",
    "build_type" : "tar",
    "build_hash" : "a479a2a7fce0389512d6a9361301708b92dff667",
    "build_date" : "2020-08-11T21:36:48.204330Z",
    "build_snapshot" : false,
    "lucene_version" : "8.6.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
In [ ]:
def gen_data():
  # Each line of basic.txt is a "question<SEP>answer" pair. The question is
  # tokenized character by character (standard for Chinese BERT), encoded with
  # the frozen encoder, and stored together with its pooled 768-d vector so
  # Elasticsearch can later score queries by cosine similarity.
  with open('/content/gdrive/My Drive/finch/es/free_chat/data/basic.txt') as f:
    for line in f:
      line = line.rstrip()
      q, a = line.split('<SEP>')
      bert_inp = ['[CLS]'] + list(q) + ['[SEP]']
      bert_inp = tokenizer.convert_tokens_to_ids(bert_inp)
      bert_seg = [0] * len(bert_inp)
      bert_mask = [1] * len(bert_inp)
      # The encoder returns (pooled_output, sequence_output); keep the pooled vector.
      res, _ = encoder([np.asarray([bert_inp], dtype=np.int32),
                        np.asarray([bert_mask], dtype=np.int32),
                        np.asarray([bert_seg], dtype=np.int32)])
      yield {
        '_index': 'chatbot',
        'question': q,
        'answer': a,
        'question_embedding': res[0].numpy(),}
In [ ]:
from google.colab import drive
drive.mount('/content/gdrive')
Mounted at /content/gdrive
In [ ]:
# Tokenizer that maps Chinese characters to BERT vocabulary ids.
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese',
                                           lowercase = True,
                                           add_special_tokens = True)

# Frozen Chinese BERT from TF Hub; its pooled output is a 768-d sentence vector.
encoder = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/2",
                         trainable = False)
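The tokenize-and-encode steps are written out twice, once in gen_data and again in the query loop further down. A small helper would keep them in one place (embed is a hypothetical name, not part of the original notebook):
In [ ]:
def embed(text):
  # Character-level BERT encoding; returns the pooled 768-d vector as a numpy array.
  ids = tokenizer.convert_tokens_to_ids(['[CLS]'] + list(text) + ['[SEP]'])
  seg = [0] * len(ids)
  mask = [1] * len(ids)
  pooled, _ = encoder([np.asarray([ids], dtype=np.int32),
                       np.asarray([mask], dtype=np.int32),
                       np.asarray([seg], dtype=np.int32)])
  return pooled[0].numpy()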
In [ ]:
es = Elasticsearch()
print(es.ping())

# Create the index, then map question_embedding as a 768-d dense_vector so the
# script_score query below can compute cosine similarity against it.
es.indices.create(index='chatbot')

mapping = {
  'properties': {
    'question': {
      'type': 'text',
    },
    'question_embedding': {
      'type': 'dense_vector',
      'dims': 768,}}}
es.indices.put_mapping(body=mapping, index='chatbot')

# Stream the encoded question/answer pairs into the index.
helpers.bulk(es, gen_data())
True
Out[ ]:
(366, [])
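A quick sanity check is that the document count matches the 366 successful actions reported by helpers.bulk above; a minimal sketch (the refresh just forces the newly indexed documents to become searchable):
In [ ]:
es.indices.refresh(index='chatbot')
print(es.count(index='chatbot')['count'])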
In [ ]:
correct, total = 0., 0.
while True:
  text_inp = input('Input:')
  if text_inp == '|quit':
    break
  t0 = time.time()
  # Encode the query exactly as the indexed questions were encoded.
  bert_inp = ['[CLS]'] + list(text_inp) + ['[SEP]']
  bert_inp = tokenizer.convert_tokens_to_ids(bert_inp)
  bert_seg = [0] * len(bert_inp)
  bert_mask = [1] * len(bert_inp)
  res, _ = encoder([np.asarray([bert_inp], dtype=np.int32),
                    np.asarray([bert_mask], dtype=np.int32),
                    np.asarray([bert_seg], dtype=np.int32)])
  query_vector = res[0].numpy()
  # Rank every document by cosine similarity to the query vector; the +1.0
  # keeps the score non-negative, which script_score requires.
  script_query = {
    'script_score': {
      'query': {'match_all': {}},
      'script': {
        'source': "cosineSimilarity(params.query_vector, doc['question_embedding']) + 1.0",
        'params': {'query_vector': query_vector},}}}
  dsl = {
    'query': script_query,
    # Keep the bulky embedding out of the returned _source.
    '_source': {'excludes': ['question_embedding']},}
  hits = es.search(index='chatbot', body=dsl)['hits']['hits']
  print('Match:', hits[0]['_source'])
  print('%.2f sec' % (time.time() - t0))
  text_inp = input('Is the answer correct?')
  if text_inp == 'yes':
    correct += 1.
  total += 1
  print()
print('Correct: {} | Total: {} | Accuracy: {:.3f}'.format(correct, total, correct/total))
Input:早安
/usr/local/lib/python3.6/dist-packages/elasticsearch/connection/base.py:190: ElasticsearchDeprecationWarning: The vector functions of the form function(query, doc['field']) are deprecated, and the form function(query, 'field') should be used instead. For example, cosineSimilarity(query, doc['field']) is replaced by cosineSimilarity(query, 'field').
  warnings.warn(message, category=ElasticsearchDeprecationWarning)
Match: {'question': '早安', 'answer': '早安'}
1.33 sec
Is the answer correct?yes

Input:天气
Match: {'question': '天气', 'answer': '天气还行 要查具体的吗'}
0.16 sec
Is the answer correct?yes

Input:几点了
Match: {'question': '现在几点了', 'answer': '要查一下现在的具体时间吗'}
0.12 sec
Is the answer correct?yes

Input:冷
Match: {'question': '冷', 'answer': '多穿衣服哈'}
0.12 sec
Is the answer correct?yes

Input:热死了
Match: {'question': '笑死我了', 'answer': '瞧把你开心的'}
0.12 sec
Is the answer correct?no

Input:激动死了
Match: {'question': '太激动了', 'answer': '发生什么好事了 说来听听'}
0.13 sec
Is the answer correct?yes

Input:我伤心
Match: {'question': '我好伤心', 'answer': '怎么了 说来听听'}
0.12 sec
Is the answer correct?yes

Input:厉害了
Match: {'question': '吓死我了', 'answer': '怎么了 别害怕 我陪你'}
0.12 sec
Is the answer correct?no

Input:我爱你
Match: {'question': '我爱你', 'answer': '我也爱你'}
0.13 sec
Is the answer correct?yes

Input:我喜欢你
Match: {'question': '我爱你', 'answer': '我也爱你'}
0.12 sec
Is the answer correct?yes

Input:|quit
Correct: 8.0 | Total: 10.0 | Accuracy: 0.800
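The deprecation warning in the transcript notes that the doc['field'] form of the vector functions is going away; the script can be switched to the field-name form it suggests without changing the results. A minimal sketch of the updated query body:
In [ ]:
# Same query as above, using the non-deprecated cosineSimilarity(_, 'field') form.
script_query = {
  'script_score': {
    'query': {'match_all': {}},
    'script': {
      'source': "cosineSimilarity(params.query_vector, 'question_embedding') + 1.0",
      'params': {'query_vector': query_vector},}}}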