#!/usr/bin/env python # coding: utf-8 # # Tweet Analytics with Word2vec(Skip-gram Model) # [tsu-nera(@tsu_nera)](https://twitter.com/tsu_nera) のアカウントのツイートをword2vecで分析します。 # In[1]: import time import numpy as np import pandas as pd import tensorflow as tf # ## 前処理 # In[2]: raw_data = pd.read_csv('tweets.csv') # In[3]: text = raw_data['text'] len(text) # In[4]: # studyplusの垂れ流しツイートは削除 text = text[text.str.contains("#studyplus") == False] # fitbitのライフログは削除 text = text[text.str.contains("おはようございます。今日は") == False] len(text) # ## Mecabインストール on Ubuntu 16.04 LTS # # * [Ubuntu 14.04 に Mecab と mecab-python3 をインストール - Qiita](http://qiita.com/elm200/items/2c2aa2093e670036bb30) # # In[ ]: get_ipython().system('sudo apt install mecab libmecab-dev mecab-ipadic mecab-ipadic-utf8') # In[ ]: get_ipython().system('pip install mecab-python3') # In[5]: import MeCab m = MeCab.Tagger("-Owakati") items = m.parse("安倍晋三首相は、国会で施政方針演説を行った。") print(items) # ## tweetデータ作成 # # * [Ubuntu + word2vecでtwitterの自分のアカウントのデータを自然言語処理してみた | from umentu import stupid](https://www.blog.umentu.work/ubuntu-word2vec%e3%81%a7twitter%e3%81%ae%e8%87%aa%e5%88%86%e3%81%ae%e3%82%a2%e3%82%ab%e3%82%a6%e3%83%b3%e3%83%88%e3%81%ae%e3%83%87%e3%83%bc%e3%82%bf%e3%82%92%e8%87%aa%e7%84%b6%e8%a8%80%e8%aa%9e/) # * [Pythonで余計な文字列を削除する方法 | hacknote](http://hacknote.jp/archives/19937/) # In[6]: import re f_out = open("tweets_wakati.txt", "w" ) for line in text.iteritems(): line = re.sub('https?://[\w/:%#\$&\?\(\)~\.=\+\-…]+','', line[1]) line = re.sub('RT', "", line) line = re.sub(r'[!-~]', "", line)#半角記号,数字,英字 line = re.sub(r'[︰-@]', "", line)#全角記号 line = re.sub('\n', " ", line)#改行文字 f_out.write(m.parse(line)) f_out.close() # ## PreProcess # # ここからは、以下の実装をそのまま。 # * [deep-learning/Skip-Grams-Solution.ipynb at master · udacity/deep-learning](https://github.com/udacity/deep-learning/blob/master/embeddings/Skip-Grams-Solution.ipynb) # In[7]: from collections import Counter def preprocess(text): # Replace punctuation with tokens so we can use them in our model text = text.lower() text = text.replace('。', ' ') text = text.replace('、', ' ') text = text.replace('"', ' ') text = text.replace(';', ' ') text = text.replace('!', ' ') text = text.replace('?', ' ') text = text.replace('(', ' ') text = text.replace(')', ' ') text = text.replace('(', ' ') text = text.replace(')', ' ') text = text.replace('「', ' ') text = text.replace('」', ' ') text = text.replace('--', ' ') text = text.replace('?', ' ') # text = text.replace('\n', ' ') text = text.replace(':', ' ') words = text.split() # Remove all words with 5 or fewer occurences word_counts = Counter(words) trimmed_words = [word for word in words if word_counts[word] > 5] return trimmed_words def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: A list where each item is a tuple of (batch of input, batch of target). """ n_batches = int(len(int_text) / (batch_size * seq_length)) # Drop the last few characters to make only full batches xdata = np.array(int_text[: n_batches * batch_size * seq_length]) ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1]) x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1) y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1) return list(zip(x_batches, y_batches)) def create_lookup_tables(words): """ Create lookup tables for vocabulary :param words: Input list of words :return: A tuple of dicts. The first dict.... """ word_counts = Counter(words) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word in int_to_vocab.items()} return vocab_to_int, int_to_vocab # In[8]: with open('tweets_wakati.txt') as f: text = f.read() words = preprocess(text) print(words[:30]) # In[9]: print("Total words: {}".format(len(words))) print("Unique words: {}".format(len(set(words)))) # In[10]: vocab_to_int, int_to_vocab = create_lookup_tables(words) int_words = [vocab_to_int[word] for word in words] # In[11]: from collections import Counter import random threshold = 1e-3 word_counts = Counter(int_words) total_count = len(int_words) freqs = {word: count/total_count for word, count in word_counts.items()} p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts} train_words = [word for word in int_words if random.random() < (1 - p_drop[word])] # In[12]: def get_target(words, idx, window_size=5): ''' Get a list of words in a window around an index. ''' R = np.random.randint(1, window_size+1) start = idx - R if (idx - R) > 0 else 0 stop = idx + R target_words = set(words[start:idx] + words[idx+1:stop+1]) return list(target_words) # In[13]: def get_batches(words, batch_size, window_size=5): ''' Create a generator of word batches as a tuple (inputs, targets) ''' n_batches = len(words)//batch_size # only full batches words = words[:n_batches*batch_size] for idx in range(0, len(words), batch_size): x, y = [], [] batch = words[idx:idx+batch_size] for ii in range(len(batch)): batch_x = batch[ii] batch_y = get_target(batch, ii, window_size) y.extend(batch_y) x.extend([batch_x]*len(batch_y)) yield x, y # ## Build skip-gram Model # In[14]: train_graph = tf.Graph() with train_graph.as_default(): inputs = tf.placeholder(tf.int32, [None], name='inputs') labels = tf.placeholder(tf.int32, [None, None], name='labels') # In[15]: n_vocab = len(int_to_vocab) n_embedding = 50 # Number of embedding features with train_graph.as_default(): embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) embed = tf.nn.embedding_lookup(embedding, inputs) # In[16]: n_sampled = 100 with train_graph.as_default(): softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) softmax_b = tf.Variable(tf.zeros(n_vocab)) # Calculate the loss using negative sampling loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab) cost = tf.reduce_mean(loss) optimizer = tf.train.AdamOptimizer().minimize(cost) # In[17]: with train_graph.as_default(): ## From Thushan Ganegedara's implementation valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 20 # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent valid_examples = np.array(random.sample(range(valid_window), valid_size//2)) valid_examples = np.append(valid_examples, random.sample(range(1000,1000+valid_window), valid_size//2)) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # We use the cosine distance: norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True)) normalized_embedding = embedding / norm valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset) similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding)) # ## Training # In[18]: get_ipython().system('mkdir checkpoints') # In[19]: epochs = 30 batch_size = 50 window_size = 10 with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: iteration = 1 loss = 0 sess.run(tf.global_variables_initializer()) for e in range(1, epochs+1): batches = get_batches(train_words, batch_size, window_size) start = time.time() for x, y in batches: feed = {inputs: x, labels: np.array(y)[:, None]} train_loss, _ = sess.run([cost, optimizer], feed_dict=feed) loss += train_loss if iteration % 100 == 0: end = time.time() print("Epoch {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Avg. Training loss: {:.4f}".format(loss/100), "{:.4f} sec/batch".format((end-start)/100)) loss = 0 start = time.time() if iteration % 1000 == 0: # note that this is expensive (~20% slowdown if computed every 500 steps) sim = similarity.eval() for i in range(valid_size): valid_word = int_to_vocab[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k+1] log = 'Nearest to %s:' % valid_word for k in range(top_k): close_word = int_to_vocab[nearest[k]] log = '%s %s,' % (log, close_word) print(log) iteration += 1 save_path = saver.save(sess, "checkpoints/text8.ckpt") embed_mat = sess.run(normalized_embedding) # In[20]: with train_graph.as_default(): saver = tf.train.Saver() with tf.Session(graph=train_graph) as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) embed_mat = sess.run(embedding) # ## Visualize data # In[22]: get_ipython().run_line_magic('matplotlib', 'inline') get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'") import matplotlib.pyplot as plt from sklearn.manifold import TSNE import matplotlib.font_manager as fm plt.rcParams['font.family'] = 'IPAGothic' # In[23]: viz_words = 500 tsne = TSNE() embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :]) # In[24]: fig, ax = plt.subplots(figsize=(14, 14)) for idx in range(viz_words): plt.scatter(*embed_tsne[idx, :], color='steelblue') plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7) # In[ ]: