# Load the Word2Vec model that we trained in Part 2
# (300 features, min word count 40, context window 10 — see the filename).
from gensim.models import Word2Vec
model = Word2Vec.load("model/300features_40minwords_10context")
Using TensorFlow backend.
type(model.wv.syn0)
numpy.ndarray
直接运行model.syn0的时候,报错,查了下发现在gensim1.0版本后,已经废除了这种用法,应该用model.wv.syn0 instead of model.syn0。
model.wv.syn0.shape
(16490, 300)
syn0的每一行,即代表词汇表中的一个单词,即有16490个单词。列代表特征向量的大小,即300,这个是我们在part 2训练时设定的数字。我们设置的最小单词频度是40(即出现40次以下的单词会被忽略),最后得到一个有16490个单词的词汇表,每个词有300个特征。
单个词向量可以通过下面的方法查看:
model['flower'][:20]
array([-0.03601145, 0.04414343, 0.04036437, -0.13137981, 0.01652543, -0.05171297, 0.06046093, 0.02729285, 0.03909649, 0.06548062, -0.01999683, 0.09914493, -0.05525199, -0.1404988 , -0.03727261, 0.05552262, -0.05796909, -0.00742995, -0.03437098, -0.06527088], dtype=float32)
上面会返回一个1x300的numpy数组,因为太长这里只打印出前20个。
为了之后的计算,这里导入之前的数据集:
# Re-load the three IMDB data sets used in the earlier parts.
import pandas as pd
# 25,000 labeled training reviews (with a "sentiment" column).
train = pd.read_csv("data/labeledTrainData.tsv", header=0,
delimiter="\t", quoting=3)
# 25,000 unlabeled test reviews whose sentiment we will predict.
test = pd.read_csv( "data/testData.tsv", header=0, delimiter="\t", quoting=3 )
# 50,000 additional unlabeled reviews (read for the count check; not used below).
unlabeled_train = pd.read_csv("data/unlabeledTrainData.tsv", header=0,
delimiter="\t", quoting=3 )
# Verify the number of reviews that were read (100,000 in total)
print("Read %d labeled train reviews, %d labeled test reviews, and %d unlabeled reviews\n"
% (train["review"].size, test["review"].size, unlabeled_train["review"].size ))
Read 25000 labeled train reviews, 25000 labeled test reviews, and 50000 unlabeled reviews
导入之前的函数:
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
def review_to_wordlist(review, remove_stopwords=False):
    """Convert a raw review to a list of lowercase word tokens.

    Parameters
    ----------
    review : str
        Raw review text, possibly containing HTML markup.
    remove_stopwords : bool, optional
        When True, English stop words are dropped (default False).

    Returns
    -------
    list of str
        The cleaned word tokens.
    """
    # 1. Strip HTML markup.
    review_text = BeautifulSoup(review, 'lxml').get_text()
    # 2. Keep letters only; everything else becomes a space.
    review_text = re.sub("[^a-zA-Z]"," ", review_text)
    # 3. Lowercase and tokenize on whitespace.
    words = review_text.lower().split()
    # 4. Optionally remove stop words (a set gives O(1) membership tests).
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    # 5. Return a list of words.
    return words
# Download the punkt tokenizer for sentence splitting
import nltk.data
# nltk.download()
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Define a function to split a review into parsed sentences
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.strip())
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append( review_to_wordlist( raw_sentence, remove_stopwords ))
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists
return sentences
电影评论数据集处理起来一个比较麻烦的地方在于,评论的长度是不一样的。我们需要提取出每一个词的向量,然后把它们转换为一个特征集,而且每个评论的特征长度是一样的。
因为每一个单词有一个300维的特征,我们可以用特征操作来把一个评论中的单词合并起来。一个简单的方法就是对所有的词向量取平均。(如果取平均的话,我们需要移除stop words,因为会带来噪音)
下面是计算特征向量平均值的代码:
import numpy as np # Make sure that numpy is imported
def makeFeatureVec(words, model, num_features):
    """Average the word vectors of all in-vocabulary words in one review.

    Parameters
    ----------
    words : list of str
        Tokenized review.
    model : gensim Word2Vec model
        Vectors are looked up through model.wv.
    num_features : int
        Dimensionality of the word vectors (300 in this notebook).

    Returns
    -------
    numpy.ndarray of shape (num_features,)
        The mean vector, or all zeros when no word is in the vocabulary
        (the original divided by zero here, silently producing NaNs).
    """
    # Pre-initialize an empty numpy array (for speed).
    featureVec = np.zeros((num_features,), dtype="float32")
    nwords = 0
    # index2word lists the model's vocabulary; convert it to a set
    # so the membership test in the loop is O(1).
    index2word_set = set(model.wv.index2word)
    for word in words:
        if word in index2word_set:
            nwords += 1
            # Look vectors up via model.wv — plain model[word] is the
            # deprecated pre-gensim-1.0 access (consistent with the
            # model.wv.syn0 usage elsewhere in this file).
            featureVec = np.add(featureVec, model.wv[word])
    # Guard against reviews with no in-vocabulary words: avoid 0/0 = NaN.
    if nwords > 0:
        featureVec = np.divide(featureVec, nwords)
    return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
    """Compute the average feature vector for every review.

    Parameters
    ----------
    reviews : list of list of str
        Each review is a list of word tokens.
    model : gensim Word2Vec model
        Passed through to makeFeatureVec.
    num_features : int
        Dimensionality of the word vectors.

    Returns
    -------
    numpy.ndarray of shape (len(reviews), num_features), dtype float32.
    """
    # Preallocate the 2D result array, for speed.
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
    total = len(reviews)
    # enumerate replaces the hand-maintained counter; the original also
    # compared the int counter against the float literal 0. — harmless
    # but misleading.
    for counter, review in enumerate(reviews):
        # Print a status message every 1000th review.
        if counter % 1000 == 0:
            print("Review %d of %d" % (counter, total))
        reviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)
    return reviewFeatureVecs
接下来我们调用上面的函数来给每一个评论创建一个平均向量。下面会运行几分钟:
# Set values for various parameters (these must match the Part 2 training run).
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# ****************************************************************
# Calculate average feature vectors for training and testing sets,
# using the functions we defined above. Notice that we now use stop
# word removal — stop words add noise when averaging word vectors.
clean_train_reviews = []
for review in train["review"]:
    clean_train_reviews.append( review_to_wordlist( review, remove_stopwords=True ))
trainDataVecs = getAvgFeatureVecs( clean_train_reviews, model, num_features )
print("Creating average feature vecs for test reviews")
clean_test_reviews = []
for review in test["review"]:
    clean_test_reviews.append( review_to_wordlist( review, remove_stopwords=True ))
testDataVecs = getAvgFeatureVecs( clean_test_reviews, model, num_features )
Review 0 of 25000 Review 1000 of 25000 Review 2000 of 25000 Review 3000 of 25000 Review 4000 of 25000 Review 5000 of 25000 Review 6000 of 25000 Review 7000 of 25000 Review 8000 of 25000 Review 9000 of 25000 Review 10000 of 25000 Review 11000 of 25000 Review 12000 of 25000 Review 13000 of 25000 Review 14000 of 25000 Review 15000 of 25000 Review 16000 of 25000 Review 17000 of 25000 Review 18000 of 25000 Review 19000 of 25000 Review 20000 of 25000 Review 21000 of 25000 Review 22000 of 25000 Review 23000 of 25000 Review 24000 of 25000 Creating average feature vecs for test reviews Review 0 of 25000 Review 1000 of 25000 Review 2000 of 25000 Review 3000 of 25000 Review 4000 of 25000 Review 5000 of 25000 Review 6000 of 25000 Review 7000 of 25000 Review 8000 of 25000 Review 9000 of 25000 Review 10000 of 25000 Review 11000 of 25000 Review 12000 of 25000 Review 13000 of 25000 Review 14000 of 25000 Review 15000 of 25000 Review 16000 of 25000 Review 17000 of 25000 Review 18000 of 25000 Review 19000 of 25000 Review 20000 of 25000 Review 21000 of 25000 Review 22000 of 25000 Review 23000 of 25000 Review 24000 of 25000
数据处理结束,我们得到了每个评论的平均向量,我们用这个特征向量来训练一个随机森林模型。注意,在Part 1中,我们只用了有标记的数据来训练模型。
# Fit a random forest to the training data, using 100 trees
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier( n_estimators = 100 )
print("Fitting a random forest to labeled training data...")
# Train on the averaged word vectors against the sentiment labels.
forest = forest.fit( trainDataVecs, train["sentiment"] )
# Test & extract results
result = forest.predict( testDataVecs )
# Write the test results as (id, sentiment) rows for submission.
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
output.to_csv( "result/Word2Vec_AverageVectors.csv", index=False, quoting=3 )
Fitting a random forest to labeled training data...
提交结果后,得分0.82912,效果比Bag_of_Words_model低一点(0.84272)。
word2vec会给词义上相近的单词进行聚类,所以另一个可行的方法是利用一个类中单词的相似性。这种对单词分组的方法叫做向量量化。第一步要做的,就是找到词聚类的中心,可以通过聚类算法来做的,比如K-Means。
在K-Means中,对于一个段落,需要设置一个K,或聚类的数量。如何决定创建多少个聚类?反复试验的结果表示,数量较少的类,比如一个类里有5个词,会有更好的效果。我们使用scikit-learn来实现kmeans
如果K很大的话,会非常慢;原文大概运行了40min。这里我们设置一个计时器来查看花了多长时间
# Cluster the word vectors with K-Means, timing the (slow) run.
from sklearn.cluster import KMeans
import time
start = time.time() # Start time
# Set "k" (num_clusters) to be 1/5th of the vocabulary size,
# or an average of 5 words per cluster
word_vectors = model.wv.syn0
num_clusters = word_vectors.shape[0] // 5
# Initialize a k-means object and use it to extract centroids.
# NOTE(review): n_jobs=-2 means "all CPUs but one" here; the n_jobs
# parameter was deprecated in scikit-learn 0.23 and removed in 0.25 —
# drop it when running on newer versions. TODO confirm installed version.
kmeans_clustering = KMeans( n_clusters = num_clusters, n_jobs=-2 )
# fit_predict returns, for each word vector, the index of its cluster.
idx = kmeans_clustering.fit_predict( word_vectors )
# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print("Time taken for K Means clustering: ", elapsed, "seconds.")
Time taken for K Means clustering: 531.7709980010986 seconds.
idx.shape
(16490,)
idx: Index of the cluster each sample belongs to.
现在每一个单词的聚类赋值存储在了idx里,原始Word2vec模型里的词汇表,保存在model.wv.index2word。为了方便,我们把这两者合并在一个字典里:
# Create a word -> cluster-id dictionary: model.wv.index2word and idx are
# in the same order, so zipping them maps each vocabulary word to the
# number of the cluster it was assigned to.
word_centroid_map = dict(zip( model.wv.index2word, idx ))
word_centroid_map包含的是每个单词所属于的类
上面有些抽象,现在我们看一下每个类里有什么。我们打印出类0~类9:
# For the first 10 clusters
for cluster in range(0, 10):
    # Print the cluster number
    print("\nCluster %d" % cluster)
    # Collect every vocabulary word assigned to this cluster. The original
    # rebuilt list(word_centroid_map.values()) inside the inner loop, which
    # is quadratic in vocabulary size per cluster; one pass over items()
    # yields the same words in the same order.
    words = [word for word, centroid in word_centroid_map.items() if centroid == cluster]
    print(words)
Cluster 0 ['convert', 'cater', 'mislead', 'lure', 'entice', 'appease'] Cluster 1 ['raines', 'joanne', 'cristina', 'belle', 'courtship', 'elsie', 'alonso', 'vera', 'sheik', 'violet', 'hazel', 'connie', 'ayesha', 'lola', 'isabelle'] Cluster 2 ['veiled', 'thinly', 'contradiction'] Cluster 3 ['allied', 'armies', 'battleship', 'germs', 'congress', 'advancing', 'elite', 'invading', 'graf', 'infantry', 'spee', 'fought', 'testing', 'iraqi', 'chavez', 'bombing'] Cluster 4 ['factory', 'local', 'shop', 'supermarket'] Cluster 5 ['paranoid', 'distraught', 'suspicious', 'resigned', 'uncaring', 'chronic', 'understandably', 'suicidal', 'romantically', 'impotent', 'withdrawn', 'disillusioned'] Cluster 6 ['sand', 'rainbow', 'water', 'ashes'] Cluster 7 ['schedule', 'nbc', 'airing', 'weekly', 'fox', 'vh', 'itv', 'cbs', 'bbc', 'rerun', 'radio', 'wb', 'airs', 'cw', 'abc'] Cluster 8 ['johnnie', 'scoop', 'napoleon', 'fatty', 'champ', 'fields', 'sands', 'abbot'] Cluster 9 ['characterization', 'characterizations', 'characterisation', 'characterisations', 'handling']
我们可以看到这些类的质量差别很大。cluster 1基本包含的是名字,有些类里的词之间是有关系的,但是有些类的词之间就没什么关系。
不管怎么说,现在每个单词都有了一个类,我们可以写一个函数,把评论转化为重心袋(convert reviews into bags-of-centroids)。其实就像词袋一样,但是这种方法是用语义上相关的类,而不是单独的单词:
def create_bag_of_centroids( wordlist, word_centroid_map ):
    """Build a bag-of-centroids feature vector for one review.

    Parameters
    ----------
    wordlist : list of str
        Tokenized review.
    word_centroid_map : dict
        Maps each vocabulary word to its cluster id.

    Returns
    -------
    numpy.ndarray, dtype float32
        Per-cluster word counts; length equals the number of clusters.
    """
    # Cluster ids run from 0 up to the highest value in the map,
    # so the vector needs max + 1 slots.
    num_centroids = max( word_centroid_map.values() ) + 1
    # Pre-allocate the bag of centroids vector (for speed).
    bag_of_centroids = np.zeros( num_centroids, dtype="float32" )
    # Count one hit in the owning cluster for every in-vocabulary word;
    # out-of-vocabulary words are ignored.
    for token in wordlist:
        cluster_id = word_centroid_map.get(token)
        if cluster_id is not None:
            bag_of_centroids[cluster_id] += 1
    # Return the "bag of centroids".
    return bag_of_centroids
上面的函数会对每一个评论返还一个numpy数组,这个数组代表特征,特征的数量和聚类的数量一样。max(word_centroid_map.values())
得到的结果是3297,所以一共是3298个类。
然后我们给训练集和测试集构建bags of centroids。然后训练随机森林并检验结果:
# Pre-allocate an array for the training set bags of centroids (for speed)
train_centroids = np.zeros((train["review"].size, num_clusters), dtype="float32")
# Transform the training set reviews into bags of centroids.
# enumerate replaces the hand-maintained counter of the original.
for counter, review in enumerate(clean_train_reviews):
    train_centroids[counter] = create_bag_of_centroids( review, word_centroid_map )
# Repeat for test reviews
test_centroids = np.zeros(( test["review"].size, num_clusters), dtype="float32" )
for counter, review in enumerate(clean_test_reviews):
    test_centroids[counter] = create_bag_of_centroids(review, word_centroid_map )
# Fit a random forest and extract predictions
forest = RandomForestClassifier(n_estimators = 100)
# Fitting the forest may take a few minutes
print("Fitting a random forest to labeled training data...")
forest = forest.fit(train_centroids,train["sentiment"])
result = forest.predict(test_centroids)
# Write the test results as (id, sentiment) rows for submission.
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
output.to_csv( "result/BagOfCentroids.csv", index=False, quoting=3 )
Fitting a random forest to labeled training data...
结果是0.83864,比Bag_of_Words低(0.84272),比Word2Vec_AverageVectors高(0.82912)