from numpy import *
def loadDataSet():
    """
    Create the toy data set for the experiments.
    :return: list of tokenized posts postingList, list of class labels classVec
    """
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 is not
    return postingList, classVec
def createVocabList(dataSet):
    """
    Build the set of all words that occur in the data set.
    :param dataSet: list of tokenized documents
    :return: list of unique words (the vocabulary)
    """
    vocabSet = set([])  # create an empty set
    for document in dataSet:
        # the | operator takes the union of two sets
        vocabSet = vocabSet | set(document)
    return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
    """
    Check, for every vocabulary word, whether it occurs in the input document.
    :param vocabList: the vocabulary (list of all words)
    :param inputSet: the tokenized input document
    :return: a 0/1 vector such as [0,1,0,1,...]; 1 means the vocabulary word occurs in the input
    """
    # create a vector of the same length as the vocabulary, filled with 0s
    returnVec = [0] * len(vocabList)
    # for every word in the document, set the corresponding entry of the output vector to 1
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
myVocabList
['so', 'maybe', 'not', 'cute', 'to', 'stop', 'worthless', 'food', 'quit', 'park', 'how', 'stupid', 'love', 'dalmation', 'flea', 'posting', 'ate', 'steak', 'my', 'garbage', 'dog', 'help', 'him', 'is', 'licks', 'I', 'problems', 'buying', 'has', 'take', 'mr', 'please']
setOfWords2Vec(myVocabList, listOPosts[0])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1]
setOfWords2Vec(myVocabList, listOPosts[3])
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
def _trainNB0(trainMatrix, trainCategory):
    """
    Train the classifier: original, unsmoothed version.
    :param trainMatrix: document-word matrix, e.g. [[1,0,1,1,1,...], [...], ...]
    :param trainCategory: class label per document, e.g. [0,1,1,0,...]; the list is as long as the
                          matrix, 1 marks an abusive document and 0 a normal one
    :return: p0Vect, p1Vect, pAbusive
    """
    # number of documents
    numTrainDocs = len(trainMatrix)
    # number of words in the vocabulary
    numWords = len(trainMatrix[0])
    # prior probability of an abusive document: the number of 1s in trainCategory
    # (i.e. the number of abusive documents) divided by the total number of documents
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # per-word occurrence counts
    p0Num = zeros(numWords)  # [0,0,0,...]
    p1Num = zeros(numWords)  # [0,0,0,...]
    # total number of word occurrences per class
    p0Denom = 0.0
    p1Denom = 0.0
    for i in range(numTrainDocs):
        # for each abusive document, accumulate its word counts into the class-1 totals
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            # otherwise accumulate the word counts into the class-0 totals
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # class 1 (abusive documents): the list [P(F1|C1), P(F2|C1), P(F3|C1), ...],
    # i.e. each word's share of all word occurrences in class 1
    p1Vect = p1Num / p1Denom  # e.g. [1,2,3,5]/90 -> [1/90, ...]
    # class 0 (normal documents): the list [P(F1|C0), P(F2|C0), P(F3|C0), ...],
    # i.e. each word's share of all word occurrences in class 0
    p0Vect = p0Num / p0Denom
    return p0Vect, p1Vect, pAbusive
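# A quick sanity check of the unsmoothed version (a sketch; the *_raw names below are
# only for illustration): any vocabulary word that never occurs in a class gets
# probability 0 here, so a later product of per-word probabilities collapses to 0 for
# any document containing that word. This is what the smoothed version below fixes.
trainMat_raw = [setOfWords2Vec(myVocabList, post) for post in listOPosts]
p0V_raw, p1V_raw, _ = _trainNB0(array(trainMat_raw), array(listClasses))
print(sum(p1V_raw == 0))  # number of vocabulary words never seen in an abusive post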
def trainNB0(trainMatrix, trainCategory):
    """
    Train the classifier: improved version with smoothing and log probabilities.
    :param trainMatrix: document-word matrix
    :param trainCategory: class label per document
    :return: p0Vect, p1Vect, pAbusive
    """
    # total number of documents
    numTrainDocs = len(trainMatrix)
    # number of words in the vocabulary
    numWords = len(trainMatrix[0])
    # prior probability of an abusive document
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # per-word occurrence counts
    # p0Num: counts for the normal class
    # p1Num: counts for the abusive class
    # initialise every count to 1 so that no word ends up with probability 0,
    # which would make the whole product of probabilities 0
    p0Num = ones(numWords)  # [0,0,...] -> [1,1,1,1,1,...]
    p1Num = ones(numWords)
    # total word occurrences per class; the denominators start at 2.0 to go with the
    # smoothed counts (2.0 mainly keeps the denominator from being 0, the value can be tuned)
    # p0Denom: total for the normal class
    # p1Denom: total for the abusive class
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            # accumulate the word counts of this abusive document
            p1Num += trainMatrix[i]
            # and its total number of words
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # class 1 (abusive documents): [log(P(F1|C1)), log(P(F2|C1)), log(P(F3|C1)), ...]
    p1Vect = log(p1Num / p1Denom)
    # class 0 (normal documents): [log(P(F1|C0)), log(P(F2|C0)), log(P(F3|C0)), ...]
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
listOPosts, listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat = []
for postinDoc in listOPosts:
    trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
print (len(trainMat))
print (sum(listClasses))
6
3
p0V, p1V, pAb = trainNB0(trainMat, listClasses)
pAb
0.5
p0V
array([-2.56494936, -3.25809654, -3.25809654, -2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654, -3.25809654, -3.25809654, -2.56494936, -3.25809654, -2.56494936, -2.56494936, -2.56494936, -3.25809654, -2.56494936, -2.56494936, -1.87180218, -3.25809654, -2.56494936, -2.56494936, -2.15948425, -2.56494936, -2.56494936, -2.56494936, -2.56494936, -3.25809654, -2.56494936, -3.25809654, -2.56494936, -2.56494936])
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """
    Classify a document vector.
    Bayes rule: P(C|F1F2...Fn) = P(F1F2...Fn|C)P(C) / P(F1F2...Fn)
    The product P(F1|C)*P(F2|C)*...*P(Fn|C)*P(C) is turned into the sum
    log(P(F1|C)) + log(P(F2|C)) + ... + log(P(Fn|C)) + log(P(C)).
    :param vec2Classify: the vector to classify, e.g. [0,1,1,1,1,...]
    :param p0Vec: class 0 (normal documents), the list [log(P(F1|C0)), log(P(F2|C0)), ...]
    :param p1Vec: class 1 (abusive documents), the list [log(P(F1|C1)), log(P(F2|C1)), ...]
    :param pClass1: prior probability of class 1 (abusive documents)
    :return: class 1 or 0
    """
    # score: log(P(F1|C)) + log(P(F2|C)) + ... + log(P(Fn|C)) + log(P(C))
    # vec2Classify * p1Vec multiplies the two NumPy arrays element-wise, so only the
    # log probabilities of the words that actually occur in the document contribute to
    # the sum; the denominator P(F1F2...Fn) is the same for both classes and can be
    # ignored when comparing the two scores.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
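# Why work in log space (a small illustration; the 0.001 value is arbitrary):
# multiplying many small conditional probabilities underflows to 0.0, while the
# equivalent sum of logs stays a usable finite score.
probs = array([0.001] * 200)
print(prod(probs))       # 0.0 -- underflow, so comparing raw products is meaningless
print(sum(log(probs)))   # about -1381.6 -- still comparable between classes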
def testingNB():
    """
    Test the naive Bayes classifier on the toy data set.
    """
    # 1. load the data set
    listOPosts, listClasses = loadDataSet()
    # 2. build the vocabulary
    myVocabList = createVocabList(listOPosts)
    # 3. turn every post into a 0/1 word vector
    trainMat = []
    for postinDoc in listOPosts:
        # gives an m x len(myVocabList) matrix of 0/1 entries
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    # 4. train the classifier
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    # 5. classify two test documents
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
testingNB()
['love', 'my', 'dalmation'] classified as: 0
['stupid', 'garbage'] classified as: 1
# naive Bayes bag-of-words model
def bagOfWords2VecMN(vocabList, inputSet):
    # unlike setOfWords2Vec, count how many times each vocabulary word occurs
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
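# Difference between the set-of-words and the bag-of-words model, shown on a
# made-up post with a repeated word (testDoc is only for illustration):
testDoc = ['stupid', 'stupid', 'garbage']
print(setOfWords2Vec(myVocabList, testDoc)[myVocabList.index('stupid')])    # 1 -- presence only
print(bagOfWords2VecMN(myVocabList, testDoc)[myVocabList.index('stupid')])  # 2 -- occurrence count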
mySent = 'This book is the best book on python or M.L. I have ever laid eyes upon.'
mySent.split()
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'or', 'M.L.', 'I', 'have', 'ever', 'laid', 'eyes', 'upon.']
import re
regEx = re.compile('\\W+')  # split on runs of non-word characters ('\\W*' is deprecated because it can match an empty string)
listOfTokens = regEx.split(mySent)
listOfTokens
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'or', 'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon', '']
[tok for tok in listOfTokens if len(tok) > 0]
['This', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'or', 'M', 'L', 'I', 'have', 'ever', 'laid', 'eyes', 'upon']
[tok.lower() for tok in listOfTokens if len(tok) > 0]
['this', 'book', 'is', 'the', 'best', 'book', 'on', 'python', 'or', 'm', 'l', 'i', 'have', 'ever', 'laid', 'eyes', 'upon']
emailText = open('email/ham/18.txt').read()
listOfTokens = regEx.split(emailText)
# split text into tokens
def textParse(bigString):
    '''
    Desc:
        Take one big string and parse it into a list of tokens.
    Args:
        bigString -- the input string
    Returns:
        list of tokens, lowercased, with tokens of 2 characters or fewer dropped
    '''
    import re
    # split on anything that is not a word character (letters and digits)
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
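# textParse combines the regex split, lowercasing and the len > 2 filter, so the short
# tokens ('is', 'or', 'M', 'L', 'I', ...) that survived the plain split above are dropped.
# Run on the example sentence from earlier:
textParse('This book is the best book on python or M.L. I have ever laid eyes upon.')
# -> ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'laid', 'eyes', 'upon']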
def spamTest():
    '''
    Desc:
        Run the naive Bayes spam classifier end to end.
    Args:
        none
    Returns:
        Classifies every mail in the test set, counts one error per misclassified mail,
        and prints the overall error rate.
    '''
    docList = []
    classList = []
    fullText = []
    for i in range(1, 26):
        # parse the spam mails and label them as class 1
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        classList.append(1)
        # parse the ham mails and label them as class 0
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    # build the vocabulary
    vocabList = createVocabList(docList)
    trainingSet = list(range(50))
    testSet = []
    # hold out 10 mails at random for testing
    for i in range(10):
        # random.uniform(x, y) draws a random float in the range x to y
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the errorCount is: ', errorCount)
    print('the testSet length is :', len(testSet))
    print('the error rate is :', float(errorCount)/len(testSet))
spamTest()
the errorCount is: 0
the testSet length is : 10
the error rate is : 0.0
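# The 10 test mails are held out at random, so the printed error rate varies from run
# to run; repeating the experiment a few times gives a better feel for the average
# error (a sketch that simply repeats the call, assuming the email/ files are present
# as above):
for _ in range(5):
    spamTest()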
# turn a parsed document into a term-count vector
# (bag-of-words vectoriser, same behaviour as bagOfWords2VecMN above)
def setOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)  # vector of all 0s, one entry per vocabulary word
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
# parse a file's text into tokens
def textParse(bigString):
    import re
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
# RSS feed classifier and removal of the most frequent words
def calcMostFreq(vocabList, fullText):
    import operator
    freqDict = {}
    for token in vocabList:                      # for every word in the vocabulary
        freqDict[token] = fullText.count(token)  # count its occurrences in the full text
    # sort the dictionary by occurrence count, highest first
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]                       # return the 30 most frequent words
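# calcMostFreq can be checked on the small posting data from loadDataSet(); fullText
# here is simply every post flattened into one token list (illustration only):
listOPosts, _ = loadDataSet()
fullText = [word for post in listOPosts for word in post]
calcMostFreq(createVocabList(listOPosts), fullText)[:5]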
def localWords(feed1, feed0):
    import feedparser
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])  # one entry of the first RSS feed
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList: vocabList.remove(pairW[0])  # remove the most frequent words
    # hold out 20 entries at random for testing
    trainingSet = list(range(2*minLen)); testSet = []
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is:', float(errorCount)/len(testSet))
    return vocabList, p0V, p1V
import feedparser
ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
sf = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
vocabList,pSF,pNY = localWords(ny, sf)
the error rate is: 0.4
# display the most characteristic words of each feed
def getTopWords(ny, sf):
    import operator
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    # keep every word whose log probability is above the -6.0 threshold
    for i in range(len(p0V)):
        if p0V[i] > -6.0: topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0: topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print(item[0])
getTopWords(ny, sf)
the error rate is: 0.35 SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF** work married asian hit buddy get hang down cool please oakland body activities drama girl they head hello any from break old area white both attractive couple now mature years there all around see expectations friend something brown lookin here maybe seeking girls has fun were meet american was scorpios business therapy build 150 will racial require than give sebastopol best teasing exploring hands picture other things enjoy friendship 40s together visiting fresh week occupational boob job relationship moved got circle bbw sperm dance home favor year had untold ready hold start east hmu showings sometimes however always gay apart men caring fit sjsu reason burner lately great night medical outside dude online single when crime swimming strings thousands normal full pleasurable nerdy saying harmless wondering goes wanna since hard straight latina peps 3rd car want seem level much read distraction control chemistry odds life 82kg dinner specific gentleman public share first sucks right spots boyfriend stupid oysters benefits hot include lbs guys ladies sexy rewards simply pittsburg feel hotel partner zuno565 moutain private summary stress film rocky been clean little lean one kik black open eyes toward consider don real without happy parties rialto being ethnicity mix after woman away disease kind compensate flash also freaks everyone unconventional 37f depending female handle hair hey once dont male present companionship myself attraction routine beaches professional 180lbs rushing her uninhibited convo degree book anyo mex enjoyment comfort weight slim bay francisco executive skinny try swimmer younger know party token location puss lot living possibly donor laugh respectful currently assets san fucked join while how commitment acting free drinks safe goodbye trying attention movies craigslist caregivers mutual otherwise height etc those stop nothing 198cm likes submissive think make ill respond grow indian romantic graduated types monotony early coffee five 9pm yonkers strange must because then comfortable oral expiring race treat aggression hormone type serious which specialize very defenseless few panty onl rom issue scream pre located latin everything womans cute part most fatass thick point dining yawn cut civilizations regarding low feeling delicious dinners nasty traveling gentle haven partying detailed beautiful contact your already fck lov upfront focus lady she boys nights answer ankle massage str8 provide hosting wash his avoid easily blk games speak 20s personality result okay styling places genuine near animals 287 same consuming sloppy smart times totally museums appointment 30s females lets kevin find studio problem help music meeting sma only before intelligent mid february offbeat wry name model sexual lease stats connect through refined repeat next could hate made state cant howdy abuse excuse anything bit sit loves cafes pic town hilarious ongoing 37yo cuddling take did curious skin artist sex able ccsf articulate cultural usually mushroom smoke place special mean animes vernon front aggressive ever interested iam kids alone weird super wouldn self basic checked send drummer major wanting familiarity crave well ass exist sautéed manhattan chair humor status chance couch lowkey dick tattoos strong philosophy offer makin sedate myths live transgender conceivable fantasies save doesn tied dog guess having let actually eat antisocial lunch over chances moan stay panties bottoming scratching 
lifestyle today enough picky our perspective conversation another origina educated 9am nyc inch eight abusive coms fungus happily onions mountains email questions getting strike puke reply busy milf even verbally lonely slightly parks classy although spank zero anyone port desperate date requiring people remaining about city age host replacement masculine sci steal reading ancient brute bottom man return weekends mind extremely funny minded might conveniently where smooth excites information hobbies backstory trans horny went into move top talk put beat hispanic pics sense vgl cuddle play isn roads hand nature cuffed leaves rough cry 6530 food close bear house emails income marches back daddy june satisfy biz dudesdnt beach ive quiet shape brought current matter tall size chat fear taken money needs gross these travel hamper facing hanging lost cliche soon realized edm haircutting deets often lame search person own every tag heads care nice 347 chester dominate input lol room dates come thing prefer laid many message hung 420 numerous tales answers isolate sad month discreet events outgoing beauty mwm artistic wants similar fair fungi really arts hopefully sound chill big muscleboy them plus penis std loud should going NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY** very make really most times old get been myself type serious moved your year however find they could mean now little eight don host back size travel down dominate five therapy yonkers strange must will because oral hormone please which few onl issue scream pre hands everything womans cute other fatass thick point yawn cut regarding low feeling delicious body hit nasty relationship traveling activities detailed fck home upfront focus answer massage str8 provide hosting avoid easily games speak personality result places hmu same consuming fit lately problem dude hello only before full model from refined repeat hate made state cant howdy anything sit ongoing seem curious artist able articulate usually mushroom life vernon aggressive alone weird wouldn self basic white wanting familiarity sautéed lowkey guys sexy strong offer feel conceivable save couple dog actually antisocial over moan lifestyle enough picky perspective nyc inch black abusive open fungus onions email questions years real puke all busy even verbally lonely being although zero anyone requiring remaining about city female replacement masculine weekends mind minded might where information trans move talk hispanic see play comfort friend something skinny cry 6530 try food know bear house daddy living laugh june satisfy dudesdnt ive tall money needs gross these here soon realized while free has fun tag 347 movies input otherwise meet laid likes submissive message numerous isolate sad discreet outgoing beauty fair fungi muscleboy them penis going coffee scorpios 9pm business build 150 then racial comfortable expiring race treat aggression asian require than give specialize defenseless panty rom sebastopol best teasing exploring located latin picture part things enjoy friendship 40s together dining visiting fresh week oakland civilizations dinners occupational boob job gentle haven got circle bbw partying beautiful contact sperm already lov dance lady she favor boys nights ankle wash his had untold blk ready 20s hold okay start styling east genuine near animals drama showings sometimes 287 always sloppy gay smart totally museums appointment apart 30s men caring sjsu females reason lets burner kevin great night studio medical outside online help music girl 
meeting single head sma when crime swimming intelligent strings thousands mid february normal offbeat wry pleasurable nerdy name any saying sexual harmless lease wondering goes wanna since stats connect break through hard straight latina next peps 3rd car abuse excuse bit loves cafes pic want town hilarious level much 37yo read cuddling take did distraction area skin sex control chemistry odds ccsf cultural smoke place special 82kg dinner animes specific front buddy gentleman public ever interested share first iam kids sucks right super spots boyfriend stupid checked send drummer major work both crave well oysters benefits ass exist hot manhattan chair humor include lbs status chance couch dick tattoos attractive ladies rewards philosophy makin sedate myths live simply pittsburg transgender fantasies hotel doesn married tied partner guess having let zuno565 eat lunch moutain chances stay private summary panties stress mature film hang bottoming scratching rocky today clean our lean one conversation another origina educated 9am kik eyes coms toward happily mountains consider there getting strike without happy reply milf around parties rialto slightly parks ethnicity classy mix after spank woman away disease port kind compensate desperate date people flash also freaks everyone unconventional 37f age depending handle hair sci hey steal reading ancient once brute bottom man return dont extremely funny male present companionship attraction conveniently routine beaches professional smooth excites 180lbs rushing her uninhibited convo hobbies backstory horny degree went book anyo into top mex put beat pics sense vgl cuddle expectations enjoyment isn roads hand nature cuffed weight slim leaves bay rough francisco executive swimmer close younger party token location emails puss income marches lot possibly donor biz beach respectful quiet shape brought current currently matter chat fear taken brown assets lookin san fucked hamper facing hanging join lost cliche maybe edm seeking how haircutting commitment acting deets often lame search drinks girls person safe own goodbye trying every heads attention care nice craigslist caregivers chester were mutual lol room dates height etc those stop nothing come cool thing prefer 198cm many hung think 420 tales answers month ill events mwm respond artistic wants grow american similar arts hopefully sound chill big indian romantic was plus graduated types std loud should monotony early