
Python: text similarity analysis with gensim and jieba word segmentation

2017-01-20 00:00
# -*- coding: utf-8 -*-

import sys
from pymongo import MongoClient
import jieba
import jieba.analyse
from gensim import corpora, models, similarities
from pprint import pprint  # pretty-printer

reload(sys)
sys.setdefaultencoding('utf-8')

kickpath = ""  # path prefix for the saved model files, e.g. "/root/python/"

courses = []  # raw document texts pulled from MongoDB
uuids = []    # uuid of each document, in the same order as courses

# Tokenize Chinese documents with jieba and build one keyword list per document
def jieba_preprocess_cn(courses, low_freq_filter=True):
    #jieba.analyse.set_stop_words("../extra_dict/stop_words.txt")
    #jieba.analyse.set_idf_path("../extra_dict/idf.txt.big")
    texts_tokenized = []
    for document in courses:
        # extract up to 500 TF-IDF-weighted keywords from each document
        tags = jieba.analyse.extract_tags(document, 500)
        texts_tokenized.append(tags)

    texts_filtered_stopwords = texts_tokenized
    pprint(texts_filtered_stopwords)

    # remove punctuation
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    texts_filtered = [[word for word in document if word not in english_punctuations]
                      for document in texts_filtered_stopwords]

    # drop very low-frequency words
    if low_freq_filter:
        # remove words that appear only once across the corpus
        from collections import defaultdict
        frequency = defaultdict(int)
        for text in texts_filtered:
            for token in text:
                frequency[token] += 1
        texts = [[token for token in text if frequency[token] > 1] for text in texts_filtered]
    else:
        texts = texts_filtered
    pprint(texts)
    return texts
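
# Usage sketch -- the sample inputs below are hypothetical, not from the original post:
#   lib_texts = jieba_preprocess_cn([u"高血压患者的饮食注意事项",
#                                    u"感冒发烧如何选择用药"])
# Each input document becomes one keyword list, ready to feed into
# corpora.Dictionary() in train_by_lsi() below.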

def train_by_lsi(lib_texts):
    # uncomment to see progress logs
    #import logging
    #logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    dictionary = corpora.Dictionary(lib_texts)
    # doc2bow(): convert a token list into a bag of words, i.e. a list of (word_id, word_frequency) pairs
    corpus = [dictionary.doc2bow(text) for text in lib_texts]
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]

    # num_topics here is a rough guess; uncomment to train an LSI model with 10 topics
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary)  #, num_topics=10)
    # index is a gensim.similarities.docsim.MatrixSimilarity instance
    index = similarities.MatrixSimilarity(lsi[corpus])

    dictionary.save(kickpath + "kick.dict")
    lsi.save(kickpath + "kick.lsi")
    index.save(kickpath + "kick.index")
    return (index, dictionary, lsi)

if __name__ == '__main__':
    conn = MongoClient("xxx", 27017)
    db = conn.health
    db.authenticate("xx", "xxx")
    content = db.kickchufang.find({'doctorId': 'huanghuang'})
    index = 0
    for i in content:
        line = str(i['desc'])  #.decode("utf-8") #.encode("GB18030")
        #print "line:", line
        uuid = i['uuid']
        uuids.append(uuid)
        #print uuid, line
        courses.append(line)
        print str(index)
        index = index + 1
        #if (index > 10):
        #    break

    # persist the uuid list so similarity results can be mapped back to documents
    man_file = open(kickpath + "kick.uuids", 'w')
    print >> man_file, uuids
    man_file.close()
    courses_name = courses

    # the library is now built -- this part can involve a lot of data,
    # so it can be preprocessed ahead of time and stored
    lib_texts = jieba_preprocess_cn(courses)
    (index, dictionary, lsi) = train_by_lsi(lib_texts)
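
The script above only builds and saves the similarity index; it never queries it. Below is a minimal sketch of the query side, assuming the kick.dict, kick.lsi and kick.index files saved by train_by_lsi() above; the query string is a made-up example:

# -*- coding: utf-8 -*-
import jieba.analyse
from gensim import corpora, models, similarities

# load the artifacts saved by train_by_lsi()
dictionary = corpora.Dictionary.load("kick.dict")
lsi = models.LsiModel.load("kick.lsi")
index = similarities.MatrixSimilarity.load("kick.index")

# hypothetical new question to match against the library
query = u"头疼发热应该吃什么药"
query_tags = jieba.analyse.extract_tags(query, 500)  # same keyword extraction as preprocessing

vec_bow = dictionary.doc2bow(query_tags)  # bag of words in the trained dictionary
vec_lsi = lsi[vec_bow]                    # project the query into LSI space
sims = index[vec_lsi]                     # cosine similarity against every library document

# rank library documents, most similar first
ranked = sorted(enumerate(sims), key=lambda item: -item[1])
print ranked[:10]

The integer positions in ranked follow the order in which documents were appended to courses, which is the same order as the uuid list written to kick.uuids, so each hit can be traced back to its MongoDB document.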