Key concepts
Term frequency: the number of times a word appears in the document.
Stop words: characters or words filtered out during preprocessing, e.g. "网站" (website), "的", etc.
Corpus: the collection of all the documents we want to analyze.
Chinese word segmentation: splitting a sequence of Chinese characters into individual words.
Introduction to the third-party libraries used
jieba
jieba.cut(content)
content is the text to be segmented
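A minimal sketch of jieba.cut in isolation (the sample sentence is the one from jieba's own documentation; the exact segmentation may vary slightly by jieba version):

import jieba

# jieba.cut returns a generator of tokens
content = '我来到北京清华大学'
print(list(jieba.cut(content)))
# e.g. ['我', '来到', '北京', '清华大学']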
pandas
pandas.DataFrame(): creates a DataFrame object
pandas.DataFrame.groupby(): grouped statistics
Grouped-statistics pattern
pandas.DataFrame.groupby(by=[grouping columns])[columns to aggregate].agg({result name: aggregation function})
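A small self-contained sketch of this pattern (the data is made up for illustration). Note that the {name: function} dict form of .agg() shown above was deprecated and later removed for Series in pandas 1.0, so current pandas needs .size() or named aggregation instead:

import pandas as pd

df = pd.DataFrame({'segment': ['北京', '北京', '大学']})

# On current pandas, count rows per group like this:
counts = df.groupby('segment').size().reset_index(name='计数')
print(counts.sort_values('计数', ascending=False))
# roughly:
#   segment  计数
# 0      北京    2
# 1      大学    1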
wordcloud
A Python library for building word clouds.
See the library's documentation for installation instructions.
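In most environments it can simply be installed with pip:

pip install wordcloud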
Implementing the word cloud
#!/usr/bin/env python
# coding=utf-8
import os
import jieba
import codecs
import pandas as pd
import numpy as np
from wordcloud import WordCloud, ImageColorGenerator
import matplotlib.pyplot as plt  # all required libraries

basefile = 'data/'  # corpus directory (the quotes were missing in the original)

# Load the corpus
f_in = codecs.open(basefile + 'an.txt', 'r', 'utf-8')
content = f_in.read()
f_in.close()

# Segment the text, keeping only tokens longer than one character
segments = []
segs = jieba.cut(content)
for seg in segs:
    if len(seg) > 1:
        segments.append(seg)

# Build a DataFrame from the segments
segmentDF = pd.DataFrame({'segment': segments})

# Group by segment and count occurrences ('计数' means "count");
# the original .agg({'计数': np.size}) dict form was removed in pandas 1.0
segStat = (segmentDF.groupby('segment').size()
           .reset_index(name='计数')
           .sort_values(by='计数', ascending=False))

# Load the stop word list
stopwords = pd.read_csv('./StopwordsCN.txt', encoding='utf8', index_col=False)

# Remove stop words (the ~ negates the isin() mask)
fSegStat = segStat[~segStat.segment.isin(stopwords.stopword)]

# Build the word cloud
wordcloud = WordCloud(
    font_path='./simhei.ttf',   # font used to render the words
    background_color='black',   # background color
)
words = fSegStat.set_index('segment').to_dict()
wordcloud.fit_words(words['计数'])
plt.imshow(wordcloud)
plt.show()
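Besides displaying the cloud with matplotlib, the WordCloud object can also write the rendered image straight to disk via its to_file method (the filename here is arbitrary):

wordcloud.to_file('wordcloud_an.png')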
Result
Beautifying the word cloud
# Read the background image; scipy.misc.imread (used in the original) was
# removed in SciPy 1.2, so PIL + numpy is used here instead
import numpy as np
from PIL import Image

bimg = np.array(Image.open(basefile + 'An.png'))

wordcloud = WordCloud(
    background_color='white',  # white background
    mask=bimg,                 # the cloud takes the shape of the image
    font_path='./simhei.ttf'
)
wordcloud = wordcloud.fit_words(words['计数'])

# Set the figure size
plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')

# Sample the colors of the background image
bimgColors = ImageColorGenerator(bimg)
plt.axis('off')

# Recolor the word cloud with the image colors and display it
plt.imshow(wordcloud.recolor(color_func=bimgColors))
plt.show()
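One detail worth knowing about the mask parameter: the wordcloud library treats pure white (#FFFFFF) pixels of the mask image as masked out, so words are only drawn inside the non-white regions. This is what makes the cloud above follow the shape of An.png.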