# Word cloud from a Chinese novel: segment with jieba first (wordcloud splits
# on whitespace, so Chinese text must be pre-segmented), then render with the
# wordcloud package.

# Read the source text (a GBK-encoded Chinese text file).
file = 'd:/艾萨克·阿西莫夫/奇妙的航程.TXT'
with open(file, 'r', encoding='gbk') as f:
    text = f.read()

# Segment with jieba and re-join with spaces so wordcloud can see word
# boundaries.
import jieba
text = ' '.join(jieba.cut(text))

# Mask image that gives the cloud its shape; a single-color image works best.
# NOTE: scipy.misc.imread was removed from SciPy (deprecated in 1.0, gone in
# 1.2) — use matplotlib's imread instead.  matplotlib returns PNG data as
# float32 in [0, 1], so rescale to the 0-255 uint8 range that WordCloud's
# mask handling expects.
from matplotlib.pyplot import imread
color_mask = (imread('D:/Pictures/7218.png') * 255).astype('uint8')

# Build the word-cloud object.  A Chinese-capable font must be given
# explicitly or the output will be garbled boxes.  WordCloud takes many more
# options — see the package documentation.
import wordcloud
w = wordcloud.WordCloud(font_path='C:/Windows/Fonts/msyh.ttc',
                        max_words=100,
                        mask=color_mask)
# Feed the space-delimited string, then save the rendered image.
w.generate(text)
w.to_file('d:/img1.png')
# Pie chart of word frequencies: count the 20 most common words (3 or more
# word-characters) in a text file and plot their shares.
import re
from collections import Counter
from matplotlib.pyplot import pie, show

f = 't.txt'
# Lower-case the whole text, extract words of 3+ word-characters, and keep
# the 20 most frequent as (word, count) pairs.
with open(f) as fh:
    words = re.findall(r'(\w{3,})', fh.read().lower())
c = Counter(words).most_common(20)
# Wedge sizes are the counts; labels are the words themselves.
pie([i[1] for i in c], labels=[i[0] for i in c])
show()