{
    /* Read two words and report whether `second` matches a trailing
     * substring (suffix) of `first` — the author's "max keyword". */
    char first[20] = {0};
    char second[20] = {0};
    char key[20] = {0};

    /* input — %19s caps the read so the 20-byte buffers cannot overflow */
    printf("please input first word:");
    scanf("%19s", first);
    printf("please input second word:");
    scanf("%19s", second);

    /* begin: try every position where the first characters agree */
    bool found = false;
    for (int i = 0; i < 20 && !found; i++)
    {
        /* Only a position starting with second[0] can possibly match. */
        if (first[i] != second[0])
            continue;
        found = true;

        /* Copy the tail of `first` starting at i into `key`.
         * NOTE: the original condition `l < 20, j < 20` used the comma
         * operator and tested only j < 20; use && so both bounds hold. */
        for (int l = i, j = 0; l < 20 && j < 20; l++, j++)
        {
            if (first[l] == 0)
                break;              /* stop at the terminator */
            key[j] = first[l];
        }

        /* Full string compare (also rejects a longer `second` that merely
         * shares a prefix with the copied tail). */
        if (strcmp(key, second) != 0)
        {
            found = false;
            memset(key, 0x00, sizeof(key));  /* clear for the next candidate */
        }
    }

    /* Report exactly once, after the search — the original printed inside
     * the loop, repeating messages and printing nothing when second[0]
     * never occurred in `first`. */
    if (found)
        printf("the max keyword is: %s \n", key);
    else
        printf("can not found the key word!! \n");
    return 0;
}
期末复习比较忙,过段时间再来专门写 scrapy 框架的使用。今天介绍如何用 Python 生成词云。虽然网上有很多词云生成工具,不过自己用 Python 来写是不是更有成就感?
今天要生成的是励志歌曲的词云。在百度文库里面找了 20 来首,如《倔强》、《海阔天空》之类大家熟悉的歌。
所要用到的 Python 库有 jieba(一个中文分词库)、wordcloud、matplotlib、PIL、numpy。
首先我们要做的是读取歌词。我将歌词存在了文件目录下励志歌曲文本中。
现在来读取他
#encoding=gbk
lyric = ''
f = open('./励志歌曲歌词.txt', 'r')
for i in f:
    lyric += f.read()

加入 #encoding=gbk 是为了防止后面操作报错 SyntaxError: Non-UTF-8 code starting with '\xc0'。
然后我们用jieba分词来对歌曲做分词提取出词频高的词
import jieba.analyse
result = jieba.analyse.textrank(lyric, topK=50, withWeight=True)
keywords = dict()
for i in result:
    keywords[i[0]] = i[1]
print(keywords)

得到结果:
然后我们就可以通过 wordcloud 等库来生成词云了。
首先先自己找一张图片来作为生成词云的形状的图
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator

image = Image.open('./tim.jpg')
graph = np.array(image)
wc = WordCloud(font_path='./fonts/simhei.ttf', background_color='White', max_words=50, mask=graph)
wc.generate_from_frequencies(keywords)
image_color = ImageColorGenerator(graph)
plt.imshow(wc)
plt.imshow(wc.recolor(color_func=image_color))
plt.axis("off")
plt.show()

保存生成图片:
wc.to_file('dream.png')

完整代码:
#encoding=gbk
import jieba.analyse
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator

lyric = ''
f = open('./励志歌曲歌词.txt', 'r')
for i in f:
    lyric += f.read()

result = jieba.analyse.textrank(lyric, topK=50, withWeight=True)
keywords = dict()
for i in result:
    keywords[i[0]] = i[1]
print(keywords)

image = Image.open('./tim.jpg')
graph = np.array(image)
wc = WordCloud(font_path='./fonts/simhei.ttf', background_color='White', max_words=50, mask=graph)
wc.generate_from_frequencies(keywords)
image_color = ImageColorGenerator(graph)
plt.imshow(wc)
plt.imshow(wc.recolor(color_func=image_color))
plt.axis("off")
plt.show()
wc.to_file('dream.png')

以上这篇 Python 生成词云的实现方法(推荐)就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持脚本之家。
1、创建一个dict.txt,写入分词,一个词占一行。每一行分三部分:词语、词频、词性,用空格隔开,顺序不可颠倒。2、在分词前通过jieba.load_userdict(file_name)来加载分词字典。
3、点击保存就成功创建成语词库了。