Approach
First read in the source text, segment it with jieba, and join the resulting words with spaces; then write the segmented text to an output file and call extract_tags() on it to extract the keywords.
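As a quick illustration of the two jieba calls before the full script (the sample sentence below is only a placeholder, not part of the original text):

import jieba
import jieba.analyse

sample = '我来到北京清华大学'  # placeholder sentence for demonstration
# precise-mode segmentation, words joined by spaces
print(' '.join(jieba.cut(sample, cut_all=False)))
# top-3 keywords ranked by TF-IDF weight
print(jieba.analyse.extract_tags(sample, topK=3, withWeight=True))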
Code
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/19 19:10
# @Author : cunyu
# @Site : cunyu1943.github.io
# @File : Seg.py
# @Software: PyCharm

import jieba
import jieba.analyse

# Path of the text to be segmented
sourceTxt = './source.txt'
# Path of the segmented output text
targetTxt = './target.txt'

# Segment the source text line by line and write the result
with open(sourceTxt, 'r', encoding='utf-8') as sourceFile, open(targetTxt, 'a+', encoding='utf-8') as targetFile:
    for line in sourceFile:
        seg = jieba.cut(line.strip(), cut_all=False)
        # join the segmented words with spaces
        output = ' '.join(seg)
        targetFile.write(output)
        targetFile.write('\n')
    print('Writing finished!')

# Extract keywords
with open(targetTxt, 'r', encoding='utf-8') as file:
    text = file.read()  # read the whole file as a single string
    """
    Parameter notes:
    * text : the string to extract keywords from
    * topK : number of keywords with the highest TF-IDF weights to return, 20 by default
    * withWeight : whether to return the weight of each keyword, False by default
    * allowPOS : only include words with the specified parts of speech, empty by default
    """
    keywords = jieba.analyse.extract_tags(text, topK=10, withWeight=True, allowPOS=())
    print(keywords)
    print('Extraction finished!')
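For reference, with withWeight=True the call above returns a list of (keyword, weight) tuples sorted by descending TF-IDF weight; with withWeight=False it returns just the keyword strings. If only certain parts of speech are wanted, allowPOS can be restricted, for example (a sketch reusing the text variable from the script above):

# keep only nouns ('n'), place names ('ns') and verbs ('v') among the keywords
keywords = jieba.analyse.extract_tags(text, topK=10, withWeight=True, allowPOS=('n', 'ns', 'v'))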