Write a word-frequency counter using the jieba library that reports the 15 most frequent words in a document.
import jieba
import jieba.analyse
import codecs
import re
from collections import Counter

class WordCounter(object):
    def count_from_file(self, file, top_limit=0):
        with codecs.open(file, 'r', 'utf-8') as f:
            content = f.read()
            # Collapse runs of whitespace and strip sequences of periods
            # before handing the text to jieba.
            content = re.sub(r'\s+', r' ', content)
            content = re.sub(r'\.+', r' ', content)
            return self.count_from_str(content, top_limit=top_limit)

    def count_from_str(self, content, top_limit=0):
        if top_limit <= 0:
            top_limit = 100
        # TF-IDF keyword extraction keeps the 100 most significant terms,
        # which filters stopwords and punctuation out of the final count.
        # A set makes the membership test below O(1).
        tags = set(jieba.analyse.extract_tags(content, topK=100))
        words = jieba.cut(content)
        counter = Counter()
        for word in words:
            if word in tags:
                counter[word] += 1
        return counter.most_common(top_limit)

if __name__ == '__main__':
    counter = WordCounter()
    result = counter.count_from_file(r'/tmp/abc.txt', top_limit=15)
    for k, v in result:
        print(k, v)  # Python 3 print(); the original used the Python 2 statement form
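For comparison, here is a minimal sketch that skips the TF-IDF filtering step and counts segmented tokens directly with Counter. The file path sample.txt and the length-based token filter are illustrative assumptions, not part of the original post:

import jieba
from collections import Counter

def top_words(path, n=15):
    with open(path, encoding='utf-8') as f:
        words = jieba.lcut(f.read())  # lcut returns a list of tokens
    # Dropping single-character tokens removes most punctuation and
    # function words; a proper stopword list would be more precise.
    counter = Counter(w for w in words if len(w.strip()) > 1)
    return counter.most_common(n)

if __name__ == '__main__':
    for word, count in top_words('sample.txt'):  # hypothetical input file
        print(word, count)

The trade-off: the class above only counts words that survive keyword extraction, so stopwords are excluded automatically, while this sketch relies on the cruder length filter.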
————————————————
Copyright notice: This article is an original work by CSDN blogger "blue_lll", licensed under the CC 4.0 BY-SA agreement. Please include the original source link and this notice when reposting.
Original link: