with open('stop.txt', 'r', encoding='UTF-8') as stopFile:
    stopwords = set(line.strip() for line in stopFile)  # a set gives O(1) membership tests
novelList = jieba.lcut(novel)  # lcut already returns a list
novelDict = {}
# Build the word-frequency dictionary
for word in novelList:
    if word not in stopwords:
        # Do not count single-character words
        if len(word) == 1:
            continue
        else:
            novelDict[word] = novelDict.get(word, 0) + 1
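The same tally can be written more compactly with the standard library's collections.Counter (a sketch, assuming the novelList and stopwords built above; Counter is a dict subclass, so the rest of the code keeps working unchanged):

from collections import Counter

# Count every segmented word that is not a stopword and is longer than one character
novelDict = Counter(word for word in novelList
                    if word not in stopwords and len(word) > 1)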
(4) Sort by word frequency and output the results
import jieba  # jieba Chinese word-segmentation library
from matplotlib import pyplot as plt

with open('test.txt', 'r', encoding='UTF-8') as novelFile:
    novel = novelFile.read()
# print(novel)
with open('stop.txt', 'r', encoding='UTF-8') as stopFile:
    stopwords = set(line.strip() for line in stopFile)  # a set gives O(1) membership tests
novelList = jieba.lcut(novel)  # lcut already returns a list
novelDict = {}
# Build the word-frequency dictionary
for word in novelList:
    if word not in stopwords:
        # Do not count single-character words
        if len(word) == 1:
            continue
        else:
            novelDict[word] = novelDict.get(word, 0) + 1
# Sort by frequency
novelListSorted = list(novelDict.items())
novelListSorted.sort(key=lambda e: e[1], reverse=True)
# Print the 10 most frequent words
for topWordTup in novelListSorted[:10]:
    print(topWordTup)
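If only the top 10 entries are needed, a full sort is not strictly necessary; heapq.nlargest from the standard library picks them directly (a sketch over the same novelDict):

import heapq

# Select the 10 highest-frequency items without sorting the whole dictionary
top10 = heapq.nlargest(10, novelDict.items(), key=lambda e: e[1])
for topWordTup in top10:
    print(topWordTup)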
# Plot the top-10 frequencies; without a CJK font, Chinese tick labels render
# as empty boxes. SimHei is one common choice (assumes the font is installed).
plt.rcParams['font.sans-serif'] = ['SimHei']
x = [c for c, v in novelListSorted]
y = [v for c, v in novelListSorted]
plt.plot(x[:10], y[:10], color='r')
plt.show()
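Since the x-axis holds discrete words rather than a continuous quantity, a bar chart usually reads better than a line plot; a sketch reusing the same x and y lists:

# Bar chart of the top-10 word frequencies
plt.bar(x[:10], y[:10], color='r')
plt.xlabel('word')
plt.ylabel('frequency')
plt.show()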