The script below does everything in one pass: it scrapes twenty result pages of poems for each of Li Bai and Du Fu from gushiwen.org, strips whitespace and punctuation, tokenizes both corpora with jieba, trains an NLTK Naive Bayes classifier, and then estimates how likely an input line of verse is to be by each poet.

import re
import requests
import jieba
from bs4 import BeautifulSoup as bp
from nltk.classify import NaiveBayesClassifier

# Scrape the collected poems of Li Bai and Du Fu (20 result pages each)
urls = ['https://so.gushiwen.org/authors/authorvsw_b90660e3e492A{}.aspx',
        'https://so.gushiwen.org/authors/authorvsw_515ea88d1858A{}.aspx']
author = ['李白', '杜甫']
for index, url in enumerate(urls):
    file = open('{}.txt'.format(author[index]), 'w', encoding='utf8')
    longstr = ''
    for n in range(1, 21):
        urlp = url.format(n)
        r = requests.get(urlp)
        bs1 = bp(r.text, 'lxml')
        contsons = bs1.select('div .contson')  # poem bodies on gushiwen.org
        for contson in contsons:
            # Drop whitespace and Chinese commas/periods
            longstr += re.sub(r'\s|[,。]', '', contson.text)
    file.write(longstr)
    file.close()

# Tokenize each corpus with jieba (cut() returns a generator of tokens)
text1 = open("李白.txt", encoding='utf8').read()
list1 = jieba.cut(text1)
text2 = open("杜甫.txt", encoding='utf8').read()
list2 = jieba.cut(text2)

# Data preparation
libai = list1
dufu = list2

# Feature extraction: iterating a token (a string) yields its characters,
# so each feature set marks which characters appear in that token
def word_feats(words):
    return dict([(word, True) for word in words])

libai_features = [(word_feats(lb), 'lb') for lb in libai]
dufu_features = [(word_feats(df), 'df') for df in dufu]
train_set = libai_features + dufu_features

# Train the classifier
classifier = NaiveBayesClassifier.train(train_set)

# Classify a test sentence
sentence = input("请输入一句你喜欢的诗:")  # "Enter a line of poetry you like:"
print("\n")
seg_list = [seg for seg in jieba.cut(sentence)]

# Count how many tokens each poet's label wins
lb = 0
df = 0
for word in seg_list:
    classResult = classifier.classify(word_feats(word))
    if classResult == 'lb':
        lb = lb + 1
    if classResult == 'df':
        df = df + 1

# Report the two proportions
x = lb / len(seg_list)
y = df / len(seg_list)
print('李白的可能性:%.2f%%' % (x * 100))  # probability of Li Bai
print('杜甫的可能性:%.2f%%' % (y * 100))  # probability of Du Fu
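The vote-counting loop at the end scores the sentence one token at a time. As a minimal alternative sketch, NLTK's classifier can also score the whole sentence at once: prob_classify takes a single feature set and returns a probability distribution over the labels. Feeding it the entire sentence, rather than per-token votes, is an assumption of this sketch, not what the original script does.

# Alternative ending (sketch): score the whole sentence as one feature set.
# prob_classify and .prob() are part of NLTK's classifier API; 'classifier',
# 'word_feats', and 'sentence' are reused from the script above.
dist = classifier.prob_classify(word_feats(sentence))
print('李白的可能性:%.2f%%' % (dist.prob('lb') * 100))
print('杜甫的可能性:%.2f%%' % (dist.prob('df') * 100))

This variant also sidesteps the division by zero that the original hits when jieba returns no tokens for the input.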
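The script trains on every token it scrapes, so nothing tells you whether the model actually separates the two poets. A quick sanity check, sketched under the assumption that libai_features and dufu_features are the lists built above: hold out part of the labeled feature sets and measure accuracy with nltk.classify.accuracy.

import random
from nltk.classify import NaiveBayesClassifier, accuracy

# Shuffle the labeled feature sets built above and split 80/20
# (the split ratio and seed are arbitrary choices for this sketch)
random.seed(0)
feats = libai_features + dufu_features
random.shuffle(feats)
cut = int(len(feats) * 0.8)
held_out_clf = NaiveBayesClassifier.train(feats[:cut])
print('held-out accuracy: %.2f' % accuracy(held_out_clf, feats[cut:]))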
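One caveat on the scraping step: the loop assumes gushiwen.org always returns a valid page and never throttles. Below is a hedged hardening sketch; the User-Agent string, the 10-second timeout, and the one-second pause are illustrative values, not anything the site or the original post specifies. In the loop, r = requests.get(urlp) would become html = fetch_page(urlp).

import time
import requests

def fetch_page(urlp):
    # Hypothetical helper for the scraping loop; header, timeout, and
    # delay values are assumptions made for this sketch
    r = requests.get(urlp, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
    r.raise_for_status()  # fail loudly on a bad response instead of parsing it
    time.sleep(1)         # pause between fetches to stay polite
    return r.text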