Model Building and Evaluation on Top of TF-IDF Feature Extraction

On top of TF-IDF feature extraction, compare the classification performance of two models, logistic regression and naive Bayes, on the text data (accuracy, recall, F1); then, for the logistic regression model, compare performance across the individual label classes (accuracy, recall, F1).
Data source: https://github.com/aceimnorstuvwxz/toutiao-text-classfication-dataset
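Each line of that dataset is a '_!_'-delimited record; a quick sketch with a hypothetical sample line shows which fields the loading code below extracts:

# A hypothetical line in the dataset's '_!_'-delimited format (the actual
# titles and IDs differ; only the field layout matters here)
line = "6551700932705387022_!_101_!_news_culture_!_某博物馆推出夜间开放活动_!_博物馆,夜间开放"
fields = line.split('_!_')
print(fields[1])  # -> '101'           (label: numeric category code)
print(fields[2])  # -> 'news_culture'  (label_desc: category name)
print(fields[3])  # -> the news title  (sentence)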

import numpy as np
import pandas as pd
import time
import jieba
import re
import string
import pickle
import matplotlib.pyplot as plt


from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB



# # Pandas display settings
# pd.set_option("display.max_columns", None)  # show all columns
# pd.set_option("display.max_rows", None)     # show all rows
# pd.set_option("display.expand_frame_repr", False)  # do not fold wide frames
# pd.set_option("display.max_colwidth", 100)  # maximum column width


# Load the dataset: each line is a '_!_'-delimited record
def data():
    with open(r'D:\桌面\文本分类\data.txt', encoding='utf-8') as f:
        lines = f.readlines()
    df = pd.DataFrame(lines)
    df['label'] = df[0].apply(lambda x: x.split('_!_')[1])       # field 2: numeric category code
    df['label_desc'] = df[0].apply(lambda x: x.split('_!_')[2])  # field 3: category name
    df['sentence'] = df[0].apply(lambda x: x.split('_!_')[3])    # field 4: news title text
    return df.iloc[:, 1:]   # drop the raw-line column
df = data()


# Drop duplicate rows, keeping the first occurrence
df = df.drop_duplicates(keep='first')
# Drop rows with missing values
df = df.dropna(axis=0)



STOPWORDS = r'D:\桌面\文本分类\停用词\stopwords-master\哈工大停用词表.txt'
# Build the stopword list (HIT stopword table); a set makes membership tests O(1)
with open(STOPWORDS, encoding='utf-8') as f:
    stoplist = set(word.strip() for word in f)


# Strip punctuation (ASCII and full-width) and tokenize
def qu_dian(text):
    # '-' is placed last in the character class so it is a literal dash;
    # in the middle it would form a '+'-to-'=' range that also strips digits
    punc = r'[~`!#$%^&*()_+=|\';":/.,?><~·!@#¥%……&*()——+=“:’;、。,?》《{}-]'
    ff = re.sub(punc, "", text)
    f1 = jieba.lcut(ff, cut_all=False)   # Chinese word segmentation
    return [w for w in f1 if w.strip()]  # drop empty / whitespace-only tokens

df['quchu'] = df['sentence'].apply(qu_dian)

# Remove stopwords
def qu_word(x):
    return [w for w in x if w not in stoplist]

df['quchu'] = df['quchu'].apply(qu_word)
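As a quick sanity check of the two cleaning steps, run them on a single hypothetical title (this assumes the qu_dian, qu_word, and stoplist definitions above):

sample = "2022世界杯:精彩瞬间回顾!"        # hypothetical title, for illustration only
print(qu_dian(sample))                    # punctuation stripped, segmented by jieba
print(qu_word(qu_dian(sample)))           # stopwords removed as well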



# Join the cleaned token lists into space-separated strings and build a
# term-count matrix; token_pattern=r"(?u)\b\w+\b" keeps single-character
# Chinese words that the default pattern silently drops
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
X = vectorizer.fit_transform(df['quchu'].apply(' '.join))
# Re-weight the raw counts with TF-IDF
transformer = TfidfTransformer()
X = transformer.fit_transform(X)
# Stratified 5-fold splitter (each fold preserves the label proportions)
skf = StratifiedKFold(n_splits=5)
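For reference, the two-step CountVectorizer + TfidfTransformer pipeline is equivalent to scikit-learn's one-step TfidfVectorizer (the last code block in this post uses that form); a minimal sketch:

from sklearn.feature_extraction.text import TfidfVectorizer

# One-step equivalent of CountVectorizer followed by TfidfTransformer
tfidf = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
X_alt = tfidf.fit_transform(df['quchu'].apply(' '.join))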

# Split the data; note that this loop overwrites the fold variables on each
# iteration, so only the LAST fold is used for training/evaluation below.
# .iloc is required because dropping duplicates left gaps in the index.
y = df['label']
for train_index, test_index in skf.split(X, y):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
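To use all five folds rather than just the last one, a sketch that trains and scores inside the loop and averages across folds (continuing from the X, y, and skf defined above):

# Average macro-F1 over all five folds instead of keeping only the last split
fold_f1 = []
for train_index, test_index in skf.split(X, y):
    model = LogisticRegression(max_iter=1000)
    model.fit(X[train_index], y.iloc[train_index])
    pred = model.predict(X[test_index])
    fold_f1.append(f1_score(y.iloc[test_index], pred, average='macro'))
print("macro-F1 per fold:", [round(s, 4) for s in fold_f1])
print("mean macro-F1:", np.mean(fold_f1))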


# Logistic regression

clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial',
                         max_iter=1000)  # raise max_iter so lbfgs converges on sparse TF-IDF features
clf.fit(X_train, y_train)
y_pred_log = clf.predict(X_test)

# Accuracy and macro-averaged precision, recall, F1
accuracy = accuracy_score(y_test, y_pred_log)
precision = precision_score(y_test, y_pred_log, average='macro')
recall = recall_score(y_test, y_pred_log, average='macro')
f1 = f1_score(y_test, y_pred_log, average='macro')  # macro-F1 averages per-class F1 scores
print("Logistic regression results:\naccuracy:", accuracy, "\nprecision:", precision,
      "\nrecall:", recall, "\nF1:", f1)


# Naive Bayes

clf = MultinomialNB()
clf.fit(X_train, y_train)
y_pred_clf = clf.predict(X_test)

# Accuracy and macro-averaged precision, recall, F1
accuracy = accuracy_score(y_test, y_pred_clf)
precision = precision_score(y_test, y_pred_clf, average='macro')
recall = recall_score(y_test, y_pred_clf, average='macro')
f1 = f1_score(y_test, y_pred_clf, average='macro')
print("Naive Bayes results:\naccuracy:", accuracy, "\nprecision:", precision,
      "\nrecall:", recall, "\nF1:", f1)

# Per-label evaluation of the logistic regression classifier
print(classification_report(y_test, y_pred_log))
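The task also asks to compare logistic regression's performance across the individual label classes; the dict form of classification_report makes that easy to tabulate (continuing from y_test and y_pred_log above):

# Per-label precision/recall/F1 for logistic regression, as a sortable table
report = classification_report(y_test, y_pred_log, output_dict=True)
report.pop('accuracy', None)          # drop the scalar accuracy entry
per_label = pd.DataFrame(report).T    # one row per label plus the average rows
print(per_label.sort_values('f1-score', ascending=False))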



A more compact end-to-end variant uses TfidfVectorizer (count and TF-IDF weighting in one step) with a simple 80/20 holdout split instead of stratified folds:

import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report, accuracy_score, f1_score, recall_score

# Load the text data (assumes a CSV with 'text' and 'label' columns)
df = pd.read_csv('text_data.csv')

# 80/20 train/test split by position (assumes the rows are already shuffled)
train_data = df[:int(len(df)*0.8)]
test_data = df[int(len(df)*0.8):]

# Extract TF-IDF features (fit on the training split, then transform the test split)
vectorizer = TfidfVectorizer()
train_features = vectorizer.fit_transform(train_data['text'])
test_features = vectorizer.transform(test_data['text'])

# Logistic regression model
lr = LogisticRegression()
lr.fit(train_features, train_data['label'])

# Predict on the test split
lr_predictions = lr.predict(test_features)

# Accuracy
lr_accuracy = accuracy_score(test_data['label'], lr_predictions)

# Weighted-average recall
lr_recall = recall_score(test_data['label'], lr_predictions, average='weighted')

# Weighted-average F1
lr_f1 = f1_score(test_data['label'], lr_predictions, average='weighted')

# Naive Bayes model
nb = MultinomialNB()
nb.fit(train_features, train_data['label'])

# Predict on the test split
nb_predictions = nb.predict(test_features)

# Accuracy
nb_accuracy = accuracy_score(test_data['label'], nb_predictions)

# Weighted-average recall
nb_recall = recall_score(test_data['label'], nb_predictions, average='weighted')

# Weighted-average F1
nb_f1 = f1_score(test_data['label'], nb_predictions, average='weighted')
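The snippet above computes its metrics but never prints them; a short continuation (same variable names) puts the two models side by side:

# Side-by-side comparison of the two models
results = pd.DataFrame(
    {'accuracy': [lr_accuracy, nb_accuracy],
     'recall': [lr_recall, nb_recall],
     'f1': [lr_f1, nb_f1]},
    index=['LogisticRegression', 'MultinomialNB'])
print(results)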

Comparing the classification performance of logistic regression and naive Bayes on text data comes down to a few standard metrics: accuracy, precision, recall, and the F1 score.

Accuracy is the fraction of all samples the classifier labels correctly:

accuracy = correctly predicted samples / total samples

Precision is the fraction of samples predicted as positive that really are positive:

precision = correctly predicted positives / all predicted positives

Recall is the fraction of the truly positive samples that the classifier finds:

recall = correctly predicted positives / all actual positives

The F1 score is the harmonic mean of precision and recall:

F1 = 2 x precision x recall / (precision + recall)
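For example, with precision 0.8 and recall 0.6, F1 = 2 x 0.8 x 0.6 / (0.8 + 0.6) = 0.96 / 1.4 ≈ 0.686, which sits closer to the smaller of the two values, as a harmonic mean always does.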

Different models can perform quite differently on different text datasets, so the comparison between the two has to be run empirically rather than decided in advance.
Likewise, for the logistic regression model, classification quality on each label class can be compared through the same per-class precision, recall, and F1 scores, which is exactly the breakdown that the classification_report call above provides.