Hi, here is some code for your reference:
close all; clear; clc;
%% Binary classification: load the data
Features = 30;
load('train.mat'); % load the training set (see the note at the end for the assumed layout)
load('test.mat');  % load the test set
train_data = [ones(size(train,1),1), train(:,3:end)]; % prepend a column of ones for the bias term
max_train_data = max(train_data);
train_data = train_data./repmat(max_train_data,size(train_data,1),1); % normalize by the training maxima
train_label = train(:,2);
test_data = [ones(size(test,1),1), test(:,3:end)];
test_data = test_data./repmat(max_train_data,size(test_data,1),1); % normalize with the same training maxima
test_label = test(:,2);
[m1,n1] = size(train_data);
[m2,n2] = size(test_data);
%% Training
% set the learning rate, number of iterations, and the model parameters
delta = 0.005;              % learning rate
num = 200;                  % number of passes over the training set
theta = rand(1,Features+1); % weights w plus one extra bias term b
L = zeros(1,num);
for I = 1:num
    dt = zeros(1,Features+1);
    loss = 0;
    for i = 1:m1
        Data_Features = train_data(i,1:Features+1);
        Data_Label = train_label(i,1);
        t = theta * Data_Features';
        t(t<=-10) = -10; % clip the activation so exp() cannot overflow
        t(t>10) = 10;    % and log(h), log(1-h) stay finite
        h = 1/(1+exp(-t)); % h = P(Y=1|X) = exp(w·x)/[1+exp(w·x)]
        dt = (Data_Label-h) * Data_Features; % gradient of the log-likelihood w.r.t. w for this sample
        theta = theta + delta*dt;            % per-sample (stochastic) gradient step on w
        loss = loss + Data_Label*log(h) + (1-Data_Label)*log(1-h); % accumulate the log-likelihood
    end
    % The parameters are estimated by maximum likelihood, i.e. by maximizing the likelihood.
    % To state it as gradient descent we instead minimize the negative log-likelihood;
    % dividing by the number of samples gives the average loss.
    loss = -loss/m1;
    L(I) = loss; % record for the loss curve
    if loss < 0.001
        break;
    end
end
%% Plot
figure(1);
plot(L);
title('Loss');
%% Test accuracy
acc = 0;
for i = 1:m2
    Data_Features = test_data(i,1:Features+1)';
    Data_Label = test_label(i);
    P_Y1 = 1/(1+exp(-theta * Data_Features)); % P(Y=1|X) = exp(w·x)/[1+exp(w·x)]
    if P_Y1 > 0.5 && Data_Label == 1
        acc = acc + 1;      % correct positive prediction
    elseif P_Y1 <= 0.5 && Data_Label == 0
        acc = acc + 1;      % correct negative prediction
    end
end
fprintf('Training and testing finished!\nModel: logistic regression\nOptimizer: gradient descent\ntest_acc:%6.2f%%\n', acc/m2*100)
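
A quick note on the update dt = (Data_Label-h) * Data_Features in the training loop, since that line is where the comments about maximum likelihood come in. For one sample (x, y), with h = 1/(1 + exp(-theta·x)), the per-sample log-likelihood and its gradient are

\ell(\theta) = y \log h + (1 - y)\log(1 - h)
\frac{\partial \ell}{\partial \theta} = (y - h)\,x

so theta = theta + delta*(y - h)*x steps up the log-likelihood, which is the same direction as stepping down the average loss L = -\frac{1}{m}\sum_i \ell_i that the script records.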
The output is:
Loss curve: (figure 1)
Training and testing finished!
Model: logistic regression
Optimizer: gradient descent
test_acc: 97.50%
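
By the way, if you do not have train.mat / test.mat at hand, here is a minimal sketch that fakes data in the layout the script assumes (column 1 = sample ID, column 2 = 0/1 label, columns 3:end = the 30 features). The sample counts, the ID values, and the labelling rule are made-up placeholders, only so that the script above can run end to end:

% Minimal sketch: synthetic data in the layout assumed by the script above
% (column 1 = ID, column 2 = label, columns 3:end = 30 features).
% Sizes and the label rule are assumptions for illustration only.
rng(0);                                          % reproducible
nf = 30;                                         % matches Features above
w_true = randn(nf,1);                            % made-up "true" weights
make = @(n) [(1:n)', zeros(n,1), rand(n,nf)];    % ID, empty label, random features
train = make(500);                               % assumed 500 training samples
test  = make(200);                               % assumed 200 test samples
train(:,2) = double(train(:,3:end)*w_true > 0);  % labels from a simple linear rule
test(:,2)  = double(test(:,3:end)*w_true > 0);
save('train.mat','train');
save('test.mat','test');

After running this once, the script above should work unchanged.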
This meets the test accuracy requirement, haha. Hope it helps; if it does, please consider accepting the answer. Thanks!