Given the list [34, -12, -56, 78, -22, 9, -1, 87], count how many of its elements are positive, negative, and zero by completing the function below. The correct output is "The number of positives is 4, the number of negatives is 4, the number of zeros is 0".
def count_numbers(lst):
    positive_count = 0
    negative_count = 0
    zero_count = 0
    for num in lst:
        if num > 0:
            positive_count += 1
        elif num < 0:
            negative_count += 1
        else:
            zero_count += 1
    result = "The number of positives is {}, the number of negatives is {}, the number of zeros is {}".format(positive_count, negative_count, zero_count)
    return result
# Example list
numbers = [34, -12, -56, 78, -22, 9, -1, 87]
result = count_numbers(numbers)
print(result)
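For comparison, the same counts can be obtained with generator expressions; this is a minimal alternative sketch, not part of the required solution:

# Illustrative alternative: sum() over generator expressions
positives = sum(1 for n in numbers if n > 0)
negatives = sum(1 for n in numbers if n < 0)
zeros = sum(1 for n in numbers if n == 0)
print(positives, negatives, zeros)  # 4 4 0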
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
mpl.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in plot labels
# Model function: logistic (sigmoid) hypothesis
def sigmoid(x, theta):
    z = x.dot(theta)
    h = 1 / (1 + np.exp(-z))
    return h
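As a quick illustrative check (not in the original code), a zero input gives h = 0.5, the midpoint of the sigmoid:

# Illustrative: z = x.dot(theta) = 0, so h = 1 / (1 + e^0) = 0.5
x_demo = np.array([[1.0, 2.0]])
theta_demo = np.zeros(2)
print(sigmoid(x_demo, theta_demo))  # [0.5]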
# Cost function (regularized)
def cost(x, h, y, lam, theta):
    m, n = x.shape
    r = lam / (2 * m) * theta.T.dot(theta)  # L2 penalty (includes theta[0] here)
    j = -1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) + r
    return j
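Note that the penalty above includes theta[0]. In the common convention (e.g. the Coursera-style exercise that ex2data1.txt appears to come from), the bias term is excluded from regularization; a hedged one-line variant of the penalty, assuming that convention:

r = lam / (2 * m) * theta[1:].dot(theta[1:])  # exclude the bias term theta[0] from the penalty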
# Gradient descent (regularized)
def gradeDecline(x, y, alpha, lam, nums):
    m, n = x.shape
    theta = np.zeros(n)
    j = np.zeros(nums)
    for i in range(nums):
        h = sigmoid(x, theta)
        j[i] = cost(x, h, y, lam, theta)
        rr = lam / m * theta  # gradient of the regularization term
        delta_theta = 1 / m * x.T.dot(h - y) + rr
        theta = theta - alpha * delta_theta
    return theta, j, h
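In equation form, each iteration performs the regularized update implemented above:

\theta := \theta - \alpha \left( \frac{1}{m} X^{\top}(h - y) + \frac{\lambda}{m}\theta \right)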
# Accuracy function (y is now passed in explicitly rather than read from a global)
def score(x, y, theta):
    m, n = x.shape
    count = 0
    h = sigmoid(x, theta)  # predict once for the whole matrix, not per loop iteration
    for i in range(m):
        if np.where(h[i] > 0.5, 1, 0) == y[i]:
            count += 1
    accuracy = count / m
    return accuracy
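The same accuracy can be computed without an explicit loop; a minimal vectorized sketch (score_vectorized is a name introduced here for illustration):

def score_vectorized(x, y, theta):
    # Threshold the predicted probabilities at 0.5 and compare to the labels
    preds = np.where(sigmoid(x, theta) > 0.5, 1, 0)
    return np.mean(preds == y)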
# Min-max feature scaling to [0, 1]
def suofang(x):
    xmin = np.min(x, axis=0)
    xmax = np.max(x, axis=0)
    s = (x - xmin) / (xmax - xmin)
    return s
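A small illustrative example (values chosen here for demonstration): each column is mapped independently onto [0, 1]:

demo = np.array([[1.0, 10.0], [3.0, 30.0], [5.0, 50.0]])
print(suofang(demo))  # rows map to [0, 0], [0.5, 0.5], [1, 1]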
if __name__ == '__main__':
    # 1. Load the data
    data = np.loadtxt(r'E:\机器学习\机器学习1\机器\线性回归\ex2data1.txt', delimiter=',')
    # 2. Split into features and labels
    x = data[:, :-1]
    y = data[:, -1]
    m, n = x.shape
    print('x=', x)
    # Feature scaling
    x = suofang(x)
    # 3. Prepend a column of ones (bias term)
    xx = np.c_[np.ones(m), x]
    print(xx)
    # 4. Run gradient descent
    theta, j, h = gradeDecline(xx, y, 0.01, 0.2, 10000)
    print(theta)
    # Scatter plot of the two classes
    plt.scatter(x[y == 0, 0], x[y == 0, 1], c='red')
    plt.scatter(x[y == 1, 0], x[y == 1, 1], c='blue')
    # Decision boundary: two points determine a line
    x1min = x[:, 0].min()  # minimum of the first feature
    x1max = x[:, 0].max()  # maximum of the first feature
    x2min = -(theta[0] + theta[1] * x1min) / theta[2]
    x2max = -(theta[0] + theta[1] * x1max) / theta[2]
    plt.plot([x1min, x1max], [x2min, x2max], c='pink')
    plt.show()
    # Plot the cost curve
    plt.plot(j)
    plt.show()
    # Accuracy
    print('Accuracy before adjusting lambda:', score(xx, y, theta) * 100, '%')
    theta1, j1, h1 = gradeDecline(xx, y, 0.01, 0.04, 10000)
    print('Accuracy after adjusting lambda:', score(xx, y, theta1) * 100, '%')
Note: regularized logistic regression differs slightly from plain logistic regression in both its cost function and its gradient descent update, as the equations below show.
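Concretely, the regularized cost and gradient implemented above are (with the penalty applied to every component of θ, as in the code):

J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\left[ y^{(i)}\log h_\theta(x^{(i)}) + (1 - y^{(i)})\log\left(1 - h_\theta(x^{(i)})\right) \right] + \frac{\lambda}{2m}\theta^{\top}\theta

\frac{\partial J}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)} + \frac{\lambda}{m}\theta_j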
An alternative version of the counting function from the first exercise, printing the result directly instead of returning it:

def func(lst):
    a = 0
    b = 0
    c = 0
    for num in lst:
        if num > 0:
            a += 1
        elif num < 0:
            b += 1
        else:
            c += 1
    print("The number of positives is {}, the number of negatives is {}, the number of zeros is {}".format(a, b, c))