可以这样实现:
python
import numpy as np
import pandas as pd
import scipy.optimize as opt


def predict(x):
    """Sigmoid function: maps any real x into the open interval (0, 1)."""
    return 1 / (1 + np.exp(-x))


def main():
    """Read data.csv, optimize each row, threshold, and write result.csv.

    For every row, Nelder-Mead searches for the input y whose sigmoid
    output best matches the target in column 3, starting from the value
    in column 2.  The optimized y is then thresholded at 0.5 into a 0/1
    label.  NOTE(review): the stored value is the optimizer's *input* y,
    not predict(y) — confirm this is the intended quantity to threshold.
    """
    # Read the CSV file.
    df = pd.read_csv('data.csv')

    # Run one optimization per row.
    results = []
    for row in df.values:
        res = opt.minimize(lambda y: (predict(y) - row[3]) ** 2,
                           row[2], method='Nelder-Mead')
        # res.x is a 1-element ndarray; store the scalar so the new
        # column is numeric rather than a column of arrays.
        results.append(res.x[0])

    # Write the results into a new 'predictions' column and binarize:
    # values below 0.5 become 0, the rest become 1.
    df['predictions'] = results
    df.loc[df['predictions'] < 0.5, 'predictions'] = 0
    df.loc[df['predictions'] >= 0.5, 'predictions'] = 1

    # Write the output CSV.
    df.to_csv('result.csv', index=False)


if __name__ == "__main__":
    main()
这段代码会:
1. 读取CSV数据
2. 定义sigmoid预测函数predict
3. 使用Nelder-Mead方法对每个样本进行优化,得到预测结果
4. 将预测结果写入新的 predictions 列
5. 小于0.5的设为0,大于等于0.5的设为1
6. 输出结果CSV
这样就实现了将优化结果写入CSV并进行0-1分类。
from scipy.optimize import OptimizeResult


def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
            maxiter=100, callback=None, **options):
    """Coordinate-descent minimizer with a fixed step size.

    Repeatedly perturbs each coordinate of the current best point by
    +/- ``stepsize`` and keeps any perturbation that lowers ``fun``.
    Stops when a full sweep yields no improvement, ``maxiter`` sweeps
    have run, or ``maxfev`` function evaluations are reached.  Suitable
    as a custom ``method=`` callable for ``scipy.optimize.minimize``.
    """
    best_x = x0
    best_y = fun(x0)
    n_calls = 1
    n_iter = 0
    made_progress = True
    halted = False

    while made_progress and not halted and n_iter < maxiter:
        made_progress = False
        n_iter += 1
        for axis in range(np.size(x0)):
            # Try one step down and one step up along this coordinate.
            for candidate in (best_x[axis] - stepsize, best_x[axis] + stepsize):
                trial = np.copy(best_x)
                trial[axis] = candidate
                trial_y = fun(trial, *args)
                n_calls += 1
                if trial_y < best_y:
                    best_y = trial_y
                    best_x = trial
                    made_progress = True
            if callback is not None:
                callback(best_x)
            # Honor the evaluation budget, if one was given.
            if maxfev is not None and n_calls >= maxfev:
                halted = True
                break

    return OptimizeResult(fun=best_y, x=best_x, nit=n_iter,
                          nfev=n_calls, success=(n_iter > 1))
# Demo: drive SciPy's generic minimize() with the custom method above.
# minimize and rosen were never imported, which raised NameError.
from scipy.optimize import minimize, rosen

x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = minimize(rosen, x0, method=custmin, options=dict(stepsize=0.05))
# The Rosenbrock minimum is at (1, ..., 1).
print(res.x)  # expected: array([1., 1., 1., 1., 1.])