机器学习pycharm线性回归代码讲解

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from linear_regression import LinearRegression

# Load the 2017 World Happiness Report dataset into a DataFrame.
data = pd.read_csv('../data/world-happiness-report-2017.csv')

# Random 80/20 split: sample 80% of the rows for training; the rows
# not sampled (dropped by index) become the test set.
train_data = data.sample(frac = 0.8)
test_data = data.drop(train_data.index)

# Column names of the single input feature and the regression target.
input_param_name = 'Economy..GDP.per.Capita.'
output_param_name = 'Happiness.Score'

# Double brackets select a one-column DataFrame, so .values yields a
# 2-D (n_samples, 1) ndarray — the shape the model code works with.
x_train = train_data[[input_param_name]].values
y_train = train_data[[output_param_name]].values

# FIX: the original used single-bracket indexing here, producing 1-D
# test arrays while the train arrays were 2-D. Use the same [[...]]
# indexing so train and test arrays have consistent (n, 1) shapes.
x_test = test_data[[input_param_name]].values
y_test = test_data[[output_param_name]].values

# Scatter both splits on one figure to eyeball the train/test data.
for sample_x, sample_y, sample_label in (
    (x_train, y_train, 'Train data'),
    (x_test, y_test, 'test data'),
):
    plt.scatter(sample_x, sample_y, label=sample_label)
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()

# Gradient-descent hyperparameters.
num_iterations = 500
learning_rate = 0.01

# Fit the custom LinearRegression model; train() returns the learned
# parameters and the cost recorded at every iteration.
linear_regression = LinearRegression(x_train, y_train)
theta, cost_history = linear_regression.train(learning_rate, num_iterations)

# Report the loss before and after training.
print('开始时的损失:', cost_history[0])
print('训练后的损失:', cost_history[-1])

# Learning curve: cost as a function of the iteration number.
iteration_axis = range(num_iterations)
plt.plot(iteration_axis, cost_history)
plt.xlabel('Iter')
plt.ylabel('cost')
plt.title('GD')
plt.show()

# Evaluate the fitted line on an evenly spaced grid spanning the
# training-feature range, then overlay it on the raw scatter data.
predictions_num = 100

grid = np.linspace(x_train.min(), x_train.max(), predictions_num)
x_predictions = grid.reshape(predictions_num, 1)
y_predictions = linear_regression.predict(x_predictions)

for sample_x, sample_y, sample_label in (
    (x_train, y_train, 'Train data'),
    (x_test, y_test, 'test data'),
):
    plt.scatter(sample_x, sample_y, label=sample_label)
plt.plot(x_predictions, y_predictions, 'r', label='Prediction')
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()
有没有会的朋友能给我简单讲解一下每段代码都是干什么的?谢谢啦!

注释已经给你写好了,如有帮助,请点击我评论上方【采纳该答案】按钮支持一下,谢谢!

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from linear_regression import LinearRegression#前几行都是导入包

data = pd.read_csv('../data/world-happiness-report-2017.csv')# read the CSV file into `data`

# Build the training and test sets
train_data = data.sample(frac = 0.8)# frac = fraction of rows to sample; frac=0.8 draws 80% of the rows at random
test_data = data.drop(train_data.index)# drop the sampled train rows by index — the remainder is the test set

input_param_name = 'Economy..GDP.per.Capita.'# name of the input feature column
output_param_name = 'Happiness.Score'# name of the output (target) column

x_train = train_data[[input_param_name]].values
# [[input_param_name]] selects the column as a one-column DataFrame; .values converts it to an ndarray
y_train = train_data[[output_param_name]].values
# [[output_param_name]] selects the target column the same way; .values converts it to an ndarray

x_test = test_data[input_param_name].values# the lines above are the training set; this is the test set
y_test = test_data[output_param_name].values

# Draw the scatter plots
plt.scatter(x_train,y_train,label='Train data')# training points
plt.scatter(x_test,y_test,label='test data')# test points
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()

num_iterations = 500# number of gradient-descent iterations
learning_rate = 0.01# learning rate

linear_regression = LinearRegression(x_train,y_train)
(theta,cost_history) = linear_regression.train(learning_rate,num_iterations)
# call the train method, passing in the learning rate and iteration count

print ('开始时的损失:',cost_history[0])
# cost_history[0] is the cost at the very first iteration
print ('训练后的损失:',cost_history[-1])
# cost_history[-1] is the cost at the last iteration
plt.plot(range(num_iterations),cost_history)
plt.xlabel('Iter')
plt.ylabel('cost')
plt.title('GD')
plt.show()

predictions_num = 100# number of evenly spaced points to predict on

# Grid over the training-feature range, reshaped to (predictions_num, 1)
x_predictions = np.linspace(x_train.min(),x_train.max(),predictions_num).reshape(predictions_num,1)
y_predictions = linear_regression.predict(x_predictions)# predicted values on the grid

# Overlay the fitted regression line on the raw scatter data
plt.scatter(x_train,y_train,label='Train data')
plt.scatter(x_test,y_test,label='test data')
plt.plot(x_predictions,y_predictions,'r',label = 'Prediction')
plt.xlabel(input_param_name)
plt.ylabel(output_param_name)
plt.title('Happy')
plt.legend()
plt.show()

您好,我是有问必答小助手,您的问题已经有小伙伴帮您解答,感谢您对有问必答的支持与关注!
PS:问答VIP年卡 【限时加赠:IT技术图书免费领】,了解详情>>> https://vip.csdn.net/askvip?utm_source=1146287632