用 TensorFlow 实现径向基(RBF)神经网络进行预测,下面的代码是否有误?

#激励函数 掰弯利器
import tensorflow.compat.v1 as tf 
tf.disable_v2_behavior()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#存放模型和数据
# Paths for the saved model and the (unused below) test-data file.
data_path = "data/model"
filename = "data/testdata.csv"
# Load the training data; the CSV has no header row.
normal_train = pd.read_csv('1500h.csv',header=None)
normal_train = np.array(normal_train)
# The quality variables and process variables are sampled at different
# rates, so keep every 5th row to align them. Stride slicing replaces the
# original element-by-element append loop.
data1 = np.array(normal_train[::5])
def dealwith(data,a):
    """Select the model's input/target columns and split into train/test.

    Args:
        data: 2-D array with at least 52 columns; column 39 is the target.
        a: row index at which to split — rows [:a] train, rows [a:] test.

    Returns:
        (train_x, test_x, train_y, test_y); x has 31 columns, y has 1.
    """
    # Input features: columns 0-21, 41-44, 46-48, 50-51 (31 in total).
    datax_norm = np.hstack((data[:,:22],data[:,41:45],data[:,46:49],data[:,50:52]))
    # BUG FIX: the original reshaped with len(data1) — the module-level
    # global — instead of len(data), so the function broke (or silently
    # misbehaved) for any argument other than data1.
    datay_norm = data[:,39].reshape(len(data),1)
    return datax_norm[:a,:],datax_norm[a:,:],datay_norm[:a,:],datay_norm[a:,:]
def Rescalling(data):
    """Scale each column of *data* linearly into [-1, 1].

    Args:
        data: 2-D array; scaling statistics are computed per column.

    Returns:
        (normData, ranges, Min) where ranges = column max - column min and
        Min is the column minimum, so callers can rescale new data the
        same way. A constant column yields a zero range (division by zero),
        matching the original behavior.
    """
    Max = data.max(axis=0)
    Min = data.min(axis=0)
    ranges = Max - Min
    # NumPy broadcasting replaces the redundant np.tile copies.
    normData = 2.0 * (data - Min) / ranges - 1.0
    return normData,ranges,Min
# Split: first 5500 aligned rows train, the rest test.
# NOTE(review): despite the *_norm suffix these four arrays are still RAW
# (unscaled) — normalization happens only on the next two lines, and only
# for the training split.
trainx_norm,testx_norm,trainy_norm,testy_norm = dealwith(data1,5500)
# Scale training columns to [-1, 1]; keep ranges/min so the test split can
# be rescaled with the same statistics later.
datax_norm,datax_ranges,datax_Min = Rescalling(trainx_norm)
datay_norm,datay_ranges,datay_Min = Rescalling(trainy_norm)
def add_layer(inputs,in_size,hidden_size,out_size,activation_function=None):
    """Gaussian RBF hidden layer followed by a linear output layer.

    Args:
        inputs: float tensor of shape [batch, in_size].
        in_size: number of input features.
        hidden_size: number of RBF units (centers).
        out_size: number of output units.
        activation_function: optional activation applied to the output.

    Returns:
        Tensor of shape [batch, out_size].
    """
    # Hidden-to-output weights and bias.
    W = tf.Variable(tf.random_normal([hidden_size,out_size]))
    b = tf.Variable(tf.random_normal([1,out_size]))
    # RBF centers (one row per hidden unit) and per-unit widths.
    c = tf.Variable(tf.random_normal([hidden_size,in_size]))
    delta = tf.Variable(tf.random_normal([1,hidden_size]))

    # BUG FIX: the original body read the global placeholder X instead of
    # the `inputs` parameter, and its tile-based distance was only valid
    # for batch size 1. Broadcast instead:
    # [batch, 1, in] - [1, hidden, in] -> sum over features -> [batch, hidden]
    diff = tf.expand_dims(inputs, 1) - tf.expand_dims(c, 0)
    dist = tf.reduce_sum(tf.square(diff), axis=2)
    delta_2 = tf.square(delta)
    # Gaussian RBF activations, shape [batch, hidden]; identical to the
    # original result for batch size 1.
    RBF_OUT = tf.exp(-dist / (2.0 * delta_2))

    outputs = tf.matmul(RBF_OUT,W)+b
    if activation_function is not None:
        outputs = activation_function(outputs)
    return outputs
# Placeholders: a batch of 31 input features and a single target value.
X = tf.placeholder('float',[None,31],name = 'X')
Y = tf.placeholder('float',[None,1],name = 'Y')
# RBF network: 31 inputs -> 100 RBF units -> 1 linear output.
prediction = add_layer(X,31,100,1,activation_function=None)
# 4. Declare the cost function and the optimization algorithm.
cost = tf.reduce_mean(tf.pow(Y - prediction,2)) # loss: mean squared error
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # optimizer: gradient descent, lr=0.05
with tf.Session() as sess:
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    for epoch in range(10):
        # One epoch of per-sample (batch size 1) gradient-descent updates.
        for i in range(len(datax_norm)):
            feed = {X:datax_norm[i,:].reshape(1,31),Y:datay_norm[i,:].reshape(1,1)}
            sess.run(train_op,feed_dict = feed)
        # BUG FIXES: the original evaluation loop indexed with the stale
        # training index `i` instead of `j`, overwrote total_loss every
        # iteration (so only the final sample's loss was reported), and its
        # `epoch % 100 == 0` guard never fired after epoch 0 with only 10
        # epochs. Report the mean loss over the training set every epoch.
        losses = [sess.run(cost, feed_dict={X: datax_norm[j, :].reshape(1, 31),
                                            Y: datay_norm[j, :].reshape(1, 1)})
                  for j in range(len(datax_norm))]
        total_loss = float(np.mean(losses))
        print('Loss function at step %d is %s'%(epoch,total_loss))
    print('Training complete!')
    saver = tf.train.Saver()
    saver.save(sess,data_path)  # save the trained model
# Rescale the test inputs to [-1, 1] using the TRAINING min/range so both
# splits share one scale; broadcasting does the work np.tile did before.
testx_act = 2 * (testx_norm - datax_Min) / datax_ranges - 1
# Restore the trained model and predict every test sample one at a time.
# FIX: tf.initialize_all_variables() is deprecated — use the same
# tf.global_variables_initializer() as the training session above.
init = tf.global_variables_initializer()
with tf.Session() as sess2:
    sess2.run(init)
    saver = tf.train.Saver()
    # restore() overwrites the freshly-initialized variables with the
    # checkpoint saved after training.
    saver.restore(sess2, data_path)
    A = []
    for i in range(len(testx_act)):
        feed = {X:testx_act[i,:].reshape(1,31)}
        test_pre = sess2.run(prediction, feed_dict=feed)
        A.append(test_pre)
# Flatten the list of (1, 1) predictions into a single column vector.
A = np.array(A).reshape(len(testx_act),1)
# 主要想确认:像下面这样重复定义网络并再次训练,这种写法有没有问题?
# NOTE(review): this function duplicates the add_layer defined earlier in
# the file and simply rebinds the name — keep only one copy.
def add_layer(inputs,in_size,hidden_size,out_size,activation_function=None):
    """Gaussian RBF hidden layer followed by a linear output layer.

    Args:
        inputs: float tensor of shape [batch, in_size].
        in_size: number of input features.
        hidden_size: number of RBF units (centers).
        out_size: number of output units.
        activation_function: optional activation applied to the output.

    Returns:
        Tensor of shape [batch, out_size].
    """
    # Hidden-to-output weights and bias.
    W = tf.Variable(tf.random_normal([hidden_size,out_size]))
    b = tf.Variable(tf.random_normal([1,out_size]))
    # RBF centers (one row per hidden unit) and per-unit widths.
    c = tf.Variable(tf.random_normal([hidden_size,in_size]))
    delta = tf.Variable(tf.random_normal([1,hidden_size]))

    # BUG FIX: the original body read the global placeholder X instead of
    # the `inputs` parameter, and its tile-based distance was only valid
    # for batch size 1. Broadcast instead:
    # [batch, 1, in] - [1, hidden, in] -> sum over features -> [batch, hidden]
    diff = tf.expand_dims(inputs, 1) - tf.expand_dims(c, 0)
    dist = tf.reduce_sum(tf.square(diff), axis=2)
    delta_2 = tf.square(delta)
    # Gaussian RBF activations, shape [batch, hidden]; identical to the
    # original result for batch size 1.
    RBF_OUT = tf.exp(-dist / (2.0 * delta_2))

    outputs = tf.matmul(RBF_OUT,W)+b
    if activation_function is not None:
        outputs = activation_function(outputs)
    return outputs
# NOTE(review): everything below repeats the model construction and
# training loop from earlier in the file. In TF1, running this again adds
# a SECOND copy of every op and variable to the default graph (the reused
# names become X_1, Y_1, ...). Keep only one copy, or call
# tf.reset_default_graph() first.
X = tf.placeholder('float',[None,31],name = 'X')
Y = tf.placeholder('float',[None,1],name = 'Y')
prediction = add_layer(X,31,100,1,activation_function=None)
# 4. Declare the cost function and the optimization algorithm.
cost = tf.reduce_mean(tf.pow(Y - prediction,2)) # loss: mean squared error
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # optimizer: gradient descent
with tf.Session() as sess:
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    for epoch in range(10):
        # One epoch of per-sample (batch size 1) gradient-descent updates.
        for i in range(len(datax_norm)):
            feed = {X:datax_norm[i,:].reshape(1,31),Y:datay_norm[i,:].reshape(1,1)}
            sess.run(train_op,feed_dict = feed)
        # BUG FIXES: the evaluation loop used the stale index `i` instead
        # of `j` and kept only the last sample's loss; `epoch % 100 == 0`
        # also never fired after epoch 0 with only 10 epochs. Report the
        # mean loss over the training set every epoch instead.
        losses = [sess.run(cost, feed_dict={X: datax_norm[j, :].reshape(1, 31),
                                            Y: datay_norm[j, :].reshape(1, 1)})
                  for j in range(len(datax_norm))]
        total_loss = float(np.mean(losses))
        print('Loss function at step %d is %s'%(epoch,total_loss))
    print('Training complete!')
    saver = tf.train.Saver()
    saver.save(sess,data_path)  # save the trained model