Using a GAN (generative adversarial network) to generate a large amount of similar one-dimensional data from a small set of existing samples (1-D data)

I only have a small amount of wind farm power output data for extreme scenarios, and I need a GAN to augment it. The data is just the wind farm output (one-dimensional); I can filter out roughly 3000+ records, something like [40, 51.2, 56.2, 55.1, 42.1]. It is very simple 1-D data, and I want to generate a large amount of similar data.
So far I have found a program online and tried to modify it. I have only just started programming, there is a lot I don't understand, and the data generated after my changes doesn't match my real data at all.
I would like to know how to feed my own data into the training (one possible approach is sketched after the code below), or whether anyone has a better GAN program for this problem. Many thanks.
# Below is the code I found: it uses a GAN to generate one-dimensional data points (the author uses samples drawn from a Gaussian distribution as the real data)


import argparse
import numpy as np
from scipy.stats import norm
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
sns.set(color_codes=True)

seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)


class DataDistribution(object):  # real data distribution (a toy Gaussian)
    def __init__(self):
        self.mu = 4
        self.sigma = 0.5
    def sample(self, N):
        samples = np.random.normal(self.mu, self.sigma, N)
        samples.sort()
        return samples

class GeneratorDistibution(object):  # noise input for the generator
    def __init__(self, range):
        self.range = range
    def sample(self, N):
        return np.linspace(-self.range, self.range, N) + np.random.random(N) * 0.01  # evenly spaced points plus a small per-point jitter

def linear(input, output_dim, scope=None, stddev=1.0):  # a single fully connected layer
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):  # initialize the w and b parameters
        w = tf.get_variable('w', [input.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input, w) + b

def generator(input, h_dim):
    h0 = tf.nn.softplus(linear(input, h_dim, 'g0'))
    h1 = linear(h0, 1, 'g1')
    return h1

def discriminator(input, h_dim):  # discriminator network (also reused for pre-training)
    h0 = tf.tanh(linear(input, h_dim * 2, 'd0'))
    h1 = tf.tanh(linear(h0, h_dim * 2, 'd1'))
    h2 = tf.tanh(linear(h1, h_dim * 2, scope='d2'))

    h3 = tf.sigmoid(linear(h2, 1, scope='d3'))  # output: probability that the input is real
    return h3

def optimizer(loss, var_list, initial_learning_rate):  # gradient descent with an exponentially decaying learning rate
    decay = 0.95
    num_decay_steps = 150  # decay the learning rate every 150 steps
    batch = tf.Variable(0)
    learning_rate = tf.train.exponential_decay(
        initial_learning_rate,
        batch,
        num_decay_steps,
        decay,
        staircase=True
    )
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss,
        global_step=batch,
        var_list=var_list
    )
    return optimizer

class GAN(object):  # build the model: the generator G wants its samples (which are in fact fake) to be judged real by D, while the discriminator D wants to tell real inputs from fake ones
    def __init__(self, data, gen, num_steps, batch_size, log_every):
        self.data = data
        self.gen = gen
        self.num_steps = num_steps
        self.batch_size = batch_size
        self.log_every = log_every
        self.mlp_hidden_size = 4  # number of hidden units per layer
        self.learning_rate = 0.03
        self._create_model()
    def _create_model(self):

        with tf.variable_scope('D_pre'):  # pre-training scope: train this copy of D to get a decent set of weights for initializing the real discriminator
            self.pre_input = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.pre_labels = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            D_pre = discriminator(self.pre_input, self.mlp_hidden_size)  # pre-trained discriminator
            self.pre_loss = tf.reduce_mean(tf.square(D_pre - self.pre_labels))  # pre-training loss: regress onto the true pdf
            self.pre_opt = optimizer(self.pre_loss, None, self.learning_rate)


        with tf.variable_scope('Gen'):  # generator network: maps noise z to generated samples G(z)
            self.z = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.G = generator(self.z, self.mlp_hidden_size)

        with tf.variable_scope('Disc') as scope:  # discriminator network
            self.x = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.D1 = discriminator(self.x, self.mlp_hidden_size)  # discriminator output on real data
            scope.reuse_variables()  # reuse the same weights
            self.D2 = discriminator(self.G, self.mlp_hidden_size)  # discriminator output on generated data

        # GAN loss functions
        self.loss_d = tf.reduce_mean(-tf.log(self.D1) - tf.log(1 - self.D2))  # push D1 toward 1 and D2 toward 0
        self.loss_g = tf.reduce_mean(-tf.log(self.D2))  # push D2 toward 1

        # collect the trainable variables of each sub-network
        self.d_pre_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D_pre')
        self.d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
        self.g_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')

        self.opt_d = optimizer(self.loss_d, self.d_params, self.learning_rate)
        self.opt_g = optimizer(self.loss_g, self.g_params, self.learning_rate)

    # train the model
    def train(self):
        with tf.Session() as session:
            tf.global_variables_initializer().run()  # initialize all variables

            num_pretrain_steps = 1000
            for step in range(num_pretrain_steps):  # pre-train the D_pre network first
                d = (np.random.random(self.batch_size) - 0.5) * 10.0  # uniform random inputs in [-5, 5)
                labels = norm.pdf(d, loc=self.data.mu, scale=self.data.sigma)  # targets: the true Gaussian pdf evaluated at d
                pretrain_loss, _  = session.run([self.pre_loss, self.pre_opt], {
                    self.pre_input: np.reshape(d, (self.batch_size, 1)),
                    self.pre_labels: np.reshape(labels, (self.batch_size, 1))
                })
            self.weightsD = session.run(self.d_pre_params)  # fetch the pre-trained weights of D_pre

            # copy the pre-trained weights into the real discriminator
            for i, v in enumerate(self.d_params):
                session.run(v.assign(self.weightsD[i]))

            # adversarial training loop
            for step in range(self.num_steps):
                x = self.data.sample(self.batch_size)
                z = self.gen.sample(self.batch_size)
                loss_d, _ = session.run([self.loss_d, self.opt_d], {
                    self.x: np.reshape(x, (self.batch_size, 1)),
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                z = self.gen.sample(self.batch_size)
                loss_g, _ = session.run([self.loss_g, self.opt_g], {
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                if step % self.log_every == 0:
                    print('{}; loss_d:{},\tloss_g:{}'.format(step, loss_d, loss_g))  # print loss info
                if step % 100 == 0 or step == 0 or step == self.num_steps - 1:
                    self._plot_distributions(session)

    def _samples(self, session, num_points=10000, num_bins=100):
        bins = np.linspace(-self.gen.range, self.gen.range, num_bins)

        # histogram of the real data distribution
        d = self.data.sample(num_points)
        pd, _ = np.histogram(d, bins=bins, density=True)

        # histogram of the generated distribution
        zs = np.linspace(-self.gen.range, self.gen.range, num_points)
        g = np.zeros((num_points, 1))
        for i in range(num_points // self.batch_size):
            g[self.batch_size * i : self.batch_size * (i+1)] = session.run(self.G, {
                self.z: np.reshape(
                    zs[self.batch_size * i: self.batch_size * (i+1)],
                    (self.batch_size, 1)
                )
            })
        pg, _ = np.histogram(g, bins=bins, density=True)
        return pd, pg

    def _plot_distributions(self, session):
        pd, pg = self._samples(session)
        p_x = np.linspace(-self.gen.range, self.gen.range, len(pd))
        f, ax = plt.subplots(1)
        ax.set_ylim(0, 1)
        plt.plot(p_x, pd, label='real data')
        plt.plot(p_x, pg, label='generated data')
        plt.xlabel('Data values')
        plt.ylabel('Probability density')
        plt.legend()
        plt.show()

def main(args):
    model = GAN(
        DataDistribution(),
        GeneratorDistibution(range=8),
        args.num_steps,  # number of training iterations
        args.batch_size,  # batch size per iteration
        args.log_every,  # how often to print loss info
    )
    model.train()

def parse_args():  # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--num-steps', type=int, default=1200)
    parser.add_argument('--batch-size', type=int, default=12)
    parser.add_argument('--log-every', type=int, default=10)
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
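
# One likely reason the generated values "don't match" is that DataDistribution above samples a toy Gaussian
# with mu=4, sigma=0.5 and never touches your wind farm data. As a rough, hedged sketch (everything below is an
# assumption on my part, not part of the original script; 'my_wind_power.csv' is a hypothetical file name with one
# output value per line), DataDistribution could be replaced by a class that samples batches from your own ~3000
# values, standardized so they roughly fit the generator's noise range:

import numpy as np

class RealDataDistribution(object):
    """Hypothetical replacement for DataDistribution that samples from your own 1-D wind power data."""
    def __init__(self, path='my_wind_power.csv'):   # assumed file name: one value per line
        raw = np.loadtxt(path)                      # e.g. 40, 51.2, 56.2, 55.1, 42.1, ...
        self.mean = raw.mean()
        self.std = raw.std()
        self.data = (raw - self.mean) / self.std    # standardize so values fall roughly in [-3, 3]
        self.mu = float(self.data.mean())           # ~0 after standardization (used by the D_pre labels)
        self.sigma = float(self.data.std())         # ~1 after standardization

    def sample(self, N):
        # draw a random batch of N standardized values (with replacement, since there are only ~3000 points)
        samples = np.random.choice(self.data, size=N, replace=True)
        samples.sort()
        return samples

    def inverse_transform(self, samples):
        # map generated (standardized) values back to the original wind power scale
        return np.asarray(samples) * self.std + self.mean

# With this class in place, main() could build the model as
# GAN(RealDataDistribution('my_wind_power.csv'), GeneratorDistibution(range=8), args.num_steps, args.batch_size, args.log_every),
# and any values produced by session.run(self.G, ...) would then be passed through inverse_transform(...) to get back
# onto the original wind power scale (roughly 40-56 in your example). Note that the D_pre stage regresses onto
# norm.pdf with self.data.mu and self.data.sigma, i.e. it assumes the real data is roughly Gaussian; after
# standardization this is only a crude initialization, and that pre-training stage could also be dropped entirely.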
