神经网络的实现Python版本
将本网络改成3个输入,2个隐层,每个隐层4个节点
In [1]
import numpy as np
In [2]
def sigmoid(x):
# Sigmoid activation function: f(x) = 1 / (1 + e^(-x))
return 1 / (1 + np.exp(-x))
def deriv_sigmoid(x):
# Derivative of sigmoid: f'(x) = f(x) * (1 - f(x))
fx = sigmoid(x)
return fx * (1 - fx)
def mse_loss(y_true, y_pred):
# y_true and y_pred are numpy arrays of the same length
return ((y_true - y_pred) ** 2).mean()
定义神经网络
2个输入节点,2个隐层节点 ,1个输出节点
网络结构图.png
In [3]
class OurNeuralNetwork():
A neural network with:
- 2 inputs
- a hidden layer with 2 neurons (h1, h2)
- an output layer with 1 neuron (o1)
*** DISCLAIMER ***
The code below is intend to be simple and educational, NOT optimal.
Real neural net code looks nothing like this. Do NOT use this code.
Instead, read/run it to understand how this specific network works.
"""
def __init__(self): # 初始化参数
# weights
self.w1 = np.random.normal()
self.w2 = np.random.normal()
self.w3 = np.random.normal()
self.w4 = np.random.normal()
self.w5 = np.random.normal()
self.w6 = np.random.normal()
# biases
self.b1 = np.random.normal()
self.b2 = np.random.normal()
self.b3 = np.random.normal()
def feedforward(self, x): # 定义前向传播
# x is a numpy array with 2 elements, for example [input1, input2]
h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
return o1
def train(self, data, all_y_trues):
"""
- data is a (n x 2) numpy array, n = # samples in the dataset.
- all_y_trues is a numpy array with n elements.
Elements in all_y_trues correspond to those in data.
"""
learn_rate = 0.01
epochs = 1000 # number of times to loop through the entire dataset
for epoch in range(epochs):
for x, y_true in zip(data, all_y_trues):
# - - - Do a feedforward (we'll need these values later)
sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
h1 = sigmoid(sum_h1)
sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
h2 = sigmoid(sum_h2)
sum_o1 = self.w5 * x[0] + self.w6 * x[1] + self.b3
o1 = sigmoid(sum_o1)
y_pred = o1
# - - - Calculate partial derivatives.
# - - - Naming: d_L_d_w1 represents "partial L / partial w1"
d_L_d_ypred = -2 * (y_true - y_pred)
# Neuron o1
d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
d_ypred_d_b3 = deriv_sigmoid(sum_o1)
d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)
# Neuron h1
d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
d_h1_d_b1 = deriv_sigmoid(sum_h1)
# Neuron h2
d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
d_h2_d_w4 = x[0] * deriv_sigmoid(sum_h2)
d_h2_d_b2 = deriv_sigmoid(sum_h2)
# - - - update weights and biases
# Neuron o1
self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3
# Neuron h1
self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1
# Neuron h2
self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2
# - - - Calculate total loss at the end of each epoch
if epoch % 10 == 0:
y_preds = np.apply_along_axis(self.feedforward, 1, data)
loss = mse_loss(all_y_trues, y_preds)
print("Epoch %d loss: %.3f", (epoch, loss))
data = np.array([
[-2, -1], # Alice
[25, 6], # Bob
[17, 4], # Charlie
[-15, -6] # diana
])
all_y_trues = np.array([
1, # Alice
0, # Bob
0, # Charlie
1 # diana
])
network = OurNeuralNetwork()
network.train(data, all_y_trues)
emily = np.array([-7, -3]) # 128 pounds, 63 inches
frank = np.array([20, 2]) # 155 pounds, 68 inches
print("Emily: %.3f" % network.feedforward(emily)) # 0.951 - F
print("Frank: %.3f" % network.feedforward(frank)) # 0.039 - M
Epoch %d loss: %.3f (0, 0.09968608579345187)
Epoch %d loss: %.3f (10, 0.08978137909701703)
Epoch %d loss: %.3f (20, 0.08183257067020515)
Epoch %d loss: %.3f (30, 0.075275034289523)
Epoch %d loss: %.3f (40, 0.06972045655057196)
Epoch %d loss: %.3f (50, 0.06490256965760956)
Epoch %d loss: %.3f (60, 0.06063940988251769)
Epoch %d loss: %.3f (70, 0.05680689435003017)
Epoch %d loss: %.3f (80, 0.053320436134632)
Epoch %d loss: %.3f (90, 0.05012226670444711)
Epoch %d loss: %.3f (100, 0.0471727423198623)
Epoch %d loss: %.3f (110, 0.044444389351556514)
Epoch %d loss: %.3f (120, 0.04191781982572582)
Epoch %d loss: %.3f (130, 0.03957892626354316)
Epoch %d loss: %.3f (140, 0.03741695845948788)
Epoch %d loss: %.3f (150, 0.03542321474346011)
Epoch %d loss: %.3f (160, 0.03359016573509726)
Epoch %d loss: %.3f (170, 0.03191088454700942)
Epoch %d loss: %.3f (180, 0.030378694290426247)
Epoch %d loss: %.3f (190, 0.02898696851617425)
Epoch %d loss: %.3f (200, 0.02772903732959604)
Epoch %d loss: %.3f (210, 0.02659816413384191)
Epoch %d loss: %.3f (220, 0.025587566993441103)
Epoch %d loss: %.3f (230, 0.02469046550330666)
Epoch %d loss: %.3f (240, 0.023900139414477117)
Epoch %d loss: %.3f (250, 0.023209989476907982)
Epoch %d loss: %.3f (260, 0.022613594242746823)
Epoch %d loss: %.3f (270, 0.02210475908961764)
Epoch %d loss: %.3f (280, 0.021677555596654557)
Epoch %d loss: %.3f (290, 0.021326350744542484)
Epoch %d loss: %.3f (300, 0.021045826314127453)
Epoch %d loss: %.3f (310, 0.020830989418124217)
Epoch %d loss: %.3f (320, 0.02067717539988553)
Epoch %d loss: %.3f (330, 0.020580044444006504)
Epoch %d loss: %.3f (340, 0.020535573225561442)
Epoch %d loss: %.3f (350, 0.020540042825624525)
Epoch %d loss: %.3f (360, 0.02059002399658023)
Epoch %d loss: %.3f (370, 0.02068236069767869)
Epoch %d loss: %.3f (380, 0.02081415265710708)
Epoch %d loss: %.3f (390, 0.02098273756271393)
Epoch %d loss: %.3f (400, 0.02118567334571609)
Epoch %d loss: %.3f (410, 0.021420720903014542)
Epoch %d loss: %.3f (420, 0.021685827504568718)
Epoch %d loss: %.3f (430, 0.021979111051597028)
Epoch %d loss: %.3f (440, 0.022298845287334515)
Epoch %d loss: %.3f (450, 0.022643446012467216)
Epoch %d loss: %.3f (460, 0.02301145831990293)
Epoch %d loss: %.3f (470, 0.023401544836051204)
Epoch %d loss: %.3f (480, 0.023812474936326232)
Epoch %d loss: %.3f (490, 0.024243114889448972)
Epoch %d loss: %.3f (500, 0.024692418876880555)
Epoch %d loss: %.3f (510, 0.02515942082918269)
Epoch %d loss: %.3f (520, 0.025643227019316726)
Epoch %d loss: %.3f (530, 0.026143009353102133)
Epoch %d loss: %.3f (540, 0.026657999298659554)
Epoch %d loss: %.3f (550, 0.027187482399207808)
Epoch %d loss: %.3f (560, 0.027730793316718173)
Epoch %d loss: %.3f (570, 0.028287311357395727)
Epoch %d loss: %.3f (580, 0.028856456433564815)
Epoch %d loss: %.3f (590, 0.02943768542015003)
Epoch %d loss: %.3f (600, 0.030030488867469535)
Epoch %d loss: %.3f (610, 0.030634388035432024)
Epoch %d loss: %.3f (620, 0.031248932217415176)
Epoch %d loss: %.3f (630, 0.03187369632507451)
Epoch %d loss: %.3f (640, 0.032508278708083566)
Epoch %d loss: %.3f (650, 0.03315229918533671)
Epoch %d loss: %.3f (660, 0.03380539726645624)
Epoch %d loss: %.3f (670, 0.03446723054455324)
Epoch %d loss: %.3f (680, 0.035137473243099814)
Epoch %d loss: %.3f (690, 0.035815814901499717)
Epoch %d loss: %.3f (700, 0.03650195918550369)
Epoch %d loss: %.3f (710, 0.037195622810021686)
Epoch %d loss: %.3f (720, 0.03789653456314882)
Epoch %d loss: %.3f (730, 0.03860443442135996)
Epoch %d loss: %.3f (740, 0.03931907274684707)
Epoch %d loss: %.3f (750, 0.040040209558891184)
Epoch %d loss: %.3f (760, 0.04076761387198388)
Epoch %d loss: %.3f (770, 0.04150106309414844)
Epoch %d loss: %.3f (780, 0.04224034247957489)
Epoch %d loss: %.3f (790, 0.042985244630274144)
Epoch %d loss: %.3f (800, 0.04373556904198999)
Epoch %d loss: %.3f (810, 0.04449112169008385)
Epoch %d loss: %.3f (820, 0.04525171465153612)
Epoch %d loss: %.3f (830, 0.04601716575959071)
Epoch %d loss: %.3f (840, 0.04678729828791628)
Epoch %d loss: %.3f (850, 0.04756194066146456)
Epoch %d loss: %.3f (860, 0.04834092619148704)
Epoch %d loss: %.3f (870, 0.04912409283241865)
Epoch %d loss: %.3f (880, 0.049911282958563306)
Epoch %d loss: %.3f (890, 0.0507023431587172)
Epoch %d loss: %.3f (900, 0.051497124047047795)
Epoch %d loss: %.3f (910, 0.05229548008870985)
Epoch %d loss: %.3f (920, 0.05309726943882656)
Epoch %d loss: %.3f (930, 0.053902353793597584)
Epoch %d loss: %.3f (940, 0.05471059825241311)
Epoch %d loss: %.3f (950, 0.055521871189962824)
Epoch %d loss: %.3f (960, 0.056336044137425045)
Epoch %d loss: %.3f (970, 0.0571529916719072)
Epoch %d loss: %.3f (980, 0.057972591313391074)
Epoch %d loss: %.3f (990, 0.05879472342850367)
Emily: 0.985
Frank: 0.345