#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np

# add_layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# define placeholders for inputs to the network
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# add hidden layer
layer1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(layer1, 10, 1, activation_function=None)

# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# select the optimizer that minimizes the loss
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# important step: initialize all variables
# (tf.initialize_all_variables() is deprecated; this is the current TF 1.x call)
init = tf.global_variables_initializer()

# sess.run starts the actual computation
sess = tf.Session()
sess.run(init)

# train for 1000 iterations
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print the loss to see the improvement over the steps
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
This article shows how to build a neural network with TensorFlow; the material follows 莫烦 (Morvan)'s tutorials.
1. How to build a neural network
To train on data with a neural network, the network structure has to be set up first. In this article we build a network with a one-dimensional input and a one-dimensional output; the size of the hidden layer in between is up to the programmer.
2. Code walkthrough
Building the network
# add_layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # randomly initialize the weights with shape [in_size, out_size]
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)  # tf.zeros creates an all-zero tensor; adding 0.1 gives a small positive initial bias
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
The function above adds one network layer: it sets up the weights and biases, and once defined it can be reused as a template for building the rest of the network. activation_function=None means no activation function is applied; TensorFlow ships a number of built-in activations (tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid, ...) you can look up.
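For example, swapping in a different nonlinearity only means passing another function. A minimal sketch, assuming the add_layer helper and the imports defined above:

# same helper, different activation: tanh instead of ReLU
xs_tanh = tf.placeholder(tf.float32, [None, 1])
hidden_tanh = add_layer(xs_tanh, 1, 10, activation_function=tf.nn.tanh)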
The second step is to feed in the real data, i.e. the data we actually want to model. Here we simply evaluate on randomly generated data.
# Make up some real data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 evenly spaced numbers in [-1, 1]; [:, np.newaxis] reshapes them into a (300, 1) column
noise = np.random.normal(0, 0.05, x_data.shape)  # add noise so the data does not lie exactly on the curve
y_data = np.square(x_data) - 0.5 + noise         # the target formula for y_data
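As a quick sanity check (a minimal self-contained numpy sketch, not part of the original script), you can confirm the shapes before wiring the data into the graph:

import numpy as np

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
print(x_data.shape)  # (300, 1): 300 samples, 1 feature each
print(y_data.shape)  # (300, 1)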
Next we define placeholder nodes to receive the data. Why placeholders? Because the values are not known when the graph is built; we reserve a slot for them in advance, and when the data is produced it is fed straight into that node.
# define placeholders for inputs to the network
xs = tf.placeholder(tf.float32, [None, 1])  # [None, 1]: any number of rows (samples), each with 1 column (feature)
ys = tf.placeholder(tf.float32, [None, 1])
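To see the placeholder mechanism in isolation, here is a minimal self-contained sketch (TF 1.x API): nothing is computed until sess.run supplies concrete values through feed_dict.

import numpy as np
import tensorflow as tf

p = tf.placeholder(tf.float32, [None, 1])  # any batch size, 1 feature
doubled = p * 2                            # an op that depends on the placeholder
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={p: np.array([[1.0], [2.0]])}))
    # prints [[2.] [4.]] -- each fed value doubled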
Build the hidden layer and the output layer
# add hidden layer
layer1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)  # xs is the input, 1 is the input size, 10 is the number of hidden units (chosen by the programmer), and the activation is tf.nn.relu
# add output layer
prediction = add_layer(layer1, 10, 1, activation_function=None)  # the output layer takes layer1 as its input: 10 is the hidden-layer size, 1 is the output size; no activation function here
Define the loss function and choose an optimizer
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))  # the loss: sum the squared errors per sample, then average over all samples
# select the optimizer that minimizes the loss
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # gradient descent with learning rate 0.1, minimizing the loss
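Gradient descent is not the only option; tf.train ships several optimizers with the same minimize interface. A sketch of a drop-in replacement (the 0.01 learning rate is just an illustrative value):

# swap gradient descent for Adam; everything else stays the same
train_step = tf.train.AdamOptimizer(0.01).minimize(loss)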
Initialization
# important step: initialize all variables
init = tf.global_variables_initializer()  # every tf.Variable must be initialized before use (tf.initialize_all_variables() is the deprecated older name)
# sess.run starts the actual computation
sess = tf.Session()
sess.run(init)  # run the init op
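An equivalent, slightly tidier idiom (a sketch, not from the original post) is to let a with-block manage the session so it is closed automatically:

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # ... training loop goes here ...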
Training
# train for 1000 iterations
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})  # anything computed from placeholders needs a feed_dict in sess.run
    if i % 50 == 0:
        # print the loss every 50 steps to see the improvement
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))  # the loss also depends on the placeholders, so it needs feed_dict too
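To see the fitted curve rather than just the falling loss numbers, you can plot the network's predictions after training. A sketch, assuming matplotlib is available and that it runs right after the loop above:

import matplotlib.pyplot as plt

prediction_value = sess.run(prediction, feed_dict={xs: x_data})
plt.scatter(x_data, y_data, s=10, label='training data')
plt.plot(x_data, prediction_value, 'r-', lw=2, label='network fit')
plt.legend()
plt.show()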
Training output
0.175579
0.0228203
0.0119585
0.010242
0.0091564
0.00811643
0.00709723
0.00639308
0.00586769
0.00541264
0.00496604
0.00459476
0.00430421
0.00406194
0.00387606
0.00373667
0.00362331
0.00353008
0.00345284
0.00338251
Process finished with exit code 0
Original article: http://www.cnblogs.com/kekejieer/p/7300055.html