
Building a Neural Network with TensorFlow

2017-08-12 21:19
# -*- coding: utf-8 -*-
"""
# Environment: TensorFlow 0.12 / Python 3.6.1
# Author: 王磊
"""

import tensorflow as tf
import numpy as np

"""
#=== 真实数据  集合
"""
x_data = np.linspace(-1,1,300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise# 一元二次曲线
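
# Optional sanity check (an addition, not in the original script): both arrays
# are column vectors of shape (300, 1), matching the [None, 1] placeholders below.
assert x_data.shape == (300, 1) and y_data.shape == (300, 1)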

"""
#=== 输入数据  监督学习  训练数据
"""
xs = tf.placeholder(tf.float32, [None, 1])#None:样本个数随意
ys = tf.placeholder(tf.float32, [None, 1])#None:样本个数随意

"""
#=== 函数:添加神经网络层
#=== 函参:
inputs输入数据
up_size上一层神经元结点个数
current_size当前层神经元结点个数
activation_function激活函数
"""

def add_layer(inputs, up_size, current_size, activation_function=None):
    # weights
    Weights = tf.Variable(tf.random_normal([up_size, current_size]))
    # biases
    biases = tf.Variable(tf.zeros([1, current_size]) + 0.1)
    # weighted sum
    Wx_plus_b = tf.matmul(inputs, Weights) + biases

    if activation_function is None:
        # linear output
        outputs = Wx_plus_b
    else:
        # nonlinear output (apply the activation function)
        outputs = activation_function(Wx_plus_b)
    return outputs
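
# Shape note: each layer computes activation(inputs @ Weights + biases), where
# inputs is [batch, up_size], Weights is [up_size, current_size], and biases
# ([1, current_size]) is broadcast across the batch, giving an output of shape
# [batch, current_size].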

"""
#=== 搭建神经网络
"""
# 添加隐含层
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# 添加输出层
prediction = add_layer(l1, 10, 1, activation_function=None)

"""
#=== 优化神经网络
"""
# 损失函数(MSE均方误差)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),reduction_indices=[1]))
# 梯度下降
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
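# Loss note: reduce_sum with reduction_indices=[1] sums the squared error over
# each sample's output dimension (length 1 here), and reduce_mean then averages
# over the batch, yielding the mean squared error. Gradient descent uses a fixed
# learning rate of 0.1.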

"""
#=== Initialize the neural network
"""
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
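# Note: the tf.Variable nodes created in add_layer (Weights and biases) only get
# their initial values when the initializer op runs, so sess.run(init) must be
# executed before any training step.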

"""
#=== 训练神经网络
"""
for i in range(1000):
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 10 == 0:
print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
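
To see what the network has learned, one simple follow-up is to run the prediction op on x_data and plot the fit against the noisy targets. The sketch below is a minimal example, not part of the original post; it assumes matplotlib is installed and reuses the variables defined above.

import matplotlib.pyplot as plt

# evaluate the trained network on the training inputs
fitted = sess.run(prediction, feed_dict={xs: x_data})

# plot the noisy samples and the fitted curve
plt.scatter(x_data.ravel(), y_data.ravel(), s=5, label='noisy data')
plt.plot(x_data.ravel(), fitted.ravel(), 'r-', linewidth=2, label='network fit')
plt.legend()
plt.show()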