Tensorflow学习笔记Demo002
2018-03-26 19:44
357 查看
很久没有更新tensorflow的教程了,今天来用tensorflow来搭建神经网络。搭建的具体过程就不在这里介绍了,只要根据原理一步步地跟进即可;
废话不多说;
# Example 1: fit a linear regression with the normal equation (least
# squares) using TensorFlow 1.x. (sklearn has an equivalent built-in,
# but the point here is the TF graph mechanics.)
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing

# California housing dataset shipped with scikit-learn.
housing = fetch_california_housing()
m, n = housing.data.shape  # BUG FIX: original had a stray "87" fused onto this line
# Prepend a bias column of ones so theta[0] acts as the intercept.
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
# y must be a column vector of shape (m, 1), not a flat list.
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
XT = tf.transpose(X)
# Normal equation: theta = (X^T X)^{-1} X^T y.
# BUG FIX: tf.reverse only flips tensor dimensions; matrix inversion
# requires tf.matrix_inverse.
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)

with tf.Session() as sess:
    theta_value = sess.run(theta)
    # Equivalently: theta_value = theta.eval()
# Example 2: the same regression solved by batch gradient descent,
# with the gradients derived by hand.
learning_rate = 0.01
n_epochs = 1000

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
# BUG FIX: reshape targets to a column vector; a flat (m,) tensor
# broadcast against the (m, 1) prediction yields an (m, m) error matrix.
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")

# theta is trainable state, hence a Variable (constants are immutable).
# BUG FIX: uniform init in [-1, 1) needs tf.random_uniform;
# tf.random_normal would interpret (-1.0, 1.0) as mean/stddev instead.
theta = tf.Variable(tf.random_uniform((n + 1, 1), -1.0, 1.0), name="theta")
ypredict = tf.matmul(X, theta)
error = ypredict - y
mse = tf.reduce_mean(tf.square(error))
# Analytic gradient of the MSE w.r.t. theta: (2/m) * X^T (X theta - y).
gradients = 2 / m * tf.matmul(tf.transpose(X), error)
# One descent step: theta <- theta - learning_rate * gradient.
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # BUG FIX: the original print was missing its closing parenthesis.
            print("while the epoch is :%s, the mse is %s" % (epoch, mse.eval()))
        sess.run(training_op)
    best_theta = theta.eval()
# Example 3: let TensorFlow's built-in optimizer compute the gradients
# instead of deriving them by hand.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

# BUG FIX: original read "nit=...", leaving `init` undefined below.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # BUG FIX: the original print was missing its closing parenthesis.
            print("while the epoch is :%s, the mse is %s" % (epoch, mse.eval()))
        sess.run(training_op)
    best_theta = theta.eval()
# Example 4: momentum optimizer. Update rule:
#   v_t   = momentum * v_{t-1} + grad
#   theta = theta - learning_rate * v_t
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer2.minimize(mse)  # the loss is still the same mse
# Example 5: mini-batch (stochastic) gradient descent, feeding the data
# through placeholders with feed_dict.
X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")

# BUG FIX: the graph must be rebuilt on the placeholders — the earlier
# mse/training_op were wired to the constant tensors and would ignore
# the fed batches entirely.
theta = tf.Variable(tf.random_uniform((n + 1, 1), -1.0, 1.0), name="theta")
ypredict = tf.matmul(X, theta)
error = ypredict - y
mse = tf.reduce_mean(tf.square(error))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()

batch_size = 100
n_batches = int(np.ceil(m / batch_size))


def fetch_batch(epoch, batch_index, batch_size):
    """Return one (features, targets) numpy mini-batch.

    BUG FIX: slice the numpy arrays, not the placeholders — feed_dict
    values must be concrete arrays, and targets need a column shape.
    """
    start = batch_index * batch_size
    end = start + batch_size
    return (housing_data_plus_bias[start:end],
            housing.target.reshape(-1, 1)[start:end])


with tf.Session() as sess:
    sess.run(init)
    # BUG FIX: original iterated range(epochs) with `epochs` undefined.
    for epoch in range(n_epochs):
        for i in range(n_batches):
            X_train, y_train = fetch_batch(epoch, i, batch_size)
            sess.run(training_op, feed_dict={X: X_train, y: y_train})
        # BUG FIX: `epoch % 10 == 10` can never be true; log every 10th epoch.
        if epoch % 10 == 0:
            # mse depends on the placeholders, so evaluating it needs a
            # feed_dict too (here: the last batch seen).
            print("the loss function is %s"
                  % sess.run(mse, feed_dict={X: X_train, y: y_train}))
    best_theta = theta.eval()
废话不多说;
# Example 1 (repeated): linear regression via the normal equation
# (least squares) in TensorFlow 1.x.
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing

# California housing dataset shipped with scikit-learn.
housing = fetch_california_housing()
m, n = housing.data.shape  # BUG FIX: original had a stray "87" fused onto this line
# Prepend a bias column of ones so theta[0] acts as the intercept.
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
# y must be a column vector of shape (m, 1), not a flat list.
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")
XT = tf.transpose(X)
# Normal equation: theta = (X^T X)^{-1} X^T y.
# BUG FIX: tf.reverse only flips tensor dimensions; matrix inversion
# requires tf.matrix_inverse.
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)

with tf.Session() as sess:
    theta_value = sess.run(theta)
    # Equivalently: theta_value = theta.eval()
# Example 2 (repeated): batch gradient descent with hand-derived gradients.
learning_rate = 0.01
n_epochs = 1000

X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X")
# BUG FIX: reshape targets to a column vector; a flat (m,) tensor
# broadcast against the (m, 1) prediction yields an (m, m) error matrix.
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y")

# theta is trainable state, hence a Variable (constants are immutable).
# BUG FIX: uniform init in [-1, 1) needs tf.random_uniform;
# tf.random_normal would interpret (-1.0, 1.0) as mean/stddev instead.
theta = tf.Variable(tf.random_uniform((n + 1, 1), -1.0, 1.0), name="theta")
ypredict = tf.matmul(X, theta)
error = ypredict - y
mse = tf.reduce_mean(tf.square(error))
# Analytic gradient of the MSE w.r.t. theta: (2/m) * X^T (X theta - y).
gradients = 2 / m * tf.matmul(tf.transpose(X), error)
# One descent step: theta <- theta - learning_rate * gradient.
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # BUG FIX: the original print was missing its closing parenthesis.
            print("while the epoch is :%s, the mse is %s" % (epoch, mse.eval()))
        sess.run(training_op)
    best_theta = theta.eval()
# Example 3 (repeated): use TensorFlow's built-in optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)

# BUG FIX: original read "nit=...", leaving `init` undefined below.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # BUG FIX: the original print was missing its closing parenthesis.
            print("while the epoch is :%s, the mse is %s" % (epoch, mse.eval()))
        sess.run(training_op)
    best_theta = theta.eval()
# Example 4 (repeated): momentum optimizer. Update rule:
#   v_t   = momentum * v_{t-1} + grad
#   theta = theta - learning_rate * v_t
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer2.minimize(mse)  # the loss is still the same mse
# Example 5 (repeated): mini-batch gradient descent via placeholders.
X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")

# BUG FIX: rebuild the graph on the placeholders — the earlier
# mse/training_op were wired to the constant tensors and would ignore
# the fed batches entirely.
theta = tf.Variable(tf.random_uniform((n + 1, 1), -1.0, 1.0), name="theta")
ypredict = tf.matmul(X, theta)
error = ypredict - y
mse = tf.reduce_mean(tf.square(error))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()

batch_size = 100
n_batches = int(np.ceil(m / batch_size))


def fetch_batch(epoch, batch_index, batch_size):
    """Return one (features, targets) numpy mini-batch.

    BUG FIX: slice the numpy arrays, not the placeholders — feed_dict
    values must be concrete arrays, and targets need a column shape.
    """
    start = batch_index * batch_size
    end = start + batch_size
    return (housing_data_plus_bias[start:end],
            housing.target.reshape(-1, 1)[start:end])


with tf.Session() as sess:
    sess.run(init)
    # BUG FIX: original iterated range(epochs) with `epochs` undefined.
    for epoch in range(n_epochs):
        for i in range(n_batches):
            X_train, y_train = fetch_batch(epoch, i, batch_size)
            sess.run(training_op, feed_dict={X: X_train, y: y_train})
        # BUG FIX: `epoch % 10 == 10` can never be true; log every 10th epoch.
        if epoch % 10 == 0:
            # mse depends on the placeholders, so evaluating it needs a
            # feed_dict too (here: the last batch seen).
            print("the loss function is %s"
                  % sess.run(mse, feed_dict={X: X_train, y: y_train}))
    best_theta = theta.eval()
# Example 6: save the model with tf.train.Saver (checkpoint files).
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    # BUG FIX: original iterated "for epoch in epochs" over an int;
    # it must be range(n_epochs).
    for epoch in range(n_epochs):
        sess.run(training_op)
        # Periodic checkpoint inside the training loop.
        save_path = saver.save(sess, "./temp/my_model.ckpt")
    best_theta = theta.eval()  # was "besttheta"; match naming used elsewhere
    # Final checkpoint after training completes.
    save_path = saver.save(sess, "./temp/my_model.ckpt")
相关文章推荐
- javascript时钟代码 DEMO-002
- 【起航计划 002】2015 起航计划 Android APIDemo的魔鬼步伐 01
- C++ Demo 002 : 让 STL 中的 cout 输出彩色的文字
- 【Cocos2d-x lua篇002】Demo讲解之Lua和C++牵手
- Python 2.7.9 Demo - 001.print_hello_world - 002.print_chinese
- 【起航计划 002】2015 起航计划 Android APIDemo的魔鬼步伐 01
- Tensorflow学习笔记Demo003
- MySQL PLSQL Demo - 002.变量定义、赋值
- 链表demo002
- json-lib方法demo,对象,map,list,json之间的转化
- Android百度地图实践 Demo中的一个Application类
- LeetCode 002 Add Two Numbers
- vue+node+es6+webpack创建简单vue的demo
- Android 友盟分享简单Demo
- 历代显卡精彩演示DEMO赏析点评之NV篇_1(转载)
- Android学习备忘002——ListView/GridView&BaseAdapter
- EasyDemo*参数传递Demo(on Github)
- 七牛云存储Demo征集大赛正式启动,Google Glass等你来拿!
- python+flask+SAE 微信公共平台开发的小小的demo
- 11 Spring MVC 访问入参获取和拦截器使用(@RequestParam 和 Interceptors)登录模块demo