
Notes on 《Tensorflow实战》 (TensorFlow in Action), Section 6.3: VGGNet



This is my rewritten version of the book's code. It runs, but it overfits badly and I am not sure how best to fix that; one candidate fix is sketched right after this first script.

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 14:45:35 2017

@author: Administrator
"""

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================



import tensorflow as tf
import numpy as np

# Load the Yale-B face data through MATLAB; data_imread_MSE is the author's
# own MATLAB helper, which appears to pick sele_num training samples per
# class and return train/test matrices with one-hot labels.
data_name = 'YaleB_32x32.mat'
sele_num  = 10
import matlab.engine
eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name, sele_num)
eng.quit()
Train_Ma  = np.array(t[0]).astype(np.float32)   # training images, one row per sample
Train_Lab = np.array(t[1]).astype(np.int8)      # one-hot training labels
Test_Ma   = np.array(t[2]).astype(np.float32)
Test_Lab  = np.array(t[3]).astype(np.int8)
Num_fea   = Train_Ma.shape[1]
Num_Class = Train_Lab.shape[1]
image_row    = 32
image_column = 32
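
# If MATLAB is not available, a rough alternative (an untested sketch) is to
# read the .mat file directly. This assumes the widely circulated
# YaleB_32x32.mat layout with a 'fea' pixel matrix and a 'gnd' label vector,
# and it does NOT replicate whatever per-class splitting and one-hot encoding
# data_imread_MSE performs:
#   import scipy.io
#   mat = scipy.io.loadmat('YaleB_32x32.mat')
#   fea = mat['fea'].astype(np.float32)   # one flattened 32x32 image per row
#   gnd = mat['gnd'].ravel()              # integer class labels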



# Convolution layer helper: Xavier-initialized kernel, zero biases, conv +
# ReLU; appends the new parameters to the list p.
def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value

    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32, 
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation
# Fully connected layer helper: Xavier-initialized weights, 0.1 biases, ReLU.
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value

    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32, 
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)


# Input here is 32x32x1 Yale-B images (the book's original VGG assumes 224x224x3).
sess = tf.InteractiveSession()
# ---------- define inputs and outputs ---------- #
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1,image_row,image_column,1])
keep_prob = tf.placeholder(tf.float32)
# block 1 -- outputs 16x16x64 for the 32x32 input
p = []
conv1_1 = conv_op(x_image, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
conv1_2 = conv_op(conv1_1,  name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
pool1   = mpool_op(conv1_2,   name="pool1",   kh=2, kw=2, dw=2, dh=2)

# block 2 -- outputs 8x8x128
conv2_1 = conv_op(pool1,    name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
conv2_2 = conv_op(conv2_1,  name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
pool2   = mpool_op(conv2_2,   name="pool2",   kh=2, kw=2, dh=2, dw=2)

# block 3 -- outputs 4x4x256
conv3_1 = conv_op(pool2,    name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_2 = conv_op(conv3_1,  name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_3 = conv_op(conv3_2,  name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)    
pool3   = mpool_op(conv3_3,   name="pool3",   kh=2, kw=2, dh=2, dw=2)

# block 4 -- outputs 2x2x512
conv4_1 = conv_op(pool3,    name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_2 = conv_op(conv4_1,  name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_3 = conv_op(conv4_2,  name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool4   = mpool_op(conv4_3,   name="pool4",   kh=2, kw=2, dh=2, dw=2)

# block 5 -- outputs 1x1x512
conv5_1 = conv_op(pool4,    name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_2 = conv_op(conv5_1,  name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_3 = conv_op(conv5_2,  name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool5   = mpool_op(conv5_3,   name="pool5",   kh=2, kw=2, dw=2, dh=2)

# flatten
shp = pool5.get_shape()
flattened_shape = shp[1].value * shp[2].value * shp[3].value
resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

# fully connected layers with dropout
fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")

fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")

# Note: fc_op applies a ReLU, so fc8 is non-negative; a linear final layer
# would be more conventional before the softmax.
fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p)
predictions = tf.nn.softmax(fc8)

# Clip the softmax output so log(0) cannot produce NaN.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(
    y_ * tf.log(tf.clip_by_value(predictions, 1e-10, 1.0)), axis=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()

# Full-batch training on the small Yale-B set; a mini-batch variant is
# sketched at the end of the post.
for i in range(1000):
    train_accuracy = accuracy.eval(feed_dict={
        x:Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})

print("test accuracy %g"%accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))
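
On the question of overfitting: the network has millions of parameters but, with sele_num = 10 samples per class, likely only a few hundred training images, so some regularization beyond dropout seems warranted. Below is a minimal sketch, not a drop-in fix, assuming fc8, y_, the parameter list p, and the optimizer settings from the script above. It swaps the hand-rolled loss for TensorFlow's numerically stable softmax cross-entropy and adds L2 weight decay over the kernels (the even-indexed entries of p); the coefficient 5e-4 is a guess to be tuned on held-out data.

l2_lambda = 5e-4  # hypothetical weight-decay coefficient
l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in p[::2]])  # kernels only, not biases
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc8))
total_loss = cross_entropy + l2_lambda * l2_loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)

Shrinking the network itself (for example dropping blocks 4 and 5, which were sized for 224x224 ImageNet inputs) would likely help at least as much on 32x32 images.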

 

An alternative, more compact rewrite that wraps the network in an inference_op function:

# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 15:40:44 2017

@author: Administrator
"""

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================



import tensorflow as tf
import numpy as np

data_name = 'YaleB_32x32.mat'
sele_num  = 10
import matlab.engine
eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name,sele_num)
eng.quit()
Train_Ma  = np.array(t[0]).astype(np.float32)
Train_Lab = np.array(t[1]).astype(np.int8)
Test_Ma   = np.array(t[2]).astype(np.float32)
Test_Lab  = np.array(t[3]).astype(np.int8)
Num_fea   = Train_Ma.shape[1]
Num_Class = Train_Lab.shape[1]
image_row    = 32
image_column = 32



def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    n_in = input_op.get_shape()[-1].value

    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32, 
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation
# Fully connected layer helper (same as above).
def fc_op(input_op, name, n_out, p):
    n_in = input_op.get_shape()[-1].value

    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope+"w",
                                 shape=[n_in, n_out],
                                 dtype=tf.float32, 
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        p += [kernel, biases]
        return activation

def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)


def inference_op(input_op, keep_prob):
    p = []
    # input here is 32x32x1 Yale-B images (the book's original assumes 224x224x3)

    # block 1 -- outputs 16x16x64 for the 32x32 input
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1,  name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2,   name="pool1",   kh=2, kw=2, dw=2, dh=2)

    # block 2 -- outputs 8x8x128
    conv2_1 = conv_op(pool1,    name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1,  name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2,   name="pool2",   kh=2, kw=2, dh=2, dw=2)

    # block 3 -- outputs 4x4x256
    conv3_1 = conv_op(pool2,    name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1,  name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2,  name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)    
    pool3 = mpool_op(conv3_3,   name="pool3",   kh=2, kw=2, dh=2, dw=2)

    # block 4 -- outputs 2x2x512
    conv4_1 = conv_op(pool3,    name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1,  name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2,  name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3,   name="pool4",   kh=2, kw=2, dh=2, dw=2)

    # block 5 -- outputs 1x1x512
    conv5_1 = conv_op(pool4,    name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1,  name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2,  name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3,   name="pool5",   kh=2, kw=2, dw=2, dh=2)

    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # fully connected
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")

    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")

    # As above, fc_op applies a ReLU, so fc8 is non-negative going into softmax.
    fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p)
    predictions = tf.nn.softmax(fc8)
    return predictions, fc8, p

# ---------- define inputs and outputs ---------- #
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1,image_row,image_column,1])
keep_prob = tf.placeholder(tf.float32)
predictions, fc8, p = inference_op(x_image, keep_prob)
# Clip the softmax output so log(0) cannot produce NaN.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(
    y_ * tf.log(tf.clip_by_value(predictions, 1e-10, 1.0)), axis=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()

# Full-batch training again; note this variant runs only 100 steps.
for i in range(100):
    train_accuracy = accuracy.eval(feed_dict={
        x:Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})

print("test accuracy %g"%accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))
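
Both scripts feed the entire training set at every step. A mini-batch loop is cheaper per step, and its gradient noise can itself act as a mild regularizer; here is a minimal sketch using the same names as above, where batch_size is a hypothetical choice:

batch_size = 32
num_train = Train_Ma.shape[0]
for i in range(1000):
    idx = np.random.choice(num_train, batch_size, replace=False)
    train_step.run(feed_dict={x: Train_Ma[idx], y_: Train_Lab[idx], keep_prob: 0.8})
    if i % 50 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))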

Original post: http://www.cnblogs.com/Jerry-PR/p/8074076.html
