
『TensorFlow』Hand-Building a Gundam: Unit-01 Major Refit, Full-Module Upgrade Edition



Test-code prototype for running a single image of arbitrary size through the fully convolutional network (new module):

It returns the network's raw output feature map with no post-processing (shape 1 * n * n * n_class); a sketch for collapsing that grid into an image-level prediction follows the module.

import AlexNet_FCN as Net
import AlexNet_train as train
import random
import tensorflow as tf

IMAGE_PATH = './flower_photos/daisy/5673728_71b8cb57eb.jpg'


def TF_featuremap(input_size):
    x = tf.placeholder(tf.float32, [1, input_size[0], input_size[1], 3])
    y = Net.inference(x, N_CLASS=5, train=False)

    with tf.Session() as sess:

        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state('./model/')
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

        img_raw = tf.gfile.FastGFile(IMAGE_PATH, 'rb').read()
        img = sess.run(tf.expand_dims(tf.image.resize_images(
            tf.image.decode_jpeg(img_raw),[input_size[0],input_size[1]],method=random.randint(0,3)),0))

        res = sess.run(y, feed_dict={x: img})
        print(global_step, '\n', sess.run(tf.argmax(res, 2)))
        print(global_step, '\n', res.shape)

        return res


if __name__ == '__main__':
    # Any input size works; 448x448 (larger than the 224x224 training size)
    # yields a 3x3 grid of class scores instead of a single vector, and also
    # keeps the tf.argmax(res, 2) call above happy (it needs a rank-3 result).
    TF_featuremap([448, 448])
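
For an input larger than the training size, the squeezed result is an n * n * n_class grid of logits, one class vector per receptive-field location. One common way to collapse that grid into a single image-level prediction is to average the class scores over all spatial positions; below is a minimal NumPy sketch (the helper name is illustrative, not part of the original module):

import numpy as np

def image_level_prediction(res):
    '''Collapse an [n, n, n_class] (or [n_class]) logit map to one class id.'''
    # Average the class scores over every spatial position, then take argmax.
    spatial_mean = res.reshape(-1, res.shape[-1]).mean(axis=0)
    return int(np.argmax(spatial_mean))

# e.g. image_level_prediction(TF_featuremap([448, 448]))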

 

Network backbone (fully convolutionalized upgrade):

import tensorflow as tf


'''Fully convolutional AlexNet'''

def inference(X, regularizer=None, N_CLASS=10, train=True):
    # NOTE: `regularizer` is accepted for interface compatibility with the
    # training script, but no weight below is actually regularized.
    with tf.name_scope('conv1') as scope:
        W_c1 = tf.Variable(tf.truncated_normal([11, 11, 3, 64], stddev=0.1))
        b_c1 = tf.Variable(tf.random_normal([64]))
        conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, W_c1, strides=[1, 4, 4, 1], padding='SAME'), b_c1), name=scope)
        print(conv1.name, '   ', conv1.get_shape().as_list())

        conv1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
        print(conv1.name, '   ', conv1.get_shape().as_list())

        if train: conv1 = tf.nn.dropout(conv1, 0.5)

    with tf.name_scope('conv2') as scope:
        W_c2 = tf.Variable(tf.truncated_normal([5, 5, 64, 192], stddev=0.1))
        b_c2 = tf.Variable(tf.random_normal([192]))
        conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, W_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2), name=scope)
        print(conv2.name, '   ', conv2.get_shape().as_list())

        conv2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
        print(conv2.name, '   ', conv2.get_shape().as_list())

        if train: conv2 = tf.nn.dropout(conv2, 0.5)

    with tf.name_scope('conv3') as scope:
        W_c3 = tf.Variable(tf.truncated_normal([3, 3, 192, 384], stddev=0.01))
        b_c3 = tf.Variable(tf.random_normal([384]))
        conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, W_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3), name=scope)
        print(conv3.name, '   ', conv3.get_shape().as_list())
        if train: conv3 = tf.nn.dropout(conv3, 0.5)

    with tf.name_scope('conv4') as scope:
        W_c4 = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=0.01))
        b_c4 = tf.Variable(tf.random_normal([256]))
        conv4 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, W_c4, strides=[1, 1, 1, 1], padding='SAME'), b_c4), name=scope)
        print(conv4.name, '   ', conv4.get_shape().as_list())
        if train: conv4 = tf.nn.dropout(conv4, 0.5)

    with tf.name_scope('conv5') as scope:
        W_c5 = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=0.01))
        b_c5 = tf.Variable(tf.random_normal([256]))
        conv5 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv4, W_c5, strides=[1, 1, 1, 1], padding='SAME'), b_c5), name=scope)
        print(conv5.name, '   ', conv5.get_shape().as_list())
        conv5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
        print(conv5.name, '   ', conv5.get_shape().as_list())
        if train: conv5 = tf.nn.dropout(conv5, 0.5)

    # Fully connected layers, rewritten as convolutions
    with tf.name_scope('fc1_conv') as scope:
        W_fc1 = tf.Variable(tf.truncated_normal([6, 6, 256, 4096], stddev=0.01))
        b_fc1 = tf.Variable(tf.random_normal([4096]))
        # Setting the stride equal to the feature-map size (6x6 for a 224x224
        # input) collapses the map to 1x1, mimicking the old FC layer
        fc1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv5, W_fc1, strides=[1, 6, 6, 1], padding='SAME'), b_fc1), name=scope)
        print(fc1.name, '   ', fc1.get_shape().as_list())
        if train: fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.name_scope('fc2_conv') as scope:
        W_fc2 = tf.Variable(tf.truncated_normal([1, 1, 4096, 4096], stddev=0.01))
        b_fc2 = tf.Variable(tf.random_normal([4096]))
        fc2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(fc1, W_fc2, strides=[1, 1, 1, 1], padding='SAME'), b_fc2), name=scope)
        print(fc2.name, '   ', fc2.get_shape().as_list())
        if train: fc2 = tf.nn.dropout(fc2, 0.5)

    with tf.name_scope('fc3_conv') as scope:
        W_fc3 = tf.Variable(tf.truncated_normal([1, 1, 4096, N_CLASS], stddev=0.01))
        b_fc3 = tf.Variable(tf.random_normal([N_CLASS]))
        out_put = tf.nn.bias_add(tf.nn.conv2d(fc2, W_fc3, strides=[1, 1, 1, 1], padding='SAME'), b_fc3, name=scope)
        print(out_put.name, '   ', out_put.get_shape().as_list())
        if train: out_put = tf.nn.dropout(out_put, 0.5)

    return tf.squeeze(out_put)
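
Because the classifier head is convolutional, the same weights accept any input size; only the spatial extent of the output changes. A minimal shape walk-through, as a sketch (assuming this file is importable as AlexNet_FCN, matching the test module's import):

import tensorflow as tf
import AlexNet_FCN as Net

# 224: conv1(/4)=56 -> pool1=27 -> pool2=13 -> pool5=6 -> fc1(/6)=1
# 448: conv1(/4)=112 -> pool1=55 -> pool2=27 -> pool5=13 -> fc1(/6)=3
for size in (224, 448):
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [1, size, size, 3])
    y = Net.inference(x, N_CLASS=5, train=False)
    print(size, y.get_shape().as_list())  # [5], then [3, 3, 5] after squeeze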

 

Data I/O module (no major changes):

import glob
import os.path
import random
import numpy as np
import tensorflow as tf

def creat_image_lists(validation_percentage, testing_percentage, INPUT_DATA):
    '''
    Store per-image info (file names without paths) in a dict.
    :param validation_percentage: percentage of data used for validation
    :param testing_percentage:    percentage of data used for testing
    :param INPUT_DATA:            root data path (the directory above the class folders)
    :return:                      dict {label: {'dir': str, 'training': [], 'validation': [], 'testing': []}, ...}
    '''
    result = {}
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    # The first entry of os.walk() is the root directory itself, so skip it
    is_root_dir = True            #<-----
    # Iterate over the label folders
    for sub_dir in sub_dirs:
        if is_root_dir:           #<-----
            is_root_dir = False
            continue

        extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        file_list  = []
        dir_name   = os.path.basename(sub_dir)
        # Try each possible file extension
        for extension in extensions:
            # file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            file_glob = os.path.join(sub_dir, '*.' + extension)
            file_list.extend(glob.glob(file_glob))      # collect matching paths & file names
            # print(file_glob, '\n', glob.glob(file_glob))
        if not file_list: continue

        label_name = dir_name.lower()                   # the label is just the lower-cased folder name

        # Initialize the per-split file-name lists
        training_images   = []
        testing_images    = []
        validation_images = []

        # Strip the path, keep only the file name
        for file_name in file_list:
            base_name = os.path.basename(file_name)

            # Randomly assign files to validation and testing
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(base_name)
            elif chance < (validation_percentage + testing_percentage):
                testing_images.append(base_name)
            else:
                training_images.append(base_name)
        # Build this label's dict entry
        result[label_name] = {
            'dir'        : dir_name,
            'training'   : training_images,
            'testing'    : testing_images,
            'validation' : validation_images
        }
    return result
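
A quick usage sketch (assuming the usual flower_photos layout with one folder per class; the printed values are illustrative):

image_dict = creat_image_lists(10, 10, './flower_photos')
print(sorted(image_dict.keys()))             # e.g. ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
print(image_dict['daisy']['dir'])            # folder name on disk
print(len(image_dict['daisy']['training']))  # roughly 80% of the daisy images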

def get_image_path(image_lists, image_dir, label_name, index, category):
    '''
    Get the full path of a single image.
    :param image_lists: the full image dict
    :param image_dir:   outer folder (containing the label folders)
    :param label_name:  label name
    :param index:       random index
    :param category:    'training', 'validation' or 'testing'
    :return:            path to the image
    '''
    label_lists   = image_lists[label_name]
    category_list = label_lists[category]       # image list for the target category
    mod_index     = index % len(category_list)  # wrap the random index into range
    base_name     = category_list[mod_index]    # look up the file name by index
    return os.path.join(image_dir, label_lists['dir'], base_name)


def get_random_cached_inputs(sess, n_class, image_lists, batch, category, INPUT_DATA, shape=[299, 299]):
    '''
    Randomly fetch one batch of images as training data, resized to the input-layer size.
    Calls get_image_path.
    :param sess:        session handle
    :param n_class:     number of classes
    :param image_lists: image dict
    :param batch:       batch size
    :param category:    'training' or 'validation'
    :param INPUT_DATA:  outermost image folder
    :param shape:       input-layer size
    :return:            image & label arrays
    '''
    images = []
    labels = []
    for i in range(batch):
        label_index = random.randrange(n_class)              # pick a random label index
        label_name  = list(image_lists.keys())[label_index]  # look up the label name
        image_index = random.randrange(65536)                # random seed for the in-label image index

        image_path = get_image_path(image_lists, INPUT_DATA, label_name, image_index, category)
        image_raw  = tf.gfile.FastGFile(image_path, 'rb').read()
        image_data = sess.run(tf.image.resize_images(tf.image.decode_jpeg(image_raw),
                                                     shape, method=random.randint(0, 3)))

        ground_truth = np.zeros(n_class, dtype=np.float32)
        ground_truth[label_index] = 1.0                      # one-hot ground truth, e.g. [0, 0, 1, 0, ...]
        # collect the image arrays and labels
        images.append(image_data)
        labels.append(ground_truth)
    return images, labels
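
One caveat worth flagging: every call above adds fresh decode_jpeg/resize_images ops to the default graph, so the graph keeps growing and sess.run gets slower over thousands of steps. A minimal sketch of the usual fix, building the preprocessing ops once and feeding raw bytes through a placeholder (the names here are illustrative, not part of the original module):

# Build the decode/resize pipeline once, outside any loop.
_raw_holder = tf.placeholder(tf.string)
_decoded = tf.image.resize_images(
    tf.image.decode_jpeg(_raw_holder, channels=3), [224, 224], method=0)

def load_image(sess, image_path):
    # Reuse the same graph ops for every image instead of adding new ones.
    raw = tf.gfile.FastGFile(image_path, 'rb').read()
    return sess.run(_decoded, feed_dict={_raw_holder: raw})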


def get_test_images(sess, image_lists, n_class, INPUT_DATA, shape=[299, 299]):
    '''
    Fetch all test data.
    Calls get_image_path.
    :param sess:         session handle
    :param image_lists:  image dict
    :param n_class:      number of classes
    :param INPUT_DATA:   outermost image folder
    :param shape:        input-layer size
    :return:             test images & one-hot ground truths
    '''
    test_images   = []
    ground_truths = []
    label_name_list = list(image_lists.keys())
    for label_index, label_name in enumerate(label_name_list):
        category = 'testing'
        for image_index, unused_base_name in enumerate(image_lists[label_name][category]):  # index, file name

            image_path = get_image_path(image_lists, INPUT_DATA, label_name, image_index, category)
            image_raw = tf.gfile.FastGFile(image_path, 'rb').read()
            image_data = sess.run(tf.image.resize_images(tf.image.decode_jpeg(image_raw),
                                                         shape, method=random.randint(0, 3)))

            ground_truth = np.zeros(n_class, dtype=np.float32)
            ground_truth[label_index] = 1.0
            test_images.append(image_data)
            ground_truths.append(ground_truth)
    return test_images, ground_truths


if __name__ == '__main__':

    INPUT_DATA = './flower_photos'  # data folder
    VALIDATION_PERCENTAGE = 10      # percentage of data for validation
    TEST_PERCENTAGE = 10            # percentage of data for testing
    BATCH_SIZE = 128                # batch size
    INPUT_SIZE = [224, 224]         # standard input size

    image_dict = creat_image_lists(VALIDATION_PERCENTAGE, TEST_PERCENTAGE, INPUT_DATA)
    n_class = len(image_dict.keys())
    sess = tf.Session()
    category = 'training'
    image_list, label_list = get_random_cached_inputs(
        sess, n_class, image_dict, BATCH_SIZE, category, INPUT_DATA, INPUT_SIZE)
    # exit()

 

Training module (save & restore added, TensorBoard visualization added):

import AlexNet_FCN as Net
import tensorflow as tf
import image_info as img_io
import random


'''Training parameters'''

INPUT_DATA = './flower_photos'    # data folder
VALIDATION_PERCENTAGE = 10        # percentage of data for validation
TEST_PERCENTAGE = 10              # percentage of data for testing
BATCH_SIZE = 128                  # batch size
INPUT_SIZE = [224, 224]           # standard input size
MAX_STEP   = 5000                 # maximum number of training steps

def loss(logits, labels):
    """
    Add L2Loss to all the trainable variables.
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]
    Returns:
        Loss tensor of type float.
    """
    cross_entropy_mean = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.argmax(labels, 1), logits=logits), name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
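
As written, the 'losses' collection only ever receives the cross entropy, because inference() accepts regularizer but never applies it. A minimal sketch of how a weight-decay term would be wired in, inside each layer of inference() right after its weight variable is created (shown for conv1's W_c1):

if regularizer is not None:
    tf.add_to_collection('losses', regularizer(W_c1))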

if __name__ == '__main__':

    image_dict = img_io.creat_image_lists(VALIDATION_PERCENTAGE, TEST_PERCENTAGE, INPUT_DATA)
    n_class = len(image_dict.keys())

    image_holder = tf.placeholder(tf.float32, [BATCH_SIZE, INPUT_SIZE[0], INPUT_SIZE[1], 3])
    label_holder = tf.placeholder(tf.float32, [BATCH_SIZE, n_class])

    # Choose the regularization function and run the forward pass
    regularizer = tf.contrib.layers.l2_regularizer(0.0001)
    logits = Net.inference(image_holder, regularizer, n_class)

    # Compute the loss and set up backpropagation
    # loss = loss(logits, label_holder)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(label_holder, 1), logits=logits))
    tf.summary.scalar('loss', loss)
    train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

    # Accuracy computation
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label_holder, 1))
        evaluation_step    = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', evaluation_step)
        test = tf.argmax(logits, 1)

    with tf.Session() as sess:
        # Initialize after all variables have been created
        tf.global_variables_initializer().run()
        # Saver for checkpointing the variables
        saver = tf.train.Saver()
        # Collect all summaries for visualization
        merge = tf.summary.merge_all()
        # Summary writer; the graph is added by default
        writer = tf.summary.FileWriter('./logs', sess.graph)

        print(image_holder.name)
        for step in range(MAX_STEP):

            print(step)
            # Training step
            image_batch, label_batch = img_io.get_random_cached_inputs(
                sess, n_class, image_dict, BATCH_SIZE, 'training', INPUT_DATA, INPUT_SIZE)
            sess.run(train_step, feed_dict={
                image_holder: image_batch, label_holder: label_batch})

            # Validation step
            if True:  # (step % 3 == 0) or (step + 1 == MAX_STEP):
                image_batch_val, label_batch_val = img_io.get_random_cached_inputs(
                    sess, n_class, image_dict, BATCH_SIZE, 'validation', INPUT_DATA, INPUT_SIZE)
                validation_accuracy = sess.run(evaluation_step, feed_dict={
                    image_holder: image_batch_val, label_holder: label_batch_val})
                print('Step %d: Validation accuracy on random sampled %d examples = %.1f%%' %
                      (step, BATCH_SIZE, validation_accuracy * 100))
                print('fc3:\n', sess.run(test, feed_dict={
                    image_holder: image_batch_val, label_holder: label_batch_val}))

            # Save the model
            if (step % 10 == 0) or (step + 1 == MAX_STEP) or step == 1:
                saver.save(sess, './model/model.ckpt', global_step=step)
            # Visualization
            if (step % 100 == 0) or (step + 1 == MAX_STEP):
                result = sess.run(merge, feed_dict={image_holder: image_batch, label_holder: label_batch})
                writer.add_summary(result, step)

        # Test phase (note: image_holder is fixed at BATCH_SIZE, so this final
        # run only feeds cleanly when the test set happens to match that size)
        image_batch_test, label_batch_test = img_io.get_test_images(
            sess, image_dict, n_class, INPUT_DATA, INPUT_SIZE)
        test_accuracy = sess.run(evaluation_step, feed_dict={
            image_holder: image_batch_test, label_holder: label_batch_test})
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
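
Once a few summaries have been written, the loss and accuracy curves (and the graph) can be viewed by pointing TensorBoard at the log directory, e.g. tensorboard --logdir=./logs.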

 

Original article: http://www.cnblogs.com/hellcat/p/6936322.html
