
How to Train an Inception Network


I'm honestly a little nervous writing this, because I don't know whether what I did is correct: my GPU isn't up to the job, and it only got through two epochs before it couldn't keep going, so I can't tell whether the program itself has a problem. First, bring in the original inception_v3 model definition, which is used to obtain the logits.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from VGG16 import inception_utils

slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)


def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
    end_points = {}

    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * depth_multiplier), min_depth)

    with tf.variable_scope(scope, 'InceptionV3', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='VALID'):
            # 299 x 299 x 3
            end_point = 'Conv2d_1a_3x3'
            net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 149 x 149 x 32
            end_point = 'Conv2d_2a_3x3'
            net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 147 x 147 x 32
            end_point = 'Conv2d_2b_3x3'
            net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 147 x 147 x 64
            end_point = 'MaxPool_3a_3x3'
            net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 73 x 73 x 64
            end_point = 'Conv2d_3b_1x1'
            net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 73 x 73 x 80.
            end_point = 'Conv2d_4a_3x3'
            net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 71 x 71 x 192.
            end_point = 'MaxPool_5a_3x3'
            net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # 35 x 35 x 192.

        # Inception blocks
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
            # mixed: 35 x 35 x 256.
            end_point = 'Mixed_5b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                           scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_1: 35 x 35 x 288.
            end_point = 'Mixed_5c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                           scope='Conv_1_0c_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1],
                                           scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_2: 35 x 35 x 288.
            end_point = 'Mixed_5d'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(64), [5, 5],
                                           scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                           scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_3: 17 x 17 x 768.
            end_point = 'Mixed_6a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                           scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_1x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed4: 17 x 17 x 768.
            end_point = 'Mixed_6b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(128), [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, depth(128), [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_5: 17 x 17 x 768.
            end_point = 'Mixed_6c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # mixed_6: 17 x 17 x 768.
            end_point = 'Mixed_6d'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(160), [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, depth(160), [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_7: 17 x 17 x 768.
            end_point = 'Mixed_6e'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                           scope='Conv2d_0c_7x1')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                           scope='Conv2d_0b_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                           scope='Conv2d_0c_1x7')
                    branch_2 = slim.conv2d(branch_2, depth(192), [7, 1],
                                           scope='Conv2d_0d_7x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [1, 7],
                                           scope='Conv2d_0e_1x7')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
                                           scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_8: 8 x 8 x 1280.
            end_point = 'Mixed_7a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(192), [1, 7],
                                           scope='Conv2d_0b_1x7')
                    branch_1 = slim.conv2d(branch_1, depth(192), [7, 1],
                                           scope='Conv2d_0c_7x1')
                    branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                           padding='VALID', scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
                                               scope='MaxPool_1a_3x3')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
            # mixed_9: 8 x 8 x 2048.
            end_point = 'Mixed_7b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat(axis=3, values=[
                        slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(
                        branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat(axis=3, values=[
                        slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(
                        branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points

            # mixed_10: 8 x 8 x 2048.
            end_point = 'Mixed_7c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = tf.concat(axis=3, values=[
                        slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
                        slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(
                        branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = tf.concat(axis=3, values=[
                        slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
                        slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(
                        branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
                net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if end_point == final_endpoint: return net, end_points
        raise ValueError('Unknown final endpoint %s' % final_endpoint)


def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 create_aux_logits=True,
                 scope='InceptionV3',
                 global_pool=False):
    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * depth_multiplier), min_depth)

    with tf.variable_scope(scope, 'InceptionV3', [inputs], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            net, end_points = inception_v3_base(
                inputs, scope=scope, min_depth=min_depth,
                depth_multiplier=depth_multiplier)

            # Auxiliary Head logits
            if create_aux_logits and num_classes:
                with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                    stride=1, padding='SAME'):
                    aux_logits = end_points['Mixed_6e']
                    with tf.variable_scope('AuxLogits'):
                        aux_logits = slim.avg_pool2d(
                            aux_logits, [5, 5], stride=3, padding='VALID',
                            scope='AvgPool_1a_5x5')
                        aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1],
                                                 scope='Conv2d_1b_1x1')

                        # Shape of feature map before the final layer.
                        kernel_size = _reduced_kernel_size_for_small_input(
                            aux_logits, [5, 5])
                        aux_logits = slim.conv2d(
                            aux_logits, depth(768), kernel_size,
                            weights_initializer=trunc_normal(0.01),
                            padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
                        aux_logits = slim.conv2d(
                            aux_logits, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                            scope='Conv2d_2b_1x1')
                        if spatial_squeeze:
                            aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                        end_points['AuxLogits'] = aux_logits

            # Final pooling and prediction
            with tf.variable_scope('Logits'):
                if global_pool:
                    # Global average pooling.
                    net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='GlobalPool')
                    end_points['global_pool'] = net
                else:
                    # Pooling with a fixed kernel size.
                    kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
                    net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                          scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                    end_points['AvgPool_1a'] = net
                if not num_classes:
                    return net, end_points
                # 1 x 1 x 2048
                net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                end_points['PreLogits'] = net
                # 2048
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                     normalizer_fn=None, scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                # 1000
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points


inception_v3.default_image_size = 299


def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
    """Define kernel size which is automatically reduced for small input.
    If the shape of the input images is unknown at graph construction time this
    function assumes that the input images are large enough.
    Args:
      input_tensor: input tensor of size [batch_size, height, width, channels].
      kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
    Returns:
      a tensor with the kernel size.
    TODO(jrru): Make this function work with unknown shapes. Theoretically, this
    can be done with the code below. Problems are two-fold: (1) If the shape was
    known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
    handle tensors that define the kernel size.
        shape = tf.shape(input_tensor)
        return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
    """
    shape = input_tensor.get_shape().as_list()
    if shape[1] is None or shape[2] is None:
        kernel_size_out = kernel_size
    else:
        kernel_size_out = [min(shape[1], kernel_size[0]),
                           min(shape[2], kernel_size[1])]
    return kernel_size_out
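
As a quick sanity check of what this helper does (my own toy example, not part of the original code): the kernel is clipped to the feature-map size when the map is smaller than the requested kernel, and returned unchanged otherwise.

import tensorflow as tf

# a 5 x 5 feature map cannot support an [8, 8] pool, so the kernel is clipped
small = tf.zeros([1, 5, 5, 2048])
print(_reduced_kernel_size_for_small_input(small, [8, 8]))  # [5, 5]

# a 17 x 17 map is large enough, so the requested kernel comes back unchanged
large = tf.zeros([1, 17, 17, 768])
print(_reduced_kernel_size_for_small_input(large, [5, 5]))  # [5, 5]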


inception_v3_arg_scope = inception_utils.inception_arg_scope

 

The code for inception_utils is below. It supplies default values for the slim convolution and pooling arguments, so the same settings don't have to be repeated on every layer.

#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time  : 2019/9/17 10:12
#@Author: zhangtao
#@File  : inception_utils.py

# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
  with slim.arg_scope(inception_arg_scope()):
    logits, end_points = inception.inception_v3(images, num_classes,
                                                is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim


def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001,
                        activation_fn=tf.nn.relu,
                        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
  """Defines the default arg scope for inception models.
  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    activation_fn: Activation function for conv2d.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
  Returns:
    An `arg_scope` to use for the inception models.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': batch_norm_updates_collections,
      # use fused batch norm if possible.
      'fused': None,
  }
  if use_batch_norm:
    normalizer_fn = slim.batch_norm
    normalizer_params = batch_norm_params
  else:
    normalizer_fn = None
    normalizer_params = {}
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
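
To see what the arg_scope buys you, here is a minimal sketch (my own illustration, with a made-up placeholder tensor, not part of the original file): inside the scope, a bare slim.conv2d call silently picks up the variance-scaling initializer, the ReLU activation, the L2 weight regularizer, and the batch-norm settings defined above, so none of them need to be repeated at the call site.

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_arg_scope()):
    # equivalent to passing weights_initializer, activation_fn,
    # weights_regularizer, normalizer_fn and normalizer_params explicitly
    net = slim.conv2d(images, 32, [3, 3], scope='demo_conv')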

 

Next comes the training on images using the logits. Again, I'm not sure whether what I did is right; if your machine can handle it, feel free to try it out.

from VGG16.DecodeRecord import *
import VGG16.inception_v3 as inception_v3
from VGG16.CreateTfrecordsFile import *
import tensorflow as tf
from datetime import datetime
import tensorflow.contrib.slim as slim

shuffle_size=200
labels_num=5
batch_size=8
resize_height=299
resize_width=299
channels=3
data_shape=[batch_size,resize_height,resize_width,channels]
training_steps=10000
train_record_file='D:/软件/pycharmProject/wenyuPy/Dataset/VGG16/record/train.tfrecords'
validation_record_file='D:/软件/pycharmProject/wenyuPy/Dataset/VGG16/record/validation.tfrecords'
#get the images_batch and labels_batch
def get_batches(tfrecords_file):
    dataset=tf.data.TFRecordDataset(tfrecords_file)
    dataset=dataset.map(decode_example)
    # repeat() lets the one-shot iterator serve several epochs instead of
    # raising OutOfRangeError after a single pass over the records
    dataset=dataset.shuffle(shuffle_size).batch(batch_size).repeat()
    iterator=tf.compat.v1.data.make_one_shot_iterator(dataset)
    images_batch,labels_batch=iterator.get_next()
    return images_batch,labels_batch

def get_example_nums(tf_records_filenames):
    nums=0
    for record in tf.python_io.tf_record_iterator(tf_records_filenames):
        nums+=1
    return nums

input_images=tf.compat.v1.placeholder(dtype=tf.float32,shape=[None,resize_height,resize_width,channels],name='input')
input_labels=tf.compat.v1.placeholder(dtype=tf.int32,shape=[None,labels_num],name='label')

keep_prob=tf.compat.v1.placeholder(tf.float32,name='keep_prob')
is_training=tf.compat.v1.placeholder(tf.bool,name='is_training')

def train(train_record_file,
          labels_num,
          ):
    #[base_lr,max_steps]=train_param
    #[batch_size,resize_height,resize_width,channels]=data_shape


    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        out,end_points=inception_v3.inception_v3(inputs=input_images,num_classes=labels_num,
                                dropout_keep_prob=keep_prob,is_training=is_training)
    # add the cross-entropy loss
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,logits=out)
    # include the regularization losses in the total loss
    loss=tf.compat.v1.losses.get_total_loss(add_regularization_losses=True)
    global_steps = tf.Variable(0, trainable=False)
    learning_rate=tf.compat.v1.train.exponential_decay(0.05,global_steps,150,0.9)
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=learning_rate)
    #optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)
    update_ops=tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # pass global_step so the exponential decay schedule actually advances
        train_op=optimizer.minimize(loss,global_step=global_steps)
        #train_op = slim.learning.create_train_op(total_loss=loss, optimizer=optimizer)
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out,1),tf.argmax(input_labels,1)),tf.float32))
    #saver=tf.train.Saver()
    init_op = tf.compat.v1.global_variables_initializer()
    train_record_nums=get_example_nums(train_record_file)
    #validation_record_nums=get_example_nums(validation_record_file)
    max_train_steps=int(train_record_nums/batch_size)  # steps per epoch
    epoches_nums=10*max_train_steps  # total steps for 10 epochs
    train_losses=[]
    train_acc=[]
    # build the input pipeline once; calling get_batches inside the loop would
    # add a fresh dataset and iterator to the graph on every step and quickly
    # exhaust memory
    train_images, train_labels = get_batches(train_record_file)
    with tf.compat.v1.Session() as sess:
        sess.run(init_op)
        for i in range(epoches_nums):
            train_x,train_y=sess.run([train_images,train_labels])
            _,train_loss=sess.run([train_op,loss],feed_dict={
                input_images:train_x,input_labels:train_y,
                keep_prob:0.5,is_training:True
            })

            #train_losses.append(train_loss)
            #train_acc.append(accuracy)
            if i%max_train_steps==0:
                train_accuracy=sess.run([accuracy],feed_dict={
                    input_images: train_x,
                    input_labels: train_y,
                    keep_prob: 1.0, is_training: False
                })
                print(train_loss, train_accuracy)


if __name__=='__main__':
    base_lr=0.01
    train_log_step=100
    train(train_record_file,labels_num)
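
One thing I would add after losing my run at two epochs: wire in the Saver that is commented out above, so the model is checkpointed periodically and training can resume instead of starting over. A rough sketch (the 'models/' path is made up):

    saver = tf.compat.v1.train.Saver(max_to_keep=3)
    with tf.compat.v1.Session() as sess:
        sess.run(init_op)
        for i in range(epoches_nums):
            # ... run train_op as above ...
            if i % max_train_steps == 0:
                saver.save(sess, 'models/inception_v3.ckpt', global_step=i)
    # to resume later:
    # saver.restore(sess, tf.compat.v1.train.latest_checkpoint('models/'))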

Well, that was my learning process. It really did feel hard, and I still don't know whether I got it right.

 

Original source: https://www.cnblogs.com/daremosiranaihana/p/11539880.html
