
tensorflow---darknet53


#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : backbone.py
# Author : YunYang1994
# Created date: 2019-02-17 11:03:35
# Description :
#
#================================================================

import core.common as common
import tensorflow as tf


def darknet53(input_data, trainable):
    """Darknet-53 feature extractor: returns two intermediate feature maps (route_1, route_2) and the final output."""

    with tf.variable_scope('darknet'):

        input_data = common.convolutional(input_data, filters_shape=(3, 3, 3, 32), trainable=trainable, name='conv0')
        input_data = common.convolutional(input_data, filters_shape=(3, 3, 32, 64),
                                          trainable=trainable, name='conv1', downsample=True)

        # 1 residual block at 64 channels
        for i in range(1):
            input_data = common.residual_block(input_data, 64, 32, 64, trainable=trainable, name='residual%d' % (i + 0))

        input_data = common.convolutional(input_data, filters_shape=(3, 3, 64, 128),
                                          trainable=trainable, name='conv4', downsample=True)

        # 2 residual blocks at 128 channels
        for i in range(2):
            input_data = common.residual_block(input_data, 128, 64, 128, trainable=trainable, name='residual%d' % (i + 1))

        input_data = common.convolutional(input_data, filters_shape=(3, 3, 128, 256),
                                          trainable=trainable, name='conv9', downsample=True)

        # 8 residual blocks at 256 channels
        for i in range(8):
            input_data = common.residual_block(input_data, 256, 128, 256, trainable=trainable, name='residual%d' % (i + 3))

        route_1 = input_data  # stride-8 feature map, reused by the detection head
        input_data = common.convolutional(input_data, filters_shape=(3, 3, 256, 512),
                                          trainable=trainable, name='conv26', downsample=True)

        # 8 residual blocks at 512 channels
        for i in range(8):
            input_data = common.residual_block(input_data, 512, 256, 512, trainable=trainable, name='residual%d' % (i + 11))

        route_2 = input_data  # stride-16 feature map, reused by the detection head
        input_data = common.convolutional(input_data, filters_shape=(3, 3, 512, 1024),
                                          trainable=trainable, name='conv43', downsample=True)

        # 4 residual blocks at 1024 channels
        for i in range(4):
            input_data = common.residual_block(input_data, 1024, 512, 1024, trainable=trainable, name='residual%d' % (i + 19))

        return route_1, route_2, input_data
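
For orientation, here is a minimal usage sketch of the backbone, assuming backbone.py sits in the project's core package next to common.py and that the network is fed 416x416 RGB images; the placeholder name and sizes are illustrative, not part of the original post.

import tensorflow as tf
import core.backbone as backbone

with tf.Graph().as_default():
    # NHWC placeholder; 416x416 is the usual YOLOv3 input size
    input_data = tf.placeholder(tf.float32, [None, 416, 416, 3], name='input_data')
    route_1, route_2, conv = backbone.darknet53(input_data, trainable=True)
    # With a 416x416 input the three outputs are 52x52x256, 26x26x512 and 13x13x1024
    print(route_1.shape, route_2.shape, conv.shape)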

#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : common.py
# Author : YunYang1994
# Created date: 2019-02-28 09:56:29
# Description :
#
#================================================================

import tensorflow as tf


def convolutional(input_data, filters_shape, trainable, name, downsample=False, activate=True, bn=True):
    # scope for this convolution layer
    with tf.variable_scope(name):
        # when downsampling, pad explicitly and convolve with stride 2 and VALID padding
        if downsample:
            pad_h, pad_w = (filters_shape[0] - 2) // 2 + 1, (filters_shape[1] - 2) // 2 + 1
            paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])
            input_data = tf.pad(input_data, paddings, 'CONSTANT')
            strides = (1, 2, 2, 1)
            padding = 'VALID'
        else:
            strides = (1, 1, 1, 1)
            padding = "SAME"

        # define the convolution weights
        weight = tf.get_variable(name='weight', dtype=tf.float32, trainable=True,
                                 shape=filters_shape, initializer=tf.random_normal_initializer(stddev=0.01))
        conv = tf.nn.conv2d(input=input_data, filter=weight, strides=strides, padding=padding)

        # with batch normalization
        if bn:
            conv = tf.layers.batch_normalization(conv, beta_initializer=tf.zeros_initializer(),
                                                 gamma_initializer=tf.ones_initializer(),
                                                 moving_mean_initializer=tf.zeros_initializer(),
                                                 moving_variance_initializer=tf.ones_initializer(),
                                                 training=trainable)
        # without batch normalization, add a bias instead
        else:
            bias = tf.get_variable(name='bias', shape=filters_shape[-1], trainable=True,
                                   dtype=tf.float32, initializer=tf.constant_initializer(0.0))
            conv = tf.nn.bias_add(conv, bias)

        if activate: conv = tf.nn.leaky_relu(conv, alpha=0.1)

    return conv
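
As a worked example of the downsampling branch: for the 3x3 kernels used throughout Darknet-53, pad_h = pad_w = (3 - 2) // 2 + 1 = 1, so a 416x416 feature map is padded to 418x418 and the stride-2 VALID convolution produces floor((418 - 3) / 2) + 1 = 208 outputs per spatial dimension, halving the resolution exactly.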


def residual_block(input_data, input_channel, filter_num1, filter_num2, trainable, name):

    short_cut = input_data

    with tf.variable_scope(name):
        # 1x1 bottleneck followed by a 3x3 convolution
        input_data = convolutional(input_data, filters_shape=(1, 1, input_channel, filter_num1),
                                   trainable=trainable, name='conv1')
        input_data = convolutional(input_data, filters_shape=(3, 3, filter_num1, filter_num2),
                                   trainable=trainable, name='conv2')

        residual_output = input_data + short_cut

    return residual_output
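
Note that the element-wise shortcut addition requires the block's output channel count (filter_num2) to match the input channel count (input_channel); every residual_block call in darknet53 above passes the same value for both, so the tensors always line up.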



def route(name, previous_output, current_output):

    with tf.variable_scope(name):
        output = tf.concat([current_output, previous_output], axis=-1)

    return output


def upsample(input_data, name, method="deconv"):
    assert method in ["resize", "deconv"]

    if method == "resize":
        with tf.variable_scope(name):
            input_shape = tf.shape(input_data)
            output = tf.image.resize_nearest_neighbor(input_data, (input_shape[1] * 2, input_shape[2] * 2))

    if method == "deconv":
        # replace resize_nearest_neighbor with conv2d_transpose to support TensorRT optimization
        numm_filter = input_data.shape.as_list()[-1]
        output = tf.layers.conv2d_transpose(input_data, numm_filter, kernel_size=2, padding='same',
                                            strides=(2, 2), kernel_initializer=tf.random_normal_initializer())

    return output
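
To show how route and upsample are typically combined, here is a small sketch of a YOLOv3-style merge of an upsampled deep feature map with route_2 from the backbone; the placeholder shapes and scope names are assumptions for illustration, not part of the original post.

import tensorflow as tf
import core.common as common

# deep feature map after some head convolutions (assumed 256 channels at 13x13)
deep_feat = tf.placeholder(tf.float32, [None, 13, 13, 256])
# stride-16 route from darknet53 (512 channels at 26x26)
route_2 = tf.placeholder(tf.float32, [None, 26, 26, 512])

upsampled = common.upsample(deep_feat, name='upsample_0', method='resize')  # 26x26x256 at run time
merged = common.route('route_0', route_2, upsampled)  # concat on the channel axis: 256 + 512 = 768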






Original source: https://www.cnblogs.com/shuimuqingyang/p/10984988.html
