
CNN_mnist



This is a COIL20 classification program adapted from Section 5.2 of 《TensorFlow实战》 (TensorFlow in Action).

# -*- coding: utf-8 -*-
"""
Created on Sat Dec 16 10:02:46 2017

@author: Administrator
"""

#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import matlab.engine

data_name = 'COIL20.mat'
sele_num  = 10   # number of training samples selected per class

# Load the data through the MATLAB engine (see data_imread_MSE below).
eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name, sele_num)
eng.quit()
#t = np.array(t)
Train_Ma  = np.array(t[0]).astype(np.float32)
Train_Lab = np.array(t[1]).astype(np.int8)
Test_Ma   = np.array(t[2]).astype(np.float32)
Test_Lab  = np.array(t[3]).astype(np.int8)
Num_fea   = Train_Ma.shape[1]
Num_Class = Train_Lab.shape[1]
image_row    = 32
image_column = 32
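# Sanity check (added): COIL20 images are 32x32, so the flattened feature
# length coming back from MATLAB should equal image_row * image_column.
assert Num_fea == image_row * image_column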
import tensorflow as tf
sess = tf.InteractiveSession()


def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)

def bias_variable(shape):
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)
  
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
                        
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
                        
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
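# After two rounds of 2x2 max-pooling, each 32x32 input is reduced to 8x8
# with 64 feature maps, which is why the dense layer below takes 8*8*64 inputs.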

W_fc1 = weight_variable([8 * 8 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

W_fc2 = weight_variable([1024, Num_Class])
b_fc2 = bias_variable([Num_Class])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
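# Note (added): tf.log(y_conv) can underflow to log(0) once the softmax
# saturates. A numerically safer TF1 formulation feeds the pre-softmax
# logits to tf.nn.softmax_cross_entropy_with_logits; the book's version
# is kept above unchanged.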

correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()
for i in range(1000):
    train_accuracy = accuracy.eval(feed_dict={
        x:Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.5})

print("test accuracy %g"%accuracy.eval(feed_dict={
    x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))
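
The loop above feeds the entire training set on every step, which is workable for COIL20's small selection (10 samples per class, 200 training images) but scales poorly. A minimal mini-batch sketch reusing the same graph and arrays (batch_size is an illustrative choice, not from the original):

batch_size = 50   # illustrative value, not from the book
num_train = Train_Ma.shape[0]
for i in range(1000):
    # sample a random mini-batch of training rows
    idx = np.random.choice(num_train, batch_size, replace=False)
    train_step.run(feed_dict={x: Train_Ma[idx], y_: Train_Lab[idx], keep_prob: 0.5})
    if i % 100 == 0:
        print("step %d, training accuracy %g" % (i, accuracy.eval(
            feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})))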


MATLAB data-loading code

function output = data_imread_MSE(name,sele_num)
% Data reader for the TensorFlow examples (the Section 3.2 softmax
% regression and the CNN above).
% The data are returned as a 1x4 cell array: training matrix, training
% labels, test matrix, test labels. Both matrices hold one sample per row.
% Labels are MSE-style one-hot matrices: each row encodes one sample's
% class, with exactly one element equal to 1.
addpath('H:\2015629房师兄代码\data set');
load(name);
fea = double(fea);
nnClass = length(unique(gnd));  % The number of classes;
num_Class = [];
for i = 1:nnClass
    num_Class = [num_Class length(find(gnd==i))]; %The number of samples of each class
end
%%------------------select training samples and test samples--------------%% 
Train_Ma  = [];
Train_Lab = [];
Test_Ma   = [];
Test_Lab  = [];
for j = 1:nnClass    
    idx = find(gnd==j);
    randIdx  = randperm(num_Class(j));
    Train_Ma = [Train_Ma; fea(idx(randIdx(1:sele_num)),:)];            % select sele_num samples per class for training
    Train_Lab= [Train_Lab;gnd(idx(randIdx(1:sele_num)))];
    Test_Ma  = [Test_Ma;fea(idx(randIdx(sele_num+1:num_Class(j))),:)]; % remaining samples per class for testing
    Test_Lab = [Test_Lab;gnd(idx(randIdx(sele_num+1:num_Class(j))))];
end
Train_Ma = Train_Ma';                       % transpose to one sample per column
Train_Ma = Train_Ma./repmat(sqrt(sum(Train_Ma.^2)),[size(Train_Ma,1) 1]);  % unit L2 norm per sample
Test_Ma  = Test_Ma';
Test_Ma  = Test_Ma./repmat(sqrt(sum(Test_Ma.^2)),[size(Test_Ma,1) 1]);

label = unique(Train_Lab);
Train_Lab = bsxfun(@eq, Train_Lab, label');   % one-hot encode training labels

label = unique(Test_Lab);
Test_Lab = bsxfun(@eq, Test_Lab, label');     % one-hot encode test labels

output = cell(1,4);
output{1} = Train_Ma';    % back to one sample per row for Python
output{2} = Train_Lab;
output{3} = Test_Ma';
output{4} = Test_Lab;
end
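
For readers without MATLAB, here is a rough NumPy equivalent of the per-class split, L2 normalisation, and one-hot encoding above. It is a sketch only, assuming COIL20.mat contains 'fea' (one sample per row) and 'gnd' (integer class labels) and can be read with scipy.io:

import numpy as np
import scipy.io as sio

def data_imread_mse_np(name, sele_num):
    mat = sio.loadmat(name)
    fea = mat['fea'].astype(np.float64)
    gnd = mat['gnd'].ravel()
    classes = np.unique(gnd)
    train_idx, test_idx = [], []
    for c in classes:                                   # random split per class
        idx = np.random.permutation(np.where(gnd == c)[0])
        train_idx.extend(idx[:sele_num])
        test_idx.extend(idx[sele_num:])
    def normalize(M):                                   # unit L2 norm per sample
        return M / np.linalg.norm(M, axis=1, keepdims=True)
    def one_hot(labels):                                # one-hot rows, MSE style
        return (labels[:, None] == classes[None, :]).astype(np.int8)
    return (normalize(fea[train_idx]), one_hot(gnd[train_idx]),
            normalize(fea[test_idx]), one_hot(gnd[test_idx]))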

Original post: http://www.cnblogs.com/Jerry-PR/p/8045991.html

(0)
(0)
   
举报
评论 一句话评论(0
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!