
DEMO



clc; clear; close all;
load ..\CFCC_mat\all_data_label.mat
% load all_data_label_02.mat
% The MATLAB neural network toolbox initializes weights and biases randomly on every run;
% calling setdemorandstream(pi) fixes the random seed so the training result is reproducible.
setdemorandstream(pi);

train_y = train_y';
test_y  = test_y';
P = size(test_x);
w = size(train_x);
%%
category  = 26;      % 26 classes
M         = 26*10;   % number of test samples
bp_number = 80;      % number of hidden neurons
% bp_number = round( log( size(train_x,2) ) / log(2) );
input_label_test = zeros(1,M);
% output_label = zeros(1,M);
%%
for j = 1:P(1)
    input_label_test(j) = find(test_y(:,j));   % row index of the nonzero entry in column j, i.e. the true class
end
%%
% Normalize the features
[input,minI,maxI] = premnmx(train_x');   % input: normalized matrix; minI/maxI: per-row minima/maxima
% Create the network; hidden- and output-layer transfer functions are 'tansig' and 'logsig'
net = newff(minmax(input),[bp_number,category],{'tansig' 'logsig'},'traingdx');
% 'traingdx': gradient-descent backpropagation with momentum and adaptive learning rate
net.trainParam.lr     = 0.001;    % learning rate
net.trainParam.show   = 5000;     % interval for displaying intermediate results
net.trainParam.epochs = 20000;    % maximum number of iterations
net.trainParam.goal   = 0.0001;   % target training error
%% Train the network
net = train(net,input,train_y);
save trainNet net minI
%% Training-set accuracy
trainInput = tramnmx(train_x',minI,maxI);
TT = sim(net,trainInput);         % network output for the training samples
error2 = train_y - round(TT);
RX = 0;
for s = 1:w(1)
    if (sum(error2(:,s)) == 0)
        RX = RX + 1;
    end
end
Trainidentifyrate = RX/(w(1))
%% Normalize the test data and predict
testInput = tramnmx(test_x',minI,maxI);
RS = sim(net,testInput);                 % one column per test sample
% U = round(RS);
[~,output_label_test] = max(RS,[],1);    % row index of the maximum in each column = predicted class
%% Visualization
figure; hold on;
plot(input_label_test','o');      % true labels
plot(output_label_test','r+');    % predicted labels
xlabel('测试集样本','FontSize',12);
ylabel('类别标签','FontSize',12);
legend('实际测试集分类','预测测试集分类');
title('测试集的实际分类和预测分类图','FontSize',12);
% x-axis range and ticks
set(gca,'XLim',[0 260]);
set(gca,'XTick',0:10:260);
% y-axis range and ticks
set(gca,'YLim',[1 26]);
set(gca,'YTick',0:1:26);
grid on;
%% Compute the test accuracy
c = input_label_test - output_label_test;
number = 0;
error_label_number = 1;
for i = 1:1:M
    if (c(1,i) == 0)
        number = number + 1;
    end
%     if (abs(c(1,i)) == 1)
%         error_label(error_label_number,1) = i;
%         error_label_number = error_label_number + 1;
%     end
    if (abs(c(1,i)) ~= 0)
        error_location(error_label_number,1) = i;   % index of a misclassified sample
        error_label_number = error_label_number + 1;
    end
end
accuracy = number/M
save error_label.mat error_location
%%
data = [input_label_test',output_label_test'];
data = transform(data);
figure; hold on;
plot(data(:,1)','o');     % true labels
plot(data(:,2)','r+');    % predicted labels
xlabel('测试集样本','FontSize',12);
ylabel('类别标签','FontSize',12);
legend('实际测试集分类','预测测试集分类');
title('测试集的实际分类和预测分类图','FontSize',12);
set(gca,'XLim',[0 260]);
set(gca,'XTick',0:10:260);
set(gca,'YLim',[1 26]);
set(gca,'YTick',0:1:26);
grid on;
%% Per-class number of correct predictions (10 test samples per class)
data = [input_label_test',output_label_test'];
data = data(:,1) - data(:,2);
AA = zeros(26,1);
for i = 1:26
    number = 0;
    for j = 1:10
        if data((i-1)*10+j,1) == 0
            number = number + 1;
        end
    end
    AA(i,1) = number;
end
%%
confusion_matrix1(output_label_test,input_label_test)
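Note that premnmx, tramnmx, minmax, and newff are legacy Neural Network Toolbox functions that newer MATLAB releases no longer ship. A minimal sketch of the same 80-hidden-neuron classifier with the current toolbox API is shown below; this is an alternative, not the code used above, and it assumes the same train_x/train_y layout.

% Sketch only: equivalent setup with the current toolbox API.
net = patternnet(80,'traingdx');          % pattern-recognition net, 80 hidden neurons
net.trainParam.lr     = 0.001;
net.trainParam.epochs = 20000;
net.trainParam.goal   = 1e-4;
[inputN,ps] = mapminmax(train_x');        % replaces premnmx
net   = train(net,inputN,train_y);        % train_y: one-hot, one column per sample
testN = mapminmax('apply',test_x',ps);    % replaces tramnmx
[~,pred_label] = max(net(testN),[],1);    % predicted class per test column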

  FUNCTION  confusion_matrix1

confusion_matrix1: plots the confusion matrix of true vs. predicted labels
function confusion_matrix1(act1, det1)
% act1: true labels, det1: predicted labels (row vectors)
% Reference: https://blog.csdn.net/xuyingjie125/article/details/78417760
[mat, order] = confusionmat(act1, det1);
% confusionmat builds the confusion matrix: mat(i,j) is the number of samples of
% class i that were assigned to class j; order lists the class labels.
k = max(order);   % number of classes

imagesc(mat);             % render the matrix values as colors at the corresponding grid positions
colormap(flipud(gray));   % flipped grayscale colormap, so larger counts appear darker
title('混淆矩阵');
textStrings = num2str(mat(:),'%d');            % counts as strings
textStrings = strtrim(cellstr(textStrings));   % cell array of strings with whitespace trimmed

% Place each count at the center of its cell
[x,y] = meshgrid(1:k);
hStrings = text(x(:),y(:),textStrings(:),'HorizontalAlignment','center');
midValue = mean(get(gca,'CLim'));
% Compare each count with the midpoint of the color limits so the text stays readable:
% white text on dark cells, black text on light cells.
textColors = repmat(mat(:) > midValue,1,3);      % one RGB triplet per cell
set(hStrings,{'Color'},num2cell(textColors,2));  % apply the per-cell text colors

% Change the tick labels below to match your own classes.

set(gca,'XTick',1:26,'XTickLabel',{'1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26'},'YTick',1:26,'YTickLabel',{'1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26'});
% ---------------------
% Author: sundreamoon
% Source: CSDN
% Original: https://blog.csdn.net/sundreamoon/article/details/79958207
% Copyright: original post by the author; please include a link to the post when reposting.
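A minimal usage sketch follows; the label vectors are made up for illustration. Note that the DEMO script above calls confusion_matrix1(output_label_test,input_label_test), i.e. predicted labels first, which only transposes the matrix.

% Hypothetical toy labels, for illustration only (26 classes, 10 samples each).
act = repelem(1:26,10);                              % true classes
det = act; det(randperm(260,20)) = randi(26,1,20);   % corrupt 20 of the predictions
figure;
confusion_matrix1(act, det);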

  Correlation coefficient code

%% Correlation coefficient
%% V. Performance evaluation
%%
% 1. Relative error
input_test_label = actual_label;
error = abs(predict_label - input_test_label)./input_test_label;
[N,~] = size(input_test_label);
%%
% 2. Coefficient of determination R^2
R2 = (N * sum(predict_label .* input_test_label) - sum(predict_label) * sum(input_test_label))^2 / ((N * sum((predict_label).^2) - (sum(predict_label))^2) * (N * sum((input_test_label).^2) - (sum(input_test_label))^2));

%%
% 3. Result comparison
result = [input_test_label' predict_label' error']

%% VI. Plot
figure
plot(1:N,input_test_label,'b:*',1:N,predict_label,'r-o')
legend('真实值','预测值')
xlabel('预测样本')
ylabel('标签值')
string = {'测试集预测结果对比';['R^2=' num2str(R2)]};
title(string)
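The R2 expression above is the squared sample Pearson correlation between predicted and true labels; with \hat{y}_i = predict_label(i) and y_i = input_test_label(i),

R^2 = \frac{\Big(N\sum_i \hat{y}_i y_i - \sum_i \hat{y}_i \sum_i y_i\Big)^2}{\Big(N\sum_i \hat{y}_i^2 - \big(\sum_i \hat{y}_i\big)^2\Big)\Big(N\sum_i y_i^2 - \big(\sum_i y_i\big)^2\Big)}

so R^2 = 1 only when the predictions are a perfect linear function of the true labels.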

  SVM code

%% Data classification and prediction with SVM -- Italian wine variety recognition
% Source: 《MATLAB神经网络30个案例分析》, Matlab中文论坛 (http://www.ilovematlab.cn/)


%% Clear the environment
close all;
clear;
clc;
format compact;
%% Load the data

% Load the wine test data: classnumber = 3, wine is a 178x13 matrix, wine_labels a 178x1 column vector
load chapter12_wine.mat;

% Box plot of the data
figure;
boxplot(wine,'orientation','horizontal','labels',categories);
title('wine数据的box可视化图','FontSize',12);
xlabel('属性值','FontSize',12);
grid on;

% Per-attribute scatter plots of the data
figure
subplot(3,5,1);
hold on
for run = 1:178
    plot(run,wine_labels(run),'*');
end
xlabel('样本','FontSize',10);
ylabel('类别标签','FontSize',10);
title('class','FontSize',10);
for run = 2:14
    subplot(3,5,run);
    hold on;
    str = ['attrib ',num2str(run-1)];
    for i = 1:178
        plot(i,wine(i,run-1),'*');
    end
    xlabel('样本','FontSize',10);
    ylabel('属性值','FontSize',10);
    title(str,'FontSize',10);
end

% Select the training and test sets

% Training set: samples 1-30 of class 1, 60-95 of class 2, 131-153 of class 3
train_wine = [wine(1:30,:);wine(60:95,:);wine(131:153,:)];
% Separate out the corresponding training labels
train_wine_labels = [wine_labels(1:30);wine_labels(60:95);wine_labels(131:153)];
% Test set: samples 31-59 of class 1, 96-130 of class 2, 154-178 of class 3
test_wine = [wine(31:59,:);wine(96:130,:);wine(154:178,:)];
% Separate out the corresponding test labels
test_wine_labels = [wine_labels(31:59);wine_labels(96:130);wine_labels(154:178)];

%% Preprocessing
% Normalize the training and test sets to the interval [0,1]

[mtrain,ntrain] = size(train_wine);
[mtest,ntest] = size(test_wine);

dataset = [train_wine;test_wine];
% mapminmax is MATLAB's built-in normalization function
[dataset_scale,ps] = mapminmax(dataset',0,1);
dataset_scale = dataset_scale';

train_wine = dataset_scale(1:mtrain,:);
test_wine = dataset_scale( (mtrain+1):(mtrain+mtest),: );
%% SVM training (libsvm interface)
model = svmtrain(train_wine_labels, train_wine, '-c 2 -g 1');

%% SVM prediction (libsvm interface)
[predict_label, accuracy, pred] = svmpredict(test_wine_labels, test_wine, model);

%% Result analysis

% Plot of the actual and predicted classes of the test set
% The plot shows that only one test sample is misclassified
figure;
hold on;
plot(test_wine_labels,'o');
plot(predict_label,'r*');
xlabel('测试集样本','FontSize',12);
ylabel('类别标签','FontSize',12);
legend('实际测试集分类','预测测试集分类');
title('测试集的实际分类和预测分类图','FontSize',12);
grid on;
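The svmtrain/svmpredict calls above require the libsvm MATLAB interface used in the book. If only the Statistics and Machine Learning Toolbox is available, a rough equivalent is sketched below; the '-c 2 -g 1' options map approximately to BoxConstraint 2 and an RBF kernel with KernelScale 1.

% Sketch: multiclass RBF-kernel SVM with fitcecoc instead of libsvm.
t = templateSVM('KernelFunction','rbf','BoxConstraint',2,'KernelScale',1);
model2 = fitcecoc(train_wine, train_wine_labels, 'Learners', t);
predict_label2 = predict(model2, test_wine);
accuracy2 = mean(predict_label2 == test_wine_labels)   % fraction of correct test predictions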


   III  CNN

main

%%%  LeNet-5-style CNN in MATLAB
%%%  Author: xd.wp
%%%  Date:   2016.10.22  14:29
%% Program notes
%          1. Pooling: 2*2 averaging
%          2. Network layer sizes:
%                           input layer:  28*28
%                           layer 1:      24*24 (convolution) * 20, tanh activation
%                           layer 2:      12*12 (pooling) * 20
%                           layer 3:      100 (fully connected)
%                           layer 4:      26 (softmax)
%          3. Training uses 26 classes x 100 samples per epoch; testing uses 26 x 10 samples
clear all;clc;
%% Network initialization
layer_c1_num=20;
layer_s1_num=20;
layer_f1_num=100;
layer_output_num=26;
% Weight-update step size (learning rate)
yita=0.01;
% Bias initialization
bias_c1=(2*rand(1,20)-ones(1,20))/sqrt(20);
bias_f1=(2*rand(1,100)-ones(1,100))/sqrt(20);
% Convolution kernel initialization
[kernel_c1,kernel_f1]=init_kernel(layer_c1_num,layer_f1_num);
% Pooling kernel (2x2 averaging)
pooling_a=ones(2,2)/4;
% Fully connected weights
weight_f1=(2*rand(20,100)-ones(20,100))/sqrt(20);
weight_output=(2*rand(100,layer_output_num)-ones(100,layer_output_num))/sqrt(100);
disp('网络初始化完成......');
%% Train the network
disp('开始网络训练......');
for iter=1:10
for n=1:100
    for m=1:26
        % Read a training sample
%         train_data=imread(strcat(num2str(m),'_',num2str(n),'.bmp'));

        path = ['C:\Users\Administrator\Desktop\流程\时域频域图\figure_2\figure_28_28\',[num2str(m),'_',num2str(n)],'.bmp'];
        train_data=imread(path);
        train_data=double(train_data);
        % Remove the mean
%       train_data=wipe_off_average(train_data);
        % Forward pass: convolution layer 1
        for k=1:layer_c1_num
            state_c1(:,:,k)=convolution(train_data,kernel_c1(:,:,k));
            % Activation
            state_c1(:,:,k)=tanh(state_c1(:,:,k)+bias_c1(1,k));
            % Pooling layer 1
            state_s1(:,:,k)=pooling(state_c1(:,:,k),pooling_a);
        end
        % Fully connected layer f1
        [state_f1_pre,state_f1_temp]=convolution_f1(state_s1,kernel_f1,weight_f1);
        % Activation
        for nn=1:layer_f1_num
            state_f1(1,nn)=tanh(state_f1_pre(:,:,nn)+bias_f1(1,nn));
        end
        % Softmax output layer
        for nn=1:layer_output_num
            output(1,nn)=exp(state_f1*weight_output(:,nn))/sum(exp(state_f1*weight_output));
        end
       %% Error computation
        Error_cost=-output(1,m);
%         if (Error_cost<-0.98)
%             break;
%         end
        %% Parameter update
        [kernel_c1,kernel_f1,weight_f1,weight_output,bias_c1,bias_f1]=CNN_upweight(yita,Error_cost,m-1,train_data,...
                                                                                                state_c1,state_s1,...
                                                                                                state_f1,state_f1_temp,...
                                                                                                output,...
                                                                                                kernel_c1,kernel_f1,weight_f1,weight_output,bias_c1,bias_f1);
    end
end
end
disp('网络训练完成,开始检验......');
%% Test phase
number = 1;
count=0;
for n=101:110
    for m=1:26
        % Read a test sample
%         train_data=imread(strcat(num2str(m),'_',num2str(n),'.bmp'));
        path = ['C:\Users\Administrator\Desktop\流程\时域频域图\figure_2\figure_28_28\',[num2str(m),'_',num2str(n)],'.bmp'];
        train_data=imread(path);
        train_data=double(train_data);
        % Remove the mean
%       train_data=wipe_off_average(train_data);
        % Forward pass: convolution layer 1
        for k=1:layer_c1_num
            state_c1(:,:,k)=convolution(train_data,kernel_c1(:,:,k));
            % Activation
            state_c1(:,:,k)=tanh(state_c1(:,:,k)+bias_c1(1,k));
            % Pooling layer 1
            state_s1(:,:,k)=pooling(state_c1(:,:,k),pooling_a);
        end
        % Fully connected layer f1
        [state_f1_pre,state_f1_temp]=convolution_f1(state_s1,kernel_f1,weight_f1);
        % Activation
        for nn=1:layer_f1_num
            state_f1(1,nn)=tanh(state_f1_pre(:,:,nn)+bias_f1(1,nn));
        end
        % Softmax output layer
        for nn=1:layer_output_num
            output(1,nn)=exp(state_f1*weight_output(:,nn))/sum(exp(state_f1*weight_output));
        end
        [p,classify]=max(output);
        if (classify==m)    % correct prediction: the training one-hot label uses index (m-1)+1 = m
            count=count+1;
        end
        fprintf('真实类别为%d  预测类别为%d  概率值为%f \n',m,classify,p);
        infor(number,1) = m;
        infor(number,2) = classify;
        infor(number,3) = p;
        number = number+1;
    end
end

%% Accuracy
number = 0;   % counter of correct predictions (the original initialization to 1 was off by one)
A = size(infor);
for i=1:A(1)
    if infor(i,1) - infor(i,2) == 0
        number = number+1;
    end
end

accuracy = number/A(1)

save data_1.mat
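The softmax loop in the forward pass recomputes sum(exp(...)) for every class; a vectorized and numerically safer equivalent, sketched with the script's own variables, is:

% Sketch: vectorized, numerically stable softmax over the output scores.
scores = state_f1*weight_output;         % 1 x layer_output_num vector of logits
scores = scores - max(scores);           % shifting by the max does not change the softmax
output = exp(scores)./sum(exp(scores));  % same result as the per-class loop above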

 FUNCTION  convolution

function [state]=convolution(data,kernel)
% Convolution layer: valid 2-D cross-correlation of data with kernel (no kernel flip).
[data_row,data_col]=size(data);
[kernel_row,kernel_col]=size(kernel);
for m=1:data_row-kernel_row+1
    for n=1:data_col-kernel_col+1
        state(m,n)=sum(sum(data(m:m+kernel_row-1,n:n+kernel_col-1).*kernel));
    end
end
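For reference, the built-in conv2 gives the same result once the kernel is rotated back (conv2 flips it); a one-line sketch:

% Sketch: equivalent valid cross-correlation using conv2.
state = conv2(data, rot90(kernel,2), 'valid');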

  FUNCTION   convolution_f1

function [state_f1,state_f1_temp]=convolution_f1(state_s1,kernel_f1,weight_f1)
%% Second convolution (fully connected) stage
layer_f1_num=size(weight_f1,2);
layer_s1_num=size(weight_f1,1);
%%
for n=1:layer_f1_num
    count=0;
    for m=1:layer_s1_num
        temp=state_s1(:,:,m)*weight_f1(m,n);
        count=count+temp;
    end
    state_f1_temp(:,:,n)=count;   % weighted combination of all pooled maps for unit n
    state_f1(:,:,n)=convolution(state_f1_temp(:,:,n),kernel_f1(:,:,n));   % 12x12 kernel reduces each map to a scalar
end
end

 FUNCTION CNN_upweight

function [kernel_c1,kernel_f1,weight_f1,weight_output,bias_c1,bias_f1]=CNN_upweight(yita,Error_cost,classify,train_data,state_c1,state_s1,state_f1,state_f1_temp,...
                                                                                                output,kernel_c1,kernel_f1,weight_f1,weight_output,bias_c1,bias_f1)
%%%     Update the network parameters: weights and convolution kernels
%% Numbers of nodes in each layer
layer_c1_num=size(state_c1,3);
layer_s1_num=size(state_s1,3);
layer_f1_num=size(state_f1,2);
layer_output_num=size(output,2);
 
[c1_row,c1_col,~]=size(state_c1);
[s1_row,s1_col,~]=size(state_s1);
 
[kernel_c1_row,kernel_c1_col]=size(kernel_c1(:,:,1));
[kernel_f1_row,kernel_f1_col]=size(kernel_f1(:,:,1));
%% Keep working copies of the current weights
kernel_c1_temp=kernel_c1;
kernel_f1_temp=kernel_f1;
 
weight_f1_temp=weight_f1;
weight_output_temp=weight_output;
%% Error computation
label=zeros(1,layer_output_num);
label(1,classify+1)=1;
delta_layer_output=output-label;
%% Update weight_output
for n=1:layer_output_num
    delta_weight_output_temp(:,n)=delta_layer_output(1,n)*state_f1';
end
weight_output_temp=weight_output_temp-yita*delta_weight_output_temp;
 
%% Update bias_f1 and kernel_f1
for n=1:layer_f1_num
    count=0;
    for m=1:layer_output_num
        count=count+delta_layer_output(1,m)*weight_output(n,m);
    end
    %bias_f1
    delta_layer_f1(1,n)=count*(1-tanh(state_f1(1,n)).^2);
    delta_bias_f1(1,n)=delta_layer_f1(1,n);
    %kernel_f1
    delta_kernel_f1_temp(:,:,n)=delta_layer_f1(1,n)*state_f1_temp(:,:,n);
end
bias_f1=bias_f1-yita*delta_bias_f1;
kernel_f1_temp=kernel_f1_temp-yita*delta_kernel_f1_temp;
%% Update weight_f1
for n=1:layer_f1_num
    delta_layer_f1_temp(:,:,n)=delta_layer_f1(1,n)*kernel_f1(:,:,n);
end
for n=1:layer_s1_num
    for m=1:layer_f1_num
        delta_weight_f1_temp(n,m)=sum(sum(delta_layer_f1_temp(:,:,m).*state_s1(:,:,n)));
    end
end
weight_f1_temp=weight_f1_temp-yita*delta_weight_f1_temp;
 
%% Update bias_c1
for n=1:layer_s1_num
    count=0;
    for m=1:layer_f1_num
        count=count+delta_layer_f1_temp(:,:,m)*weight_f1(n,m);   
    end
    delta_layer_s1(:,:,n)=count;
    delta_layer_c1(:,:,n)=kron(delta_layer_s1(:,:,n),ones(2,2)/4).*(1-tanh(state_c1(:,:,n)).^2);
    delta_bias_c1(1,n)=sum(sum(delta_layer_c1(:,:,n)));
end
bias_c1=bias_c1-yita*delta_bias_c1;
%% Update kernel_c1
for n=1:layer_c1_num
    delta_kernel_c1_temp(:,:,n)=rot90(conv2(train_data,rot90(delta_layer_c1(:,:,n),2),'valid'),2);
end
kernel_c1_temp=kernel_c1_temp-yita*delta_kernel_c1_temp;
 
%% Commit the updated weights
kernel_c1=kernel_c1_temp;
kernel_f1=kernel_f1_temp;
 
weight_f1=weight_f1_temp;
weight_output=weight_output_temp;
 
end
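The backward pass starts from delta_layer_output = output - label, which is the textbook gradient of the cross-entropy loss through a softmax output: with logits z, softmax output o, and one-hot target t,

L = -\sum_j t_j \log o_j, \qquad o_j = \frac{e^{z_j}}{\sum_k e^{z_k}}, \qquad \frac{\partial L}{\partial z_j} = o_j - t_j.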

  FUNCTION init_kernel

function [kernel_c1,kernel_f1]=init_kernel(layer_c1_num,layer_f1_num)
% Initialize the convolution kernels
for n=1:layer_c1_num
    kernel_c1(:,:,n)=(2*rand(5,5)-ones(5,5))/12;
end
for n=1:layer_f1_num
    kernel_f1(:,:,n)=(2*rand(12,12)-ones(12,12));
end
end

  FUNCTION pooling

 

function state=pooling(data,pooling_a)
%% Subsampling (average pooling) layer; the block indexing below assumes a 2x2 window
[data_row,data_col]=size(data);
[pooling_row,pooling_col]=size(pooling_a);
for m=1:data_row/pooling_row
    for n=1:data_col/pooling_col
        state(m,n)=sum(sum(data(2*m-1:2*m,2*n-1:2*n).*pooling_a));
    end
end
end
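Equivalently, 2x2 average pooling can be written with conv2 followed by subsampling (a sketch, assuming the 2x2 averaging window used throughout this code):

% Sketch: 2x2 average pooling via conv2 and subsampling.
state = conv2(data, ones(2,2)/4, 'valid');
state = state(1:2:end, 1:2:end);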

  view figure

clc
clear
load data_1.mat infor accuracy
%%
A = size(infor);
figure(1)
hold on
plot(infor(:,1),'bo');   % true classes
plot(infor(:,2),'r+');   % predicted classes
legend('实际测试集分类','预测测试集分类');
% x-axis range and ticks
set(gca,'XLim',[0 260]);
set(gca,'XTick',0:10:260);

% y-axis range and ticks

set(gca,'YLim',[1 26]);
set(gca,'YTick',0:1:26);
grid on;

%%
data = transform(infor);
figure(2)
hold on
plot(data(:,1),'bo');   % true classes
plot(data(:,2),'r+');   % predicted classes
legend('实际测试集分类','预测测试集分类');
% x-axis range and ticks
set(gca,'XLim',[0 260]);
set(gca,'XTick',0:10:260);

% y-axis range and ticks

set(gca,'YLim',[1 26]);
set(gca,'YTick',0:1:26);
grid on;
%% Per-class count of correctly classified samples (10 test samples per class)
error_cat_num = zeros(26,1);
for i=1:26
    number = 0;
    for j = i*10-9 : i*10
        if data(j,1) - data(j,2) == 0
            number = number+1;
        end
    end
    error_cat_num(i,1) = number;
end


%%
%% Correlation coefficient

%%
% 1. Relative error
input_test_label = data(:,1);
predict_label = data(:,2);
error = abs(predict_label - input_test_label)./input_test_label;
[N,~] = size(input_test_label);
%%
% 2. Coefficient of determination R^2
R2 = (N * sum(predict_label .* input_test_label) - sum(predict_label) * sum(input_test_label))^2 / ((N * sum((predict_label).^2) - (sum(predict_label))^2) * (N * sum((input_test_label).^2) - (sum(input_test_label))^2));

%%
% 3. Result comparison
result = [input_test_label' predict_label' error']

%% VI. Plot
figure
plot(1:N,input_test_label,'b:*',1:N,predict_label,'r-o')
legend('真实值','预测值')
xlabel('预测样本')
ylabel('标签值')
string = {'测试集预测结果对比';['R^2=' num2str(R2)]};
title(string)

  

 


Original post: https://www.cnblogs.com/hkkeryu/p/10912024.html
