
A simple PyTorch framework


Network definition:

mynn.py:

import torch
from torch import nn


class mynn(nn.Module):
    """Fully connected classifier: 3520 input flux values -> 3 class scores."""

    def __init__(self):
        super(mynn, self).__init__()
        # Each block: Linear -> BatchNorm1d -> ReLU
        self.layer1 = nn.Sequential(
            nn.Linear(3520, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer4 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer5 = nn.Sequential(
            nn.Linear(4096, 3072), nn.BatchNorm1d(3072), nn.ReLU(True)
        )
        self.layer6 = nn.Sequential(
            nn.Linear(3072, 2048), nn.BatchNorm1d(2048), nn.ReLU(True)
        )
        self.layer7 = nn.Sequential(
            nn.Linear(2048, 1024), nn.BatchNorm1d(1024), nn.ReLU(True)
        )
        self.layer8 = nn.Sequential(
            nn.Linear(1024, 256), nn.BatchNorm1d(256), nn.ReLU(True)
        )
        self.layer9 = nn.Sequential(
            nn.Linear(256, 64), nn.BatchNorm1d(64), nn.ReLU(True)
        )
        self.layer10 = nn.Sequential(
            nn.Linear(64, 32), nn.BatchNorm1d(32), nn.ReLU(True)
        )
        self.layer11 = nn.Sequential(
            nn.Linear(32, 3)  # raw logits for the 3 classes
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.layer11(x)
        return x
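
For reference, here is a quick shape check (not part of the original post; it assumes mynn.py is on the import path and uses a random dummy batch of my own) confirming that a batch of 3520-point spectra comes out as 3 class scores per sample:

import torch
import mynn

model = mynn.mynn()
model.eval()                      # BatchNorm1d requires more than one sample in train mode
dummy = torch.randn(4, 3520)      # hypothetical batch of 4 spectra, 3520 flux values each
with torch.no_grad():
    out = model(dummy)
print(out.shape)                  # expected: torch.Size([4, 3])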

Custom Dataset definition:
mydataset.py

from torch.utils import data
import numpy as np


class mydataset(data.Dataset):

    def __init__(self, csv_file, root_dir=None, transform=None):
        # landmarks_frame is a numpy matrix: one row per sample,
        # with the class label stored in the last column
        self.landmarks_frame = np.loadtxt(open(csv_file, "rb"), delimiter=",")
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        lfit = self.landmarks_frame[idx, :]
        label = lfit[len(lfit) - 1]          # last column: class label
        datafit = lfit[0:(len(lfit) - 1)]    # remaining columns: spectrum
        return label, datafit
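
A small usage sketch (not part of the original post) shows what one sample looks like; it assumes train.csv has already been produced by the script at the end of this post, i.e. 3520 flux columns followed by one label column:

import mydataset

ds = mydataset.mydataset(csv_file="G:\\DATA\\train.csv")
label, datafit = ds[0]
print(datafit.shape)   # (3520,) numpy array of flux values
print(label)           # 0.0, 1.0 or 2.0 (cast to long in the training loop below)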
Main program:
main.py
import torch
from torch import nn, optim
import time
import mydataset
from model import mynn

if __name__ == '__main__':  # with num_workers > 0, the DataLoader must run under the '__main__' guard (on Windows) to avoid errors

    data_train = mydataset.mydataset(csv_file="G:\\DATA\\train.csv", root_dir=None, transform=None)
    data_test = mydataset.mydataset(csv_file="G:\\DATA\\test.csv", root_dir=None, transform=None)
    data_loader_train = torch.utils.data.DataLoader(dataset=data_train,
                                                    batch_size=256,
                                                    shuffle=True,
                                                    num_workers=0,
                                                    pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=256,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   pin_memory=True)
    print("**dataloader done**")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = mynn.mynn()
    model.to(device)

    # loss function
    criterion = nn.CrossEntropyLoss()
    # optimization algorithm
    optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)
    n_epochs = 1000

    s_time = time.time()

    for epoch in range(n_epochs):
        running_loss = 0.0
        running_correct = 0.0
        print('Epoch {}/{}'.format(epoch, n_epochs))

        model.train()
        for label, datafit in data_loader_train:
            x_train = datafit.float().to(device)
            y_train = label.long().to(device)
            outputs = model(x_train)
            _, pred = torch.max(outputs.data, 1)
            optimizer.zero_grad()
            loss = criterion(outputs, y_train)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_correct += torch.sum(pred == y_train.data).item()

        # evaluation: switch BatchNorm to eval mode and disable gradient tracking
        model.eval()
        testing_correct = 0.0
        with torch.no_grad():
            for label, datafit in data_loader_test:
                x_test = datafit.float().to(device)
                y_test = label.long().to(device)
                outputs = model(x_test)
                _, pred = torch.max(outputs.data, 1)
                testing_correct += torch.sum(pred == y_test.data).item()

        print('Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy '
              'is:{:.4f}%'.format(running_loss / len(data_train),
                                  100 * running_correct / len(data_train),
                                  100 * testing_correct / len(data_test)))

    e_time = time.time()
    print('time_run is :', e_time - s_time)
    print('*******done******')
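
The post stops after training; as an optional addition of my own (the checkpoint path is a placeholder), the trained weights can be saved and reloaded with PyTorch's standard state_dict mechanism:

# inside the '__main__' block, after the training loop
torch.save(model.state_dict(), "G:\\DATA\\mynn_weights.pth")   # hypothetical path

# later, to reuse the model without retraining
model = mynn.mynn()
model.load_state_dict(torch.load("G:\\DATA\\mynn_weights.pth"))
model.eval()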

Writing the astronomical data to CSV:
main.py
# -*- coding: utf-8 -*-
"""
Read the .fit spectra, keep 3520 flux values starting near 4000 Angstrom,
append the class label as the last column, and write train/test CSVs.
"""

import os
import numpy as np
from astropy.io import fits
from sklearn.model_selection import train_test_split


def getData(fitPath, cla):
    fileList = []                   # all .fit files under fitPath
    files = os.listdir(fitPath)     # names of the entries in the directory
    for f in files:
        if os.path.isfile(fitPath + '/' + f) and f.endswith(".fit"):
            fileList.append(fitPath + '/' + f)

    x = np.ones(3521)
    num = 1
    for path in fileList:
        with fits.open(path) as f:
            header = f[0].header            # FITS header keywords

            SPEC_CLN = header['SPEC_CLN']   # spectral class (not used further)
            SN_G = header['SN_G']           # g-band signal-to-noise (not used further)
            NAXIS1 = header['NAXIS1']       # number of points in the spectrum
            COEFF0 = header['COEFF0']
            COEFF1 = header['COEFF1']

            # wavelength axis: log10(lambda) = COEFF0 + pixel_index * COEFF1
            logwavelength = COEFF0 + np.arange(NAXIS1) * COEFF1
            wave = 10 ** logwavelength      # wavelength in Angstrom

            # index of the pixel just below 4000 Angstrom
            start = 0
            for i in range(NAXIS1 - 1):
                if wave[i] <= 4000 and wave[i + 1] >= 4000:
                    start = i

            spec = f[0].data[0, :]          # flux: first row of the FITS data
            spec = np.array(spec[start:start + 3521])
            spec[3520] = cla                # last slot stores the class label

        if num == 1:
            x = spec
            num = 2
        else:
            x = np.row_stack((x, spec))
    return x


if __name__ == '__main__':
    x = getData("G:\\DATA\\STAR", 0)
    x_train, x_test = train_test_split(x, test_size=0.1, random_state=0)

    y = getData("G:\\DATA\\QSO", 1)
    y_train, y_test = train_test_split(y, test_size=0.1, random_state=0)
    x_train = np.row_stack((x_train, y_train))
    x_test = np.row_stack((x_test, y_test))

    z = getData("G:\\DATA\\GALAXY", 2)
    z_train, z_test = train_test_split(z, test_size=0.1, random_state=0)
    x_train = np.row_stack((x_train, z_train))
    x_test = np.row_stack((x_test, z_test))

    np.savetxt("G:\\DATA\\train.csv", x_train, delimiter=',')
    np.savetxt("G:\\DATA\\test.csv", x_test, delimiter=',')



Original post: https://www.cnblogs.com/invisible2/p/11523330.html
