import numpy as np

"""
function:
    load the data set
parameters:
    none
returns:
    dataMat - data set
    labelMat - label list
"""
def loadDataSet():
    dataMat = []  # data set
    labelMat = []  # label list
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # prepend a constant 1.0 for the intercept term
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

"""
function:
    compute the sigmoid function
parameters:
    inX - input value (scalar or array)
returns:
    ans - result
"""
def sigmoid(inX):
    ans = 1.0 / (1 + np.exp(-inX))
    return ans

"""
function:
    batch gradient ascent
parameters:
    dataMatIn - data set
    classLabels - label list
returns:
    weights - optimal parameters
"""
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()  # turn the label list into a column vector
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # (100,3) data matrix times (3,1) weights; `*` is matrix multiplication for np.mat, not element-wise
        error = labelMat - h  # (100,1) error vector: labels minus predicted probabilities
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

if __name__ == "__main__":
    dataArr, labelMat = loadDataSet()
    w = gradAscent(dataArr, labelMat)
    print(w)
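Written out, the loop in gradAscent is the standard gradient-ascent update for the logistic-regression log-likelihood. With X the 100x3 data matrix, y the 100x1 label vector, sigma(z) = 1/(1 + e^{-z}) the sigmoid, and alpha the step size:

\[ \mathbf{w} \leftarrow \mathbf{w} + \alpha \, X^{\top}\bigl(\mathbf{y} - \sigma(X\mathbf{w})\bigr) \]

which is exactly the line weights = weights + alpha * dataMatrix.transpose() * error, with error = labelMat - h.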
import numpy as np
import matplotlib.pyplot as plt

"""
function:
    load the data set
parameters:
    none
returns:
    dataMat - data set
    labelMat - label list
"""
def loadDataSet():
    dataMat = []  # data set
    labelMat = []  # label list
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

"""
function:
    compute the sigmoid function
parameters:
    inX - input value (scalar or array)
returns:
    ans - result
"""
def sigmoid(inX):
    ans = 1.0 / (1 + np.exp(-inX))
    return ans

"""
function:
    batch gradient ascent
parameters:
    dataMatIn - data set
    classLabels - label list
returns:
    weights - optimal parameters
"""
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()  # turn the label list into a column vector
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # (100,3) data matrix times (3,1) weights; matrix multiplication, not element-wise
        error = labelMat - h  # (100,1) error vector: labels minus predicted probabilities
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

"""
function:
    plot the data points and the fitted decision boundary
parameters:
    dataMat - data set
    weights - fitted coefficients
returns:
    none
"""
def plotBestFit(dataMat, weights):
    dataMatrix = np.array(dataMat)
    x1 = dataMatrix[:, 1]  # first feature
    y1 = dataMatrix[:, 2]  # second feature
    plt.scatter(x1, y1, s=25, marker='o')
    plt.xlim([-4, 4])
    plt.ylim([-5, 20])
    x2 = np.arange(-3.0, 3.0, 0.1)
    y2 = (-weights[0] - weights[1] * x2) / weights[2]  # decision boundary w0 + w1*x + w2*y = 0, solved for y
    y2 = y2.T  # needed when weights is an np.mat column vector, so y2 plots as a column
    plt.plot(x2, y2)
    plt.show()

if __name__ == "__main__":
    dataMat, labelMat = loadDataSet()
    weights = gradAscent(dataMat, labelMat)
    plotBestFit(dataMat, weights)
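The straight line drawn by plotBestFit is where the model is exactly undecided, i.e. where the sigmoid argument is zero. Solving for the second feature gives the expression used for y2:

\[ w_0 + w_1 x_1 + w_2 x_2 = 0 \quad\Longrightarrow\quad x_2 = \frac{-w_0 - w_1 x_1}{w_2} \]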
import numpy as np
import matplotlib.pyplot as plt

"""
function:
    load the data set
parameters:
    none
returns:
    dataMat - data set
    labelMat - label list
"""
def loadDataSet():
    dataMat = []  # data set
    labelMat = []  # label list
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

"""
function:
    compute the sigmoid function
parameters:
    inX - input value (scalar or array)
returns:
    ans - result
"""
def sigmoid(inX):
    ans = 1.0 / (1 + np.exp(-inX))
    return ans

"""
function:
    batch gradient ascent
parameters:
    dataMatIn - data set
    classLabels - label list
returns:
    weights - optimal parameters
"""
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()  # turn the label list into a column vector
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # (100,3) data matrix times (3,1) weights; matrix multiplication, not element-wise
        error = labelMat - h  # (100,1) error vector: labels minus predicted probabilities
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

"""
function:
    plot the data points and the fitted decision boundary
parameters:
    dataMat - data set
    weights - fitted coefficients
returns:
    none
"""
def plotBestFit(dataMat, weights):
    dataMatrix = np.array(dataMat)
    x1 = dataMatrix[:, 1]  # first feature
    y1 = dataMatrix[:, 2]  # second feature
    plt.scatter(x1, y1, s=25, marker='o')
    plt.xlim([-4, 4])
    plt.ylim([-5, 20])
    x2 = np.arange(-3.0, 3.0, 0.1)
    y2 = (-weights[0] - weights[1] * x2) / weights[2]  # decision boundary w0 + w1*x + w2*y = 0, solved for y
    y2 = y2.T  # needed when weights is an np.mat column vector
    plt.plot(x2, y2)
    plt.show()

"""
function:
    stochastic gradient ascent (single pass over the data)
parameters:
    dataMatrix - data set (NumPy array)
    classLabels - label list
returns:
    weights - fitted parameters
"""
def stocGradAscent0(dataMatrix, classLabels):
    m, n = np.shape(dataMatrix)
    alpha = 0.01  # step size
    weights = np.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))  # prediction for a single sample (a scalar)
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

if __name__ == "__main__":
    dataArr, labelMat = loadDataSet()
    weights = stocGradAscent0(np.array(dataArr), labelMat)
    plotBestFit(dataArr, weights)
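Where gradAscent revisits every sample on each of its 500 iterations, stocGradAscent0 makes a single pass and updates the weights from one sample at a time:

\[ \mathbf{w} \leftarrow \mathbf{w} + \alpha\,\bigl(y_i - \sigma(\mathbf{x}_i \cdot \mathbf{w})\bigr)\,\mathbf{x}_i \]

Each update is cheap, but a single pass over 100 samples generally leaves the weights noisier than the batch version, which motivates the improved variant below.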
import numpy as np
import matplotlib.pyplot as plt

"""
function:
    load the data set
parameters:
    none
returns:
    dataMat - data set
    labelMat - label list
"""
def loadDataSet():
    dataMat = []  # data set
    labelMat = []  # label list
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat

"""
function:
    compute the sigmoid function
parameters:
    inX - input value (scalar or array)
returns:
    ans - result
"""
def sigmoid(inX):
    ans = 1.0 / (1 + np.exp(-inX))
    return ans

"""
function:
    batch gradient ascent
parameters:
    dataMatIn - data set
    classLabels - label list
returns:
    weights - optimal parameters
"""
def gradAscent(dataMatIn, classLabels):
    dataMatrix = np.mat(dataMatIn)
    labelMat = np.mat(classLabels).transpose()  # turn the label list into a column vector
    m, n = np.shape(dataMatrix)
    alpha = 0.001  # step size
    maxCycles = 500  # number of iterations
    weights = np.ones((n, 1))
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # (100,3) data matrix times (3,1) weights; matrix multiplication, not element-wise
        error = labelMat - h  # (100,1) error vector: labels minus predicted probabilities
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights

"""
function:
    plot the data points and the fitted decision boundary
parameters:
    dataMat - data set
    weights - fitted coefficients
returns:
    none
"""
def plotBestFit(dataMat, weights):
    dataMatrix = np.array(dataMat)
    x1 = dataMatrix[:, 1]  # first feature
    y1 = dataMatrix[:, 2]  # second feature
    plt.scatter(x1, y1, s=25, marker='o')
    plt.xlim([-4, 4])
    plt.ylim([-5, 20])
    x2 = np.arange(-3.0, 3.0, 0.1)
    y2 = (-weights[0] - weights[1] * x2) / weights[2]  # decision boundary w0 + w1*x + w2*y = 0, solved for y
    y2 = y2.T  # needed when weights is an np.mat column vector
    plt.plot(x2, y2)
    plt.show()

"""
function:
    stochastic gradient ascent (single pass over the data)
parameters:
    dataMatrix - data set (NumPy array)
    classLabels - label list
returns:
    weights - fitted parameters
"""
def stocGradAscent0(dataMatrix, classLabels):
    m, n = np.shape(dataMatrix)
    alpha = 0.01  # step size
    weights = np.ones(n)
    for i in range(m):
        h = sigmoid(sum(dataMatrix[i] * weights))  # prediction for a single sample (a scalar)
        error = classLabels[i] - h
        weights = weights + alpha * error * dataMatrix[i]
    return weights

"""
function:
    improved stochastic gradient ascent
parameters:
    dataMatrix - data set (NumPy array)
    classLabels - label list
    numIter - number of passes over the data
returns:
    weights - fitted parameters
"""
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = np.shape(dataMatrix)
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # indices of the samples not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01  # step size decays with every update but never reaches zero
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # random position within the remaining indices
            sampleIndex = dataIndex[randIndex]  # map it to an actual sample so each pass visits every sample once
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del dataIndex[randIndex]  # sample without replacement within the pass
    return weights

if __name__ == "__main__":
    dataArr, labelMat = loadDataSet()
    weights = stocGradAscent1(np.array(dataArr), labelMat)
    plotBestFit(dataArr, weights)
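Two changes set stocGradAscent1 apart from the single-pass version: samples are visited in random order within each pass, and the step size shrinks as training proceeds,

\[ \alpha_{j,i} = \frac{4}{1 + j + i} + 0.01 \]

where j is the pass number and i the update count inside the pass. Early updates are large, later ones only fine-tune, and the constant 0.01 keeps alpha from ever reaching zero so new samples can still move the weights.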
import numpy as np

"""
function:
    compute the sigmoid function
parameters:
    inX - input value (scalar or array)
returns:
    ans - result
"""
def sigmoid(inX):
    ans = 1.0 / (1 + np.exp(-inX))
    return ans

"""
function:
    improved stochastic gradient ascent
parameters:
    dataMatrix - data set (NumPy array)
    classLabels - label list
    numIter - number of passes over the data
returns:
    weights - fitted parameters
"""
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = np.shape(dataMatrix)
    weights = np.ones(n)
    for j in range(numIter):
        dataIndex = list(range(m))  # indices of the samples not yet used in this pass
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01  # step size decays with every update but never reaches zero
            randIndex = int(np.random.uniform(0, len(dataIndex)))  # random position within the remaining indices
            sampleIndex = dataIndex[randIndex]  # map it to an actual sample so each pass visits every sample once
            h = sigmoid(sum(dataMatrix[sampleIndex] * weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del dataIndex[randIndex]  # sample without replacement within the pass
    return weights

"""
function:
    classifier: apply the learned weights to one input vector
parameters:
    inX - input vector to classify
    weights - fitted parameters
returns:
    predicted class (1.0 or 0.0)
"""
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5:
        return 1.0
    else:
        return 0.0

"""
function:
    the whole pipeline: train on the horse-colic training file, then evaluate on the test file
parameters:
    none
returns:
    errorRate - error rate on the test set
"""
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):  # 21 features per sample
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))  # the label sits in the last column
    trainWeights = stocGradAscent1(np.array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(np.array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    print("the error rate of this test is: %f" % errorRate)
    return errorRate

"""
function:
    run colicTest ten times and report the average error rate
parameters:
    none
returns:
    none
"""
def multiTest():
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum / float(numTests)))

if __name__ == "__main__":
    multiTest()
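classifyVector labels a sample 1.0 whenever the predicted probability exceeds 0.5, which, since the sigmoid equals 0.5 exactly at zero, is the same as checking the sign of the weighted sum. For a quick, repeatable single run instead of the full ten-round average, the script above could be used roughly as follows; this is only a usage sketch, the seed value 0 is arbitrary and merely makes the random sampling in stocGradAscent1 reproducible, and the two horseColic data files are assumed to be in the working directory, as colicTest already requires:

import numpy as np

np.random.seed(0)   # fix the random sample selection so the run repeats exactly
rate = colicTest()  # trains on horseColicTraining.txt, evaluates on horseColicTest.txt
print("single-run error rate: %f" % rate)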
Original article: https://www.cnblogs.com/w-j-c/p/10483122.html