import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn import linear_model
x = [[20,3],[23,7],[31,10],[42,13],[50,7],[60,5]]
y = [0,1,1,1,0,0]
lr = linear_model.LogisticRegression()
lr.fit(x,y)
testX = [[28,10]]
label = lr.predict(testX)
print(label)  # predicted class label
prob = lr.predict_proba(testX)
print(prob)  # predicted probabilities for class 0 and class 1
print(lr.coef_, lr.intercept_, lr.n_iter_)  # print the coefficient vector, intercept, and number of iterations
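# Optional sketch (not in the original listing): instead of copying the printed numbers
# by hand, as is done further below, the fitted weights can be read straight from the
# model. w1, w2 and b are illustrative names introduced here.
w1, w2 = lr.coef_[0]
b = lr.intercept_[0]
print(w1, w2, b)  # should roughly match the hardcoded values used below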
# Using the coefficient vector and intercept printed above, plot the logistic (sigmoid) curve
x1 = np.arange(20, 60, 1)
x2 = np.arange(30, -10, -1)  # could also be written as x2 = np.arange(-10, 30, 1)
# x1 and x2 must not only have the same length, they must also match the range of the sample data;
# with x2 = np.arange(20, 60, 1) the S-shaped curve would not be visible
x = (-0.19730001)*x1 + (0.91555745)*x2 - 0.04131838  # linear term w1*x1 + w2*x2 + b, using the fitted values printed above (overwrites the training data x)
y = 1/(1 + math.e**(-x))  # logistic (sigmoid) function (overwrites the labels y)
plt.scatter(x, y, c='r', marker='o')
plt.show()
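# Optional sketch (not in the original listing): the same S-curve drawn from the fitted
# model directly, using the w1, w2, b extracted above; z and sig are illustrative names.
z = w1 * x1 + w2 * x2 + b
sig = 1 / (1 + np.exp(-z))
plt.scatter(z, sig, c='b', marker='x')
plt.show()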
# Evaluate the logistic function at the test point, i.e. the larger of the two predicted probabilities (the probability of class 1)
x1=28
x2=10
x = (-0.19730001)*x1 + (0.91555745)*x2 - 0.04131838  # linear term at the test point
y = 1/(1 + math.e**(-x))  # logistic function value, i.e. the predicted probability of class 1
print(y)
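# Optional cross-check (not in the original listing): the manually computed value should
# be close to the class-1 probability sklearn reports for the same test point.
print(lr.predict_proba(testX)[0][1])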