import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os

# Print the data in tabular form for easy visualization:
path = 'data' + os.sep + 'LogiReg_data.txt'
pdData = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'admin'])
pdData.head()

admin就是是否被录取。

# Split the records by the 'admin' label (1 = admitted, 0 = not admitted).
positive = pdData[pdData['admin'] == 1]
negative = pdData[pdData['admin'] == 0]

# BUG FIX: the figure handle was misnamed `fix`; renamed to the conventional `fig`.
fig, ax = plt.subplots(figsize=(10, 5))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=30, c='b', marker='o',
           label='Admitted')
# BUG FIX: legend typo 'Not Admintted' -> 'Not Admitted'.
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=30, c='r', marker='x',
           label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 score')
# BUG FIX: axis-label typo 'socre' -> 'score'.
ax.set_ylabel('Exam 2 score')
plt.show()

logistic regression

1，Sigmoid，一个可以把值映射成概率的函数。

2，model，返回函数预测结果值。

3，cost，根据参数计算损失。

4，gradient，根据每个参数计算梯度方向。

5，descent，进行参数更新。

6，accuracy，计算精度

Sigmoid函数模块：

def sigmoid(z):
    """Map any real-valued input to the open interval (0, 1).

    Implements the logistic function 1 / (1 + e^(-z)); works element-wise
    on numpy arrays as well as on scalars.
    """
    return 1.0 / (1.0 + np.exp(-z))

# We can plot the sigmoid curve to see its shape:

# Plot sigmoid over the integer grid [-10, 10) as a red curve.
nums = np.arange(-10, 10, step=1)
fig, ax = plt.subplots(figsize=(12, 4))
ax.plot(nums, sigmoid(nums), 'r')
plt.show()
model模块：

model模块是用于计算参数和特征值的乘积的，也就是构建一个模型。

def model(X, theta):
    """Logistic-regression hypothesis: sigmoid of the linear score.

    Computes sigmoid(X · theta^T) — np.dot performs the matrix product and
    theta.T is the transpose of the parameter row vector.
    """
    return sigmoid(np.dot(X, theta.T))
np.dot函数就是矩阵乘法，theta.T就是矩阵的转置。

# Prepend a bias column of ones so theta[0] acts as the intercept term.
pdData.insert(0, 'ones', 1)
# BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
# in 1.0 — to_numpy() is the supported replacement.
matrix = pdData.to_numpy()
cols = matrix.shape[1]
X = matrix[:, 0:cols-1]     # feature columns (ones, Exam 1, Exam 2)
Y = matrix[:, cols-1:cols]  # ground-truth labels (admin)
theta = np.zeros([1, 3])    # one parameter per feature, initialized to zero

cost损失函数：

def cost(X, y, theta):
    """Average negative log-likelihood (cross-entropy) of the model.

    X: sample matrix (with bias column), y: 0/1 labels as a column vector,
    theta: parameter row vector. Returns a scalar loss.
    """
    # FIX: removed leftover debug prints of left[1:5] / right[1:5].
    left = np.multiply(-y, np.log(model(X, theta)))
    right = np.multiply(1 - y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / (len(X))

def gradient(X, y, theta):
    """Gradient of the cross-entropy cost with respect to each theta[0, j].

    Returns an array with the same shape as theta.
    """
    # BUG FIX: np.zeros.shape(theta.shape) is not callable — np.zeros(shape)
    # is the correct constructor.
    grad = np.zeros(theta.shape)
    # BUG FIX: np.multiply(model(X, theta) - y) was called with a single
    # argument; the residual is simply (prediction - y).
    error = (model(X, theta) - y).ravel()
    # BUG FIX: theta.ravel was missing its call parentheses, so len() was
    # applied to the bound method instead of the flattened array.
    for j in range(len(theta.ravel())):
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / (len(X))
    return grad

# Stopping-criterion identifiers for gradient descent.
STOP_ITER = 0  # stop after a fixed number of iterations
STOP_COST = 1  # stop when the cost change falls below a threshold
STOP_GRAD = 2  # stop when the gradient norm falls below a threshold


def stopCriterion(type, value, threshold):
    """Return True when the selected stopping condition is satisfied.

    For STOP_ITER `value` is the iteration count, for STOP_COST the list of
    costs so far, and for STOP_GRAD the current gradient array.
    """
    if type == STOP_ITER:
        return value > threshold
    elif type == STOP_COST:
        return abs(value[-1] - value[-2]) < threshold
    elif type == STOP_GRAD:
        # BUG FIX: the original computed the norm comparison but never
        # returned it, so STOP_GRAD always evaluated as None (falsy).
        return np.linalg.norm(value) < threshold
np.linalg.norm()是求范数的意思，如果不加参数就是默认的二范数（欧几里得范数）。

import numpy.random #洗牌 def shuffleData(data): np.random.shuffle(data) cols =
data.shape[1] X = data[:, 0:cols-1] y = data[:, cols-1:] print('X',X[1:5])
print('Y',Y[1:5]) return X, y使用np.random.shuffle(矩阵)来洗牌，再重新获取函数。

import time


def descent(data, theta, batchsize, stopType, thresh, alpha):
    """Run (mini-batch) gradient descent until the stop criterion fires.

    data:      matrix of [bias, features..., label] rows
    theta:     initial parameter row vector
    batchsize: rows per update (len(data)=batch GD, 1=SGD, else mini-batch)
    stopType:  STOP_ITER / STOP_COST / STOP_GRAD
    thresh:    threshold for the chosen stop criterion
    alpha:     learning rate

    Returns (theta, iterations, costs, grad, duration_seconds).
    """
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # cursor into the shuffled data for mini-batch slicing
    X, y = shuffleData(data)
    grad = np.zeros(theta.shape)
    costs = [cost(X, y, theta)]  # loss recorded after every update

    while True:
        grad = gradient(X[k:k+batchsize], y[k:k+batchsize], theta)
        k += batchsize
        # FIX: the epoch boundary was hard-coded to 100 rows; using the
        # actual dataset size keeps identical behavior for this 100-row
        # dataset while working for any number of samples.
        if k >= data.shape[0]:
            k = 0
            X, y = shuffleData(data)  # reshuffle for the next epoch
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, y, theta))
        i += 1

        if stopType == STOP_ITER:
            value = i
        elif stopType == STOP_COST:
            value = costs
        elif stopType == STOP_GRAD:
            value = grad
        if stopCriterion(stopType, value, thresh):
            break

    return theta, i - 1, costs, grad, time.time() - init_time

# Driver that runs an experiment and plots the result:

def runExpe(data, theta, batchSize, stopType, thresh, alpha):
    """Run one gradient-descent experiment, print a summary, plot the cost curve.

    Only descent() does the actual optimization; everything else here builds
    the descriptive title and the report. Returns the fitted theta.
    """
    n = 100  # total sample count, used to recognize full-batch descent
    # BUG FIX: the original call ended with a full-width parenthesis '）',
    # which is a Python syntax error. Also renamed `iter` -> `n_iter` so the
    # builtin is not shadowed.
    theta, n_iter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)

    # Title: raw vs. scaled data, learning rate, descent flavour, stop rule.
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} - ".format(alpha)
    if batchSize == n:
        strDescType = "Gradient"
    elif batchSize == 1:
        strDescType = "Stochastic"
    else:
        strDescType = "Mini-batch ({})".format(batchSize)
    name += strDescType + " descent - Stop: "
    if stopType == STOP_ITER:
        strStop = "{} iterations".format(thresh)
    elif stopType == STOP_COST:
        strStop = "costs change < {}".format(thresh)
    else:
        strStop = "gradient norm < {}".format(thresh)
    name += strStop

    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
        name, theta, n_iter, costs[-1], dur))

    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')
    plt.show()
    return theta

# Train with full-batch gradient descent (batch size 100 = all samples),
# stopping when the cost change drops below 1e-6, with learning rate 0.001.
result = runExpe(matrix, theta, 100,STOP_COST, thresh=0.000001, alpha=0.001)