
        # gradient descent: repeatedly move the weights against the gradient of the cost
        cost_iterations = []
        for i in range(self.iterations):
            z = np.dot(X, self.w)
            hw = self.sigmoid(z)  # predicted probabilities for the current weights
            m = len(y)  # number of training examples
            gradient = 1 / m * np.dot(X.T, (hw - y))
            self.w -= self.alpha * gradient  # weight update (the rule is restated after the class)
            cost_iterations.append(self.cost(hw, y))
        self.final_cost = self.cost(hw, y)
    def predict_prob(self, X):
        # probability that each sample belongs to class 1
        X = self.add_intercept(X)
        return self.sigmoid(np.dot(X, self.w))

    def predict(self, X):
        # hard class labels: probabilities of 0.5 and above are rounded to 1, the rest to 0
        return self.predict_prob(X).round()
    def plot_scatter(self, X, y):
        plt.figure(figsize=(19.20, 10.80))
        # scatter plot of the two classes
        plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color='#06b2d6', marker='o', label='0')
        plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color='#0085A1', marker='o', label='1')
        plt.legend()
        # evaluate the model on a grid covering the feature space
        x1_min, x1_max = X[:, 0].min(), X[:, 0].max()
        x2_min, x2_max = X[:, 1].min(), X[:, 1].max()
        xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
        grid = np.c_[xx1.ravel(), xx2.ravel()]
        probs = self.predict_prob(grid).reshape(xx1.shape)
        # the 0.5-probability contour is the decision boundary
        plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
        plt.xlim([x1_min - x1_min * 0.05, x1_max + x1_max * 0.02])
        plt.ylim([x2_min - x2_min * 0.05, x2_max + x2_max * 0.02])
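
Before moving on, it is worth spelling out what the loop in fit is doing. Assuming the cost method implements the binary cross-entropy loss (the usual choice for logistic regression, and the one whose gradient matches the code above), each iteration applies the standard gradient-descent update

$$w \leftarrow w - \frac{\alpha}{m}\, X^{T}\bigl(h_w(X) - y\bigr),$$

where $h_w(X) = \sigma(Xw)$ is the vector of predicted probabilities, $m$ is the number of training examples and $\alpha$ is the learning rate. Since $\sigma(z) = 0.5$ exactly when $z = 0$, the 0.5-probability contour drawn by plot_scatter is the set of points where $Xw = 0$, i.e. a straight line in the two-feature plane.
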
This is all the code we need to implement Logistic Regression from scratch. Let us now test it on our dataset:
LR = LogisticRegression(alpha=0.0016)
LR.fit(X, y)
print('min cost:', LR.final_cost)
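
If you want to try the class without the dataset used in this article, a quick way is to generate a small synthetic two-feature dataset. The sketch below is only an illustration: make_blobs is a stand-in for the real data, and the names X_demo and y_demo are hypothetical, not part of the code above.

import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

# toy dataset: two separable clusters in two dimensions (an assumption, not the article's data)
X_demo, y_demo = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0)

LR_demo = LogisticRegression(alpha=0.0016)
LR_demo.fit(X_demo, y_demo)

print('min cost:', LR_demo.final_cost)
print('training accuracy:', (LR_demo.predict(X_demo) == y_demo).mean())

LR_demo.plot_scatter(X_demo, y_demo)  # scatter plot with the fitted decision boundary
plt.show()
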