Sigmoid Neuron Model
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, log_loss
from tqdm import tqdm_notebook


class SigmoidNeuron:

    # initialization
    def __init__(self):
        self.w = None
        self.b = None

    # forward pass
    def perceptron(self, x):
        return np.dot(x, self.w.T) + self.b

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    # gradients of the mean squared error loss w.r.t. w and b
    def grad_w_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred) * x

    def grad_b_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred)

    # gradients of the cross entropy loss w.r.t. w and b
    def grad_w_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred * x
        elif y == 1:
            return -1 * (1 - y_pred) * x
        else:
            raise ValueError("y should be 0 or 1")

    def grad_b_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred
        elif y == 1:
            return -1 * (1 - y_pred)
        else:
            raise ValueError("y should be 0 or 1")

    # model fit method
    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
        # initialise w, b
        if initialise:
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0
        if display_loss:
            loss = {}
        for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            # accumulate gradients over all training examples
            dw = 0
            db = 0
            for x, y in zip(X, Y):
                if loss_fn == "mse":
                    dw += self.grad_w_mse(x, y)
                    db += self.grad_b_mse(x, y)
                elif loss_fn == "ce":
                    dw += self.grad_w_ce(x, y)
                    db += self.grad_b_ce(x, y)
            # gradient descent update
            m = X.shape[1]
            self.w -= learning_rate * dw / m
            self.b -= learning_rate * db / m
            if display_loss:
                # record the epoch loss on the full training set
                Y_pred = self.sigmoid(self.perceptron(X))
                if loss_fn == "mse":
                    loss[i] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    loss[i] = log_loss(Y, Y_pred)
        if display_loss:
            plt.plot(list(loss.values()))
            plt.xlabel('Epochs')
            if loss_fn == "mse":
                plt.ylabel('Mean Squared Error')
            elif loss_fn == "ce":
                plt.ylabel('Log Loss')
            plt.show()

    def predict(self, X):
        Y_pred = []
        for x in X:
            y_pred = self.sigmoid(self.perceptron(x))
            Y_pred.append(y_pred)
        return np.array(Y_pred)
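A minimal usage sketch, assuming the class and imports above; the tiny two-feature dataset below is made up for illustration and is not part of the gist:

import numpy as np

X = np.array([[0.2, 0.8],
              [0.6, 0.1],
              [0.9, 0.4],
              [0.1, 0.9]])
Y = np.array([0, 1, 1, 0])

sn = SigmoidNeuron()
sn.fit(X, Y, epochs=200, learning_rate=0.5, loss_fn="ce", display_loss=False)
print(sn.predict(X))  # one probability in (0, 1) per row of X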
log_loss(Y, Y_pred)
The function itself was never imported in the gist; it comes from sklearn.metrics, so without that import the file does not run when display_loss=True and loss_fn="ce".
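For reference, both loss helpers used by fit come from scikit-learn; a quick standalone check of what they compute, with hypothetical arrays:

import numpy as np
from sklearn.metrics import mean_squared_error, log_loss

y_true = np.array([0, 1, 1, 0])
y_prob = np.array([0.2, 0.7, 0.9, 0.4])   # predicted probabilities of class 1

print(mean_squared_error(y_true, y_prob))  # loss recorded when loss_fn="mse"
print(log_loss(y_true, y_prob))            # loss recorded when loss_fn="ce"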