Skip to content

multilayer_perceptron_classifier #10387

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 57 additions & 18 deletions machine_learning/multilayer_perceptron_classifier.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,68 @@
from sklearn.neural_network import MLPClassifier
import numpy as np

X = [[0.0, 0.0], [1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]
y = [0, 1, 0, 0]

class MLP:
    """
    A simple Multi-Layer Perceptron with one hidden layer.

    Architecture: input -> (affine + tanh) hidden layer -> affine output.
    Trained by plain gradient descent on the sum-of-squares error.
    """

    def __init__(self, num_inputs, num_hidden, num_outputs):
        """
        Initialize the weights and biases of the network.

        Weights are initialized randomly using numpy's random rand function.
        Biases are initialized to zero.

        Parameters:
            num_inputs (int): Number of input features
            num_hidden (int): Number of hidden units
            num_outputs (int): Number of output units
        """
        self.weights1 = np.random.rand(num_inputs, num_hidden)
        self.biases1 = np.zeros(num_hidden)
        self.weights2 = np.random.rand(num_hidden, num_outputs)
        self.biases2 = np.zeros(num_outputs)
        # Hidden-layer activations from the most recent forward pass;
        # cached here because fit() needs them for backpropagation.
        self.a1 = None

    def forward(self, inputs):
        """
        Perform the forward pass of the MLP.

        Parameters:
            inputs (np.ndarray): Input data, shape (n_samples, num_inputs)

        Returns:
            np.ndarray: Outputs from the MLP, shape (n_samples, num_outputs)
        """
        # Hidden layer: affine transform followed by tanh activation.
        z1 = inputs.dot(self.weights1) + self.biases1
        # BUG FIX: cache the activations on self — the original code read
        # self.a1 in fit() without ever assigning it, so the first training
        # step raised AttributeError.
        self.a1 = np.tanh(z1)

        # Output layer: affine transform, no activation (linear output).
        z2 = self.a1.dot(self.weights2) + self.biases2
        return z2

    def fit(self, inputs, labels, epochs, learning_rate):
        """
        Train the MLP using backpropagation on the squared-error loss.

        Parameters:
            inputs (np.ndarray): Input data, shape (n_samples, num_inputs)
            labels (np.ndarray): Target outputs, shape (n_samples, num_outputs)
            epochs (int): Number of training iterations
            learning_rate (float): Learning rate for weight updates
        """
        for _ in range(epochs):
            # Forward pass (also refreshes self.a1 for the backward pass).
            outputs = self.forward(inputs)

            # Gradient of sum((outputs - labels)^2) w.r.t. the output layer.
            dz2 = 2 * (outputs - labels)
            dw2 = np.dot(self.a1.T, dz2)
            db2 = dz2.sum(axis=0)

            # Backpropagate through tanh: d/dz tanh(z) = 1 - tanh(z)^2.
            da1 = np.dot(dz2, self.weights2.T)
            dz1 = da1 * (1 - np.power(self.a1, 2))
            dw1 = np.dot(inputs.T, dz1)
            db1 = dz1.sum(axis=0)

            # Gradient-descent parameter updates.
            self.weights1 -= learning_rate * dw1
            self.biases1 -= learning_rate * db1
            self.weights2 -= learning_rate * dw2
            self.biases2 -= learning_rate * db2