22 Perceptron
22.1 Step function (activation function)
import numpy as np

# Step function (activation function)
def step_function(x):
    return 1 if x >= 0 else 0
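As a quick illustration (not part of the original listing), the threshold behaviour can be checked directly; note that step_function(0) returns 1 because the comparison is >=:

# Quick check of the threshold: negative inputs map to 0, zero and positive inputs to 1
for x in (-2.0, -0.5, 0, 0.5, 2.0):
    print(f"step_function({x}) = {step_function(x)}")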
22.2 Perceptron Model
# Perceptron model
class Perceptron:
    def __init__(self, input_size, learning_rate=0.1):
        self.weights = np.zeros(input_size + 1)  # Initialize weights and bias to 0
        self.learning_rate = learning_rate

    def predict(self, inputs):
        # Weighted sum of the inputs plus the bias term (stored in weights[0])
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        return step_function(summation)

    def train(self, training_inputs, labels, epochs=10):
        for _ in range(epochs):
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                # Update weights and bias
                ## Weights
                self.weights[1:] += self.learning_rate * (label - prediction) * inputs
                ## Bias
                self.weights[0] += self.learning_rate * (label - prediction)
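To make the update rule in train concrete, here is a single-step trace of the same rule, w <- w + lr * (label - prediction) * x and b <- b + lr * (label - prediction), on an AND-gate example that a freshly initialized perceptron gets wrong (a sketch only; the names p, x, and error are illustrative and not part of the class):

# One manual update, mirroring the rule used inside train()
p = Perceptron(input_size=2, learning_rate=0.1)
x = np.array([0, 1])
label = 0                      # AND(0, 1) = 0

prediction = p.predict(x)      # zero weights give summation 0, so step(0) = 1
error = label - prediction     # 0 - 1 = -1: the prediction is too high
p.weights[1:] += p.learning_rate * error * x   # the second weight decreases by 0.1
p.weights[0] += p.learning_rate * error        # the bias decreases by 0.1
print(p.weights)

Weights only move when the prediction is wrong; a correct prediction gives error 0 and leaves the parameters unchanged.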
22.3 Execute
22.3.1 AND Logic Gate
# Training data (AND logic gate)
training_inputs = np.array([[0, 0],
                            [0, 1],
                            [1, 0],
                            [1, 1]])
labels = np.array([0, 0, 0, 1]) # AND gate outputs
# Initialize perceptron
perceptron = Perceptron(input_size=2)
# Train perceptron
perceptron.train(training_inputs, labels, epochs=10)
# Test the perceptron
test_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
for inputs in test_inputs:
print(f"Input: {inputs} -> Output: {perceptron.predict(inputs)}")Input: [0 0] -> Output: 0
Input: [0 1] -> Output: 0
Input: [1 0] -> Output: 0
Input: [1 1] -> Output: 1
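As a sanity check on the AND result (not in the original code), the learned parameters can be inspected directly; the bias sits in weights[0], and the decision rule is to output 1 exactly when np.dot(w, x) + bias >= 0:

# Inspect the parameters learned for the AND gate
bias, w = perceptron.weights[0], perceptron.weights[1:]
print(f"Weights: {w}, Bias: {bias}")
for inputs in test_inputs:
    activation = np.dot(inputs, w) + bias
    print(f"Input: {inputs} -> activation: {activation:+.2f} -> output: {step_function(activation)}")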
22.3.2 OR Logic Gate
# Training data (OR logic gate)
training_inputs = np.array([[0, 0],
                            [0, 1],
                            [1, 0],
                            [1, 1]])
labels = np.array([0, 1, 1, 1]) # OR gate outputs
# Initialize perceptron
perceptron = Perceptron(input_size=2)
# Train perceptron
perceptron.train(training_inputs, labels, epochs=10)
# Test the perceptron
test_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
for inputs in test_inputs:
print(f"Input: {inputs} -> Output: {perceptron.predict(inputs)}")Input: [0 0] -> Output: 0
Input: [0 1] -> Output: 1
Input: [1 0] -> Output: 1
Input: [1 1] -> Output: 1
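Since the AND and OR experiments differ only in their label vectors, the whole run can also be wrapped in a small helper; the sketch below assumes the Perceptron class defined above, and the function name train_and_test is illustrative rather than part of the original code:

def train_and_test(labels, name, epochs=10):
    # Train a fresh perceptron on the four 2-bit inputs and print its outputs
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    p = Perceptron(input_size=2)
    p.train(inputs, labels, epochs=epochs)
    print(name)
    for x in inputs:
        print(f"Input: {x} -> Output: {p.predict(x)}")

train_and_test(np.array([0, 0, 0, 1]), "AND gate")
train_and_test(np.array([0, 1, 1, 1]), "OR gate")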