Question: How do I improve my Python code, and is it correct? If not, can you help me fix it? Is this the best outcome?

I got an 86.67, which I assume is 86% accuracy. Please only edit steps 2, 3, 5, 6, 7, 8, and 9, marked with the ### YOUR CODE HERE label. Thank you.
Step 1
Import the libraries.
import numpy as np
from csv import reader
from random import seed
from random import randrange
Step 2
Load the csv file.
def load_csv(filename, skip=False):
    dataset = list()
    ### YOUR CODE HERE
    # Open the file in read-only mode
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        if skip:
            next(csv_reader)  # Skip the header row
        for row in csv_reader:
            if not row:
                continue
            dataset.append([float(value) for value in row])  # Convert values to float
    return dataset
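As a quick sanity check (a minimal sketch assuming the load_csv function above and a hypothetical two-column CSV file name), you can write a tiny file and confirm each row comes back as floats:
# Minimal sketch: write a small hypothetical CSV and load it back with load_csv above
sample_csv = "x1,x2,label\n0.5,1.2,1\n-0.3,0.8,0\n"
with open('sample_moons.csv', 'w') as f:  # 'sample_moons.csv' is a made-up file name
    f.write(sample_csv)
rows = load_csv('sample_moons.csv', skip=True)
print(rows)  # Expected: [[0.5, 1.2, 1.0], [-0.3, 0.8, 0.0]]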
Step 3
Split the dataset into X_train, Y_train, X_test, Y_test sets.
def train_test_split(dataset, split):
    ### YOUR CODE HERE
    # Shuffle, then split the dataset into train and test portions
    np.random.shuffle(dataset)
    train_size = int(split * len(dataset))
    train_data = dataset[:train_size]
    test_data = dataset[train_size:]
    # Split features (X) and labels (y); the label is the last column of each row
    X_train = [row[:-1] for row in train_data]
    y_train = [row[-1] for row in train_data]
    X_test = [row[:-1] for row in test_data]
    y_test = [row[-1] for row in test_data]
    return X_train, y_train, X_test, y_test
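For example (a minimal sketch assuming the function above and a dummy 10-row dataset), an 80/20 split should leave 8 training rows and 2 test rows:
# Minimal sketch: split a dummy dataset and check the resulting sizes
dummy = [[float(i), float(i) * 2, float(i % 2)] for i in range(10)]
X_tr, y_tr, X_te, y_te = train_test_split(dummy, split=0.8)
print(len(X_tr), len(X_te))  # Expected: 8 2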
Step 4
Define the Perceptron class that holds the weights, bias, learning rate, and epochs.
class Perceptron:
    def __init__(self, input_size, bias, learning_rate, epochs):
        self.weights = np.zeros(input_size)
        self.bias = bias
        self.learning_rate = learning_rate
        self.epochs = epochs
Step 5
Define the activation function.
def activation_function(x):
    # Step function for the perceptron
    ### YOUR CODE HERE
    result = 1 if x >= 0 else 0  # Step function: output 1 when the input is non-negative
    return result
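For instance (assuming the step function above), any non-negative weighted sum maps to class 1 and any negative sum maps to class 0:
# Quick check of the step function's behaviour
print(activation_function(-0.5))  # 0
print(activation_function(0))     # 1 (the threshold itself maps to 1)
print(activation_function(2.3))   # 1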
Step 6
Define the predict function that takes the inputs, weights, and bias values.
def predict(inputs, weights, bias):
    ### YOUR CODE HERE
    weighted_sum = np.dot(inputs, weights) + bias  # Weighted sum of inputs plus bias
    return activation_function(weighted_sum)
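As a small illustration (a sketch with made-up weights and bias, not values from your trained model), predict just thresholds the weighted sum plus bias:
# Illustrative prediction with hypothetical weights and bias
example_weights = np.array([0.4, -0.2])
example_bias = 0.1
print(predict([1.0, 2.0], example_weights, example_bias))   # 0.4 - 0.4 + 0.1 = 0.1 >= 0 -> 1
print(predict([-1.0, 2.0], example_weights, example_bias))  # -0.4 - 0.4 + 0.1 = -0.7 < 0 -> 0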
Step 7
Define the train function.
def train(X_train, y_train, learning_rate, epochs, weights, bias):
    prediction = None
    error = None
    for _ in range(epochs):
        ### YOUR CODE HERE
        for i in range(len(X_train)):
            prediction = predict(X_train[i], weights, bias)  # Current prediction
            error = y_train[i] - prediction  # Error: target minus prediction
            weights += learning_rate * error * np.array(X_train[i])  # Update weights
            bias += learning_rate * error  # Update bias
    return weights, bias
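To see the update rule on one sample (a worked sketch with hypothetical numbers, not your data): with weights [0, 0], bias 0, and learning rate 0.01, the sample [1.0, 2.0] with label 0 is predicted as 1 (the weighted sum 0 is non-negative), so error = 0 - 1 = -1 and the update looks like this:
# One hand-worked perceptron update (hypothetical numbers)
w = np.zeros(2)
b = 0.0
lr = 0.01
x, target = [1.0, 2.0], 0
pred = predict(x, w, b)         # weighted sum is 0, so prediction is 1
err = target - pred             # 0 - 1 = -1
w = w + lr * err * np.array(x)  # [0, 0] + 0.01 * -1 * [1, 2] = [-0.01, -0.02]
b = b + lr * err                # 0 + 0.01 * -1 = -0.01
print(w, b)                     # [-0.01 -0.02] -0.01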
Step 8
Define the accuracy for the perceptron.
def perceptron_accuracy(y, y_hat):
    # Overwrite the accuracy value with your own code
    accuracy = 0
    ### YOUR CODE HERE
    correct = sum(1 for actual, predicted in zip(y, y_hat) if actual == predicted)
    accuracy = correct / len(y) * 100  # Percentage of correct predictions
    return accuracy
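For example (a small sketch with made-up label lists), 5 matches out of 6 gives roughly 83.33:
# Quick check of the accuracy helper with made-up labels
y_true = [1, 0, 1, 1, 0, 1]
y_pred = [1, 0, 1, 0, 0, 1]
print(perceptron_accuracy(y_true, y_pred))  # 5 / 6 * 100 = 83.33...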
Step 9
Implement the perceptron neural network.
# Set the seed
seed(1)
# Load the csv file
filename = 'moons.csv'
dataset = load_csv(filename, skip=True)
# Configure the perceptron with the bias, learning rate and epochs
# Note the initial values are dummy and must be changed for an accurate network
# The split value for the training and test sets
custom_split = 0
# The bias term is a constant value added to the weighted sum of inputs
custom_bias = -1
# The learning rate controls how much the weights are adjusted during training
custom_learning_rate = -1
# The number of epochs defines how many times the perceptron will iterate over the training data
custom_epochs = -1
# Set your values here
###
### YOUR CODE HERE
custom_split = 0.8  # Use 80% of the data for training and 20% for testing
custom_bias = 0  # Initial bias value
custom_learning_rate = 0.01  # Learning rate for adjusting weights
custom_epochs = 100  # Number of epochs to train the model
###
# Split the dataset for both training and testing
X_train, y_train, X_test, y_test = train_test_split(dataset, split=custom_split)
perceptron = Perceptron(input_size=2, bias=custom_bias, learning_rate=custom_learning_rate, epochs=custom_epochs)
# Training
weights, bias = train(X_train, y_train, perceptron.learning_rate, perceptron.epochs, perceptron.weights, perceptron.bias)
# Predictions
y_hat = []
# Testing
for i in range(len(X_test)):
    prediction = predict(X_test[i], weights, bias)
    y_hat.append(prediction)
    print(f"Input: {X_test[i]}, Predicted: {prediction}, Actual: {y_test[i]}")
# Test for accuracy
print(f"Accuracy: {perceptron_accuracy(y_test, y_hat):.2f}%")
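One likely improvement, if reproducibility matters: seed(1) seeds Python's random module, but the shuffle in train_test_split uses np.random, which that call does not affect, so the split (and the accuracy) can change from run to run. A minimal sketch of seeding both (the value 1 is just an example):
# Minimal sketch: seed both RNGs so the shuffle, and therefore the split, is reproducible
from random import seed
import numpy as np
seed(1)            # seeds Python's random module (used by randrange, etc.)
np.random.seed(1)  # seeds NumPy's RNG, which np.random.shuffle actually uses
Also note that since perceptron_accuracy returns correct / len(y) * 100, the 86.67 you saw is already a percentage: about 87% of the test points were classified correctly.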
