Question:
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter, ListedColormap
%matplotlib inline
class Network:
    def __init__(self, sizes):
        """
        Initialize the neural network
        :param sizes: a list of the number of neurons in each layer
        """
        # save the number of layers in the network
        self.L = len(sizes)
        # store the list of layer sizes
        self.sizes = sizes
        # initialize the bias vectors for each hidden and output layer
        self.b = [np.random.randn(n, 1) for n in self.sizes[1:]]
        # initialize the matrices of weights for each hidden and output layer
        self.W = [np.random.randn(n, m) for m, n in zip(self.sizes[:-1], self.sizes[1:])]
        # initialize the derivatives of biases for backprop
        self.db = [np.zeros((n, 1)) for n in self.sizes[1:]]
        # initialize the derivatives of weights for backprop
        self.dW = [np.zeros((n, m)) for m, n in zip(self.sizes[:-1], self.sizes[1:])]
        # initialize the activities on each hidden and output layer
        self.z = [np.zeros((n, 1)) for n in self.sizes]
        # initialize the activations on each hidden and output layer
        self.a = [np.zeros((n, 1)) for n in self.sizes]
        # initialize delta for each layer
        self.delta = [np.zeros((n, 1)) for n in self.sizes]
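        # Note on list lengths: self.W, self.b, self.dW, and self.db each
        # hold L-1 entries (indices 0 .. L-2, one per layer-to-layer map),
        # while self.z, self.a, and self.delta hold L entries (indices
        # 0 .. L-1, one per layer including the input layer).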
    def g(self, z):
        """
        sigmoid activation function
        :param z: vector of activities to apply activation to
        """
        return 1.0 / (1.0 + np.exp(-z))

    def g_prime(self, z):
        """
        derivative of sigmoid activation function
        :param z: vector of activities to apply derivative of activation to
        """
        return self.g(z) * (1.0 - self.g(z))

    def grad_loss(self, a, y):
        """
        evaluate gradient of cost function for squared-loss C(a, y) = (a - y)^2 / 2
        :param a: activations on output layer
        :param y: vector-encoded label
        """
        return (a - y)
    def forward_prop(self, x):
        """
        take a feature vector and propagate it through the network
        :param x: input feature vector
        """
        if len(x.shape) == 1:
            x = x.reshape(-1, 1)
        # TODO: step 1. Initialize activation on initial layer to x
        # self.act = g(self.z)
        # self.Z = np.dot(self.act, self.W)
        # self.yhat = g(self.Z)
        # your code here (check the logic: self.a[0])
        self.a[0] = x
        # Step 2: Loop over layers and compute activities and activations
        for layer in range(self.L - 1):
            self.z[layer + 1] = np.dot(self.W[layer], self.a[layer]) + self.b[layer]
            self.a[layer + 1] = self.g(self.z[layer + 1])
        # Return the final prediction
        return self.a[-1]
        ## TODO: step 2. Loop over layers and compute activities and activations
        ## Use the sigmoid activation function defined above
        # your code here
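        # With a[0] = x, the loop above computes, for layer = 0 .. L-2:
        #     z[layer+1] = W[layer] @ a[layer] + b[layer]
        #     a[layer+1] = g(z[layer+1])
        # so self.a[-1] holds the final prediction.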
    def back_prop(self, x, y):
        """
        Back propagation to get derivatives of the cost function wrt weights and biases for a given training example
        :param x: training features
        :param y: vector-encoded label
        """
        # Ensure input and output have correct shape
        if len(x.shape) == 1:
            x = x.reshape(-1, 1)
        if len(y.shape) == 1:
            y = y.reshape(-1, 1)
        # Forward propagation to fill in activities and activations
        self.forward_prop(x)
        # Calculate delta for the output layer
        self.delta[-1] = self.grad_loss(self.a[-1], y) * self.g_prime(self.z[-1])
        # Backward pass through the network
        for l in reversed(range(1, self.L)):
            self.delta[l] = np.dot(self.W[l].T, self.delta[l + 1]) * self.g_prime(self.z[l])
        # Compute gradients for weights and biases
        for l in range(self.L - 1):
            if l == 0:
                self.dW[l] = np.dot(self.delta[l + 1], x.T)
            else:
                self.dW[l] = np.dot(self.delta[l + 1], self.a[l].T)
            self.db[l] = self.delta[l + 1]
The above code gives the error:
IndexError                                Traceback (most recent call last)
<ipython-input> in <module>
      # test back_prop
      nn = Network(...)
      nn.back_prop(X_train[0, :], y_train[0, :])
      print(nn.W)

<ipython-input> in back_prop(self, x, y)
      # Backward pass through the network
      for l in reversed(range(1, self.L)):
          self.delta[l] = np.dot(self.W[l].T, self.delta[l + 1]) * self.g_prime(self.z[l])
      # Compute gradients for weights and biases

IndexError: list index out of range
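Why this error happens: self.W holds only self.L - 1 weight matrices (valid indices 0 through self.L - 2, one per pair of adjacent layers), but the backward loop starts at l = self.L - 1, so self.W[l].T indexes past the end of the list on the very first iteration. Below is a minimal sketch of one plausible fix, assuming the indexing convention used in forward_prop (a[0] is the input and W[l] maps layer l to layer l + 1); it illustrates the index bookkeeping, not a verified expert solution:

    # delta[-1] for the output layer is already set above, so the backward
    # pass only needs the hidden layers l = L-2 down to 1. Stopping before
    # l = self.L - 1 keeps self.W[l] and self.delta[l + 1] in range.
    for l in reversed(range(1, self.L - 1)):
        self.delta[l] = np.dot(self.W[l].T, self.delta[l + 1]) * self.g_prime(self.z[l])

With that one change, a quick smoke test such as nn = Network([2, 3, 2]); nn.back_prop(np.random.randn(2), np.array([1.0, 0.0])) runs without an IndexError (the layer sizes here are made up purely for illustration).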