Question:

# Your task is to replace ???
import numpy as np

# First we set the state of the network
sigma = np.tanh
w1 = 1.3
b1 = -0.1

# Then we define the neuron activation.
def a1(a0):
    z = w1 * a0 + b1
    return sigma(z)

# Experiment with different values of x below.
x = 1
print((a1(x) - 0)**2)
# First define our sigma function.
sigma = np.tanh

# Next define the feed-forward equation.
def a1(w1, b1, a0):
    z = w1 * a0 + b1
    return sigma(z)

# The individual cost function is the square of the difference between
# the network output and the training data output.
def C(w1, b1, x, y):
    return (a1(w1, b1, x) - y)**2
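
Before completing the derivative functions below, it helps to write the chain rule out in the notation of the code. With C = (a1 - y)**2, sigma = tanh, and z = w1*x + b1, the weight derivative factors as

    dC/dw1 = dC/da * da/dz * dz/dw1

where dC/da = 2*(a1 - y), da/dz = 1/cosh(z)**2 (the derivative of tanh), and dz/dw1 = x; for the bias, dz/db1 = 1.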
# This function returns the derivative of the cost function with
# respect to the weight.
def dCdw(w1, b1, x, y):
    z = ???
    dCda = ???  # Derivative of cost with activation
    dadz = 1/np.cosh(z)**2  # Derivative of activation with weighted sum z
    dzdw = ???  # Derivative of weighted sum z with weight
    return ???  # Return the chain rule product.

# This function returns the derivative of the cost function with
# respect to the bias.
# It is very similar to the previous function.
# You should complete this function.
def dCdb(w1, b1, x, y):
    z = ???
    dCda = ???
    dadz = ???
    # Change the next line to give the derivative of
    # the weighted sum, z, with respect to the bias, b.
    dzdb = ???
    return ???
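
One way to fill in the ??? placeholders, following the chain rule written out above (a sketch, assuming the sigma and a1 defined in the previous snippet; not necessarily the course's exact wording):

def dCdw(w1, b1, x, y):
    z = w1 * x + b1                  # Weighted sum for input x
    dCda = 2 * (a1(w1, b1, x) - y)   # Derivative of (a - y)**2 with respect to a
    dadz = 1/np.cosh(z)**2           # Derivative of tanh with respect to z
    dzdw = x                         # z = w1*x + b1, so dz/dw1 = x
    return dCda * dadz * dzdw        # Chain rule product

def dCdb(w1, b1, x, y):
    z = w1 * x + b1
    dCda = 2 * (a1(w1, b1, x) - y)
    dadz = 1/np.cosh(z)**2
    dzdb = 1                         # z = w1*x + b1, so dz/db1 = 1
    return dCda * dadz * dzdb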
# Define the activation function.
sigma = np.tanh

# Let's use a random initial weight and bias.
W = np.array([[-0.94529712, -0.2667356, -0.91219181],
              [2.05529992, 1.21797092, 0.22914497]])
b = np.array([0.61273249, 1.6422662])

# Define our feed-forward function.
def a1(a0):
    # Notice the next line is almost the same as previously,
    # except we are using matrix multiplication rather than scalar multiplication.
    z = ???
    # Everything else is the same though.
    return sigma(z)

# Next, if a training example is,
x = np.array([0.7, 0.6, 0.2])
y = np.array([0.9, 0.6])

# Then the cost function is,
d = ???  # Vector difference between observed and expected activation
C = ???  # Absolute value squared of the difference.
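
A possible completion of this snippet (a sketch; it assumes NumPy's @ operator for the matrix product and measures the cost as the squared magnitude of the difference vector):

def a1(a0):
    z = W @ a0 + b    # Matrix product replaces the scalar w1 * a0
    return sigma(z)

d = a1(x) - y         # Vector difference between observed and expected activation
C = d @ d             # Squared magnitude |d|**2 of that difference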
# First define our sigma function.
sigma = np.tanh

# Next define the feed-forward equation.
def a1(w1, b1, a0):
    z = ???
    return sigma(z)

# This function returns the derivative of the cost function with
# respect to the weight.
def dCdw(w1, b1, x, y):
    dCda = ???  # Derivative of cost with activation
    dadz = ???  # Derivative of activation with weighted sum z
    J = ???
    dzdw = ???  # Derivative of weighted sum z with weight
    J = ???
    return J  # Return the chain rule product.

# This function returns the derivative of the cost function with
# respect to the bias.
# It is very similar to the previous function.
# You should complete this function.
def dCdb(w1, b1, x, y):
    dCda = ???
    dadz = ???
    # Change the next line to give the derivative of
    # the weighted sum, z, with respect to the bias, b.
    dzdb = ???
    return dCda * dadz * dzdb
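
The skeleton above assigns to J twice, which suggests accumulating the chain rule product one factor at a time. A sketch of one possible completion, reusing the scalar derivatives worked out earlier (again an assumption, not the course's official answer):

def a1(w1, b1, a0):
    z = w1 * a0 + b1
    return sigma(z)

def dCdw(w1, b1, x, y):
    dCda = 2 * (a1(w1, b1, x) - y)    # Derivative of cost with activation
    dadz = 1/np.cosh(w1 * x + b1)**2  # Derivative of activation with weighted sum z
    J = dCda * dadz                   # Partial chain rule product
    dzdw = x                          # Derivative of weighted sum z with weight
    J = J * dzdw
    return J                          # Full chain rule product dC/dw1

def dCdb(w1, b1, x, y):
    dCda = 2 * (a1(w1, b1, x) - y)
    dadz = 1/np.cosh(w1 * x + b1)**2
    dzdb = 1                          # z = w1*x + b1, so dz/db1 = 1
    return dCda * dadz * dzdb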
