Question: I have the assertions file and the makefile/debug and settings files; let me know where I should upload them. The bold area in the code needs to be edited: make a loop that can correct the error by itself and bring the result down to 0.01.
Question:
Take my working code for the Mazur neural network and extract three functions: feed_forward, calculate_error, and back_propagate. They may continue to use global variables for the weight matrices and node vectors. The calculate_error function should return the value of totalError.
Then write a main function that repeatedly calls the three functions for the inputs and targets specified in the article. It should print out the error after each cycle. Stop when the error falls below some threshold such as 0.01.
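A minimal sketch of one such refactor appears after the full listing below.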
// mazur-nn.cc
// See https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
#include <iostream>
#include <vector>
#include <cmath>
#include <cassert>
#include "assertions.hh"
using namespace std;
// 3-layer network architecture
const int NUM_INPUTS = 2;
const int NUM_HIDDEN = 2;
const int NUM_OUTPUTS = 2;
const double LEARNING_RATE = 0.5; // Greek letter eta (η) in the article
vector<double> inputs(NUM_INPUTS);
vector<double> hidden(NUM_HIDDEN);
vector<double> outputs(NUM_OUTPUTS);
vector<double> targets = {0.01, 0.99};
double inputHiddenWts [NUM_INPUTS] [NUM_HIDDEN] =
    { {.15, .25},
      {.20, .30} };
double inputBias = .35;

double hiddenOutputWts [NUM_HIDDEN] [NUM_OUTPUTS] =
    { {.40, .50},
      {.45, .55} };
double hiddenBias = .6;
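// Weight matrices are indexed [from-node][to-node]: inputHiddenWts[ii][hi]
// is the weight on the edge from input ii to hidden node hi (w1..w4 in the
// article), and hiddenOutputWts[hi][oi] is the weight from hidden node hi
// to output oi (w5..w8).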
double sigmoid(double x)
{
    return 1 / (1 + exp(-x));
}
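// Note: the back-propagation code below uses the sigmoid derivative in the
// form out * (1 - out). A small helper making that explicit (my addition,
// not part of the original listing) could look like this:
double sigmoidPrime(double sigmoidOutput)
{
    // Derivative of the sigmoid expressed in terms of its own output:
    // sigma'(x) = sigma(x) * (1 - sigma(x))
    return sigmoidOutput * (1 - sigmoidOutput);
}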
int main()
{
    cout << "HELLO ";
    assert(inputs.size() == NUM_INPUTS);
    assert(outputs.size() == NUM_OUTPUTS);
    ///////////////////////// FEED FORWARD calculations

    // Provide inputs to the network
    inputs.at(0) = .05;
    inputs.at(1) = .10;

    // Calculate hiddens -- now using loops so it can scale to any
    // NUM_INPUTS and NUM_HIDDEN.
    for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
        hidden.at(hi) = inputBias;
        for(unsigned ii = 0; ii < NUM_INPUTS; ii++) {
            hidden.at(hi) += inputHiddenWts[ii][hi] * inputs.at(ii);
        }
        hidden.at(hi) = sigmoid(hidden.at(hi));
        cout << hidden.at(hi) << ' ';
    }
    assert_approx(0.59327, hidden.at(0));
    assert_approx(0.596884378, hidden.at(1));
    // Calculate outputs -- also using loops.
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        outputs.at(oi) = hiddenBias;
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            outputs.at(oi) += hiddenOutputWts[hi][oi] * hidden.at(hi);
        }
        outputs.at(oi) = sigmoid(outputs.at(oi));
        cout << outputs.at(oi) << ' ';
    }
    assert_approx(0.75136507, outputs.at(0));
    assert_approx(0.772928465, outputs.at(1));
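    // (In the refactor requested above, the code from the FEED FORWARD
    // banner down to here becomes the body of feed_forward().)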
    ///////////////////////// BACK PROPAGATION calculations

    // Calculate errors
    vector<double> outErrors(NUM_OUTPUTS);
    double totalError = 0;
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        outErrors.at(oi) = outputs.at(oi) - targets.at(oi);
        totalError += pow(outErrors.at(oi), 2);
    }
    totalError /= NUM_OUTPUTS;
    assert_approx(0.298371109, totalError);
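    // (This equals the article's E_total = sum over outputs of
    // (1/2)*(target - out)^2, because NUM_OUTPUTS is 2: dividing the sum
    // of squared errors by 2 is the same as halving each term. This block
    // becomes calculate_error(), which returns totalError.)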
    // Adjust weights between hidden and output layers -- we store the
    // updated weights in a separate matrix, because we'll need the old
    // values of the weights for updating the input layer.
    double newHiddenOutputWts [NUM_HIDDEN] [NUM_OUTPUTS] = { 0 };
    double errorsPerHiddenOutputWts [NUM_HIDDEN] [NUM_OUTPUTS] = { 0 };
    vector<double> outPerNet(NUM_OUTPUTS);
    vector<double> hiddenErrors(NUM_HIDDEN);
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        outPerNet.at(oi) = outputs.at(oi) * (1 - outputs.at(oi));
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            double errorPerWt = outErrors.at(oi) * outPerNet.at(oi) * hidden.at(hi);
            newHiddenOutputWts[hi][oi] =
                hiddenOutputWts[hi][oi] - LEARNING_RATE * errorPerWt;
            // Continue calculating some errors needed for input layer
            errorsPerHiddenOutputWts[hi][oi] =
                outErrors.at(oi) * outPerNet.at(oi) * hiddenOutputWts[hi][oi];
            hiddenErrors[hi] += errorsPerHiddenOutputWts[hi][oi];
        }
    }
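    // (Chain rule behind errorPerWt: dE/dw[hi][oi]
    //    = dE/dout * dout/dnet * dnet/dw
    //    = (out - target) * out*(1 - out) * hidden[hi],
    //  which is exactly the product computed in the loop above.)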
    assert_approx(0.35891648, newHiddenOutputWts[0][0]); // Ta-da!
    assert_approx(0.408666186, newHiddenOutputWts[1][0]);
    assert_approx(0.511301270, newHiddenOutputWts[0][1]);
    assert_approx(0.561370121, newHiddenOutputWts[1][1]);

    // Copy newHiddenOutputWts into old hiddenOutputWts for next time.
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            hiddenOutputWts[hi][oi] = newHiddenOutputWts[hi][oi];
        }
    }
    assert_approx(0.055399425, errorsPerHiddenOutputWts[0][0]);
    assert_approx(-0.019049119, errorsPerHiddenOutputWts[0][1]);
    assert_approx(0.036350306, hiddenErrors[0]);
    // Adjust weights between input and hidden layers. We can update
    // those in-place.
    for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
        double hiddenPerNet = hidden.at(hi) * (1 - hidden.at(hi));
        for(unsigned ii = 0; ii < NUM_INPUTS; ii++) {
            double errorPerWt = hiddenErrors.at(hi) * hiddenPerNet * inputs.at(ii);
            inputHiddenWts[ii][hi] -= LEARNING_RATE * errorPerWt;
        }
    }
    assert_approx(0.149780716, inputHiddenWts[0][0]);
    assert_approx(0.19956143, inputHiddenWts[1][0]);
    assert_approx(0.24975114, inputHiddenWts[0][1]);
    assert_approx(0.29950229, inputHiddenWts[1][1]);
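    // (Everything from the BACK PROPAGATION banner down to here splits
    // into calculate_error() and back_propagate() in the refactor. Note
    // that the assert_approx checks only hold on the first training
    // cycle, so they cannot stay inside the extracted functions.)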
cout << "YAY ";
return 0;
}
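Here is a minimal sketch of the requested refactor, reusing the globals and assertions.hh from the listing above. The names feed_forward, calculate_error, back_propagate, and totalError come from the question; THRESHOLD and the cycle counter are my own additions for illustration. The first-cycle assert_approx checks are dropped because they only hold on the first pass, and this is one plausible slicing of the code, not the only one.

// mazur-nn-refactored.cc -- sketch only; reuses the globals declared above.
void feed_forward()
{
    for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
        hidden.at(hi) = inputBias;
        for(unsigned ii = 0; ii < NUM_INPUTS; ii++) {
            hidden.at(hi) += inputHiddenWts[ii][hi] * inputs.at(ii);
        }
        hidden.at(hi) = sigmoid(hidden.at(hi));
    }
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        outputs.at(oi) = hiddenBias;
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            outputs.at(oi) += hiddenOutputWts[hi][oi] * hidden.at(hi);
        }
        outputs.at(oi) = sigmoid(outputs.at(oi));
    }
}

// Returns the value of totalError, as the question requires.
double calculate_error()
{
    double totalError = 0;
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        totalError += pow(outputs.at(oi) - targets.at(oi), 2);
    }
    return totalError / NUM_OUTPUTS;
}

void back_propagate()
{
    double newHiddenOutputWts [NUM_HIDDEN] [NUM_OUTPUTS] = { 0 };
    vector<double> hiddenErrors(NUM_HIDDEN);
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        double outError = outputs.at(oi) - targets.at(oi);
        double outPerNet = outputs.at(oi) * (1 - outputs.at(oi));
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            newHiddenOutputWts[hi][oi] =
                hiddenOutputWts[hi][oi]
                - LEARNING_RATE * outError * outPerNet * hidden.at(hi);
            // Accumulate the error signal for the hidden layer using the
            // OLD weights, exactly as in the listing above.
            hiddenErrors.at(hi) += outError * outPerNet * hiddenOutputWts[hi][oi];
        }
    }
    for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
        double hiddenPerNet = hidden.at(hi) * (1 - hidden.at(hi));
        for(unsigned ii = 0; ii < NUM_INPUTS; ii++) {
            inputHiddenWts[ii][hi] -=
                LEARNING_RATE * hiddenErrors.at(hi) * hiddenPerNet * inputs.at(ii);
        }
    }
    // Only now install the new hidden->output weights.
    for(unsigned oi = 0; oi < NUM_OUTPUTS; oi++) {
        for(unsigned hi = 0; hi < NUM_HIDDEN; hi++) {
            hiddenOutputWts[hi][oi] = newHiddenOutputWts[hi][oi];
        }
    }
}

int main()
{
    // The inputs and targets from the article; targets are already set
    // in the global above.
    inputs.at(0) = .05;
    inputs.at(1) = .10;
    const double THRESHOLD = 0.01;   // stopping threshold from the question
    int cycle = 0;
    double totalError;
    do {
        feed_forward();
        totalError = calculate_error();  // error measured on this cycle's forward pass
        back_propagate();
        cout << "cycle " << ++cycle << ": error = " << totalError << '\n';
    } while(totalError >= THRESHOLD);
    return 0;
}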