Question: Parallel trapezoidal-rule integration with MPI (the full problem statement follows the code below).
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
double Trap(double a, double b, int n, double h){ //a, b, n will have copy of local_a, local_b, local_n
double approx, x_i;
approx= (exp(a)+ exp(b))/2.0;
for (int i=1; i< n; i++){
x_i = a + i*h;
approx += exp(x_i);
}
approx = h * approx;
return approx;
}
int main(int argc, char** argv) {
int n, local_n, p;
double a, b, h, local_a, local_b;
double local_int, total_int;
// Initialize the MPI environment
MPI_Init(NULL, NULL);
// Get the number of processes
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
// Get the rank of the process
int my_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if(my_rank == 0){
printf("Please enter a, b, n:");
fflush(stdout);
scanf("%lf %lf %d", &a, &b, &n);
}
fflush(stdout);
MPI_Bcast(&a, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&b, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
h = (b-a)/n;
local_n = n/world_size;
local_a = a + my_rank*local_n*h;
local_b = local_a + local_n*h;
local_int = Trap(local_a, local_b, local_n, h);
if(my_rank != 0){
MPI_Send(&local_int, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
}
else{
total_int = local_int;
for(p=1; p < world_size;p++){
MPI_Recv(&local_int, 1, MPI_DOUBLE, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
total_int +=local_int;
}
}
if (my_rank ==0){
printf("With n = % d trapezoids, our estimate ", n);
printf("of the integral from %f to %f = %f ", a, b, total_int);
}
// Finalize the MPI environment.
MPI_Finalize();
return 0;
}
Write a C program and parallelize it using MPI to compute the area under a curve using the trapezoidal rule. Let each process compute the area under the curve for its subinterval, and then sum the partial results (i.e., a reduce operation) in process 0. In example 5, we read a, b, and n from the user via scanf in process 0, and process 0 then broadcasts them to all other processes. In this lab work, replace the point-to-point send/receive code (lines 59-68) with the MPI_Reduce collective communication, using the MPI_SUM operator. Time the parallel code (start timing before the Trap function call and end timing after MPI_Reduce), run it using 1, 2, 4, and 8 processes (nodes), and report the execution time, speedup = Tserial/Tparallel, and efficiency = speedup/#processes. Please use the values a = 0, b = 10, n = 100000000.
Step by Step Solution
There are 3 Steps involved in it
1 Expert Approved Answer
Step: 1 Unlock
Question Has Been Solved by an Expert!
Get step-by-step solutions from verified subject matter experts
Step: 2 Unlock
Step: 3 Unlock
