Question: Hi, please give MATLAB code that addresses ALL aspects of Question 3 using only Newton's second-order method. I also need to determine good estimates of the starting points in order for Newton's method to work. My idea is to use gradient descent to find a good estimate of the starting points and then pass them to Newton's method to converge. I kept getting wrong figures and distances from my code. Please help.
function [min_distance, t1_min, t2_min] = Distance(r1, e1, e2, a1, b1, n1, r2, e1_2, e2_2, a2, b2, n2)
% Define tolerance and maximum iterations for optimization
tol = 1e-6;
max_iter = 100;
% Set initial values for t1 and t2
t1_init = 5; % Initial angle for superellipse 1
t2_init = 5; % Initial angle for superellipse 2
% Initial phase: gradient descent to find a good starting estimate.
% The centres r1 and r2 are passed through so that the distance is
% measured between the shifted curves, not the origin-centred ones.
[t1_gd, t2_gd] = gradient_descent(r1, a1, b1, n1, r2, a2, b2, n2, tol, max_iter, t1_init, t2_init);
% Refinement phase: Newton's method for precise convergence
[t1_min, t2_min, min_distance] = find_min_distance(r1, a1, b1, n1, r2, a2, b2, n2, tol, max_iter, t1_gd, t2_gd);
% Draw the superellipses and the closest points
draw_superellipses(r1, e1, e2, a1, b1, n1, r2, e1_2, e2_2, a2, b2, n2, t1_min, t2_min);
end
function [t1_gd, t2_gd] = gradient_descent(r1, a1, b1, n1, r2, a2, b2, n2, tol, max_iter, t1_init, t2_init)
% Initialize parameters for gradient descent
t1 = t1_init;
t2 = t2_init;
alpha = 0.05; % Learning rate for gradient descent
for iter = 1:max_iter
    [~, grad] = distance_and_gradient(t1, t2, r1, a1, b1, n1, r2, a2, b2, n2);
    % Update parameters by stepping against the gradient
    t1 = t1 - alpha * grad(1);
    t2 = t2 - alpha * grad(2);
    % Check convergence
    if norm(grad) < tol
        break;
    end
end
% Return the final parameters from gradient descent
t1_gd = t1;
t2_gd = t2;
end
function [t1_min, t2_min, min_distance] = find_min_distance(r1, a1, b1, n1, r2, a2, b2, n2, tol, max_iter, t1_init, t2_init)
% Initialize parameters for Newton's method
t1 = t1_init;
t2 = t2_init;
iter = 0;
regularization_factor = 1e-6; % Regularization to prevent a singular Hessian
while iter < max_iter
    [d, grad, hessian] = distance_and_gradient(t1, t2, r1, a1, b1, n1, r2, a2, b2, n2);
    % Check convergence before stepping, so the returned point matches d
    if norm(grad) < tol
        break;
    end
    % Add regularization to the Hessian
    hessian = hessian + regularization_factor * eye(2);
    % Check that the Hessian is usable
    if all(isfinite(hessian(:))) && rank(hessian) == 2 && cond(hessian) < 1e10
        % Newton step: solve hessian * delta = -grad
        delta = -hessian \ grad;
        t1 = t1 + delta(1);
        t2 = t2 + delta(2);
    else
        warning('Hessian is singular or contains non-finite values, stopping optimization.');
        break;
    end
    iter = iter + 1; % Increment iteration counter
end
% Recompute the distance at the final point
min_distance = distance_and_gradient(t1, t2, r1, a1, b1, n1, r2, a2, b2, n2);
t1_min = t1;
t2_min = t2;
end

function [x, y] = superellipse_point(t, a, b, n)
% Compute the (x, y) point on an origin-centred superellipse for parameter t
x = a * sign(cos(t)) .* abs(cos(t)).^(2/n);
y = b * sign(sin(t)) .* abs(sin(t)).^(2/n);
end
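% Quick self-check for the parametrisation (not part of the algorithm):
% any point returned by superellipse_point should satisfy the implicit
% superellipse equation abs(x/a)^n + abs(y/b)^n = 1, which is a convenient
% way to validate the code while debugging, e.g.
%   [x, y] = superellipse_point(0.7, 2, 1, 4);
%   abs(x/2)^4 + abs(y/1)^4   % expect a value very close to 1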
function [x, y] = superellipse_points(r, a, b, n, t)
% Compute points on the superellipse, shifted to centre r, for parameters t
[x, y] = arrayfun(@(ti) superellipse_point(ti, a, b, n), t);
x = x + r(1);
y = y + r(2);
end
function [d, grad, hessian] = distance_and_gradient(t1, t2, r1, a1, b1, n1, r2, a2, b2, n2)
% Distance between the two curve points, plus its gradient and (optionally)
% Hessian with respect to the parameters t1 and t2
[x1, y1] = superellipse_point(t1, a1, b1, n1);
[x2, y2] = superellipse_point(t2, a2, b2, n2);
% Distance between the shifted points: the centres r1, r2 must be included
delta_x = (x2 + r2(1)) - (x1 + r1(1));
delta_y = (y2 + r2(2)) - (y1 + r1(2));
d = sqrt(delta_x^2 + delta_y^2);
% Guard against division by zero when the curves touch
d = max(d, 1e-10);
% Derivatives of the curve points via the chain rule; note the 2/n factor
% from differentiating abs(cos(t))^(2/n):
%   dx/dt = -a*(2/n)*sin(t)*abs(cos(t))^(2/n-1)
%   dy/dt =  b*(2/n)*cos(t)*abs(sin(t))^(2/n-1)
dx1 = -a1 * (2/n1) * sin(t1) * abs(cos(t1))^(2/n1 - 1);
dy1 =  b1 * (2/n1) * cos(t1) * abs(sin(t1))^(2/n1 - 1);
dx2 = -a2 * (2/n2) * sin(t2) * abs(cos(t2))^(2/n2 - 1);
dy2 =  b2 * (2/n2) * cos(t2) * abs(sin(t2))^(2/n2 - 1);
% Gradient of d: since d = norm(p2 - p1),
%   dd/dt1 = -(delta_x*dx1 + delta_y*dy1)/d
%   dd/dt2 = +(delta_x*dx2 + delta_y*dy2)/d
grad = [-(delta_x * dx1 + delta_y * dy1) / d;
         (delta_x * dx2 + delta_y * dy2) / d];
% The analytic Hessian in the original listing was cut off mid-expression;
% as a stand-in, approximate it by central finite differences of the
% gradient (a sketch, assuming smoothness away from the axis crossings
% where abs(.)^(2/n-1) becomes singular for n > 2)
if nargout > 2
    h = 1e-5;
    [~, gp1] = distance_and_gradient(t1 + h, t2, r1, a1, b1, n1, r2, a2, b2, n2);
    [~, gm1] = distance_and_gradient(t1 - h, t2, r1, a1, b1, n1, r2, a2, b2, n2);
    [~, gp2] = distance_and_gradient(t1, t2 + h, r1, a1, b1, n1, r2, a2, b2, n2);
    [~, gm2] = distance_and_gradient(t1, t2 - h, r1, a1, b1, n1, r2, a2, b2, n2);
    hessian = [(gp1 - gm1) / (2*h), (gp2 - gm2) / (2*h)];
    hessian = (hessian + hessian') / 2; % symmetrize
end
end
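The listing above calls draw_superellipses but never defines it (the definition appears to have been cut off together with the Hessian). Below is a minimal sketch of what that function could look like, assuming a simple line plot is sufficient; the e1/e2/e1_2/e2_2 arguments are accepted only to match the call site, since their role is not visible in the posted code.

function draw_superellipses(r1, e1, e2, a1, b1, n1, r2, e1_2, e2_2, a2, b2, n2, t1_min, t2_min)
% Plot both superellipses, the closest points, and the connecting segment.
% The e* arguments are unused here; they are kept to match the call site.
t = linspace(0, 2*pi, 400);
[x1, y1] = superellipse_points(r1, a1, b1, n1, t);
[x2, y2] = superellipse_points(r2, a2, b2, n2, t);
[p1x, p1y] = superellipse_points(r1, a1, b1, n1, t1_min);
[p2x, p2y] = superellipse_points(r2, a2, b2, n2, t2_min);
figure; hold on; axis equal;
plot(x1, y1, 'b-', 'DisplayName', 'Superellipse 1');
plot(x2, y2, 'r-', 'DisplayName', 'Superellipse 2');
plot([p1x p2x], [p1y p2y], 'k--o', 'DisplayName', 'Closest points');
legend('Location', 'best');
title('Minimum distance between two superellipses');
hold off;
end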

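As a usage sketch, the function could be called as below. The specific numbers are made-up placeholders, since Question 3's actual parameters are not shown in the post, and the e* arguments are passed through unused, so a dummy value of 0 works here.

% Example call with placeholder values: r1/r2 are the centres,
% a/b the semi-axes, and n the superellipse exponent
r1 = [0; 0];
r2 = [5; 1];
[min_d, t1, t2] = Distance(r1, 0, 0, 2, 1, 4, r2, 0, 0, 1.5, 1, 2.5);
fprintf('Minimum distance: %.6f at t1 = %.4f, t2 = %.4f\n', min_d, t1, t2);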