clear
echo on
%This program illustrates the three root finding
%techniques of bisection, Newton's method, and the
%secant method.  In all three cases we will try to
%find the root of f(x)=x^3-2.  For the bisection
%method, all we have to know is that the root lies
%between 1 and 2.  All of the methods require that
%we define the function f(x), and Newton's method
%also requires the derivative fprime(x).  We use
%anonymous functions to define both in the script:

f = @(x) x.^3-2;
fprime = @(x) 3*x.^2;

%We set the interval:
a=1;
b=2;
pause
%First, let's plot the function:

y=[a:(b-a)/50:b];
figure(1)
plot(y,f(y));
xlabel('y')
ylabel('f(y)')
grid on
pause
hold on

%Note that f(a)*f(b)<0 for our function
%We shall let the left edge of the current interval
%be given by alpha, the right edge by beta,
%and the midpoint by mid(i).  We know that the
%answer is just 2^(1/3), so we can determine the
%error at each step as well.  Let's start:
pause
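%Since the bracketing interval is halved at every
%step, after n steps its width is (b-a)/2^n, so the
%midpoint is within (b-a)/2^(n+1) of the true root.
%With a=1, b=2 and 24 steps this bound is about 3e-8.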
if f(a)*f(b)>0,
  disp('The zero is not in the interval!')
else
  alpha(1)=a;
  beta(1)=b;
  mid(1)=(a+b)/2;
  plot(mid(1),f(mid(1)),'o')
  for i=1:24,
    if f(alpha)*f(mid(i))<0,
      beta=mid(i);
    else,
      alpha=mid(i);
    end;
    mid(i+1)=(alpha+beta)/2;
    figure(1)
    plot(mid(i+1),f(mid(i+1)),'o')
    pause(1)
    echo off
  end
end
hold off
echo on
pause

%Now let's see what the error looks like:
answer=2^(1/3);
figure(2)
plot(log10(abs(mid-answer)))
grid on
xlabel('iteration')
ylabel('log10(error)')
%Note that the error does not decrease monotonically
%from one iteration to the next, but that the overall
%behavior on this semi-log plot is approximately
%linear.  The slope of the average line is log10(C),
%which is about -0.301 for C=1/2.
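%As a rough check (purely illustrative), we can fit a
%straight line to this semi-log error curve; its slope
%should come out close to log10(1/2) = -0.301:
p=polyfit(1:length(mid),log10(abs(mid-answer)),1);
disp(['fitted slope = ' num2str(p(1))])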
pause

%OK, now let's look at Newton's method.  Here we
%need both f(x) and fprime(x).  Let's start the
%iteration at x(1)=(a+b)/2.  We also shall set a
%tolerance for determining when we have reached a
%solution.  So:
tol=1e-7;
xlast=a; %this is just used to terminate the iteration
         %if the distance between successive points
         %gets too small.  To start, we just set it
         %to any value that differs from x(1) by more
         %than the tolerance.
x(1)=(b+a)/2;
i=1;
figure(1)
plot(y,f(y))
xlabel('y')
ylabel('f(y)')
grid
hold on
plot(x(i),f(x(i)),'o')
pause(1)
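%Iterate until either |f(x)| or the change in x from
%one step to the next drops below the tolerance: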
while min(abs(f(x(i))),abs(x(i)-xlast))>tol,
  x(i+1)=x(i)-f(x(i))/fprime(x(i));
  xlast=x(i);
  i=i+1;
  figure(1)
  plot(x(i),f(x(i)),'o')
  pause(1)
  echo off
end
echo on
hold off
pause
%Note that it did not take nearly as many iterations
%to reach the set tolerance!  The total number of
%iterations was only:
i
%which is a lot less than the bisection method needed.
pause

%Let's look at the error as a function of iteration.
%Thus:
[m,nbisection]=size(mid);
[m,nnewton]=size(x);
figure(2)
plot([1:nbisection],log10(abs(mid-answer)),...
     [1:nnewton],log10(abs(x-answer)))
grid on
xlabel('iteration')
ylabel('log10(error)')
%Note that the Newton's method error really falls
%off fast!  This is a consequence of quadratic
%convergence.
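%For Newton's method the error approximately satisfies
%error(i+1) = C*error(i)^2, where C = f''(xr)/(2*f'(xr))
%and xr is the root; for f(x)=x^3-2 this gives
%C = 1/xr = 2^(-1/3) = 0.794.  As a rough, purely
%illustrative check, the ratios error(i+1)/error(i)^2:
e=abs(x-answer);
e(2:end)./e(1:end-1).^2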
pause

%We can estimate the rate of convergence r from a
%log-log plot of error(i+1) vs error(i).  The rate
%r is just the slope of this plot.  Thus:
figure(3)
loglog(abs(x(1:nnewton-1)-answer),abs(x(2:nnewton)-answer),...
     abs(mid(1:nbisection-1)-answer),abs(mid(2:nbisection)-answer))
grid on
xlabel('error(i)')
ylabel('error(i+1)')
%The slope of the bisection error result is about
%half of the quadratic Newton's method result, and
%also is a lot noisier!
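%As a rough, purely illustrative check, straight-line
%fits to these log-log error curves should have slopes
%near 1 for bisection and near 2 for Newton's method:
eb=abs(mid-answer);
en=abs(x-answer);
pbis=polyfit(log10(eb(1:nbisection-1)),log10(eb(2:nbisection)),1);
pnewt=polyfit(log10(en(1:nnewton-1)),log10(en(2:nnewton)),1);
disp(['bisection slope = ' num2str(pbis(1)) ...
      ', Newton slope = ' num2str(pnewt(1))])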
pause

%OK, now for the final technique, the secant method.
%In this technique we need two different starting
%points.  These are used to estimate (via a finite
%difference approximation) the local derivative.
%That, in turn, is used in a Newton-like algorithm
%to estimate a new point in the iteration.  We only
%keep the last two points.  So:
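%In formula form the update is
%  xs(i+1) = xs(i) - f(xs(i))*(xs(i)-xs(i-1))/(f(xs(i))-f(xs(i-1)))
%which is just Newton's method with fprime replaced by
%the difference quotient through the last two points.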

tol=1e-7;
xs(1)=(b+a)/2;
xs(2)=a+(b-a)/4;
i=2;
figure(1)
plot(y,f(y))
xlabel('y')
ylabel('f(y)')
grid
hold on
plot(xs(1:2),f(xs(1:2)),'o')
pause(1)
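%Iterate until either |f(xs)| or the change in xs from
%one step to the next drops below the tolerance: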
while min(abs(f(xs(i))),abs(xs(i)-xs(i-1)))>tol,
  xs(i+1)=xs(i)-f(xs(i))*...
          (xs(i)-xs(i-1))/(f(xs(i))-f(xs(i-1)));
  i=i+1;
  figure(1)
  plot(xs(i),f(xs(i)),'o')
  pause(1)
  echo off
end
echo on
hold off
pause

%This approach takes a few more iterations than
%Newton's method, but still has a superlinear
%rate of convergence.  Let's look at the error:
[m,nsec]=size(xs);
figure(2)
plot([1:nbisection],log10(abs(mid-answer)),...
     [1:nnewton],log10(abs(x-answer)),...
     [2:nsec],log10(abs(xs(2:nsec)-answer)))
grid on
xlabel('iteration')
ylabel('log10(error)')
%Note that the convergence of the secant method lies
%between that of the other two methods.
pause

%To conclude, we examine the rate of convergence
%of the secant method by plotting log(error(i+1))
%vs. log(error(i)):
figure(3)
loglog(abs(x(1:nnewton-1)-answer),...
     abs(x(2:nnewton)-answer),...
     abs(mid(1:nbisection-1)-answer),...
     abs(mid(2:nbisection)-answer),...
     abs(xs(2:nsec-1)-answer),...
     abs(xs(3:nsec)-answer))
grid on
xlabel('error(i)')
ylabel('error(i+1)')
%Note that the slope of the secant method lies between
%that of the other two methods.  This slope is given
%by r = 0.5*(1+5^.5) = 1.618, which is the golden
%ratio.  Its inverse, 1/r = 0.618, satisfies the
%condition 1/r = 1-(1/r)^2, and this number is used
%in the golden section search technique for finding
%the minimum of unimodal functions.  We may get to
%this later.
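%As a quick numerical check of these statements (the
%straight-line fit is again purely illustrative):
r=(1+sqrt(5))/2
golden_inverse=1/r
check=1-(1/r)^2
es=abs(xs-answer);
psec=polyfit(log10(es(2:nsec-1)),log10(es(3:nsec)),1);
disp(['secant slope = ' num2str(psec(1))])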
echo off