\input{template}
\input{macros}

\usepackage{color, graphicx}
\usepackage{amssymb, amsmath}
% \usepackage{epsfig} % obsolete; \includegraphics is provided by graphicx above

\begin{document}
\lecture{12}{The Simplex Algorithm III}{Prekshu Ajmera}

Let $x_0$ be the current extreme point. From previous lectures we already know that:
\begin{itemize}
\item
The directions to the neighboring extreme points of $x_0$ are given by the columns of the matrix $-A'^{-1}$.
\item
We can write $Ax_0\le b$ as 
\begin{equation}
A'x_0 = b'
\end{equation}
\begin{equation}
A''x_0 < b''
\end{equation}
where $A'$ consists of $n$ linearly independent rows (hyperplanes).
\end{itemize}
In this lecture we will give the stopping condition of the Simplex algorithm and prove the correctness of the algorithm.
%% -----------------------------------------------------------------------------
%%                          Beginning of Section 1.
%% -----------------------------------------------------------------------------
\section{Stopping Condition}
\textit{If all the neighboring extreme points of $x_0$ have a cost $\le$ the cost of $x_0$, then $x_0$ is optimal and the algorithm stops at $x_0$.}

\paragraph*{Proof}

First of all, note that this is \textit{not} the same as saying that $x_{0}$ is a local maximum and hence, by a previously proved theorem, also a global maximum. This is so because we only know that the cost at $x_{0}$ is maximum as compared to its \textit{neighbors} --- not compared to a small enough \textit{neighborhood} around it.

\subsection*{First Approach}
Let us consider a small neighborhood $N$ of $x_0$. Also, assume that we can write any point $p \in N$ as a convex combination of $x_0$ and its neighboring extreme points $x_1, \dots, x_n$, i.e.,
\begin{equation}
p = \sum_{i=0}^n \lambda_{i}x_{i}, \qquad 0 \le \lambda_i \le 1, \quad \sum_{i=0}^n \lambda_i = 1
\end{equation}

This is similar to taking a weighted average with the $\lambda_i$'s being the probabilities.
Now we know that $c^{T}x_i \leq c^{T}x_0$ for all $i$. Hence,

\begin{equation}
c^{T}p = c^{T}(\sum_{i=0}^n \lambda_{i}x_{i}) = \sum_{i=0}^n \lambda_{i}c^{T}x_{i}  \leq  c^{T}x_{0}(\sum_{i=0}^n \lambda_{i}) = c^{T}x_{0}
\end{equation}
Thus, $x_{0}$ is a local maximum and consequently a global maximum.

\textbf{To be shown}: (i) $p = \sum_i \lambda_{i}x_{i}$ (ii) Starting criteria
\subsection*{Second Approach}

Assume that $x_0$ is not an optimal point and there is some other point $x_{opt}$ which is optimal. Thus, the cost increases along the direction $x_{opt} - x_0$.

\begin{figure}[htbp]
\centering
\includegraphics[width=2.0in]{lec12fig1}
\end{figure}

Now, $x_{0}$ is an extreme point\\
$\Rightarrow$ $A'$ has full rank\\
$\Rightarrow$ $-A'^{-1}$ has full rank\\
$\Rightarrow$ its $n$ columns form a basis of the space\\
$\Rightarrow$ the vector $x_{opt} - x_{0}$ can be written as a linear combination of these columns. Hence,
\begin{equation}
x_{opt} - x_{0} = \sum_{i} \beta_{i} \, (-A'^{-1})^{i}
\end{equation}

Pre-multiplying the above equation with $A'$, we get
\begin{equation}
A'x_{opt} - A'x_{0} = \sum_{i} \beta_{i} \, A'(-A'^{-1})^{i}
\end{equation}
What can we say about $\beta_i$ ?

Let's look at the following figure:
\begin{figure}[htbp]
\centering
\includegraphics[width=2.0in]{lec12fig2}
\end{figure}

Any point in the region $R$ can be written as $a\beta_1 + b\beta_2$. For feasibility, $\beta_1$ and $\beta_2$ have to be positive, otherwise we would be outside the region $R$ (i.e., violate $Ax \le b$). Thus, intuitively it should be clear that $\beta_i \geqslant 0$.

A more formal proof that $\beta_i \geqslant 0$ will be given in the next lecture.

Now, pre-multiplying equation (5) with $c^{T}$, we get
\begin{equation}
c^{T}x_{opt} - c^{T}x_{0} = \sum_{i} \beta_{i} \, c^{T}(-A'^{-1})^{i}
\end{equation}

Now, $(-A'^{-1})^{i} = x_{i} - x_{0}$, where the $x_{i}$'s are the neighboring extreme points of $x_{0}$. Since we have stopped at $x_{0}$, $c^{T}x_{i} - c^{T}x_{0} \leq 0$ for all $x_{i}$. Also, $\beta_{i} \geqslant 0$. This implies that the R.H.S. of the above equation is $\leqslant 0$. Hence,
\begin{equation}
c^{T}x_{opt} - c^{T}x_{0} \leqslant 0 
\end{equation}

Thus, $c^{T}x_{opt} \leq c^{T}x_{0}$, which contradicts our assumption that the cost strictly increases along $x_{opt} - x_0$. So, our assumption was wrong and $x_{0}$ is indeed an optimal point. Hence proved!

\end{document}

