\input{template}
\input{macros}
\usepackage{color, graphicx}
\usepackage{epsfig}
\usepackage{amssymb, amsmath}

\begin{document}
\lecture{13}{Proof of correctness of the Simplex algorithm and introduction to duality}{Shashvat Rai}

From the previous lectures, we know the following fact: if $x_0$ is an extreme point given by
\begin{equation}
 A^{'}x_0=b^{'}
\end{equation}
\begin{equation}
 A^{''}x_0<b^{''}
\end{equation}
then the neighbours of $x_0$ are along the columns of $-{A^{'}}^{-1}$.

\begin{Thm}
 If the cost decreases along the columns of $-{A^{'}}^{-1}$, then $x_0$ is optimal.
\end{Thm}
\begin{figure}[h]
\centering
\includegraphics[scale=1.0]{x0xrep}
\caption{Representation of $(x - x_0)$}
\label{figure:Representation of (x-x0)}
\end{figure}
\begin{proof}
 As $A^{'}$ has full rank, $-{A^{'}}^{-1}$ also has full rank. Thus the columns of $-{A^{'}}^{-1}$ form a basis of $\mathbb{R}^n$.
Hence any vector can be written as a linear combination of these $n$ columns.
\begin{equation}
 x-x_0=\sum\beta_j{(-{A^{'}}^{-1})}^{j}
\end{equation}
Premultiplication with $A^{'}$
\begin{equation}
 A^{'}x-A^{'}x_0=\sum\beta_jA^{'}{(-{A^{'}}^{-1})}^j
\end{equation}
Since $x$ is feasible, $A^{'}x \le b^{'}$. Also $A^{'}x_0=b^{'}$.
Thus the L.H.S. of equation 4 is a vector each of whose components is at most zero. Since $A^{'}{({A^{'}}^{-1})}^{j}$ is the $j$th standard basis vector, the R.H.S. is the vector $-{(\beta_1,\beta_2,\dots,\beta_n)}^{T}$. This implies that $\beta_j \ge 0$ for all $j$. Hence $x - x_0 = \sum \beta_j{(-{A^{'}}^{-1})}^{j}$ where $\beta_j \ge 0$.
Premultiplication with $c^{T}$ gives
\begin{equation}
 c^{T}x - c^{T}x_0 = \sum \beta_j c^{T} {(-{A^{'}}^{-1})}^{j}
\end{equation}
Since $\beta_j \ge 0$ and $c^{T} {(-{A^{'}}^{-1})}^{j} \le 0$ for all $j$, therefore $c^{T}x \le c^{T}x_0$. Hence $x_0$ is optimal.
\end{proof}

\textbf{Discussion:} \newline
Let $x_0$ be the optimal point and 
\begin{equation}
 A^{'}x_0=b^{'}
\end{equation}
and 
\begin{equation}
 A^{''}x_0<b^{''}
\end{equation}
The cost decreases along the columns of $-{A^{'}}^{-1}$. This can be written as
\begin{equation}
 c^{T}{A^{'}}^{-1} = (y_1, y_2, \dots, y_n) \text{ where } y_i \ge 0, \ \forall i
\end{equation}
This means that the cost vector is a nonnegative linear combination of the normals to the hyperplanes.
\begin{figure}[h]
\centering
\includegraphics[scale=0.8]{2dcase}
\caption{2-D example of extreme but infeasible points}
\label{figure:2D case}
\end{figure}
Consider all points (not necessarily feasible) given by $n$ linearly independent hyperplanes, where the cost vector can be written as a nonnegative linear combination of the normals. These points are the candidate maxima.

Take any such point, say $x$.
Then $c^{T}x \ge c^{T}x_0$, where $x_0$ is the optimal point. Clearly this can be written as another linear program in the following manner:
as $c^{T}x = y^{T}A^{'}x = y^{T}b^{'}$, the dual linear program is
\begin{center}
 $\min\ y^{T} b^{'}$ \\
 $A^{T}y=c$ \\
 $y \ge 0$
\end{center}
 

\end{document}
