\input{template}
\input{macros}

% epsfig is obsolete; graphicx (below) provides \includegraphics.
\usepackage{color,graphicx}
\begin{document}
\lecture{13}{Proof of correctness of the Simplex Algorithm (contd.) and Introduction to the Duality Theorem}{Akhil Lodha}


Let $x_{0}$ be an extreme point. Suppose it is given by:
\begin{equation}
A^{'}x_{0} = b^{'}, \qquad A^{''}x_{0} < b^{''}.
\end{equation}

From Lecture 11 we know that the neighbours of $x_{0}$ are along the columns of ${-A^{'}}^{-1}$.

\section{Proof of correctness of Simplex algorithm}

\begin{Thm}
If the cost decreases along the columns of ${-A^{'}}^{-1}$ then $x_{0}$ is optimal.
\end{Thm}

\begin{proof}
The columns of ${-A^{'}}^{-1}$ span $\mathbb{R}^{n}$. Let $x_{opt}$ be an optimal point, i.e., $c^{T}x_{opt} \geq c^{T}x_{0}$. Then we need to show that $c^{T}x_{opt} \leq c^{T}x_{0}$ to establish $c^{T}x_{opt} = c^{T}x_{0}$, and hence that $x_{0}$ is also optimal. Since the columns of ${-A^{'}}^{-1}$ form a basis, the vector $x_{opt} - x_{0}$ can be represented as a linear combination of them:

\begin{equation}
x_{opt} - x_{0} = \sum\beta_{j}{({-A^{'}}^{-1})}^{j}
\end{equation}

Now consider $A'(x_{opt} - x_{0})$

\begin{equation}
A'x_{opt} - A'x_{0} =  \sum\beta_{j}A'{({-A^{'}}^{-1})}^{j}
\end{equation}

We know that $A'x_{opt} \leq b'$ and $A'x_{0} = b'$, hence $A'(x_{opt} - x_{0}) \leq 0$. Also note that $A'{({-A^{'}}^{-1})}^{j}$ is an $n \times 1$ vector whose $j^{\mathrm{th}}$ element is $-1$ and whose remaining elements are $0$. Hence


\begin{equation}
 A'x_{opt} - A'x_{0} = \begin{pmatrix}
 -\beta_{1}\\ -\beta_{2} \\ \vdots \\ -\beta_{n}
 \end{pmatrix}
\end{equation}
 
$ \Rightarrow \forall j,\; \beta_{j} \geq 0 $.

From the discussion above we infer that $\beta_{j} \geq 0$ for each $j$.

Now consider 

\begin{equation}
c^{T}x_{opt} - c^{T}x_{0} =  \sum\beta_{j}c^{T}{({-A^{'}}^{-1})}^{j}
\end{equation}

Since the cost decreases along the columns of ${-A^{'}}^{-1}$ we have $c^{T}{({-A^{'}}^{-1})}^{j} \leq 0$, and since $\beta_{j} \geq 0$ we conclude that $\sum\beta_{j}c^{T}{({-A^{'}}^{-1})}^{j} \leq 0$.


Hence $c^{T}x_{opt} \leq c^{T}x_{0}$; but we know that $c^{T}x_{opt} \geq c^{T}x_{0}$, and therefore $c^{T}x_{opt} = c^{T}x_{0}$.
\end{proof}

\textbf{Note:} Using the above theorem we can now state that when the Simplex Algorithm terminates it gives us an optimal solution.

\section{Introduction to Duality theorem}

Let $x_{0}$ be an optimal point. Using the termination condition of the Simplex Algorithm we know that the cost decreases along the columns of ${-A^{'}}^{-1}$. In other words,

\begin{equation}
c^{T}({-A^{'}}^{-1}) = (\gamma_{1},\gamma_{2},\dots,\gamma_{n}), \qquad
\gamma_{i} \leq 0
\end{equation}

or

\begin{equation}
c^{T}({A^{'}}^{-1}) = (y_{1},y_{2},\dots,y_{n}), \qquad y_{i} \geq 0
\end{equation}
\begin{equation}
c^{T}({A^{'}}^{-1}) = y^{T}, \qquad y_{i} \geq 0
\end{equation}
\begin{equation} 
c^{T}({A^{'}}^{-1}A^{'}) = y^{T}A^{'}
\end{equation}
\begin{equation}
 c^{T} = y^{T}A^{'}
\end{equation}

We observe that at the optimal point the cost vector can be written as a \textit{non-negative} linear combination of the rows of $A'$. This means that $x_{0}$ is optimal iff $x_{0}$ is feasible and the cost can be written as a non-negative linear combination of the rows of $A'$.


\begin{figure}[htp]
 \centering
 \includegraphics[width=4.0in,height=3.5in]{lecture13afig}
 \caption{$a, a', b, b' \geq 0$. Cost can be written as a positive linear combination of normals to the hyperplanes.}
\end{figure}

The rows of $A'$ are also the direction normals to the respective hyperplanes. So a restatement of the above is as follows. Suppose $x_{0}$ is an extreme point given by the intersection of $n$ linearly independent hyperplanes then the cost vector can be written as a non-negative linear combination of the normals to these hyperplanes.

Now consider all the points (not necessarily feasible) given by the intersection of $n$ linearly independent hyperplanes where the cost vector can be written as a positive linear combination of the normals. We will show that among such points only the feasible point will have the lowest cost.

Consider the feasible point $x_{0}$ and any other point, say $x$, satisfying the above requirements; then $x - x_{0}$ can be written as a non-negative linear combination of the columns of ${-A^{'}}^{-1}$, where

\begin{equation}
 A'x = b', \qquad A^{''}x < b^{''}
\end{equation}
 
Note that the cost decreases along the columns of ${-A^{'}}^{-1}$. Following the steps of the proof of the previous theorem, one can show that $x - x_{0}$ can be written as a non-negative linear combination of the columns of ${-A^{'}}^{-1}$. Since the cost decreases along the columns of ${-A^{'}}^{-1}$, the cost at $x$ is at least the cost at $x_{0}$.

We also note that at such points the cost is  $c^{T}x = y^{T}A'x = y^{T}b'$.

This motivates the definition of the following LP called the dual:

\begin{equation}
 \mbox{minimize } \quad y^{T}b
\end{equation}
\begin{equation}
 \mbox{subject to } \quad A^{T}y = c
\end{equation}
\begin{equation}
 y \geq 0
\end{equation}

\end{document}

