%\documentclass[12pt]{article}
\documentclass{article}
\usepackage{repdefs,psfig,latexsym,color} % NOTE(review): psfig is obsolete; graphicx is the modern replacement (kept here in case repdefs or unseen material relies on it)
\usepackage{exscale}
% Number equations with bare arabic numerals (no section/chapter prefix).
\renewcommand{\theequation}{\arabic{equation}}
% \ip{x}{y}: inner product typeset as x^T y.
% NOTE(review): \renewcommand implies a prior \ip exists — presumably from
% repdefs; confirm this override is intentional.
\renewcommand{\ip}[2]{{#1}^T #2}
% \tmininx{#1}: compact, top-aligned two-line label
%     minimize
%   x in R^n #1
% for use next to an objective in display math.  Line spacing is tightened
% (\arraystretch 0.8 plus a -1mm tweak) and the subscript line is set in
% \scriptstyle.  \tinyRe is assumed to come from the repdefs package.
\newcommand{\tmininx}[1]{ {\renewcommand{\arraystretch}{0.8}
\begin{array}[t]{c}
\mbox{minimize} \vspace*{-1mm} \\
\mbox{ $\scriptstyle x \in \tinyRe^n #1 $ }
\end{array} \;} }
% Upright "span" and "rank" operators.  Using \mathop{\mathrm{...}}\nolimits
% (rather than \mbox) gives correct operator spacing around the name and lets
% it scale properly in sub/superscripts; \nolimits keeps any attached
% subscript to the side in display style, matching amsmath's \operatorname.
\newcommand{\Span}{\mathop{\mathrm{span}}\nolimits}
\newcommand{\Rank}{\mathop{\mathrm{rank}}\nolimits}
\begin{document}
\title{CNAc: Continuous Optimization \\ Problem set 5 --- interior-point methods}
\author{Honour School of Mathematics, Oxford University\\
Hilary Term \the\year, Dr Nick Gould}
\date{}
\maketitle
\noindent
{\bf Instructions:} Asterisked problems are intended as a
homework assignment.
% while the nonasterisked problem is not
%compulsory but can further help you understand the material.
Please put your solutions in Denis Zuev's pigeonhole
at the Maths Institute by 9am on Monday of 8th week.
\vspace{1cm}
\noindent
A positive scalar sequence $\{ \sigma_k \}$ with limit $0$ is said to converge
at a {\em Q-rate} $q$ if
\disp{\lim_{k\rightarrow\infty} \frac{ \sigma_{k+1}}{\sigma_k^q} \leq \kappa}
for some constant $\kappa$---here ``Q'' stands for ``Quotient'', and
the number $q$ is sometimes known as the {\em Q-factor}.
The convergence is said to be {\em Q-linear}
if $q = 1$ and $\kappa < 1$; it is {\em Q-superlinear} if
$q > 1$, or if $q = 1$ and $\kappa = 0$; and it is
{\em Q-quadratic} if $q = 2$. The Q-rate of convergence of a vector sequence
$\{ x_k \}$ to its limit $x_*$ is that of the sequence $\{ \sigma_k \}$,
where $\sigma_k = \| x_k - x_*\|$ for some appropriate norm.
\vspace{0.5cm}
\noindent
{\bf *Problem 1.}
\vspace{0.1cm}
\noindent
What is the Q-rate of convergence of the following sequences $\{\sigma_k\}$?
\begin{itemize}
\item[(a)] $\sigma_k = 1 / \log(k+1)$
\item[(b)] $\sigma_k = 2^{-k}$
\item[(c)] $\sigma_k = 2^{-k^2}$
\item[(d)] $\sigma_k = 2^{-2^k}$
\end{itemize}
\vspace{0.5cm}
\noindent
{\bf *Problem 2.}
\vspace{0.1cm}
\noindent
Consider the {\em reciprocal} barrier function
\disp{\Phi(x,\mu) = f(x) + \sum_{i=1}^m \frac{\mu}{c_i(x)}}
for the inequality constrained optimization problem of
minimizing $f(x)$ subject to $c_i(x) \geq 0$ for $i = 1, \ldots, m$.
By setting the gradient of $\Phi$ to zero, suggest suitable
Lagrange multiplier estimates $y(x)$. Hence state and prove
the analogue of Theorem 6.1 for the reciprocal barrier function.
\vspace{0.5cm}
\noindent
{\bf *Problem 3.}
\vspace{0.1cm}
\noindent
\begin{itemize}
\item[(a)] Show that the logarithmic barrier function
for the problem of minimizing $1/(1+x^2)$ subject to $x \geq 1$
is unbounded from below for all $\mu$.\newline
[{\it Thus the barrier function approach will not always work.}]
\item[(b)] Find the minimizer $x(\mu)$, and its related Lagrange multiplier
estimate $y(\mu)$, of the logarithmic barrier function
for the problem of minimizing $\half x^2$ subject to $x \geq 2a$ where $a > 0$.
What is the rate of convergence of $x(\mu)$ to $x_*$ as a function of $\mu$?
And the rate of convergence of $y(\mu)$ to $y_*$ as a function of $\mu$?
\newline
[{\it Problems with strictly complementary solutions generally have
$x(\mu) - x_* = O(\mu)$ and $y(x(\mu)) - y_* = O(\mu)$
as $\mu \rightarrow 0$.}]
\item[(c)]
Find the minimizer $x(\mu)$, and its related Lagrange multiplier
estimate $y(\mu)$, of the logarithmic barrier function
for the problem of minimizing $\half x^2$ subject to $x \geq 0$.
How do the errors $x(\mu) - x_*$ and $y(\mu) - y_*$ behave as a
function of $\mu$?
\newline
[{\it Without strict complementarity, the errors
$x(\mu) - x_*$ and $y(x(\mu)) - y_*$ are generally larger
than in the strictly complementary case.}]
\end{itemize}
\end{document}