\documentclass[11pt]{article}

\usepackage{graphicx}% graphicx supersedes the obsolete epsfig package
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amstext}
\usepackage{amscd}
\usepackage{amsmath}
\usepackage{xspace}
\usepackage{theorem}
\usepackage{float}

%\usepackage{layout}% if you want to see the layout parameters
                     % and now use \layout command in the body

% This is the stuff for normal spacing
\makeatletter
 % Page geometry: a 6.5in-wide text block, no running head, on US letter.
 \setlength{\textwidth}{6.5in}
 \setlength{\oddsidemargin}{0in}
 \setlength{\evensidemargin}{0in}
 \setlength{\topmargin}{0.25in}
 \setlength{\textheight}{8.25in}
 \setlength{\headheight}{0pt}
 \setlength{\headsep}{0pt}
 \setlength{\marginparwidth}{59pt}

 % Block-style paragraphs (no indent, small rubber gap between them),
 % with matching spacing around theorems and displayed math.
 % \theorempreskipamount/\theorempostskipamount come from the `theorem'
 % package loaded above.
 \setlength{\parindent}{0pt}
 \setlength{\parskip}{5pt plus 1pt}
 \setlength{\theorempreskipamount}{5pt plus 1pt}
 \setlength{\theorempostskipamount}{0pt}
 \setlength{\abovedisplayskip}{8pt plus 3pt minus 6pt}
 \setlength{\intextsep}{15pt plus 3pt minus 6pt}

 % Tighter sectioning than the article-class defaults.  The arguments of
 % \@startsection are: {name}{level}{indent}{pre-skip}{post-skip}{font};
 % a negative pre-skip component suppresses indentation of the first
 % paragraph after the heading, and a negative post-skip (as in
 % \paragraph/\subparagraph) makes the heading run-in.
 \renewcommand{\section}{\@startsection{section}{1}{0mm}%
                                   {2ex plus -1ex minus -.2ex}%
                                   {1.3ex plus .2ex}%
                                   {\normalfont\Large\bfseries}}%
 \renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
                                     {1ex plus -1ex minus -.2ex}%
                                     {1ex plus .2ex}%
                                     {\normalfont\large\bfseries}}%
 \renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
                                     {1ex plus -1ex minus -.2ex}%
                                     {1ex plus .2ex}%
                                     {\normalfont\normalsize\bfseries}}
 \renewcommand\paragraph{\@startsection{paragraph}{4}{0mm}%
                                    {1ex \@plus1ex \@minus.2ex}%
                                    {-1em}%
                                    {\normalfont\normalsize\bfseries}}
 \renewcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}%
                                       {2.0ex \@plus1ex \@minus .2ex}%
                                       {-1em}%
                                      {\normalfont\normalsize\bfseries}}
\makeatother

% Counter for numbering lectures (not used in this handout; presumably
% shared with the lecture-notes template).
\newcounter{thelecture}

% Proof-like run-in environments.  Each ends with a right-flushed
% 2mm x 2mm filled box as the end-of-proof marker; proofofnobox omits it.
\newenvironment{proof}{{\bf Proof:  }}{\hfill\rule{2mm}{2mm}}
\newenvironment{proofof}[1]{{\bf Proof of #1:  }}{\hfill\rule{2mm}{2mm}}
\newenvironment{proofofnobox}[1]{{\bf#1:  }}{}
\newenvironment{example}{{\bf Example:  }}{\hfill\rule{2mm}{2mm}}

%\renewcommand{\theequation}{\thesection.\arabic{equation}}
%\renewcommand{\thefigure}{\thesection.\arabic{figure}}

% Theorem-like environments.  All share the `fact' counter, so Facts,
% Lemmas, Theorems, etc. are numbered in a single sequence.
\newtheorem{fact}{Fact}
\newtheorem{lemma}[fact]{Lemma}
\newtheorem{theorem}[fact]{Theorem}
\newtheorem{definition}[fact]{Definition}
\newtheorem{corollary}[fact]{Corollary}
\newtheorem{proposition}[fact]{Proposition}
\newtheorem{claim}[fact]{Claim}
\newtheorem{exercise}[fact]{Exercise}

% math notation
% math notation
\newcommand{\R}{\ensuremath{\mathbb R}}  % the reals
\newcommand{\Z}{\ensuremath{\mathbb Z}}  % the integers
\newcommand{\N}{\ensuremath{\mathbb N}}  % the naturals
\newcommand{\B}{\ensuremath{\mathbb B}}  % Boolean domain
\newcommand{\F}{\ensuremath{\mathcal F}} % NOTE(review): generic family/field -- confirm intended meaning
\newcommand{\SymGrp}{\ensuremath{\mathfrak S}} % symmetric group
% Probability and expectation with bold operator names and
% automatically sized brackets, usable in text or math mode.
\newcommand{\prob}[1]{\ensuremath{\text{{\bf Pr}$\left[#1\right]$}}}
\newcommand{\expct}[1]{\ensuremath{\text{{\bf E}$\left[#1\right]$}}}
\newcommand{\size}[1]{\ensuremath{\left|#1\right|}}           % cardinality / absolute value
\newcommand{\ceil}[1]{\ensuremath{\left\lceil#1\right\rceil}}
\newcommand{\floor}[1]{\ensuremath{\left\lfloor#1\right\rfloor}}
\newcommand{\ang}[1]{\ensuremath{\langle{#1}\rangle}}         % angle brackets
\newcommand{\poly}{\operatorname{poly}}
\newcommand{\polylog}{\operatorname{polylog}}

% anupam's abbreviations
\newcommand{\e}{\epsilon}                % \e and \eps are synonyms
\newcommand{\eps}{\epsilon}
\newcommand{\half}{\ensuremath{\frac{1}{2}}}
\newcommand{\junk}[1]{}                  % swallows its argument (quick way to comment out text)
\newcommand{\sse}{\subseteq}
\newcommand{\union}{\cup}
\newcommand{\meet}{\wedge}
\newcommand{\dist}[1]{\|{#1}\|_{\text{dist}}}
% Hooked arrows for embeddings; \embeds puts its argument over the arrow.
\newcommand{\hooklongrightarrow}{\lhook\joinrel\longrightarrow}
\newcommand{\embeds}[1]{\;\lhook\joinrel\xrightarrow{#1}\;}
\newcommand{\mnote}[1]{\normalmarginpar \marginpar{\tiny #1}}  % tiny margin note

% Notation used in this homework: the optimum, and "hatted" tree/graph/cost.
\newcommand{\OPT}{\textsf{OPT}}
\newcommand{\That}{\widehat{T}}
\newcommand{\Ghat}{\widehat{G}}
\newcommand{\chat}{\widehat{c}}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Document begins here %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% \hwheadings{<homework number>}{<date handed out>}{<due date>}:
% typesets the two-line homework header (course name, homework number,
% dates) followed by a full-width horizontal rule.
\newcommand{\hwheadings}[3]{
{\bf Approximation Algorithms} \hfill {{\bf Homework #1}}\\
{{\bf Date:} #2} \hfill {{\bf Due:} #3} \\
\rule[0.1in]{\textwidth}{0.025in}
%\thispagestyle{empty}
}

\begin{document}

\hwheadings{3}{Sept 28, 2005}{Oct 5, 2005}

\textbf{Note:} In all these problems: if we ask you to show an
approximation ratio of $\rho$, you should tell us the best
approximation ratio you actually proved, whether it is better or worse
than $\rho$.

\begin{enumerate}

\item \textbf{(K-Center Problem Revisited)} Recall the K-center problem:
  We are given an undirected {\em complete} graph $G = (V,E)$ with
  nonnegative {\em metric} distances $d$ on the edges; i.e., $d_{ij} +
  d_{jk} \geq d_{ik}$ for all $i, j, k \in V$. Given $G$ and a positive
  integer $K$, the $K$-center problem asks for a subset $C \subseteq V$
  of vertices of size $|C| = K$ that minimizes
  \begin{gather}
    \max_{v \in V} \, d(v, C). \label{eq1}
  \end{gather}
  Let $D^*$ be this minimum value when $|C| = K$.
  
  Now, we consider a variant: In this, we insist on a solution such that
  the distance from every node to its closest center is at most some
  specified distance $D^*$, and want to minimize the number of centers $K'$
  to locate in order to achieve this. (I.e., to defeat the hardness of
  the problem, instead of relaxing the distance guarantee, we are
  instead allowing ourselves to place more centers.)
  
  Design an algorithm for this problem that places a set of centers $C$
  with $|C| = O(K \log |V|)$, and ensures that~(\ref{eq1}) is at most
  $D^*$.

\item \textbf{(Max Leaf Spanning Tree Revisited)} The \emph{locality gap} 
of an algorithm for this problem is the largest number $\alpha \geq 1$ for 
which you can show a locally optimal tree (LOT) $T$ such that $\ell(T) \leq \ell(T^*)/\alpha$, 
where $T^*$ is a max-leaf spanning tree. 
    
The algorithm in class showed that $\alpha$ can never be more than $10$. 
By exhibiting a suitable family of examples, give the best lower bound 
you can for the locality gap of the algorithm given in class.

\item \textbf{(Multiway Cut Revisited)} We can look at \textsc{Multiway
    Cut} as a coloring problem: color each node in $V$ with one of $k$
  colors such that the terminal $s_i$ is colored with color $i$, so as
  to minimize the number of bichromatic edges. (Make sure you believe
  this!)

  Consider an extension of the problem: here each vertex $v \in V$ has
  an associated coloring cost function $C_v: [k] \to \R_{\geq 0}$ such
  that the cost of coloring $v$ with color $i$ is $C_v(i)$. Now we want
  to find a coloring $f: V \to [k]$ so as to minimize the total cost
  \begin{gather}
    \Phi(f) = \sum_{v \in V} C_v(f(v)) + \text{ number of bichromatic
      edges in $f$ }.
  \end{gather}
  Note that if we set $C_{s_j}(i)$ to be $0$ if $i = j$ and $\infty$
  otherwise, and for each non-terminal node $v$, we set $C_v(i) = 0$ for
  all colors $i$, then we get back the \textsc{Multiway Cut} problem.

  \begin{enumerate}
  \item Our local search algorithm will make moves of the following
    form: if we are at coloring $f$, pick a color $i$ and try to find
    the \emph{best} coloring $f'$ obtained from $f$ by recoloring some
    of the vertices by the color $i$. I.e., $f'$ satisfies the property
    that either $f'(v) = i$ or $f'(v) = f(v)$, and it is the one with
    the least cost. Call such a best coloring an \emph{$i$-move}. (In
    case of ties, choose one arbitrarily.) \emph{Note that we have not shown
    how to find such an $i$-move; we will discuss this issue later.}

    Show that if $f$ is a local optimum with respect to these moves
    (i.e., none of the $k$ potential $i$-moves results in the cost
    strictly decreasing), then $\Phi(f) \leq 2 \Phi(\OPT)$. As usual,
    $\OPT$ is the optimal coloring.

  \item Since it may take a long time to reach a local minimum, we can
    change the algorithm to make a move from $f$ to $f'$ as long as it
    decreases the cost by at least $\Phi(f) \times (\eps/k)$. Show that
    if we start from a coloring $f_0$, then the algorithm takes at most
    \begin{gather}
      O\bigg(\frac{\log (\frac{\Phi(f_0)}{\Phi(\OPT)})}{- \log (1 -
      \eps/k)} \bigg)
      \label{BVZ-rounds}
    \end{gather}
    local improvement steps to reach a solution of cost at most
    $2(1+\eps) \Phi(\OPT)$.

  \item Note that the number of steps in the above solution is not
    \emph{strongly polynomial}: if the coloring costs $C_v(\cdot)$ are
    very large, the number of rounds may be very large (albeit
    polynomial in the representation of the instance). One way to fix
    this is to choose the start state $f_0$ carefully. Can you show a
    choice of $f_0$ so that~(\ref{BVZ-rounds}) is at most $\poly(n, k,
    1/\eps)$?

    What about the case when $k \gg n$? Can you change the algorithm so
    that the number of steps to reach a near-local-optimum is at most
    $\poly(n, 1/\eps)$?

  \item Suppose you now wanted to make smaller local-search moves of the
    form: pick a vertex $v$ and a color $i$, and paint $v$ with color
    $i$ if the resulting $\Phi(f)$ decreases. (These moves are called
    the \emph{Glauber dynamics}.) Note that the new algorithm makes much
    smaller moves than the one above, and hence may take more time to
    reach a local optimum.

    All local minima of this new process are also $2$-approximate: give
    a proof or a counterexample.
  \end{enumerate}
  \textbf{Remark: } We did not address the question: given a color $i$
  and a coloring $f$, how can we find the best $i$-move?  Despite the
  fact that there may be $\Omega(2^n)$ possible $i$-moves to consider,
  we can indeed find it efficiently using an $s$-$t$ min-cut computation
  in a suitably defined graph! (We'll show how to do this in the
  answers, or you can think about it.)
\end{enumerate}


\end{document}
