\documentclass[fleqn]{article}
\usepackage{haldefs}
\usepackage{notes}
\usepackage{url}
\begin{document}
\lecture{Machine Learning}{HW06: Probabilistic and neural modeling}{CS 726, Fall 2011}
% IF YOU ARE USING THIS .TEX FILE AS A TEMPLATE, PLEASE REPLACE
% "CS 726, Fall 2011" WITH YOUR NAME AND UID.
Hand in at: \url{http://www.cs.utah.edu/~hal/handin.pl?course=cs726}.
Remember that only PDF submissions are accepted. We encourage using
\LaTeX\ to produce your writeups. See \verb+hw00.tex+ for an example
of how to do so. You can make a \verb+.pdf+ out of the \verb+.tex+ by
running ``\verb+pdflatex hw00.tex+''.
\begin{enumerate}
\item Optimize the following constrained problem using a log transform
and the method of Lagrange multipliers (you may assume all of the
$x_d$ are non-negative):
\begin{equation}
\min_{\vec\th} \prod_d \th_d^{x_d} \qquad
\text{subj.\ to } \norm{\vec\th}_2 = 1.
\end{equation}
%\begin{solution}
%\end{solution}
\item What is the difference between assuming that features are
independent and assuming that features are independent given the
label?
%\begin{solution}
%\end{solution}
\item Suppose you wanted to derive a probabilistic model for
regression that corresponded to \emph{absolute} penalties on
predictions (rather than the quadratic penalties in the standard
model). What noise distribution would you have to assume (as
opposed to the Gaussian assumption)? (Hint: the answer can be found
elsewhere in the chapter in a different context.)
%\begin{solution}
%\end{solution}
\end{enumerate}
\end{document}