%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Beamer Presentation
% LaTeX Template
% Version 1.0 (10/11/12)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% ----------------------------------------------------------------------------------------
%	PACKAGES AND THEMES
% ----------------------------------------------------------------------------------------

\documentclass[hyperref={colorlinks=true}]{beamer}

\mode<presentation> {

  % The Beamer class comes with a number of default slide themes
  % which change the colors and layouts of slides. Below this is a list
  % of all the themes, uncomment each in turn to see what they look like.

  \usetheme{default}
  % \usetheme{AnnArbor}
  % \usetheme{Antibes}
  % \usetheme{Bergen}
  % \usetheme{Berkeley}
  % \usetheme{Berlin}
  % \usetheme{Boadilla}
  % \usetheme{CambridgeUS}
  % \usetheme{Copenhagen}
  % \usetheme{Darmstadt}
  % \usetheme{Dresden}
  % \usetheme{Frankfurt}
  % \usetheme{Goettingen}
  % \usetheme{Hannover}
  % \usetheme{Ilmenau}
  % \usetheme{JuanLesPins}
  % \usetheme{Luebeck}
  % \usetheme{Madrid}
  % \usetheme{Malmoe}
  % \usetheme{Marburg}
  % \usetheme{Montpellier}
  % \usetheme{PaloAlto}
  % \usetheme{Pittsburgh}
  % \usetheme{Rochester}
  % \usetheme{Singapore}
  % \usetheme{Szeged}
  % \usetheme{Warsaw}

  % As well as themes, the Beamer class has a number of color themes
  % for any slide theme. Uncomment each of these in turn to see how it
  % changes the colors of your current slide theme.

  % \usecolortheme{albatross}
  % \usecolortheme{beaver}
  % \usecolortheme{beetle}
  % \usecolortheme{crane}
  % \usecolortheme{dolphin}
  % \usecolortheme{dove}
  % \usecolortheme{fly}
  % \usecolortheme{lily}
  % \usecolortheme{orchid}
  \usecolortheme{rose}
  % \usecolortheme{seagull}
  % \usecolortheme{seahorse}
  % \usecolortheme{whale}
  % \usecolortheme{wolverine}

  % \setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line
  % \setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line

  % \setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line
}

\usepackage{graphicx} % Allows including images
\usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables
\usepackage{cancel}
\usepackage{amsmath}
\usepackage{amssymb}
% \usepackage{showframe}
\usepackage{caption}

%\usepackage{subcaption}
\usepackage{tcolorbox}
\usepackage{tabularx}
\usepackage{array}
\usepackage{pgfplots}
\tcbuselibrary{skins}


\usepackage{subfig}
\beamertemplatenavigationsymbolsempty
\usepackage{color, colortbl}
\definecolor{LRed}{rgb}{1,.8,.8}
\definecolor{MRed}{rgb}{1,.6,.6}

\usepackage{tikz}
\usetikzlibrary{shapes,arrows,shapes.multipart,fit,shapes.misc,positioning}
\newcommand{\der}[2]{\frac{\partial #1}{\partial #2}}
%\usepackage[labelformat=empty]{caption}

\newcommand{\ra}[1]{\renewcommand{\arraystretch}{#1}}

\newcommand\marktopleft[1]{%
    \tikz[overlay,remember picture]
        \node (marker-#1-a) at (0,1.5ex) {};%
}
\newcommand\markbottomright[1]{%
    \tikz[overlay,remember picture]
        \node (marker-#1-b) at (7ex,0) {};%
    \tikz[overlay,remember picture,thick,dashed,inner sep=3pt]
        \node[draw=black,rounded corners=0pt,fill=red,opacity=.2,fit=(marker-#1-a.center) (marker-#1-b.center)] {};%
}


% ----------------------------------------------------------------------------------------
%	TITLE PAGE
% ----------------------------------------------------------------------------------------

\title[]{Macro II: Stochastic Processes II} % The short title appears at the bottom of every slide, the full
% title is only on the title page

\author{Professor Griffy} % Your name
\institute[University at Albany, SUNY] % Your institution as it will appear on the bottom of
% every slide, may be shorthand to save space
{
UAlbany  \ % Your institution for the title page
}
\date{Spring 2021} % Date, can be changed to a custom date

\begin{document}

\begin{frame}
  \titlepage % Print the title page as the first slide
\end{frame}




% ----------------------------------------------------------------------------------------
%	PRESENTATION SLIDES
% ----------------------------------------------------------------------------------------

% ------------------------------------------------
\section{Introduction} % Sections can be created in order to organize your presentation into discrete blocks, all sections and subsections are automatically printed in the table of contents as an overview of the talk
% ------------------------------------------------

\begin{frame}
  \frametitle{Introduction}
  \begin{itemize}
  \item Today: study a variety of stochastic processes that show up in macroeconomics.
  \item Then, discuss detrending data.
  \end{itemize}
\end{frame}

% ------------------------------------------------

\begin{frame}
  \frametitle{Stochastic Processes}
  \begin{itemize}
  \item Conditional expectations and linear projections
  \item White noise
  \item AR(1)
  \item MA(1)
  \item ARMA(p,q)
  \item Detrending data
  \end{itemize}
\end{frame}

% ------------------------------------------------

\section{Stochastic Processes}

\begin{frame}
  \frametitle{What is a Stochastic Process?}
\begin{itemize}
\item A stochastic process is an infinite sequence of random variables $%
\left\{ X_{t}\right\} _{t=-\infty }^{\infty }$

\item $j$th autocovariance: $\gamma _{j}=C\left( X_{t},X_{t-j}\right) $

\item Strict stationarity: the distribution of $\left(
X_{t},X_{t+j_{1}},X_{t+j_{2}},...,X_{t+j_{n}}\right) $ does not depend on $t$

\item Covariance stationarity: $E\left( X_{t}\right) $ and $C\left(
X_{t},X_{t-j}\right) $ do not depend on $t$
\end{itemize}
\end{frame}

\section{Conditional distributions}

\begin{frame}
  \frametitle{Defining a Conditional Density}
\begin{itemize}
\item Work with random vector $\underline{x}=(X,Y)\thicksim F(x,y)$.

\begin{itemize}
\item $X$ and $Y$ are random variables

\item $x$ and $y$ are realizations of the random variables

\item $F(x,y)$ is the joint cumulative distribution function

\item $f\left( x,y\right) $ is the joint density function
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
  \frametitle{Conditional Variables and Independence}
\begin{itemize}
\item Conditional probability

\begin{itemize}
\item when $\Pr (\underline{x}\in B)>0$,
\begin{equation*}
\Pr (\left. \underline{x}\in A\right\vert \underline{x}\in B)=\Pr (A|B)=%
\frac{\Pr (A\cap B)}{\Pr (B)}.
\end{equation*}
\end{itemize}

\item Conditional distribution $F(\left. y\right\vert x)$ (handles $\Pr
(B)=0 $)

\begin{itemize}
\item Marginal distribution: $F_{X}\left( x\right) =\Pr \left( X\leq
x\right) $

\item $F\left( \left. y\right\vert x\right) $ is $\Pr \left( Y\leq y\right) $
conditional on $X=x$
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
\begin{itemize}
\item Independence: The random variables $X$ and $Y$ are independent if%
\begin{equation*}
F\left( x,y\right) =F_{X}\left( x\right) F_{Y}\left( y\right)
\end{equation*}

\begin{itemize}
\item If $X$ and $Y$ are independent, then
\begin{equation*}
F\left( \left. y\right\vert x\right) =F_{Y}\left( y\right)
\end{equation*}%
and%
\begin{equation*}
F\left( \left. x\right\vert y\right) =F_{X}\left( x\right)
\end{equation*}

\item i.i.d.\ means independent and identically distributed
\end{itemize}

\item Conditional (mathematical, rational) expectation%
\begin{equation*}
E\left( \left. Y\right\vert x\right) =\int_{-\infty }^{\infty }ydF\left(
\left. y\right\vert x\right) =\int_{-\infty }^{\infty }yf\left( \left.
y\right\vert x\right) dy.
\end{equation*}%

\end{itemize}
\end{frame}
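
\begin{frame}
  \frametitle{Example: Jointly Normal $(X,Y)$}
\begin{itemize}
\item A standard special case: if $(X,Y)$ is jointly normal, the
conditional expectation is exactly linear in $x$,%
\begin{equation*}
E\left( \left. Y\right\vert x\right) =E\left( Y\right) +\frac{\sigma _{xy}}{%
\sigma _{x}^{2}}\left[ x-E\left( X\right) \right] .
\end{equation*}

\item In this case the conditional expectation and the best linear
predictor (next slide) coincide.
\end{itemize}
\end{frame}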

\section{Conditional Expectations and Linear Projections}

\begin{frame}
  \frametitle{Conditional Expectations as OLS}
\begin{itemize}
\item Conditional expectation: the function that minimizes the mean squared
forecast error%
\begin{equation*}
E\left( Y|\underline{X}\right) =\arg \min_{\left\{ f\left( \cdot \right)
\right\} }E\left( \left[ Y-f\left( \underline{X}\right) \right] ^{2}\right)
\end{equation*}%
where $\underline{X}$ is a vector

\item Best linear predictor or linear projection: linear function that
minimizes the mean squared forecast error
\begin{equation*}
\hat{E}\left( Y|\underline{X}\right) =\arg \min_{\left\{ \text{ linear }%
f\left( \cdot \right) \right\} }E\left( \left[ Y-f\left( \underline{X}%
\right) \right] ^{2}\right)
\end{equation*}

\item $\hat{E}\left( Y|\underline{X}\right) =E\left( Y|\underline{X}\right) $
when $E\left( Y|\underline{X}\right) $ is linear

\item In samples, the linear projection is estimated by OLS, since OLS
gives the best linear unbiased estimates
\end{itemize}
\end{frame}
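
\begin{frame}[fragile]
  \frametitle{Sketch: Linear Projection via OLS}
  A minimal NumPy sketch (values illustrative): with a linear conditional
  expectation, $\beta =V(X)^{-1}C(X,Y)$ and $\alpha =E(Y)-E(X)\beta $
  match the least-squares fit.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 10_000
x = rng.normal(size=n)
y = 1.0 + 2.0 * x + rng.normal(size=n)  # E(Y|x) = 1 + 2x

# Moment formulas for the linear projection
beta = np.cov(x, y, ddof=0)[0, 1] / np.var(x)
alpha = y.mean() - x.mean() * beta

# Same answer from least squares
A = np.column_stack([np.ones(n), x])
coef, *_ = np.linalg.lstsq(A, y, rcond=None)
print(alpha, beta)   # approx 1.0, 2.0
print(coef)          # approx [1.0, 2.0]
\end{verbatim}
\end{frame}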

%

% \subsection{Least squares normal equations}

% \begin{frame}\frametitle{OLS Restated}
% \begin{itemize}
% \item Objective%
% \begin{equation*}
% \underset{\left\{ \alpha ,\left\{ \beta _{j}\right\} _{j=1}^{J}\right\} }{%
% \min }E\left( \left[ Y-\left( \alpha +\sum\nolimits_{j=1}^{J}X_{j}\beta
% _{j}\right) \right] ^{2}\right)
% \end{equation*}

% \item First order conditions (Note: problem is convex)%
% \begin{eqnarray*}
% -2E\left( \left[ Y-\left( \alpha +\sum\nolimits_{j=1}^{J}X_{j}\beta
% _{j}\right) \right] \right) &=&0, \\
% -2E\left( \left[ Y-\left( \alpha +\sum\nolimits_{j=1}^{J}X_{j}\beta
% _{j}\right) \right] X_{k}\right) &=&0, \\
% for\;k=1,2,...,J. &&
% \end{eqnarray*}%
% \end{itemize}
% \end{frame}


% \begin{frame}\frametitle{OLS Restated}
% \begin{itemize}
% \item Forecast error $\varepsilon =Y-\left( \alpha
% +\sum\nolimits_{j=1}^{J}X_{j}\beta _{j}\right) $ is

% \begin{itemize}
% \item Zero-mean

% \item Uncorrelated with $\underline{X}$
% \end{itemize}

% \item In matrix form,%
% \begin{equation*}
% \hspace{-0.5in}E\left(
% \begin{array}{l}
% Y \\
% X_{1}Y \\
% \vdots \\
% X_{J}Y%
% \end{array}%
% \right) =E\left(
% \begin{array}{llll}
% 1 & X_{1} & \cdots & X_{J} \\
% X_{1} & X_{1}X_{1} & \vdots & X_{1}X_{J} \\
% \vdots & \vdots & \vdots & \vdots \\
% X_{J} & X_{J}X_{1} & \vdots & X_{J}X_{J}%
% \end{array}%
% \right) \left(
% \begin{array}{l}
% \alpha \\
% \beta _{1} \\
% \vdots \\
% \beta _{J}%
% \end{array}%
% \right) .
% \end{equation*}%
% \end{itemize}
% \end{frame}
% %

% \begin{frame}  \frametitle{Derivation}
% \begin{itemize}
% \item Let $\mathbf{X}$ be a row vector and rewrite in matrix form%
% \begin{equation*}
% E\left(
% \begin{array}{l}
% Y \\
% \mathbf{X}^{\prime }Y%
% \end{array}%
% \right) =E\left[
% \begin{array}{ll}
% 1 & \mathbf{X} \\
% \mathbf{X}^{\prime } & \mathbf{X}^{\prime }\mathbf{X}%
% \end{array}%
% \right] \left(
% \begin{array}{l}
% \alpha \\
% \mathbf{\beta }%
% \end{array}%
% \right) .
% \end{equation*}

% \begin{itemize}
% \item First line:
% \begin{equation*}
% \alpha =E\left( Y\right) -E\left( \mathbf{X}\right) \mathbf{\beta }
% \end{equation*}

% \item Second line:%
% \begin{eqnarray*}
% \hspace{-0.75in}E\left( \mathbf{X}^{\prime }Y\right) &=&E\left( \mathbf{X}%
% ^{\prime }\right) \alpha +E\left( \mathbf{X}^{\prime }\mathbf{X}\right)
% \mathbf{\beta } \\
% &=&E\left( \mathbf{X}^{\prime }\right) E\left( Y\right) -E\left( \mathbf{X}%
% ^{\prime }\right) E\left( \mathbf{X}\right) \mathbf{\beta }+E\left( \mathbf{X%
% }^{\prime }\mathbf{X}\right) \mathbf{\beta } \\
% &=&\left[ E\left( \mathbf{X}^{\prime }\mathbf{X}\right) -E\left( \mathbf{X}%
% ^{\prime }\right) E\left( \mathbf{X}\right) \right] \mathbf{\beta }+E\left(
% \mathbf{X}^{\prime }\right) E\left( Y\right) \\
% &=&E\left( \mathbf{X}^{\prime }\right) E\left( Y\right) +V\left( \mathbf{X}%
% ^{\prime }\right) \mathbf{\beta }.
% \end{eqnarray*}%

% \item Second line (continued)%
% \begin{equation*}
% E\left( \mathbf{X}^{\prime }Y\right) -E\left( \mathbf{X}^{\prime }\right)
% E\left( Y\right) =V\left( \mathbf{X}^{\prime }\right) \mathbf{\beta }
% \end{equation*}
% \end{itemize}
% \end{itemize}
% \end{frame}


% \begin{frame}\frametitle{Final Derivation}
% \begin{itemize}
% \item Solving for $\beta $%
% \begin{equation*}
% \mathbf{\beta }=\left[ V\left( \mathbf{X}^{\prime }\right) \right]
% ^{-1}C\left( \mathbf{X}^{\prime },Y\right) ,
% \end{equation*}

% \item Substituting into the linear projection%
% \begin{eqnarray*}
% \widehat{E}\left( \left. Y\right\vert \mathbf{X}\right) &=&\alpha +\mathbf{%
% X\beta } \\
% &=&E\left( Y\right) -E\left( \mathbf{X}\right) \mathbf{\beta +X\beta } \\
% &=&E\left( Y\right) +\left[ \mathbf{X}-E\left( \mathbf{X}\right) \right]
% \mathbf{\beta }
% \end{eqnarray*}%

% \item Final result
% \begin{eqnarray*}
% \mathbf{\beta } &=&\left[ V\left( \mathbf{X}^{\prime }\right) \right]
% ^{-1}C\left( \mathbf{X}^{\prime },Y\right) , \\
% \widehat{E}\left( \left. Y\right\vert \mathbf{X}\right) &=&E\left( Y\right) +%
% \left[ \mathbf{X}-E\left( \mathbf{X}\right) \right] \mathbf{\beta }.
% \end{eqnarray*}
% \end{itemize}
% \end{frame}

% \begin{frame}\frametitle{Summing up}
% \begin{itemize}
% \item Analytical shortcut: First find $\mathbf{\beta }$, using variances and
% covariances; then find $\alpha $, using $\mathbf{\beta }$, $E\left( Y\right)
% $ and $E\left( \mathbf{X}\right) $.

% \item Scalar case%
% \begin{equation*}
% \widehat{E}\left( \left. Y\right\vert X\right) =\alpha +\beta X=E\left(
% Y\right) +\frac{\sigma _{xy}}{\sigma _{x}^{2}}\left[ X-E\left( X\right) %
% \right] .
% \end{equation*}%
% \end{itemize}
% \end{frame}


\section{Introduction to ARMA processes (stationary)}
\begin{frame}
  \frametitle{White noise}

\begin{equation*}
\left\{ \varepsilon _{t}\right\} _{t=-\infty }^{\infty }
\end{equation*}

\begin{itemize}
\item $E\left( \varepsilon _{t}\right) =0,\ \forall t$

\item $V\left( \varepsilon _{t}\right) =\sigma _{\varepsilon }^{2},\ \forall
t$

\item $C\left( \varepsilon _{t},\varepsilon _{t-j}\right) =0,\ \forall t,\
j\neq 0$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{First-order autoregressive (AR(1)) process}

\begin{equation*}
x_{t}=\alpha +\phi x_{t-1}+\varepsilon _{t}
\end{equation*}

\begin{itemize}
\item $\varepsilon _{t}$ is white noise and $\left\vert \phi \right\vert <1$
as required for stationarity

\item By recursive substitution under stationarity%
\begin{eqnarray*}
x_{t} &=&\alpha +\varepsilon _{t}+\phi \left[ \alpha +\phi
x_{t-2}+\varepsilon _{t-1}\right] \\
&=&\frac{\alpha }{1-\phi }+\overset{\infty }{\underset{j=0}{\sum }}\phi
^{j}\varepsilon _{t-j}.
\end{eqnarray*}

\item $E\left( x_{t}\right) =\alpha /\left( 1-\phi \right) $
\end{itemize}
\end{frame}
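
\begin{frame}[fragile]
  \frametitle{Sketch: Simulating an AR(1)}
  A minimal NumPy sketch (parameter values illustrative): the sample mean
  of a long simulated path approaches $\alpha /(1-\phi )$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
alpha, phi, sigma, T = 1.0, 0.9, 1.0, 200_000

x = np.empty(T)
x[0] = alpha / (1 - phi)  # start at the unconditional mean
for t in range(1, T):
    x[t] = alpha + phi * x[t - 1] + sigma * rng.normal()

print(x.mean())  # approx alpha/(1-phi) = 10.0
\end{verbatim}
\end{frame}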

\begin{frame}
  \frametitle{Moments}

\begin{itemize}
\item Facts%
\begin{equation*}
V\left( aX+bY\right) =a^{2}V\left( X\right) +b^{2}V\left( Y\right)
+2abC\left( X,Y\right)
\end{equation*}%
\begin{equation*}
C\left( aX+bY,cX+dY\right) =acV\left( X\right) +bdV\left( Y\right) +\left(
ad+bc\right) C\left( X,Y\right)
\end{equation*}

\item Since $x_{t}$ can be expressed as%
\begin{equation*}
x_{t}=\frac{\alpha }{1-\phi }+\overset{\infty }{\underset{j=0}{\sum }}\phi
^{j}\varepsilon _{t-j},
\end{equation*}%
the variance of $x_{t}$ is
\begin{equation*}
V\left( x_{t}\right) =\overset{\infty }{\underset{j=0}{\sum }}\left( \phi
^{j}\right) ^{2}\sigma _{\varepsilon }^{2}=\frac{\sigma _{\varepsilon }^{2}}{%
1-\phi ^{2}}.
\end{equation*}%
\end{itemize}
\end{frame}


\begin{frame}\frametitle{Covariances}
\begin{itemize}
\item Covariance%
\begin{eqnarray*}
C\left( x_{t},x_{t-1}\right) &=&C\left( \alpha +\phi x_{t-1}+\varepsilon
_{t},x_{t-1}\right) \\
&=&0+\phi V\left( x_{t-1}\right) +0=\phi \frac{\sigma _{\varepsilon }^{2}}{%
1-\phi ^{2}}, \\
C\left( x_{t},x_{t-k}\right) &=&C\left( \phi ^{k}x_{t-k}+\overset{k-1}{%
\underset{j=0}{\sum }}\phi ^{j}\varepsilon _{t-j},x_{t-k}\right) \\
&=&\phi ^{k}\frac{\sigma _{\varepsilon }^{2}}{1-\phi ^{2}}=\phi ^{k}V\left(
x_{t}\right) .
\end{eqnarray*}

\item Expectation: If $\left\{ \varepsilon _{t}\right\} $ is i.i.d. and $%
\alpha =0$, $E\left( \left. x_{t}\right\vert x_{t-k}\right) =\phi
^{k}x_{t-k} $
\end{itemize}
\end{frame}
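
\begin{frame}[fragile]
  \frametitle{Sketch: Checking AR(1) Covariances}
  A minimal NumPy check (values illustrative, $\alpha =0$): sample
  autocovariances match $\phi ^{k}V(x_{t})$, and the regression of
  $x_{t}$ on $x_{t-k}$ has slope $\approx \phi ^{k}$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
phi, T = 0.9, 200_000
x = np.zeros(T)
for t in range(1, T):
    x[t] = phi * x[t - 1] + rng.normal()

v = x.var()
for k in (1, 3):
    gamma_k = np.cov(x[k:], x[:-k], ddof=0)[0, 1]
    slope = gamma_k / x[:-k].var()  # OLS slope of x_t on x_{t-k}
    print(gamma_k, phi**k * v, slope, phi**k)
\end{verbatim}
\end{frame}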

\begin{frame}
  \frametitle{AR(p)}

\begin{itemize}
\item Autoregressive function of $p$ lagged $x^{\prime }s$%
\begin{equation*}
x_{t}=\;\alpha +\phi _{1}x_{t-1}+\phi _{2}x_{t-2}+...+\phi
_{p}x_{t-p}+\varepsilon _{t}
\end{equation*}

\item Defining the lag operator by $L^{j}x_{t}=x_{t-j},$ we can rewrite an AR(p) process as
\begin{equation*}
\left( 1-\phi _{1}L-\phi _{2}L^{2}-...-\phi _{p}L^{p}\right) x_{t}=\alpha
+\varepsilon _{t}
\end{equation*}

\item Stationarity condition: The roots of
\begin{equation*}
1-\phi _{1}z-\phi _{2}z^{2}-...-\phi _{p}z^{p}=0
\end{equation*}%
lie outside the unit circle ($|z|>1$ when real)
\end{itemize}
\end{frame}
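
\begin{frame}[fragile]
  \frametitle{Sketch: Checking AR(p) Stationarity}
  A small NumPy check (coefficients illustrative): compute the roots of
  $1-\phi _{1}z-\cdots -\phi _{p}z^{p}$ and verify they lie outside the
  unit circle.
\begin{verbatim}
import numpy as np

phis = [0.5, 0.3]  # phi_1, phi_2 for an AR(2)

# np.roots wants coefficients from the highest power down:
# -phi_p, ..., -phi_1, 1
coeffs = [-p for p in reversed(phis)] + [1]
roots = np.roots(coeffs)
print(roots, np.all(np.abs(roots) > 1))  # True => stationary
\end{verbatim}
\end{frame}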

\begin{frame}\frametitle{AR(p)}
\begin{itemize}
\item For AR(1), $|\phi |<1$ yields stationarity

\item If stationarity holds, we can rewrite $x_{t}$ as a function of
infinitely lagged $\varepsilon ^{\prime }s$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{First-order moving average (MA(1)) process}

\begin{equation*}
x_{t}=\alpha +\varepsilon _{t}+\theta \varepsilon _{t-1}
\end{equation*}

\begin{itemize}
\item $\varepsilon _{t}$ is white noise

\item $E\left( x_{t}\right) =\alpha $

\item $V\left( x_{t}\right) =\left( 1+\theta ^{2}\right) \sigma
_{\varepsilon }^{2}$

\item $C\left( x_{t},x_{t-1}\right) =C\left( \varepsilon _{t}+\theta
\varepsilon _{t-1},\varepsilon _{t-1}+\theta \varepsilon _{t-2}\right)
=\theta \sigma _{\varepsilon }^{2}$

\item $C\left( x_{t},x_{t-k}\right) =C\left( \varepsilon _{t}+\theta
\varepsilon _{t-1},\varepsilon _{t-k}+\theta \varepsilon _{t-k-1}\right) $ $%
=0,\ k>1$
\end{itemize}
\end{frame}
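
\begin{frame}[fragile]
  \frametitle{Sketch: MA(1) Moments}
  A minimal NumPy check ($\alpha =0$, $\sigma _{\varepsilon }=1$,
  $\theta =0.5$ illustrative): variance and first autocovariance match the
  formulas, and covariances vanish beyond one lag.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
theta, T = 0.5, 200_000
e = rng.normal(size=T + 1)
x = e[1:] + theta * e[:-1]  # x_t = eps_t + theta*eps_{t-1}

print(x.var(), 1 + theta**2)                       # ~1.25 both
print(np.cov(x[1:], x[:-1], ddof=0)[0, 1], theta)  # ~0.5 both
print(np.cov(x[2:], x[:-2], ddof=0)[0, 1])         # ~0
\end{verbatim}
\end{frame}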


\begin{frame}
  \frametitle{MA cont'd}
\begin{itemize}
\item Rewrite with lag operator as%
\begin{equation*}
x_{t}-\alpha =(1+\theta L)\varepsilon _{t}
\end{equation*}

\item When the root of
\begin{equation*}
1+\theta z=0
\end{equation*}%
lies outside the unit circle (i.e., when $|\theta |<1$), $x_{t}$ is said to be invertible%
\begin{eqnarray*}
\varepsilon _{t} &=&\frac{\left( x_{t}-\alpha \right) }{(1+\theta L)} \\
&=&\frac{-\alpha }{1+\theta }+\overset{\infty }{\underset{j=0}{\sum }}\left(
-\theta \right) ^{j}x_{t-j},
\end{eqnarray*}
\item i.e., the residual can be expressed as an infinite sum of lagged $x^{\prime }s$
\end{itemize}
\end{frame}
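
\begin{frame}[fragile]
  \frametitle{Sketch: Inverting an MA(1)}
  A minimal NumPy sketch (values illustrative): recover $\varepsilon _{t}$
  recursively as $\varepsilon _{t}=x_{t}-\theta \varepsilon _{t-1}$; the
  effect of the unknown initial condition dies out geometrically when
  $|\theta |<1$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
theta, T = 0.5, 1_000
e = rng.normal(size=T + 1)
x = e[1:] + theta * e[:-1]  # x_t = eps_t + theta*eps_{t-1}

e_hat = np.zeros(T)
e_hat[0] = x[0]  # guess eps_{-1} = 0
for t in range(1, T):
    e_hat[t] = x[t] - theta * e_hat[t - 1]

print(np.max(np.abs(e_hat[50:] - e[51:])))  # ~0: error dies out
\end{verbatim}
\end{frame}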

\begin{frame}
  \frametitle{MA(q)}
\begin{itemize}
\item $x_{t}$ is a function of $q$ lagged residuals%
\begin{equation*}
x_{t}=\alpha +\varepsilon _{t}+\theta _{1}\varepsilon _{t-1}+\theta
_{2}\varepsilon _{t-2}+...+\theta _{q}\varepsilon _{t-q}
\end{equation*}

\item Rewriting with lag operator yields%
\begin{equation*}
\left( 1+\theta _{1}L+\theta _{2}L^{2}+...+\theta _{q}L^{q}\right)
\varepsilon _{t}=x_{t}-\alpha
\end{equation*}

\item Invertibility condition: the roots of
\begin{equation*}
1+\theta _{1}z+\theta _{2}z^{2}+...+\theta _{q}z^{q}=0
\end{equation*}%
lie outside the unit circle ($|z|>1$ when real)

\item If the invertibility condition holds, we can write $\varepsilon
_{t}$ as a function of infinitely many lagged $x^{\prime }s$
\end{itemize}
\end{frame}

\section{ARMA(p,q) process}

\begin{frame}  \frametitle{ARMA process}
\begin{align*}
x_{t}=& \;\alpha +\phi _{1}x_{t-1}+\phi _{2}x_{t-2}+...+\phi _{p}x_{t-p} \\
& +\varepsilon _{t}+\theta _{1}\varepsilon _{t-1}+\theta _{2}\varepsilon
_{t-2}+...+\theta _{q}\varepsilon _{t-q}.
\end{align*}

\begin{itemize}
\item Stationarity condition

\begin{itemize}

\item Depends entirely on autoregressive coefficients

\item The roots of
\begin{equation*}
1-\phi _{1}z-\phi _{2}z^{2}-...-\phi _{p}z^{p}=0,
\end{equation*}%
must lie outside the unit circle $\left( |\cdot |>1\text{ when real}\right) $
\end{itemize}
\end{itemize}
\end{frame}
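
\begin{frame}[fragile]
  \frametitle{Sketch: Simulating an ARMA(1,1)}
  A minimal NumPy check (values illustrative): for an ARMA(1,1),
  autocovariances decay at rate $\phi $ beyond the first lag,
  $\gamma _{k}=\phi \gamma _{k-1}$ for $k\geq 2$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
phi, theta, T = 0.8, 0.4, 200_000
e = rng.normal(size=T)
x = np.zeros(T)
for t in range(1, T):
    x[t] = phi * x[t - 1] + e[t] + theta * e[t - 1]

g = [np.cov(x[k:], x[:-k], ddof=0)[0, 1] for k in (1, 2, 3)]
print(g[1] / g[0], g[2] / g[1], phi)  # ratios approx phi
\end{verbatim}
\end{frame}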


\begin{frame}\frametitle{Example: AR(1) Stationarity}
\begin{itemize}
\item Example: For AR(1)
\begin{equation*}
1-\phi _{1}z=0
\end{equation*}%
implying%
\begin{equation*}
z=\frac{1}{\phi _{1}}
\end{equation*}%
need%
\begin{equation*}
|z|=|\phi _{1}^{-1}|>1\hspace{0.2in}\text{requiring }|\phi _{1}|<1
\end{equation*}%

\end{itemize}
\end{frame}

  \begin{frame}
    \frametitle{Invertibility Condition}
\begin{itemize}
\item The roots of
\begin{equation*}
1+\theta _{1}z+\theta _{2}z^{2}+...+\theta _{q}z^{q}=0,
\end{equation*}%
must lie on or outside the unit circle $\left( |\cdot |\geq 1\text{ when real}%
\right) $

\item Example: For MA(1)%
\begin{equation*}
1+\theta _{1}z=0
\end{equation*}%
implying%
\begin{equation*}
z=-\frac{1}{\theta _{1}}
\end{equation*}%
need%
\begin{equation*}
|z|=|\theta _{1}^{-1}|\geq 1\hspace{0.2in}\text{requiring }|\theta _{1}|\leq
1,
\end{equation*}%
where unity is included as a limit
\end{itemize}
\end{frame}

\begin{frame}\frametitle{Stationarity and invertibility}
\begin{itemize}
\item Stationarity and invertibility imply

\begin{itemize}
\item if $\varepsilon _{t}$ is i.i.d., then $\varepsilon _{t}$ is the
innovation to $x_{t}$%
\begin{equation*}
\varepsilon _{t}=x_{t}-E\left( \left. x_{t}\right\vert
x_{t-1},x_{t-2},....\right) .
\end{equation*}

\item knowledge of the entire sequences $\left\{ \varepsilon _{t-j}\right\}
_{j=0}^{\infty }$ and $\left\{ x_{t-j}\right\} _{j=0}^{\infty }$ is
equivalent
\end{itemize}
\end{itemize}
\end{frame}

\section{Detrending}

\begin{frame}\frametitle{Detrending}
\begin{itemize}
\item Most of our (business cycle) models have nothing to say about trends in the data.
\item That is, these models generally don't explain growth.
\item We need to detrend the data to obtain an appropriate (stationary) series.
\item $Y_{t}=X_{t}+z_{t}$

\begin{itemize}
\item $X_{t}$ is stationary

\item $z_{t}$ is a trend
\end{itemize}

\item Trend stationary

\begin{itemize}
\item $z_{t}$ is deterministic

\item example: $z_{t}=\alpha t$
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}\frametitle{Difference Stationary}
\begin{itemize}
\item Difference stationary

\begin{itemize}
\item $z_{t}$ is a random walk with $\left\{ \varepsilon _{t}\right\} $ a
white noise process%
\begin{equation*}
z_{t}=z_{0}+\sum_{j=1}^{t}\varepsilon _{j}
\end{equation*}%
\begin{equation*}
z_{t-1}=z_{0}+\sum_{j=1}^{t-1}\varepsilon _{j}
\end{equation*}%
\begin{equation*}
z_{t}-z_{t-1}=\varepsilon _{t}
\end{equation*}%

\end{itemize}
\end{itemize}
\end{frame}

\subsection{Detrend Data before Computing Moments}

\begin{frame}\frametitle{Three approaches}
\begin{itemize}
\item Linear detrending: regress the data on time and keep the residuals

\item Hodrick-Prescott (HP) filter: separate the data into a trend
component and a cyclical component, and keep the cyclical component

\item First-difference the data
\end{itemize}
\end{frame}
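
\begin{frame}[fragile]
  \frametitle{Sketch: Linear Detrending and Differencing}
  A minimal NumPy sketch (series illustrative) of the first and third
  approaches; the HP filter is sketched after the next slide.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
T = 200
t = np.arange(T)
y = 0.02 * t + np.cumsum(0.1 * rng.normal(size=T))

# 1) Linear detrending: regress y on time, keep residuals
b, a = np.polyfit(t, y, 1)  # slope, intercept
cycle_linear = y - (a + b * t)

# 3) First differencing
dy = np.diff(y)
\end{verbatim}
\end{frame}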

\begin{frame}\frametitle{HP Filter}
\begin{itemize}
\item Most common approach: HP filter.
\item Idea: separate low-frequency trends from high-frequency cycles.
\item Let $\{ y_t\}_{t=1}^T$ be a given series, decomposed as $y_{t} = x_{t} + z_{t}$.
\item Here $x_{t}$ is the trend component and $z_{t}$ is cyclical (note the labels are reversed relative to the earlier slide).
\item Let $\lambda > 0$ be a smoothing parameter ($\lambda = 1600$ is standard for quarterly data), and consider the problem
\begin{align}
\min\limits_{x_1,x_2,\hdots,x_T}\textstyle\sum\limits_{t=1}^T (y_t-x_t)^2+\lambda\textstyle\sum\limits_{t=2}^{T-1}[(x_{t+1}-x_t)-(x_t-x_{t-1})]^2
\end{align}
\item What is going on here?
  \begin{itemize}
  \item The first term pulls the trend toward the data, shrinking the cyclical component.
  \item The second term penalizes changes in the trend's growth rate; $\lambda$ governs how smooth the trend must be.
  \end{itemize}
\end{itemize}
\end{frame}
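
\begin{frame}[fragile]
  \frametitle{Sketch: HP Filter}
  A minimal NumPy implementation: the first-order condition of the
  minimization is the linear system $(I+\lambda D^{\prime }D)x=y$, where
  $D$ is the $(T-2)\times T$ second-difference matrix.
\begin{verbatim}
import numpy as np

def hp_filter(y, lam=1600.0):
    """Return (trend, cycle) for series y."""
    T = len(y)
    D = np.zeros((T - 2, T))
    for i in range(T - 2):
        D[i, i:i + 3] = [1.0, -2.0, 1.0]  # second differences
    trend = np.linalg.solve(np.eye(T) + lam * (D.T @ D), y)
    return trend, y - trend

rng = np.random.default_rng(0)
y = 0.02 * np.arange(200) + 0.1 * rng.normal(size=200)
trend, cycle = hp_filter(y)
\end{verbatim}
\end{frame}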


\section{Conclusion}
% ------------------------------------------------


\begin{frame}
  \frametitle{Next Time}
  \begin{itemize}
  \item Discuss expectations in linear difference equations.
  \item Please turn your homework in by this evening.
  \item See my webpage for new homework (may not be up by class).
  \end{itemize}
\end{frame}


\end{document}
