diff --git a/.github/workflows/matlab.yml b/.github/workflows/matlab.yml index 716acaf..8815ee4 100644 --- a/.github/workflows/matlab.yml +++ b/.github/workflows/matlab.yml @@ -104,4 +104,11 @@ jobs: with: command: | cd("progs/matlab") - BVARZLB_run \ No newline at end of file + BVARZLB_run + - name: Run week 13 scripts + uses: matlab-actions/run-command@v1 + with: + command: | + cd("progs/matlab") + rbcLogutilSSTest + rbcSSTest \ No newline at end of file diff --git a/.gitignore b/.gitignore index 4be4979..0bea063 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,15 @@ week_*_solution.tex progs/matlab/BVARZLB_results_noZLB.log progs/matlab/BVARZLB_results_withZLB.log + +progs/dynare/\+rbcCES/ + +progs/dynare/\+rbcLogutil/ + +progs/dynare/rbcCES/ + +progs/dynare/rbcLogutil/ + +progs/dynare/rbcCES.log + +progs/dynare/rbcLogutil.log diff --git a/README.md b/README.md index da4585e..9cec913 100644 --- a/README.md +++ b/README.md @@ -255,6 +255,30 @@ Familiarize yourself with + +
+ Week 13: Introduction to DSGE models + +### Goals + +* understand the DSGE model framework, its basic structure and key challenges +* understand the algebra of a basic RBC model and of a basic New Keynesian model +* compute the steady-state of the RBC model with either MATLAB or Dynare + + +### To Do + +* [x] Review the solutions of [last week's exercises](https://github.com/wmutschl/Quantitative-Macroeconomics/releases/latest/download/week_12.pdf) and write down all your questions +* [x] Read Fernandez-Villaverde, Rubio-Ramirez, and Schorfheide (2016, Ch.1) and Torres (2013, Ch. 1). +* [x] Read EITHER Gali (2015, Ch. 3) OR Heijdra (2017, Ch. 9) OR Romer (2019, Ch. 7) OR Woodford (2003, Ch. 3) OR Walsh (2017, Ch. 8) +* [x] Watch [Algebra of New Keynesian models](https://mutschler.eu/dynare/models/nk/) +* [x] Make note of all the aspects and concepts that you are not familiar with or that you find difficult to understand. +* [x] Do exercise sheet 13 +* [x] If you have questions, get in touch with me via email or (better) [schedule a meeting](https://schedule.mutschler.eu) + +
+ + ## Content We cover modern theoretical macroeconomics (the study of aggregated variables such as economic growth, unemployment and inflation by means of structural macroeconomic models) and combine it with econometric methods (the application of formal statistical methods in empirical economics). We focus on the quantitative aspects and methods for solving and estimating the most prominent model classes in macroeconomics: Structural Vector Autoregressive (SVAR) and Dynamic Stochastic General Equilibrium (DSGE) models. Using these two model strands, the theoretical and methodological foundations of quantitative macroeconomics is taught. The students are thus enabled to understand the analyses and forecasts of public (universities, central banks, economic research institutes) as well as private (business banks, political consultations) research departments, but also to derive and empirically evaluate their own structural macroeconomic models. diff --git a/exercises/_common_header.tex b/exercises/_common_header.tex index 56c9bc3..671e7dc 100644 --- a/exercises/_common_header.tex +++ b/exercises/_common_header.tex @@ -13,44 +13,9 @@ \usepackage{hyperref} \usepackage{enumitem} \usepackage{graphicx} -\usepackage[usenames,dvipsnames]{xcolor} +\usepackage[dvipsnames]{xcolor} \definecolor{mygreen}{rgb}{0,0.4,0} \definecolor{mygray}{rgb}{0.5,0.5,0.5} -% \usepackage{listingsutf8} -% \lstset{language=Matlab, % Use MATLAB -% backgroundcolor=\color{white}, % choose the background color; you must add \usepackage{color} or \usepackage{xcolor} -% frame=leftline, % Single frame around code -% basicstyle=\footnotesize, % Use small true type font -% breaklines=true, % sets automatic line breaking -% breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace -% captionpos=t, % sets the caption-position to bottom -% keywordstyle=[1]\color{Blue}\bfseries, % MATLAB functions bold and blue -% keywordstyle=[2]\color{Purple}, % MATLAB function arguments purple 
-% keywordstyle=[3]\color{Blue}\underbar, % User functions underlined and blue -% morekeywords={matlab2tikz,varobs,model,var,end,estimation,parameters,estimated_params,varexo,shocks,steady_state_model,check,steady,stoch_simul,stderr,corr,steady_state,initval}, -% deletekeywords={beta,log,LOG,PI,pi,Pi,what}, % if you want to delete keywords from the given language -% identifierstyle=, % Nothing special about identifiers -% commentstyle=\usefont{T1}{pcr}{m}{sl}\color{mygreen}\small, % Comments small dark green courier -% stringstyle=\color{Purple}, % Strings are purple -% showstringspaces=false, % Don't put marks in string spaces -% showspaces=false, % show spaces everywhere adding particular underscores; it overrides 'showstringspaces' -% showtabs=false, % show tabs within strings adding particular underscores -% tabsize=3, % 3 spaces per tab -% morecomment=[l][\color{Blue}]{...}, % Line continuation (...) like blue comment -% morecomment=[l]{//}, -% morecomment=[s]{/*}{*/}, -% %numbers=left, % Line numbers on left -% numberblanklines=false, -% firstnumber=1, % Line numbers start with line 1 -% numberstyle=\tiny\color{mygray}, % Line numbers are lightgray -% numbersep=5pt, % how far the line-numbers are from the code -% numberbychapter=false, -% stepnumber=5, % Line numbers go in steps of 5 -% escapeinside={(*@}{@*)}, % if you want to add LaTeX within your code -% keepspaces=true, % keeps spaces in text, useful for keeping indentation of code (possibly needs columns=flexible) -% rulecolor=\color{black}, % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. 
comments (green here)) -% title=\lstname, % show the filename of files included with \lstinputlisting; also try caption instead of title -% } \usepackage[numbered,framed]{matlab-prettifier} \usepackage[backend=biber,style=authoryear]{biblatex} \addbibresource{literature/_biblio.bib} @@ -61,9 +26,6 @@ \usepackage{tikz} \usetikzlibrary{positioning} \usetikzlibrary{decorations.text} -%\makeatletter -%\newcommand*{\currentname}{\@currentlabelname} -%\makeatother \renewcommand{\contentsname}{Overview} \usepackage[ diff --git a/exercises/dsge_definition_challenges_structure.tex b/exercises/dsge_definition_challenges_structure.tex new file mode 100644 index 0000000..3976b4c --- /dev/null +++ b/exercises/dsge_definition_challenges_structure.tex @@ -0,0 +1,22 @@ +\section[DSGE Models: Definition, Key Challenges, Basic Structure]{DSGE Models: Definition, Key Challenges, Basic Structure\label{ex:DSGEModelsDefinitionChallengesStructure}} +\begin{enumerate} +\item Briefly define the term and key challenges of \textbf{D}ynamic \textbf{S}tochastic \textbf{G}eneral \textbf{E}quilibrium (DSGE) models. +What are DSGE models useful for? +\item Outline the common structure of a DSGE model. +How do Neo-Classical, New-Classical and New-Keynesian models differ? +\item Comment whether or not the assumptions underlying DSGE models should be as realistic as possible. +For example, a very common assumption is that all agents live forever. 
+\end{enumerate} + +\paragraph{Readings} +\begin{itemize} +\item \textcite[Ch.~1]{Fernandez-Villaverde.Rubio-Ramirez.Schorfheide_2016_SolutionEstimationMethods} +\item \textcite[Ch.~1]{Torres_2013_IntroductionDynamicMacroeconomic} +\end{itemize} + +\begin{solution}\textbf{Solution to \nameref{ex:DSGEModelsDefinitionChallengesStructure}} +\ifDisplaySolutions +\input{exercises/dsge_definition_challenges_structure_solution.tex} +\fi +\newpage +\end{solution} \ No newline at end of file diff --git a/exercises/dsge_definition_challenges_structure_solution.tex b/exercises/dsge_definition_challenges_structure_solution.tex new file mode 100644 index 0000000..63dc36a --- /dev/null +++ b/exercises/dsge_definition_challenges_structure_solution.tex @@ -0,0 +1,125 @@ +\begin{enumerate} +\item DSGE models use modern macroeconomic theory to explain and predict co-movements of aggregate time series. +DSGE models start from what we call the micro-foundations of macroeconomics (i.e. to be consistent with the underlying behavior of economic agents), +with a heart based on the rational expectation forward-looking economic behavior of agents. +In reality all macro variables are related to each other, either directly or indirectly, + so there is no \enquote{ceteris paribus}, but a dynamic stochastic general equilibrium system. +\begin{itemize} + \item General Equilibrium (GE): equations must always hold. + \\ + Short-run: decisions, quantities and prices adjust such that equations are fulfilled. + \\ + Long-run: steady-state, i.e. a condition or situation where variables do not change their value (e.g. balanced-growth path where the rate of growth is constant). + \item Stochastic (S): disturbances (or shocks) make the system deviate from its steady-state, we get business cycles or, more general, a data-generating process + \item Dynamic (D): Agents are forward-looking and solve intertemporal optimization problems.
+ When a disturbance hits the economy, macroeconomic variables do not return to equilibrium instantaneously, + but change gradually over time, producing complex reactions. + Furthermore, some decisions like investment or saving only make sense in a dynamic context. + We can analyze and quantify the effects after + (i) a temporary shock: how does the economy return to its steady-state, or + (ii) a permanent shock: how does the economy transition to a new steady-state. +\end{itemize} + +Basic model structure: +\begin{align*} + E_t \left[f(y_{t+1}, y_t, y_{t-1},u_t)\right]=0 +\end{align*} +where $E_t$ is the expectation operator with information conditional up to and including period $t$, + $y_t$ is a vector of endogenous variables at time $t$, + $u_t$ a vector of exogenous shocks or random disturbances with proper density functions. +$f(\cdot)$ is what we call economic theory. +\\ +\textbf{First key challenge:} values of endogenous variables in a given period of time depend on future expected values. +We need dynamic programming techniques to find the optimality conditions which define the economic behavior of the agents. +The solution to this system is called a decision or \textbf{policy function}: +\begin{align*} + y_t = g(y_{t-1},u_t) +\end{align*} +describing optimal behavior of all agents given the current state of the world $y_{t-1}$ and after observing current shocks $u_t$. +\\ +\textbf{Second key challenge}: DSGE models cannot be solved analytically, except for some very simple and unrealistic examples. +We have to resort to numerical methods and a computer to find an approximated solution. +\\ +\textbf{Third key challenge}: Once the theoretical model and solution is at hand, the next step is the application to the data. +A common procedure called calibration is assigning values to the parameters of the model + by using previous information or matching some key ratios or moments provided by the data.
+More recently, researchers are commonly applying formal statistical methods to estimate the parameters using + maximum likelihood, Bayesian techniques, indirect inference, or a method of moments. + +\item The dynamic equilibrium is the result from the combination of economic decisions taken by all economic agents. +For example, the following agents or sectors are commonly included: +\begin{itemize} + \item Households: benefit from private consumption, leisure and possibly other things like money holdings or state services; + subject to a budget constraint in which they finance their expenditures via (utility-reducing) work, renting capital and buying (government) bonds + $\hookrightarrow$ maximization of utility + + \item Firms produce a variety of products with the help of rented equipment (capital) and labor. + They (possibly) have market power over their product and are responsible for the design, manufacture and price of their products. + $\hookrightarrow$ cost minimization or profit maximization + \item Monetary policy follows a feedback rule for either interest rates or money supply (growth). + For instance: nominal interest rate reacts to deviations of the current (or lagged) inflation rate from its target and of current output from potential output. + + \item Fiscal policy (the government) collects taxes from households and companies + in order to finance government expenditures (possibly utility-enhancing) and government investment (possibly productivity-enhancing). + In addition, the government can issue debt securities. + \end{itemize} + There is no limitation, i.e. you can also add other agents and sectors like financial intermediaries (banks), international trade, research \& development, climate, etc. + + \item Neoclassical or New-Classical models are basically the same terminology (unless you study economic history or really want to dive into the different school of thoughts). 
+ Basically, both approaches focus on so-called \textbf{micro-foundations}, + the one more in a classical sense (focus on real rigidities) + and the other more in a Keynesian sense (focus on nominal rigidities). + In principle this is already evident in the baseline RBC model and the baseline New-Keynesian model: + \begin{itemize} + \item RBC model is the canonical neoclassical model: + reduce economy to the interaction of just one (representative) consumer/household and one (representative) firm. + Representative household takes decisions in terms of how much to consume (save) and how much time is devoted to work (leisure). + Representative firm decides how much it will produce. + Equilibrium of the economy will be defined by a situation in which all decisions taken by all economic agents are compatible and feasible. + One can show that business cycles can be generated by one special disturbance: + total factor productivity or neutral technological shock; + hence, the model generates so-called real business cycles without nominal frictions. + Moreover, there is monetary neutrality in the model. + \item New-Keynesian models have the same foundations as New-Classical general equilibrium models, + but incorporate different types of rigidities in the economy. + Whereas new classical DSGE models are constructed on the basis of a perfect competition environment, + New-Keynesian models include additional elements to the basic model such as imperfect competition, + existence of adjustment costs in the investment process, + liquidity constraints or rigidities in the determination of prices and wages. + Due to these nominal rigidities there is no monetary neutrality in the short run. + Moreover, New-Keynesian models have become the leading macroeconomic paradigm. + \end{itemize} + Note that the scale of DSGE models has grown over time with incorporation of a large number of features.
+ To name a few: consumption habit formation, nominal and real rigidities, non-Ricardian agents, + investment adjustment costs, investment-specific technological change, taxes, public spending, public capital, human capital, + household production, imperfect competition, monetary union, steady-state unemployment, green vs. brown production sector etc. + + \item The degree of realism offered by an economic model is not a goal per se to be pursued by macroeconomists; + typically we are focused on the model's \textbf{usefulness} in explaining macroeconomic reality. + General strategy is the construction of formal structures through equations that reflect the interrelationships between the different economic variables. + These simplified structures are what we call a model. + The essential question is not that these theoretical constructions are realistic descriptions of the economy, + but that they are able to explain the dynamics observed in the economy. + Therefore, it is not possible to reject a model ex-ante because it is based on assumptions that we believe are not realistic. + Rather, the validations must be based on the usefulness of these models to explain reality, and whether they are more useful than other models. + Of course, most of the times unrealistic assumptions will yield non-useful models; + often, however, simplified assumptions that are a very rough approximation of reality yield quite useful models. + Either way, the DSGE model paradigm is up-front with our assumptions + and provides the EXACT model dynamics in terms of mathematically correct formulations that can be challenged, adapted and, ideally, improved. + + Regarding the assumption that the lifetime of economic agents is assumed to be infinite: + We know that the lifetime of consumers, firms and governments is in fact finite.
+ Nevertheless, in most models this is a valid approximation of reality, + because for solving and simulating these models it is not important that agents actually live forever, + but that they use the infinite time horizon as \textbf{their reference period for taking economic decisions}. + Framed this way, the assumption becomes highly realistic. + Viewing the economy from a macroeconomic point of view: + No government thinks it will cease to exist at some point in the future and + no entrepreneur takes decisions based on the idea that the firm will go bankrupt sometime in the future. + Granted, for consumers this is rather weak; however, we may think about families, dynasties or households rather than individual consumers. + Again, the infinite time planning horizon assumption is a feasible one. + On the other hand, if you want to study the finite life cycle of an agent (school-work-retirement) or pension schemes, + the so-called Overlapping-Generations (OLG) framework is probably more adequate. + Either way, we need the same methods and techniques to deal with OLG models as we do with New-Keynesian models or RBC models, + because all these models belong to the same class, i.e. are all DSGE models. +\end{enumerate} \ No newline at end of file diff --git a/exercises/nk_model_algebra.tex b/exercises/nk_model_algebra.tex new file mode 100644 index 0000000..90240a5 --- /dev/null +++ b/exercises/nk_model_algebra.tex @@ -0,0 +1,231 @@ +\section[The Algebra of New Keynesian Models]{The Algebra of New Keynesian Models\label{ex:AlgebraNewKeynesianModels}} +Consider the basic New Keynesian (NK) model without capital, a linear production function and \textcite{Calvo_1983_StaggeredPricesUtilitymaximizing} price frictions. + +\paragraph{Households} The economy is assumed to be inhabited by a large number of identical households.
+The representative household maximizes present as well as expected future utility +\begin{align*} +\max E_t \sum_{j=0}^{\infty} \beta^{j} U(c_{t+j}, l_{t+j}, z_{t+j}) +\end{align*} +with \(\beta <1\) denoting the discount factor and \(E_t\) is the expectation operator conditional on information at time \(t\). +The contemporaneous utility function +\begin{align*} +U(c_t, l_t, z_t) = z_t \cdot \left( \frac{c_t^{1-\sigma}}{1-\sigma} - \frac{l_t^{1+\varphi}}{1+\varphi} \right) +\end{align*} +has three arguments: a consumption index \(c_t\), a labor supply index \(l_t\) which corresponds to either hours worked or employed household members, + and an exogenous preference shifter \(z_t\). +Note that the marginal utility of consumption is positive, whereas more labor reduces utility. +The inverse of \(\sigma \) is the intertemporal elasticity of substitution, + whereas the inverse of \(\varphi \) is the Frisch elasticity of labor. +Note that the exogenous preference shifter \(z_t\) influences only intertemporal decisions, but not intratemporal ones. +The consumption index is formed by aggregating a continuum of goods represented on the interval \(h\in [0,1]\) + into a single consumption good using a \textcite{Dixit.Stiglitz_1977_MonopolisticCompetitionOptimum} aggregation technology: +\begin{align*} + c_t = {\left(\int_0^1 c_t(h)^{\frac{\epsilon-1}{\epsilon}} dh \right)}^{\frac{\epsilon}{\epsilon-1}} +\end{align*} +That is, \(c_t(h)\) denotes the quantity of good \(h\) consumed by the household in period \(t\). +\(\epsilon>1\) is an elasticity parameter measuring the \emph{love-of-variety}. +The household decides how to allocate its consumption expenditures among the different goods by taking the price \(P_t(h)\) of good \(h\) as given + and maximizing the consumption index \(c_t\) for any given level of expenditures. +Similarly, in each period the household takes the nominal wage \(W_t\) as given and supplies perfectly elastic labor service to the firm sector. 
+In return she receives nominal labor income \(W_t l_t\) and, additionally, + nominal profits and dividends \(P_t \int_0^1 {div}_t(f)df\) from each firm \(f\in[0,1]\) in the intermediate goods sector, + because the firms are owned by the household. +Moreover, the household purchases a quantity of one-period nominally riskless bonds \(B_t\) at price \(Q_t\). +The bond matures the following period and pays one unit of money at maturity. +Income and wealth are used to finance consumption expenditures. +In total this defines the (nominal) budget constraint of the household +\begin{align*} +\int_0^1 P_t(h) c_t(h) dh + Q_t B_t \leq B_{t-1} + W_t l_t + P_t \int_0^1 div_t(f) df +\end{align*} +In addition, it is assumed that the household is subject to a solvency constraint that prevents it from engaging in Ponzi-type schemes: +\begin{align*} +\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T }\right \} \geq 0 +\end{align*} +for all periods \(t\), where +\begin{align} +\Lambda_{t,T} = \beta^{T-t} \frac{\partial{U(c_{T},l_{T},z_T)}/\partial c_T}{\partial{U(c_{t},l_{t},z_T)}/\partial c_t} \label{eq:StochasticDiscountFactor} +\end{align} +denotes the stochastic discount factor. + +Furthermore, let \(\Pi_t=P_t/P_{t-1}\) denote the gross inflation rate, + then the following relationships for the nominal interest rate \(R_t\) and the real interest rate \(r_t\) hold: +\begin{align} +Q_t &= \frac{1}{R_t} \label{eq:NominalInterestRate} +\\ +R_t &= r_t E_t \Pi_{t+1} \label{eq:RealInterestRate} +\end{align} + +\begin{enumerate} +\item Explain the economic intuition behind equation \eqref{eq:NominalInterestRate} that determines the nominal interest rate + and equation~\eqref{eq:RealInterestRate} that determines the real interest rate. + +\item Is there debt in this model? In other words, what is the optimal path for \(B_t\) in this model? 
+ +\item Explain the difference between the solvency constraint \( \lim_{T \rightarrow \infty} E_t {\left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \}} \geq 0 \) +and the transversality condition \(\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \} = 0\) which holds in the optimum allocation. + +\item Show that cost minimization of consumption expenditures implies +\begin{align*} +c_t(h) &= {\left(\frac{P_t(h)}{P_t}\right)}^{-\epsilon} c_t +\\ +P_t &= {\left(\int_0^1 P_t(h)^{1-\epsilon}dh\right)}^{\frac{1}{1-\epsilon}} +\end{align*} +Interpret these equations. What does this imply for the budget constraint? + +\item Derive the intratemporal and intertemporal optimality conditions: +\begin{align*} +w_t &:= \frac{W_t}{P_t} = - \frac{\frac{\partial U(c_t,l_t,z_t)}{\partial l_t}}{\frac{\partial U(c_t,l_t,z_t)}{\partial c_t}} +\\ +\frac{\partial U(c_t,l_t,z_t)}{\partial c_t} &= \beta E_t \left[\frac{\partial U(c_{t+1},l_{t+1},z_{t+1})}{\partial c_{t+1}} r_t \right] +\end{align*} +where \(w_t\) denotes the real wage and \(\Pi_{t+1} = P_{t+1}/P_t\) the gross inflation rate. Interpret these equations. +\end{enumerate} + +\paragraph{Firms: final good} +The economy is populated by a continuum of firms indexed by \(f \in [0,1]\) that produce differentiated goods \(y_t(f)\). +The technology for transforming these intermediate goods into the final output good \(y_t\) has the \textcite{Dixit.Stiglitz_1977_MonopolisticCompetitionOptimum} form: +\begin{align} +y_t = {\left[\int\limits_0^1 {(y_t(f))}^{\frac{\epsilon-1}{\epsilon}}df\right]}^{\frac{\epsilon}{\epsilon-1}} \label{eq:Firms_DS_Aggregator} +\end{align} +where \(\epsilon>1\) is the substitution elasticity between inputs, the so-called \emph{love-of-variety}. 
+\begin{enumerate}[resume] +\item Show that profit maximization in the final goods sector implies: +\begin{align*} +y_t(f) &= {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} y_t +\\ +P_t &= {\left[\int_{0}^{1} P_t(f)^{1-\epsilon}df\right]}^{\frac{1}{1-\epsilon}} +\end{align*} +Interpret these equations. What does this imply for profits in the final goods sector? +\end{enumerate} + +\paragraph{Firms: intermediate goods} +Intermediate firm \(f\) uses the following linear production function to produce their differentiated good +\begin{align} +y_t(f) = a_t l_{d,t}(f) \label{eq:Firms_ProductionFunction} +\end{align} +where \(a_t\) denotes the common technology level available to all firms. +Firms face perfectly competitive factor markets for hiring labor \(l_{d,t}(f)\). +Real profits of firm \(f\) are equal to revenues from selling its differentiated good at price \(P_t(f)\) minus costs from hiring labor at wage \(w_t\): +\begin{align} +{div}_t(f) = \frac{P_t(f)}{P_t} y_t(f) - w_t l_{d,t}(f) \label{eq:Firms.Profits} +\end{align} +The objective of the firm is to choose contingent plans for \(P_t(f)\) and \(l_{d,t}(f)\) so as to maximize the present discounted value of nominal dividend payments given by +\begin{align*} +E_t \sum_{j=0}^{\infty}\Lambda_{t,t+j} P_{t+j} div_{t+j}(f) +\end{align*} +where household's stochastic discount factor \(\Lambda_{t,t+j}\) takes into account that firms are owned by the household. + +Prices of intermediate goods are determined by nominal contracts as in \textcite{Calvo_1983_StaggeredPricesUtilitymaximizing} and \textcite{Yun_1996_NominalPriceRigidity}. +In each period firm \(f\) faces a constant probability \(1-\theta, 0\leq \theta \leq 1\), of being able to re-optimize the price \(P_t(f)\) of its good \(y_t(f)\). +The probability is independent of the time it last reset its price. 
+Formally: +\begin{align} +P_t(f) = \begin{cases} +\widetilde{P}_t(f) & \text{with probability } 1-\theta +\\ +P_{t-1}(f) & \text{with probability } \theta +\end{cases} +\label{eq:NewKeynesianCalvoMechanism} +\end{align} +where \(\widetilde{P}_t(f)\) is the re-optimized price in period \(t\). +Accordingly, when a firm cannot re-set its price for \(j\) periods, its price in period \(t+j\) is given by \(\widetilde{P}_t(f)\) + and stays there until the firm can optimize it again. +Hence, the firm's objective in \(t\) is to set \(\widetilde{P}_t(f)\) to maximize expected profits + until it can re-optimize the price again in some future period \(t+j\). +The probability to be stuck at the same price for \(j\) periods is given by \(\theta^j\). + +\begin{enumerate}[resume] +\item Derive the following expression for the stochastic discount factor: +\begin{align*} +\Lambda_{t,t+1+j} = \beta \frac{\lambda_{t+1}}{\lambda_t} \Pi_{t+1}^{-1} \Lambda_{t+1,t+1+j} +\end{align*} + +\item Show that the optimal labor demand schedule of intermediate good firm \(f\) is given by: +\begin{align*} +w_t = mc_t(f) a_t = mc_t(f) \frac{y_t(f)}{l_{d,t}(f)} +\end{align*} +where \(mc_t(f)\) are real marginal costs of firm \(f\). +What does this imply for aggregate real marginal costs \(mc_t=\int_0^1 mc_t(f)df\)? + +\item Denote \(\widetilde{p}_t := \frac{\widetilde{P}_t(f)}{P_t}\) and show that optimal price setting of intermediate firms must satisfy: +\begin{align*} +\widetilde{p}_t \cdot s_{1,t} &= \frac{\epsilon}{\epsilon-1} \cdot s_{2,t} \\ +s_{1,t}&=y_t \frac{\partial U(c_t,n^s_t,z_t)}{\partial c_t} + \beta \theta E_t \Pi_{t+1}^{\epsilon-1}s_{1,t+1} \\ +s_{2,t}&= mc_t y_{t} \frac{\partial U(c_t,n^s_t,z_t)}{\partial c_t} + \beta \theta E_t \Pi_{t+1}^{\epsilon} s_{2,t+1} +\end{align*} +Explain why firms that reset prices set the same price, i.e. +\(\widetilde{P}_t(f) =\widetilde{P}_t\) or in other words we can drop the \(f\). 
+ +\item Show that the law of motion for the optimal re-set price \(\widetilde{p}_t = \frac{\widetilde{P}_t(f)}{P_t}\) is given by: +\begin{align*} +1&=\theta \Pi_{t}^{\epsilon-1}+\left(1-\theta\right) \widetilde{p}_t^{1-\epsilon} +\end{align*} + +\end{enumerate} + +\paragraph{Monetary Policy} +The central bank adjusts the nominal interest rate \(R_t\) according to an interest rate rule in response to deviations of (i) gross inflation \(\Pi_t\) from a target \(\Pi^*\) and (ii) output \(y_t\) from steady-state output \(y\): +\begin{align} + R_t = R {\left(\frac{\pi_t}{\pi^*}\right)}^{\phi_\pi} {\left(\frac{y_t}{y}\right)}^{\phi_y} e^{\nu_t} \label{eq:NewKeynesianMonetaryPolicyRule} +\end{align} +where \(R\) denotes the nominal interest rate in steady state, \(\phi_\pi \) the sensitivity parameter to inflation deviations, \(\phi_y\) the feedback parameter of the output gap and \(\nu_t\) an exogenous deviation to the rule. + + +\paragraph{Exogenous variables and stochastic shocks} +The exogenous preference shifter \(z_t\), the level of technology \(a_t\) and the exogenous deviations \(\nu_t\) from the monetary rule evolve according to +\begin{align} + \log{z_t} &= \rho_z \log{z_{t-1}} + \varepsilon_{z,t} \label{eq:NewKeynesianLoMPreferenceShifter}\\ + \log{a_t} &= \rho_a \log{a_{t-1}} + \varepsilon_{a,t} \label{eq:NewKeynesianLoMTechnology}\\ + \nu_t &= \rho_\nu \nu_{t-1} + \varepsilon_{\nu,t} \label{eq:NewKeynesianLoMMonPol} +\end{align} +with persistence parameters \(\rho_z\), \(\rho_a\) and \(\rho_\nu \). 
+The preference shock \(\varepsilon_{z,t}\), the productivity shock \(\varepsilon_{a,t}\) and the monetary policy shock \(\varepsilon_{\nu,t}\) are iid Gaussian: +\begin{align*} + \begin{pmatrix} + \varepsilon_{z,t}\\\varepsilon_{a,t}\\\varepsilon_{\nu,t} + \end{pmatrix} + \sim N\left(\begin{pmatrix} 0\\0\\0\end{pmatrix}, \begin{pmatrix} \sigma_z^2 & 0& 0\\0 & \sigma_{a}^2& 0\\0 & 0 & \sigma_{\nu}^2\end{pmatrix}\right) +\end{align*} + +\paragraph{Market clearing} +\begin{enumerate}[resume] + \item What does market clearing imply for private bonds \(B_t\)? + \item Explain why labor market clearing implies: + \begin{align*} + l_t = \int_0^1 l_{d,t}(f) df + \end{align*} + \item Show that aggregate real profits of the intermediate firms are given by + \begin{align*} + div_t \equiv \int_{0}^{1} div_t(f) df = y_t - w_t l_t + \end{align*} + \item Show that aggregate demand is given by + \begin{align} + y_t = c_t + \end{align} + \item Denote \(p_t^* = \int_{0}^1 {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} df\). + Show that aggregate supply is given by + \begin{align*} + p_t^* y_t = a_t l_t + \end{align*} + Explain why \(p_t^*\) is called the price efficiency distortion. 
+ \item Derive the law of motion for the price efficiency distortion \(p_t^*\): + \begin{align} + p_{t}^*=\left(1-\theta\right) \widetilde{p}_t^{-\epsilon}+\theta \pi_{t}^{\epsilon} p_{t-1}^* + \end{align} +\end{enumerate} + +\paragraph{Readings} +\begin{itemize} + \item \textcite[Ch.~3]{Gali_2015_MonetaryPolicyInflation} + \item \textcite[Ch.~19]{Heijdra_2017_FoundationsModernMacroeconomics} + \item \textcite[Ch.~7]{Romer_2019_AdvancedMacroeconomics} + \item \textcite[Ch.~3]{Woodford_2003_InterestPricesFoundations} + \item \textcite[Ch.~8]{Walsh_2017_MonetaryTheoryPolicy} +\end{itemize} + +\begin{solution}\textbf{Solution to \nameref{ex:AlgebraNewKeynesianModels}} +\ifDisplaySolutions +\input{exercises/nk_model_algebra_solution.tex} +\fi +\newpage +\end{solution} \ No newline at end of file diff --git a/exercises/nk_model_algebra_solution.tex b/exercises/nk_model_algebra_solution.tex new file mode 100644 index 0000000..b6ad40c --- /dev/null +++ b/exercises/nk_model_algebra_solution.tex @@ -0,0 +1,387 @@ +\begin{enumerate} +\item Equation~\eqref{eq:NominalInterestRate} captures that bond prices are inversely related to interest rates. +When the interest rate goes up, the price of bonds falls. +Intuitively, this makes sense because if you are paying less for a fixed nominal return (at par), + your expected return should be higher. +More specifically to our model, we consider so-called zero-coupon bonds or discount bonds. +These bonds don't pay any interest but derive their value from the difference between the purchase price and the par value (or the face value) paid at maturity. +On maturity the bondholder receives the face value of his investment. +So instead of interest payments, you get a large discount on the face value of the bond; that is the price is lower than the face value. +In other words, investors profit from the difference between the buying price and the face value, contrary to the usual interest income. 
+In our model, we consider zero-coupon bonds with a face value of 1. +So suppose that you buy such a bond at a price of 0.8, then although the bond pays no interest, + your compensation is the difference between the initial price and the face value. +Let \(R_t\) denote the \emph{gross yield to maturity} of a zero-coupon bond, + that is the discount rate that sets the present value of the promised bond payments equal to the current market price of the bond. +So the price of a Zero-Coupon bond is equal to +\begin{align*} +Q_t = \frac{1}{R_t} +\end{align*} +In our example this would imply \(R_t=1.25\). +As there are no other investment opportunities in this model \(R_t\) is also equal to the nominal interest rate in the economy. + +Equation \eqref{eq:RealInterestRate} is the so-called Fisherian equation + which states that the gross real return on a bond \(r_t\) is equivalent to the gross nominal interest rate divided by the expected gross inflation rate. +Inflation expectations are responsible for the difference between nominal and real interest rates, showing that future expectations matter for the economy. + +\item In equilibrium, bond-holding is always zero in all periods: \(B_t=0\). +This is due to the fact that in this model we have a representative agent and only private bonds. +If all agents were borrowing, there would be nobody they could be borrowing from. +If all were lenders, nobody would like to borrow from them. +In sum the price of bonds (or more specifically the nominal interest rate) adjusts such + that bonds across all agents are in zero net supply as markets need to clear in equilibrium. +Note, though, that this bond market clearing condition is imposed \emph{after} you derive the households optimality conditions + as household savings behavior in equilibrium still needs to be consistent with the bond market clearing. 
+
+\item The \emph{No-Ponzi-Game} or \emph{solvency} condition is an external constraint imposed on the individual by the market or other participants.
+You forbid your agent from acquiring infinite debt that is never repaid, a so-called Ponzi-scheme.
+That is, the individual is restricted from financing consumption by raising debt and then raising debt again to repay the previous debt and finance again consumption and so on.
+The individual would very much like to violate it, though, so we need to impose this constraint.
+In short: the \emph{solvency} condition prevents households from consuming more than they earn and refinancing their additional consumption with excessive borrowing.
+
+The \emph{transversality condition} is an optimality condition that states that it is not optimal to start accumulating assets and never consume them, i.e.
+\(\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \} \leq 0\).
+With respect to optimality alone, however, the household would still want to run a Ponzi scheme if it were allowed to --- this is exactly what the solvency condition rules out.
+\(\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \} \leq 0\)
+combined with
+\(\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \} \geq 0\)
+yields \(\lim_{T \rightarrow \infty} E_t \left \{ \Lambda_{t,T} \frac{B_T}{P_T} \right \} = 0\).
+This condition must be satisfied in order for the individual to maximize intertemporal utility implying that at the limit wealth should be zero.
+In other words, if at the limit wealth is positive it means that the household could have increased its consumption without necessarily needing to work more hours;
+    thus implying that consumption was not maximized and therefore contradicting the fact that the household behaves optimally.
+In short: transversality conditions make sure that households do not have any leftover savings (in terms of bonds or capital)
+    as this does not correspond to an optimal path of utility-enhancing consumption.
+
+We never need to actually include this condition in our codes, but implicitly we use it to pick a certain steady state or trajectory.
+For instance in the RBC model we have three possible steady states \((k_t=0,c_t=0)\), \((k_t>0,c_t=0)\) or \((k_t>0,c_t>0)\).
+We do not consider the first one because in this case the economy does not exist.
+All the trajectories leading to the second one violate the transversality condition,
+    so finally we select the third steady state as the \emph{good one} and this is exactly the one that is most interesting from an economic point of view.
+
+Coming back to our model, both the \emph{solvency} and \emph{transversality condition} are actually fulfilled already
+    as bond-holding is always zero in all periods including the hypothetical asymptotic end of life: \(B_t=0\) for all \(t\).
+So these conditions are rather trivial in this model setting, but are important in more sophisticated models.
+
+\item The household minimizes consumption expenditures \(\int_0^1 P_t(h) c_t(h)dh\) by choosing \(c_t(h)\) and taking the aggregation technology into account.
+That is, the Lagrangian is given by:
+\begin{align*}
+    \pounds^c = \int_0^1 P_t(h) c_t(h)dh + P_t \left( c_t - {\left[\int_0^1 {(c_t(h))}^{\frac{\epsilon-1}{\epsilon}}dh\right]}^{\frac{\epsilon}{\epsilon-1}} \right)
+\end{align*}
+where \(P_t\) denotes the Lagrange multiplier, i.e.\ the cost of an additional unit in the index \(c_t\).
+Setting the derivative with respect to \(c_t(h)\) equal to zero yields: +\begin{align*} + \frac{\partial \pounds^c}{\partial c_t(h)} = P_t(h) - P_t \left(\frac{\epsilon}{\epsilon-1}\right) \underbrace{{\left[\int_0^1 {(c_t(h))}^{\frac{\epsilon-1}{\epsilon}}dh\right]}^{\frac{\epsilon}{\epsilon-1}-1}}_{c_t^{1/\epsilon}} \left(\frac{\epsilon-1}{\epsilon}\right) {(c_t(h))}^{\frac{\epsilon-1}{\epsilon}-1} = 0 +\end{align*} +which can be simplified to: +\begin{align*} + c_t(h) = {\left(\frac{P_t(h)}{P_t}\right)}^{-\epsilon} c_t +\end{align*} +Note that this is the demand function for each consumption good \(c_t(h)\). +Accordingly, \(\epsilon \) is the (constant) demand elasticity. + +Plugging this expression into the aggregation technology yields: +\begin{align} + &c_t^{\frac{\epsilon-1}{\epsilon}} = \int_0^1 {(c_t(h))}^{\frac{\epsilon-1}{\epsilon}}dh = \int_0^1 {\left({\left(\frac{P_t(h)}{P_t}\right)}^{-\epsilon} c_t\right)}^{\frac{\epsilon-1}{\epsilon}}dh = c_t^{\frac{\epsilon-1}{\epsilon}} P_t^{\epsilon-1} \int_0^1 {(P_t(h))}^{1-\epsilon}dh \nonumber + \\ + \Leftrightarrow & + P_t = {\left[\int_0^1 {(P_t(h))}^{1-\epsilon}dh\right]}^{\frac{1}{1-\epsilon}} \nonumber + \\ + \Leftrightarrow & + 1 = \int_0^1 {\left(\frac{P_t(h)}{P_t}\right)}^{1-\epsilon}dh \label{eq:AggregatePriceIndex} +\end{align} +Similar to the aggregation technology for the consumption index \(c_t\), + \(P_t\) can be interpreted as the aggregation technology for the different prices \(P_t(h)\). 
+ +In the budget constraint, we can now get rid of one integral \(\int_0^1 c_t(h) P_t(h) dh = P_t c_t\), because: +\begin{align*} + \int_0^1 c_t(h) P_t(h) dh = \int_0^1 {\left(\frac{P_t(h)}{P_t}\right)}^{-\epsilon} c_t P_t(h) dh + = P_t c_t \underbrace{\int_0^1 {\left(\frac{P_t(h)}{P_t}\right)}^{1-\epsilon} dh}_{\overset{\eqref{eq:AggregatePriceIndex}}{=1}} = P_t c_t +\end{align*} +That is, conditional on optimal behavior of households, + total consumption expenditures can be rewritten as the product of the aggregate price index times the aggregate consumption quantity index. + +\item Due to our assumptions, the solvency and transversality conditions as well as the concave optimization problem, + we can rule out corner solutions and neglect the non-negativity constraints in the variables and the budget constraint; + hence, we only need to focus on the first-order conditions. +The Lagrangian for the household's problem is: +\begin{align*} + \pounds^{HH} &= E_t \sum_{j=0}^{\infty} \beta^j \left \{ U\left({c}_{t+j},l_{t+j},z_{t+j}\right) \right \} + \\ + & + \beta^j \lambda_{t+j} \left \{ \int_0^1 div_{t+j}(f) df + w_{t+j} l_{t+j} + \underbrace{\frac{B_{t-1+j}}{P_{t-1+j}}}_{b_{t-1+j}} \underbrace{\frac{P_{t-1+j}}{P_{t+j}}}_{\Pi_{t+j}^{-1}} - Q_{t+j} \underbrace{\frac{B_{t+j}}{P_{t+j}}}_{b_{t+j}} - c_{t+j} + \right \} +\end{align*} +where \(\beta^j\lambda_{t+j}\) are the Lagrange multipliers corresponding to period \(t+j\)'s \textbf{real} budget constraint + (be aware of the difference between nominal and real variables and constraints; for instance, \(b_t=B_t/P_t\) is real debt). 
+The problem is not to choose \({\{c_t,l_t,b_{t}\}}_{t=0}^\infty \) all at once in an open-loop policy, + but to choose these variables sequentially given the information at time \(t\) in a closed-loop policy, + i.e.\ at period \(t\) decision rules for \({\{c_t,l_t,b_{t}\}}\) given the information set at period \(t\); + at period \(t+1\) decision rules for \({\{c_{t+1},l_{t+1},b_{t+1}\}}\) given the information set at period \(t+1\), etc. + +\paragraph{First-order condition with respect to \(c_t\)} +\begin{align} + \lambda_t = \frac{\partial U(c_t,l_t,z_t)}{\partial c_t} = z_t c_t^{-\sigma} +\end{align} +This is the marginal consumption utility function, i.e.\ the benefit (shadow price) of an additional unit of revenue (e.g.\ dividends or labor income) in the budget constraint. + +\paragraph{First-order condition with respect to \(l_t\)} +\begin{align} + w_t = - \frac{\partial U(c_t,l_t,z_t)/\partial l_t}{\lambda_t} = - \frac{\partial U(c_t,l_t,z_t)/\partial l_t}{\partial U(c_t,l_t,z_t)/\partial c_t} = l_t^{\varphi} c_t^{\sigma} +\end{align} +This is the \textbf{intratemporal} optimality condition or, in other words, the labor supply curve of the household. +Note that the preference shifter \(z_t\) has no effect on this intratemporal decision. 
+
+\paragraph{First-order condition with respect to \(b_t\)}
+\begin{align}
+    \lambda_t Q_t &= \beta E_t \left[\lambda_{t+1} \Pi_{t+1}^{-1} \right]
+\end{align}
+Combined with \eqref{eq:NominalInterestRate} and \eqref{eq:RealInterestRate} this yields the so-called Euler equation,
+    i.e.\ the \textbf{intertemporal} choice between consumption and saving:
+\begin{align}
+    \frac{\partial U(c_t,l_t,z_t)}{\partial c_t} &= \beta E_t \left[\frac{\partial U(c_{t+1},l_{t+1},z_{t+1})}{\partial c_{t+1}} R_t \Pi_{t+1}^{-1} \right]
+    \\
+    z_t c_t^{-\sigma} &= \beta E_t \left[z_{t+1} c_{t+1}^{-\sigma} \right] r_t
+    \label{eq:HH.Euler}
+\end{align}
+In words, intertemporal optimality is characterized by an indifference condition:
+An additional unit of consumption yields either marginal utility today in the amount of \(\frac{\partial U(c_t,l_t,z_t)}{\partial c_t}\) (left-hand side).
+Or, alternatively, this unit of consumption can be saved given the real interest rate \(r_t\).
+This saved consumption unit has a present marginal utility value of \(\beta E_t \left[z_{t+1} c_{t+1}^{-\sigma} \right] r_t\) (right-hand side).
+An optimal allocation equates these two choices.
+
+\item The output packers maximize profits \(P_t y_t - \int_{0}^{1} P_t(f) y_t(f) df\) subject to~\eqref{eq:Firms_DS_Aggregator}.
+The Lagrangian is
+\begin{align*}
+    \pounds^{p} = P_t y_t - \int_{0}^{1} P_t(f) y_t(f) df + \Lambda_t^p \left \{ {\left[\int\limits_0^1 {(y_t(f))}^{\frac{\epsilon-1}{\epsilon}}df\right]}^{\frac{\epsilon}{\epsilon-1}} - y_t \right \}
+\end{align*}
+where \(\Lambda_t^p\) is the Lagrange multiplier corresponding to the aggregation technology.
+The first-order condition w.r.t.\ \(y_t\) is
+\begin{align}
+    P_t = \Lambda_{t}^p
+\end{align}
+\(\Lambda_{t}^p\) is the gain of an additional output unit; hence, equal to the aggregate price index \(P_t\).
+ +The first-order condition w.r.t \(y_t(f)\) yields: +\begin{align} + \frac{\partial \pounds^{p} }{\partial y_t(f)} & = -P_t(f) + \Lambda_{t}^p \frac{\epsilon}{\epsilon-1} \left[\int_{0}^1 y_t(f)^{\frac{\epsilon-1}{\epsilon}}df\right]^{\frac{\epsilon}{\epsilon-1}-1} \frac{\epsilon-1}{\epsilon} {(y_t(f))}^{\frac{\epsilon-1}{\epsilon}-1} = 0 +\label{eq:Firms_Relative_Demand} +\end{align} +Note that \(\left[\int_{0}^1 {(y_t(f))}^{\frac{\epsilon-1}{\epsilon}}df\right] = y_t^{\frac{\epsilon-1}{\epsilon}}\) and \(\Lambda_{t}^p=P_t\). +Therefore: +\begin{align} + P_t(f) = P_t {\left[y_t^{\frac{\epsilon-1}{\epsilon}}\right]}^{\frac{\epsilon-\epsilon+1}{\epsilon-1}} {(y_t(f))}^{\frac{\epsilon-1-\epsilon}{\epsilon}} = P_t {\left(\frac{y_t(f)}{y_t}\right)}^{\frac{-1}{\epsilon}} +\end{align} +Reordering yields +\begin{align} + y_t(f) = {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} y_t \label{eq:firms_demand} +\end{align} +This is the demand curve for intermediate good \(y_t(f)\). +Again we see that \(\epsilon \) is the constant demand elasticity. 
+ +The aggregate price index is implicitly determined by inserting the demand curve~\eqref{eq:firms_demand} into the aggregator~\eqref{eq:Firms_DS_Aggregator} +\begin{align} + y_t &= {\left[\int\limits_0^1 {\left({\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} y_t\right)}^{\frac{\epsilon-1}{\epsilon}}df\right]}^{\frac{\epsilon}{\epsilon-1}} +\\ +\Leftrightarrow + P_t &= {\left[\int_{0}^{1} {(P_t(f))}^{1-\epsilon}df\right]}^{\frac{1}{1-\epsilon}} \label{eq:Firms_DS_Pricing} +\end{align} + +\item As the firms are owned by the households, the nominal stochastic discount factor, + \(\Lambda_{t,t+j}\), between \(t\) and \(t+j\) is derived from the Euler equation~\eqref{eq:HH.Euler} of the households + \(\lambda_t = \beta E_t \left[\lambda_{t+1} R_t \Pi_{t+1}^{-1} \right]\) + which implies for the stochastic discount factor: +\begin{align*} + E_t \Lambda_{t,t+j} = E_t 1/R_{t+j} = E_t\beta^j \frac{\lambda_{t+j}}{\lambda_{t}}\frac{P_t}{P_{t+j}} +\end{align*} +From here, we can establish the following relationships: +\begin{align} + \Lambda_{t,t} &= 1 \label{eq:Firms.Stochastic.Discount.Current} + \\ + \Lambda_{t+1,t+1+j} & = \beta^j \frac{\lambda_{t+1+j}}{\lambda_{t+1}} \frac{P_{t+1}}{P_{t+1+j}} + \\ + \Lambda_{t,t+1+j} & = \beta^{j+1} \frac{\lambda_{t+1+j}}{\lambda_{t}} \frac{P_{t}}{P_{t+1+j}} = \beta \frac{\lambda_{t+1}}{\lambda_{t}} \frac{P_{t}}{P_{t+1}} \beta^j \frac{\lambda_{t+1+j}}{\lambda_{t+1}} \frac{P_{t+1}}{P_{t+1+j}} = \beta \frac{\lambda_{t+1}}{\lambda_t} \Pi_{t+1}^{-1} \Lambda_{t+1,t+1+j} \label{eq:Firms.Stochastic.Discount.Relationship} +\end{align} +We will need this later to derive the recursive nonlinear price setting equations. 
+
+\item The Lagrangian of the intermediate firm is
+\begin{footnotesize}
+\begin{align*}
+    \pounds^f &= E_t \sum_{j=0}^{\infty}\Lambda_{t,t+j} P_{t+j} \left[\frac{P_{t+j}(f)}{P_{t+j}} y_{t+j}(f) - w_{t+j} l_{d,t+j}(f) + mc_{t+j}(f)\left(a_{t+j} l_{d,t+j}(f) - y_{t+j}(f)\right)\right]
+\end{align*}
+\end{footnotesize}
+\(mc_t(f)\) denotes the Lagrange multiplier which is the shadow price of producing an additional output unit in the optimum;
+    obviously, this is our understanding of real marginal costs.
+Taking the derivative w.r.t.\ \(l_{d,t}(f)\) actually boils down to a static problem (as we only need to evaluate for \(j=0\)) and yields:
+\begin{align}
+    w_t &= mc_t(f) a_t = mc_t(f) \frac{y_t(f)}{l_{d,t}(f)} \label{eq:Firms.Labor.Demand}
+\end{align}
+where we substituted the production function~\eqref{eq:Firms_ProductionFunction} for \(a_t\).
+This is the labor demand function, which implies that the output-to-labor ratio \(y_t(f)/l_{d,t}(f)\) is the same across firms and equal to \(a_t\).
+Note that all firms face the same factor prices and all have access to the same production technology \(a_t\);
+    hence, from the above equation it is evident that marginal costs are identical across firms
+\begin{align}
+    mc_t(f) = \frac{w_t}{a_t}\label{eq:Firms.Marginal.Costs}
+\end{align}
+This means that aggregate marginal costs are also equal to the ratio between the real wage and technology:
+\begin{align*}
+    mc_t = \int_0^1 mc_t(f) df = \frac{w_t}{a_t}
+\end{align*}
+
+\item The Lagrangian of the intermediate firm is
+\begin{footnotesize}
+\begin{align}
+    \pounds^f &= E_t \sum_{j=0}^{\infty}\Lambda_{t,t+j} P_{t+j} \left[ {\left(\frac{P_{t+j}(f)}{P_{t+j}}\right)}^{1-\epsilon} y_{t+j} - w_{t+j} l_{d,t+j}(f) + mc_{t+j}(f)\left(a_{t+j} l_{d,t+j}(f) - \left(\frac{P_{t+j}(f)}{P_{t+j}}\right)^{-\epsilon} y_{t+j}\right)\right]
+    \label{eq:Firms.Lagrangian}
+\end{align}
+\end{footnotesize}
+where (compared to above) we used the demand curve~\eqref{eq:firms_demand} to substitute for \(y_t(f)\).
+When firms decide how to set their price they need to take into account that due to the Calvo mechanism + they might get stuck at \(\widetilde{P}_t(f)\) for a number of periods \(j=1,2,\ldots \) before they can re-optimize again. +The probability of such a situation is \(\theta^j\). +Therefore, when firms are able to change prices in period \(t\), they take this into account and the above Lagrangian of the expected discounted sum of nominal profits becomes: +\begin{align} + \pounds^{f^c} &= E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}\left[ \left(\frac{\widetilde{P}_{t}(f)}{P_{t+j}}\right)^{1-\epsilon} y_{t+j} - w_{t+j} l_{d,t+j}(f) + mc_{t+j}\left(a_{t+j} l_{d,t+j}(f) - {\left(\frac{\widetilde{P}_{t}(f)}{P_{t+j}}\right)}^{-\epsilon} y_{t+j}\right)\right] + \\ + &= E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}^\epsilon y_{t+j} \left[ \widetilde{P}_{t}(f)^{1-\epsilon} - P_{t+j} \cdot mc_{t+j} \cdot \widetilde{P}_{t}(f)^{-\epsilon} \right] + \ldots + \label{eq:Firms.Lagrangian.Calvo} +\end{align} +where in the second line we focus only on relevant parts for the optimization wrt to \(\widetilde{P}_t(f)\). +Moreover, we took into account that \(mc_t(f) = mc_t\). 
+ +The first-order condition of maximizing \(\pounds^{f^c}\) wrt to \(\widetilde{P}_t(f)\) is +\begin{align} + 0= E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}^\epsilon y_{t+j} \left[ (1-\epsilon)\cdot \widetilde{P}_{t}(f)^{-\epsilon} +\epsilon \cdot P_{t+j} \cdot mc_{t+j} \widetilde{P}_{t}(f)^{-\epsilon-1}\right] +\end{align} +As \(\widetilde{P}_t(f)>0\) does not depend on \(j\), we multiply by \({\widetilde{P}_t(f)}^{\epsilon+1}\): +\begin{align} + 0= E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}^{\epsilon} y_{t+j} \left[ (1-\epsilon)\cdot\widetilde{P}_t(f) +\epsilon \cdot P_{t+j} \cdot mc_{t+j} \right] +\end{align} +Rearranging +\begin{align} + \widetilde{P}_t(f) \cdot E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}^{\epsilon} y_{t+j} = \frac{\epsilon}{\epsilon-1} \cdot E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} P_{t+j}^{\epsilon+1} y_{t+j} mc_{t+j} +\end{align} +Dividing both sides by \(P_t^{\epsilon+1}\) +\begin{align} + \underbrace{\frac{\widetilde{P}_t(f)}{P_t}}_{\widetilde{p}_t} \cdot \underbrace{E_t\sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} \left(\frac{P_{t+j}}{P_t}\right)^{\epsilon} y_{t+j}}_{S_{1,t}} = \frac{\epsilon}{\epsilon-1} \cdot \underbrace{E_t \sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} {\left(\frac{P_{t+j}}{P_t}\right)}^{\epsilon+1} y_{t+j} mc_{t+j}}_{S_{2,t}} +\end{align} +Note that all firms that reset prices face the same problem and therefore set the same price, \(\widetilde{P}_t(f) =\widetilde{P}_t\). +This is also evident by looking at the infinite sums, \(S_{1,t}\) and \(S_{2,t}\), because these do not depend on \(f\). +Therefore, we can drop the \(f\) in \(\widetilde{P}_t(f)\) and define \(\widetilde{p}_t:= \frac{\widetilde{P}_t}{P_t}\). +The first-order condition can thus be written compactly: +\begin{align} + \widetilde{p}_t \cdot S_{1,t} = \frac{\epsilon}{\epsilon-1} \cdot S_{2,t} +\end{align} + +Moreover, the two infinite sums can be written recursively. 
+For this we make use of the relationships for the stochastic discount factor~\eqref{eq:Firms.Stochastic.Discount.Current} and~\eqref{eq:Firms.Stochastic.Discount.Relationship}. +The first recursive sum can be written as: +\begin{align*} + S_{1,t} &= + E_t\sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} \left(\frac{P_{t+j}}{P_t}\right)^{\epsilon} y_{t+j} + \\ + &= y_t + E_t\sum^{\infty}_{\color{red}{j=1}}\theta^{\color{red}{j}} \Lambda_{t,t+\color{red}{j}} {\left(\frac{P_{t+\color{red}{j}}}{P_t}\right)}^{\epsilon} y_{t+\color{red}{j}} + \\ + &= y_t + E_t\sum^{\infty}_{\color{red}{j=0}}\theta^{\color{red}{j+1}} \Lambda_{t,t+\color{red}{j+1}} {\left(\frac{P_{t+\color{red}{j+1}}}{P_t}\right)}^{\epsilon} y_{t+\color{red}{j+1}} + \\ + &= y_t + E_t\sum^{\infty}_{j=0}\theta^{j+1} {\color{blue}{\Lambda_{t,t+j+1}}} {\left({\color{green}{\frac{P_{t+j+1}}{P_{t+1}}\frac{P_{t+1}}{P_{t}}}}\right)}^{\epsilon} y_{t+j+1} + \\ + &= y_t + E_t\sum^{\infty}_{j=0}\theta^{j+1} {\color{blue}{\beta \frac{\lambda_{t+1}}{\lambda_t}\Pi_{t+1}^{-1} \Lambda_{t+1,t+1+j}}} {\left({\color{green}{\frac{P_{t+j+1}}{P_{t+1}}\Pi_{t+1}}}\right)}^{\epsilon} y_{t+j+1} + \\ + &= y_t + \theta\beta E_t\frac{\lambda_{t+1}}{\lambda_t}\Pi_{t+1}^{\epsilon-1} \underbrace{E_t\sum^{\infty}_{j=0}\theta^{j} \Lambda_{t+1,t+1+j} {\left(\frac{P_{t+j+1}}{P_{t+1}}\right)}^{\epsilon} y_{t+j+1}}_{=S_{1,t+1}} +\end{align*} +The second recursive sum can be written as +\begin{align*} + S_{2,t} &= + E_t\sum_{j=0}^{\infty}\theta^j \Lambda_{t,t+j} {\left(\frac{P_{t+j}}{P_t}\right)}^{\epsilon+1} y_{t+j} mc_{t+j} + \\ + &= y_t mc_t + E_t\sum^{\infty}_{\color{red}{j=1}}\theta^{\color{red}{j}} \Lambda_{t,t+\color{red}{j}} {\left(\frac{P_{t+\color{red}{j}}}{P_t}\right)}^{\epsilon+1} y_{t+\color{red}{j}} mc_{t+\color{red}{j}} + \\ + &= y_t mc_t + E_t\sum^{\infty}_{\color{red}{j=0}}\theta^{\color{red}{j+1}} \Lambda_{t,t+\color{red}{j+1}} {\left(\frac{P_{t+\color{red}{j+1}}}{P_t}\right)}^{\epsilon+1} y_{t+\color{red}{j+1}} 
mc_{t+\color{red}{j+1}} + \\ + &= y_t mc_t + E_t\sum^{\infty}_{j=0}\theta^{j+1} {\color{blue}{\Lambda_{t,t+j+1}}} {\left({\color{green}{\frac{P_{t+j+1}}{P_{t+1}}\frac{P_{t+1}}{P_{t}}}}\right)}^{\epsilon+1} y_{t+j+1} mc_{t+j+1} + \\ + &= y_t mc_t + E_t\sum^{\infty}_{j=0}\theta^{j+1} {\color{blue}{\beta \frac{\lambda_{t+1}}{\lambda_t}\Pi_{t+1}^{-1}\Lambda_{t+1,t+1+j}}} {\left({\color{green}{\frac{P_{t+j+1}}{P_{t+1}}\Pi_{t+1}}}\right)}^{\epsilon+1} y_{t+j+1} mc_{t+j+1} + \\ + &= y_t mc_t + + \theta\beta E_t\frac{\lambda_{t+1}}{\lambda_t}\Pi_{t+1}^{\epsilon} \underbrace{E_t\sum^{\infty}_{j=0}\theta^{j} \Lambda_{t+1,t+1+j} {\left(\frac{P_{t+j+1}}{P_{t+1}}\right)}^{\epsilon+1} y_{t+j+1} mc_{t+j+1}}_{=S_{2,t+1}} + \end{align*} +To sum up: +\begin{align} + S_{1,t} &= y_t + \theta \beta E_t \frac{\lambda_{t+1}}{\lambda_{t}} \Pi_{t+1}^{\epsilon-1} S_{1,t+1}\\ + S_{2,t} &= y_t mc_t + \theta \beta E_t \frac{\lambda_{t+1}}{\lambda_{t}} \Pi_{t+1}^{\epsilon} S_{2,t+1} +\end{align} +\item The law of motion for \(\widetilde{p}_t=\frac{\widetilde{P}_t}{P_t}\) is given by the aggregate price index~\eqref{eq:Firms_DS_Pricing} which can be re-arranged to +\begin{align} + 1 &= \int_{0}^{1} {\left(\frac{P_t(f)}{P_t}\right)}^{1-\epsilon}df \label{eq:Aggregate.Price} +\end{align} +Due to the Calvo mechanism we get that \((1-\theta)\) firms can re-set their price to \(\widetilde{P}_t\), + whereas the remaining \(\theta \) firms cannot and set their price equal to \(P_{t-1}\). 
+Therefore:
+\begin{align}
+    1 &= \int_{optimizers} {\left(\frac{P_t(f)}{P_t}\right)}^{1-\epsilon} df + \int_{non-optimizers} {\left(\frac{P_t(f)}{P_t}\right)}^{1-\epsilon} df \\
+    1&= (1-\theta) {\left(\frac{\widetilde{P}_t}{P_t}\right)}^{1-\epsilon} + \theta \int_{0}^1 {\left(\frac{P_{t-1}(f)}{P_t}{\color{red}{\frac{P_{t-1}}{P_{t-1}}}}\right)}^{1-\epsilon}df\\
+    1&= (1-\theta) \widetilde{p}_t^{1-\epsilon} + \theta {\left(\frac{P_{t-1}}{P_{t}}\right)}^{1-\epsilon} \int_{0}^1 {\left(\frac{P_{t-1}(f)}{P_{t-1}}\right)}^{1-\epsilon}df\\
+    1&=(1-\theta) \widetilde{p}_t^{1-\epsilon} + \theta \Pi_t^{\epsilon-1} \underbrace{\int_{0}^{1} {\left(\frac{P_{t-1}(f)}{P_{t-1}} \right)}^{1-\epsilon}df}_{\overset{\eqref{eq:Aggregate.Price}}{=}1}\\
+    1&=(1-\theta) \widetilde{p}_t^{1-\epsilon} + \theta \Pi_t^{\epsilon-1}
+\end{align}
+
+\item Private bonds \(B_t\) are in zero net supply, i.e.\ \(B_t=0\) in equilibrium.
+Note that this condition can only be imposed after taking first-order conditions.
+It would be invalid to eliminate bonds already in the budget constraint of the household.
+Even if bonds are in zero net supply, households' savings behavior in equilibrium still needs to be consistent with the bond market clearing.
+
+\item In an equilibrium, labor demand from the intermediate firms needs to be equal to the labor supply of the households; hence:
+\begin{align}
+    \int_{0}^1 l_{d,t}(f) df = l_t
+\end{align}
+so \(l_t\) denotes equilibrium hours worked (both supplied and demanded).
+
+\item Given the demand for good \(y_t(f)\) and the Dixit-Stiglitz aggregation technology, we get:
+\begin{align*}
+    \int_0^1 y_t(f) P_t(f) df = \int_0^1 {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} y_t P_t(f) df = P_t y_t \underbrace{\int_0^1 {\left(\frac{P_t(f)}{P_t}\right)}^{1-\epsilon} df}_{\overset{\eqref{eq:AggregatePriceIndex}}{=1}} = P_t y_t
+\end{align*}
+Moreover, from the labor market we have \(l_t = \int_{0}^1 l_{d,t}(f) df\).
+Plugging both expressions into aggregate real profits:
+\begin{align*}
+    div_t = \int_0^1 div_t(f) df = \int_0^1 \frac{P_t(f)}{P_t} y_t(f) df - \int_0^1 w_t l_{d,t}(f) df = y_t - w_t l_t
+\end{align*}
+
+\item Revisit the budget constraint in real terms:
+\begin{align*}
+    \int_0^1 \frac{P_t(h)}{P_t} c_t(h) dh + Q_t b_t \leq b_{t-1}\Pi_t^{-1} + w_t l_t + \int_0^1 div_t(f) df
+\end{align*}
+which becomes
+\begin{align*}
+    c_t = w_t l_t + (y_t - w_t l_t) = y_t
+\end{align*}
+in an optimal allocation with cleared markets. This is the aggregate demand equation.
+
+\item Define \(y_t^{sum} = \int_{0}^1 y_t(f) df\).
+Using the production function~\eqref{eq:Firms_ProductionFunction} and labor market clearing we get:
+\begin{align}
+    y_t^{sum} = \int_{0}^1 a_t l_{d,t}(f) df = a_t l_t
+\end{align}
+Furthermore, due to the demand for intermediate good \(y_t(f)\) in \eqref{eq:firms_demand} we get:
+\begin{align}
+    y_t^{sum} = y_t \underbrace{\int_{0}^1 {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} df}_{=p_t^*}
+\end{align}
+Equating both yields:
+\begin{align}
+    p_t^* y_t = a_t l_t
+\end{align}
+This is the aggregate supply equation.
+Price frictions, however, imply that resources will not be efficiently allocated
+    as relative prices become dispersed because not all firms can re-optimize their price in every period.
+This inefficiency is measured by \(p_t^* \geq 1\): by Jensen's inequality \(p_t^*=1\) only if all prices are equal, and output is lost whenever \(p_t^*>1\).
+ +\item The law of motion for the efficiency distortion \(p_t^*\) is given due to the Calvo price mechanism, i.e.: +\begin{align*} + p_t^* &= \int_0^1{\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} df\\ + p_t^* &= \int_{optimizers} {\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} df + \int_{non-optimizers}{\left(\frac{P_t(f)}{P_t}\right)}^{-\epsilon} df\\ + p_t^* & = (1-\theta) \widetilde{p}_t^{-\epsilon} + \theta \int_0^1 {\left(\frac{P_{t-1}(f)}{P_t}\right)}^{-\epsilon} df\\ + p_t^* & = (1-\theta) \widetilde{p}_t^{-\epsilon} + \theta \int_0^1 {\left(\frac{P_{t-1}(f)}{P_t }\frac{P_{t-1}}{P_{t-1}}\right)}^{-\epsilon} df\\ + p_t^* & = (1-\theta) \widetilde{p}_t^{-\epsilon} + \theta {\left(\frac{P_{t-1}}{P_{t}}\right)}^{-\epsilon} \int_0^1 {\left(\frac{P_{t-1}(f)}{P_{t-1} }\right)}^{-\epsilon} df\\ + p_t^* & = (1-\theta) \widetilde{p}_t^{-\epsilon} + \theta \Pi_t^{\epsilon} \underbrace{\int_0^1 {\left(\frac{P_{t-1}(f)}{P_{t-1} }\right)}^{-\epsilon} df}_{=p_{t-1}^*}\\ + p_t^* & = (1-\theta) \widetilde{p}_t^{-\epsilon} + \theta \Pi_t^{\epsilon} p_{t-1}^* +\end{align*} + +\end{enumerate} \ No newline at end of file diff --git a/exercises/rbc_model.tex b/exercises/rbc_model.tex new file mode 100644 index 0000000..88efb99 --- /dev/null +++ b/exercises/rbc_model.tex @@ -0,0 +1,125 @@ +\section[RBC model]{RBC model\label{ex:RBCModel}} +Consider the basic Real Business Cycle (RBC) model with leisure. +The representative household maximizes present as well as expected future utility +\begin{align*} + \max E_t \sum_{j=0}^{\infty} \beta^{j} U_{t+j} +\end{align*} +with \(\beta <1\) denoting the discount factor and \(E_t\) is expectation given information at time \(t\). +The contemporaneous utility function +\begin{align*} + U_t = \gamma \log(c_t) + \psi \log{(1-l_{t})} +\end{align*} +is additively separable and has two arguments: consumption \(c_t\) and normalized labor supply \(l_{t}\). +The marginal utility of consumption is positive, whereas more labor supply reduces utility. 
+Accordingly, \(\gamma \) is the consumption weight in the utility function and \(\psi \) the weight on leisure.
+In each period the household takes the real wage \(w_t\) as given and supplies perfectly elastic labor service to the representative firm.
+In return, she receives real labor income in the amount of \(w_t l_{t}\) and, additionally,
+    real profits \({div}_t\) from the firm as well as revenue from lending capital \(k_{t-1}\) at real rental rate \(r_{k,t}\) to the firms,
+    as it is assumed that the firm and capital stock are owned by the household.
+Income and wealth are used to finance consumption \(c_t\) and investment \(i_t\).
+In total, this defines the (real) budget constraint of the household:
+\begin{align*}
+c_t + i_t = w_t l_{t} + r_{k,t} k_{t-1} + {div}_t
+\end{align*}
+The law of motion for capital \(k_t\) at the end of period \(t\) is given by
+\begin{align*}
+k_{t} = (1-\delta) k_{t-1} + i_t
+\end{align*}
+where \(\delta \) is the capital depreciation rate.\footnote{%
+    Note that we use the end-of-period timing convention for capital, i.e.\
+    \(k_t\) instead of \(k_{t+1}\),
+    because the investment decision is done in period \(t\) and hence capital is also determined in \(t\).
+    In older papers and books you will often find beginning-of-period timing convention for capital,
+    so always think about when a variable is decided and determined.}
+Assume that the transversality condition is fulfilled.
+
+Productivity \(a_t\) is the driving force of the economy and evolves according to
+\begin{align*}
+\log{a_{t}} &= \rho_a \log{a_{t-1}} + \varepsilon_{a,t}
+\end{align*}
+where \(\rho_a\) denotes the persistence parameter and \(\varepsilon_{a,t}\) is assumed to be normally distributed with mean zero and variance \(\sigma_a^2\).
+
+Real profits \({div}_t\) of the representative firm are revenues from selling output \(y_t\) minus costs from labor \(w_t l_{d,t}\) and renting capital \(r_{k,t} k_{d,t-1}\):
+\begin{align*}
+{div}_t = y_{t} - w_{t} l_{d,t} - r_{k,t} k_{d,t-1}
+\end{align*}
+The representative firm maximizes expected profits
+\begin{align*}
+    \max E_t \sum_{j=0}^{\infty} {\Lambda}_{t,t+j} {div}_{t+j}
+\end{align*}
+subject to a Cobb-Douglas production function
+\begin{align*}
+f(k_{d,t-1}, l_{d,t}) = y_t = a_t k_{d,t-1}^\alpha l_{d,t}^{1-\alpha}
+\end{align*}
+The stochastic discount factor \({\Lambda}_{t,t+j}\) takes into account that firms are owned by the household,
+    i.e.\ it is the present value of a unit of consumption in period \(t+j\) or, respectively,
+    the marginal utility of an additional unit of profit;
+    therefore
+\begin{align*}
+\Lambda_{t,t+j}=\beta^j \frac{\partial U_{t+j}/\partial c_{t+j}}{\partial U_{t}/\partial c_{t}}
+\end{align*}
+
+Finally, we have non-negativity constraints \(k_t \geq 0\), \(c_t \geq 0\) and \(0 \leq l_t \leq 1\).
+
+\begin{enumerate}
+\item Show that the first-order conditions of the representative household are given by
+\begin{align*}
+U_{c,t} &= \beta E_t\left[U_{c,t+1} \left(1-\delta + r_{k,t+1}\right)\right]
+\\
+w_t &= -\frac{U_{l,t}}{U_{c,t}}
+\end{align*}
+where \(U_{c,t} = \gamma c_t^{-1}\) and \(U_{l,t} = \frac{-\psi}{1-l_t}\).
+Interpret these equations in economic terms.
+
+\item Show that the first-order conditions of the representative firm are given by
+\begin{align*}
+w_t &= f_l
+\\
+r_{k,t} &= f_k
+\end{align*}
+where \(f_l = (1-\alpha) a_t {\left(\frac{k_{d,t-1}}{l_{d,t}}\right)}^\alpha \) and \(f_k = \alpha a_t {\left(\frac{k_{d,t-1}}{l_{d,t}}\right)}^{\alpha-1}\).
+Interpret these equations in economic terms.
+
+\item Show that combining the optimal decisions with clearing of both the labor market, i.e.\
+\(l_{d,t} = l_t\), and the capital market, \(k_{d,t-1} = k_{t-1}\), implies clearing of the goods market:
+\begin{align*}
+y_t = c_t + i_t
+\end{align*}
+
+\item Derive the steady-state of the model, in the sense that there is a set of values for the endogenous variables that in equilibrium remain constant over time.
+
+\item Discuss how to calibrate the following parameters \(\alpha \), \(\beta \), \(\delta \), \(\gamma \), \(\psi \), \(\rho_a\) and \(\sigma_a\).
+
+\item Briefly provide intuition behind the transversality condition.
+
+\item Write a script for this RBC model with a feasible calibration for an OECD country that computes the steady-state of the model.
+
+\item Write a DYNARE mod file for this RBC model with a feasible calibration for an OECD country
+    and compute the steady-state of the model by using a \texttt{steady\_state\_model} block.
+    Compare this to the steady-state computed above.
+
+\item Now assume a contemporaneous utility function of the CRRA (constant Relative Risk Aversion) type:\footnote{%
+    Note that due to {L'Hopital}'s rule \(\eta_c=\eta_l=1\) implies the original specification, \(U_t=\gamma \log c_t + \psi \log(1-l_t)\).}
+\begin{align*}
+    U_t = \gamma \frac{c_{t}^{1-\eta_c}-1}{1-\eta_c} + \psi \frac{{(1-l_{t})}^{1-\eta_l}-1}{1-\eta_l}
+\end{align*}
+\begin{enumerate}
+    \item Derive the model equations and steady-state analytically.
+    \item Write a script to compute the steady-state for this model.
+    \item Write a DYNARE mod file and compute the steady-state for this model by using a helper function in the \texttt{steady\_state\_model} block.
+\end{enumerate} + +\end{enumerate} + +\paragraph{Readings} +\begin{itemize} + \item \textcite[Ch.~3, Ch.~6]{McCandless_2008_ABCsRBCsIntroduction} + \item \textcite[Ch.~1, Ch.~2]{Torres_2013_IntroductionDynamicMacroeconomic} +\end{itemize} + +\begin{solution}\textbf{Solution to \nameref{ex:RBCModel}} +\ifDisplaySolutions +\input{exercises/rbc_model_solution.tex} +\fi +\newpage +\end{solution} \ No newline at end of file diff --git a/exercises/rbc_model_solution.tex b/exercises/rbc_model_solution.tex new file mode 100644 index 0000000..a26fd82 --- /dev/null +++ b/exercises/rbc_model_solution.tex @@ -0,0 +1,252 @@ +\begin{enumerate} + +\item Due to the transversality condition, + we will not have corner solutions and can neglect the non-negativity constraints. +Moreover, due to the concave optimization problem, we only need to focus on the first-order conditions. + +The Lagrangian for the household problem is +\begin{align*} +\pounds^H = E_t\sum_{j=0}^{\infty}\beta^j&\left \{ \gamma \log(c_{t+j}) + \psi \log(1-l_{t+j}) \right \} +\\ ++\beta^j \lambda_{t+j} &\left \{ \left(w_{t+j} l_{t+j} + r_{k,t+j} k_{t-1+j} - c_{t+j} - i_{t+j}\right) \right \} +\\ ++\beta^j \mu_{t+j} &\left \{ \left((1-\delta)k_{t-1+j} + i_{t+j} - k_{t+j}\right) \right \} +\end{align*} +Note that the problem is not to choose \( {\{c_t,i_t,l_t,k_{t} \}}_{t=0}^\infty \) all at once in an open-loop policy, + but to choose these variables sequentially given the information at time \(t\) in a closed-loop policy, + i.e.\ at period \(t\) decision rules for \({\{c_t,i_t,l_t,k_{t}\}}\) given the information set at period \(t\); + at period \(t+1\) decision rules for \({\{c_{t+1},i_{t+1},l_{t+1},k_{t+1}\}}\) given the information set at period \(t+1\), + and so on. 
+ +The first-order condition w.r.t.\ +\(c_t\) is given by +\begin{align*} +\frac{\partial \pounds^H}{\partial c_{t}} &= E_t \left(\gamma c_t^{-1}-\lambda_{t}\right) = 0 +\\ +\Leftrightarrow \lambda_{t} &= \gamma c_{t}^{-1} & (I) +\end{align*} +The first-order condition w.r.t.\ +\(l_t\) is given by +\begin{align*} +\frac{\partial \pounds^H}{\partial l_{t}} &= E_t \left(\frac{-\psi}{1-l_{t}} + \lambda_{t} w_{t}\right) = 0 +\\ +\Leftrightarrow \lambda_{t} w_{t} &= \frac{\psi}{1-l_{t}} &(II) +\end{align*} +The first-order condition w.r.t.\ +\(i_{t}\) is given by +\begin{align*} +\frac{\partial \pounds^H}{\partial i_{t}} &= E_t \left(-\lambda_{t} + \mu_{t}\right) = 0 +\\ +\Leftrightarrow \lambda_{t} &= \mu_{t} & (III) +\end{align*} +The first-order condition w.r.t. +\(k_{t}\) is given by +\begin{align*} +\frac{\partial \pounds^H}{\partial k_{t}} &= E_t (-\mu_{t}) + + E_t \beta \left(\lambda_{t+1} r_{k,t+1} + \mu_{t+1}(1-\delta)\right) = 0 +\\ +\Leftrightarrow \mu_{t} &= E_t \beta(\mu_{t+1}(1-\delta) + \lambda_{t+1} r_{k,t+1}) & (IV) +\end{align*} + +(I) and (III) in (IV) yields +\begin{align*} +\underbrace{\gamma c_t^{-1}}_{U_{c,t}} &= \beta E_t \underbrace{\gamma c_{t+1}^{-1}}_{U_{c,t+1}}\left(1 - \delta + r_{k,t+1}\right) +\end{align*} +This is the Euler equation of \textbf{intertemporal optimality}. It reflects the trade-off between consumption and savings. +If the household saves a (marginal) unit of consumption, i.e.\ invest this into the capital stock, + she can consume \((1-\delta+r_{k,t+1})\) units in the following period. +The marginal utility of consuming a unit today is equal to \(U_{c,t}\), + whereas consuming tomorrow has expected utility equal to \(E_t(U_{c,t+1})\). +Discounting expected marginal utility with \(\beta \), an optimum is characterized by a situation + in which the household must be indifferent between both choices. 
+ +(I) in (II) yields: +\begin{align*} +w_t = -\frac{\frac{-\psi}{1-l_t}}{\gamma c_t^{-1}} \equiv - \frac{U_{l,t}}{U_{c,t}} +\end{align*} +This equation reflects \textbf{intratemporal optimality}; in other words, the labor supply function. +According to the equation, the real wage must be equal to the marginal rate of substitution between consumption and leisure. + +\item First, note that even though firms maximize expected profits it is actually a static problem as there are no forward-looking terms. +That is, the objective is to maximize profits +\begin{align*} +{div}_t = a_t k_{d,t-1}^\alpha l_{d,t}^{1-\alpha} - w_t l_{d,t} - r_{k,t} k_{d,t-1} +\end{align*} +The first-order condition w.r.t. +\(l_{d,t}\) is given by +\begin{align*} +\frac{\partial {div}_t}{\partial l_{d,t}} &= (1-\alpha) a_t k_{d,t-1}^\alpha l_{d,t}^{-\alpha} - w_t = 0 +\\ +\Leftrightarrow w_t &= (1-\alpha) a_t k_{d,t-1}^\alpha l_{d,t}^{-\alpha} = f_l = (1-\alpha) \frac{y_t}{l_{d,t}} +\end{align*} +The real wage must be equal to the marginal product of labor. +Due to the Cobb-Douglas production function it is a constant proportion \((1-\alpha)\) of the ratio of total output to labor. +Simply put, this is the labor demand function. + +The first-order condition w.r.t. +\(k_{d,t-1}\) is given by +\begin{align*} +\frac{\partial {div}_t}{\partial k_{d,t-1}} &= \alpha a_t k_{d,t-1}^{\alpha-1} l_{d,t}^{1-\alpha} - r_{k,t} = 0 +\\ +\Leftrightarrow r_{k,t} &= \alpha a_t k_{d,t-1}^{\alpha-1} l_{d,t}^{1-\alpha} = f_k = \alpha \frac{y_t}{k_{d,t-1}} +\end{align*} +The real rental rate for capital must be equal to the marginal product of capital. +Due to the Cobb-Douglas production function it is a constant proportion \(\alpha\) of the ratio of total output to capital. +Simply put, this is the capital demand function. 
+
+\item Making use of clearing of labor and capital markets implies that the firm's profits in the optimum are given by:
+\begin{align*}
+{div}_t &= y_t - w_t l_{t} - r_{k,t} k_{t-1} = y_t - (1-\alpha)y_t - \alpha y_t = 0
+\\
+\Leftrightarrow y_t &= w_t l_{t} + r_{k,t} k_{t-1}
+\end{align*}
+Inserting this into the budget constraint of the household yields:
+\begin{align*}
+c_t + i_t = w_t l_{t} + r_{k,t} k_{t-1} + {div}_t = y_t
+\end{align*}
+This is a manifestation of Walras' law: if 2 out of 3 markets are cleared, the last market must clear as well.
+
+\item First, consider the steady-state value of technology:
+\begin{align*}
+\log{a}&=\rho_a \log{a} + 0 \Leftrightarrow \log{a} = 0
+\\
+\Leftrightarrow a &= 1
+\end{align*}
+The Euler equation in steady-state becomes:
+\begin{align*}
+U_c &= \beta U_c (1-\delta+r_k)
+\\
+\Leftrightarrow r_k &= \frac{1}{\beta} + \delta - 1
+\end{align*}
+Next we will provide recursively closed-form expressions for all variables in terms of steady-state labor.
+That is, the right-hand sides of the following equations are given in terms of parameters or previously computed expressions.
+\begin{itemize}
+    \item The firm's demand for capital in steady-state becomes
+    \begin{align*}
+    r_k &= \alpha a k^{\alpha-1} l^{1-\alpha}
+    \\
+    \Leftrightarrow \frac{k}{l} &= \left(\frac{\alpha a}{r_k}\right)^{\frac{1}{1-\alpha}}
+    \end{align*}
+    \item The firm's demand for labor in steady-state becomes:
+    \begin{align*}
+    w = (1-\alpha) a k^\alpha l^{-\alpha} = (1-\alpha)a \left(\frac{k}{l}\right)^\alpha
+    \end{align*}
+    \item The law of motion for capital in steady-state implies
+    \begin{align*}
+    \frac{i}{l} &= \delta\frac{k}{l}
+    \end{align*}
+    \item The production function in steady-state becomes
+    \begin{align*}
+    \frac{y}{l} = a \left(\frac{k}{l}\right)^\alpha
+    \end{align*}
+    \item The clearing of the goods market in steady-state implies
+    \begin{align*}
+    \frac{c}{l} = \frac{y}{l} - \frac{i}{l}
+    \end{align*}
+\end{itemize}
+Now, we have expressions for all variables as a ratio to steady-state labor.
+Hence, once we compute \(l\), we can revisit the above expressions to compute all values in closed-form.
+Due to the log-utility function, we can actually derive a closed-form expression for \(l\).
+To this end, set labor demand equal to labor supply and express the right hand side in terms of previously computed expressions.
+\begin{align*}
+\psi \frac{1}{1-l} &= \gamma c^{-1} w
+\\
+\Leftrightarrow \psi \frac{l}{1-l} &= \gamma \left(\frac{c}{l}\right)^{-1} w
+\\
+\Leftrightarrow l &= (1-l)\frac{\gamma}{\psi} \left(\frac{c}{l}\right)^{-1} w
+\\
+\Leftrightarrow l &= \frac{\frac{\gamma}{\psi} \left(\frac{c}{l}\right)^{-1} w}{1+\frac{\gamma}{\psi} \left(\frac{c}{l}\right)^{-1} w}
+\end{align*}
+Lastly, it is straightforward to compute the remaining steady-state values, i.e.
+\begin{align*}
+c = \frac{c}{l} l,\qquad
+i = \frac{i}{l}l ,\qquad
+k = \frac{k}{l} l,\qquad
+y = \frac{y}{l} l
+\end{align*}
+
+\item The transversality condition for an infinite horizon dynamic optimization problem is the boundary condition
+ determining a solution to the problem's first-order conditions together with the initial condition.
+The transversality condition requires the present value of the state variables (here \(k_t\) and \(a_t\)) to converge to zero as the planning horizon recedes towards infinity.
+The first-order and transversality conditions are sufficient to identify an optimum in a concave optimization problem.
+Given an optimal path, the necessity of the transversality condition reflects the impossibility of finding an alternative feasible path
+ for which each state variable deviates from the optimum at each time and increases discounted utility.
+These conditions are implicit only; we don't enter them in a computer program.
+But implicitly we do consider them when we focus on unique and stable solutions or when we pick certain steady-state values.
+
+\item General hints: construct and parameterize the model such that it corresponds to certain properties of the true economy.
+One often uses steady-state characteristics for choosing the parameters in accordance with observed data.
+For instance, long-run averages (wages, hours worked, interest rates, inflation, consumption-shares, government-spending-ratios, etc.) are used
+ to target certain steady-state values of the endogenous variables, which implies certain values for some parameters.
+You can also use micro-studies; however, one has to be careful about the aggregation.
+
+We will focus on OECD countries and discuss one \enquote{possible} way to calibrate the model parameters (there are many other ways):
+\begin{itemize}
+\item[\(\boldsymbol{\alpha}\)] productivity parameter of capital.
+Due to the Cobb-Douglas production function this should be equal to the proportion of capital income to total income of the economy.
+So, one looks inside the national accounts for OECD countries and sets \(\alpha\) to 1 minus the share of labor income over total income.
+For most OECD countries this implies a range of 0.25 to 0.40.
+\item[\(\boldsymbol{\beta}\)] subjective intertemporal preference rate of households:
+ it is the value of future utility in relation to present utility.
+Usually this parameter takes a value slightly less than unity, indicating that agents discount the future.
+For quarterly data, we typically set it around 0.99 and for yearly data 0.96.
+These values imply a certain steady-state real rental rate.
+To see this, re-consider the Euler equation in steady-state: \(\beta = \frac{1}{\bar{r_k}+1-\delta}\) where \(r_k= \alpha \frac{y}{k}\).
+Looking at OECD data one usually finds that the average capital-to-output ratio \(k/y\) is in the range of \(9\) to \(11\).
+\item[\(\boldsymbol{\delta}\)] depreciation rate of capital stock.
+For quarterly data the literature uses values in the range of 0.02 to 0.03, for yearly data you often find 0.10.
+Again let's use a steady-state relationship to get a reasonable value.
+That is, \(\delta=\frac{\bar{I}}{\bar{K}}=\frac{\bar{I/Y}}{\bar{K/Y}}\).
+For OECD data one usually finds an average ratio of investment to output, \(\bar{I}/\bar{Y}\), around 0.25.
+\item[\(\boldsymbol{\gamma}\) and \(\boldsymbol{\psi}\)] household's preferences regarding consumption and leisure.
+Often a certain interpretation in terms of elasticities of substitution is possible.
+In the RBC model, we can make use of the first-order conditions in steady-state, i.e.
+\begin{align*}
+\frac{\psi}{\gamma} = \bar{W}\frac{(1-\bar{L})}{\bar{C}}= (1-\alpha)\left(\frac{\bar{K}}{\bar{L}}\right)^\alpha\frac{(1-\bar{L})}{\bar{C}} = (1-\alpha){\left(\frac{\bar{K}}{\bar{L}}\right)}^\alpha\frac{\frac{1}{\bar{L}}(1-\bar{L})}{\frac{\bar{C}}{\bar{L}}}
+\end{align*}
+Note that \(\bar{C}/\bar{L}\) as well as \(\bar{K}/\bar{L}\) are given in terms of already calibrated parameters (see steady-state computations).
+Therefore, one possible way is to normalize one of the parameters to unity (e.g.
+\(\gamma=1\)) and calibrate the other one in terms of steady-state ratios
+ for which we only need to set a value for steady-state hours worked \(\bar{L}\).
+In the specification of the utility function, we see that labor is normalized to be between 0 and 1.
+So, targeting 8 hours a day implies \(l=8/24=1/3\).
+\item[\(\boldsymbol{\rho_A}\) and \(\boldsymbol{\sigma_A}\)] parameters of the process for total factor productivity.
+These do not influence the steady-state values, but the dynamics of the model.
+Often you can calibrate these by e.g.\ estimating the Cobb-Douglas production function with OLS and then computing the Solow residuals.
+Then look at the persistence and standard error of the residuals.
+Typically we find that \(\rho_A\) is mostly set above 0.9 to reflect persistence of the technological process
+ and \(\sigma_A\) around \(0.6\) in the simple RBC model.
+\end{itemize}
+\item The function might look like this:
+\lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/matlab/rbcLogutilSS.m}
+You can try it out with the following parametrization (same as in the Dynare mod file):
+\lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/matlab/rbcLogutilSSTest.m}
+\item The mod file might look like this:
+\lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/dynare/rbcLogutil.mod}
+Obviously, the results are the same.
+\item
+\begin{enumerate}
+  \item For the first-order conditions of the household we now use
+  \begin{align*}
+    U_{c,t} &= \gamma c_t^{-\eta_c}
+    \\
+    U_{l,t} &= - \psi (1-l_t)^{-\eta_l}
+  \end{align*}
+  The steady-state for labor changes to
+  \begin{align*}
+    w \gamma \left(\frac{c}{l}\right)^{-\eta_c} &= \psi{(1-l)}^{-\eta_l}l^{\eta_c}
+  \end{align*}
+  This cannot be solved for \(l\) in closed-form.
+  Rather, we need to condition on the values of the parameters and use a numerical optimizer to solve for \(l\).
+  \item The function might look like this:
+  \lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/matlab/rbcSS.m}
+  You can try it out with the following parametrization (same as in the Dynare mod file):
+  \lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/matlab/rbcSSTest.m}
+  \item In Dynare we could use the following mod file:
+  \lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/dynare/rbcCES.mod}
+  and the following helper function:
+  \lstinputlisting[style=Matlab-editor,basicstyle=\mlttfamily,title=\lstname]{progs/dynare/rbcCEShelper.m}
+  Obviously, the results are the same.
+\end{enumerate} + +\end{enumerate} \ No newline at end of file diff --git a/literature/_biblio.bib b/literature/_biblio.bib index c754e44..670c27d 100644 --- a/literature/_biblio.bib +++ b/literature/_biblio.bib @@ -93,6 +93,19 @@ @book{Brandimarte_2006_NumericalMethodsFinance keywords = {Economics,Finance,Statistical methods} } +@article{Calvo_1983_StaggeredPricesUtilitymaximizing, + title = {Staggered Prices in a Utility-Maximizing Framework}, + author = {Calvo, Guillermo A.}, + year = {1983}, + month = sep, + journal = {Journal of Monetary Economics}, + volume = {12}, + number = {3}, + pages = {383--398}, + doi = {10.1016/0304-3932(83)90060-0}, + abstract = {We develop a model of staggered prices along the lines of Phelps (1978) and Taylor (1979, 1980), but utilizing an analytically more tractable price-setting technology. `Demands' are derived from utility maximization assuming Sidrauski-Brock infinitely-lived families. We show that the nature of the equilibrium path can be found out on the basis of essentially graphical techniques. Furthermore, we demonstrate the usefulness of the model by analyzing the welfare implications of monetary and fiscal policy, and by showing that despite the price level being a predetermined variable, a policy of pegging the nominal interest rate will lead to the existence of a continuum of equilibria.} +} + @inbook{Cantore.Gabriel.Levine.EtAl_2013_ScienceArtDSGE, title = {The Science and Art of {{DSGE}} Modelling: {{I}} {\textendash} Construction and {{Bayesian}} Estimation}, booktitle = {Handbook of {{Research Methods}} and {{Applications}} in {{Empirical Macroeconomics}}}, @@ -158,6 +171,16 @@ @article{Crack.Ledoit_2010_CentralLimitTheorems abstract = {Although dependence in financial data is pervasive, standard doctoral-level econometrics texts do not make clear that the common central limit theorems (CLTs) contained therein fail when applied to dependent data. 
More advanced books that are clear in their CLT assumptions do not contain any worked examples of CLTs that apply to dependent data. We address these pedagogical gaps by discussing dependence in financial data and dependence assumptions in CLTs and by giving a worked example of the application of a CLT for dependent data to the case of the derivation of the asymptotic distribution of the sample variance of a Gaussian AR(1). We also provide code and the results for a Monte-Carlo simulation used to check the results of the derivation.} } +@article{Dixit.Stiglitz_1977_MonopolisticCompetitionOptimum, + title = {Monopolistic {{Competition}} and {{Optimum Product Diversity}}}, + author = {Dixit, Avinash K. and Stiglitz, Joseph E.}, + year = {1977}, + journal = {The American Economic Review}, + volume = {67}, + number = {3}, + pages = {297--308} +} + @article{Fagiolo.Napoletano.Roventini_2008_AreOutputGrowthrate, title = {Are Output Growth-Rate Distributions Fat-Tailed? {{Some}} Evidence from {{OECD}} Countries}, author = {Fagiolo, Giorgio and Napoletano, Mauro and Roventini, Andrea}, @@ -184,6 +207,20 @@ @incollection{Fernandez-Villaverde.Rubio-Ramirez_2010_StructuralVectorAutoregres isbn = {978-0-230-23885-5 978-0-230-28083-0} } +@incollection{Fernandez-Villaverde.Rubio-Ramirez.Schorfheide_2016_SolutionEstimationMethods, + title = {Solution and {{Estimation Methods}} for {{DSGE Models}}}, + booktitle = {Handbook of {{Macroeconomics}}}, + author = {{Fern{\'a}ndez-Villaverde}, Jes{\'u}s and {Rubio-Ram{\'i}rez}, Juan F. and Schorfheide, Frank}, + editor = {Taylor, John B. and Uhlig, Harald}, + year = {2016}, + volume = {A}, + pages = {527--724}, + publisher = {{Elsevier North-Holland}}, + url = {https://doi.org/10.1016/bs.hesmac.2016.03.006}, + abstract = {This chapter provides an overview of solution and estimation techniques for dynamic stochastic general equilibrium models. 
We cover the foundations of numerical approximation techniques as well as statistical inference and survey the latest developments in the field.}, + isbn = {978-0-444-59469-3} +} + @article{Gali_1992_HowWellDoes, title = {How {{Well Does The IS-LM Model Fit Postwar U}}. {{S}}. {{Data}}?}, author = {Gali, J.}, @@ -197,6 +234,18 @@ @article{Gali_1992_HowWellDoes abstract = {Postwar U. S. time series for money, interest rates, prices, and GNP are characterized by a multivariate process driven by four exogenous disturbances. Those disturbances are identified so that they can be interpreted as the four main sources of fluctuations found in the IS-LM-Phillips curve model: money supply, money demand, IS, and aggregate supply shocks. The dynamic properties of the estimated model are analyzed and shown to match most of the stylized predictions of the model. The estimated decomposition is also used to measure the relative importance of each shock, to interpret some macroeconomic episodes, and to study sources of permanent shocks to nominal variables.} } +@book{Gali_2015_MonetaryPolicyInflation, + title = {Monetary Policy, Inflation, and the Business Cycle: An Introduction to the New {{Keynesian}} Framework and Its Applications}, + author = {Gal{\'i}, Jordi}, + year = {2015}, + edition = {Second edition}, + publisher = {{Princeton University Press}}, + address = {{Princeton ; Oxford}}, + abstract = {This revised second edition of Monetary Policy, Inflation, and the Business Cycle provides a rigorous graduate-level introduction to the New Keynesian framework and its applications to monetary policy. The New Keynesian framework is the workhorse for the analysis of monetary policy and its implications for inflation, economic fluctuations, and welfare. 
A backbone of the new generation of medium-scale models under development at major central banks and international policy institutions, the framework provides the theoretical underpinnings for the price stability{\textendash}oriented strategies adopted by most central banks in the industrialized world. Using a canonical version of the New Keynesian model as a reference, Jordi Gal{\'i} explores various issues pertaining to monetary policy's design, including optimal monetary policy and the desirability of simple policy rules. He analyzes several extensions of the baseline model, allowing for cost-push shocks, nominal wage rigidities, and open economy factors. In each case, the effects on monetary policy are addressed, with emphasis on the desirability of inflation-targeting policies. New material includes the zero lower bound on nominal interest rates and an analysis of unemployment's significance for monetary policy.}, + isbn = {978-0-691-16478-6}, + keywords = {BUSINESS & ECONOMICS / Economics / Theory,BUSINESS & ECONOMICS / Finance,BUSINESS & ECONOMICS / Money & Monetary Policy,Business cycles,Inflation (Finance),Keynesian economics,Monetary policy} +} + @book{Greenberg_2008_IntroductionBayesianEconometrics, title = {Introduction to {{Bayesian}} Econometrics}, author = {Greenberg, Edward}, @@ -220,6 +269,18 @@ @inbook{Guerron-Quintana.Nason_2013_BayesianEstimationDSGE isbn = {978-0-85793-102-3} } +@book{Heijdra_2017_FoundationsModernMacroeconomics, + title = {Foundations of Modern Macroeconomics}, + author = {Heijdra, Ben J.}, + year = {2017}, + edition = {Third edition}, + publisher = {{Oxford university Press}}, + address = {{Oxford}}, + abstract = {The study of macroeconomics can seem a daunting project. The field is complex and sometimes poorly defined and there are a variety of competing approaches. It is easy for the senior bachelor and starting master student to get lost in the forest of macroeconomics and the mathematics it uses extensively. 
Foundations of Modern Macroeconomics is a guide book for the interested and ambitious student. Non-partisan in its approach, it deals with all the major topics, summarising the important approaches and providing the reader with a coherent angle on all aspects of macroeconomic thought. Each chapter deals with a separate area of macroeconomics, and each contains a summary section of key points and a further reading list. Using nothing more than undergraduate mathematical skills, it takes the student from basic IS-LM style macro models to the state of the art literature on Dynamic Stochastic General Equilibrium, explaining the mathematical tricks used where they are first introduced. Fully updated and substantially revised, this third edition of Foundations of Modern Macroeconomics now includes brand new chapters covering highly topical subjects such as dynamic programming, competitive risk sharing equilibria and the New Keynesian DSGE approach. --}, + isbn = {978-0-19-878413-5}, + keywords = {Macroeconomics,Problems and exercises,Problems exercises etc} +} + @book{Herbst.Schorfheide_2016_BayesianEstimationDSGE, title = {Bayesian {{Estimation}} of {{DSGE Models}}}, author = {Herbst, Edward and Schorfheide, Frank}, @@ -324,6 +385,17 @@ @book{Lutkepohl_2005_NewIntroductionMultiple keywords = {Time-series analysis} } +@book{McCandless_2008_ABCsRBCsIntroduction, + title = {The {{ABCs}} of {{RBCs}}: An Introduction to Dynamic Macroeconomic Models}, + author = {McCandless, George T.}, + year = {2008}, + publisher = {{Harvard University Pres}}, + address = {{Cambridge, MA}}, + abstract = {The ABCs of RBCs is the first book to provide a basic introduction to Real Business Cycle (RBC) and New-Keynesian models. 
These models argue that random shocks, new inventions, droughts, and wars, in the case of pure RBC models, and monetary and fiscal policy and international investor risk aversion, in more open interpretations can trigger booms and recessions and can account for much of observed output volatility. George McCandless works through a sequence of these Real Business Cycle and New-Keynesian dynamic stochastic general equilibrium models in fine detail, showing how to solve them, and how to add important extensions to the basic model, such as money, price and wage rigidities, financial markets, and an open economy. The impulse response functions of each new model show how the added feature changes the dynamics. The ABCs of RBCs is designed to teach the economic practitioner or student how to build simple RBC models. Matlab code for solving many of the models is provided, and careful readers should be able to construct, solve, and use their own models. In the tradition of the freshwater economic schools of Chicago and Minnesota, McCandless enhances the methods and sophistication of current macroeconomic modeling.}, + isbn = {978-0-674-02814-2}, + keywords = {Business cycles,Econometric models,Macroeconomics} +} + @book{Miranda.Fackler_2002_AppliedComputationalEconomics, title = {Applied Computational Economics and Finance}, author = {Miranda, Mario Javier and Fackler, Paul L.}, @@ -367,6 +439,19 @@ @incollection{Ploberger_2010_LawLargeNumbers isbn = {978-0-230-23885-5 978-0-230-28083-0} } +@book{Romer_2019_AdvancedMacroeconomics, + title = {Advanced Macroeconomics}, + author = {Romer, David}, + year = {2019}, + series = {The {{McGraw-Hill}} Series in Economics}, + edition = {Fifth Edition}, + publisher = {{McGraw-Hill Education}}, + address = {{Dubuque}}, + abstract = {The fifth edition of Romer's Advanced Macroeconomics continues its tradition as the standard text and the starting point for graduate macroeconomics courses and helps lay the groundwork for students to begin 
doing research in macroeconomics and monetary economics. Romer presents the major theories concerning the central questions of macroeconomics. The theoretical analysis is supplemented by examples of relevant empirical work, illustrating the ways that theories can be applied and tested. In areas ranging from economic growth and short-run fluctuations to the natural rate of unemployment and monetary policy, formal models are used to present and analyze key ideas and issues. The book has been extensively revised to incorporate important new topics and new research, eliminate inessential material, and further improve the presentation.}, + isbn = {978-1-260-18521-8}, + keywords = {Macroeconomics} +} + @article{Rubio-Ramirez.Waggoner.Zha_2010_StructuralVectorAutoregressions, title = {Structural {{Vector Autoregressions}}: {{Theory}} of {{Identification}} and {{Algorithms}} for {{Inference}}}, author = {{Rubio-Ram{\'i}rez}, Juan F. and Waggoner, Daniel F. and Zha, Tao}, @@ -418,6 +503,17 @@ @article{Smets.Wouters_2007_ShocksFrictionsUS abstract = {Using a Bayesian likelihood approach, we estimate a dynamic stochastic general equilibrium model for the US economy using seven macroeconomic time series. The model incorporates many types of real and nominal frictions and seven types of structural shocks. We show that this model is able to compete with Bayesian Vector Autoregression models in out-of-sample prediction. We investigate the relative empirical importance of the various frictions. Finally, using the estimated model, we address a number of key issues in business cycle analysis: What are the sources of business cycle fluctuations? Can the model explain the cross correlation between output and inflation? What are the effects of productivity on hours worked? 
What are the sources of the "Great Moderation"?} } +@book{Torres_2013_IntroductionDynamicMacroeconomic, + title = {Introduction to Dynamic Macroeconomic General Equilibrium Models}, + author = {Torres, Jos{\'e} L.}, + year = {2013}, + publisher = {{Vernon Press}}, + address = {{Malaga, Spain}}, + abstract = {This book offers an introductory step-by-step course in Dynamic Stochastic General Equilibrium (DSGE) modelling. Modern macroeconomic analysis is increasingly concerned with the construction, calibration and/or estimation and simulation of DSGE models. The book is intended for graduate students as an introductory course to DSGE modelling and for those economists who would like a hands-on approach to learning the basics of modern dynamic macroeconomic modelling. The book starts with the simplest canonical neoclassical DSGE model and then gradually extends the basic framework incorporating a variety of additional features, such as consumption habit formation, investment adjustment cost, investment-specific technological change, taxes, public capital, household production, non-ricardian agents, monopolistic competition, etc. The book includes Dynare codes for the models developed that can be downloaded from the book's homepage.}, + isbn = {978-1-62273-007-0}, + keywords = {Equilibrium (Economics),Macroeconomics,Mathematical models} +} + @book{Uribe.Schmitt-Grohe_2017_OpenEconomyMacroeconomics, title = {Open Economy Macroeconomics}, author = {Uribe, Martin and {Schmitt-Grohe}, Stephanie}, @@ -426,7 +522,19 @@ @book{Uribe.Schmitt-Grohe_2017_OpenEconomyMacroeconomics address = {{Princeton, NJ}}, abstract = {Combining theoretical models and data in ways unimaginable just a few years ago, open economy macroeconomics has experienced enormous growth over the past several decades. 
This rigorous and self-contained textbook brings graduate students, scholars, and policymakers to the research frontier and provides the tools and context necessary for new research and policy proposals. Mart{\'i}n Uribe and Stephanie Schmitt-Groh{\'e} factor in the discipline's latest developments, including major theoretical advances in incorporating financial and nominal frictions into microfounded dynamic models of the open economy, the availability of macro- and microdata for emerging and developed countries, and a revolution in the tools available to simulate and estimate dynamic stochastic models. The authors begin with a canonical general equilibrium model of an open economy and then build levels of complexity through the coverage of important topics such as international business-cycle analysis, financial frictions as drivers and transmitters of business cycles and global crises, sovereign default, pecuniary externalities, involuntary unemployment, optimal macroprudential policy, and the role of nominal rigidities in shaping optimal exchange-rate policy. Based on courses taught at several universities, Open Economy Macroeconomics is an essential resource for students, researchers, and practitioners. 
Detailed exploration of international business-cycle analysis Coverage of financial frictions as drivers and transmitters of business cycles and global crises Extensive investigation of nominal rigidities and their role in shaping optimal exchange-rate policy Other topics include fixed exchange-rate regimes, involuntary unemployment, optimal macroprudential policy, and sovereign default and debt sustainability Chapters include exercises and replication codes.}, isbn = {978-0-691-15877-8}, - keywords = {BUSINESS \& ECONOMICS / Economics / Macroeconomics,BUSINESS \& ECONOMICS / Economics / Theory,BUSINESS \& ECONOMICS / Finance,BUSINESS \& ECONOMICS / Reference,Economic policy,Macroeconomics,Political planning,POLITICAL SCIENCE / Public Policy / Economic Policy} + keywords = {BUSINESS & ECONOMICS / Economics / Macroeconomics,BUSINESS & ECONOMICS / Economics / Theory,BUSINESS & ECONOMICS / Finance,BUSINESS & ECONOMICS / Reference,Economic policy,Macroeconomics,Political planning,POLITICAL SCIENCE / Public Policy / Economic Policy} +} + +@book{Walsh_2017_MonetaryTheoryPolicy, + title = {Monetary Theory and Policy}, + author = {Walsh, Carl E.}, + year = {2017}, + edition = {Fourth edition}, + publisher = {{MIT Press}}, + address = {{London, England ; Cambridge, Massachusetts}}, + abstract = {This textbook presents a comprehensive treatment of the most important topics in monetary economics, focusing on the primary models monetary economists have employed to address topics in theory and policy. Striking a balance of insight, accessibility, and rigor, the book covers the basic theoretical approaches, shows how to do simulation work with the models, and discusses the full range of frictions that economists have studied to understand the impacts of monetary policy. For the fourth edition, every chapter has been revised to improve the exposition and to reflect recent research. 
The new edition offers an entirely new chapter on the effective lower bound on nominal interest rates, forward guidance policies, and quantitative and credit easing policies. Material on the basic new Keynesian model has been reorganized into a single chapter to provide a comprehensive analysis of the model and its policy implications. In addition, the chapter on the open economy now reflects the dominance of the new Keynesian approach. Other new material includes discussions of price adjustment, labor market frictions and unemployment, and moral hazard frictions among financial intermediaries. References and end-of-chapter problems allow readers to extend their knowledge of the topics covered. Monetary Theory and Policy continues to be the most comprehensive and up-to-date treatment of monetary economics, not only the leading text in the field but also the standard reference for academics and central bank researchers.}, + isbn = {978-0-262-03581-1}, + keywords = {Monetary policy,Money} } @book{White_2001_AsymptoticTheoryEconometricians, @@ -452,3 +560,27 @@ @article{Wolf_2022_WhatCanWe doi = {10.1257/pandp.20221045}, abstract = {I use a simple business cycle model to illustrate the workings and limitations of sign restrictions in structural vector autoregressions. Three lessons emerge. First, such sign-based identification is vulnerable to ``shock masquerading'': linear combinations of other shocks may be misidentified as the shock of interest. Second, since the popular Haar prior automatically overweights more volatile shocks, the implied posterior is decisively shaped by relative shock volatilities--a feature of shocks that has nothing to do with their dynamic causal effects. 
Third, sign restrictions on structural elasticities--rather than just the usual restrictions on impulse responses--can be highly informative.} } + +@book{Woodford_2003_InterestPricesFoundations, + title = {Interest and Prices: Foundations of a Theory of Monetary Policy}, + author = {Woodford, Michael}, + year = {2003}, + publisher = {{Princeton University Press}}, + address = {{Princeton, N.J. ; Woodstock, Oxfordshire [England]}}, + abstract = {With the collapse of the Bretton Woods system, any pretense of a connection of the world's currencies to any real commodity has been abandoned. Yet since the 1980s, most central banks have abandoned money-growth targets as practical guidelines for monetary policy as well. How then can pure ``fiat'' currencies be managed so as to create confidence in the stability of national units of account? Interest and Prices seeks to provide theoretical foundations for a rule-based approach to monetary policy suitable for a world of instant communications and ever more efficient financial markets. In such a world, effective monetary policy requires that central banks construct a conscious and articulate account of what they are doing. Michael Woodford reexamines the foundations of monetary economics, and shows how interest-rate policy can be used to achieve an inflation target in the absence of either commodity backing or control of a monetary aggregate. The book further shows how the tools of modern macroeconomic theory can be used to design an optimal inflation-targeting regime\,{\textemdash}\,one that balances stabilization goals with the pursuit of price stability in a way that is grounded in an explicit welfare analysis, and that takes account of the ``New Classical'' critique of traditional policy evaluation exercises. 
It thus argues that rule-based policymaking need not mean adherence to a rigid framework unrelated to stabilization objectives for the sake of credibility, while at the same time showing the advantages of rule-based over purely discretionary policymaking.}, + isbn = {978-0-691-01049-6}, + keywords = {Interet (Economie),Monetary policy,Politique economique,Politique monetaire,Prix} +} + +@article{Yun_1996_NominalPriceRigidity, + title = {Nominal Price Rigidity, Money Supply Endogeneity, and Business Cycles}, + author = {Yun, Tack}, + year = {1996}, + month = apr, + journal = {Journal of Monetary Economics}, + volume = {37}, + number = {2}, + pages = {345--370}, + doi = {10.1016/S0304-3932(96)90040-9}, + abstract = {This paper investigates the ability of nominal price rigidity to explain the co-movement of inflation with the cyclical component of output observed in the post-war U.S. data. A dynamic general equilibrium model is constructed with the introduction of monopolistic competition and nominal price rigidity in a standard real business cycle model, allowing for an endogenous money supply rule. It is then demonstrated that sticky price models can explain the observed associations between movements in inflation and output much better than flexible price models. 
This result depends little on whether money supply is assumed to be endogenous or not.} +} diff --git a/progs/dynare/rbcCES.mod b/progs/dynare/rbcCES.mod new file mode 100644 index 0000000..717dc5c --- /dev/null +++ b/progs/dynare/rbcCES.mod @@ -0,0 +1,55 @@ +%% Declare Variables and Parameters +var y c k l a rk w iv uc ul fl fk; +varexo eps_a; +parameters ALPHA BETA DELTA GAMMA PSI RHOA ETAC ETAL; + +%% Calibration of parameters (simple) +ALPHA = 0.35; +BETA = 0.9901; +DELTA = 0.025; +GAMMA = 1; +PSI = 1.7333; +RHOA = 0.9; +ETAC = 2; +ETAL = 1.5; + + +%% Model Equations +model; +uc = GAMMA*c^(-ETAC); +ul = -PSI*(1-l)^(-ETAL); +fl = (1-ALPHA)*a*(k(-1)/l)^ALPHA; +fk = ALPHA*a*(k(-1)/l)^(ALPHA-1); + +uc = BETA*uc(+1)*(1-DELTA+rk(+1)); +w = - ul/uc; +w = fl; +rk = fk; +y = a*k(-1)^ALPHA*l^(1-ALPHA); +k = (1-DELTA)*k(-1) + iv; +y = c + iv; +log(a) = RHOA*log(a(-1)) + eps_a; +end; + +%% Steady State +steady_state_model; +a = 1; +rk = 1/BETA+DELTA-1; +k_l = ((ALPHA*a)/rk)^(1/(1-ALPHA)); +w = (1-ALPHA)*a*k_l^ALPHA; +iv_l = DELTA*k_l; +y_l = a*k_l^ALPHA; +c_l = y_l - iv_l; +l0 = 1/3; % initial guess +l = rbcCEShelper(l0,PSI,ETAL,ETAC,GAMMA,c_l,w); +c = c_l*l; +iv = iv_l*l; +k = k_l*l; +y = y_l*l; +uc = GAMMA*c^(-ETAC); +ul = -PSI*(1-l)^(-ETAL); +fl = (1-ALPHA)*a*(k_l)^ALPHA; +fk = ALPHA*a*(k_l)^(ALPHA-1); +end; + +steady; diff --git a/progs/dynare/rbcCEShelper.m b/progs/dynare/rbcCEShelper.m new file mode 100644 index 0000000..5a883e6 --- /dev/null +++ b/progs/dynare/rbcCEShelper.m @@ -0,0 +1,10 @@ +function l = rbcCEShelper(l0,PSI,ETAL,ETAC,GAMMA,c_l,w) + if ETAC == 1 && ETAL == 1 + % close-form expression + l = GAMMA/PSI*c_l^(-1)*w/(1+GAMMA/PSI*c_l^(-1)*w); + else + % use numerical optimizer + l = fsolve(@(L) w*GAMMA*c_l^(-ETAC) - PSI*(1-L)^(-ETAL)*L^ETAC ,... 
+ l0, optimset('Display','Final','TolX',1e-12,'TolFun',1e-12)); + end +end \ No newline at end of file diff --git a/progs/dynare/rbcLogutil.mod b/progs/dynare/rbcLogutil.mod new file mode 100644 index 0000000..2bc64bb --- /dev/null +++ b/progs/dynare/rbcLogutil.mod @@ -0,0 +1,94 @@ +%% Declare Variables and Parameters +var y c k l a rk w iv uc ul fl fk; +varexo eps_a; +parameters ALPHA BETA DELTA GAMMA PSI RHOA; + +%% Calibration of parameters (simple) +% ALPHA = 0.35; +% BETA = 0.9901; +% DELTA = 0.025; +% GAMMA = 1; +% PSI = 1.7333; +% RHOA = 0.9; + +%% Calibration of parameters (advanced) for OECD countries + +% target values +kss_yss = 10; % average capital productivity found in long-run averages of data +ivss_yss = 0.25; % average investment to output ratio found in long-run averages of data +wsslss_yss = 0.65; % average share of labor income to total income +lss = 1/3; % 8h/24h working time + +% flip steady-state relationships to get parameters in terms of the target values +ALPHA = 1-wsslss_yss; % labor demand in steady-state combined with Cobb-Douglas production function +DELTA = ivss_yss / kss_yss; % capital accumulation in steady-state +rkss = ALPHA/kss_yss; % capital demand in steady-state combined with Cobb-Douglas production function +BETA = 1/(rkss - DELTA + 1); % Euler equation in steady-state +% normalize GAMMA and calibrate PSI to get targeted lss +GAMMA = 1; % normalize +ass = 1; % tfp in steady-state +kss_lss = ((ALPHA*ass)/rkss)^(1/(1-ALPHA)); +kss = kss_lss*lss; +yss = kss/kss_yss; +ivss = DELTA*kss; +wss = (1-ALPHA)*ass*kss_lss^ALPHA; +css = yss-ivss; +PSI = GAMMA*(css/lss)^(-1)*wss*(lss/(1-lss))^(-1); % flipped steady-state labor equation + +RHOA = 0.9; % does not affect the steady-state + +%% Model Equations +model; +uc = GAMMA*c^(-1); +ul = -PSI/(1-l); +fl = (1-ALPHA)*a*(k(-1)/l)^ALPHA; +fk = ALPHA*a*(k(-1)/l)^(ALPHA-1); + +uc = BETA*uc(+1)*(1-DELTA+rk(+1)); +w = - ul/uc; +w = fl; +rk = fk; +y = a*k(-1)^ALPHA*l^(1-ALPHA); +k = 
(1-DELTA)*k(-1) + iv; +y = c + iv; +log(a) = RHOA*log(a(-1)) + eps_a; +end; + +%% Steady State +steady_state_model; +a = 1; +rk = 1/BETA+DELTA-1; +K_L = ((ALPHA*a)/rk)^(1/(1-ALPHA)); +w = (1-ALPHA)*a*K_L^ALPHA; +I_L = DELTA*K_L; +Y_L = a*K_L^ALPHA; +C_L = Y_L - I_L; +l = GAMMA/PSI*C_L^(-1)*w/(1+GAMMA/PSI*C_L^(-1)*w); +c = C_L*l; +iv = I_L*l; +k = K_L*l; +y = Y_L*l; +uc = GAMMA*c^(-1); +ul = -PSI/(1-l); +fl = (1-ALPHA)*a*(k/l)^ALPHA; +fk = ALPHA*a*(k/l)^(ALPHA-1); +end; + +steady; + +shocks; +var eps_a = 1; +end; + +stoch_simul(order=1,irf=30,periods=400) y c k l rk w iv a; + +figure('name','Simulated Data') +subplot(3,3,1); plot(oo_.endo_simul(ismember(M_.endo_names,'a'),300:end)); title('productivity'); +subplot(3,3,2); plot(oo_.endo_simul(ismember(M_.endo_names,'y'),300:end)); title('output'); +subplot(3,3,3); plot(oo_.endo_simul(ismember(M_.endo_names,'c'),300:end)); title('consumption'); +subplot(3,3,4); plot(oo_.endo_simul(ismember(M_.endo_names,'k'),300:end)); title('capital'); +subplot(3,3,5); plot(oo_.endo_simul(ismember(M_.endo_names,'iv'),300:end)); title('investment'); +subplot(3,3,6); plot(oo_.endo_simul(ismember(M_.endo_names,'rk'),300:end)); title('rental rate'); +subplot(3,3,7); plot(oo_.endo_simul(ismember(M_.endo_names,'l'),300:end)); title('labor'); +subplot(3,3,8); plot(oo_.endo_simul(ismember(M_.endo_names,'w'),300:end)); title('wage'); + diff --git a/progs/matlab/rbcLogutilSS.m b/progs/matlab/rbcLogutilSS.m new file mode 100644 index 0000000..dc2ea5d --- /dev/null +++ b/progs/matlab/rbcLogutilSS.m @@ -0,0 +1,67 @@ +function [SS,PARAMS,error_indicator] = rbcLogutilSS(SS,PARAMS) +% [SS,PARAMS,error_indicator] = rbcLogutilSS(SS,PARAMS) +% ---------------------------------------------------------------------- +% computes the steady-state of the RBC model with log utility analytically +% ---------------------------------------------------------------------- +% INPUTS +% - SS : structure with initial steady state values, fieldnames are variable names 
(usually empty, but might be useful for initial values) +% - params : structure with values for the parameters, fieldnames are parameter names +% ---------------------------------------------------------------------- +% OUTPUTS +% - SS : structure with computed steady state values, fieldnames are variable names +% - params : structure with updated values for the parameters, fieldnames are parameter names +% - error_indicator: 0 if no error when computing the steady-state +% ---------------------------------------------------------------------- +% Willi Mutschler (willi@mutschler.eu) +% Version: January 26, 2023 +% ---------------------------------------------------------------------- +error_indicator = 0; % initialize no error + +% read-out parameters +ALPHA = PARAMS.ALPHA; +BETA = PARAMS.BETA; +DELTA = PARAMS.DELTA; +GAMMA = PARAMS.GAMMA; +PSI = PARAMS.PSI; +RHOA = PARAMS.RHOA; + +% compute steady-state +a = 1; +rk = 1/BETA+DELTA-1; +k_l = ((ALPHA*a)/rk)^(1/(1-ALPHA)); +if k_l <= 0 + error_indicator = 1; +end +w = (1-ALPHA)*a*k_l^ALPHA; +iv_l = DELTA*k_l; +y_l = a*k_l^ALPHA; +c_l = y_l - iv_l; +if c_l <= 0 + error_indicator = 1; +end +l = GAMMA/PSI*c_l^(-1)*w/(1+GAMMA/PSI*c_l^(-1)*w); % closed-form expression for l + +c = c_l*l; +iv = iv_l*l; +k = k_l*l; +y = y_l*l; +uc = GAMMA*c^(-1); +ul = -PSI/(1-l); +fl = (1-ALPHA)*a*(k/l)^ALPHA; +fk = ALPHA*a*(k/l)^(ALPHA-1); + +% write to output structure +SS.y = y; +SS.c = c; +SS.k = k; +SS.l = l; +SS.a = a; +SS.rk = rk; +SS.w = w; +SS.iv = iv; +SS.uc = uc; +SS.ul = ul; +SS.fl = fl; +SS.fk = fk; + +end \ No newline at end of file diff --git a/progs/matlab/rbcLogutilSSTest.m b/progs/matlab/rbcLogutilSSTest.m new file mode 100644 index 0000000..5bbfd4b --- /dev/null +++ b/progs/matlab/rbcLogutilSSTest.m @@ -0,0 +1,22 @@ +% computes the steady-state of the RBC model with log utility +% ---------------------------------------------------------------------- +% Willi Mutschler (willi@mutschler.eu) +% Version: January 26, 2023 +% 
---------------------------------------------------------------------- + +% calibration +PARAMS.ALPHA = 0.35; +PARAMS.BETA = 0.9901; +PARAMS.DELTA = 0.025; +PARAMS.GAMMA = 1; +PARAMS.PSI = 1.7333; +PARAMS.RHOA = 0.9; +SS = []; % no need for initial values + +% compute steady-state +[SS,PARAMS,error_indicator] = rbcLogutilSS(SS,PARAMS); +if ~error_indicator + disp(SS); +else + error('steady-state could not be computed') +end \ No newline at end of file diff --git a/progs/matlab/rbcSS.m b/progs/matlab/rbcSS.m new file mode 100644 index 0000000..3e37f3c --- /dev/null +++ b/progs/matlab/rbcSS.m @@ -0,0 +1,91 @@ +function [SS,PARAMS,error_indicator] = rbcSS(SS,PARAMS) +% [SS,PARAMS,error_indicator] = rbcSS(SS,PARAMS) +% ---------------------------------------------------------------------- +% computes the steady-state of the RBC model with CES utility using a +% numerical optimizer to compute steady-state labor +% ---------------------------------------------------------------------- +% INPUTS +% - SS : structure with initial steady state values, fieldnames are variable names +% - params : structure with values for the parameters, fieldnames are parameter names +% ---------------------------------------------------------------------- +% OUTPUTS +% - SS : structure with computed steady state values, fieldnames are variable names +% - params : structure with updated values for the parameters, fieldnames are parameter names +% - error_indicator: 0 if no error when computing the steady-state +% ---------------------------------------------------------------------- +% Willi Mutschler (willi@mutschler.eu) +% Version: January 26, 2023 +% ---------------------------------------------------------------------- + +error_indicator = 0; % initialize no error + +% read-out parameters +ALPHA = PARAMS.ALPHA; +BETA = PARAMS.BETA; +DELTA = PARAMS.DELTA; +GAMMA = PARAMS.GAMMA; +PSI = PARAMS.PSI; +RHOA = PARAMS.RHOA; +ETAC = PARAMS.ETAC; +ETAL = PARAMS.ETAL; + +% compute steady-state +a = 
1; +rk = 1/BETA+DELTA-1; +k_l = ((ALPHA*a)/rk)^(1/(1-ALPHA)); +if k_l <= 0 + error_indicator = 1; +end +w = (1-ALPHA)*a*k_l^ALPHA; +iv_l = DELTA*k_l; +y_l = a*k_l^ALPHA; +c_l = y_l - iv_l; +if c_l <= 0 + error_indicator = 1; +end +if (ETAC == 1 && ETAL == 1) + % closed-form expression for l + l = GAMMA/PSI*c_l^(-1)*w/(1+GAMMA/PSI*c_l^(-1)*w); +else + % no closed-form solution and we therefore use a fixed-point algorithm + if error_indicator == 0 + l0 = SS.l; + [l,~,exitflag] = fsolve(@findL,l0,optimset('Display','off','TolX',1e-12,'TolFun',1e-12)); + if exitflag <= 0 + error_indicator = 1; + end + else + l = NaN; + end +end +c = c_l*l; +iv = iv_l*l; +k = k_l*l; +y = y_l*l; +uc = GAMMA*c^(-ETAC); +ul = -PSI*(1-l)^(-ETAL); +fl = (1-ALPHA)*a*(k/l)^ALPHA; +fk = ALPHA*a*(k/l)^(ALPHA-1); + +% write to output structure +SS.y = y; +SS.c = c; +SS.k = k; +SS.l = l; +SS.a = a; +SS.rk = rk; +SS.w = w; +SS.iv = iv; +SS.uc = uc; +SS.ul = ul; +SS.fl = fl; +SS.fk = fk; + +%% Auxiliary function used in optimization +% note that some variables are not explicitly declared as input arguments but get their value from above, +% i.e. 
the scope of some variables spans multiple functions +function error = findL(L) + error = w*GAMMA*c_l^(-ETAC) - PSI*(1-L)^(-ETAL)*L^ETAC; +end + +end \ No newline at end of file diff --git a/progs/matlab/rbcSSTest.m b/progs/matlab/rbcSSTest.m new file mode 100644 index 0000000..10e377d --- /dev/null +++ b/progs/matlab/rbcSSTest.m @@ -0,0 +1,24 @@ +% computes the steady-state of the RBC model with CES utility +% ---------------------------------------------------------------------- +% Willi Mutschler (willi@mutschler.eu) +% Version: January 26, 2023 +% ---------------------------------------------------------------------- + +% calibration +PARAMS.ALPHA = 0.35; +PARAMS.BETA = 0.9901; +PARAMS.DELTA = 0.025; +PARAMS.GAMMA = 1; +PARAMS.PSI = 1.7333; +PARAMS.RHOA = 0.9; +PARAMS.ETAC = 2; +PARAMS.ETAL = 1.5; +SS.l = 1/3; % initial guess for labor + +% compute steady-state +[SS,PARAMS,error_indicator] = rbcSS(SS,PARAMS); +if ~error_indicator + disp(SS); +else + error('steady-state could not be computed') +end \ No newline at end of file diff --git a/week_13.tex b/week_13.tex new file mode 100644 index 0000000..edb4c4e --- /dev/null +++ b/week_13.tex @@ -0,0 +1,30 @@ +% !TEX root = week_13.tex +\input{exercises/_common_header.tex} +\Newassociation{solution}{Solution}{week_13_solution} +\newif\ifDisplaySolutions\DisplaySolutionstrue + +\begin{document} +\title{Quantitative Macroeconomics\\~\\Winter 2023/24\\~\\Week 13} +\author{Willi Mutschler\\willi@mutschler.eu} +\date{Version: 1.0\\Latest version available on: \href{https://github.com/wmutschl/Quantitative-Macroeconomics/releases/latest/download/week_13.pdf}{GitHub}} +\maketitle\thispagestyle{empty} + +\newpage +\Opensolutionfile{week_13_solution}[week_13_solution] +\tableofcontents\thispagestyle{empty}\newpage + +\setcounter{page}{1} +\input{exercises/dsge_definition_challenges_structure.tex}\newpage +\input{exercises/rbc_model.tex}\newpage +\input{exercises/nk_model_algebra.tex}\newpage +\printbibliography + 
+\Closesolutionfile{week_13_solution} +\ifDisplaySolutions +\newpage +\appendix + +\section{Solutions} +\input{week_13_solution} +\fi +\end{document} \ No newline at end of file