From fbdf8f50473935d4591d04b6ca4ae793138ec223 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Sigmundson=20Sch=C3=B8yen?= Date: Mon, 15 Nov 2021 08:18:14 +0100 Subject: [PATCH 1/3] Recover ODENN slides --- doc/pub/odenn/html/._odenn-bs000.html | 305 +++++++++++++ doc/pub/odenn/html/._odenn-bs001.html | 297 +++++++++++++ doc/pub/odenn/html/._odenn-bs002.html | 297 +++++++++++++ doc/pub/odenn/html/._odenn-bs003.html | 304 +++++++++++++ doc/pub/odenn/html/._odenn-bs004.html | 327 ++++++++++++++ doc/pub/odenn/html/._odenn-bs005.html | 297 +++++++++++++ doc/pub/odenn/html/._odenn-bs006.html | 313 ++++++++++++++ doc/pub/odenn/html/._odenn-bs007.html | 303 +++++++++++++ doc/pub/odenn/html/._odenn-bs008.html | 317 ++++++++++++++ doc/pub/odenn/html/._odenn-bs009.html | 365 ++++++++++++++++ doc/pub/odenn/html/._odenn-bs010.html | 396 +++++++++++++++++ doc/pub/odenn/html/._odenn-bs011.html | 305 +++++++++++++ doc/pub/odenn/html/._odenn-bs012.html | 328 ++++++++++++++ doc/pub/odenn/html/._odenn-bs013.html | 440 +++++++++++++++++++ doc/pub/odenn/html/._odenn-bs014.html | 459 ++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs015.html | 311 ++++++++++++++ doc/pub/odenn/html/._odenn-bs016.html | 307 ++++++++++++++ doc/pub/odenn/html/._odenn-bs017.html | 310 ++++++++++++++ doc/pub/odenn/html/._odenn-bs018.html | 461 ++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs019.html | 424 ++++++++++++++++++ doc/pub/odenn/html/._odenn-bs020.html | 295 +++++++++++++ doc/pub/odenn/html/._odenn-bs021.html | 438 +++++++++++++++++++ doc/pub/odenn/html/._odenn-bs022.html | 323 ++++++++++++++ doc/pub/odenn/html/._odenn-bs023.html | 324 ++++++++++++++ doc/pub/odenn/html/._odenn-bs024.html | 445 +++++++++++++++++++ doc/pub/odenn/html/._odenn-bs025.html | 589 ++++++++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs026.html | 409 ++++++++++++++++++ doc/pub/odenn/html/._odenn-bs027.html | 324 ++++++++++++++ doc/pub/odenn/html/._odenn-bs028.html | 352 +++++++++++++++ doc/pub/odenn/html/._odenn-bs029.html | 311 ++++++++++++++ doc/pub/odenn/html/._odenn-bs030.html | 318 ++++++++++++++ doc/pub/odenn/html/._odenn-bs031.html | 343 +++++++++++++++ doc/pub/odenn/html/._odenn-bs032.html | 359 ++++++++++++++++ doc/pub/odenn/html/._odenn-bs033.html | 534 +++++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs034.html | 310 ++++++++++++++ doc/pub/odenn/html/._odenn-bs035.html | 310 ++++++++++++++ doc/pub/odenn/html/._odenn-bs036.html | 303 +++++++++++++ doc/pub/odenn/html/._odenn-bs037.html | 290 +++++++++++++ doc/pub/odenn/html/._odenn-bs038.html | 506 ++++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs039.html | 455 ++++++++++++++++++++ doc/pub/odenn/html/._odenn-bs040.html | 286 +++++++++++++ 41 files changed, 14690 insertions(+) create mode 100644 doc/pub/odenn/html/._odenn-bs000.html create mode 100644 doc/pub/odenn/html/._odenn-bs001.html create mode 100644 doc/pub/odenn/html/._odenn-bs002.html create mode 100644 doc/pub/odenn/html/._odenn-bs003.html create mode 100644 doc/pub/odenn/html/._odenn-bs004.html create mode 100644 doc/pub/odenn/html/._odenn-bs005.html create mode 100644 doc/pub/odenn/html/._odenn-bs006.html create mode 100644 doc/pub/odenn/html/._odenn-bs007.html create mode 100644 doc/pub/odenn/html/._odenn-bs008.html create mode 100644 doc/pub/odenn/html/._odenn-bs009.html create mode 100644 doc/pub/odenn/html/._odenn-bs010.html create mode 100644 doc/pub/odenn/html/._odenn-bs011.html create mode 100644 doc/pub/odenn/html/._odenn-bs012.html create mode 100644 doc/pub/odenn/html/._odenn-bs013.html create mode 100644 
doc/pub/odenn/html/._odenn-bs014.html create mode 100644 doc/pub/odenn/html/._odenn-bs015.html create mode 100644 doc/pub/odenn/html/._odenn-bs016.html create mode 100644 doc/pub/odenn/html/._odenn-bs017.html create mode 100644 doc/pub/odenn/html/._odenn-bs018.html create mode 100644 doc/pub/odenn/html/._odenn-bs019.html create mode 100644 doc/pub/odenn/html/._odenn-bs020.html create mode 100644 doc/pub/odenn/html/._odenn-bs021.html create mode 100644 doc/pub/odenn/html/._odenn-bs022.html create mode 100644 doc/pub/odenn/html/._odenn-bs023.html create mode 100644 doc/pub/odenn/html/._odenn-bs024.html create mode 100644 doc/pub/odenn/html/._odenn-bs025.html create mode 100644 doc/pub/odenn/html/._odenn-bs026.html create mode 100644 doc/pub/odenn/html/._odenn-bs027.html create mode 100644 doc/pub/odenn/html/._odenn-bs028.html create mode 100644 doc/pub/odenn/html/._odenn-bs029.html create mode 100644 doc/pub/odenn/html/._odenn-bs030.html create mode 100644 doc/pub/odenn/html/._odenn-bs031.html create mode 100644 doc/pub/odenn/html/._odenn-bs032.html create mode 100644 doc/pub/odenn/html/._odenn-bs033.html create mode 100644 doc/pub/odenn/html/._odenn-bs034.html create mode 100644 doc/pub/odenn/html/._odenn-bs035.html create mode 100644 doc/pub/odenn/html/._odenn-bs036.html create mode 100644 doc/pub/odenn/html/._odenn-bs037.html create mode 100644 doc/pub/odenn/html/._odenn-bs038.html create mode 100644 doc/pub/odenn/html/._odenn-bs039.html create mode 100644 doc/pub/odenn/html/._odenn-bs040.html diff --git a/doc/pub/odenn/html/._odenn-bs000.html b/doc/pub/odenn/html/._odenn-bs000.html new file mode 100644 index 000000000..51f116b28 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs000.html @@ -0,0 +1,305 @@
Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs


+Kristine Baluka Hein +

Department of Informatics, University of Oslo, Norway

Nov 11, 2018

diff --git a/doc/pub/odenn/html/._odenn-bs001.html b/doc/pub/odenn/html/._odenn-bs001.html new file mode 100644 index 000000000..804257781 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs001.html @@ -0,0 +1,297 @@

Differential equations


+The Universal Approximation Theorem states that a neural network with a single hidden layer, together with one input and one output layer, can approximate any continuous function to any given precision. With this in mind, we will look closer at whether a neural network manages to solve for the unknown function in an equation.

+A differential equation is an equation whose solution is a function; the equation specifies relations between the function's derivatives, subject to some given conditions. Typically, a differential equation is solved numerically using approximations based on Taylor series. These kinds of methods usually depend on choosing step sizes along each dimension that are small enough for the approximations to yield accurate results. As we will see in the examples, a neural network manages to outperform some well-known methods in selected cases. A possible way to solve the equation, which we will look into, is to formulate a trial solution involving the output of a neural network; the trial solution should then converge to the true solution. Be aware, though, that not all differential equations have an analytical solution. We will stick with those that do, so that we can check how well the network performs. Setting up the cost function and updating the weights and biases of the neural network appropriately is crucial for making the trial solution converge to the true solution.

diff --git a/doc/pub/odenn/html/._odenn-bs002.html b/doc/pub/odenn/html/._odenn-bs002.html new file mode 100644 index 000000000..e8584887f --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs002.html @@ -0,0 +1,297 @@

Description of the equation to solve for

+A differential equation is an equation whose solution is a function. The equation describes how the derivatives of the function behave in a given domain, along with some conditions.

+Given a differential equation, it is desirable to know how to reformulate it into a problem a neural network can solve. Having decided on which activation function each layer should use, along with the number of hidden layers and the number of neurons within each layer, the adjustable parameters of a neural network are the weights and biases of every neuron in every layer. If a differential equation can be reformulated as a minimization problem over these parameters, a neural network could possibly solve the equation.

+A trial solution might be tricky to find in general. Due to the Universal Approximation Theorem, one could hope that the output of a deep neural network solves a given differential equation, even when it is used in a simple trial solution. Let us try this idea on some well-known ordinary differential equations, and thereafter solve for functions of two variables, giving partial differential equations.

diff --git a/doc/pub/odenn/html/._odenn-bs003.html b/doc/pub/odenn/html/._odenn-bs003.html new file mode 100644 index 000000000..c18d7e520 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs003.html @@ -0,0 +1,304 @@

Ordinary Differential Equations

+An ordinary differential equation (ODE) is an equation involving functions of one variable.

+In general, an ordinary differential equation looks like + +$$ +\begin{equation} \tag{1} +f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right) = 0 +\end{equation} +$$ + +

+where \( g(x) \) is the function to find, and \( g^{(n)}(x) \) is the \( n \)-th derivative of \( g(x) \). + +

+The \( f\left(x, g(x), g'(x), g''(x), \, \dots \, , g^{(n)}(x)\right) \) is just a way to write that there is an expression involving \( x \) and \( g(x), \ g'(x), \ g''(x), \, \dots \, , \text{ and } g^{(n)}(x) \) on the left side of the equality sign in (1). The highest order of derivative, that is the value of \( n \), determines the order of the equation, and the equation is referred to as an \( n \)-th order ODE. Along with (1), some additional conditions on the function \( g(x) \) are typically given for the solution to be unique.
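+As a concrete illustration, the exponential decay equation studied later, \( g'(x) = -\gamma g(x) \), fits the general form (1) as a first-order ODE with

$$
f\left(x, \, g(x), \, g'(x)\right) = g'(x) + \gamma g(x) = 0
$$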

diff --git a/doc/pub/odenn/html/._odenn-bs004.html b/doc/pub/odenn/html/._odenn-bs004.html new file mode 100644 index 000000000..ef6fb6b6d --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs004.html @@ -0,0 +1,327 @@

The trial solution

+Let the trial solution \( g_t(x) \) be + +$$ +\begin{equation} + g_t(x) = h_1(x) + h_2(x,N(x,P)) +\tag{2} +\end{equation} +$$ + +

+where \( h_1(x) \) is a function that makes \( g_t(x) \) satisfy a given set of conditions, \( N(x,P) \) a neural network with weights and biases described by \( P \), and \( h_2(x, N(x,P)) \) some expression involving the neural network. The role of the function \( h_2(x, N(x,P)) \) is to ensure that the contribution from \( N(x,P) \) vanishes when \( g_t(x) \) is evaluated at the values of \( x \) where the given conditions must be satisfied. The function \( h_1(x) \) should alone make \( g_t(x) \) satisfy the conditions.

+But what about the network \( N(x,P) \)? As described previously, an optimization method could be used to adjust the parameters of a neural network, that being its weights and biases, through backward propagation. For the minimization to be defined, we need to have a cost function at hand to minimize.

+It is given that \( f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right) \) should be equal to zero in (1). +We can choose to consider the mean squared error as the cost function for an input \( x \). +Since we are looking at one input, the cost function is just \( f \) squared. +The cost function \( c\left(x, P \right) \) can therefore be expressed as + +$$ +c\left(x, P\right) = \big(f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right)\big)^2 +$$ + +

+If \( N \) inputs are given as a vector \( \vec x \) with elements \( x_i \) for \( i = 1,\dots,N \), +the cost function becomes + +$$ +\begin{equation} \tag{3} + c\left(\vec x, P\right) = \frac{1}{N} \sum_{i=1}^N \big(f\left(x_i, \, g(x_i), \, g'(x_i), \, g''(x_i), \, \dots \, , \, g^{(n)}(x_i)\right)\big)^2 +\end{equation} +$$ + +

+The neural network should then find some parameters \( P \) that minimize the cost function in (3) for a set of \( N \) training samples \( x_i \).

diff --git a/doc/pub/odenn/html/._odenn-bs005.html b/doc/pub/odenn/html/._odenn-bs005.html new file mode 100644 index 000000000..de6b29593 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs005.html @@ -0,0 +1,297 @@

Minimizing the cost function using gradient descent and automatic differentiation

+To perform the minimization using gradient descent, the gradient of \( c\left(\vec x, P\right) \) is needed. It might happen that finding an analytical expression for the gradient of \( c(\vec x, P) \) from (3) gets too messy, depending on which cost function one desires to use.

+Luckily, there exist libraries that do the job for us through automatic differentiation. Automatic differentiation is a method of computing the derivatives of a program by systematically applying the chain rule; the results are exact up to floating-point precision.
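+As a minimal sketch of the idea (with a made-up function, separate from the example programs below), Autograd turns a Python function into a function computing its derivative:

import autograd.numpy as np
from autograd import grad

def f(x):
    return np.sin(x)**2

df = grad(f)   # df(x) evaluates f'(x) = 2*sin(x)*cos(x)

# The two printed values agree up to floating-point precision:
print(df(1.0))
print(2*np.sin(1.0)*np.cos(1.0))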

+In the forthcoming examples presenting possible usages of Autograd and TensorFlow, it is shown how one could set up a neural network that solves a differential equation using gradient descent.

diff --git a/doc/pub/odenn/html/._odenn-bs006.html b/doc/pub/odenn/html/._odenn-bs006.html new file mode 100644 index 000000000..33f6a8ddf --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs006.html @@ -0,0 +1,313 @@

Example: Exponential decay and setting up the network using Autograd

+An exponential decay of a quantity \( g(x) \) is described by the equation + +$$ +\begin{equation} \tag{4} + g'(x) = -\gamma g(x) +\end{equation} +$$ + +

+with \( g(0) = g_0 \) for some chosen initial value \( g_0 \). + +

+The analytical solution of (4) is + +$$ +\begin{equation} + g(x) = g_0 \exp\left(-\gamma x\right) +\tag{5} +\end{equation} +$$ + +
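+As a quick check, differentiating (5) directly recovers (4):

$$
g'(x) = -\gamma g_0 \exp\left(-\gamma x\right) = -\gamma g(x)
$$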

+Having an analytical solution at hand, it is possible to use it to compare how well a neural network finds a solution of (4). + +

+In this example, a neural network will be implemented using Autograd in order to perform backpropagation. + +

diff --git a/doc/pub/odenn/html/._odenn-bs007.html b/doc/pub/odenn/html/._odenn-bs007.html new file mode 100644 index 000000000..953a5dac5 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs007.html @@ -0,0 +1,303 @@

The function to solve for


+The program will use a neural network to solve + +$$ +\begin{equation} \tag{6} +g'(x) = -\gamma g(x) +\end{equation} +$$ + +

+where \( g(0) = g_0 \) with \( \gamma \) and \( g_0 \) being some chosen values. + +

+In this example, \( \gamma = 2 \) and \( g_0 = 10 \). + +

diff --git a/doc/pub/odenn/html/._odenn-bs008.html b/doc/pub/odenn/html/._odenn-bs008.html new file mode 100644 index 000000000..99f00a0b9 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs008.html @@ -0,0 +1,317 @@

The trial solution

+To begin with, a trial solution \( g_t(x) \) must be chosen. A general trial solution for ordinary differential equations could be

$$
g_t(x, P) = h_1(x) + h_2(x, N(x, P))
$$

+with \( h_1(x) \) ensuring that \( g_t(x) \) satisfies some conditions and \( h_2(x,N(x, P)) \) an expression involving \( x \) and the output from the neural network \( N(x,P) \) with \( P \) being the collection of the weights and biases for each layer. For now, it is assumed that the network consists of one input layer, one hidden layer, and one output layer. + +

+In this network, there are no weights and biases at the input layer, so \( P = \{ P_{\text{hidden}}, P_{\text{output}} \} \). If there are \( N_{\text{hidden}} \) neurons in the hidden layer, then \( P_{\text{hidden}} \) is an \( N_{\text{hidden}} \times (1 + N_{\text{input}}) \) matrix, given that there are \( N_{\text{input}} \) neurons in the input layer.

+The first column in \( P_{\text{hidden} } \) represents the bias for each neuron in the hidden layer and the second column represents the weights for each neuron in the hidden layer from the input layer. +If there are \( N_{\text{output} } \) neurons in the output layer, then \( P_{\text{output}} \) is a \( N_{\text{output} } \times (1 + N_{\text{hidden} }) \) matrix. + +

+Its first column represents the bias of each neuron and the remaining columns represents the weights to each neuron. + +
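+As a small sketch of these shapes (the sizes here are illustrative, not taken from the programs below):

import numpy as np

N_input, N_hidden, N_output = 1, 10, 1

# First column: biases; remaining columns: weights
P_hidden = np.random.randn(N_hidden, 1 + N_input)    # shape (10, 2)
P_output = np.random.randn(N_output, 1 + N_hidden)   # shape (1, 11)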

+It is given that \( g(0) = g_0 \). The trial solution must fulfill this condition to be a proper solution of (6). A possible way to ensure that \( g_t(0, P) = g_0 \) is to let \( h_2(x, N(x,P)) = x \cdot N(x,P) \) and \( h_1(x) = g_0 \). This gives the following trial solution:

$$
\begin{equation} \tag{7}
g_t(x, P) = g_0 + x \cdot N(x, P)
\end{equation}
$$
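+Indeed, at \( x = 0 \) the network term drops out regardless of \( P \):

$$
g_t(0, P) = g_0 + 0 \cdot N(0, P) = g_0
$$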

diff --git a/doc/pub/odenn/html/._odenn-bs009.html b/doc/pub/odenn/html/._odenn-bs009.html new file mode 100644 index 000000000..758d66c2e --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs009.html @@ -0,0 +1,365 @@

Reformulating the problem

+We wish that our neural network manages to minimize a given cost function. + +

+A reformulation of our equation, (6), must therefore be done, such that it describes a problem a neural network can solve.

+The neural network must find the set of weights and biases \( P \) such that the trial solution in (7) satisfies (6). + +

+The trial solution + +$$ +g_t(x, P) = g_0 + x \cdot N(x, P) +$$ + +

+has been chosen such that it already solves the condition \( g(0) = g_0 \). What remains, is to find \( P \) such that + +$$ +\begin{equation} \tag{8} +g_t'(x, P) = - \gamma g_t(x, P) +\end{equation} +$$ + +

+is fulfilled as well as possible.

+The left-hand side and right-hand side of (8) must be computed separately, and then the neural network must choose the weights and biases contained in \( P \) such that the two sides are as equal as possible. This means that the absolute or squared difference between the sides must be as close to zero as possible, ideally exactly zero. In this case, the squared difference proves to be an appropriate measure of how erroneous the trial solution is with respect to the parameters \( P \) of the neural network.

+This gives the following cost function our neural network must solve for:

$$
\min_{P}\Big\{ \big(g_t'(x, P) - \big( -\gamma g_t(x, P)\big)\big)^2 \Big\}
$$

+(the notation \( \min_{P}\{ f(x, P) \} \) means that we desire to find \( P \) that yields the minimum of \( f(x, P) \)) + +

+or, in terms of weights and biases for the hidden and output layer in our network:

$$
\min_{P_{\text{hidden} }, \ P_{\text{output} }}\Big\{ \big(g_t'(x, \{ P_{\text{hidden} }, P_{\text{output} }\}) - \big( -\gamma g_t(x, \{ P_{\text{hidden} }, P_{\text{output} }\})\big)\big)^2 \Big\}
$$

+for an input value \( x \). + +

+If the neural network evaluates \( g_t(x, P) \) at more values of \( x \), say \( N \) values \( x_i \) for \( i = 1, \dots, N \), then the total error to minimize becomes

$$
\begin{equation} \tag{9}
\min_{P}\Big\{\frac{1}{N} \sum_{i=1}^N \big(g_t'(x_i, P) - \big( -\gamma g_t(x_i, P)\big)\big)^2 \Big\}
\end{equation}
$$

+Letting \( \vec x \) be a vector with elements \( x_i \) and \( c(\vec x, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - \big( -\gamma g_t(x_i, P)\big)\big)^2 \) denote the cost function, the minimization problem that our network must solve becomes

$$
\min_{P} c(\vec x, P)
$$

+In terms of \( P_{\text{hidden} } \) and \( P_{\text{output} } \), this could also be expressed as + +$$ +\min_{P_{\text{hidden} }, \ P_{\text{output} }} c(\vec x, \{P_{\text{hidden} }, P_{\text{output} }\}) +$$ + +

diff --git a/doc/pub/odenn/html/._odenn-bs010.html b/doc/pub/odenn/html/._odenn-bs010.html new file mode 100644 index 000000000..950fb9a1c --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs010.html @@ -0,0 +1,396 @@

A possible implementation of a neural network using Autograd


+For simplicity, it is assumed that the input is an array \( \vec x = (x_1, \dots, x_N) \) with \( N \) elements. It is at these points that the neural network should find \( P \) such that (9) is fulfilled.

+First, the neural network must feed forward the inputs. This means that \( \vec x \) must be passed through an input layer, a hidden layer and an output layer. The input layer in this case does not need to process the data any further. It consists of \( N_{\text{input} } \) neurons, passing the elements of \( \vec x \) on to each neuron in the hidden layer. The number of neurons in the hidden layer is \( N_{\text{hidden} } \).

+For the \( i \)-th neuron in the hidden layer with weight \( w_i^{\text{hidden} } \) and bias \( b_i^{\text{hidden} } \), the weighting of the input from the \( j \)-th neuron at the input layer is:

$$
\begin{aligned}
z_{i,j}^{\text{hidden}} &= b_i^{\text{hidden}} + w_i^{\text{hidden}}x_j \\
&=
\begin{pmatrix}
b_i^{\text{hidden}} & w_i^{\text{hidden}}
\end{pmatrix}
\begin{pmatrix}
1 \\
x_j
\end{pmatrix}
\end{aligned}
$$

+The result after weighting the inputs at the \( i \)-th hidden neuron can be written as a vector: + +$$ +\begin{aligned} +\vec{z}_{i}^{\text{hidden}} &= \Big( b_i^{\text{hidden}} + w_i^{\text{hidden}}x_1 , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_2, \ \dots \, , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_N\Big) \\ +&= +\begin{pmatrix} + b_i^{\text{hidden}} & w_i^{\text{hidden}} +\end{pmatrix} +\begin{pmatrix} +1 & 1 & \dots & 1 \\ +x_1 & x_2 & \dots & x_N +\end{pmatrix} \\ +&= \vec{p}_{i, \text{hidden}}^T X +\end{aligned} +$$ + +

+The vector \( \vec{p}_{i, \text{hidden}}^T \) constitutes each row in \( P_{\text{hidden} } \), which contains the weights for the neural network to minimize according to (9). + +
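+As a small illustration of this bias-row convention (a sketch with made-up numbers, mirroring the matrix \( X \) above):

import numpy as np

x = np.array([0.0, 0.5, 1.0])                                     # N = 3 input values
X = np.concatenate((np.ones((1, 3)), x.reshape(1, -1)), axis=0)   # prepend a row of ones
p_i = np.array([0.1, -0.3])                                       # (b_i, w_i) for one hidden neuron
z_i = p_i @ X                                                     # b_i + w_i*x_j for every j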

+After having found \( \vec{z}_{i}^{\text{hidden}} \) for every \( i \)-th neuron within the hidden layer, the vector will be sent to an activation function \( a_i(\vec{z}) \). + +

+In this example, the sigmoid function has been chosen to be the activation function for each hidden neuron: + +$$ +f(z) = \frac{1}{1 + \exp{(-z)}} +$$ + +

+It is possible to use other activation functions for the hidden layer as well.

+The output \( \vec{x}_i^{\text{hidden} } \) from each \( i \)-th hidden neuron is:

$$
\vec{x}_i^{\text{hidden} } = f\big( \vec{z}_{i}^{\text{hidden}} \big)
$$

+The outputs \( \vec{x}_i^{\text{hidden} } \) are then sent to the output layer. + +

+The output layer consists of one neuron in this case, and combines the output from each of the neurons in the hidden layer. The output layer combines the results from the hidden layer using some weights \( w_i^{\text{output}} \) and biases \( b_i^{\text{output}} \).

+The procedure of weighting the output of neuron \( j \) in the hidden layer into the \( i \)-th neuron in the output layer is similar to that for the hidden layer described previously.

$$
\begin{aligned}
z_{1,j}^{\text{output}} & =
\begin{pmatrix}
b_1^{\text{output}} & \vec{w}_1^{\text{output}}
\end{pmatrix}
\begin{pmatrix}
1 \\
\vec{x}_j^{\text{hidden}}
\end{pmatrix}
\end{aligned}
$$

+Expressing \( z_{1,j}^{\text{output}} \) as a vector gives the following way of weighting the inputs from the hidden layer: + +$$ +\vec{z}_{1}^{\text{output}} = +\begin{pmatrix} +b_1^{\text{output}} & \vec{w}_1^{\text{output}} +\end{pmatrix} +\begin{pmatrix} +1 & 1 & \dots & 1 \\ +\vec{x}_1^{\text{hidden}} & \vec{x}_2^{\text{hidden}} & \dots & \vec{x}_N^{\text{hidden}} +\end{pmatrix} +$$ + +

+In this case we seek a continuous range of values since we are approximating a function. This means that after computing \( \vec{z}_{1}^{\text{output}} \) the neural network has finished its feed forward step, and \( \vec{z}_{1}^{\text{output}} \) is the final output of the network. + +

diff --git a/doc/pub/odenn/html/._odenn-bs011.html b/doc/pub/odenn/html/._odenn-bs011.html new file mode 100644 index 000000000..72eeb9646 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs011.html @@ -0,0 +1,305 @@

Backpropagation using Autograd

+The next step is to decide how the parameters should be changed such that they minimize the cost function. + +

+The chosen cost function for this problem is + +$$ +c(\vec x, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - ( -\gamma g_t(x_i, P) \big)^2 +$$ + +

+In order to minimize the cost function, an optimization method must be chosen. + +

+Here, gradient descent with a constant step size has been chosen. + +

diff --git a/doc/pub/odenn/html/._odenn-bs012.html b/doc/pub/odenn/html/._odenn-bs012.html new file mode 100644 index 000000000..2b1dc588b --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs012.html @@ -0,0 +1,328 @@

Gradient descent

+The idea of the gradient descent algorithm is to update the parameters in the direction where the cost function decreases, moving toward a minimum.

+In general, the update of some parameters \( \vec \omega \) given a cost function defined by some weights \( \vec \omega \), \( c(\vec x, \vec \omega) \), goes as follows: + +$$ +\vec \omega_{\text{new} } = \vec \omega - \lambda \nabla_{\vec \omega} c(\vec x, \vec \omega) +$$ + +

+for a number of iterations, or until \( \lVert \vec \omega_{\text{new} } - \vec \omega \rVert \) becomes smaller than some given tolerance.

+The value of \( \lambda \) decides how large a step the algorithm takes in the direction of \( \nabla_{\vec \omega} c(\vec x, \vec \omega) \). The notation \( \nabla_{\vec \omega} \) denotes the gradient with respect to the elements in \( \vec \omega \).

+In our case, we have to minimize the cost function \( c(\vec x, P) \) with respect to the two sets of weights and biases, that is, \( P_{\text{hidden} } \) for the hidden layer and \( P_{\text{output} } \) for the output layer.

+This means that \( P_{\text{hidden} } \) and \( P_{\text{output} } \) are updated by

$$
\begin{aligned}
P_{\text{hidden},\text{new}} &= P_{\text{hidden}} - \lambda \nabla_{P_{\text{hidden}}} c(\vec x, P) \\
P_{\text{output},\text{new}} &= P_{\text{output}} - \lambda \nabla_{P_{\text{output}}} c(\vec x, P)
\end{aligned}
$$
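+As a toy illustration of this update rule (a minimal sketch on a hand-picked one-parameter cost, not part of the programs below):

# Minimize c(w) = w**2, whose gradient is 2*w, starting from w = 4
w, lmb = 4.0, 0.1
for _ in range(100):
    w = w - lmb * 2*w   # the update rule with a constant step size lmb
print(w)                # close to the minimum at w = 0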

+In general, one could risk using a cost function whose gradients are cumbersome to derive analytically. In our case, the cost function is just the mean squared error. One could employ a hand-written implementation of backpropagation for this case, but we will emphasize how one can use automatic differentiation to train the network instead.

+However, it might be useful to know how automatic differentiation can be used, e.g. through Autograd, in order to test an implementation.

diff --git a/doc/pub/odenn/html/._odenn-bs013.html b/doc/pub/odenn/html/._odenn-bs013.html new file mode 100644 index 000000000..fe1e93112 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs013.html @@ -0,0 +1,440 @@

The network with one input, hidden, and output layer


# Autograd will be used later, so the numpy wrapper for Autograd must be imported
+import autograd.numpy as np
+from autograd import grad, elementwise_grad
+import autograd.numpy.random as npr
+from matplotlib import pyplot as plt
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+# Assuming one input, hidden, and output layer
+def neural_network(params, x):
+
+    # Find the weights (and biases) for the hidden and output layer.
+    # Assume that params is a list of parameters for each layer.
+    # The biases are the first element for each array in params,
+    # and the weights are the remaining elements in each array in params.
+
+    w_hidden = params[0]
+    w_output = params[1]
+
+    # Assume the input x is a one-dimensional array
+    num_values = np.size(x)
+    x = x.reshape(-1, num_values)
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+
+    ## Hidden layer:
+
+    # Add a row of ones to include bias
+    x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)
+
+    z_hidden = np.matmul(w_hidden, x_input)
+    x_hidden = sigmoid(z_hidden)
+
+    ## Output layer:
+
+    # Include bias:
+    x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)
+
+    z_output = np.matmul(w_output, x_hidden)
+    x_output = z_output
+
+    return x_output
+
+# The trial solution using the deep neural network:
+def g_trial(x,params, g0 = 10):
+    return g0 + x*neural_network(params,x)
+
+# The right side of the ODE:
+def g(x, g_trial, gamma = 2):
+    return -gamma*g_trial
+
+# The cost function:
+def cost_function(P, x):
+
+    # Evaluate the trial function with the current parameters P
+    g_t = g_trial(x,P)
+
+    # Find the derivative w.r.t x of the trial function
+    d_g_t = elementwise_grad(g_trial,0)(x,P)
+
+    # The right side of the ODE
+    func = g(x, g_t)
+
+    err_sqr = (d_g_t - func)**2
+    cost_sum = np.sum(err_sqr)
+
+    return cost_sum / np.size(err_sqr)
+
+# Solve the exponential decay ODE using neural network with one input, hidden, and output layer
+def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):
+    ## Set up initial weights and biases
+
+    # For the hidden layer
+    p0 = npr.randn(num_neurons_hidden, 2 )
+
+    # For the output layer
+    p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included
+
+    P = [p0, p1]
+
+    print('Initial cost: %g'%cost_function(P, x))
+
+    ## Start finding the optimal weights using gradient descent
+
+    # Find the Python function that represents the gradient of the cost function
+    # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
+    cost_function_grad = grad(cost_function,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        # Evaluate the gradient at the current weights and biases in P.
+        # The cost_grad consist now of two arrays;
+        # one for the gradient w.r.t P_hidden and
+        # one for the gradient w.r.t P_output
+        cost_grad =  cost_function_grad(P, x)
+
+        P[0] = P[0] - lmb * cost_grad[0]
+        P[1] = P[1] - lmb * cost_grad[1]
+
+    print('Final cost: %g'%cost_function(P, x))
+
+    return P
+
+def g_analytic(x, gamma = 2, g0 = 10):
+    return g0*np.exp(-gamma*x)
+
+# Solve the given problem
+if __name__ == '__main__':
+    # Set a seed such that the weights and biases are
+    # initialized with the same values for every run.
+    npr.seed(15)
+
+    ## Decide the values of arguments to the function to solve
+    N = 10
+    x = np.linspace(0, 1, N)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = 10
+    num_iter = 10000
+    lmb = 0.001
+
+    # Use the network
+    P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)
+
+    # Print the maximum deviation between the trial solution and the true solution
+    res = g_trial(x,P)
+    res_analytical = g_analytic(x)
+
+    print('Max absolute difference: %g'%np.max(np.abs(res - res_analytical)))
+
+    # Plot the results
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of neural network solving an ODE compared to the analytical solution')
+    plt.plot(x, res_analytical)
+    plt.plot(x, res[0,:])
+    plt.legend(['analytical','nn'])
+    plt.xlabel('x')
+    plt.ylabel('g(x)')
+    plt.show()
diff --git a/doc/pub/odenn/html/._odenn-bs014.html b/doc/pub/odenn/html/._odenn-bs014.html new file mode 100644 index 000000000..f7e099c47 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs014.html @@ -0,0 +1,459 @@

The network with one input layer, a specified number of hidden layers, and one output layer


+It is also possible to extend the construction of our network into a more general one, allowing the network to contain more than one hidden layer.

+The number of neurons within each hidden layer is given as a list of integers in the program below.


import autograd.numpy as np
+from autograd import grad, elementwise_grad
+import autograd.numpy.random as npr
+from matplotlib import pyplot as plt
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+# The neural network with one input layer and one output layer,
+# but with number of hidden layers specified by the user.
+def deep_neural_network(deep_params, x):
+    # N_hidden is the number of hidden layers
+
+    N_hidden = np.size(deep_params) - 1 # -1 since params consists of
+                                        # parameters to all the hidden
+                                        # layers AND the output layer.
+
+    # Assume the input x is a one-dimensional array
+    num_values = np.size(x)
+    x = x.reshape(-1, num_values)
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+
+    # Due to multiple hidden layers, define a variable referring to the
+    # output of the previous layer:
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output
+
+# The trial solution using the deep neural network:
+def g_trial_deep(x,params, g0 = 10):
+    return g0 + x*deep_neural_network(params, x)
+
+# The right side of the ODE:
+def g(x, g_trial, gamma = 2):
+    return -gamma*g_trial
+
+# The same cost function as before, but calls deep_neural_network instead.
+def cost_function_deep(P, x):
+
+    # Evaluate the trial function with the current parameters P
+    g_t = g_trial_deep(x,P)
+
+    # Find the derivative w.r.t x of the trial function
+    d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
+
+    # The right side of the ODE
+    func = g(x, g_t)
+
+    err_sqr = (d_g_t - func)**2
+    cost_sum = np.sum(err_sqr)
+
+    return cost_sum / np.size(err_sqr)
+
+# Solve the exponential decay ODE using neural network with one input and one output layer,
+# but with specified number of hidden layers from the user.
+def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
+    # num_hidden_neurons is now a list of number of neurons within each hidden layer
+
+    # The number of elements in the list num_hidden_neurons thus represents
+    # the number of hidden layers.
+
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 )
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: %g'%cost_function_deep(P, x))
+
+    ## Start finding the optimal weights using gradient descent
+
+    # Find the Python function that represents the gradient of the cost function
+    # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
+    cost_function_deep_grad = grad(cost_function_deep,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        # Evaluate the gradient at the current weights and biases in P.
+        # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
+        # in the hidden layers and output layers evaluated at x.
+        cost_deep_grad =  cost_function_deep_grad(P, x)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_deep_grad[l]
+
+    print('Final cost: %g'%cost_function_deep(P, x))
+
+    return P
+
+def g_analytic(x, gamma = 2, g0 = 10):
+    return g0*np.exp(-gamma*x)
+
+# Solve the given problem
+if __name__ == '__main__':
+    npr.seed(15)
+
+    ## Decide the values of arguments to the function to solve
+    N = 10
+    x = np.linspace(0, 1, N)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = np.array([10,10])
+    num_iter = 10000
+    lmb = 0.001
+
+    P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
+
+    res = g_trial_deep(x,P)
+    res_analytical = g_analytic(x)
+
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of a deep neural network solving an ODE compared to the analytical solution')
+    plt.plot(x, res_analytical)
+    plt.plot(x, res[0,:])
+    plt.legend(['analytical','dnn'])
+    plt.xlabel('x')
+    plt.ylabel('g(x)')
+    plt.show()
diff --git a/doc/pub/odenn/html/._odenn-bs015.html b/doc/pub/odenn/html/._odenn-bs015.html new file mode 100644 index 000000000..639a80a11 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs015.html @@ -0,0 +1,311 @@

Example: Population growth, comparing Autograd, TensorFlow, and Euler's scheme


+A logistic model of population growth assumes that a population converges toward an equilibrium. +The population growth can be modeled by + +$$ +\begin{equation} \tag{10} + g'(t) = \alpha g(t)(A - g(t)) +\end{equation} +$$ + +

+where \( g(t) \) is the population density at time \( t \), \( \alpha > 0 \) the growth rate, and \( A > 0 \) the maximum population (the carrying capacity) of the environment. Also, at \( t = 0 \) the population has the size \( g(0) = g_0 \), where \( g_0 \) is some chosen constant.

+In this example, a network similar to the one used for the exponential decay has been used to solve the equation. However, as the implementation might suffer from, e.g., numerical instability and high execution time (this might be more apparent in the examples solving PDEs), a network has also been constructed using TensorFlow. For comparison, the forward Euler method has been implemented in order to see how the networks perform compared to a numerical scheme.

diff --git a/doc/pub/odenn/html/._odenn-bs016.html b/doc/pub/odenn/html/._odenn-bs016.html new file mode 100644 index 000000000..9df9ad9db --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs016.html @@ -0,0 +1,307 @@

Setting up the problem


+Here, we will model a population \( g(t) \) in an environment having carrying capacity \( A \). +The population follows the model + +$$ +\begin{equation} \tag{11} +g'(t) = \alpha g(t)(A - g(t)) +\end{equation} +$$ + +

+where \( g(0) = g_0 \). + +

+In this example, we let \( \alpha = 2 \), \( A = 1 \), and \( g_0 = 1.2 \). + +

diff --git a/doc/pub/odenn/html/._odenn-bs017.html b/doc/pub/odenn/html/._odenn-bs017.html new file mode 100644 index 000000000..e646c6457 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs017.html @@ -0,0 +1,310 @@

The trial solution

+We will get a slightly different trial solution, as the initial condition differs from the one in the exponential decay example.

+A possible trial solution satisfying the condition \( g(0) = g_0 \) could be

$$
g_t(t) = g_0 + t \cdot N(t,P)
$$

+with \( N(t,P) \) being the output from the neural network with weights and biases for each layer collected in the set \( P \). + +

+The analytical solution is + +$$ +g(t) = \frac{Ag_0}{g_0 + (A - g_0)\exp(-\alpha A t)} +$$ + +
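+A quick numerical sanity check of this formula (a sketch using the parameter values chosen above):

import numpy as np

alpha, A, g0 = 2, 1, 1.2
g = lambda t: A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))
print(g(0.0))   # recovers the initial condition g(0) = 1.2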

diff --git a/doc/pub/odenn/html/._odenn-bs018.html b/doc/pub/odenn/html/._odenn-bs018.html new file mode 100644 index 000000000..c8057761f --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs018.html @@ -0,0 +1,461 @@

The program using Autograd


+The network will be similar to the one for the exponential decay example, but with some small modifications for our problem.


import autograd.numpy as np
+from autograd import grad, elementwise_grad
+import autograd.numpy.random as npr
+from matplotlib import pyplot as plt
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+# Function to get the parameters.
+# Done such that one can easily change the parameters to one's liking.
+def get_parameters():
+    alpha = 2
+    A = 1
+    g0 = 1.2
+    return alpha, A, g0
+
+def deep_neural_network(P, x):
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(P) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assume the input x is a one-dimensional array
+    num_values = np.size(x)
+    x = x.reshape(-1, num_values)
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+
+    # Due to multiple hidden layers, define a variable referring to the
+    # output of the previous layer:
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = P[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = P[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output
+
+
+def cost_function_deep(P, x):
+
+    # Evaluate the trial function with the current parameters P
+    g_t = g_trial_deep(x,P)
+
+    # Find the derivative w.r.t x of the trial function
+    d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
+
+    # The right side of the ODE
+    func = f(x, g_t)
+
+    err_sqr = (d_g_t - func)**2
+    cost_sum = np.sum(err_sqr)
+
+    return cost_sum / np.size(err_sqr)
+
+# The right side of the ODE:
+def f(x, g_trial):
+    alpha,A, g0 = get_parameters()
+    return alpha*g_trial*(A - g_trial)
+
+# The trial solution using the deep neural network:
+def g_trial_deep(x, params):
+    alpha,A, g0 = get_parameters()
+    return g0 + x*deep_neural_network(params,x)
+
+# The analytical solution:
+def g_analytic(t):
+    alpha,A, g0 = get_parameters()
+    return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))
+
+def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
+    # num_hidden_neurons is now a list of number of neurons within each hidden layer
+
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 )
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: %g'%cost_function_deep(P, x))
+
+    ## Start finding the optimal weights using gradient descent
+
+    # Find the Python function that represents the gradient of the cost function
+    # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
+    cost_function_deep_grad = grad(cost_function_deep,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        # Evaluate the gradient at the current weights and biases in P.
+        # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
+        # in the hidden layers and output layers evaluated at x.
+        cost_deep_grad =  cost_function_deep_grad(P, x)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_deep_grad[l]
+
+    print('Final cost: %g'%cost_function_deep(P, x))
+
+    return P
+
+if __name__ == '__main__':
+    npr.seed(4155)
+
+    ## Decide the values of arguments to the function to solve
+    Nt = 10
+    T = 1
+    t = np.linspace(0,T, Nt)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = [100, 50, 25]
+    num_iter = 1000
+    lmb = 1e-3
+
+    P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
+
+    g_dnn_ag = g_trial_deep(t,P)
+    g_analytical = g_analytic(t)
+
+    # Find the maximum absolute difference between the solutions:
+    diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
+    print("The max absolute difference between the solutions is: %g"%diff_ag)
+
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of neural network solving an ODE compared to the analytical solution')
+    plt.plot(t, g_analytical)
+    plt.plot(t, g_dnn_ag[0,:])
+    plt.legend(['analytical','nn'])
+    plt.xlabel('t')
+    plt.ylabel('g(t)')
+
+    plt.show()
diff --git a/doc/pub/odenn/html/._odenn-bs019.html b/doc/pub/odenn/html/._odenn-bs019.html new file mode 100644 index 000000000..54b18990f --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs019.html @@ -0,0 +1,424 @@

Using forward Euler to solve the ODE


+A straightforward way of solving an ODE numerically is to use Euler's method.

+Euler's method uses a truncated Taylor series to approximate the value of a function \( f \) at a step \( \Delta x \) from \( x \):

$$
f(x + \Delta x) \approx f(x) + \Delta x f'(x)
$$

+In our case, using Euler's method to approximate the value of \( g \) at a step \( \Delta t \) from \( t \) yields + +$$ +\begin{aligned} + g(t + \Delta t) &\approx g(t) + \Delta t g'(t) \\ + &= g(t) + \Delta t \big(\alpha g(t)(A - g(t))\big) +\end{aligned} +$$ + +along with the condition that \( g(0) = g_0 \). + +

+Let \( t_i = i \cdot \Delta t \) for \( i = 0, \dots, N_t-1 \), where \( \Delta t = \frac{T}{N_t-1} \), \( T \) is the final time our solver must solve for, and \( N_t \) is the number of values for \( t \in [0, T] \).

+For \( i \geq 1 \), we have that +$$ +\begin{aligned} +t_i &= i\Delta t \\ +&= (i - 1)\Delta t + \Delta t \\ +&= t_{i-1} + \Delta t +\end{aligned} +$$ + +

+Now, if \( g_i = g(t_i) \) then

$$
\begin{equation} \tag{12}
  \begin{aligned}
  g_i &= g(t_i) \\
  &= g(t_{i-1} + \Delta t) \\
  &\approx g(t_{i-1}) + \Delta t \big(\alpha g(t_{i-1})(A - g(t_{i-1}))\big) \\
  &= g_{i-1} + \Delta t \big(\alpha g_{i-1}(A - g_{i-1})\big)
  \end{aligned}
\end{equation}
$$

for \( i \geq 1 \), with \( g_0 = g(t_0) = g(0) \) given by the initial condition.

+Equation (12) could be implemented in the following way, extending the program that sets up the network using Autograd:


# Assume that all function definitions from the example program using Autograd
+# are located here.
+
+if __name__ == '__main__':
+    npr.seed(4155)
+
+    ## Decide the values of arguments to the function to solve
+    Nt = 10
+    T = 1
+    t = np.linspace(0,T, Nt)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = [100,50,25]
+    num_iter = 1000
+    lmb = 1e-3
+
+    P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
+
+    g_dnn_ag = g_trial_deep(t,P)
+    g_analytical = g_analytic(t)
+
+    # Find the maximum absolute difference between the solutions:
+    diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
+    print("The max absolute difference between the solutions is: %g"%diff_ag)
+
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of neural network solving an ODE compared to the analytical solution')
+    plt.plot(t, g_analytical)
+    plt.plot(t, g_dnn_ag[0,:])
+    plt.legend(['analytical','nn'])
+    plt.xlabel('t')
+    plt.ylabel('g(t)')
+
+    ## Find an approximation to the function using forward Euler
+
+    alpha, A, g0 = get_parameters()
+    dt = T/(Nt - 1)
+
+    # Perform forward Euler to solve the ODE
+    g_euler = np.zeros(Nt)
+    g_euler[0] = g0
+
+    for i in range(1,Nt):
+        g_euler[i] = g_euler[i-1] + dt*(alpha*g_euler[i-1]*(A - g_euler[i-1]))
+
+    # Print the errors done by each method
+    diff1 = np.max(np.abs(g_euler - g_analytical))
+    diff2 = np.max(np.abs(g_dnn_ag[0,:] - g_analytical))
+
+    print('Max absolute difference between Euler method and analytical: %g'%diff1)
+    print('Max absolute difference between deep neural network and analytical: %g'%diff2)
+
+    # Plot results
+    plt.figure(figsize=(10,10))
+
+    plt.plot(t,g_euler)
+    plt.plot(t,g_analytical)
+    plt.plot(t,g_dnn_ag[0,:])
+
+    plt.legend(['euler','analytical','dnn'])
+    plt.xlabel('Time t')
+    plt.ylabel('g(t)')
+
+    plt.show()

+Running the program gives + +


Max absolute difference between Euler method and analytical: 0.011225
+Max absolute difference between deep neural network and analytical: 0.00424909
diff --git a/doc/pub/odenn/html/._odenn-bs020.html b/doc/pub/odenn/html/._odenn-bs020.html new file mode 100644 index 000000000..df4d79bab --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs020.html @@ -0,0 +1,295 @@

Using TensorFlow to model logistic population growth


+TensorFlow is a library widely used in the machine learning community. A neural network can be set up in a flexible manner, various optimization algorithms are readily available, and different types of networks can be used, making it easier to experiment with solving differential equations using neural networks.

diff --git a/doc/pub/odenn/html/._odenn-bs021.html b/doc/pub/odenn/html/._odenn-bs021.html new file mode 100644 index 000000000..97db583e2 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs021.html @@ -0,0 +1,438 @@

The general program flow in TensorFlow


+Usually, a program in TensorFlow is divided into two parts: the construction phase and the execution phase. In the construction phase, the computational graph that TensorFlow uses to perform its calculations is set up. In the execution phase, TensorFlow evaluates any procedure that was defined in the construction phase.

Program flow in TensorFlow - Construction phase


+Here, the architecture of the neural network is set up, along with the cost function and the optimizer class used during training of the network. Note that TensorFlow uses a different convention than our Autograd implementation for the weighting done in each neuron of each layer: the matrix-vector multiplication between the input from the previous layer and the weights at the current layer in the Autograd program is the transpose of the convention used in TensorFlow. This does not affect our construction much, as TensorFlow takes care of most of the computations; the only thing we have to be aware of is the dimensions of our inputs.
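+As a small sketch of the two shape conventions (illustrative arrays only):

import numpy as np

x = np.linspace(0, 1, 10)
X_autograd = x.reshape(1, -1)   # shape (1, 10): samples stacked as columns, as in our Autograd code
X_tf = x.reshape(-1, 1)         # shape (10, 1): one sample per row, as tf.layers.dense expects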

Program flow in TensorFlow - Execution phase


+The computational graph has been defined, and is ready to be evaluated. In order to get access to the graph, it has to be initialized and run within a Session.

The full program modeling logistic population growth using TensorFlow


import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Just to reset the graph such that it is possible to rerun this in a
+# Jupyter cell without resetting the whole kernel.
+tf.reset_default_graph()
+
+# Set a seed to ensure getting the same results from every run
+tf.set_random_seed(4155)
+
+Nt = 10
+T = 1
+t = np.linspace(0,T, Nt)
+
+## The construction phase
+
+# Convert the values the trial solution is evaluated at to a tensor.
+t_tf = tf.convert_to_tensor(t.reshape(-1,1),dtype=tf.float64)
+zeros = tf.reshape(tf.convert_to_tensor(np.zeros(t.shape)),shape=(-1,1))
+
+# Define the parameters of the equation
+alpha = tf.constant(2.,dtype=tf.float64)
+A = tf.constant(1.,dtype=tf.float64)
+g0 = tf.constant(1.2,dtype=tf.float64)
+
+num_iter = 100000
+
+# Define the number of neurons at each hidden layer
+num_hidden_neurons = [100,50,25]
+num_hidden_layers = np.size(num_hidden_neurons)
+
+# Construct the network.
+# tf.name_scope is used to group each step in the construction,
+# just for a more organized visualization in TensorBoard
+with tf.name_scope('dnn'):
+
+    # Input layer
+    previous_layer = t_tf
+
+    # Hidden layers
+    for l in range(num_hidden_layers):
+        current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l], name='hidden%d'%(l+1), activation=tf.nn.sigmoid)
+        previous_layer = current_layer
+
+    # Output layer
+    dnn_output = tf.layers.dense(previous_layer, 1, name='output')
+
+# Define the cost function
+with tf.name_scope('cost'):
+    g_trial = g0 + t_tf*dnn_output
+    d_g_trial = tf.gradients(g_trial,t_tf)
+
+    func = alpha*g_trial*(A - g_trial)
+    cost = tf.losses.mean_squared_error(zeros, d_g_trial[0] - func)
+
+
+# Choose the method to minimize the cost function, along with a learning rate
+learning_rate = 1e-2
+with tf.name_scope('train'):
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+    training_op = optimizer.minimize(cost)
+
+# Set up a reference to the result from the neural network:
+g_dnn_tf = None
+
+# Define a node that initializes all of the other nodes in the computational graph
+# used by TensorFlow:
+init = tf.global_variables_initializer()
+
+## Execution phase
+
+# Start a session where the graph defined from the construction phase can be evaluated at:
+with tf.Session() as sess:
+    # Initialize the whole graph
+    init.run()
+
+    # Evaluate the initial cost:
+    print('Initial cost: %g'%cost.eval())
+
+    # The training of the network:
+    for i in range(num_iter):
+        sess.run(training_op)
+
+        # If one desires to see how the cost function behaves for each iteration:
+        #if i % 1000 == 0:
+        #    print(cost.eval())
+
+    # Training is done, and we have an approximate solution to the ODE
+    print('Final cost: %g'%cost.eval())
+
+    # Store the result
+    g_dnn_tf = g_trial.eval()
+
+# Compare with analytical solution
+def get_parameters():
+    alpha = 2
+    A = 1
+    g0 = 1.2
+    return alpha, A, g0
+
+def g_analytic(t):
+    alpha,A, g0 = get_parameters()
+    return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))
+
+g_analytical = g_analytic(t)
+diff_tf = g_dnn_tf - g_analytical.reshape(-1,1)
+
+print('\nMax absolute difference between the analytical solution and solution from TensorFlow DNN: %g'%np.max(np.abs(diff_tf)))
+
+# Plot the result
+plt.figure(figsize=(10,10))
+
+plt.title('Numerical solutions of the ODE')
+
+plt.plot(t, g_dnn_tf)
+plt.plot(t, g_analytical)
+
+plt.legend(['dnn, tensorflow', 'exact'])
+plt.xlabel('Time t')
+plt.ylabel('g(t)')
+
+plt.show()
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs022.html b/doc/pub/odenn/html/._odenn-bs022.html new file mode 100644 index 000000000..2d4cedfeb --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs022.html @@ -0,0 +1,323 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Example: Solving the one dimensional Poisson equation using Autograd and TensorFlow

+ +

+The Poisson equation for \( g(x) \) in one dimension is + +$$ +\begin{equation} \tag{13} + -g''(x) = f(x) +\end{equation} +$$ + +

+where \( f(x) \) is a given function for \( x \in (0,1) \). + +

+The conditions that \( g(x) \) is chosen to fulfill are
+$$
+\begin{align*}
+  g(0) &= 0 \\
+  g(1) &= 0
+\end{align*}
+$$

+This equation can be solved numerically using programs based on e.g. Autograd and TensorFlow.
+The results from the networks can then be compared to the analytical solution.
+In addition, it could be interesting to see how a typical method for numerically solving second-order ODEs compares to the neural networks.

+There exist many different optimization methods implemented in TensorFlow.
+In the example programs using TensorFlow, it could also be of interest to see how
+the choice of optimization method affects our results.
+The TensorFlow documentation about optimizers lists the available optimization methods.

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs023.html b/doc/pub/odenn/html/._odenn-bs023.html new file mode 100644 index 000000000..dca062da9 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs023.html @@ -0,0 +1,324 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

The specific equation to solve for

+ +

+Here, the function \( g(x) \) to solve for follows the equation + +$$ +-g''(x) = f(x),\qquad x \in (0,1) +$$ + +

+where \( f(x) \) is a given function, along with the chosen conditions + +$$ +\begin{aligned} +g(0) = g(1) = 0 +\end{aligned}\tag{14} +$$ + +

+In this example, we consider the case when \( f(x) = (3x + x^2)\exp(x) \). + +

+For this case, a possible trial solution satisfying the conditions could be
+
+$$
+g_t(x) = x \cdot (1-x) \cdot N(P,x)
+$$
+
+since the factor \( x(1-x) \) vanishes at both \( x = 0 \) and \( x = 1 \) regardless of the network output \( N(P,x) \).

+The analytical solution for this problem is + +$$ +g(x) = x(1 - x)\exp(x) +$$ + +
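+As a quick check (a small SymPy sketch, not part of the original program), one can verify that this \( g(x) \) satisfies both the equation and the boundary conditions:
+
+import sympy as sp
+
+x = sp.symbols('x')
+g = x*(1 - x)*sp.exp(x)
+f = (3*x + x**2)*sp.exp(x)
+
+# -g''(x) = f(x) and g(0) = g(1) = 0
+assert sp.simplify(-sp.diff(g, x, 2) - f) == 0
+assert g.subs(x, 0) == 0 and g.subs(x, 1) == 0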

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs024.html b/doc/pub/odenn/html/._odenn-bs024.html new file mode 100644 index 000000000..533d09e26 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs024.html @@ -0,0 +1,445 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Solving the equation using Autograd

+ +

+ + +

import autograd.numpy as np
+from autograd import grad, elementwise_grad
+import autograd.numpy.random as npr
+from matplotlib import pyplot as plt
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+def deep_neural_network(deep_params, x):
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assumes the input x is a one-dimensional array
+    num_values = np.size(x)
+    x = x.reshape(-1, num_values)
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+
+    # Due to multiple hidden layers, define a variable referencing to the
+    # output of the previous layer:
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output
+
+def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
+    # num_neurons is now a list of the number of neurons within each hidden layer
+
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 )
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: %g'%cost_function_deep(P, x))
+
+    ## Start finding the optimal weights using gradient descent
+
+    # Find the Python function that represents the gradient of the cost function
+    # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
+    cost_function_deep_grad = grad(cost_function_deep,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        # Evaluate the gradient at the current weights and biases in P.
+        # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
+        # in the hidden layers and output layers evaluated at x.
+        cost_deep_grad =  cost_function_deep_grad(P, x)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_deep_grad[l]
+
+    print('Final cost: %g'%cost_function_deep(P, x))
+
+    return P
+
+## Set up the cost function specified for this Poisson equation:
+
+# The right side of the ODE
+def f(x):
+    return (3*x + x**2)*np.exp(x)
+
+def cost_function_deep(P, x):
+
+    # Evaluate the trial function with the current parameters P
+    g_t = g_trial_deep(x,P)
+
+    # Find the derivative w.r.t x of the trial function
+    d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
+
+    right_side = f(x)
+
+    err_sqr = (-d2_g_t - right_side)**2
+    cost_sum = np.sum(err_sqr)
+
+    return cost_sum/np.size(err_sqr)
+
+# The trial solution:
+def g_trial_deep(x,P):
+    return x*(1-x)*deep_neural_network(P,x)
+
+# The analytic solution;
+def g_analytic(x):
+    return x*(1-x)*np.exp(x)
+
+if __name__ == '__main__':
+    npr.seed(4155)
+
+    ## Decide the values of arguments to the function to solve
+    Nx = 10
+    x = np.linspace(0,1, Nx)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = [200,100]
+    num_iter = 1000
+    lmb = 1e-3
+
+    P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
+
+    g_dnn_ag = g_trial_deep(x,P)
+    g_analytical = g_analytic(x)
+
+    # Find the maximum absolute difference between the solutions:
+    max_diff = np.max(np.abs(g_dnn_ag - g_analytical))
+    print("The max absolute difference between the solutions is: %g"%max_diff)
+
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of neural network solving an ODE compared to the analytical solution')
+    plt.plot(x, g_analytical)
+    plt.plot(x, g_dnn_ag[0,:])
+    plt.legend(['analytical','nn'])
+    plt.xlabel('x')
+    plt.ylabel('g(x)')
+    plt.show()
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs025.html b/doc/pub/odenn/html/._odenn-bs025.html new file mode 100644 index 000000000..faafb9006 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs025.html @@ -0,0 +1,589 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Comparing with a numerical scheme

+ +

+The Poisson equation can be solved numerically by using a Taylor series to approximate the second derivative.

+Using Taylor series, the second derivative can be expressed as + +$$ +g''(x) = \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2} + E_{\Delta x}(x) +$$ + +

+where \( \Delta x \) is a small step size and \( E_{\Delta x}(x) \) is the error term.

+Neglecting the error term gives an approximation to the second derivative:
+
+$$
+\begin{equation} \tag{15}
+g''(x) \approx \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2}
+\end{equation}
+$$

+If \( x_i = i \Delta x = x_{i-1} + \Delta x \) and \( g_i = g(x_i) \) for \( i = 1,\dots,N_x - 2 \) with \( N_x \) being the number of values for \( x \), (15) becomes
+
+$$
+\begin{aligned}
+g''(x_i) &\approx \frac{g(x_i + \Delta x) - 2g(x_i) + g(x_i -\Delta x)}{\Delta x^2} \\
+&= \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2}
+\end{aligned}
+$$

+Since we know from our problem that + +$$ +\begin{aligned} +-g''(x) &= f(x) \\ +&= (3x + x^2)\exp(x) +\end{aligned} +$$ + +

+along with the conditions \( g(0) = g(1) = 0 \),
+the following scheme can be used to find an approximate solution for \( g(x) \) numerically:
+
+$$
+\begin{equation} \tag{16}
+  \begin{aligned}
+  -\Big( \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2} \Big) &= f(x_i) \\
+  -g_{i+1} + 2g_i - g_{i-1} &= \Delta x^2 f(x_i)
+  \end{aligned}
+\end{equation}
+$$

+for \( i = 1, \dots, N_x - 2 \) where \( g_0 = g_{N_x - 1} = 0 \) and \( f(x_i) = (3x_i + x_i^2)\exp(x_i) \), which is given for our specific problem. + +

+The equation can be rewritten into a matrix equation: + +$$ +\begin{aligned} +\begin{pmatrix} +2 & -1 & 0 & \dots & 0 \\ +-1 & 2 & -1 & \dots & 0 \\ +\vdots & & \ddots & & \vdots \\ +0 & \dots & -1 & 2 & -1 \\ +0 & \dots & 0 & -1 & 2\\ +\end{pmatrix} +\begin{pmatrix} +g_1 \\ +g_2 \\ +\vdots \\ +g_{N_x - 3} \\ +g_{N_x - 2} +\end{pmatrix} +&= +\Delta x^2 +\begin{pmatrix} +f(x_1) \\ +f(x_2) \\ +\vdots \\ +f(x_{N_x - 3}) \\ +f(x_{N_x - 2}) +\end{pmatrix} \\ +A\vec{g} &= \vec{f} +\end{aligned} +$$ + +

+which makes it possible to solve for the vector \( \vec{g} \). + +
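+The tridiagonal matrix \( A \) can also be assembled compactly with np.diag; a short sketch equivalent to the loop-based construction used in the program below:
+
+import numpy as np
+
+Nx = 10
+n = Nx - 2  # number of interior grid points
+A = 2*np.eye(n) - np.diag(np.ones(n - 1), 1) - np.diag(np.ones(n - 1), -1)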

+We can then compare the result from this numerical scheme with the output from our network using Autograd: + +

+ + +

import autograd.numpy as np
+from autograd import grad, elementwise_grad
+import autograd.numpy.random as npr
+from matplotlib import pyplot as plt
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+def deep_neural_network(deep_params, x):
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assumes the input x is a one-dimensional array
+    num_values = np.size(x)
+    x = x.reshape(-1, num_values)
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+
+    # Due to multiple hidden layers, define a variable referencing to the
+    # output of the previous layer:
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output
+
+def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
+    # num_neurons is now a list of the number of neurons within each hidden layer
+
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 )
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: %g'%cost_function_deep(P, x))
+
+    ## Start finding the optimal weights using gradient descent
+
+    # Find the Python function that represents the gradient of the cost function
+    # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
+    cost_function_deep_grad = grad(cost_function_deep,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        # Evaluate the gradient at the current weights and biases in P.
+        # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
+        # in the hidden layers and output layers evaluated at x.
+        cost_deep_grad =  cost_function_deep_grad(P, x)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_deep_grad[l]
+
+    print('Final cost: %g'%cost_function_deep(P, x))
+
+    return P
+
+## Set up the cost function specified for this Poisson equation:
+
+# The right side of the ODE
+def f(x):
+    return (3*x + x**2)*np.exp(x)
+
+def cost_function_deep(P, x):
+
+    # Evaluate the trial function with the current parameters P
+    g_t = g_trial_deep(x,P)
+
+    # Find the derivative w.r.t x of the trial function
+    d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
+
+    right_side = f(x)
+
+    err_sqr = (-d2_g_t - right_side)**2
+    cost_sum = np.sum(err_sqr)
+
+    return cost_sum/np.size(err_sqr)
+
+# The trial solution:
+def g_trial_deep(x,P):
+    return x*(1-x)*deep_neural_network(P,x)
+
+# The analytic solution;
+def g_analytic(x):
+    return x*(1-x)*np.exp(x)
+
+if __name__ == '__main__':
+    npr.seed(4155)
+
+    ## Decide the values of arguments to the function to solve
+    Nx = 10
+    x = np.linspace(0,1, Nx)
+
+    ## Set up the initial parameters
+    num_hidden_neurons = [200,100]
+    num_iter = 1000
+    lmb = 1e-3
+
+    P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
+
+    g_dnn_ag = g_trial_deep(x,P)
+    g_analytical = g_analytic(x)
+
+    # Find the maximum absolute difference between the solutions:
+
+    plt.figure(figsize=(10,10))
+
+    plt.title('Performance of neural network solving an ODE compared to the analytical solution')
+    plt.plot(x, g_analytical)
+    plt.plot(x, g_dnn_ag[0,:])
+    plt.legend(['analytical','nn'])
+    plt.xlabel('x')
+    plt.ylabel('g(x)')
+
+    ## Perform the computation using the numerical scheme
+
+    dx = 1/(Nx - 1)
+
+    # Set up the matrix A
+    A = np.zeros((Nx-2,Nx-2))
+
+    A[0,0] = 2
+    A[0,1] = -1
+
+    for i in range(1,Nx-3):
+        A[i,i-1] = -1
+        A[i,i] = 2
+        A[i,i+1] = -1
+
+    A[Nx - 3, Nx - 4] = -1
+    A[Nx - 3, Nx - 3] = 2
+
+    # Set up the vector f
+    f_vec = dx**2 * f(x[1:-1])
+
+    # Solve the equation
+    g_res = np.linalg.solve(A,f_vec)
+
+    g_vec = np.zeros(Nx)
+    g_vec[1:-1] = g_res
+
+    # Print the differences between each method
+    max_diff1 = np.max(np.abs(g_dnn_ag - g_analytical))
+    max_diff2 = np.max(np.abs(g_vec - g_analytical))
+    print("The max absolute difference between the analytical solution and DNN Autograd: %g"%max_diff1)
+    print("The max absolute difference between the analytical solution and numerical scheme: %g"%max_diff2)
+
+    # Plot the results
+    plt.figure(figsize=(10,10))
+
+    plt.plot(x,g_vec)
+    plt.plot(x,g_analytical)
+    plt.plot(x,g_dnn_ag[0,:])
+
+    plt.legend(['numerical scheme','analytical','dnn'])
+    plt.show()
+
+

+The program prints out: +

+ + +

The max absolute difference between the analytical solution and DNN Autograd: 0.000464088
+The max absolute difference between the analytical solution and numerical scheme: 0.00266858
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs026.html b/doc/pub/odenn/html/._odenn-bs026.html new file mode 100644 index 000000000..5bbb8fec6 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs026.html @@ -0,0 +1,409 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Using gradient descent in TensorFlow to solve Poisson equation

+The program follows the same idea as for the logistic population model.

+What has changed are the trial solution and the expression that the cost function minimizes.

+ + +

import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+## Construction phase
+
+# Just to reset the graph such that it is possible to rerun this in a
+# Jupyter cell without resetting the whole kernel.
+tf.reset_default_graph()
+
+tf.set_random_seed(4155)
+
+# Convert the values the trial solution is evaluated at to a tensor.
+Nx = 10
+x = np.linspace(0,1, Nx)
+x_tf = tf.convert_to_tensor(x.reshape(-1,1),dtype=tf.float64)
+
+
+num_iter = 10000
+
+# Define the number of neurons at each hidden layer
+num_hidden_neurons = [20,10]
+num_hidden_layers = np.size(num_hidden_neurons)
+
+# Construct the network.
+# tf.name_scope is used to group each step in the construction,
+# just for a more organized visualization in TensorBoard
+with tf.name_scope('dnn'):
+
+    # Input layer
+    previous_layer = x_tf
+
+    # Hidden layers
+    for l in range(num_hidden_layers):
+        current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l], name='hidden%d'%(l+1), activation=tf.nn.sigmoid)
+        previous_layer = current_layer
+
+    # Output layer
+    dnn_output = tf.layers.dense(previous_layer, 1, name='output')
+
+# Define the cost function
+with tf.name_scope('cost'):
+    g_trial = x_tf*(1-x_tf)*dnn_output
+    d_g_trial = tf.gradients(g_trial,x_tf)
+    d2_g_trial = tf.gradients(d_g_trial,x_tf)
+
+    right_side = (3*x_tf + x_tf**2)*tf.exp(x_tf)
+
+    err = tf.square( -d2_g_trial[0] - right_side)
+    cost = tf.reduce_sum(err, name = 'cost')
+
+# Choose the method to minimize the cost function, along with a learning rate
+learning_rate = 1e-2
+with tf.name_scope('train'):
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+    training_op = optimizer.minimize(cost)
+
+g_dnn_tf = None
+
+# Define a node that initializes all of the other nodes in the computational graph
+# used by TensorFlow:
+init = tf.global_variables_initializer()
+
+
+## Execution phase
+
+# Start a session where the graph defined from the construction phase can be evaluated at:
+
+with tf.Session() as sess:
+    # Initialize the whole graph
+    init.run()
+
+    # Evaluate the initial cost:
+    print('Initial cost: %g'%cost.eval())
+
+    # The training of the network:
+    for i in range(num_iter):
+        sess.run(training_op)
+
+    # Training is done, and we have an approximate solution to the ODE
+    print('Final cost: %g'%cost.eval())
+
+    # Store the result
+    g_dnn_tf = g_trial.eval()
+
+    writer = tf.summary.FileWriter("./output", sess.graph)
+    writer.close()
+
+# Evaluate the analytical function to compare with
+def g_analytic(x):
+    return x*(1-x)*np.exp(x)
+
+g_analytical = g_analytic(x)
+
+diff_tf = g_dnn_tf - g_analytical.reshape(-1,1)
+
+print('\nMax absolute difference between the analytical solution and solution from TensorFlow DNN: %g'%np.max(np.abs(diff_tf)))
+
+# Plot the result
+plt.figure(figsize=(10,10))
+
+plt.title('Numerical solutions of the ODE')
+
+plt.plot(x, g_dnn_tf)
+plt.plot(x, g_analytical)
+
+plt.legend(['dnn, tensorflow','exact'])
+plt.xlabel('x')
+plt.ylabel('g(x)')
+
+plt.show()
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs027.html b/doc/pub/odenn/html/._odenn-bs027.html new file mode 100644 index 000000000..bb0b5444e --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs027.html @@ -0,0 +1,324 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Using a different optimization algorithm implemented in TensorFlow to solve Poisson equation

+ +

+We can see that the results using GradientDescentOptimizer seem to converge towards the analytical solution.
+But many other optimization methods exist as well; see the TensorFlow documentation on Optimizers.

+Adam is an optimization algorithm that adapts its learning rate for each parameter at every iteration, based on the function it tries to minimize.
+The algorithm is described in this paper.
+How much the choice of optimization algorithm matters for the convergence of the network could be interesting to experiment with.
+Using the same TensorFlow program as before, the only change needed is to replace the variable optimizer.
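+For intuition, here is a minimal NumPy sketch of a single Adam update step (following the notation of the paper; the names and default values here are illustrative, not TensorFlow internals):
+
+import numpy as np
+
+def adam_step(theta, grad, m, v, i, eta=1e-2, beta1=0.9, beta2=0.999, eps=1e-8):
+    # Exponentially decaying averages of the gradient and its square
+    m = beta1*m + (1 - beta1)*grad
+    v = beta2*v + (1 - beta2)*grad**2
+    # Bias-corrected estimates; i is the 1-based iteration number
+    m_hat = m/(1 - beta1**i)
+    v_hat = v/(1 - beta2**i)
+    # The division by sqrt(v_hat) gives an effective per-parameter step size
+    theta = theta - eta*m_hat/(np.sqrt(v_hat) + eps)
+    return theta, m, v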

+In the program that uses TensorFlow to solve the Poisson equation, change the line

+ + +

optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+
+

+to + +

+ + +

optimizer = tf.train.AdamOptimizer(learning_rate)
+
+

+The program using the Adam optimizer with a different initial learning rate indeed yields an interesting result:

+ + +

Max absolute difference between the analytical solution and solution from TensorFlow DNN: 7.11243e-05
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs028.html b/doc/pub/odenn/html/._odenn-bs028.html new file mode 100644 index 000000000..1acb1f815 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs028.html @@ -0,0 +1,352 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Partial Differential Equations

+A partial differential equation (PDE) is an equation whose solution is a function of multiple variables.
+The equation may involve all kinds of combinations of the variables the function is differentiated with respect to.

+In general, a partial differential equation for a function \( g(x_1,\dots,x_N) \) with \( N \) variables may be expressed as
+
+$$
+\begin{equation} \tag{17}
+  f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial^2 g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) = 0
+\end{equation}
+$$

+where \( f \) is an expression involving all kinds of possible mixed derivatives of \( g(x_1,\dots,x_N) \) up to an order \( n \). In order for the solution to be unique, some additional conditions must also be given. + +

+The problem our network must solve is similar to the ODE case.
+We must have a trial solution \( g_t \) at hand.

+For instance, the trial solution could be expressed as +$$ +\begin{align*} + g_t(x_1,\dots,x_N) = h_1(x_1,\dots,x_N) + h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P)) +\end{align*} +$$ + +where \( h_1(x_1,\dots,x_N) \) is a function that ensures \( g_t(x_1,\dots,x_N) \) satisfies some given conditions. +The neural network \( N(x_1,\dots,x_N,P) \) has weights and biases described by \( P \) and \( h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P)) \) is an expression using the output from the neural network in some way. + +

+The role of the function \( h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P)) \), is to ensure that the output of \( N(x_1,\dots,x_N,P) \) is zero when \( g_t(x_1,\dots,x_N) \) is evaluated at the values of \( x_1,\dots,x_N \) where the given conditions must be satisfied. The function \( h_1(x_1,\dots,x_N) \) should alone make \( g_t(x_1,\dots,x_N) \) satisfy the conditions. + +

+The network then tries to minimize the cost function, following the same ideas as described for the ODE case, but now with more than one variable to consider.
+The concept remains the same: find a set of parameters \( P \) such that the expression \( f \) in (17) is as close to zero as possible.

+As in the ODE case, the cost function is the mean squared error that the network must try to minimize. For a single set of input values it reads
+
+$$
+\begin{equation*}
+c\left(x_1, \dots, x_N, P\right) = \left( f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial^2 g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) \right)^2
+\end{equation*}
+$$

+If we let \( \vec x = \big( x_1, \dots, x_N \big) \) be an array containing the values for \( x_1, \dots, x_N \) respectively, the cost function can be reformulated into the following:
+$$
+\begin{equation*}
+  c\left(\vec{x}, P\right) = \left( f\left( \vec{x}, \frac{\partial g(\vec x) }{\partial x_1}, \dots , \frac{\partial g(\vec x) }{\partial x_N}, \frac{\partial^2 g(\vec x) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\vec x) }{\partial x_N^n} \right) \right)^2
+\end{equation*}
+$$

+If we also have \( M \) different sets of values for \( x_1, \dots, x_N \), that is \( \vec{x}_i = \big(x_1^{(i)}, \dots, x_N^{(i)}\big) \) for \( i = 1,\dots,M \) being the rows in the matrix \( X \), the cost function can be generalized into
+$$
+\begin{equation*}
+c\left(X, P \right) = \sum_{i=1}^M \left( f\left( \vec{x}_i, \frac{\partial g(\vec{x}_i) }{\partial x_1}, \dots , \frac{\partial g(\vec{x}_i) }{\partial x_N}, \frac{\partial^2 g(\vec{x}_i) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\vec{x}_i) }{\partial x_N^n} \right) \right)^2
+\end{equation*}
+$$
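+Schematically, this could be computed as in the following sketch, where residual(x_i, P) is a hypothetical helper evaluating the expression \( f \) of (17) at a single point (dividing the sum by \( M \) recovers the mean squared error):
+
+import numpy as np
+
+def cost(P, X):
+    # X holds one evaluation point per row; residual is assumed defined elsewhere
+    return np.sum(np.array([residual(x_i, P)**2 for x_i in X]))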

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs029.html b/doc/pub/odenn/html/._odenn-bs029.html new file mode 100644 index 000000000..8899d835e --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs029.html @@ -0,0 +1,311 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Example: The diffusion equation

+ +

+In one spatial dimension, the equation reads +$$ +\begin{equation*} + \frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2} +\end{equation*} +$$ + +

+where a possible choice of conditions are +$$ +\begin{align*} +g(0,t) &= 0 ,\qquad t \geq 0 \\ +g(1,t) &= 0, \qquad t \geq 0 \\ +g(x,0) &= u(x),\qquad x\in [0,1] +\end{align*} +$$ + +with \( u(x) \) being some given function. + +

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs030.html b/doc/pub/odenn/html/._odenn-bs030.html new file mode 100644 index 000000000..a713bab94 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs030.html @@ -0,0 +1,318 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Defining the problem

+ +

+For this case, we want to find \( g(x,t) \) such that + +$$ +\begin{equation} + \frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2} +\end{equation} \tag{18} +$$ + +

+and + +$$ +\begin{align*} +g(0,t) &= 0 ,\qquad t \geq 0 \\ +g(1,t) &= 0, \qquad t \geq 0 \\ +g(x,0) &= u(x),\qquad x\in [0,1] +\end{align*} +$$ + +with \( u(x) = \sin(\pi x) \). + +

+First, let us set up the deep neural network, which will follow the same structure as discussed in the examples solving the ODEs.
+We begin by looking into how Autograd could be used in a network tailored to solving for bivariate functions.

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs031.html b/doc/pub/odenn/html/._odenn-bs031.html new file mode 100644 index 000000000..74089c3d5 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs031.html @@ -0,0 +1,343 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Setting up the network using Autograd

+ +

+The only change needed here is to extend our network such that functions of multiple variables are handled correctly.
+In this case, the function to solve for has two variables, namely time \( t \) and position \( x \).
+Each point will be represented by a one-dimensional array in the program.
+Given arrays of the desired \( x \)-values and \( t \)-values to approximate the solution at, the program will evaluate the network at each possible pair \( (x,t) \).

+ + +

def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+def deep_neural_network(deep_params, x):
+    # x is now a point and a 1D numpy array; make it a column vector
+    num_coordinates = np.size(x,0)
+    x = x.reshape(num_coordinates,-1)
+
+    num_points = np.size(x,1)
+
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output[0][0]
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs032.html b/doc/pub/odenn/html/._odenn-bs032.html new file mode 100644 index 000000000..7a02a906a --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs032.html @@ -0,0 +1,359 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Setting up the network using Autograd; The trial solution

+The cost function must then iterate through the given arrays containing values for \( x \) and \( t \), define a point \( (x,t) \) at which the deep neural network and the trial solution are evaluated, and then find the Jacobian of the trial solution.

+A possible trial solution for this PDE is + +$$ +g_t(x,t) = h_1(x,t) + x(1-x)tN(x,t,P) +$$ + +

+with \( h_1(x,t) \) being a function ensuring that \( g_t(x,t) \) satisfies our given conditions, and \( N(x,t,P) \) being the output from the deep neural network using the weights and biases for each layer from \( P \).

+To fulfill the conditions, \( h_1(x,t) \) could be:
+
+$$
+h_1(x,t) = (1-t)\Big(u(x) - \big((1-x)u(0) + x u(1)\big)\Big) = (1-t)u(x) = (1-t)\sin(\pi x)
+$$
+since \( u(0) = u(1) = 0 \) and \( u(x) = \sin(\pi x) \).

+The Jacobian is used because the program must find the derivative of the trial solution with respect to \( x \) and \( t \). + +

+This gives the necessity of computing the Jacobian matrix, as we want to evaluate the gradient with respect to \( x \) and \( t \) (note that the Jacobian of a scalar-valued multivariate function is simply its gradient). + +

+In Autograd, the differentiation is by default done with respect to the first input argument of your Python function. Since the point is an array representing \( x \) and \( t \), the Jacobian is calculated with respect to the values of \( x \) and \( t \).

+To find the second derivatives with respect to \( x \) and \( t \), the Jacobian can be taken a second time. The result is the Hessian matrix, which contains all the possible second-order (mixed) derivatives of \( g(x,t) \).
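+To make this concrete, here is a tiny Autograd sketch (not part of the original program) of jacobian and hessian applied to a scalar function of two variables; the values in the comments are what the calls return:
+
+import autograd.numpy as np
+from autograd import jacobian, hessian
+
+def g(point):
+    x, t = point
+    return x**2*t
+
+p = np.array([1.0, 2.0])
+print(jacobian(g)(p))  # gradient [2xt, x^2] -> [4., 1.]
+print(hessian(g)(p))   # [[2t, 2x], [2x, 0]] -> [[4., 2.], [2., 0.]]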

+ + +

# Set up the trial function:
+def u(x):
+    return np.sin(np.pi*x)
+
+def g_trial(point,P):
+    x,t = point
+    return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
+
+# The right side of the ODE:
+def f(point):
+    return 0.
+
+# The cost function:
+def cost_function(P, x, t):
+    cost_sum = 0
+
+    g_t_jacobian_func = jacobian(g_trial)
+    g_t_hessian_func = hessian(g_trial)
+
+    for x_ in x:
+        for t_ in t:
+            point = np.array([x_,t_])
+
+            g_t = g_trial(point,P)
+            g_t_jacobian = g_t_jacobian_func(point,P)
+            g_t_hessian = g_t_hessian_func(point,P)
+
+            g_t_dt = g_t_jacobian[1]
+            g_t_d2x = g_t_hessian[0][0]
+
+            func = f(point)
+
+            err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
+            cost_sum += err_sqr
+
+    return cost_sum
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs033.html b/doc/pub/odenn/html/._odenn-bs033.html new file mode 100644 index 000000000..e5c55829d --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs033.html @@ -0,0 +1,534 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Setting up the network using Autograd; The full program

+Having set up the network, along with the trial solution and cost function, we can now see how the deep neural network performs by comparing the results to the analytical solution. + +

+The analytical solution of our problem is + +$$ +g(x,t) = \exp(-\pi^2 t)\sin(\pi x) +$$ + +
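+This can be verified with a small SymPy sketch (not part of the original program):
+
+import sympy as sp
+
+x, t = sp.symbols('x t')
+g = sp.exp(-sp.pi**2*t)*sp.sin(sp.pi*x)
+
+# g_t = g_xx, the boundary values vanish, and g(x,0) = sin(pi x)
+assert sp.simplify(sp.diff(g, t) - sp.diff(g, x, 2)) == 0
+assert g.subs(x, 0) == 0 and g.subs(x, 1) == 0
+assert sp.simplify(g.subs(t, 0) - sp.sin(sp.pi*x)) == 0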

+A possible way to implement a neural network solving the PDE is given below.
+Be aware, though, that it is fairly slow for the parameters used.
+A better result is possible, but requires more iterations, and thus a longer time to complete.

+Using only 20 neurons in one hidden layer, the program managed to make the trial solution have a maximum absolute error of 0.0075. The execution time, however, was approximately one day and 14 hours on a computer with an Intel i7-7560U 2.4 GHz CPU.

+Indeed, the program below is not optimal in its implementation, but rather serves as an example of how to implement and use a neural network to solve a PDE.
+The TensorFlow implementation in the next example, solving the wave equation, has a much better execution time.

+ + +

import autograd.numpy as np
+from autograd import jacobian,hessian,grad
+import autograd.numpy.random as npr
+from matplotlib import cm
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import axes3d
+
+## Set up the network
+
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+def deep_neural_network(deep_params, x):
+    # x is now a point and a 1D numpy array; make it a column vector
+    num_coordinates = np.size(x,0)
+    x = x.reshape(num_coordinates,-1)
+
+    num_points = np.size(x,1)
+
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output[0][0]
+
+## Define the trial solution and cost function
+def u(x):
+    return np.sin(np.pi*x)
+
+def g_trial(point,P):
+    x,t = point
+    return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
+
+# The right side of the ODE:
+def f(point):
+    return 0.
+
+# The cost function:
+def cost_function(P, x, t):
+    cost_sum = 0
+
+    g_t_jacobian_func = jacobian(g_trial)
+    g_t_hessian_func = hessian(g_trial)
+
+    for x_ in x:
+        for t_ in t:
+            point = np.array([x_,t_])
+
+            g_t = g_trial(point,P)
+            g_t_jacobian = g_t_jacobian_func(point,P)
+            g_t_hessian = g_t_hessian_func(point,P)
+
+            g_t_dt = g_t_jacobian[1]
+            g_t_d2x = g_t_hessian[0][0]
+
+            func = f(point)
+
+            err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
+            cost_sum += err_sqr
+
+    return cost_sum /( np.size(x)*np.size(t) )
+
+## For comparison, define the analytical solution
+def g_analytic(point):
+    x,t = point
+    return np.exp(-np.pi**2*t)*np.sin(np.pi*x)
+
+## Set up a function for training the network to solve for the equation
+def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since the input is a point (x,t), +1 to include bias
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: ',cost_function(P, x, t))
+
+    cost_function_grad = grad(cost_function,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        cost_grad =  cost_function_grad(P, x , t)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_grad[l]
+
+    print('Final cost: ',cost_function(P, x, t))
+
+    return P
+
+if __name__ == '__main__':
+    ### Use the neural network:
+    npr.seed(15)
+
+    ## Decide the values of arguments to the function to solve
+    Nx = 10; Nt = 10
+    x = np.linspace(0, 1, Nx)
+    t = np.linspace(0,1,Nt)
+
+    ## Set up the parameters for the network
+    num_hidden_neurons = [100, 25]
+    num_iter = 250
+    lmb = 0.01
+
+    P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
+
+    ## Store the results
+    g_dnn_ag = np.zeros((Nx, Nt))
+    G_analytical = np.zeros((Nx, Nt))
+    for i,x_ in enumerate(x):
+        for j, t_ in enumerate(t):
+            point = np.array([x_, t_])
+            g_dnn_ag[i,j] = g_trial(point,P)
+
+            G_analytical[i,j] = g_analytic(point)
+
+    # Find the max difference between the analytical and the computed solution
+    diff_ag = np.abs(g_dnn_ag - G_analytical)
+    print('Max absolute difference between the analytical solution and the network: %g'%np.max(diff_ag))
+
+    ## Plot the solutions in two dimensions, that being in position and time
+
+    T,X = np.meshgrid(t,x)
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Solution from the deep neural network w/ %d layers'%len(num_hidden_neurons))
+    s = ax.plot_surface(T,X,g_dnn_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Analytical solution')
+    s = ax.plot_surface(T,X,G_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Difference')
+    s = ax.plot_surface(T,X,diff_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+    ## Take some slices of the 3D plots just to see the solutions at particular times
+    indx1 = 0
+    indx2 = int(Nt/2)
+    indx3 = Nt-1
+
+    t1 = t[indx1]
+    t2 = t[indx2]
+    t3 = t[indx3]
+
+    # Slice the results from the DNN
+    res1 = g_dnn_ag[:,indx1]
+    res2 = g_dnn_ag[:,indx2]
+    res3 = g_dnn_ag[:,indx3]
+
+    # Slice the analytical results
+    res_analytical1 = G_analytical[:,indx1]
+    res_analytical2 = G_analytical[:,indx2]
+    res_analytical3 = G_analytical[:,indx3]
+
+    # Plot the slices
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t1)
+    plt.plot(x, res1)
+    plt.plot(x,res_analytical1)
+    plt.legend(['dnn','analytical'])
+
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t2)
+    plt.plot(x, res2)
+    plt.plot(x,res_analytical2)
+    plt.legend(['dnn','analytical'])
+
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t3)
+    plt.plot(x, res3)
+    plt.plot(x,res_analytical3)
+    plt.legend(['dnn','analytical'])
+
+    plt.show()
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs034.html b/doc/pub/odenn/html/._odenn-bs034.html new file mode 100644 index 000000000..a27031836 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs034.html @@ -0,0 +1,310 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Example: Solving the wave equation using Autograd and TensorFlow

+ +

+The wave equation is +$$ +\begin{equation*} + \frac{\partial^2 g(x,t)}{\partial t^2} = c^2\frac{\partial^2 g(x,t)}{\partial x^2} +\end{equation*} +$$ + +

+with \( c \) being the specified wave speed. + +

+Here, the chosen conditions are
+$$
+\begin{align*}
+  g(0,t) &= 0 \\
+  g(1,t) &= 0 \\
+  g(x,0) &= u(x) \\
+  \frac{\partial g(x,t)}{\partial t} \Big |_{t = 0} &= v(x)
+\end{align*}
+$$
+
+where \( \frac{\partial g(x,t)}{\partial t} \Big |_{t = 0} \) means that the derivative of \( g(x,t) \) with respect to \( t \) is evaluated at \( t = 0 \), and \( u(x) \) and \( v(x) \) are given functions.

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs035.html b/doc/pub/odenn/html/._odenn-bs035.html new file mode 100644 index 000000000..ced1c106b --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs035.html @@ -0,0 +1,310 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

The problem to solve for

+ +

+The wave equation to solve is
+
+$$
+\begin{equation} \tag{19}
+\frac{\partial^2 g(x,t)}{\partial t^2} = c^2 \frac{\partial^2 g(x,t)}{\partial x^2}
+\end{equation}
+$$

+where \( c \) is the given wave speed. +The chosen conditions for this equation are + +$$ +\begin{aligned} +g(0,t) &= 0, &t \geq 0 \\ +g(1,t) &= 0, &t \geq 0 \\ +g(x,0) &= u(x), &x\in[0,1] \\ +\frac{\partial g(x,t)}{\partial t}\Big |_{t = 0} &= v(x), &x \in [0,1] +\end{aligned} \tag{20} +$$ + +

+In this example, let \( c = 1 \) and \( u(x) = \sin(\pi x) \) and \( v(x) = -\pi\sin(\pi x) \). + +

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs036.html b/doc/pub/odenn/html/._odenn-bs036.html new file mode 100644 index 000000000..96a0c8851 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs036.html @@ -0,0 +1,303 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

The trial solution

+Setting up the network is done in a similar manner as in the example of solving the diffusion equation.
+The only things we have to change are the trial solution, such that it satisfies the conditions from (20), and the cost function.

+The trial solution becomes slightly different since we have other conditions than in the example of solving the diffusion equation. Here, a possible trial solution \( g_t(x,t) \) is + +$$ +g_t(x,t) = h_1(x,t) + x(1-x)t^2N(x,t,P) +$$ + +

+where + +$$ +h_1(x,t) = (1-t^2)u(x) + tv(x) +$$ + +

+Note that this trial solution satisfies the conditions only if \( u(0) = v(0) = u(1) = v(1) = 0 \), which is the case in this example. + +
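+This can be checked with a small SymPy sketch (not part of the original program; the network output is represented by a constant symbol N, which suffices since the factor \( t^2 \) removes its contribution at \( t = 0 \)):
+
+import sympy as sp
+
+x, t, N = sp.symbols('x t N')  # N stands in for the network output
+u = sp.sin(sp.pi*x)
+v = -sp.pi*sp.sin(sp.pi*x)
+g_t = (1 - t**2)*u + t*v + x*(1 - x)*t**2*N
+
+assert sp.simplify(g_t.subs(t, 0) - u) == 0               # g_t(x,0) = u(x)
+assert sp.simplify(sp.diff(g_t, t).subs(t, 0) - v) == 0   # dg_t/dt(x,0) = v(x)
+assert sp.simplify(g_t.subs(x, 0)) == 0                   # g_t(0,t) = 0
+assert sp.simplify(g_t.subs(x, 1)) == 0                   # g_t(1,t) = 0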

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs037.html b/doc/pub/odenn/html/._odenn-bs037.html new file mode 100644 index 000000000..883c92633 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs037.html @@ -0,0 +1,290 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

The analytical solution

+ +

+The analytical solution for our specific problem is
+
+$$
+g(x,t) = \sin(\pi x)\cos(\pi t) - \sin(\pi x)\sin(\pi t)
+$$
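+A quick SymPy sketch (not part of the original program) confirms that this satisfies the wave equation with \( c = 1 \) and all four conditions:
+
+import sympy as sp
+
+x, t = sp.symbols('x t')
+g = sp.sin(sp.pi*x)*sp.cos(sp.pi*t) - sp.sin(sp.pi*x)*sp.sin(sp.pi*t)
+
+assert sp.simplify(sp.diff(g, t, 2) - sp.diff(g, x, 2)) == 0               # wave equation, c = 1
+assert g.subs(x, 0) == 0 and g.subs(x, 1) == 0                             # boundary conditions
+assert sp.simplify(g.subs(t, 0) - sp.sin(sp.pi*x)) == 0                    # g(x,0) = u(x)
+assert sp.simplify(sp.diff(g, t).subs(t, 0) + sp.pi*sp.sin(sp.pi*x)) == 0  # g_t(x,0) = v(x)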

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs038.html b/doc/pub/odenn/html/._odenn-bs038.html new file mode 100644 index 000000000..1c591eb1d --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs038.html @@ -0,0 +1,506 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Solving the wave equation - the full program using Autograd

+ +

+ + +

import autograd.numpy as np
+from autograd import hessian,grad
+import autograd.numpy.random as npr
+from matplotlib import cm
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import axes3d
+
+## Set up the trial function:
+def u(x):
+    return np.sin(np.pi*x)
+
+def v(x):
+    return -np.pi*np.sin(np.pi*x)
+
+def h1(point):
+    x,t = point
+    return (1 - t**2)*u(x) + t*v(x)
+
+def g_trial(point,P):
+    x,t = point
+    return h1(point) + x*(1-x)*t**2*deep_neural_network(P,point)
+
+## Define the cost function
+def cost_function(P, x, t):
+    cost_sum = 0
+
+    g_t_hessian_func = hessian(g_trial)
+
+    for x_ in x:
+        for t_ in t:
+            point = np.array([x_,t_])
+
+            g_t_hessian = g_t_hessian_func(point,P)
+
+            g_t_d2x = g_t_hessian[0][0]
+            g_t_d2t = g_t_hessian[1][1]
+
+            err_sqr = ( (g_t_d2t - g_t_d2x) )**2
+            cost_sum += err_sqr
+
+    return cost_sum / (np.size(t) * np.size(x))
+
+## The neural network
+def sigmoid(z):
+    return 1/(1 + np.exp(-z))
+
+def deep_neural_network(deep_params, x):
+    # x is now a point and a 1D numpy array; make it a column vector
+    num_coordinates = np.size(x,0)
+    x = x.reshape(num_coordinates,-1)
+
+    num_points = np.size(x,1)
+
+    # N_hidden is the number of hidden layers
+    N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
+
+    # Assume that the input layer does nothing to the input x
+    x_input = x
+    x_prev = x_input
+
+    ## Hidden layers:
+
+    for l in range(N_hidden):
+        # From the list of parameters P, find the correct weights and bias for this layer
+        w_hidden = deep_params[l]
+
+        # Add a row of ones to include bias
+        x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
+
+        z_hidden = np.matmul(w_hidden, x_prev)
+        x_hidden = sigmoid(z_hidden)
+
+        # Update x_prev such that next layer can use the output from this layer
+        x_prev = x_hidden
+
+    ## Output layer:
+
+    # Get the weights and bias for this layer
+    w_output = deep_params[-1]
+
+    # Include bias:
+    x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
+
+    z_output = np.matmul(w_output, x_prev)
+    x_output = z_output
+
+    return x_output[0][0]
+
+## The analytical solution
+def g_analytic(point):
+    x,t = point
+    return np.sin(np.pi*x)*np.cos(np.pi*t) - np.sin(np.pi*x)*np.sin(np.pi*t)
+
+def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
+    # Find the number of hidden layers:
+    N_hidden = np.size(num_neurons)
+
+    ## Set up initial weights and biases
+
+    # Initialize the list of parameters:
+    P = [None]*(N_hidden + 1) # + 1 to include the output layer
+
+    P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since the input is a point (x,t), +1 to include bias
+    for l in range(1,N_hidden):
+        P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
+
+    # For the output layer
+    P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
+
+    print('Initial cost: ',cost_function(P, x, t))
+
+    cost_function_grad = grad(cost_function,0)
+
+    # Let the update be done num_iter times
+    for i in range(num_iter):
+        cost_grad =  cost_function_grad(P, x , t)
+
+        for l in range(N_hidden+1):
+            P[l] = P[l] - lmb * cost_grad[l]
+
+
+    print('Final cost: ',cost_function(P, x, t))
+
+    return P
+
+if __name__ == '__main__':
+    ### Use the neural network:
+    npr.seed(15)
+
+    ## Decide the values of arguments to the function to solve
+    Nx = 10; Nt = 10
+    x = np.linspace(0, 1, Nx)
+    t = np.linspace(0,1,Nt)
+
+    ## Set up the parameters for the network
+    num_hidden_neurons = [50,20]
+    num_iter = 1000
+    lmb = 0.01
+
+    P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
+
+    ## Store the results
+    res = np.zeros((Nx, Nt))
+    res_analytical = np.zeros((Nx, Nt))
+    for i,x_ in enumerate(x):
+        for j, t_ in enumerate(t):
+            point = np.array([x_, t_])
+            res[i,j] = g_trial(point,P)
+
+            res_analytical[i,j] = g_analytic(point)
+
+    diff = np.abs(res - res_analytical)
+    print("Max difference between analytical and solution from nn: %g"%np.max(diff))
+
+    ## Plot the solutions in two dimensions, that being in position and time
+
+    T,X = np.meshgrid(t,x)
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Solution from the deep neural network w/ %d layers'%len(num_hidden_neurons))
+    s = ax.plot_surface(T,X,res,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Analytical solution')
+    s = ax.plot_surface(T,X,res_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+
+    fig = plt.figure(figsize=(10,10))
+    ax = fig.gca(projection='3d')
+    ax.set_title('Difference')
+    s = ax.plot_surface(T,X,diff,linewidth=0,antialiased=False,cmap=cm.viridis)
+    ax.set_xlabel('Time $t$')
+    ax.set_ylabel('Position $x$');
+
+    ## Take some slices of the 3D plots just to see the solutions at particular times
+    indx1 = 0
+    indx2 = int(Nt/2)
+    indx3 = Nt-1
+
+    t1 = t[indx1]
+    t2 = t[indx2]
+    t3 = t[indx3]
+
+    # Slice the results from the DNN
+    res1 = res[:,indx1]
+    res2 = res[:,indx2]
+    res3 = res[:,indx3]
+
+    # Slice the analytical results
+    res_analytical1 = res_analytical[:,indx1]
+    res_analytical2 = res_analytical[:,indx2]
+    res_analytical3 = res_analytical[:,indx3]
+
+    # Plot the slices
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t1)
+    plt.plot(x, res1)
+    plt.plot(x,res_analytical1)
+    plt.legend(['dnn','analytical'])
+
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t2)
+    plt.plot(x, res2)
+    plt.plot(x,res_analytical2)
+    plt.legend(['dnn','analytical'])
+
+    plt.figure(figsize=(10,10))
+    plt.title("Computed solutions at time = %g"%t3)
+    plt.plot(x, res3)
+    plt.plot(x,res_analytical3)
+    plt.legend(['dnn','analytical'])
+
+    plt.show()
+
+

+

+ +

+ + +
+ + + + + + + + + + + diff --git a/doc/pub/odenn/html/._odenn-bs039.html b/doc/pub/odenn/html/._odenn-bs039.html new file mode 100644 index 000000000..06b1f4bd2 --- /dev/null +++ b/doc/pub/odenn/html/._odenn-bs039.html @@ -0,0 +1,455 @@ + + + + + + + +Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +

 

 

 

+ + + + +

Solving the wave equation - the full program using TensorFlow

+As the program using Autograd is fairly slow, one could hope that using TensorFlow
+could make a naive implementation faster and more numerically robust.

+In addition, with TensorFlow at hand, it could be easier to experiment with different
+optimization algorithms and other constructions of the network.

+The following program solves the given wave equation much faster:

+ + +

import tensorflow as tf
+import numpy as np
+from matplotlib import cm
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import axes3d
+
+Nx = 10
+x_np = np.linspace(0,1,Nx)
+
+Nt = 10
+t_np = np.linspace(0,1,Nt)
+
+X,T = np.meshgrid(x_np, t_np)
+
+x = X.ravel()
+t = T.ravel()
+
+## The construction phase
+
+zeros = tf.reshape(tf.convert_to_tensor(np.zeros(x.shape)),shape=(-1,1))
+x = tf.reshape(tf.convert_to_tensor(x),shape=(-1,1))
+t = tf.reshape(tf.convert_to_tensor(t),shape=(-1,1))
+
+points = tf.concat([x,t],1)
+
+num_iter = 100000
+num_hidden_neurons = [90]
+
+X = tf.convert_to_tensor(X)
+T = tf.convert_to_tensor(T)
+
+
+with tf.variable_scope('dnn'):
+    num_hidden_layers = np.size(num_hidden_neurons)
+
+    previous_layer = points
+
+    for l in range(num_hidden_layers):
+        current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l],activation=tf.nn.sigmoid)
+        previous_layer = current_layer
+
+    dnn_output = tf.layers.dense(previous_layer, 1)
+
+
+def u(x):
+    return tf.sin(np.pi*x)
+
+def v(x):
+    return -np.pi*tf.sin(np.pi*x)
+
+with tf.name_scope('loss'):
+    g_trial = (1 - t**2)*u(x) + t*v(x) + x*(1-x)*t**2*dnn_output
+
+    g_trial_d2t =  tf.gradients(tf.gradients(g_trial,t),t)
+    g_trial_d2x = tf.gradients(tf.gradients(g_trial,x),x)
+
+    loss = tf.losses.mean_squared_error(zeros, g_trial_d2t[0] - g_trial_d2x[0])
+
+learning_rate = 0.01
+with tf.name_scope('train'):
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+    training_op = optimizer.minimize(loss)
+
+init = tf.global_variables_initializer()
+
+g_analytic = tf.sin(np.pi*x)*tf.cos(np.pi*t) - tf.sin(np.pi*x)*tf.sin(np.pi*t)
+g_dnn = None
+
+## The execution phase
+with tf.Session() as sess:
+    init.run()
+    for i in range(num_iter):
+        sess.run(training_op)
+
+        # If one desires to see how the cost function behaves during training
+        #if i % 100 == 0:
+        #    print(loss.eval())
+
+    g_analytic = g_analytic.eval()
+    g_dnn = g_trial.eval()
+
+
+## Compare with the analytical solution
+diff = np.abs(g_analytic - g_dnn)
+print('Max absolute difference between analytical solution and TensorFlow DNN = ',np.max(diff))
+
+G_analytic = g_analytic.reshape((Nt,Nx))
+G_dnn = g_dnn.reshape((Nt,Nx))
+
+diff = np.abs(G_analytic - G_dnn)
+
+# Plot the results
+
+X,T = np.meshgrid(x_np, t_np)
+
+fig = plt.figure(figsize=(10,10))
+ax = fig.gca(projection='3d')
+ax.set_title('Solution from the deep neural network w/ %d hidden layer(s)'%len(num_hidden_neurons))
+s = ax.plot_surface(X,T,G_dnn,linewidth=0,antialiased=False,cmap=cm.viridis)
+ax.set_xlabel('Position $x$')
+ax.set_ylabel('Time $t$');
+
+fig = plt.figure(figsize=(10,10))
+ax = fig.gca(projection='3d')
+ax.set_title('Analytical solution')
+s = ax.plot_surface(X,T,G_analytic,linewidth=0,antialiased=False,cmap=cm.viridis)
+ax.set_xlabel('Position $x$')
+ax.set_ylabel('Time $t$');
+
+fig = plt.figure(figsize=(10,10))
+ax = fig.gca(projection='3d')
+ax.set_title('Difference')
+s = ax.plot_surface(X,T,diff,linewidth=0,antialiased=False,cmap=cm.viridis)
+ax.set_xlabel('Position $x$')
+ax.set_ylabel('Time $t$');
+
+## Take some 3D slices
+
+indx1 = 0
+indx2 = int(Nt/2)
+indx3 = Nt-1
+
+t1 = t_np[indx1]
+t2 = t_np[indx2]
+t3 = t_np[indx3]
+
+# Slice the results from the DNN
+res1 = G_dnn[indx1,:]
+res2 = G_dnn[indx2,:]
+res3 = G_dnn[indx3,:]
+
+# Slice the analytical results
+res_analytical1 = G_analytic[indx1,:]
+res_analytical2 = G_analytic[indx2,:]
+res_analytical3 = G_analytic[indx3,:]
+
+# Plot the slices
+plt.figure(figsize=(10,10))
+plt.title("Computed solutions at time = %g"%t1)
+plt.plot(x_np, res1)
+plt.plot(x_np,res_analytical1)
+plt.legend(['dnn','analytical'])
+
+plt.figure(figsize=(10,10))
+plt.title("Computed solutions at time = %g"%t2)
+plt.plot(x_np, res2)
+plt.plot(x_np,res_analytical2)
+plt.legend(['dnn','analytical'])
+
+plt.figure(figsize=(10,10))
+plt.title("Computed solutions at time = %g"%t3)
+plt.plot(x_np, res3)
+plt.plot(x_np,res_analytical3)
+plt.legend(['dnn','analytical'])
+
+plt.show()
+
+

+The program finds a solution whose maximum absolute difference from the analytical solution is approximately 0.0059, and it does so in just a few minutes. Some testing showed that a single hidden layer with 90 neurons performed well.
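+
+Since the construction phase reads the architecture from the list num_hidden_neurons, trying a deeper network is also a one-line change. A hypothetical variation (the values below are illustrative, not tested):
+
+# Two hidden layers with 50 and 25 neurons instead of a single layer of 90;
+# the construction loop over num_hidden_neurons picks this up unchanged.
+num_hidden_neurons = [50, 25]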

diff --git a/doc/pub/odenn/html/._odenn-bs040.html b/doc/pub/odenn/html/._odenn-bs040.html
new file mode 100644
index 000000000..a39c07a12
--- /dev/null
+++ b/doc/pub/odenn/html/._odenn-bs040.html
@@ -0,0 +1,286 @@

Resources

+  1. Artificial neural networks for solving ordinary and partial differential equations by I. E. Lagaris et al.
+  2. Neural networks for solving differential equations by A. Honchar
+  3. Solving differential equations using neural networks by M. M. Chiaramonte and M. Kiener
+  4. Introduction to Partial Differential Equations by A. Tveito and R. Winther

+ + + + + + + + + + + From cf068f5d1950f6a612df804f816e427f4139667f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Sigmundson=20Sch=C3=B8yen?= Date: Mon, 15 Nov 2021 08:19:01 +0100 Subject: [PATCH 2/3] Delete Jupyter checkpoint --- .../.ipynb_checkpoints/odenn-checkpoint.ipynb | 3579 ----------------- 1 file changed, 3579 deletions(-) delete mode 100644 doc/pub/odenn/ipynb/.ipynb_checkpoints/odenn-checkpoint.ipynb diff --git a/doc/pub/odenn/ipynb/.ipynb_checkpoints/odenn-checkpoint.ipynb b/doc/pub/odenn/ipynb/.ipynb_checkpoints/odenn-checkpoint.ipynb deleted file mode 100644 index a21d0f5a0..000000000 --- a/doc/pub/odenn/ipynb/.ipynb_checkpoints/odenn-checkpoint.ipynb +++ /dev/null @@ -1,3579 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "# Data Analysis and Machine Learning: Using Neural networks to solve ODEs and PDEs\n", - "\n", - " \n", - "**Kristine Baluka Hein**, Department of Informatics, University of Oslo, Norway\n", - "\n", - "Date: **Nov 11, 2018**\n", - "\n", - "## Differential equations\n", - "\n", - "The Universal Approximation Theorem states that a neural network can approximate any function at a single hidden layer along with one input and output layer to any given precision.\n", - "Having this in mind, we will look closer at whether a neural network manages to solve for a function in an equation.\n", - "\n", - "A differential equation is an equation where the solution is a function. In the equation, it is given some relations between the function's derivatives subject to some given conditions.\n", - "Typically, a differential equation is solved numerically using approximations of Taylor series.\n", - "These kind of methods usually depends of choosing step sizes along each dimension that are small enough for the approximations to yield accurate results.\n", - "As we will see in the examples, using a neural network manages to outperform some well known methods in selected cases.\n", - "A possible way to solve the equation that we will look into, is to formulate a trial solution involving the result from a neural network.\n", - "The trial solution should hopefully converge into the true solution.\n", - "Be aware though, not all differential equations has an analytical solution.\n", - "We will stick with those that has an analytical solution such that we can check how well the network performs.\n", - "To make the trial solution converge to the true solution, setting up the cost function and updating the weights and biases within the neural network is crucial for the trial solution to converge.\n", - "\n", - "## Description of the equation to solve for\n", - "A differential equation is a equation where the solution is a function.\n", - "The equation describes how the derivatives of the function behaves in a given domain along with some conditions.\n", - "\n", - "Given a differential equation, it is desirable to know how to reformulate it into an equation a neural network can solve.\n", - "Having decided on which activation functions each layer should use, along with the number of hidden layers and neurons within each layer,\n", - "the changeable parameters of a neural network are the weights and biases for each neuron in every layer in the net.\n", - "If a differential equation is reformulated into an equation where minimization of some parameters must be done,\n", - "a neural net could possibly solve this equation.\n", - "\n", - "A trial solution might be tricky to find in general.\n", - "Due to the Universal 
Approximation Theorem, one could hope that outcome of the deep neural net might solve a given differential equation, even though it is used in a simple trial solution.\n", - "Let us try this idea on some well-known ordinary differential equations and thereafter try to solve for functions defined by two variables, giving partial differential equations.\n", - "\n", - "## Ordinary Differential Equations\n", - "An ordinary differential equation (ODE) is an equation involving functions having one variable.\n", - "\n", - "In general, an ordinary differential equation looks like" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{ode} \\tag{1}\n", - "f\\left(x, \\, g(x), \\, g'(x), \\, g''(x), \\, \\dots \\, , \\, g^{(n)}(x)\\right) = 0\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $g(x)$ is the function to find, and $g^{(n)}(x)$ is the $n$-th derivative of $g(x)$.\n", - "\n", - "The $f\\left(x, g(x), g'(x), g''(x), \\, \\dots \\, , g^{(n)}(x)\\right)$ is just a way to write that there is an expression involving $x$ and $g(x), \\ g'(x), \\ g''(x), \\, \\dots \\, , \\text{ and } g^{(n)}(x)$ on the left side of the equality sign in ([ode](#ode)).\n", - "The highest order of derivative, that is the value of $n$, determines to the order of the equation.\n", - "The equation is referred to as a $n$-th order ODE.\n", - "Along with ([ode](#ode)), some additional conditions of the function $g(x)$ are typically given\n", - "for the solution to be unique.\n", - "\n", - "## The trial solution\n", - "Let the trial solution $g_t(x)$ be" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation}\n", - "\tg_t(x) = h_1(x) + h_2(x,N(x,P))\n", - "\\label{_auto1} \\tag{2}\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $h_1(x)$ is a function that makes $g_t(x)$ satisfy a given set of conditions, $N(x,P)$ a neural network with weights and biases described by $P$ and $h_2(x, N(x,P))$ some expression involving the neural network.\n", - "The role of the function $h_2(x, N(x,P))$, is to ensure that the output from $N(x,P)$ is zero when $g_t(x)$ is evaluated at the values of $x$ where the given conditions must be satisfied.\n", - "The function $h_1(x)$ should alone make $g_t(x)$ satisfy the conditions.\n", - "\n", - "But what about the network $N(x,P)$?\n", - "As described previously, an optimization method could be used to minimize the parameters of a neural network, that being its weights and biases, through backward propagation.\n", - "For the minimization to be defined, we need to have a cost function at hand to minimize.\n", - "\n", - "It is given that $f\\left(x, \\, g(x), \\, g'(x), \\, g''(x), \\, \\dots \\, , \\, g^{(n)}(x)\\right)$ should be equal to zero in ([ode](#ode)).\n", - "We can choose to consider the mean squared error as the cost function for an input $x$.\n", - "Since we are looking at one input, the cost function is just $f$ squared.\n", - "The cost function $c\\left(x, P \\right)$ can therefore be expressed as" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "c\\left(x, P\\right) = \\big(f\\left(x, \\, g(x), \\, g'(x), \\, g''(x), \\, \\dots \\, , \\, g^{(n)}(x)\\right)\\big)^2\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If $N$ inputs are given as a vector $\\vec x$ with elements $x_i$ for $i = 1,\\dots,N$,\n", - "the cost function becomes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{cost} \\tag{3}\n", - "\tc\\left(\\vec x, P\\right) = \\frac{1}{N} \\sum_{i=1}^N \\big(f\\left(x_i, \\, g(x_i), \\, g'(x_i), \\, g''(x_i), \\, \\dots \\, , \\, g^{(n)}(x_i)\\right)\\big)^2\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The neural net should then find some parameters $P$ that minimizes the cost function in\n", - "([cost](#cost)) for a set of $N$ training samples $x_i$.\n", - "\n", - "## Minimizing the cost function using gradient descent and automatic differentiation\n", - "To perform the minimization using gradient descent, the gradient of $c\\left(\\vec x, P\\right)$ is needed.\n", - "It might happen so that finding an analytical expression of the gradient of $c(\\vec x, P)$ from ([cost](#cost)) gets too messy, depending on which cost function one desires to use.\n", - "\n", - "Luckily, there exists libraries that makes the job for us through automatic differentiation.\n", - "Automatic differentiation is a method of finding the derivatives numerically with very high precision.\n", - "\n", - "In the forthcoming examples presenting possible usages of Autograd and TensorFlow,\n", - "it is shown how one could set up a neural network using gradient descent solving a differential\n", - "equation.\n", - "\n", - "## Example: Exponential decay and setting up the network using Autograd\n", - "An exponential decay of a quantity $g(x)$ is described by the equation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{solve_expdec} \\tag{4}\n", - " g'(x) = -\\gamma g(x)\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "with $g(0) = g_0$ for some chosen initial value $g_0$.\n", - "\n", - "The analytical solution of ([solve_expdec](#solve_expdec)) is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation}\n", - " g(x) = g_0 \\exp\\left(-\\gamma x\\right)\n", - "\\label{_auto2} \\tag{5}\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Having an analytical solution at hand, it is possible to use it to compare how well a neural network finds a solution of ([solve_expdec](#solve_expdec)).\n", - "\n", - "In this example, a neural network will be implemented using Autograd in order to perform backpropagation.\n", - "\n", - "## The function to solve for\n", - "\n", - "The program will use a neural network to solve" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{solveode} \\tag{6}\n", - "g'(x) = -\\gamma g(x)\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $g(0) = g_0$ with $\\gamma$ and $g_0$ being some chosen values.\n", - "\n", - "In this example, $\\gamma = 2$ and $g_0 = 10$.\n", - "\n", - "## The trial solution\n", - "To begin with, a trial solution $g_t(t)$ must be chosen. A general trial solution for ordinary differential equations could be" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "g_t(x, P) = h_1(x) + h_2(x, N(x, P))\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "with $h_1(x)$ ensuring that $g_t(x)$ satisfies some conditions and $h_2(x,N(x, P))$ an expression involving $x$ and the output from the neural network $N(x,P)$ with $P $ being the collection of the weights and biases for each layer. For now, it is assumed that the network consists of one input layer, one hidden layer, and one output layer.\n", - "\n", - "In this network, there are no weights and bias at the input layer, so $P = \\{ P_{\\text{hidden}}, P_{\\text{output}} \\}$.\n", - "If there are $N_{\\text{hidden} }$ neurons in the hidden layer, then $P_{\\text{hidden}}$ is a $N_{\\text{hidden} } \\times (1 + N_{\\text{input}})$ matrix, given that there are $N_{\\text{input}}$ neurons in the input layer.\n", - "\n", - "The first column in $P_{\\text{hidden} }$ represents the bias for each neuron in the hidden layer and the second column represents the weights for each neuron in the hidden layer from the input layer.\n", - "If there are $N_{\\text{output} }$ neurons in the output layer, then $P_{\\text{output}} $ is a $N_{\\text{output} } \\times (1 + N_{\\text{hidden} })$ matrix.\n", - "\n", - "Its first column represents the bias of each neuron and the remaining columns represents the weights to each neuron.\n", - "\n", - "It is given that $g(0) = g_0$. The trial solution must fulfill this condition to be a proper solution of ([solveode](#solveode)). A possible way to ensure that $g_t(0, P) = g_0$, is to let $F(N(x,P)) = x \\cdot N(x,P)$ and $A(x) = g_0$. This gives the following trial solution:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{trial} \\tag{7}\n", - "g_t(x, P) = g_0 + x \\cdot N(x, P)\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Reformulating the problem\n", - "We wish that our neural network manages to minimize a given cost function.\n", - "\n", - "A reformulation of out equation, ([solveode](#solveode)), must therefore be done,\n", - "such that it describes the problem a neural network can solve for.\n", - "\n", - "The neural network must find the set of weights and biases $P$ such that the trial solution in ([trial](#trial)) satisfies ([solveode](#solveode)).\n", - "\n", - "The trial solution" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "g_t(x, P) = g_0 + x \\cdot N(x, P)\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "has been chosen such that it already solves the condition $g(0) = g_0$. What remains, is to find $P$ such that" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{nnmin} \\tag{8}\n", - "g_t'(x, P) = - \\gamma g_t(x, P)\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "is fulfilled as *best as possible*.\n", - "\n", - "The left hand side and right hand side of ([nnmin](#nnmin)) must be computed separately, and then the neural network must choose weights and biases, contained in $P$, such that the sides are equal as best as possible.\n", - "This means that the absolute or squared difference between the sides must be as close to zero, ideally equal to zero.\n", - "In this case, the difference squared shows to be an appropriate measurement of how erroneous the trial solution is with respect to $P$ of the neural network.\n", - "\n", - "This gives the following cost function our neural network must solve for:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\min_{P}\\Big\\{ \\big(g_t'(x, P) - ( -\\gamma g_t(x, P) \\big)^2 \\Big\\}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "(the notation $\\min_{P}\\{ f(x, P) \\}$ means that we desire to find $P$ that yields the minimum of $f(x, P)$)\n", - "\n", - "or, in terms of weights and biases for the hidden and output layer in our network:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\min_{P_{\\text{hidden} }, \\ P_{\\text{output} }}\\Big\\{ \\big(g_t'(x, \\{ P_{\\text{hidden} }, P_{\\text{output} }\\}) - ( -\\gamma g_t(x, \\{ P_{\\text{hidden} }, P_{\\text{output} }\\}) \\big)^2 \\Big\\}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "for an input value $x$.\n", - "\n", - "If the neural network evaluates $g_t(x, P)$ at more values for $x$, say $N$ values $x_i$ for $i = 1, \\dots, N$, then the *total* error to minimize becomes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{min} \\tag{9}\n", - "\\min_{P}\\Big\\{\\frac{1}{N} \\sum_{i=1}^N \\big(g_t'(x_i, P) - ( -\\gamma g_t(x_i, P) \\big)^2 \\Big\\}\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Letting $\\vec x$ be a vector with elements $x_i$ and $c(\\vec x, P) = \\frac{1}{N} \\sum_i \\big(g_t'(x_i, P) - ( -\\gamma g_t(x_i, P) \\big)^2$ denote the cost function, the minimization problem that our network must solve, becomes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\min_{P} c(\\vec x, P)\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In terms of $P_{\\text{hidden} }$ and $P_{\\text{output} }$, this could also be expressed as\n", - "\n", - "$$\n", - "\\min_{P_{\\text{hidden} }, \\ P_{\\text{output} }} c(\\vec x, \\{P_{\\text{hidden} }, P_{\\text{output} }\\})\n", - "$$\n", - "\n", - "## A possible implementation of a neural network using Autograd\n", - "\n", - "For simplicity, it is assumed that the input is an array $\\vec x = (x_1, \\dots, x_N)$ with $N$ elements. It is at these points the neural network should find $P$ such that it fulfills ([min](#min)).\n", - "\n", - "First, the neural network must feed forward the inputs.\n", - "This means that $\\vec x$ must be passed through an input layer, a hidden layer and a output layer. The input layer in this case, does not need to process the data any further.\n", - "The input layer will consist of $N_{\\text{input} }$ neurons, passing its element to each neuron in the hidden layer. The number of neurons in the hidden layer will be $N_{\\text{hidden} }$.\n", - "\n", - "For the $i$-th in the hidden layer with weight $w_i^{\\text{hidden} }$ and bias $b_i^{\\text{hidden} }$, the weighting from the $j$-th neuron at the input layer is:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "z_{i,j}^{\\text{hidden}} &= b_i^{\\text{hidden}} + w_i^{\\text{hidden}}x_j \\\\\n", - "&=\n", - "\\begin{pmatrix}\n", - "b_i^{\\text{hidden}} & w_i^{\\text{hidden}}\n", - "\\end{pmatrix}\n", - "\\begin{pmatrix}\n", - "1 \\\\\n", - "x_j\n", - "\\end{pmatrix}\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The result after weighting the inputs at the $i$-th hidden neuron can be written as a vector:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "\\vec{z}_{i}^{\\text{hidden}} &= \\Big( b_i^{\\text{hidden}} + w_i^{\\text{hidden}}x_1 , \\ b_i^{\\text{hidden}} + w_i^{\\text{hidden}} x_2, \\ \\dots \\, , \\ b_i^{\\text{hidden}} + w_i^{\\text{hidden}} x_N\\Big) \\\\\n", - "&=\n", - "\\begin{pmatrix}\n", - " b_i^{\\text{hidden}} & w_i^{\\text{hidden}}\n", - "\\end{pmatrix}\n", - "\\begin{pmatrix}\n", - "1 & 1 & \\dots & 1 \\\\\n", - "x_1 & x_2 & \\dots & x_N\n", - "\\end{pmatrix} \\\\\n", - "&= \\vec{p}_{i, \\text{hidden}}^T X\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The vector $\\vec{p}_{i, \\text{hidden}}^T$ constitutes each row in $P_{\\text{hidden} }$, which contains the weights for the neural network to minimize according to ([min](#min)).\n", - "\n", - "After having found $\\vec{z}_{i}^{\\text{hidden}} $ for every $i$-th neuron within the hidden layer, the vector will be sent to an activation function $a_i(\\vec{z})$.\n", - "\n", - "In this example, 
the sigmoid function has been chosen to be the activation function for each hidden neuron:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "f(z) = \\frac{1}{1 + \\exp{(-z)}}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It is possible to use other activations functions for the hidden layer also.\n", - "\n", - "The output $\\vec{x}_i^{\\text{hidden} }$from each $i$-th hidden neuron is:\n", - "\n", - "$$\n", - "\\vec{x}_i^{\\text{hidden} } = f\\big( \\vec{z}_{i}^{\\text{hidden}} \\big)\n", - "$$\n", - "\n", - "The outputs $\\vec{x}_i^{\\text{hidden} } $ are then sent to the output layer.\n", - "\n", - "The output layer consists of one neuron in this case, and combines the output from each of the neurons in the hidden layers. The output layer combines the results from the hidden layer using some weights $ w_i^{\\text{output}}$ and biases $b_i^{\\text{output}}$. In this case, it is assumes that the number of neurons in the output layer is one.\n", - "\n", - "The procedure of weighting the output neuron $j$ in the hidden layer to the $i$-th neuron in the output layer is similar as for the hidden layer described previously." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "z_{1,j}^{\\text{output}} & =\n", - "\\begin{pmatrix}\n", - "b_1^{\\text{output}} & \\vec{w}_1^{\\text{output}}\n", - "\\end{pmatrix}\n", - "\\begin{pmatrix}\n", - "1 \\\\\n", - "\\vec{x}_j^{\\text{hidden}}\n", - "\\end{pmatrix}\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Expressing $z_{1,j}^{\\text{output}}$ as a vector gives the following way of weighting the inputs from the hidden layer:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\vec{z}_{1}^{\\text{output}} =\n", - "\\begin{pmatrix}\n", - "b_1^{\\text{output}} & \\vec{w}_1^{\\text{output}}\n", - "\\end{pmatrix}\n", - "\\begin{pmatrix}\n", - "1 & 1 & \\dots & 1 \\\\\n", - "\\vec{x}_1^{\\text{hidden}} & \\vec{x}_2^{\\text{hidden}} & \\dots & \\vec{x}_N^{\\text{hidden}}\n", - "\\end{pmatrix}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this case we seek a continuous range of values since we are approximating a function. 
This means that after computing $\\vec{z}_{1}^{\\text{output}}$ the neural network has finished its feed forward step, and $\\vec{z}_{1}^{\\text{output}}$ is the final output of the network.\n", - "\n", - "## Backpropagation using Autograd\n", - "The next step is to decide how the parameters should be changed such that they minimize the cost function.\n", - "\n", - "The chosen cost function for this problem is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "c(\\vec x, P) = \\frac{1}{N} \\sum_i \\big(g_t'(x_i, P) - ( -\\gamma g_t(x_i, P) \\big)^2\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In order to minimize the cost function, an optimization method must be chosen.\n", - "\n", - "Here, gradient descent with a constant step size has been chosen.\n", - "\n", - "## Gradient descent\n", - "The idea of the gradient descent algorithm is to update parameters in direction where the cost function decreases goes to a minimum.\n", - "\n", - "In general, the update of some parameters $\\vec \\omega$ given a cost function defined by some weights $\\vec \\omega$, $c(\\vec x, \\vec \\omega)$, goes as follows:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\vec \\omega_{\\text{new} } = \\vec \\omega - \\lambda \\nabla_{\\vec \\omega} c(\\vec x, \\vec \\omega)\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "for a number of iterations or until $ \\big|\\big| \\vec \\omega_{\\text{new} } - \\vec \\omega \\big|\\big|$ becomes smaller than some given tolerance.\n", - "\n", - "The value of $\\lambda$ decides how large steps the algorithm must take in the direction of $ \\nabla_{\\vec \\omega} c(\\vec x, \\vec \\omega)$.\n", - "The notation $\\nabla_{\\vec \\omega}$ express the gradient with respect to the elements in $\\vec \\omega$.\n", - "\n", - "In our case, we have to minimize the cost function $c(\\vec x, P)$ with respect to the two sets of weights and biases, that is for the hidden layer $P_{\\text{hidden} }$ and for the output layer $P_{\\text{output} }$ .\n", - "\n", - "This means that $P_{\\text{hidden} }$ and $P_{\\text{output} }$ is updated by" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "P_{\\text{hidden},\\text{new}} &= P_{\\text{hidden}} - \\lambda \\nabla_{P_{\\text{hidden}}} c(\\vec x, P) \\\\\n", - "P_{\\text{output},\\text{new}} &= P_{\\text{output}} - \\lambda \\nabla_{P_{\\text{output}}} c(\\vec x, P)\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In general, one could risk using a cost function having gradients that are cumbersome to derive analytically.\n", - "For our case, the cost functions are just the mean squared error.\n", - "One could employ an implementation of the back propagation for this case, but we will emphasis\n", - "on how one could use automatic differentiation in order to train the network.\n", - "\n", - "However, it might be useful to know how automatic differentiation can be used, e.g through Autograd, in order to test an implementation.\n", - "\n", - "## The network with one input, hidden, and output layer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.7/site-packages/autograd/numpy/numpy_vjps.py:444: FutureWarning: Using a non-tuple sequence for 
multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n", - " return lambda g: g[idxs]\n", - "/usr/local/lib/python3.7/site-packages/autograd/numpy/numpy_boxes.py:13: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n", - " def __getitem__(A, idx): return A[idx]\n", - "/usr/local/lib/python3.7/site-packages/autograd/numpy/numpy_vjps.py:597: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n", - " onp.add.at(A, idx, x)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Initial cost: 367.01\n" - ] - } - ], - "source": [ - "%matplotlib inline\n", - "\n", - "# Autograd will be used for later, so the numpy wrapper for Autograd must be imported\n", - "import autograd.numpy as np\n", - "from autograd import grad, elementwise_grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import pyplot as plt\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "# Assuming one input, hidden, and output layer\n", - "def neural_network(params, x):\n", - "\n", - " # Find the weights (including and biases) for the hidden and output layer.\n", - " # Assume that params is a list of parameters for each layer.\n", - " # The biases are the first element for each array in params,\n", - " # and the weights are the remaning elements in each array in params.\n", - "\n", - " w_hidden = params[0]\n", - " w_output = params[1]\n", - "\n", - " # Assumes input x being an one-dimensional array\n", - " num_values = np.size(x)\n", - " x = x.reshape(-1, num_values)\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - "\n", - " ## Hidden layer:\n", - "\n", - " # Add a row of ones to include bias\n", - " x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_input)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " ## Output layer:\n", - "\n", - " # Include bias:\n", - " x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_hidden)\n", - " x_output = z_output\n", - "\n", - " return x_output\n", - "\n", - "# The trial solution using the deep neural network:\n", - "def g_trial(x,params, g0 = 10):\n", - " return g0 + x*neural_network(params,x)\n", - "\n", - "# The right side of the ODE:\n", - "def g(x, g_trial, gamma = 2):\n", - " return -gamma*g_trial\n", - "\n", - "# The cost function:\n", - "def cost_function(P, x):\n", - "\n", - " # Evaluate the trial function with the current parameters P\n", - " g_t = g_trial(x,P)\n", - "\n", - " # Find the derivative w.r.t x of the neural network\n", - " d_net_out = elementwise_grad(neural_network,1)(P,x)\n", - "\n", - " # Find the derivative w.r.t x of the trial function\n", - " d_g_t = elementwise_grad(g_trial,0)(x,P)\n", - "\n", - " # The right side of the ODE\n", - " func = g(x, g_t)\n", - "\n", - " err_sqr = (d_g_t - func)**2\n", - " 
cost_sum = np.sum(err_sqr)\n", - "\n", - " return cost_sum / np.size(err_sqr)\n", - "\n", - "# Solve the exponential decay ODE using neural network with one input, hidden, and output layer\n", - "def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):\n", - " ## Set up initial weights and biases\n", - "\n", - " # For the hidden layer\n", - " p0 = npr.randn(num_neurons_hidden, 2 )\n", - "\n", - " # For the output layer\n", - " p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included\n", - "\n", - " P = [p0, p1]\n", - "\n", - " print('Initial cost: %g'%cost_function(P, x))\n", - "\n", - " ## Start finding the optimal weights using gradient descent\n", - "\n", - " # Find the Python function that represents the gradient of the cost function\n", - " # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer\n", - " cost_function_grad = grad(cost_function,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " # Evaluate the gradient at the current weights and biases in P.\n", - " # The cost_grad consist now of two arrays;\n", - " # one for the gradient w.r.t P_hidden and\n", - " # one for the gradient w.r.t P_output\n", - " cost_grad = cost_function_grad(P, x)\n", - "\n", - " P[0] = P[0] - lmb * cost_grad[0]\n", - " P[1] = P[1] - lmb * cost_grad[1]\n", - "\n", - " print('Final cost: %g'%cost_function(P, x))\n", - "\n", - " return P\n", - "\n", - "def g_analytic(x, gamma = 2, g0 = 10):\n", - " return g0*np.exp(-gamma*x)\n", - "\n", - "# Solve the given problem\n", - "if __name__ == '__main__':\n", - " # Set seed such that the weight are initialized\n", - " # with same weights and biases for every run.\n", - " npr.seed(15)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " N = 10\n", - " x = np.linspace(0, 1, N)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = 10\n", - " num_iter = 10000\n", - " lmb = 0.001\n", - "\n", - " # Use the network\n", - " P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " # Print the deviation from the trial solution and true solution\n", - " res = g_trial(x,P)\n", - " res_analytical = g_analytic(x)\n", - "\n", - " print('Max absolute difference: %g'%np.max(np.abs(res - res_analytical)))\n", - "\n", - " # Plot the results\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(x, res_analytical)\n", - " plt.plot(x, res[0,:])\n", - " plt.legend(['analytical','nn'])\n", - " plt.xlabel('x')\n", - " plt.ylabel('g(x)')\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The network with one input layer, specified number of hidden layers, and one output layer output layer\n", - "\n", - "It is also possible to extend the construction of our network into a more general one, allowing the network to contain more than one hidden layers.\n", - "\n", - "The number of neurons within each hidden layer are given as a list of integers in the program below." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import grad, elementwise_grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import pyplot as plt\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "# The neural network with one input layer and one output layer,\n", - "# but with number of hidden layers specified by the user.\n", - "def deep_neural_network(deep_params, x):\n", - " # N_hidden is the number of hidden layers\n", - "\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consists of\n", - " # parameters to all the hidden\n", - " # layers AND the output layer.\n", - "\n", - " # Assumes input x being an one-dimensional array\n", - " num_values = np.size(x)\n", - " x = x.reshape(-1, num_values)\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - "\n", - " # Due to multiple hidden layers, define a variable referencing to the\n", - " # output of the previous layer:\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output\n", - "\n", - "# The trial solution using the deep neural network:\n", - "def g_trial_deep(x,params, g0 = 10):\n", - " return g0 + x*deep_neural_network(params, x)\n", - "\n", - "# The right side of the ODE:\n", - "def g(x, g_trial, gamma = 2):\n", - " return -gamma*g_trial\n", - "\n", - "# The same cost function as before, but calls deep_neural_network instead.\n", - "def cost_function_deep(P, x):\n", - "\n", - " # Evaluate the trial function with the current parameters P\n", - " g_t = g_trial_deep(x,P)\n", - "\n", - " # Find the derivative w.r.t x of the neural network\n", - " d_net_out = elementwise_grad(deep_neural_network,1)(P,x)\n", - "\n", - " # Find the derivative w.r.t x of the trial function\n", - " d_g_t = elementwise_grad(g_trial_deep,0)(x,P)\n", - "\n", - " # The right side of the ODE\n", - " func = g(x, g_t)\n", - "\n", - " err_sqr = (d_g_t - func)**2\n", - " cost_sum = np.sum(err_sqr)\n", - "\n", - " return cost_sum / np.size(err_sqr)\n", - "\n", - "# Solve the exponential decay ODE using neural network with one input and one output layer,\n", - "# but with specified number of hidden layers from the user.\n", - "def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):\n", - " # num_hidden_neurons is now a list of number of neurons within each hidden layer\n", - "\n", - " # The number of elements in the list num_hidden_neurons thus represents\n", - " # the number of hidden layers.\n", - "\n", - " # Find the number of hidden layers:\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## 
Set up initial weights and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 )\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: %g'%cost_function_deep(P, x))\n", - "\n", - " ## Start finding the optimal weights using gradient descent\n", - "\n", - " # Find the Python function that represents the gradient of the cost function\n", - " # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer\n", - " cost_function_deep_grad = grad(cost_function_deep,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " # Evaluate the gradient at the current weights and biases in P.\n", - " # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases\n", - " # in the hidden layers and output layers evaluated at x.\n", - " cost_deep_grad = cost_function_deep_grad(P, x)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_deep_grad[l]\n", - "\n", - " print('Final cost: %g'%cost_function_deep(P, x))\n", - "\n", - " return P\n", - "\n", - "def g_analytic(x, gamma = 2, g0 = 10):\n", - " return g0*np.exp(-gamma*x)\n", - "\n", - "# Solve the given problem\n", - "if __name__ == '__main__':\n", - " npr.seed(15)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " N = 10\n", - " x = np.linspace(0, 1, N)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = np.array([10,10])\n", - " num_iter = 10000\n", - " lmb = 0.001\n", - "\n", - " P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " res = g_trial_deep(x,P)\n", - " res_analytical = g_analytic(x)\n", - "\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of a deep neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(x, res_analytical)\n", - " plt.plot(x, res[0,:])\n", - " plt.legend(['analytical','dnn'])\n", - " plt.ylabel('g(x)')\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: Population growth, comparing Autograd, TensorFlow, and Euler's scheme\n", - "\n", - "A logistic model of population growth assumes that a population converges toward an equilibrium.\n", - "The population growth can be modeled by" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{log} \\tag{10}\n", - "\tg'(t) = \\alpha g(t)(A - g(t))\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $g(t)$ is the population density at time $t$, $\\alpha > 0$ the growth rate and $A > 0$ is the maximum population number in the environment.\n", - "Also, at $t = 0$ the population has the size $g(0) = g_0$, where $g_0$ is some chosen constant.\n", - "\n", - "In this example, similar network as for the exponential decay using Autograd has been used to solve the equation. However, as the implementation might suffer from e.g numerical instability\n", - "and high execution time (this might be more apparent in the examples solving PDEs),\n", - "a network has been constructed using TensorFlow also.\n", - "For comparison, the forward Euler method has been implemented in order to see how the networks performs compared to a numerical scheme.\n", - "\n", - "## Setting up the problem\n", - "\n", - "Here, we will model a population $g(t)$ in an environment having carrying capacity $A$.\n", - "The population follows the model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{solveode_population} \\tag{11}\n", - "g'(t) = \\alpha g(t)(A - g(t))\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $g(0) = g_0$.\n", - "\n", - "In this example, we let $\\alpha = 2$, $A = 1$, and $g_0 = 1.2$.\n", - "\n", - "## The trial solution\n", - "We will get a slightly different trial solution, as the boundary conditions are different\n", - "compared to the case for exponential decay.\n", - "\n", - "A possible trial solution satisfying the condition $g(0) = g_0$ could be\n", - "\n", - "$$\n", - "h_1(t) = g_0 + t \\cdot N(t,P)\n", - "$$\n", - "\n", - "with $N(t,P)$ being the output from the neural network with weights and biases for each layer collected in the set $P$.\n", - "\n", - "The analytical solution is\n", - "\n", - "$$\n", - "g(t) = \\frac{Ag_0}{g_0 + (A - g_0)\\exp(-\\alpha A t)}\n", - "$$\n", - "\n", - "## The program using Autograd\n", - "\n", - "The network will be the similar as for the exponential decay example, but with some small modifications for our problem." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import grad, elementwise_grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import pyplot as plt\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "# Function to get the parameters.\n", - "# Done such that one can easily change the paramaters after one's liking.\n", - "def get_parameters():\n", - " alpha = 2\n", - " A = 1\n", - " g0 = 1.2\n", - " return alpha, A, g0\n", - "\n", - "def deep_neural_network(P, x):\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(P) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assumes input x being an one-dimensional array\n", - " num_values = np.size(x)\n", - " x = x.reshape(-1, num_values)\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - "\n", - " # Due to multiple hidden layers, define a variable referencing to the\n", - " # output of the previous layer:\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = P[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = P[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output\n", - "\n", - "\n", - "def cost_function_deep(P, x):\n", - "\n", - " # Evaluate the trial function with the current parameters P\n", - " g_t = g_trial_deep(x,P)\n", - "\n", - " # Find the derivative w.r.t x of the trial function\n", - " d_g_t = elementwise_grad(g_trial_deep,0)(x,P)\n", - "\n", - " # The right side of the ODE\n", - " func = f(x, g_t)\n", - "\n", - " err_sqr = (d_g_t - 
func)**2\n", - " cost_sum = np.sum(err_sqr)\n", - "\n", - " return cost_sum / np.size(err_sqr)\n", - "\n", - "# The right side of the ODE:\n", - "def f(x, g_trial):\n", - " alpha,A, g0 = get_parameters()\n", - " return alpha*g_trial*(A - g_trial)\n", - "\n", - "# The trial solution using the deep neural network:\n", - "def g_trial_deep(x, params):\n", - " alpha,A, g0 = get_parameters()\n", - " return g0 + x*deep_neural_network(params,x)\n", - "\n", - "# The analytical solution:\n", - "def g_analytic(t):\n", - " alpha,A, g0 = get_parameters()\n", - " return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))\n", - "\n", - "def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):\n", - " # num_hidden_neurons is now a list of number of neurons within each hidden layer\n", - "\n", - " # Find the number of hidden layers:\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## Set up initial weigths and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 )\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: %g'%cost_function_deep(P, x))\n", - "\n", - " ## Start finding the optimal weigths using gradient descent\n", - "\n", - " # Find the Python function that represents the gradient of the cost function\n", - " # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer\n", - " cost_function_deep_grad = grad(cost_function_deep,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " # Evaluate the gradient at the current weights and biases in P.\n", - " # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases\n", - " # in the hidden layers and output layers evaluated at x.\n", - " cost_deep_grad = cost_function_deep_grad(P, x)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_deep_grad[l]\n", - "\n", - " print('Final cost: %g'%cost_function_deep(P, x))\n", - "\n", - " return P\n", - "\n", - "if __name__ == '__main__':\n", - " npr.seed(4155)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nt = 10\n", - " T = 1\n", - " t = np.linspace(0,T, Nt)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = [100, 50, 25]\n", - " num_iter = 1000\n", - " lmb = 1e-3\n", - "\n", - " P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " g_dnn_ag = g_trial_deep(t,P)\n", - " g_analytical = g_analytic(t)\n", - "\n", - " # Find the maximum absolute difference between the solutons:\n", - " diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))\n", - " print(\"The max absolute difference between the solutions is: %g\"%diff_ag)\n", - "\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(t, g_analytical)\n", - " plt.plot(t, g_dnn_ag[0,:])\n", - " plt.legend(['analytical','nn'])\n", - " plt.xlabel('t')\n", - " plt.ylabel('g(t)')\n", - "\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using forward Euler to solve the ODE\n", - "\n", - "A straight-forward way of solving an ODE numerically, is to 
use Euler's method.\n", - "\n", - "Euler's method uses Taylor series to approximate the value at a function $f$ at a step $\\Delta x$ from $x$:\n", - "\n", - "$$\n", - "f(x + \\Delta x) \\approx f(x) + \\Delta x f'(x)\n", - "$$\n", - "\n", - "In our case, using Euler's method to approximate the value of $g$ at a step $\\Delta t$ from $t$ yields" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - " g(t + \\Delta t) &\\approx g(t) + \\Delta t g'(t) \\\\\n", - " &= g(t) + \\Delta t \\big(\\alpha g(t)(A - g(t))\\big)\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "along with the condition that $g(0) = g_0$.\n", - "\n", - "Let $t_i = i \\cdot \\Delta t$ where $\\Delta t = \\frac{T}{N_t-1}$ where $T$ is the final time our solver must solve for and $N_t$ the number of values for $t \\in [0, T]$ for $i = 0, \\dots, N_t-1$.\n", - "\n", - "For $i \\geq 1$, we have that" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "t_i &= i\\Delta t \\\\\n", - "&= (i - 1)\\Delta t + \\Delta t \\\\\n", - "&= t_{i-1} + \\Delta t\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, if $g_i = g(t_i)$ then" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation}\n", - " \\begin{aligned}\n", - " g_i &= g(t_i) \\\\\n", - " &= g(t_{i-1} + \\Delta t) \\\\\n", - " &\\approx g(t_{i-1}) + \\Delta t \\big(\\alpha g(t_{i-1})(A - g(t_{i-1}))\\big) \\\\\n", - " &= g_{i-1} + \\Delta t \\big(\\alpha g_{i-1}(A - g_{i-1})\\big)\n", - " \\end{aligned}\n", - "\\end{equation} \\label{odenum} \\tag{12}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "for $i \\geq 1$ and $g_0 = g(t_0) = g(0) = g_0$.\n", - "\n", - "Equation ([odenum](#odenum)) could be implemented in the following way,\n", - "extending the program that uses the network using Autograd:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# Assume that all function definitions from the example program using Autograd\n", - "# are located here.\n", - "\n", - "if __name__ == '__main__':\n", - " npr.seed(4155)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nt = 10\n", - " T = 1\n", - " t = np.linspace(0,T, Nt)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = [100,50,25]\n", - " num_iter = 1000\n", - " lmb = 1e-3\n", - "\n", - " P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " g_dnn_ag = g_trial_deep(t,P)\n", - " g_analytical = g_analytic(t)\n", - "\n", - " # Find the maximum absolute difference between the solutons:\n", - " diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))\n", - " print(\"The max absolute difference between the solutions is: %g\"%diff_ag)\n", - "\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(t, g_analytical)\n", - " plt.plot(t, g_dnn_ag[0,:])\n", - " plt.legend(['analytical','nn'])\n", - " plt.xlabel('t')\n", - " plt.ylabel('g(t)')\n", - "\n", - " ## Find an approximation to the funtion using forward Euler\n", - "\n", - " alpha, A, g0 = get_parameters()\n", - " dt = T/(Nt - 1)\n", - "\n", - " # Perform forward Euler to solve the ODE\n", - " g_euler = np.zeros(Nt)\n", - " g_euler[0] = g0\n", - "\n", - " for i in range(1,Nt):\n", - " g_euler[i] = g_euler[i-1] + dt*(alpha*g_euler[i-1]*(A - g_euler[i-1]))\n", - "\n", - " # Print the errors done by each method\n", - " diff1 = np.max(np.abs(g_euler - g_analytical))\n", - " diff2 = np.max(np.abs(g_dnn_ag[0,:] - g_analytical))\n", - "\n", - " print('Max absolute difference between Euler method and analytical: %g'%diff1)\n", - " print('Max absolute difference between deep neural network and analytical: %g'%diff2)\n", - "\n", - " # Plot results\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.plot(t,g_euler)\n", - " plt.plot(t,g_analytical)\n", - " plt.plot(t,g_dnn_ag[0,:])\n", - "\n", - " plt.legend(['euler','analytical','dnn'])\n", - " plt.xlabel('Time t')\n", - " plt.ylabel('g(t)')\n", - "\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Running the program gives" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " Max absolute difference between Euler method and analytical: 0.011225\n", - " Max absolute difference between deep neural network and analytical: 0.00424909\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using TensorFlow to model logistic population growth\n", - "\n", - "TensorFlow is a library widely used in the machine learning community.\n", - "A neural network can be 
set up in a flexible manner, where various optimization algorithms are implemented and different types of networks can be used, making it easier to experiment on solving differential equations using neural networks.\n", - "\n", - "## The general program flow in TensorFlow\n", - "\n", - "Usually, a program in TensorFlow is divided into two parts; the *construction phase* and the *execution phase*.\n", - "In the construction phase, the computational graph that TensorFlow uses to perform its calculations are set up.\n", - "In the execution phase, TensorFlow evaluates any procedure that was defined in the construction phase.\n", - "\n", - "## Program flow in TensorFlow - Construction phase\n", - "\n", - "Here, the architecture for the neural network will be set up, along with the cost function and an optimizer class used during training of the network.\n", - "Note that TensorFlow uses a different convention for the weighting done in each neuron in each layer within the network than in the implementation using Autograd.\n", - "The matrix-vector multiplication between the input from the previous layer and the weighting at the neuron at current layer in the program using Autograd, is the transpose of the convention used in TensorFlow. But it will not affect that much our construction, as TensorFlow takes care of most of the computations. The only thing we have to be aware of, is how the dimensions are for our inputs.\n", - "\n", - "## Program flow in TensorFlow - Execution phase\n", - "\n", - "The computation graph has been defined, and is ready to be evaluated.\n", - "In order to get access to the graph, it has to be initialized and be runned within a Session.\n", - "\n", - "## The full program modeling logistic population growth using TensorFlow" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "\n", - "# Just to reset the graph such that it is possible to rerun this in a\n", - "# Jupyter cell without resetting the whole kernel.\n", - "tf.reset_default_graph()\n", - "\n", - "# Set a seed to ensure getting the same results from every run\n", - "tf.set_random_seed(4155)\n", - "\n", - "Nt = 10\n", - "T = 1\n", - "t = np.linspace(0,T, Nt)\n", - "\n", - "## The construction phase\n", - "\n", - "# Convert the values the trial solution is evaluated at to a tensor.\n", - "t_tf = tf.convert_to_tensor(t.reshape(-1,1),dtype=tf.float64)\n", - "zeros = tf.reshape(tf.convert_to_tensor(np.zeros(t.shape)),shape=(-1,1))\n", - "\n", - "# Define the parameters of the equation\n", - "alpha = tf.constant(2.,dtype=tf.float64)\n", - "A = tf.constant(1.,dtype=tf.float64)\n", - "g0 = tf.constant(1.2,dtype=tf.float64)\n", - "\n", - "num_iter = 100000\n", - "\n", - "# Define the number of neurons at each hidden layer\n", - "num_hidden_neurons = [100,50,25]\n", - "num_hidden_layers = np.size(num_hidden_neurons)\n", - "\n", - "# Construct the network.\n", - "# tf.name_scope is used to group each step in the construction,\n", - "# just for a more organized visualization in TensorBoard\n", - "with tf.name_scope('dnn'):\n", - "\n", - " # Input layer\n", - " previous_layer = t_tf\n", - "\n", - " # Hidden layers\n", - " for l in range(num_hidden_layers):\n", - " current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l], name='hidden%d'%(l+1), activation=tf.nn.sigmoid)\n", - " previous_layer = current_layer\n", - "\n", - " # Output layer\n", - " dnn_output = 
tf.layers.dense(previous_layer, 1, name='output')\n",
- "\n",
- "# Define the cost function\n",
- "with tf.name_scope('cost'):\n",
- "    g_trial = g0 + t_tf*dnn_output\n",
- "    d_g_trial = tf.gradients(g_trial,t_tf)\n",
- "\n",
- "    func = alpha*g_trial*(A - g_trial)\n",
- "    cost = tf.losses.mean_squared_error(zeros, d_g_trial[0] - func)\n",
- "\n",
- "\n",
- "# Choose the method to minimize the cost function, along with a learning rate\n",
- "learning_rate = 1e-2\n",
- "with tf.name_scope('train'):\n",
- "    optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
- "    training_op = optimizer.minimize(cost)\n",
- "\n",
- "# Set up a reference to the result from the neural network:\n",
- "g_dnn_tf = None\n",
- "\n",
- "# Define a node that initializes all of the other nodes in the computational graph\n",
- "# used by TensorFlow:\n",
- "init = tf.global_variables_initializer()\n",
- "\n",
- "## Execution phase\n",
- "\n",
- "# Start a session where the graph defined in the construction phase can be evaluated:\n",
- "with tf.Session() as sess:\n",
- "    # Initialize the whole graph\n",
- "    init.run()\n",
- "\n",
- "    # Evaluate the initial cost:\n",
- "    print('Initial cost: %g'%cost.eval())\n",
- "\n",
- "    # The training of the network:\n",
- "    for i in range(num_iter):\n",
- "        sess.run(training_op)\n",
- "\n",
- "        # If one desires to see how the cost function behaves for each iteration:\n",
- "        #if i % 1000 == 0:\n",
- "        #    print(cost.eval())\n",
- "\n",
- "    # Training is done, and we have an approximate solution to the ODE\n",
- "    print('Final cost: %g'%cost.eval())\n",
- "\n",
- "    # Store the result\n",
- "    g_dnn_tf = g_trial.eval()\n",
- "\n",
- "# Compare with analytical solution\n",
- "def get_parameters():\n",
- "    alpha = 2\n",
- "    A = 1\n",
- "    g0 = 1.2\n",
- "    return alpha, A, g0\n",
- "\n",
- "def g_analytic(t):\n",
- "    alpha,A, g0 = get_parameters()\n",
- "    return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))\n",
- "\n",
- "g_analytical = g_analytic(t)\n",
- "diff_tf = g_dnn_tf - g_analytical.reshape(-1,1)\n",
- "\n",
- "print('\\nMax absolute difference between the analytical solution and solution from TensorFlow DNN: %g'%np.max(np.abs(diff_tf)))\n",
- "\n",
- "# Plot the result\n",
- "plt.figure(figsize=(10,10))\n",
- "\n",
- "plt.title('Numerical solutions of the ODE')\n",
- "\n",
- "plt.plot(t, g_dnn_tf)\n",
- "plt.plot(t, g_analytical)\n",
- "\n",
- "plt.legend(['dnn, tensorflow', 'exact'])\n",
- "plt.xlabel('Time t')\n",
- "plt.ylabel('g(t)')\n",
- "\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Example: Solving the one dimensional Poisson equation using Autograd and TensorFlow\n",
- "\n",
- "The Poisson equation for $g(x)$ in one dimension is"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n",
- "\n",
- "$$\n",
- "\begin{equation} \label{poisson} \tag{13}\n",
- "    -g''(x) = f(x)\n",
- "\end{equation}\n",
- "$$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where $f(x)$ is a given function for $x \in (0,1)$.\n",
- "\n",
- "The conditions that $g(x)$ is chosen to fulfill are"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "$$\n",
- "\begin{align*}\n",
- "    g(0) &= 0 \\\\\n",
- "    g(1) &= 0\n",
- "\end{align*}\n",
- "$$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This equation can be solved numerically using e.g. Autograd or TensorFlow.\n",
- "The results from the networks can then be compared to the analytical solution.\n",
- "In addition, it could be interesting to see how a typical method for numerically solving second-order ODEs compares to the neural networks.\n",
- "\n",
- "Many different optimization methods are implemented in TensorFlow.\n",
- "In the example programs using TensorFlow, it could also be of interest to see how\n",
- "the choice of optimization method affects the results.\n",
- "The [TensorFlow documentation about optimizers](https://www.tensorflow.org/versions/r1.2/api_guides/python/train#Optimizers) shows a list of the available optimization methods.\n",
- "\n",
- "## The specific equation to solve for\n",
- "\n",
- "Here, the function $g(x)$ to solve for follows the equation"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "$$\n",
- "-g''(x) = f(x),\qquad x \in (0,1)\n",
- "$$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where $f(x)$ is a given function, along with the chosen conditions"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n",
- "
\n", - "\n", - "$$\n", - "\\begin{aligned}\n", - "g(0) = g(1) = 0\n", - "\\end{aligned}\\label{cond} \\tag{14}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example, we consider the case when $f(x) = (3x + x^2)\\exp(x)$.\n", - "\n", - "For this case, a possible trial solution satisfying the conditions could be" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "g_t(x) = x \\cdot (1-x) \\cdot N(P,x)\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The analytical solution for this problem is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "g(x) = x(1 - x)\\exp(x)\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solving the equation using Autograd" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import grad, elementwise_grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import pyplot as plt\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "def deep_neural_network(deep_params, x):\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assumes input x being an one-dimensional array\n", - " num_values = np.size(x)\n", - " x = x.reshape(-1, num_values)\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - "\n", - " # Due to multiple hidden layers, define a variable referencing to the\n", - " # output of the previous layer:\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output\n", - "\n", - "def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):\n", - " # num_hidden_neurons is now a list of number of neurons within each hidden layer\n", - "\n", - " # Find the number of hidden layers:\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## Set up initial weigths and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 )\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: %g'%cost_function_deep(P, x))\n", - "\n", - " ## Start finding the optimal weigths using 
gradient descent\n", - "\n", - " # Find the Python function that represents the gradient of the cost function\n", - " # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer\n", - " cost_function_deep_grad = grad(cost_function_deep,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " # Evaluate the gradient at the current weights and biases in P.\n", - " # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases\n", - " # in the hidden layers and output layers evaluated at x.\n", - " cost_deep_grad = cost_function_deep_grad(P, x)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_deep_grad[l]\n", - "\n", - " print('Final cost: %g'%cost_function_deep(P, x))\n", - "\n", - " return P\n", - "\n", - "## Set up the cost function specified for this Poisson equation:\n", - "\n", - "# The right side of the ODE\n", - "def f(x):\n", - " return (3*x + x**2)*np.exp(x)\n", - "\n", - "def cost_function_deep(P, x):\n", - "\n", - " # Evaluate the trial function with the current parameters P\n", - " g_t = g_trial_deep(x,P)\n", - "\n", - " # Find the derivative w.r.t x of the trial function\n", - " d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)\n", - "\n", - " right_side = f(x)\n", - "\n", - " err_sqr = (-d2_g_t - right_side)**2\n", - " cost_sum = np.sum(err_sqr)\n", - "\n", - " return cost_sum/np.size(err_sqr)\n", - "\n", - "# The trial solution:\n", - "def g_trial_deep(x,P):\n", - " return x*(1-x)*deep_neural_network(P,x)\n", - "\n", - "# The analytic solution;\n", - "def g_analytic(x):\n", - " return x*(1-x)*np.exp(x)\n", - "\n", - "if __name__ == '__main__':\n", - " npr.seed(4155)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nx = 10\n", - " x = np.linspace(0,1, Nx)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = [200,100]\n", - " num_iter = 1000\n", - " lmb = 1e-3\n", - "\n", - " P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " g_dnn_ag = g_trial_deep(x,P)\n", - " g_analytical = g_analytic(x)\n", - "\n", - " # Find the maximum absolute difference between the solutons:\n", - " max_diff = np.max(np.abs(g_dnn_ag - g_analytical))\n", - " print(\"The max absolute difference between the solutions is: %g\"%max_diff)\n", - "\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(x, g_analytical)\n", - " plt.plot(x, g_dnn_ag[0,:])\n", - " plt.legend(['analytical','nn'])\n", - " plt.xlabel('x')\n", - " plt.ylabel('g(x)')\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Comparing with a numerical scheme\n", - "\n", - "The Poisson equation is possible to solve using Taylor series to approximate the second derivative.\n", - "\n", - "Using Taylor series, the second derivative can be expressed as\n", - "\n", - "$$\n", - "g''(x) = \\frac{g(x + \\Delta x) - 2g(x) + g(x-\\Delta x)}{\\Delta x^2} + E_{\\Delta x}(x)\n", - "$$\n", - "\n", - "where $\\Delta x$ is a small step size and $E_{\\Delta x}(x)$ being the error term.\n", - "\n", - "Looking away from the error terms gives an approximation to the second derivative:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{approx} \\tag{15}\n", - "g''(x) \\approx \\frac{g(x + \\Delta x) - 2g(x) + g(x-\\Delta x)}{\\Delta x^2}\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If $x_i = i \\Delta x = x_{i-1} + \\Delta x$ and $g_i = g(x_i)$ for $i = 1,\\dots N_x - 2$ with $N_x$ being the number of values for $x$, ([approx](#approx)) becomes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "g''(x_i) &\\approx \\frac{g(x_i + \\Delta x) - 2g(x_i) + g(x_i -\\Delta x)}{\\Delta x^2} \\\\\n", - "&= \\frac{g_{i+1} - 2g_i + g_{i-1}}{\\Delta x^2}\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since we know from our problem that" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "-g''(x) &= f(x) \\\\\n", - "&= (3x + x^2)\\exp(x)\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "along with the conditions $g(0) = g(1) = 0$,\n", - "the following scheme can be used to find an approximate solution for $g(x)$ numerically:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation}\n", - " \\begin{aligned}\n", - " -\\Big( \\frac{g_{i+1} - 2g_i + g_{i-1}}{\\Delta x^2} \\Big) &= f(x_i) \\\\\n", - " -g_{i+1} + 2g_i - g_{i-1} &= \\Delta x^2 f(x_i)\n", - " \\end{aligned}\n", - "\\end{equation} \\label{odesys} \\tag{16}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "for $i = 1, \\dots, N_x - 2$ where $g_0 = g_{N_x - 1} = 0$ and $f(x_i) = (3x_i + x_i^2)\\exp(x_i)$, which is given for our specific problem.\n", - "\n", - "The equation can be rewritten into a matrix equation:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{aligned}\n", - "\\begin{pmatrix}\n", - "2 & -1 & 0 & \\dots & 0 \\\\\n", - "-1 & 2 & -1 & \\dots & 0 \\\\\n", - "\\vdots & & \\ddots & & \\vdots \\\\\n", - "0 & \\dots & -1 & 2 & -1 \\\\\n", - "0 & \\dots & 0 & -1 & 2\\\\\n", - "\\end{pmatrix}\n", - "\\begin{pmatrix}\n", - "g_1 \\\\\n", - "g_2 \\\\\n", - "\\vdots \\\\\n", - "g_{N_x - 3} \\\\\n", - "g_{N_x - 2}\n", - "\\end{pmatrix}\n", - "&=\n", - "\\Delta x^2\n", - "\\begin{pmatrix}\n", - "f(x_1) \\\\\n", - "f(x_2) \\\\\n", - "\\vdots \\\\\n", - "f(x_{N_x - 3}) \\\\\n", - "f(x_{N_x - 2})\n", - "\\end{pmatrix} \\\\\n", - "A\\vec{g} &= \\vec{f}\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "which makes it possible to solve for the vector $\\vec{g}$.\n", - "\n", - "We can then compare the result from this numerical scheme with the output from our network using Autograd:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import grad, elementwise_grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import pyplot as plt\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "def deep_neural_network(deep_params, x):\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assumes input x being an one-dimensional array\n", - " num_values = np.size(x)\n", - " x = x.reshape(-1, num_values)\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - "\n", - " # Due to multiple hidden layers, define a variable referencing to the\n", - " # output of the previous layer:\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output\n", - "\n", - "def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):\n", - " # num_hidden_neurons is now a list of number of neurons 
within each hidden layer\n", - "\n", - " # Find the number of hidden layers:\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## Set up initial weigths and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 )\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: %g'%cost_function_deep(P, x))\n", - "\n", - " ## Start finding the optimal weigths using gradient descent\n", - "\n", - " # Find the Python function that represents the gradient of the cost function\n", - " # w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer\n", - " cost_function_deep_grad = grad(cost_function_deep,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " # Evaluate the gradient at the current weights and biases in P.\n", - " # The cost_grad consist now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases\n", - " # in the hidden layers and output layers evaluated at x.\n", - " cost_deep_grad = cost_function_deep_grad(P, x)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_deep_grad[l]\n", - "\n", - " print('Final cost: %g'%cost_function_deep(P, x))\n", - "\n", - " return P\n", - "\n", - "## Set up the cost function specified for this Poisson equation:\n", - "\n", - "# The right side of the ODE\n", - "def f(x):\n", - " return (3*x + x**2)*np.exp(x)\n", - "\n", - "def cost_function_deep(P, x):\n", - "\n", - " # Evaluate the trial function with the current parameters P\n", - " g_t = g_trial_deep(x,P)\n", - "\n", - " # Find the derivative w.r.t x of the trial function\n", - " d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)\n", - "\n", - " right_side = f(x)\n", - "\n", - " err_sqr = (-d2_g_t - right_side)**2\n", - " cost_sum = np.sum(err_sqr)\n", - "\n", - " return cost_sum/np.size(err_sqr)\n", - "\n", - "# The trial solution:\n", - "def g_trial_deep(x,P):\n", - " return x*(1-x)*deep_neural_network(P,x)\n", - "\n", - "# The analytic solution;\n", - "def g_analytic(x):\n", - " return x*(1-x)*np.exp(x)\n", - "\n", - "if __name__ == '__main__':\n", - " npr.seed(4155)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nx = 10\n", - " x = np.linspace(0,1, Nx)\n", - "\n", - " ## Set up the initial parameters\n", - " num_hidden_neurons = [200,100]\n", - " num_iter = 1000\n", - " lmb = 1e-3\n", - "\n", - " P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " g_dnn_ag = g_trial_deep(x,P)\n", - " g_analytical = g_analytic(x)\n", - "\n", - " # Find the maximum absolute difference between the solutons:\n", - "\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.title('Performance of neural network solving an ODE compared to the analytical solution')\n", - " plt.plot(x, g_analytical)\n", - " plt.plot(x, g_dnn_ag[0,:])\n", - " plt.legend(['analytical','nn'])\n", - " plt.xlabel('x')\n", - " plt.ylabel('g(x)')\n", - "\n", - " ## Perform the computation using the numerical scheme\n", - "\n", - " dx = 1/(Nx - 1)\n", - "\n", - " # Set up the matrix A\n", - " A = np.zeros((Nx-2,Nx-2))\n", - "\n", - " A[0,0] = 2\n", - " A[0,1] = -1\n", - "\n", - " for i in range(1,Nx-3):\n", - " 
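# Interior rows: fill in the [-1, 2, -1] stencil from the\n",
- "        # second-derivative approximation above\n",
- "        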
A[i,i-1] = -1\n", - " A[i,i] = 2\n", - " A[i,i+1] = -1\n", - "\n", - " A[Nx - 3, Nx - 4] = -1\n", - " A[Nx - 3, Nx - 3] = 2\n", - "\n", - " # Set up the vector f\n", - " f_vec = dx**2 * f(x[1:-1])\n", - "\n", - " # Solve the equation\n", - " g_res = np.linalg.solve(A,f_vec)\n", - "\n", - " g_vec = np.zeros(Nx)\n", - " g_vec[1:-1] = g_res\n", - "\n", - " # Print the differences between each method\n", - " max_diff1 = np.max(np.abs(g_dnn_ag - g_analytical))\n", - " max_diff2 = np.max(np.abs(g_vec - g_analytical))\n", - " print(\"The max absolute difference between the analytical solution and DNN Autograd: %g\"%max_diff1)\n", - " print(\"The max absolute difference between the analytical solution and numerical scheme: %g\"%max_diff2)\n", - "\n", - " # Plot the results\n", - " plt.figure(figsize=(10,10))\n", - "\n", - " plt.plot(x,g_vec)\n", - " plt.plot(x,g_analytical)\n", - " plt.plot(x,g_dnn_ag[0,:])\n", - "\n", - " plt.legend(['numerical scheme','analytical','dnn'])\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The program prints out:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " The max absolute difference between the analytical solution and DNN Autograd: 0.000464088\n", - " The max absolute difference between the analytical solution and numerical scheme: 0.00266858\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using gradient descent in TensorFlow to solve Poisson equation\n", - "The program follows the similar idea as for the logistic population model.\n", - "\n", - "What has changed, is what the cost function minimizes and the trial solution." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "## Construction phase\n", - "\n", - "# Just to reset the graph such that it is possible to rerun this in a\n", - "# Jupyter cell without resetting the whole kernel.\n", - "tf.reset_default_graph()\n", - "\n", - "tf.set_random_seed(4155)\n", - "\n", - "# Convert the values the trial solution is evaluated at to a tensor.\n", - "Nx = 10\n", - "x = np.linspace(0,1, Nx)\n", - "x_tf = tf.convert_to_tensor(x.reshape(-1,1),dtype=tf.float64)\n", - "\n", - "\n", - "num_iter = 10000\n", - "\n", - "# Define the number of neurons at each hidden layer\n", - "num_hidden_neurons = [20,10]\n", - "num_hidden_layers = np.size(num_hidden_neurons)\n", - "\n", - "# Construct the network.\n", - "# tf.name_scope is used to group each step in the construction,\n", - "# just for a more organized visualization in TensorBoard\n", - "with tf.name_scope('dnn'):\n", - "\n", - " # Input layer\n", - " previous_layer = x_tf\n", - "\n", - " # Hidden layers\n", - " for l in range(num_hidden_layers):\n", - " current_layer = tf.layers.dense(previous_layer, num_hidden_neurons[l], name='hidden%d'%(l+1), activation=tf.nn.sigmoid)\n", - " previous_layer = current_layer\n", - "\n", - " # Output layer\n", - " dnn_output = tf.layers.dense(previous_layer, 1, name='output')\n", - "\n", - "# Define the cost function\n", - "with tf.name_scope('cost'):\n", - " g_trial = x_tf*(1-x_tf)*dnn_output\n", - " d_g_trial = tf.gradients(g_trial,x_tf)\n", - " d2_g_trial = tf.gradients(d_g_trial,x_tf)\n", - "\n", - " right_side = (3*x_tf + x_tf**2)*tf.exp(x_tf)\n", - "\n", - " err = tf.square( -d2_g_trial[0] - right_side)\n", - " cost = tf.reduce_sum(err, name = 'cost')\n", - "\n", - "# 
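Note: plain gradient descent with a fixed learning rate is used here; the\n",
- "# Adam optimizer discussed below instead adapts the step size for each parameter.\n",
- "# 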
Choose the method to minimize the cost function, along with a learning rate\n",
- "learning_rate = 1e-2\n",
- "with tf.name_scope('train'):\n",
- "    optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
- "    training_op = optimizer.minimize(cost)\n",
- "\n",
- "g_dnn_tf = None\n",
- "\n",
- "# Define a node that initializes all of the other nodes in the computational graph\n",
- "# used by TensorFlow:\n",
- "init = tf.global_variables_initializer()\n",
- "\n",
- "\n",
- "## Execution phase\n",
- "\n",
- "# Start a session where the graph defined in the construction phase can be evaluated:\n",
- "\n",
- "with tf.Session() as sess:\n",
- "    # Initialize the whole graph\n",
- "    init.run()\n",
- "\n",
- "    # Evaluate the initial cost:\n",
- "    print('Initial cost: %g'%cost.eval())\n",
- "\n",
- "    # The training of the network:\n",
- "    for i in range(num_iter):\n",
- "        sess.run(training_op)\n",
- "\n",
- "    # Training is done, and we have an approximate solution to the ODE\n",
- "    print('Final cost: %g'%cost.eval())\n",
- "\n",
- "    # Store the result\n",
- "    g_dnn_tf = g_trial.eval()\n",
- "\n",
- "    writer = tf.summary.FileWriter(\"./output\", sess.graph)\n",
- "    writer.close()\n",
- "\n",
- "# Evaluate the analytical function to compare with\n",
- "def g_analytic(x):\n",
- "    return x*(1-x)*np.exp(x)\n",
- "\n",
- "g_analytical = g_analytic(x)\n",
- "\n",
- "diff_tf = g_dnn_tf - g_analytical.reshape(-1,1)\n",
- "\n",
- "print('\\nMax absolute difference between the analytical solution and solution from TensorFlow DNN: %g'%np.max(np.abs(diff_tf)))\n",
- "\n",
- "# Plot the result\n",
- "plt.figure(figsize=(10,10))\n",
- "\n",
- "plt.title('Numerical solutions of the ODE')\n",
- "\n",
- "plt.plot(x, g_dnn_tf)\n",
- "plt.plot(x, g_analytical)\n",
- "\n",
- "plt.legend(['dnn, tensorflow','exact'])\n",
- "plt.xlabel('x')\n",
- "plt.ylabel('g(x)')\n",
- "\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Using a different optimization algorithm implemented in TensorFlow to solve Poisson equation\n",
- "\n",
- "We can see that the results using GradientDescentOptimizer seem to converge towards the analytical solution.\n",
- "But many other optimization methods exist as well; see [the TensorFlow documentation on Optimizers](https://www.tensorflow.org/versions/r1.2/api_guides/python/train#Optimizers).\n",
- "\n",
- "Adam is an optimization algorithm that adapts its learning rates to the function it tries to minimize at every iteration.\n",
- "The algorithm is described in [this paper](https://arxiv.org/pdf/1412.6980.pdf).\n",
- "How much the choice of optimization algorithm matters for the convergence of the network could be interesting to experiment with.\n",
- "Using the same TensorFlow program as before, the only change needed is to replace the variable *optimizer*.\n",
- "\n",
- "In the program that uses TensorFlow to solve for the Poisson equation, change the line"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "    optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "to"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "    optimizer = tf.train.AdamOptimizer(learning_rate)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The program using the Adam optimizer with a different initial learning rate indeed yields an interesting result:"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "    Max absolute difference between the analytical solution and solution from TensorFlow DNN: 7.11243e-05\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Partial Differential Equations\n",
- "A partial differential equation (PDE) is an equation whose solution is a function of multiple variables.\n",
- "The equation may involve derivatives with respect to any combination of these variables.\n",
- "\n",
- "In general, a partial differential equation for a function $g(x_1,\dots,x_N)$ with $N$ variables may be expressed as"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n",
- "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{PDE} \\tag{17}\n", - " f\\left(x_1, \\, \\dots \\, , x_N, \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_1}, \\dots , \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_N}, \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_1\\partial x_2}, \\, \\dots \\, , \\frac{\\partial^n g(x_1,\\dots,x_N) }{\\partial x_N^n} \\right) = 0\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $f$ is an expression involving all kinds of possible mixed derivatives of $g(x_1,\\dots,x_N)$ up to an order $n$. In order for the solution to be unique, some additional conditions must also be given.\n", - "\n", - "The problem our network must solve for, is similar to the ODE case.\n", - "We must have a trial solution $g_t$ at hand.\n", - "\n", - "For instance, the trial solution could be expressed as" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{align*}\n", - " g_t(x_1,\\dots,x_N) = h_1(x_1,\\dots,x_N) + h_2(x_1,\\dots,x_N,N(x_1,\\dots,x_N,P))\n", - "\\end{align*}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $h_1(x_1,\\dots,x_N)$ is a function that ensures $g_t(x_1,\\dots,x_N)$ satisfies some given conditions.\n", - "The neural network $N(x_1,\\dots,x_N,P)$ has weights and biases described by $P$ and $h_2(x_1,\\dots,x_N,N(x_1,\\dots,x_N,P))$ is an expression using the output from the neural network in some way.\n", - "\n", - "The role of the function $h_2(x_1,\\dots,x_N,N(x_1,\\dots,x_N,P))$, is to ensure that the output of $N(x_1,\\dots,x_N,P)$ is zero when $g_t(x_1,\\dots,x_N)$ is evaluated at the values of $x_1,\\dots,x_N$ where the given conditions must be satisfied. The function $h_1(x_1,\\dots,x_N)$ should alone make $g_t(x_1,\\dots,x_N)$ satisfy the conditions.\n", - "\n", - "The network tries then the minimize the cost function following the same ideas as described for the ODE case, but now with more than one variables to consider.\n", - "The concept still remains the same; find a set of parameters $P$ such that the expression $f$ in ([PDE](#PDE)) is as close to zero as possible.\n", - "\n", - "As for the ODE case, the cost function is the mean squared error that the network must try to minimize. 
The cost function for the network to minimize is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "c\\left(x_1, \\dots, x_N, P\\right) = \\left( f\\left(x_1, \\, \\dots \\, , x_N, \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_1}, \\dots , \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_N}, \\frac{\\partial g(x_1,\\dots,x_N) }{\\partial x_1\\partial x_2}, \\, \\dots \\, , \\frac{\\partial^n g(x_1,\\dots,x_N) }{\\partial x_N^n} \\right) \\right)^2\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we let $\\vec x = \\big( x_1, \\dots, x_N \\big)$ be an array containing the values for $x_1, \\dots, x_N$ respectively, the cost function can be reformulated into the following:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "c\\left(\\vec{x}, P\\right) = f\\left( \\left( \\vec{x}, \\frac{\\partial g(\\vec x) }{\\partial x_1}, \\dots , \\frac{\\partial g(\\vec x) }{\\partial x_N}, \\frac{\\partial g(\\vec x) }{\\partial x_1\\partial x_2}, \\, \\dots \\, , \\frac{\\partial^n g(\\vec x) }{\\partial x_N^n} \\right) \\right)^2\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If we also have $M$ different sets of values for $x_1, \\dots, x_N$, that is $\\vec{x}_i = \\big(x_1^{(i)}, \\dots, x_N^{(i)}\\big)$ for $i = 1,\\dots,M$ being the rows in matrix $X$, the cost function can be generalized into" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "c\\left(X, P \\right) = \\sum_{i=1}^M f\\left( \\left( \\vec{x}_i, \\frac{\\partial g(\\vec{x}_i) }{\\partial x_1}, \\dots , \\frac{\\partial g(\\vec{x}_i) }{\\partial x_N}, \\frac{\\partial g(\\vec{x}_i) }{\\partial x_1\\partial x_2}, \\, \\dots \\, , \\frac{\\partial^n g(\\vec{x}_i) }{\\partial x_N^n} \\right) \\right)^2\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: The diffusion equation\n", - "\n", - "In one spatial dimension, the equation reads" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\frac{\\partial g(x,t)}{\\partial t} = \\frac{\\partial^2 g(x,t)}{\\partial x^2}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where a possible choice of conditions are" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{align*}\n", - "g(0,t) &= 0 ,\\qquad t \\geq 0 \\\\\n", - "g(1,t) &= 0, \\qquad t \\geq 0 \\\\\n", - "g(x,0) &= u(x),\\qquad x\\in [0,1]\n", - "\\end{align*}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "with $u(x)$ being some given function.\n", - "\n", - "## Defining the problem\n", - "\n", - "For this case, we want to find $g(x,t)$ such that" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
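- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Schematically, evaluating this cost amounts to summing squared PDE residuals over the grid points. A minimal sketch is given below; it is not the program used later, and `pde_residual` is a placeholder for the expression $f$ applied to the trial solution at a single point:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# A minimal sketch of the generalized cost function above.\n",
- "# `pde_residual(point, P)` is assumed to return the expression f(...)\n",
- "# evaluated for the trial solution at one input point.\n",
- "def cost(P, points, pde_residual):\n",
- "    return sum(pde_residual(point, P)**2 for point in points)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Example: The diffusion equation\n",
- "\n",
- "In one spatial dimension, the equation reads"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "$$\n",
- "\frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2}\n",
- "$$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "where a possible choice of conditions is"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "$$\n",
- "\begin{align*}\n",
- "g(0,t) &= 0 ,\qquad t \geq 0 \\\\\n",
- "g(1,t) &= 0, \qquad t \geq 0 \\\\\n",
- "g(x,0) &= u(x),\qquad x\in [0,1]\n",
- "\end{align*}\n",
- "$$"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "with $u(x)$ being some given function.\n",
- "\n",
- "## Defining the problem\n",
- "\n",
- "For this case, we want to find $g(x,t)$ such that"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n",
- "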
\n", - "\n", - "$$\n", - "\\begin{equation}\n", - " \\frac{\\partial g(x,t)}{\\partial t} = \\frac{\\partial^2 g(x,t)}{\\partial x^2}\n", - "\\end{equation} \\label{diffonedim} \\tag{18}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "and" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{align*}\n", - "g(0,t) &= 0 ,\\qquad t \\geq 0 \\\\\n", - "g(1,t) &= 0, \\qquad t \\geq 0 \\\\\n", - "g(x,0) &= u(x),\\qquad x\\in [0,1]\n", - "\\end{align*}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "with $u(x) = \\sin(\\pi x)$.\n", - "\n", - "First, let us set up the deep neural network.\n", - "The deep neural network will follow the same structure as discussed in the examples solving the ODEs.\n", - "First, we will look into how Autograd could be used in a network tailored to solve for bivariate functions.\n", - "\n", - "\n", - "\n", - "## Setting up the network using Autograd\n", - "\n", - "The only change to do here, is to extend our network such that functions of multiple parameters are correctly handled.\n", - "In this case we have two variables in our function to solve for, that is time $t$ and position $x$.\n", - "The variables will be represented by a one-dimensional array in the program.\n", - "The program will evaluate the network at each possible pair $(x,t)$, given an array for the desired $x$-values and $t$-values to approximate the solution at." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "def deep_neural_network(deep_params, x):\n", - " # x is now a point and a 1D numpy array; make it a column vector\n", - " num_coordinates = np.size(x,0)\n", - " x = x.reshape(num_coordinates,-1)\n", - "\n", - " num_points = np.size(x,1)\n", - "\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output[0][0]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setting up the network using Autograd; The trial solution\n", - "The cost function must then iterate through the given arrays containing values for $x$ and $t$, defines a point $(x,t)$ the deep neural network and the trial solution is evaluated at, and then finds the Jacobian of the trial solution.\n", - "\n", - "A possible trial solution for this PDE is\n", - "\n", - 
"$$\n", - "g_t(x,t) = h_1(x,t) + x(1-x)tN(x,t,P)\n", - "$$\n", - "\n", - "with $A(x,t)$ being a function ensuring that $g_t(x,t)$ satisfies our given conditions, and $N(x,t,P)$ being the output from the deep neural network using weights and biases for each layer from $P$.\n", - "\n", - "To fulfill the conditions, $A(x,t)$ could be:\n", - "\n", - "$$\n", - "h_1(x,t) = (1-t)\\Big(u(x) - \\big((1-x)u(0) + x u(1)\\big)\\Big) = (1-t)u(x) = (1-t)\\sin(\\pi x)\n", - "$$\n", - "since $(0) = u(1) = 0$ and $u(x) = \\sin(\\pi x)$.\n", - "\n", - "The Jacobian is used because the program must find the derivative of the trial solution with respect to $x$ and $t$.\n", - "\n", - "This gives the necessity of computing the Jacobian matrix, as we want to evaluate the gradient with respect to $x$ and $t$ (note that the Jacobian of a scalar-valued multivariate function is simply its gradient).\n", - "\n", - "In Autograd, the differentiation is by default done with respect to the first input argument of your Python function. Since the points is an array representing $x$ and $t$, the Jacobian is calculated using the values of $x$ and $t$.\n", - "\n", - "To find the second derivative with respect to $x$ and $t$, the Jacobian can be found for the second time. The result is a Hessian matrix, which is the matrix containing all the possible second order mixed derivatives of $g(x,t)$." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "# Set up the trial function:\n", - "def u(x):\n", - " return np.sin(np.pi*x)\n", - "\n", - "def g_trial(point,P):\n", - " x,t = point\n", - " return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)\n", - "\n", - "# The right side of the ODE:\n", - "def f(point):\n", - " return 0.\n", - "\n", - "# The cost function:\n", - "def cost_function(P, x, t):\n", - " cost_sum = 0\n", - "\n", - " g_t_jacobian_func = jacobian(g_trial)\n", - " g_t_hessian_func = hessian(g_trial)\n", - "\n", - " for x_ in x:\n", - " for t_ in t:\n", - " point = np.array([x_,t_])\n", - "\n", - " g_t = g_trial(point,P)\n", - " g_t_jacobian = g_t_jacobian_func(point,P)\n", - " g_t_hessian = g_t_hessian_func(point,P)\n", - "\n", - " g_t_dt = g_t_jacobian[1]\n", - " g_t_d2x = g_t_hessian[0][0]\n", - "\n", - " func = f(point)\n", - "\n", - " err_sqr = ( (g_t_dt - g_t_d2x) - func)**2\n", - " cost_sum += err_sqr\n", - "\n", - " return cost_sum" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setting up the network using Autograd; The full program\n", - "Having set up the network, along with the trial solution and cost function, we can now see how the deep neural network performs by comparing the results to the analytical solution.\n", - "\n", - "The analytical solution of our problem is\n", - "\n", - "$$\n", - "g(x,t) = \\exp(-\\pi^2 t)\\sin(\\pi x)\n", - "$$\n", - "\n", - "A possible way to implement a neural network solving the PDE, is given below.\n", - "Be aware, though, that it is fairly slow for the parameters used.\n", - "A better result is possible, but requires more iterations, and thus longer time to complete.\n", - "\n", - "Using only 20 neurons in one hidden layer, the program managed to make the trial solution have the maximum absolute error of 0.0075. 
The execution time, however, was approximately one day and 14 hours on a computer having Intel i7-7560U 2.4 GHz CPU.\n", - "\n", - "Indeed, the program below is not optimal in its implementation, but rather serves as an example on how to implement and use a neural network to solve a PDE.\n", - "Using TensorFlow in the next example sovling the wave equation, has a much better execution time." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import jacobian,hessian,grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import cm\n", - "from matplotlib import pyplot as plt\n", - "from mpl_toolkits.mplot3d import axes3d\n", - "\n", - "## Set up the network\n", - "\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "def deep_neural_network(deep_params, x):\n", - " # x is now a point and a 1D numpy array; make it a column vector\n", - " num_coordinates = np.size(x,0)\n", - " x = x.reshape(num_coordinates,-1)\n", - "\n", - " num_points = np.size(x,1)\n", - "\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output[0][0]\n", - "\n", - "## Define the trial solution and cost function\n", - "def u(x):\n", - " return np.sin(np.pi*x)\n", - "\n", - "def g_trial(point,P):\n", - " x,t = point\n", - " return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)\n", - "\n", - "# The right side of the ODE:\n", - "def f(point):\n", - " return 0.\n", - "\n", - "# The cost function:\n", - "def cost_function(P, x, t):\n", - " cost_sum = 0\n", - "\n", - " g_t_jacobian_func = jacobian(g_trial)\n", - " g_t_hessian_func = hessian(g_trial)\n", - "\n", - " for x_ in x:\n", - " for t_ in t:\n", - " point = np.array([x_,t_])\n", - "\n", - " g_t = g_trial(point,P)\n", - " g_t_jacobian = g_t_jacobian_func(point,P)\n", - " g_t_hessian = g_t_hessian_func(point,P)\n", - "\n", - " g_t_dt = g_t_jacobian[1]\n", - " g_t_d2x = g_t_hessian[0][0]\n", - "\n", - " func = f(point)\n", - "\n", - " err_sqr = ( (g_t_dt - g_t_d2x) - func)**2\n", - " cost_sum += err_sqr\n", - "\n", - " return cost_sum /( np.size(x)*np.size(t) )\n", - "\n", - "## For comparison, define the analytical solution\n", - "def g_analytic(point):\n", - " x,t = point\n", - " return np.exp(-np.pi**2*t)*np.sin(np.pi*x)\n", - "\n", - "## Set up a function for training the network to solve for the equation\n", - "def 
solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):\n", - " ## Set up initial weigths and biases\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## Set up initial weigths and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since we have two points, +1 to include bias\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: ',cost_function(P, x, t))\n", - "\n", - " cost_function_grad = grad(cost_function,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " cost_grad = cost_function_grad(P, x , t)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_grad[l]\n", - "\n", - " print('Final cost: ',cost_function(P, x, t))\n", - "\n", - " return P\n", - "\n", - "if __name__ == '__main__':\n", - " ### Use the neural network:\n", - " npr.seed(15)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nx = 10; Nt = 10\n", - " x = np.linspace(0, 1, Nx)\n", - " t = np.linspace(0,1,Nt)\n", - "\n", - " ## Set up the parameters for the network\n", - " num_hidden_neurons = [100, 25]\n", - " num_iter = 250\n", - " lmb = 0.01\n", - "\n", - " P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " ## Store the results\n", - " g_dnn_ag = np.zeros((Nx, Nt))\n", - " G_analytical = np.zeros((Nx, Nt))\n", - " for i,x_ in enumerate(x):\n", - " for j, t_ in enumerate(t):\n", - " point = np.array([x_, t_])\n", - " g_dnn_ag[i,j] = g_trial(point,P)\n", - "\n", - " G_analytical[i,j] = g_analytic(point)\n", - "\n", - " # Find the map difference between the analytical and the computed solution\n", - " diff_ag = np.abs(g_dnn_ag - G_analytical)\n", - " print('Max absolute difference between the analytical solution and the network: %g'%np.max(diff_ag))\n", - "\n", - " ## Plot the solutions in two dimensions, that being in position and time\n", - "\n", - " T,X = np.meshgrid(t,x)\n", - "\n", - " fig = plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))\n", - " s = ax.plot_surface(T,X,g_dnn_ag,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - "\n", - " fig = plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Analytical solution')\n", - " s = ax.plot_surface(T,X,G_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - " fig = plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Difference')\n", - " s = ax.plot_surface(T,X,diff_ag,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - " ## Take some slices of the 3D plots just to see the solutions at particular times\n", - " indx1 = 0\n", - " indx2 = int(Nt/2)\n", - " indx3 = Nt-1\n", - "\n", - " t1 = t[indx1]\n", - " t2 = t[indx2]\n", - " t3 = t[indx3]\n", - "\n", - " # Slice the results from the DNN\n", - " res1 = g_dnn_ag[:,indx1]\n", - " res2 = 
g_dnn_ag[:,indx2]\n", - " res3 = g_dnn_ag[:,indx3]\n", - "\n", - " # Slice the analytical results\n", - " res_analytical1 = G_analytical[:,indx1]\n", - " res_analytical2 = G_analytical[:,indx2]\n", - " res_analytical3 = G_analytical[:,indx3]\n", - "\n", - " # Plot the slices\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t1)\n", - " plt.plot(x, res1)\n", - " plt.plot(x,res_analytical1)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t2)\n", - " plt.plot(x, res2)\n", - " plt.plot(x,res_analytical2)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t3)\n", - " plt.plot(x, res3)\n", - " plt.plot(x,res_analytical3)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example: Solving the wave equation using Autograd and TensorFlow\n", - "\n", - "The wave equation is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\frac{\\partial^2 g(x,t)}{\\partial t^2} = c^2\\frac{\\partial^2 g(x,t)}{\\partial x^2}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "with $c$ being the specified wave speed.\n", - "\n", - "Here, the chosen conditions are" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "$$\n", - "\\begin{align*}\n", - "\tg(0,t) &= 0 \\\\\n", - "\tg(1,t) &= 0 \\\\\n", - "\tg(x,0) &= u(x) \\\\\n", - "\t\\frac{\\partial g(x,t)}{\\partial t} \\Big |_{t = 0} &= v(x)\n", - "\\end{align*}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $\\frac{\\partial g(x,t)}{\\partial t} \\Big |_{t = 0}$ means the derivative of $g(x,t)$ with respect to $t$ is evaluated at $t = 0$, and $u(x)$ and $v(x)$ being given functions.\n", - "\n", - "## The problem to solve for\n", - "\n", - "The wave equation to solve for, is" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{equation} \\label{wave} \\tag{19}\n", - "\\frac{\\partial^2 g(x,t)}{\\partial t^2} = c^2 \\frac{\\partial^2 g(x,t)}{\\partial x^2}\n", - "\\end{equation}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "where $c$ is the given wave speed.\n", - "The chosen conditions for this equation are" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "
\n", - "\n", - "$$\n", - "\\begin{aligned}\n", - "g(0,t) &= 0, &t \\geq 0 \\\\\n", - "g(1,t) &= 0, &t \\geq 0 \\\\\n", - "g(x,0) &= u(x), &x\\in[0,1] \\\\\n", - "\\frac{\\partial g(x,t)}{\\partial t}\\Big |_{t = 0} &= v(x), &x \\in [0,1]\n", - "\\end{aligned} \\label{condwave} \\tag{20}\n", - "$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this example, let $c = 1$ and $u(x) = \\sin(\\pi x)$ and $v(x) = -\\pi\\sin(\\pi x)$.\n", - "\n", - "\n", - "## The trial solution\n", - "Setting up the network is done in similar matter as for the example of solving the diffusion equation.\n", - "The only things we have to change, is the trial solution such that it satisfies the conditions from ([condwave](#condwave)) and the cost function.\n", - "\n", - "The trial solution becomes slightly different since we have other conditions than in the example of solving the diffusion equation. Here, a possible trial solution $g_t(x,t)$ is\n", - "\n", - "$$\n", - "g_t(x,t) = h_1(x,t) + x(1-x)t^2N(x,t,P)\n", - "$$\n", - "\n", - "where\n", - "\n", - "$$\n", - "h_1(x,t) = (1-t^2)u(x) + tv(x)\n", - "$$\n", - "\n", - "Note that this trial solution satisfies the conditions only if $u(0) = v(0) = u(1) = v(1) = 0$, which is the case in this example.\n", - "\n", - "## The analytical solution\n", - "\n", - "The analytical solution for our specific problem, is\n", - "\n", - "$$\n", - "g(x,t) = \\sin(\\pi x)\\cos(\\pi t) - \\sin(\\pi x)\\sin(\\pi t)\n", - "$$\n", - "\n", - "## Solving the wave equation - the full program using Autograd" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "import autograd.numpy as np\n", - "from autograd import hessian,grad\n", - "import autograd.numpy.random as npr\n", - "from matplotlib import cm\n", - "from matplotlib import pyplot as plt\n", - "from mpl_toolkits.mplot3d import axes3d\n", - "\n", - "## Set up the trial function:\n", - "def u(x):\n", - " return np.sin(np.pi*x)\n", - "\n", - "def v(x):\n", - " return -np.pi*np.sin(np.pi*x)\n", - "\n", - "def h1(point):\n", - " x,t = point\n", - " return (1 - t**2)*u(x) + t*v(x)\n", - "\n", - "def g_trial(point,P):\n", - " x,t = point\n", - " return h1(point) + x*(1-x)*t**2*deep_neural_network(P,point)\n", - "\n", - "## Define the cost function\n", - "def cost_function(P, x, t):\n", - " cost_sum = 0\n", - "\n", - " g_t_hessian_func = hessian(g_trial)\n", - "\n", - " for x_ in x:\n", - " for t_ in t:\n", - " point = np.array([x_,t_])\n", - "\n", - " g_t_hessian = g_t_hessian_func(point,P)\n", - "\n", - " g_t_d2x = g_t_hessian[0][0]\n", - " g_t_d2t = g_t_hessian[1][1]\n", - "\n", - " err_sqr = ( (g_t_d2t - g_t_d2x) )**2\n", - " cost_sum += err_sqr\n", - "\n", - " return cost_sum / (np.size(t) * np.size(x))\n", - "\n", - "## The neural network\n", - "def sigmoid(z):\n", - " return 1/(1 + np.exp(-z))\n", - "\n", - "def deep_neural_network(deep_params, x):\n", - " # x is now a point and a 1D numpy array; make it a column vector\n", - " num_coordinates = np.size(x,0)\n", - " x = x.reshape(num_coordinates,-1)\n", - "\n", - " num_points = np.size(x,1)\n", - "\n", - " # N_hidden is the number of hidden layers\n", - " N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer\n", - "\n", - " # Assume that the input layer does nothing to the input x\n", - " x_input = x\n", - " x_prev = x_input\n", - "\n", - " ## Hidden layers:\n", - "\n", - " for l in range(N_hidden):\n", - " # From the list of 
parameters P; find the correct weigths and bias for this layer\n", - " w_hidden = deep_params[l]\n", - "\n", - " # Add a row of ones to include bias\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)\n", - "\n", - " z_hidden = np.matmul(w_hidden, x_prev)\n", - " x_hidden = sigmoid(z_hidden)\n", - "\n", - " # Update x_prev such that next layer can use the output from this layer\n", - " x_prev = x_hidden\n", - "\n", - " ## Output layer:\n", - "\n", - " # Get the weights and bias for this layer\n", - " w_output = deep_params[-1]\n", - "\n", - " # Include bias:\n", - " x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)\n", - "\n", - " z_output = np.matmul(w_output, x_prev)\n", - " x_output = z_output\n", - "\n", - " return x_output[0][0]\n", - "\n", - "## The analytical solution\n", - "def g_analytic(point):\n", - " x,t = point\n", - " return np.sin(np.pi*x)*np.cos(np.pi*t) - np.sin(np.pi*x)*np.sin(np.pi*t)\n", - "\n", - "def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):\n", - " ## Set up initial weigths and biases\n", - " N_hidden = np.size(num_neurons)\n", - "\n", - " ## Set up initial weigths and biases\n", - "\n", - " # Initialize the list of parameters:\n", - " P = [None]*(N_hidden + 1) # + 1 to include the output layer\n", - "\n", - " P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since we have two points, +1 to include bias\n", - " for l in range(1,N_hidden):\n", - " P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias\n", - "\n", - " # For the output layer\n", - " P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included\n", - "\n", - " print('Initial cost: ',cost_function(P, x, t))\n", - "\n", - " cost_function_grad = grad(cost_function,0)\n", - "\n", - " # Let the update be done num_iter times\n", - " for i in range(num_iter):\n", - " cost_grad = cost_function_grad(P, x , t)\n", - "\n", - " for l in range(N_hidden+1):\n", - " P[l] = P[l] - lmb * cost_grad[l]\n", - "\n", - "\n", - " print('Final cost: ',cost_function(P, x, t))\n", - "\n", - " return P\n", - "\n", - "if __name__ == '__main__':\n", - " ### Use the neural network:\n", - " npr.seed(15)\n", - "\n", - " ## Decide the vales of arguments to the function to solve\n", - " Nx = 10; Nt = 10\n", - " x = np.linspace(0, 1, Nx)\n", - " t = np.linspace(0,1,Nt)\n", - "\n", - " ## Set up the parameters for the network\n", - " num_hidden_neurons = [50,20]\n", - " num_iter = 1000\n", - " lmb = 0.01\n", - "\n", - " P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)\n", - "\n", - " ## Store the results\n", - " res = np.zeros((Nx, Nt))\n", - " res_analytical = np.zeros((Nx, Nt))\n", - " for i,x_ in enumerate(x):\n", - " for j, t_ in enumerate(t):\n", - " point = np.array([x_, t_])\n", - " res[i,j] = g_trial(point,P)\n", - "\n", - " res_analytical[i,j] = g_analytic(point)\n", - "\n", - " diff = np.abs(res - res_analytical)\n", - " print(\"Max difference between analytical and solution from nn: %g\"%np.max(diff))\n", - "\n", - " ## Plot the solutions in two dimensions, that being in position and time\n", - "\n", - " T,X = np.meshgrid(t,x)\n", - "\n", - " fig = plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))\n", - " s = ax.plot_surface(T,X,res,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - "\n", - " fig = 
plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Analytical solution')\n", - " s = ax.plot_surface(T,X,res_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - "\n", - " fig = plt.figure(figsize=(10,10))\n", - " ax = fig.gca(projection='3d')\n", - " ax.set_title('Difference')\n", - " s = ax.plot_surface(T,X,diff,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - " ax.set_xlabel('Time $t$')\n", - " ax.set_ylabel('Position $x$');\n", - "\n", - " ## Take some slices of the 3D plots just to see the solutions at particular times\n", - " indx1 = 0\n", - " indx2 = int(Nt/2)\n", - " indx3 = Nt-1\n", - "\n", - " t1 = t[indx1]\n", - " t2 = t[indx2]\n", - " t3 = t[indx3]\n", - "\n", - " # Slice the results from the DNN\n", - " res1 = res[:,indx1]\n", - " res2 = res[:,indx2]\n", - " res3 = res[:,indx3]\n", - "\n", - " # Slice the analytical results\n", - " res_analytical1 = res_analytical[:,indx1]\n", - " res_analytical2 = res_analytical[:,indx2]\n", - " res_analytical3 = res_analytical[:,indx3]\n", - "\n", - " # Plot the slices\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t1)\n", - " plt.plot(x, res1)\n", - " plt.plot(x,res_analytical1)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t2)\n", - " plt.plot(x, res2)\n", - " plt.plot(x,res_analytical2)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.figure(figsize=(10,10))\n", - " plt.title(\"Computed solutions at time = %g\"%t3)\n", - " plt.plot(x, res3)\n", - " plt.plot(x,res_analytical3)\n", - " plt.legend(['dnn','analytical'])\n", - "\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Solving the wave equation - the full program using TensorFlow\n", - "As the program using Autograd is fairly slow, one could hope that using TensorFlow\n", - "could make a naive implementation faster, and more numerically robust.\n", - "\n", - "In addition, having TensorFlow at hand, it could be easier to experiment with different\n", - "optimization algorithms, and other constructions of the network.\n", - "\n", - "The following program solves the given wave equation much faster," - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "from matplotlib import cm\n", - "from matplotlib import pyplot as plt\n", - "from mpl_toolkits.mplot3d import axes3d\n", - "\n", - "Nx = 10\n", - "x_np = np.linspace(0,1,Nx)\n", - "\n", - "Nt = 10\n", - "t_np = np.linspace(0,1,Nt)\n", - "\n", - "X,T = np.meshgrid(x_np, t_np)\n", - "\n", - "x = X.ravel()\n", - "t = T.ravel()\n", - "\n", - "## The construction phase\n", - "\n", - "zeros = tf.reshape(tf.convert_to_tensor(np.zeros(x.shape)),shape=(-1,1))\n", - "x = tf.reshape(tf.convert_to_tensor(x),shape=(-1,1))\n", - "t = tf.reshape(tf.convert_to_tensor(t),shape=(-1,1))\n", - "\n", - "points = tf.concat([x,t],1)\n", - "\n", - "num_iter = 100000\n", - "num_hidden_neurons = [90]\n", - "\n", - "X = tf.convert_to_tensor(X)\n", - "T = tf.convert_to_tensor(T)\n", - "\n", - "\n", - "with tf.variable_scope('dnn'):\n", - " num_hidden_layers = np.size(num_hidden_neurons)\n", - "\n", - " previous_layer = points\n", - "\n", - " for l in range(num_hidden_layers):\n", - " current_layer = tf.layers.dense(previous_layer, 
num_hidden_neurons[l],activation=tf.nn.sigmoid)\n", - " previous_layer = current_layer\n", - "\n", - " dnn_output = tf.layers.dense(previous_layer, 1)\n", - "\n", - "\n", - "def u(x):\n", - " return tf.sin(np.pi*x)\n", - "\n", - "def v(x):\n", - " return -np.pi*tf.sin(np.pi*x)\n", - "\n", - "with tf.name_scope('loss'):\n", - " g_trial = (1 - t**2)*u(x) + t*v(x) + x*(1-x)*t**2*dnn_output\n", - "\n", - " g_trial_d2t = tf.gradients(tf.gradients(g_trial,t),t)\n", - " g_trial_d2x = tf.gradients(tf.gradients(g_trial,x),x)\n", - "\n", - " loss = tf.losses.mean_squared_error(zeros, g_trial_d2t[0] - g_trial_d2x[0])\n", - "\n", - "learning_rate = 0.01\n", - "with tf.name_scope('train'):\n", - " optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n", - " training_op = optimizer.minimize(loss)\n", - "\n", - "init = tf.global_variables_initializer()\n", - "\n", - "g_analytic = tf.sin(np.pi*x)*tf.cos(np.pi*t) - tf.sin(np.pi*x)*tf.sin(np.pi*t)\n", - "g_dnn = None\n", - "\n", - "## The execution phase\n", - "with tf.Session() as sess:\n", - " init.run()\n", - " for i in range(num_iter):\n", - " sess.run(training_op)\n", - "\n", - " # If one desires to see how the cost function behaves during training\n", - " #if i % 100 == 0:\n", - " # print(loss.eval())\n", - "\n", - " g_analytic = g_analytic.eval()\n", - " g_dnn = g_trial.eval()\n", - "\n", - "\n", - "## Compare with the analytical solution\n", - "diff = np.abs(g_analytic - g_dnn)\n", - "print('Max absolute difference between analytical solution and TensorFlow DNN = ',np.max(diff))\n", - "\n", - "G_analytic = g_analytic.reshape((Nt,Nx))\n", - "G_dnn = g_dnn.reshape((Nt,Nx))\n", - "\n", - "diff = np.abs(G_analytic - G_dnn)\n", - "\n", - "# Plot the results\n", - "\n", - "X,T = np.meshgrid(x_np, t_np)\n", - "\n", - "fig = plt.figure(figsize=(10,10))\n", - "ax = fig.gca(projection='3d')\n", - "ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))\n", - "s = ax.plot_surface(T,X,G_dnn,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - "ax.set_xlabel('Time $t$')\n", - "ax.set_ylabel('Position $x$');\n", - "\n", - "fig = plt.figure(figsize=(10,10))\n", - "ax = fig.gca(projection='3d')\n", - "ax.set_title('Analytical solution')\n", - "s = ax.plot_surface(T,X,G_analytic,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - "ax.set_xlabel('Time $t$')\n", - "ax.set_ylabel('Position $x$');\n", - "\n", - "fig = plt.figure(figsize=(10,10))\n", - "ax = fig.gca(projection='3d')\n", - "ax.set_title('Difference')\n", - "s = ax.plot_surface(T,X,diff,linewidth=0,antialiased=False,cmap=cm.viridis)\n", - "ax.set_xlabel('Time $t$')\n", - "ax.set_ylabel('Position $x$');\n", - "\n", - "## Take some 3D slices\n", - "\n", - "indx1 = 0\n", - "indx2 = int(Nt/2)\n", - "indx3 = Nt-1\n", - "\n", - "t1 = t_np[indx1]\n", - "t2 = t_np[indx2]\n", - "t3 = t_np[indx3]\n", - "\n", - "# Slice the results from the DNN\n", - "res1 = G_dnn[indx1,:]\n", - "res2 = G_dnn[indx2,:]\n", - "res3 = G_dnn[indx3,:]\n", - "\n", - "# Slice the analytical results\n", - "res_analytical1 = G_analytic[indx1,:]\n", - "res_analytical2 = G_analytic[indx2,:]\n", - "res_analytical3 = G_analytic[indx3,:]\n", - "\n", - "# Plot the slices\n", - "plt.figure(figsize=(10,10))\n", - "plt.title(\"Computed solutions at time = %g\"%t1)\n", - "plt.plot(x_np, res1)\n", - "plt.plot(x_np,res_analytical1)\n", - "plt.legend(['dnn','analytical'])\n", - "\n", - "plt.figure(figsize=(10,10))\n", - "plt.title(\"Computed solutions at time = %g\"%t2)\n", - "plt.plot(x_np, res2)\n", - 
"plt.plot(x_np,res_analytical2)\n", - "plt.legend(['dnn','analytical'])\n", - "\n", - "plt.figure(figsize=(10,10))\n", - "plt.title(\"Computed solutions at time = %g\"%t3)\n", - "plt.plot(x_np, res3)\n", - "plt.plot(x_np,res_analytical3)\n", - "plt.legend(['dnn','analytical'])\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The program manages to find a solution having max absolute difference to the analytical\n", - "at approximately 0.0059, by just using some minutes!\n", - "It was found, by some testing, that one hidden layer with 90 neurons actually performed well.\n", - "\n", - "## Resources\n", - "\n", - "1. [Artificial neural networks for solving ordinary and partial differential equations by I.E. Lagaris et al](https://pdfs.semanticscholar.org/d061/df393e0e8fbfd0ea24976458b7d42419040d.pdf)\n", - "\n", - "2. [Neural networks for solving differential equations by A. Honchar](https://becominghuman.ai/neural-networks-for-solving-differential-equations-fa230ac5e04c)\n", - "\n", - "3. [Solving differential equations using neural networks by M.M Chiaramonte and M. Kiener](http://cs229.stanford.edu/proj2013/ChiaramonteKiener-SolvingDifferentialEquationsUsingNeuralNetworks.pdf)\n", - "\n", - "4. [Introduction to Partial Differential Equations by A. Tveitio, R. Winther](https://www.springer.com/us/book/9783540225515)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 772fba7a82ff597405cd3e3908c6f37910dacfcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20Sigmundson=20Sch=C3=B8yen?= Date: Mon, 15 Nov 2021 08:20:40 +0100 Subject: [PATCH 3/3] Add default Python .gitignore --- .gitignore | 133 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 132 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 176ce398f..4d4d17325 100755 --- a/.gitignore +++ b/.gitignore @@ -53,4 +53,135 @@ doc/Projects/2016/Project2/.project2_2016.copyright doc/Programs/ParallelizationMPI/dill10 -doc/pub/* \ No newline at end of file +doc/pub/* + + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/
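
A closing note on the trial solution used in both wave-equation programs above. Writing N(x,t) for the raw network output (the code's dnn_output; the symbol N is our shorthand, not a name from the programs), the short check sketched below confirms that the ansatz satisfies the initial and boundary conditions by construction, which is why the loss in both programs only has to drive the PDE residual g_tt - g_xx towards zero:

```latex
% Trial solution from the programs above, with N(x,t) standing in for dnn_output:
g(x,t) = (1 - t^2)\,u(x) + t\,v(x) + x(1 - x)\,t^2\,N(x,t),
\qquad u(x) = \sin(\pi x), \quad v(x) = -\pi \sin(\pi x).
% Initial value: the last two terms vanish at t = 0,
g(x,0) = u(x).
% Initial velocity: differentiate in t, then set t = 0,
\frac{\partial g}{\partial t}
  = -2t\,u(x) + v(x) + x(1 - x)\left(2t\,N + t^2\,\frac{\partial N}{\partial t}\right)
\quad\Longrightarrow\quad
\frac{\partial g}{\partial t}\Big|_{t=0} = v(x).
% Boundary values: u, v and the factor x(1 - x) all vanish at x = 0 and x = 1,
g(0,t) = g(1,t) = 0.
```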