
Commit 188b19d

cleaning the asset pricing

1 parent: 315efbe

File tree: 4 files changed, +248 −149 lines

figures_asset_pricing.py

Lines changed: 8 additions & 28 deletions

```diff
@@ -2,7 +2,6 @@
 import matplotlib.pyplot as plt
 import os
 from new_asset_pricing_matern import asset_pricing_matern
-from asset_pricing_neural import asset_pricing_neural

 from mpl_toolkits.axes_grid1.inset_locator import (
     zoomed_inset_axes,
@@ -12,7 +11,7 @@

 fontsize = 17
 ticksize = 16
-figsize = (15, 8)
+figsize = (15, 7)
 params = {
     "font.family": "serif",
     "figure.figsize": figsize,
@@ -31,7 +30,6 @@
 ## Plot given solution
 def plot_asset_pricing(
     sol_matern,
-    sol_neural,
     output_path,
     p_rel_error_ylim=(1e-5, 2 * 1e-2),
     zoom=True,
@@ -43,19 +41,14 @@ def plot_asset_pricing(
     p_benchmark = sol_matern["p_benchmark"]
     p_rel_error_matern = sol_matern["p_rel_error"]

-    p_hat_neural = sol_neural["p_test"]
-    p_rel_error_neural = sol_neural["p_rel_error"]

     # Plotting
-    plt.figure(figsize=(15, 8))
+    plt.figure(figsize=(15, 7))

     ax_prices = plt.subplot(1, 2, 1)

     plt.plot(
-        t, p_hat_matern, color="k", label=r"$\hat{\mu}(t)$: Matérn Kernel Approximation"
-    )
-    plt.plot(
-        t, p_hat_neural, color="b", label=r"$\hat{\mu}(t)$: Neural Network Approximation"
+        t, p_hat_matern, color="k", label=r"$\hat{\mu}(t)$: Kernel Approximation"  # Matérn
     )
     plt.plot(
         t,
@@ -76,14 +69,9 @@ def plot_asset_pricing(
         t,
         p_rel_error_matern,
         color="k",
-        label=r"$\varepsilon_{\mu}(t)$: Relative Errors, Matérn Kernel Approx.",
-    )
-    plt.plot(
-        t,
-        p_rel_error_neural,
-        color="b",
-        label=r"$\varepsilon_{\mu}(t)$: Relative Errors, Neural Network Approx.",
-    )
+        label=r"$\varepsilon_{\mu}(t)$: Relative Errors",
+    )  # Matérn Kernel Approx.
+
     plt.axvline(x=T, color="k", linestyle=":", label="Extrapolation/Interpolation")
     plt.yscale("log")  # Set y-scale to logarithmic
     plt.ylim(p_rel_error_ylim[0], p_rel_error_ylim[1])
@@ -112,11 +100,7 @@ def plot_asset_pricing(
         p_hat_matern[time_window[0] - 1 : time_window[1] + 1],
         color="k",
     )
-    axins.plot(
-        t[time_window[0] - 1 : time_window[1] + 1],
-        p_hat_neural[time_window[0] - 1 : time_window[1] + 1],
-        color="b",
-    )
+
     axins.plot(
         t[time_window[0] - 1 : time_window[1] + 1],
         p_benchmark[time_window[0] - 1 : time_window[1] + 1],
@@ -144,10 +128,6 @@ def plot_asset_pricing(

 # Plots with various parameters
 sol_matern = asset_pricing_matern()
-sol_neural = asset_pricing_neural()
 plot_asset_pricing(
-    sol_matern, sol_neural, "figures/asset_pricing.pdf"
+    sol_matern, "figures/asset_pricing_contiguous.pdf"
 )
-
-# sol = asset_pricing_matern(train_points_list=[0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0])
-# plot_asset_pricing(sol, "figures/asset_pricing_sparse.pdf", zoom_loc=[5, 15])
```
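The zoom panel kept in the plot relies on mpl_toolkits' inset axes. A self-contained sketch of that pattern (the data, zoom factor, and window here are illustrative stand-ins, not the commit's exact settings):

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset

t = np.linspace(0, 50, 200)
p = np.exp(-0.05 * t)  # stand-in for a price path

fig, ax = plt.subplots()
ax.plot(t, p, color="k")

# Inset magnifying t in [10, 15], placed in the upper right of the parent axes
axins = zoomed_inset_axes(ax, zoom=3, loc="upper right")
axins.plot(t, p, color="k")
axins.set_xlim(10, 15)
axins.set_ylim(p[t <= 15][-1], p[t >= 10][0])  # y-range over the zoom window
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")  # connector lines

plt.show()
```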

new_asset_pricing_matern.py

Lines changed: 2 additions & 10 deletions

```diff
@@ -19,7 +19,7 @@ def asset_pricing_matern(
     x_0: float = 0.01,
     nu: float = 0.5,
     sigma: float = 1.0,
-    rho: float = 15,
+    rho: float = 10,
     solver_type: str = "ipopt",
     train_T: float = 40.0,
     train_points: int = 41,
@@ -47,18 +47,13 @@
     m = pyo.ConcreteModel()
     m.I = range(N)
     m.alpha_mu = pyo.Var(m.I, within=pyo.Reals, initialize=0.0)
-    m.alpha_b = pyo.Var(m.I, within=pyo.Reals, initialize=0.0)
     m.mu_0 = pyo.Var(within=pyo.NonNegativeReals, initialize=0.0)
-    m.b_0 = pyo.Var(within=pyo.NonNegativeReals, initialize=0.0)


     # Map kernels to variables. Pyomo doesn't support p_0 + K_tilde @ m.alpha
     def mu(m, i):
         return m.mu_0 + sum(K_tilde[i, j] * m.alpha_mu[j] for j in m.I)

-    def b(m, i):
-        return m.b_0 + sum(K_tilde[i, j] * m.alpha_b[j] for j in m.I)
-
     def dmu_dt(m, i):
         return sum(K[i, j] * m.alpha_mu[j] for j in m.I)

@@ -70,13 +65,10 @@ def x(i):
     def dp_dt_constraint(m, i):
         return dmu_dt(m, i) == r * mu(m, i) - x(i)

-    @m.Constraint(m.I)  # for each index in m.I
-    def b_constraint(m, i):
-        return mu(m, i) * x(i) == b(m, i)

     @m.Objective(sense=pyo.minimize)
     def min_norm(m):  # alpha @ K @ alpha not supported by pyomo
-        return sum(K[i, j] * m.alpha_b[i] * m.alpha_b[j] for i in m.I for j in m.I)
+        return sum(K[i, j] * m.alpha_mu[i] * m.alpha_mu[j] for i in m.I for j in m.I)

     solver = pyo.SolverFactory(solver_type)
     options = {
```
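In equations, the cleaned-up program is a minimum-norm kernel collocation problem in the $\mu$ coefficients alone. Reading $K$ and $\tilde K$ off the code (their construction sits outside this diff; presumably $K$ is the Gram matrix of the $\nu = 1/2$ Matérn kernel, which reduces to the exponential kernel $K(t, t') = \sigma^2 e^{-|t - t'|/\rho}$, and $\tilde K$ its integrated version so that $\dot\mu$ lives in the span of $K$):

$$\min_{\mu_0 \ge 0,\ \alpha \in \mathbb{R}^N} \ \sum_{i,j} K_{ij}\, \alpha_i \alpha_j \quad \text{s.t.} \quad \dot{\mu}(t_i) = r\, \mu(t_i) - x(t_i), \quad i = 1, \dots, N,$$

where $\mu(t_i) = \mu_0 + \sum_j \tilde{K}_{ij}\, \alpha_j$ and $\dot{\mu}(t_i) = \sum_j K_{ij}\, \alpha_j$. Under that reading, lowering $\rho$ from 15 to 10 shortens the kernel's lengthscale.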
asset_pricing_neural.py

Lines changed: 111 additions & 111 deletions (the deleted and re-added lines are identical, consistent with a move or line-ending change, so the file content is shown once)

```python
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import jsonargparse
from asset_pricing_benchmark import mu_f_array
from typing import List, Optional


def asset_pricing_neural(
    r: float = 0.1,
    c: float = 0.02,
    g: float = -0.2,
    x_0: float = 0.01,
    train_T: float = 40.0,
    train_points: int = 41,
    test_T: float = 50.0,
    test_points: int = 100,
    train_points_list: Optional[List[float]] = None,
    seed=123,
):
    # If `train_points_list` is passed, use those points directly;
    # otherwise build a linspace grid of `train_points` points on [0, train_T]
    if train_points_list is None:
        train_data = torch.tensor(
            np.linspace(0, train_T, train_points), dtype=torch.float32
        )
    else:
        train_data = torch.tensor(np.array(train_points_list), dtype=torch.float32)
    train_data = train_data.unsqueeze(dim=1)
    test_data = torch.tensor(np.linspace(0, test_T, test_points), dtype=torch.float32)
    test_data = test_data.unsqueeze(dim=1)

    train = DataLoader(train_data, batch_size=len(train_data), shuffle=False)

    def derivative_back(model, t):  # backward differencing
        epsilon = 1.0e-8
        sqrt_eps = np.sqrt(epsilon)
        return (model(t) - model(t - sqrt_eps)) / sqrt_eps

    # Dividends
    def x(i):
        return (x_0 + (c / g)) * np.exp(g * i) - (c / g)

    def G(model, t):
        mu = model(t)
        dmudt = r * mu - x(t)
        return dmudt

    torch.manual_seed(seed)

    class NN(nn.Module):
        def __init__(
            self,
            dim_hidden=128,
        ):
            super().__init__()
            self.dim_hidden = dim_hidden
            self.q = nn.Sequential(
                nn.Linear(1, dim_hidden, bias=True),
                nn.Tanh(),
                nn.Linear(dim_hidden, dim_hidden, bias=True),
                nn.Tanh(),
                nn.Linear(dim_hidden, dim_hidden, bias=True),
                nn.Tanh(),
                nn.Linear(dim_hidden, dim_hidden, bias=True),
                nn.Tanh(),
                nn.Linear(dim_hidden, 1),
                nn.Softplus(beta=1.0),  # To make sure price stays positive
            )

        def forward(self, x):
            return self.q(x)

    q_hat = NN()
    learning_rate = 1e-3
    optimizer = torch.optim.Adam(q_hat.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.8)
    num_epochs = 1000

    for epoch in range(num_epochs):
        for i, time in enumerate(train):
            res_ode = derivative_back(q_hat, time) - G(q_hat, time)
            res_p_dot = res_ode[:, 0]

            loss = res_p_dot.pow(2).mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        scheduler.step()

    # Generate test_data and compare to the benchmark
    mu_benchmark = mu_f_array(np.array(test_data), c, g, r, x_0)
    mu_test = np.array(q_hat(test_data)[:, [0]].detach())

    mu_rel_error = np.abs(mu_benchmark - mu_test) / mu_benchmark
    print(f"E(|rel_error(p)|) = {mu_rel_error.mean()}")
    return {
        "t_train": train_data,
        "t_test": test_data,
        "p_test": mu_test,
        "p_benchmark": mu_benchmark,
        "p_rel_error": mu_rel_error,
        "neural_net_solution": q_hat,  # interpolator
    }


if __name__ == "__main__":
    jsonargparse.CLI(asset_pricing_neural)
```
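`derivative_back` approximates $\dot{\mu}$ with a backward difference of step $\sqrt{\varepsilon} = 10^{-4}$. A minimal alternative sketch using torch.autograd for exact derivatives (`derivative_autograd` is a hypothetical helper for illustration, not part of this commit):

```python
import torch

def derivative_autograd(model, t):
    # t: (N, 1) tensor of times; model maps (N, 1) -> (N, 1), as in NN above.
    t = t.clone().requires_grad_(True)
    out = model(t)
    # Each output row depends only on its own time, so the gradient of the
    # sum recovers d model(t_i) / d t_i row by row.
    (grad,) = torch.autograd.grad(out.sum(), t, create_graph=True)
    return grad  # create_graph=True lets the squared-residual loss backprop
```

Swapping it in for `derivative_back` inside `res_ode` would leave the rest of the training loop unchanged.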
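For reference, the benchmark `mu_f_array` that the solvers are tested against is presumably the closed-form solution of the pricing ODE. Solving $\dot{\mu}(t) = r\mu(t) - x(t)$ with dividends $x(t) = (x_0 + c/g)\,e^{gt} - c/g$ under the standard no-bubble (transversality) condition, and assuming $r > g$ (true at the defaults $r = 0.1$, $g = -0.2$):

$$\mu(t) = \int_t^{\infty} e^{-r(s-t)}\, x(s)\, ds = \frac{x_0 + c/g}{r - g}\, e^{g t} - \frac{c}{r g}.$$

At the defaults this tends to $-c/(rg) = 1$ as $t \to \infty$, which is the steady state $\mu = x/r$ with $x \to -c/g = 0.1$.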
