-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsimple_simply_supported.py
100 lines (89 loc) · 3.19 KB
/
simple_simply_supported.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
"""
Solving the Euler beam equation for a simply supported beam with load uniformly distributed.
EI y'' = M
M = -p*(L*X-X^2)/2
x= 10m
E(Youngs Modulus) = 200000MPa
I(Monent of Inertia in transverse direction) = 0.000005 m^4
p(Point load) = 10KN
y = deflection of the beam
strategy:
-> Hard boundary condition assignments
-> loss is calculated for all the points and reduces to its mean square value before taking gradient steps
"""
#Import required packages
import autodiff as ad
import numpy as np
from NN_architecture import NeuralNetLSTM,xavier,diff_n_times
import matplotlib.pyplot as plt
from optimizers import *
import sys
# Raise the recursion limit — presumably the autodiff graph traversal recurses
# deeply for higher-order derivatives; TODO confirm against the `ad` package.
sys.setrecursionlimit(5000)
#styling of plots
plt.style.use('dark_background')
def loss_calculator(model,points):
    """
    Calculates the mean squared residual of the Euler beam ODE over the sample points.

    The trial solution y(X) = X*(10-X)*NN(X) satisfies the hard boundary
    conditions y(0) = y(10) = 0 by construction.

    inputs:
    model: The Neural Network model to be trained
    points: array of shape (n, 1) of collocation points inside the domain [0, 10]
    returns: Mean Squared loss from all the points in domain [0,10]
    """
    X = ad.Variable(points,"X")
    # Trial solution with hard BC enforcement: vanishes at X = 0 and X = 10.
    val = X*(10-X)*model.output(X)
    #Force per unit length (S.I Units) -- 10 kN/m uniformly distributed load
    p = 10000
    # Twice the flexural rigidity, 2*E*I: the factor 2 absorbs the 1/2 in the
    # bending moment M = -p*(L*X - X^2)/2, so temp = p/(2*E*I).
    F = 2*0.000005*200000*1000000
    temp = p/F
    # Residual of the beam ODE at every collocation point (second derivative
    # of the trial solution minus the scaled moment term, per source convention).
    f = (diff_n_times(val,X,2)) - ((temp*((10*X)-ad.Pow(X,2))))
    # Mean over the actual number of sample points (was hard-coded to /100,
    # which was only correct for exactly 100 points).
    lossd = ad.ReduceSumToShape(ad.Pow(f,2),())/len(points)
    return lossd
def sampler(n, seed=0):
    """
    Draws n data points uniformly distributed over the domain [0, 10).

    inputs:
    n : number of data points
    seed : RNG seed for reproducibility (default 0, matching the original
           hard-coded behavior; pass a different value for fresh samples)
    returns: numpy array of shape (n, 1)
    """
    # Seeding on every call makes repeated calls with the same arguments
    # return identical samples -- deliberate, for reproducible training.
    np.random.seed(seed)
    return np.reshape(np.random.uniform(0,10,n),(n,1))
#Instantiating model and optimizer
# NOTE(review): NeuralNetLSTM(10,1,1,1) -- argument meanings (width/depth/
# input/output sizes?) are defined in NN_architecture; confirm there.
model = NeuralNetLSTM(10,1,1,1)
# Re-initialize every weight matrix with Xavier initialization, preserving shapes.
model.set_weights([xavier(i().shape[0],i().shape[1]) for i in model.get_weights()])
optimizer= Adam(len(model.get_weights()))
epochs = 2000
# 100 fixed collocation points in [0, 10] used for the whole training run.
x=sampler(100)
#-------------------------------------------------------Training--------------------------------------------------
for i in range(epochs):
    # Forward pass: build the loss graph over the fixed sample points.
    loss = loss_calculator(model,x)
    print("loss",loss())
    params = model.get_weights()
    # Reverse-mode gradients of the scalar loss w.r.t. every weight tensor.
    grad_params = ad.grad(loss,params)
    # Adam step operates on raw numpy values (node() evaluates a graph node).
    new_params = optimizer([i() for i in params], [i() for i in grad_params])
    model.set_weights(new_params)
    # Re-evaluate the loss after the update to drive the stopping criterion.
    loss2= loss_calculator(model,x)
    print("loss now",loss2())
    #Exit condition: stop early once the post-step loss is small enough.
    if loss2()< 1e-2:
        break
#-----------------------------------Plotting--------------------------------------
# Same seed as sampler(100), so x_list reproduces the training points
# (as a flat array of shape (100,) rather than (100, 1)).
np.random.seed(0)
x_list = np.random.uniform(low=0,high=10,size=100)
def y(x, F, P):
    """Closed-form deflection of a simply supported beam (length 10) under a
    uniformly distributed load P, with flexural rigidity F = E*I.

    Equivalent factored form of -(P/(24*F)) * (x^4 - 20*x^3 + 1000*x).
    """
    scale = P / (24.0 * F)
    polynomial = x**4 - 20.0 * x**3 + 1000.0 * x
    return -scale * polynomial
# Analytical solution evaluated at the sample points; F = E*I = 200000 MPa * 0.000005 m^4.
y_plot = y(x_list,0.000005*200000*1000000,10000)
print(y_plot.shape)
# Evaluate the trained trial solution X*(10-X)*NN(X) point by point.
y_list =[]
for i in x_list:
    X=ad.Variable(np.array([[i]]),name="X")
    val =X*(10-X)*model.output(X)
    y_list.append(val()[0][0])
# Undeflected beam reference line along y = 0.
plt.plot(np.linspace(0,10,10),np.zeros(10),label="Beam before deflection")
plt.scatter(x_list,y_plot,marker="+",label="Analytical")
plt.scatter(x_list,y_list,marker="x",label="Predicted")
plt.xlabel("X")
plt.ylabel("Y")
# Fixed swapped labels: E is the modulus (200000 MPa), I is the second
# moment of area (0.000005 m^4) -- the original title had them reversed.
plt.title("Simply Supported Beam with 10kN/m Uniformly distributed Load , E = 200000MPa, I = 0.000005m^4")
plt.legend()
plt.show()