From bb38685524f5f6deb2e72f2160a858aa1a349f07 Mon Sep 17 00:00:00 2001 From: Lu Lu Date: Sat, 12 Dec 2020 23:15:51 -0500 Subject: [PATCH] Add code --- README.md | 145 +++++- fractional/CNN_operator_alpha.py | 464 ++++++++++++++++++ fractional/Caputo_1D.m | 6 + fractional/DeepONet_float32_batch.py | 393 +++++++++++++++ fractional/Fractional_Lap_2D.m | 6 + fractional/GJ_generate.m | 42 ++ fractional/Orthogonal_polynomials.m | 14 + fractional/Zp.m | 60 +++ fractional/datasets.py | 139 ++++++ fractional/matrix_for_frac_Lap.m | 91 ++++ fractional/operator_Orthogonal_polynomials.m | 37 ++ fractional/rootsofjacobi.m | 140 ++++++ fractional/test_set.m | 108 ++++ fractional/training_set.m | 56 +++ seq2seq/data.py | 79 +++ seq2seq/learner/__init__.py | 16 + seq2seq/learner/brain.py | 162 ++++++ seq2seq/learner/data/__init__.py | 8 + seq2seq/learner/data/data.py | 126 +++++ seq2seq/learner/integrator/__init__.py | 8 + .../integrator/hamiltonian/__init__.py | 8 + .../integrator/hamiltonian/stormer_verlet.py | 92 ++++ seq2seq/learner/nn/__init__.py | 26 + seq2seq/learner/nn/deeponet.py | 56 +++ seq2seq/learner/nn/fnn.py | 53 ++ seq2seq/learner/nn/hnn.py | 53 ++ seq2seq/learner/nn/module.py | 135 +++++ seq2seq/learner/nn/seq2seq.py | 62 +++ seq2seq/learner/nn/sympnet.py | 190 +++++++ seq2seq/learner/utils.py | 62 +++ seq2seq/seq2seq_main.py | 92 ++++ src/ADR_solver.py | 81 +++ src/ADVD_solver.py | 78 +++ src/CVC_solver.py | 187 +++++++ src/config.py | 1 + src/deeponet_dataset.py | 63 +++ src/deeponet_pde.py | 285 +++++++++++ src/sde.py | 369 ++++++++++++++ src/spaces.py | 162 ++++++ src/system.py | 321 ++++++++++++ src/utils.py | 117 +++++ 41 files changed, 4592 insertions(+), 1 deletion(-) create mode 100644 fractional/CNN_operator_alpha.py create mode 100644 fractional/Caputo_1D.m create mode 100644 fractional/DeepONet_float32_batch.py create mode 100644 fractional/Fractional_Lap_2D.m create mode 100644 fractional/GJ_generate.m create mode 100644 fractional/Orthogonal_polynomials.m create mode 100644 fractional/Zp.m create mode 100644 fractional/datasets.py create mode 100644 fractional/matrix_for_frac_Lap.m create mode 100644 fractional/operator_Orthogonal_polynomials.m create mode 100644 fractional/rootsofjacobi.m create mode 100644 fractional/test_set.m create mode 100644 fractional/training_set.m create mode 100755 seq2seq/data.py create mode 100755 seq2seq/learner/__init__.py create mode 100755 seq2seq/learner/brain.py create mode 100755 seq2seq/learner/data/__init__.py create mode 100755 seq2seq/learner/data/data.py create mode 100755 seq2seq/learner/integrator/__init__.py create mode 100755 seq2seq/learner/integrator/hamiltonian/__init__.py create mode 100755 seq2seq/learner/integrator/hamiltonian/stormer_verlet.py create mode 100755 seq2seq/learner/nn/__init__.py create mode 100755 seq2seq/learner/nn/deeponet.py create mode 100755 seq2seq/learner/nn/fnn.py create mode 100755 seq2seq/learner/nn/hnn.py create mode 100755 seq2seq/learner/nn/module.py create mode 100755 seq2seq/learner/nn/seq2seq.py create mode 100755 seq2seq/learner/nn/sympnet.py create mode 100755 seq2seq/learner/utils.py create mode 100755 seq2seq/seq2seq_main.py create mode 100644 src/ADR_solver.py create mode 100755 src/ADVD_solver.py create mode 100755 src/CVC_solver.py create mode 100644 src/config.py create mode 100644 src/deeponet_dataset.py create mode 100644 src/deeponet_pde.py create mode 100644 src/sde.py create mode 100644 src/spaces.py create mode 100644 src/system.py create mode 100644 src/utils.py diff --git 
a/README.md b/README.md index 8caa037..e334f64 100644 --- a/README.md +++ b/README.md @@ -1 +1,144 @@ -# deeponet \ No newline at end of file +# DeepONet: Learning nonlinear operators + +The source code for the paper [Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators](https://arxiv.org/abs/1910.03193). + +## System requirements + +Most code is written in Python 3, and depends on the deep learning package [DeepXDE](https://github.com/lululxvi/deepxde) v0.9.0. Some code is written in Matlab (version R2019a). + +## Installation guide + +1. Install Python 3 +2. Install DeepXDE (https://github.com/lululxvi/deepxde) +3. Optional: For CNN, install Matlab and TensorFlow 1; for Seq2Seq, install PyTorch + +The installation may take between 10 minutes and one hour. + +## Demo + +### Case `Antiderivative` + +1. Open deeponet_pde.py, and choose the parameters/setup in the functions `main()` and `ode_system()` based on the comments; +2. Run deeponet_pde.py, which will first generate the two datasets (training and test) and then train a DeepONet (illustrative sketches of the generated data and of the DeepONet output are given below, after this README). The training and test MSE errors will be displayed on the screen. + +A standard output is + +``` +Building operator neural network... +'build' took 0.104784 s + +Generating operator data... +'gen_operator_data' took 20.495655 s + +Generating operator data... +'gen_operator_data' took 168.944620 s + +Compiling model... +'compile' took 0.265885 s + +Initializing variables... +Training model... + +Step Train loss Test loss Test metric +0 [1.09e+00] [1.11e+00] [1.06e+00] +1000 [2.57e-04] [2.87e-04] [2.76e-04] +2000 [8.37e-05] [9.99e-05] [9.62e-05] +... +50000 [9.98e-07] [1.39e-06] [1.09e-06] + +Best model at step 46000: + train loss: 6.30e-07 + test loss: 9.79e-07 + test metric: [7.01e-07] + +'train' took 324.343075 s + +Saving loss history to loss.dat ... +Saving training data to train.dat ... +Saving test data to test.dat ... +Restoring model from model/model.ckpt-46000 ... + +Predicting... +'predict' took 0.056257 s + +Predicting... +'predict' took 0.012670 s + +Test MSE: 9.269857471315847e-07 +Test MSE w/o outliers: 6.972881784590493e-07 +``` + +The training and test errors are reported at the end of the output. + +The run time ranges from several minutes to several hours, depending on the parameters you choose, e.g., the dataset size and the number of iterations for training. + +### Case `Stochastic ODE/PDE` + +1. Open sde.py, and choose the parameters/setup in the function `main()`; +2. Run sde.py, which will generate the training and test datasets; +3. Open deeponet_dataset.py, and choose the parameters/setup in the function `main()`; +4. Run deeponet_dataset.py to train a DeepONet. The training and test MSE errors will be displayed on the screen. + +### Case `1D Caputo fractional derivative` + +1. Go to the folder `fractional`; +2. Run Caputo_1D.m to generate the training and test datasets. One can specify the orthogonal polynomials to be Legendre polynomials or poly-fractonomials in Orthogonal_polynomials.m (a Python sketch of the underlying Caputo quadrature is given below, after this README). Expected run time: 20 mins. +3. Run datasets.py to pack and compress the generated datasets. Expected outputs: compressed .npz files. Expected run time: 5 mins. +4. Run DeepONet_float32_batch.py to train and test DeepONets. Expected outputs: a figure of training and test losses. Expected run time: 1 hour. + +### Case `2D fractional Laplacian` + +#### Learning a 2D fractional Laplacian using DeepONets + +1. Run Fractional_Lap_2D.m to generate the training and test datasets. 
Expected outputs: text files that store the training and test data. Expected run time: 40 mins. +2. Run datasets.py to pack and compress the generated datasets. Expected outputs: compressed .npz files. Expected run time: 15 mins. +3. Run DeepONet_float32_batch.py to train and test DeepONets. Expected run time: 3 hours. + +#### Learning a 2D fractional Laplacian using CNNs + +1. Suppose that the text files containing all training and test sets have been generated in the previous step. +2. Run CNN_operator_alpha.py to train and test CNNs. Expected outputs: a figure of training and test losses. Expected run time: 30 mins. + +### Seq2Seq + +1. Open seq2seq_main.py, choose the problem in the function main(), and change the parameters/setup in the corresponding function (antiderivative()/pendulum()) if needed. +2. Run seq2seq_main.py, which will first generate the dataset and then train the Seq2Seq model on the dataset. The training and test MSE errors will be displayed on the screen. Moreover, the loss history, the generated data, and the best trained model will be saved in the directory ('./outputs/'). + +A standard output is + +``` +Training... +0 Train loss: 0.21926558017730713 Test loss: 0.22550159692764282 +1000 Train loss: 0.0022761737927794456 Test loss: 0.0024939212016761303 +2000 Train loss: 0.0004760705924127251 Test loss: 0.0005566366016864777 +... +49000 Train loss: 1.2885914202342974e-06 Test loss: 1.999963387788739e-06 +50000 Train loss: 1.1382834372852813e-06 Test loss: 1.8525416862757993e-06 +Done! +'run' took 747.5421471595764 s +Best model at iteration 50000: +Train loss: 1.1382834372852813e-06 Test loss: 1.8525416862757993e-06 +``` + +The training and test errors are reported at the end of the output. + +The run time ranges from several minutes to several hours, depending on the parameters you choose, e.g., the dataset size and the number of iterations for training. + +## Instructions for use + +The instructions for running each case are as follows. + +- Legendre transform: The same as `Antiderivative` in Demo. You need to modify the function `main()` in deeponet_pde.py. +- Antiderivative: In Demo. +- Fractional (1D): In Demo. +- Fractional (2D): In Demo. +- Nonlinear ODE: The same as `Antiderivative` in Demo. You need to modify the functions `main()` and `ode_system()` in deeponet_pde.py. +- Gravity pendulum: The same as `Antiderivative` in Demo. You need to modify the functions `main()` and `ode_system()` in deeponet_pde.py. +- Diffusion-reaction: The same as `Antiderivative` in Demo. You need to modify the function `main()` in deeponet_pde.py. +- Advection: The same as `Antiderivative` in Demo. You need to modify the functions `main()` and `run()` in deeponet_pde.py, `CVCSystem()` in system.py, and `solve_CVC()` in CVC_solver.py to run each case. +- Advection-diffusion: The same as `Antiderivative` in Demo. You need to modify the function `main()` in deeponet_pde.py. +- Stochastic ODE/PDE: In Demo. + +## Questions + +To get help on how to use the data or code, simply open an issue in the GitHub "Issues" section. 
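## Illustrative sketches

The following sketch is not part of the repository; it only illustrates the kind of training pair that deeponet_pde.py generates for the `Antiderivative` demo: an input function u sampled from a Gaussian random field (GRF) and its antiderivative evaluated on a grid. The names `m` and `length_scale` and all values are assumptions for illustration; the actual generation lives in deeponet_pde.py and src/spaces.py.

```python
# Hedged sketch of one antiderivative training example; all names and values
# are illustrative assumptions, not the repository's actual implementation.
import numpy as np
from scipy.integrate import cumulative_trapezoid

m = 100                # number of input sensors (assumed)
length_scale = 0.2     # GRF length scale (assumed)
x = np.linspace(0, 1, m)

# Sample u from a zero-mean GRF with an RBF covariance kernel
cov = np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / length_scale ** 2)
u = np.random.multivariate_normal(np.zeros(m), cov + 1e-12 * np.eye(m))

# Target operator: G(u)(y) = s(0) + int_0^y u(t) dt, here with s(0) = 0
Gu = cumulative_trapezoid(u, x, initial=0)

# One DeepONet example: (u at the m sensors, a query location y, G(u)(y))
j = np.random.randint(m)
example = (u, x[j], Gu[j])
```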
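In DeepONet_float32_batch.py below, the branch net `net_u` (acting on the sampled input function) and the trunk net `net_y` (acting on the query coordinates) are merged by a plain dot product plus a scalar bias: `net_o = tf.reduce_sum(net_u * net_y, axis=1, keepdims=True) + b0`. A minimal, framework-free sketch of that merge (function and argument names are illustrative):

```python
# Hedged NumPy sketch of the DeepONet output computation used in
# DeepONet_float32_batch.py: G(u)(y) ~ sum_k branch_k(u) * trunk_k(y) + b0.
import numpy as np

def deeponet_output(branch, trunk, b0):
    """branch: (batch, p) features of the sampled input function u;
    trunk: (batch, p) features of the query location y;
    returns (batch, 1) operator predictions."""
    return np.sum(branch * trunk, axis=1, keepdims=True) + b0
```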
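For the 1D Caputo case, operator_Orthogonal_polynomials.m discretizes the Caputo derivative on [-1, 1] with a uniform-grid L1 finite-difference quadrature. A hedged Python translation of that quadrature, under the same grid and weight construction (function and argument names are assumptions), is:

```python
# Hedged Python translation of the L1 quadrature in
# operator_Orthogonal_polynomials.m (names are illustrative).
import numpy as np
from scipy.special import gamma

def caputo_l1(u, y, alpha, N=1000):
    """Approximate the Caputo derivative of order 0 < alpha < 1 of the
    callable u at the point y in (-1, 1], with lower terminal -1."""
    t = np.linspace(-1.0, y, N + 1)  # uniform grid on [-1, y]
    h = (y + 1.0) / N
    k = np.arange(N)
    cc = (N - k) ** (1 - alpha) - (N - k - 1) ** (1 - alpha)
    w = np.zeros(N + 1)
    w[1:] += cc          # weights multiplying u(t_{k+1})
    w[:N] -= cc          # weights multiplying u(t_k)
    w *= h ** (-alpha) / gamma(2 - alpha)
    return w @ u(t)
```

As a sanity check, for u(t) = t + 1 the weights telescope and the sketch returns (y + 1)^(1 - alpha) / gamma(2 - alpha), the exact Caputo derivative of a linear function.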
diff --git a/fractional/CNN_operator_alpha.py b/fractional/CNN_operator_alpha.py new file mode 100644 index 0000000..0418049 --- /dev/null +++ b/fractional/CNN_operator_alpha.py @@ -0,0 +1,464 @@ +### Variational autoencoder + +import tensorflow as tf +import matplotlib.pyplot as plt +import numpy as np + +# from keras.datasets import mnist + + +class CNN_model: + def __init__(self): + pass + + def conv2D_layer( + self, + in_features, + out_channel, + padding_size=0, + filter_size=3, + activation="tanh", + output_paras=False, + ): + batch_size, height, width, channel = in_features.shape + in_height = height.value + in_width = width.value + in_channel = channel.value + assert height == width, "The input image must be square like" + S = 1 # unit-stride + out_height = (in_height - filter_size + 2 * padding_size) / S + 1 + in_dim = in_height ** 2 * in_channel + xavier_stddev = np.sqrt(2.0 / (in_dim)) + weights = tf.Variable( + tf.truncated_normal( + [filter_size, filter_size, in_channel, out_channel], + stddev=xavier_stddev, + dtype=tf.float32, + seed=None, + ), + dtype=tf.float32, + ) + biases = tf.Variable(tf.zeros((1, out_channel), dtype=tf.float32)) + padded_features = tf.pad( + in_features, + [ + [0, 0], + [padding_size, padding_size], + [padding_size, padding_size], + [0, 0], + ], + "CONSTANT", + ) + feature_maps = tf.nn.conv2d( + padded_features, + weights, + padding="VALID", + strides=[1, 1, 1, 1], + data_format="NHWC", + ) + feature_maps = feature_maps + biases + if activation == "relu": + out_features = tf.nn.relu(feature_maps) + elif activation == "sigmoid": + out_features = tf.nn.sigmoid(feature_maps) + elif activation == "identity": + out_features = feature_maps + elif activation == "tanh": + out_features = tf.nn.tanh(feature_maps) + + if output_paras == True: + return out_features, weights, biases + else: + return out_features + + def pooling2D(self, in_features, size): + return tf.nn.max_pool( + in_features, + ksize=[1] + size + [1], + strides=[1] + size + [1], + padding="VALID", + ) + + def dense(self, in_features, out_channel, activation="tanh", output_paras=False): + flattened_features = tf.layers.flatten(in_features) + in_dim = flattened_features.shape[1] + in_dim = in_dim.value + out_dim = out_channel + xavier_stddev = np.sqrt(2.0 / (in_dim + out_dim)) + weights = tf.Variable( + tf.truncated_normal( + [in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float32, seed=None + ), + dtype=tf.float32, + ) + biases = tf.Variable(tf.zeros((1, out_dim), dtype=tf.float32)) + feature_maps = tf.matmul(flattened_features, weights) + feature_maps = feature_maps + biases + if activation == "relu": + out_features = tf.nn.relu(feature_maps) + elif activation == "softmax": + exp0 = tf.exp(-1.0 * feature_maps) + out_features = exp0 / tf.reduce_sum(exp0, axis=1, keepdims=True) + elif activation == "sigmoid": + out_features = tf.nn.sigmoid(feature_maps) + elif activation == "softplus": + out_features = tf.nn.softplus(feature_maps) + elif activation == "identity": + out_features = feature_maps + elif activation == "tanh": + out_features = tf.nn.tanh(feature_maps) + + if output_paras == True: + return out_features, weights, biases + else: + return out_features + + def cross_entropy(self, out_features, labels): + return tf.reduce_sum( + -1.0 * tf.reduce_sum(tf.log(out_features) * labels, axis=1) + ) + + def latent_space(self, in_features, latent_dim, batch_size): + z_mean = self.dense(in_features, latent_dim, activation="identity") + z_log_var = self.dense( + in_features, latent_dim, 
activation="identity" + ) # log std + out_features = z_mean + tf.sqrt(tf.exp(z_log_var)) * tf.random_normal( + (batch_size, latent_dim) + ) + return out_features, z_mean, z_log_var + + def dense_reshape(self, in_features, shape): + return tf.reshape(in_features, [-1] + shape) + + def upsampling(self, in_features, shape): + return tf.image.resize_images(in_features, size=shape) + + def VAE_loss(self, in_features, batch_labels, z_mean, z_log_var): + dim = in_features.shape + dim = dim[1].value * dim[2].value * dim[3].value + predicted = tf.reshape(in_features, (-1, dim)) + ground_truth = tf.reshape(batch_labels, (-1, dim)) + CE_loss = tf.reduce_mean( + tf.reduce_sum(tf.square(predicted - ground_truth), axis=1) + ) / tf.reduce_mean(tf.reduce_sum(tf.square(ground_truth), axis=1)) + epsilon = 1e-7 + # CE_loss = -1.0*tf.reduce_mean(tf.reduce_sum(tf.log(epsilon+predicted)*ground_truth+tf.log(1.0+epsilon-predicted)*(1-ground_truth),axis=1)) + REF_loss = -1.0 * tf.reduce_mean( + tf.reduce_sum( + tf.log(epsilon + ground_truth) * ground_truth + + tf.log(1.0 + epsilon - ground_truth) * (1 - ground_truth), + axis=1, + ) + ) + KL_loss = -0.0 * tf.reduce_mean( + tf.reduce_sum(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=1) + ) + # loss1 = -1.0* tf.reduce_sum(tf.log(epsilon+predicted)*ground_truth+tf.log(1.0+epsilon-predicted)*(1-ground_truth),axis=1) + loss1 = CE_loss + loss2 = KL_loss + loss = loss1 + loss2 + relative_err = tf.linalg.norm(predicted - ground_truth, 2) / tf.linalg.norm( + ground_truth, 2 + ) + return loss, CE_loss, KL_loss, REF_loss, relative_err + + +# (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + +# train_images = train_images.astype('float32')/255 +# test_images = test_images.astype('float32')/255 + +# train_images = np.expand_dims(train_images, axis=3) # add channel axis +# test_images = np.expand_dims(test_images, axis=3) + + +# train_labels0 = np.zeros((train_images.shape[0],10),dtype=np.float32) +# train_labels0[np.arange(train_images.shape[0]),train_labels]=1.0 + +# test_labels0 = np.zeros((test_images.shape[0],10),dtype=np.float32) +# test_labels0[np.arange(test_images.shape[0]),test_labels]=1.0 + +image_size = 15 +num_u = 10000 + +# images = np.loadtxt('training_u.txt',dtype = np.float32).T +# labels0 = np.loadtxt('training_Lu.txt', dtype=np.float32).T +# cart_co = np.loadtxt('training_y.txt', dtype=np.float32) +# alpha = np.loadtxt('training_alpha.txt', dtype=np.float32) + + +# images_test = np.loadtxt('test_u.txt',dtype = np.float32).T +# labels0_test = np.loadtxt('test_Lu.txt', dtype=np.float32).T +# cart_co_test = np.loadtxt('test_y.txt', dtype=np.float32) +# alpha_test = np.loadtxt('test_alpha.txt', dtype=np.float32) +train_data = np.load("train.npz") +test_data = np.load("test.npz") + +images = train_data["images"] +labels0 = train_data["labels0"] +cart_co = train_data["cart_co"] +alpha = train_data["alpha"] + +images_test = test_data["images"] +labels0_test = test_data["labels0"] +cart_co_test = test_data["cart_co"] +alpha_test = test_data["alpha"] + +batch_images = tf.placeholder(tf.float32, shape=(None, image_size, image_size, 1)) +batch_labels = tf.placeholder( + tf.float32, shape=(None, image_size, image_size, alpha.shape[0]) +) + +x_grid = cart_co[:, 0].reshape((image_size, image_size)).T +y_grid = cart_co[:, 1].reshape((image_size, image_size)).T + +x_grid_test = cart_co_test[:, 0].reshape((image_size, image_size)).T +y_grid_test = cart_co_test[:, 1].reshape((image_size, image_size)).T + +labels = 
np.zeros((images.shape[0], image_size ** 2, alpha.shape[0])) +for i in range(alpha.shape[0]): + labels[:, :, i] = labels0[(i * num_u) : ((i + 1) * num_u), 0:] + +labels_test = np.zeros((images.shape[0], image_size ** 2, alpha_test.shape[0])) +for i in range(alpha_test.shape[0]): + labels_test[:, :, i] = labels0_test[(i * num_u) : ((i + 1) * num_u), 0:] + +np.random.seed(1234) +# index = np.arange(images.shape[0]) +# np.random.shuffle(index) +# idd = int(0.7*images.shape[0]) +# +# train_images = images[:idd,0:] +# train_labels = labels[:idd,0:,0:] +# test_images = images[idd:,0:] +# test_labels = labels[idd:,0:,0:] +train_images = images +train_labels = labels +test_images = images_test +test_labels = labels_test + +# max_image = np.amax(train_images0) +# min_image = np.amin(train_images0) +# max_label = np.amax(train_labels0) +# min_label = np.amin(train_labels0) + +# max_image = np.amax(images) +# min_image = np.amin(images) +# max_label = np.amax(labels) +# min_label = np.amin(labels) + +# train_images = (train_images0 - min_image) / (max_image - min_image) +# train_labels = (train_labels0 - min_label) / (max_label - min_label) +# test_images = (test_images0 - min_image) / (max_image - min_image) +# test_labels = (test_labels0 - min_label) / (max_label - min_label) + +train_images = train_images.reshape((-1, image_size, image_size, 1)) +train_labels = train_labels.reshape((-1, image_size, image_size, alpha.shape[0])) +test_images = test_images.reshape((-1, image_size, image_size, 1)) +test_labels = test_labels.reshape((-1, image_size, image_size, alpha_test.shape[0])) + +# plt.imshow(train_images[1021,:].reshape((image_size,image_size)), cmap='jet') +# plt.colorbar() +# plt.show() +# plt.imshow(train_labels[1021,:].reshape((image_size,image_size)), cmap='jet') +# plt.colorbar() +# plt.show() + +# aaa = 1 + +num_epoch = 10001 +batch_size0 = 100 +batch_size = tf.placeholder(tf.int32, shape=()) +latent_dim = 20 + +x = CNN_model() + +# feature = x.conv2D_layer(batch_images, 32) +# feature = x.pooling2D(feature, [2,2]) +# feature = x.conv2D_layer(feature, 64) +# feature = x.pooling2D(feature, [2,2]) +# feature = x.conv2D_layer(feature, 64) +# feature = x.dense(feature, 64) +# feature, z_mean, z_log_var = x.latent_space(feature, latent_dim, batch_size) +# feature0 = feature +# feature = x.dense(feature, 64) +# feature = x.dense(feature, 3*3*64) +# feature = x.dense_reshape(feature, [3,3,64]) +# feature = x.conv2D_layer(feature, 64, padding_size=2) +# feature = x.upsampling(feature, shape=[11,11]) +# feature = x.conv2D_layer(feature, 32, padding_size=2) +# feature = x.upsampling(feature, shape=[26,26]) +# feature = x.conv2D_layer(feature, alpha.shape[0], padding_size=2, activation = 'identity') +# loss, loss1, loss2, ref_loss, predicted0, ground_truth0 = x.VAE_loss(feature, batch_labels, z_mean, z_log_var) +feature = x.conv2D_layer(batch_images, 32) +feature = x.pooling2D(feature, [2, 2]) +feature = x.conv2D_layer(feature, 64) +feature = x.dense(feature, 64) +feature, z_mean, z_log_var = x.latent_space(feature, latent_dim, batch_size) +feature0 = feature +feature = x.dense(feature, 64) +feature = x.dense(feature, 4 * 4 * 64) +feature = x.dense_reshape(feature, [4, 4, 64]) +feature = x.conv2D_layer(feature, 32, padding_size=2) +feature = x.upsampling(feature, shape=[13, 13]) +feature = x.conv2D_layer(feature, alpha.shape[0], padding_size=2, activation="identity") +loss, loss1, loss2, ref_loss, relative_err = x.VAE_loss( + feature, batch_labels, z_mean, z_log_var +) + +# feature = 
x.dense(batch_images, 500) +# feature = x.dense(feature, 500) + +# feature, z_mean, z_log_var = x.latent_space(feature, latent_dim, batch_size) +# feature0 = feature +# feature = x.dense(feature, 500) +# feature = x.dense(feature, 500) +# feature = x.dense(feature, 30*30*1, activation = 'identity') +# feature = x.dense_reshape(feature, [30,30,1]) +# loss, loss1, loss2, ref_loss = x.VAE_loss(feature, batch_labels, z_mean, z_log_var) + +index = np.arange(train_images.shape[0]) +optimizer_Adam = tf.train.AdamOptimizer(1.0e-3) # Adam as SGD algorithm +# optimizer_RMS = tf.train.RMSPropOptimizer(1.0e-3) +train_op_Adam = optimizer_Adam.minimize(loss) +# train_op_RMS = optimizer_RMS.minimize(loss) + + +def latent_variables(feature, images, image0, labels, batch_size0, sess): + feed_dict = {batch_images: images, batch_size: batch_size0} + feed_dict0 = {batch_images: image0, batch_size: 1} + + lv = sess.run(feature0, feed_dict) + lv0 = sess.run(feature0, feed_dict0) + label0 = np.argmax(labels, axis=1) + fig = plt.figure() + index0 = np.argwhere(label0 == 0) + plt.plot(lv[index0, 0], lv[index0, 1], "c.") + index1 = np.argwhere(label0 == 1) + plt.plot(lv[index1, 0], lv[index1, 1], "b.") + index2 = np.argwhere(label0 == 2) + plt.plot(lv[index2, 0], lv[index2, 1], "y.") + index3 = np.argwhere(label0 == 3) + plt.plot(lv[index3, 0], lv[index3, 1], "k.") + index7 = np.argwhere(label0 == 7) + plt.plot(lv[index7, 0], lv[index7, 1], "r.") + index9 = np.argwhere(label0 == 9) + plt.plot(lv[index9, 0], lv[index9, 1], "g.") + plt.plot(lv0[0, 0], lv0[0, 1], "go") + plt.savefig("VAE4.png", dpi=300) + plt.show() + plt.close(fig) + + +init = tf.global_variables_initializer() +num_batch = int(train_images.shape[0] / batch_size0) +# feed_dict_train = {batch_images: train_images, batch_labels: train_labels, batch_size: train_images.shape[0]} +feed_dict_test = { + batch_images: test_images, + batch_labels: test_labels, + batch_size: test_images.shape[0], +} + +batch_loss_record = [] +test_loss_record = [] +epoch_record = [] + +with tf.Session() as sess: + sess.run(init) + for epoch in range(num_epoch): + np.random.shuffle(index) + for i in range(num_batch): + + batch_input = train_images[ + index[(i * batch_size0) : ((i + 1) * batch_size0)], :, :, : + ] + batch_output = train_labels[ + index[(i * batch_size0) : ((i + 1) * batch_size0)], : + ] + feed_dict = { + batch_images: batch_input, + batch_labels: batch_output, + batch_size: batch_size0, + } + sess.run(train_op_Adam, feed_dict=feed_dict) + # sess.run(train_op_RMS, feed_dict = feed_dict ) + if epoch % 10 == 0: + + loss_test_val, loss_test_1_val, loss_test_2_val, ref_loss_val = sess.run( + [loss, loss1, loss2, ref_loss], feed_dict_test + ) + loss_batch_val, loss_batch_1_val, loss_batch_2_val = sess.run( + [loss, loss1, loss2], feed_dict + ) + batch_loss_record.append(loss_batch_val) + test_loss_record.append(loss_test_val) + epoch_record.append(epoch) + + epoch_vec = np.reshape(np.stack(epoch_record), (-1, 1)) + batch_loss_vec = np.reshape(np.stack(batch_loss_record), (-1, 1)) + test_loss_vec = np.reshape(np.stack(test_loss_record), (-1, 1)) + + fig = plt.figure() + plt.semilogy( + epoch_vec, batch_loss_vec, "ro-", label="Training loss per batch" + ) + plt.semilogy(epoch_vec, test_loss_vec, "bo-", label="Test loss") + plt.legend() + plt.savefig("loss_curve.png", dpi=300) + # plt.show() + plt.close(fig) + + record = np.concatenate((epoch_vec, batch_loss_vec, test_loss_vec), axis=1) + np.savetxt("records.txt", record) + + print( + "\n Epoch. ", + epoch, + " Batch. 
", + i, + " Batch_loss. ", + [loss_batch_val, loss_batch_1_val, loss_batch_2_val], + " Test_loss. ", + [loss_test_val, loss_test_1_val, loss_test_2_val], + ) + print("Ref_loss: ", ref_loss_val) + # if np.isnan(loss_test_val): + # aaa = pred_val + print("\n") + # predicted_val, ground_truth_val = sess.run([predicted0, ground_truth0],feed_dict_test) + predicted_all, rel_err = sess.run([feature, relative_err], feed_dict_test) + predicted = predicted_all[2] + ground_truth = test_labels[2] + fig = plt.figure() + plt.subplot(121) + plt.imshow(predicted[:, :, 0].T, cmap="jet") + plt.colorbar() + plt.subplot(122) + plt.imshow(ground_truth[:, :, 0].T, cmap="jet") + plt.colorbar() + plt.tight_layout() + plt.savefig("VAE3.png", dpi=300) + # plt.show() + plt.close(fig) + + fig = plt.figure() + plt.subplot(121) + plt.contourf(x_grid, y_grid, predicted[:, :, 0].T, cmap="jet") + plt.colorbar() + plt.subplot(122) + plt.contourf(x_grid, y_grid, ground_truth[:, :, 0].T, cmap="jet") + plt.colorbar() + plt.tight_layout() + plt.savefig("VAE33.png", dpi=300) + # plt.show() + plt.close(fig) + + print("Average relative error: ", rel_err, "\n") + + print( + "Err: ", + np.linalg.norm(predicted[:, :, 0] - ground_truth[:, :, 0]) + / np.linalg.norm(ground_truth[:, :, 0]), + ) + # latent_variables(feature0, test_images[:1000,0:], test_images[0:1,0:], test_labels0[:1000,0:], 1000, sess) diff --git a/fractional/Caputo_1D.m b/fractional/Caputo_1D.m new file mode 100644 index 0000000..24767f2 --- /dev/null +++ b/fractional/Caputo_1D.m @@ -0,0 +1,6 @@ +tic +space_paras = [10, 2]; +space_type = 'Orthogonal'; +training_set(10000, 15, 10, 10, '1D_Caputo', space_type, space_paras); +test_set(10000, 15, 10, 10, '1D_Caputo', space_type, space_paras); +toc diff --git a/fractional/DeepONet_float32_batch.py b/fractional/DeepONet_float32_batch.py new file mode 100644 index 0000000..36b436f --- /dev/null +++ b/fractional/DeepONet_float32_batch.py @@ -0,0 +1,393 @@ +import tensorflow as tf +import matplotlib.pyplot as plt +import scipy.special as scisp +import numpy as np +from SALib.sample import sobol_sequence +import time +import sys + +# import datasets as ds + +random_seed = 12345 + + +def xavier_init(size): + in_dim = size[0] + out_dim = size[1] + xavier_stddev = np.sqrt(2.0 / (in_dim + out_dim)) + return tf.Variable( + tf.truncated_normal( + [in_dim, out_dim], stddev=xavier_stddev, seed=random_seed, dtype=tf.float32 + ), + dtype=tf.float32, + ) + + +# def neural_net(X, weights, biases): +# num_layers = len(weights) + 1 +# H = X +# for l in range(0,num_layers-1): +# W = weights[l] +# b = biases[l] +# H = tf.nn.tanh(tf.add(tf.matmul(H, W), b)) +# W = weights[-1] +# b = biases[-1] +# Y = tf.add(tf.matmul(H, W), b) +# Y = H +# return Y + + +def neural_net2(X, weights, biases): + num_layers = len(weights) + 1 + H = X + for l in range(0, num_layers - 1): + W = weights[l] + b = biases[l] + H = tf.nn.tanh(tf.add(tf.matmul(H, W), b)) + + Y = H + return Y + + +def neural_net1(X, weights, biases): + num_layers = len(weights) + 1 + H = X + for l in range(0, num_layers - 2): + W = weights[l] + b = biases[l] + H = tf.nn.tanh(tf.add(tf.matmul(H, W), b)) + W = weights[-1] + b = biases[-1] + Y = tf.add(tf.matmul(H, W), b) + return Y + + +################ Specify parameters and hyperparameters +### learning 1D Caputo derivative +m = 15 # length of u vector +d = 2 # dim of (y,alpha) + +### learning 2D fractional Laplacian +# m = 225 # length of u vector +# d = 3 # dim of (x,y,alpha) + +batch_size = 100000 +num_epoch = 1000001 +print_skip = 100 
+is_test = False +# is_test = True + +### 1D Caputo +layers_u = [m] + [40] * 3 +layers_y = [d] + [40] * 3 + +### 2D fractional Laplacian +# layers_u = [m] + [60]*3 +# layers_y = [d] + [60]*3 + +store_path = "./saved_model/" + +################################# building DeepONet + +L_u = len(layers_u) +L_y = len(layers_y) + +b0 = tf.Variable(0.0, name="b0", dtype=tf.float32) + +weights_u = [ + tf.Variable( + xavier_init([layers_u[l], layers_u[l + 1]]), + name="weights_u" + str(l), + dtype=tf.float32, + ) + for l in range(0, L_u - 1) +] +biases_u = [ + tf.Variable( + tf.zeros((1, layers_u[l + 1]), dtype=tf.float32, name="biases_u" + str(l)), + dtype=tf.float32, + ) + for l in range(0, L_u - 1) +] + +weights_y = [ + tf.Variable( + xavier_init([layers_y[l], layers_y[l + 1]]), + name="weights_y" + str(l), + dtype=tf.float32, + ) + for l in range(0, L_y - 1) +] +biases_y = [ + tf.Variable( + tf.zeros((1, layers_y[l + 1]), dtype=tf.float32, name="biases_y" + str(l)), + dtype=tf.float32, + ) + for l in range(0, L_y - 1) +] + +x_u = tf.placeholder(tf.float32, shape=(None, m)) +x_y = tf.placeholder(tf.float32, shape=(None, d)) +y = tf.placeholder(tf.float32, shape=(None, 1)) + +net_u = neural_net1(x_u, weights_u, biases_u) +net_y = neural_net2(x_y, weights_y, biases_y) + +net_o = tf.reduce_sum(net_u * net_y, axis=1, keepdims=True) + b0  # dot-product merge of branch and trunk nets, plus bias + +saver = tf.train.Saver( + var_list=[weights_u[l] for l in range(L_u - 1)] + + [biases_u[l] for l in range(L_u - 1)] + + [weights_y[l] for l in range(L_y - 1)] + + [biases_y[l] for l in range(L_y - 1)] + + [b0] +) + +############ defining loss and optimizer + +loss = tf.reduce_mean(tf.square(net_o - y)) / tf.reduce_mean(tf.square(y))  # relative mean-squared error +optimizer_Adam = tf.train.AdamOptimizer(1.0e-3) +# tt0 = time.time() +train_op_Adam = optimizer_Adam.minimize(loss) +# tt1 = time.time() +# print ('loss_graph CPU time: ', tt1-tt0) + +############ generating and loading training, validation, and test sets + +# if is_test == False: +# tt0 = time.time() +# ds.training_set(m, d, n_u, n_y) +# tt1 = time.time() +# print ('Generate training set CPU time: ', tt1-tt0) +# +# ds.test_set(m, d, n_y) +data_path = "data/" + +data = np.load(data_path + "train.npz") +X_u_train, X_y_train, Y_train = data["X_u_train"], data["X_y_train"], data["Y_train"] + +data = np.load(data_path + "test.npz") +X_u_test, X_y_test, Y_test = data["X_u_test"], data["X_y_test"], data["Y_test"] + +data = np.load(data_path + "test0.npz") +X_u_test0, X_y_test0, Y_test0 = data["X_u_test"], data["X_y_test"], data["Y_test"] + +# data = np.load("test_fabricated.npz") +# X_u_test, X_y_test, Y_test = data["X_u_test"], data["X_y_test"], data["Y_test"] + +# X_u_train = (X_u_train0 - np.mean(X_u_train0,axis=0,keepdims=True))/np.std(X_u_train0,axis=0, keepdims=True) +# X_y_train = (X_y_train0 - np.mean(X_y_train0,axis=0,keepdims=True))/np.std(X_y_train0,axis=0, keepdims=True) +# +# +# X_u_test = (X_u_test0- np.mean(X_u_train0,axis=0,keepdims=True))/np.std(X_u_train0,axis=0, keepdims=True) +# X_y_test = (X_y_test0 - np.mean(X_y_train0,axis=0,keepdims=True))/np.std(X_y_train0,axis=0, keepdims=True) + +################## Training, validation, or testing +loss_train_h = [] +loss_test_h = [] +loss_test0_h = [] + +i_h = [] + +if is_test == False: + tt0 = time.time() + min_loss = 1e16 + num_batch = X_u_train.shape[0] // batch_size + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + # feed_train = {x_u: X_u_train, x_y: X_y_train, y: Y_train} + feed_test = {x_u: X_u_test, x_y: X_y_test, y: Y_test} + feed_test0 = {x_u: 
X_u_test0, x_y: X_y_test0, y: Y_test0} + + ind = np.arange(X_u_train.shape[0]) + for i in range(num_epoch): + np.random.shuffle(ind) + for j in range(num_batch): + feed_train_batch = { + x_u: X_u_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:], + x_y: X_y_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:], + y: Y_train[ind[(j * batch_size) : ((j + 1) * batch_size)], 0:], + } + if i % print_skip == 0 and j == num_batch - 1: + temp_loss = sess.run(loss, feed_train_batch) + + if temp_loss < min_loss: + save_path = saver.save(sess, store_path + "paras_NN.ckpt") + min_loss = temp_loss + loss_train = temp_loss + loss_test, Y_pred = sess.run([loss, net_o], feed_test) + loss_test0, Y_pred0 = sess.run([loss, net_o], feed_test0) + error = np.linalg.norm(Y_pred - Y_test) / np.linalg.norm(Y_test) + error0 = np.linalg.norm(Y_pred0 - Y_test0) / np.linalg.norm( + Y_test0 + ) + + loss_train_h.append(loss_train) + loss_test_h.append(loss_test) + loss_test0_h.append(loss_test0) + + i_h.append(np.float64(i)) + + fig = plt.figure() + losst = np.stack(loss_train_h) + lossv = np.stack(loss_test_h) + lossv0 = np.stack(loss_test0_h) + + ii = np.stack(i_h) + plt.semilogy(ii, losst, "r", label="Training loss") + plt.semilogy(ii, lossv, "b", label="Test loss") + plt.semilogy(ii, lossv0, "b", label="Test loss0") + + plt.xlabel("Number of epochs") + plt.ylabel("Loss") + plt.title("Training and test") + plt.legend() + plt.savefig(store_path + "Training_test0.png", dpi=300) + plt.tight_layout() + plt.close(fig) + + fig = plt.figure() + losst = np.stack(loss_train_h) + lossv = np.stack(loss_test_h) + lossv0 = np.stack(loss_test0_h) + + ii = np.stack(i_h) + plt.semilogy(ii, losst, "r", label="Training loss") + plt.semilogy(ii, lossv, "b", label="Test loss") + + plt.xlabel("Number of epochs") + plt.ylabel("Loss") + plt.title("Training and test") + plt.legend() + plt.savefig(store_path + "Training_test.png", dpi=300) + plt.tight_layout() + plt.close(fig) + + with open(store_path + "training_validation.txt", "a") as f: + f.write( + "Epoch: " + + str(i + 1) + + " Training loss: " + + str(loss_train) + + " Test loss: " + + str(loss_test) + + " Test loss0: " + + str(loss_test0) + + " RelErr: " + + str(error) + + "\n\n" + ) + print( + "\n", + "Epoch: ", + i + 1, + "Training loss: ", + loss_train, + "Test loss: ", + loss_test, + "Test loss0: ", + loss_test0, + "Rel_Err: ", + error, + ) + + np.savetxt(store_path + "loss_train.txt", losst) + np.savetxt(store_path + "loss_test.txt", lossv) + np.savetxt(store_path + "loss-test0.txt", lossv0) + np.savetxt(store_path + "ii.txt", ii) + + sess.run(train_op_Adam, feed_train_batch) + + tt1 = time.time() + + print("Training and validation CPU time: ", tt1 - tt0) + +else: + tt0 = time.time() + + with tf.Session() as sess: + saver.restore(sess, store_path + "paras_NN.ckpt") + feed_test = {x_u: X_u_test, x_y: X_y_test, y: Y_test} + feed_test0 = {x_u: X_u_test0, x_y: X_y_test0, y: Y_test0} + + # feed_train = {x_u: X_u_train, x_y: X_y_train, y: Y_train} + feed_valid = {x_u: X_u_test, x_y: X_y_test, y: Y_test} + # train_loss = sess.run(loss, feed_train) + valid_loss = sess.run(loss, feed_valid) + test_loss, Y_pred = sess.run([loss, net_o], feed_test) + test_loss0, Y_pred0 = sess.run([loss, net_o], feed_test0) + + test_err = np.linalg.norm(Y_pred - Y_test) / np.linalg.norm(Y_test) + test_err0 = np.linalg.norm(Y_pred0 - Y_test0) / np.linalg.norm(Y_test0) + + with open(store_path + "test.txt", "a") as f: + f.write( + " Validation loss: " + + str(valid_loss) + + " Test loss: " + + 
str(test_loss) + " Test loss0: " + str(test_loss0) + " RelErr: " + str(test_err) + "\n\n" ) + + print( + "Valid_loss: ", + valid_loss, + "Test_loss: ", + test_loss, + "test rel_Err: ", + test_err, + "Test_loss0: ", + test_loss0, + "test rel_Err0: ", + test_err0, + ) + + # np.savetxt('Y_pred.txt', Y_pred) + fig = plt.figure() + plt.plot(Y_pred, Y_test, "r.", Y_test, Y_test, "b:") + plt.savefig(store_path + "prediction.png", dpi=300) + plt.close(fig) + # rr = X_y_test[:100,0].reshape((10,10)) + # tt = X_y_test[:100,1].reshape((10,10)) + + # fig = plt.figure() + # plt.subplot(121) + # plt.contourf(rr*np.cos(tt), rr*np.sin(tt), Y_pred[:100].reshape(rr.shape),100,cmap='jet') + # plt.colorbar() + # plt.subplot(122) + # plt.contourf(rr*np.cos(tt), rr*np.sin(tt), Y_test[:100].reshape(rr.shape),100,cmap='jet') + # plt.colorbar() + # plt.title(r'$\alpha= $'+str(X_y_test[0,-1])) + # plt.tight_layout() + # plt.savefig(store_path+'prediction1_fabricated.png',dpi=300) + # plt.close(fig) + + # fig = plt.figure() + # plt.plot(X_y_test[0:9,0:1].flatten(), Y_pred[0:9,0:1].flatten(),'r',label='pred: '+r'$G\{u\}(y,0.01)$') + # plt.plot(X_y_test[0:9,0:1].flatten(), Y_test[0:9,0:1].flatten(),'b',label='test: '+r'$\frac{d^{0.01}u}{dy^0.01}(y)$') + # plt.title('Prediction ' +r' $G\{u\}(y,\alpha=0.01)\approx \frac{d^{0.01}u}{dy^0.01}(y)$') + # plt.xlabel('y') + # plt.ylabel(r'$G\{u\}(y,\alpha)$') + # plt.tight_layout() + # plt.legend() + # plt.savefig(store_path+'prediction1.png',dpi=500) + # + # fig = plt.figure() + # plt.plot(X_y_test[81:,0:1].flatten(), Y_pred[81:,0:1].flatten(),'r',label='pred: '+r'$G\{u\}(y,0.99)$') + # plt.plot(X_y_test[81:,0:1].flatten(), Y_test[81:,0:1].flatten(),'b',label='test: '+r'$\frac{d^{0.99}u}{dy^0.99}(y)$') + # plt.title('Prediction ' +r' $G\{u\}(y,\alpha=0.99) \approx \frac{d^{0.99}u}{dy^0.99}(y)$') + # plt.xlabel('y') + # plt.ylabel(r'$G\{u\}(y,\alpha)$') + # plt.tight_layout() + # plt.legend() + # plt.savefig(store_path+'prediction2.png',dpi=500) + # + # plt.show() + # plt.close(fig) + tt1 = time.time() + print("Test CPU time: ", tt1 - tt0) diff --git a/fractional/Fractional_Lap_2D.m b/fractional/Fractional_Lap_2D.m new file mode 100644 index 0000000..a66625c --- /dev/null +++ b/fractional/Fractional_Lap_2D.m @@ -0,0 +1,6 @@ +tic +space_paras = [14, 2]; +space_type = 'Orthogonal'; +training_set(5000, 15, 15, 10, '2D_fLap_disk', space_type, space_paras); +test_set(5000, 15, 15, 10, '2D_fLap_disk', space_type, space_paras); +toc diff --git a/fractional/GJ_generate.m b/fractional/GJ_generate.m new file mode 100644 index 0000000..810fcae --- /dev/null +++ b/fractional/GJ_generate.m @@ -0,0 +1,42 @@ +function [point,weight]=GJ_generate(N,varargin) +%GJ_GENERATE Gauss-Jacobi quadrature points and weights. +% [POINT,WEIGHT]=GJ_GENERATE(N) returns N points and N corresponding +% weights of Gauss-Legendre quadrature rules. + +% [POINT,WEIGHT]=GJ_GENERATE(N,miu,lamed) returns the points and weights +% of Gauss-Jacobi rules with double-precision parameters miu and lamed. + +% See also ROOTSOFJACOBI, GJL_GENERATE. +% Ref: G.H. Golub and J.H. Welsch, "Calculation of Gauss quadrature", +% 1967 +% G.F. 
Pang, 27/08/2012 + + if abs(round(N)-N)>eps + error('**********N must be an integer*************') + end + if length(varargin)==2 && strcmp(class(varargin{1}),'double') && strcmp(class(varargin{2}),'double') + miu=varargin{1}; + lamed=varargin{2}; + if miu<=-1.0 || lamed<=-1.0 + error ('***********Miu and lamed must be in (-1,inf)***********') + end + elseif isempty(varargin) + % Default case: Gauss-Legendre rule. + miu=0.; + lamed=miu; + else + error('***********Improper arguments!************') + end + + % Quadrature points. + [x_nodes,pp,T]=rootsofjacobi(N,miu,lamed); + point=x_nodes; + + % Weights. + [V,pp]=eig(T); + miu0=2^(miu+lamed+1)*gamma(miu+1)*gamma(lamed+1)/gamma(miu+lamed+2); + w=zeros(N,1); + for i=1:N + w(i)=miu0*V(1,i)^2; + end + weight=w; \ No newline at end of file diff --git a/fractional/Orthogonal_polynomials.m b/fractional/Orthogonal_polynomials.m new file mode 100644 index 0000000..7b63f3a --- /dev/null +++ b/fractional/Orthogonal_polynomials.m @@ -0,0 +1,14 @@ +function Phi = Orthogonal_polynomials( Loc_u, degree, domain_type) + + if strcmp(domain_type, 'unit_interval') + syms x + expr = legendreP(0:degree, x); % Legendre polynomial +% expr =(1 + x).^0.5.* jacobiP(0:degree, -0.5, 0.5, x); % poly-fractonomial + + x = Loc_u; + Phi = double(subs(expr)); + + elseif strcmp(domain_type, 'unit_disk') + Phi = Zp(Loc_u(:,1), Loc_u(:,2), degree); % Loc_u 's in polar coordinate system + end + \ No newline at end of file diff --git a/fractional/Zp.m b/fractional/Zp.m new file mode 100644 index 0000000..8084f9a --- /dev/null +++ b/fractional/Zp.m @@ -0,0 +1,60 @@ +function y = Zp(r, t, max_index) +% Single-indexed Zernike polynomials, following the table on Wikipedia +if max_index == 14 + N = size(r,1); + y = zeros(N, max_index+1); + y(:,1) = ones(N,1); + y(:,2) = 2*r.*sin(t); + y(:,3) = 2*r.*cos(t); + y(:,4) = 6^0.5 * r.^2.*sin(2*t); + y(:,5) = 3^0.5*(2*r.^2-1); + y(:,6) =6^0.5*r.^2.*cos(2*t); + y(:,7) = 8^0.5*r.^3.*sin(3*t); + y(:,8) = 8^0.5*(3*r.^3-2*r).*sin(t); + y(:,9) = 8^0.5*(3*r.^3-2*r).*cos(t); + y(:,10) = 8^0.5*r.^3.*cos(3*t); + y(:,11) = 10^0.5*r.^4.*sin(4*t); + y(:,12) = 10^0.5*(4*r.^4-3*r.^2).*sin(2*t); + y(:,13) = 5^0.5*(6*r.^4-6*r.^2+1); + y(:,14) = 10^0.5*(4*r.^4-3*r.^2).*cos(2*t); + y(:,15)=10^0.5*r.^4.*cos(4*t); +elseif max_index == 35 + N = size(r,1); + y = zeros(N, max_index+1); + y(:,1) = ones(N,1); + y(:,2) = 2*r.*sin(t); + y(:,3) = 2*r.*cos(t); + y(:,4) = 6^0.5 * r.^2.*sin(2*t); + y(:,5) = 3^0.5*(2*r.^2-1); + y(:,6) =6^0.5*r.^2.*cos(2*t); + y(:,7) = 8^0.5*r.^3.*sin(3*t); + y(:,8) = 8^0.5*(3*r.^3-2*r).*sin(t); + y(:,9) = 8^0.5*(3*r.^3-2*r).*cos(t); + y(:,10) = 8^0.5*r.^3.*cos(3*t); + y(:,11) = 10^0.5*r.^4.*sin(4*t); + y(:,12) = 10^0.5*(4*r.^4-3*r.^2).*sin(2*t); + y(:,13) = 5^0.5*(6*r.^4-6*r.^2+1); + y(:,14) = 10^0.5*(4*r.^4-3*r.^2).*cos(2*t); + y(:,15)=10^0.5*r.^4.*cos(4*t); + y(:,16)=12^0.5*r.^5.*sin(5*t); + y(:,17)=12^0.5*(5*r.^5-4*r.^3).*sin(3*t); + y(:,18)=12^0.5*(10*r.^5-12*r.^3+3*r).*sin(t); + y(:,19)=12^0.5*(10*r.^5-12*r.^3+3*r).*cos(t); + y(:,20)=12^0.5*(5*r.^5-4*r.^3).*cos(3*t); + y(:,21)=12^0.5*r.^5.*cos(5*t); + y(:,22)=14^0.5*r.^6.*sin(6*t); + y(:,23)=14^0.5*(6*r.^6-5*r.^4).*sin(4*t); + y(:,24)=14^0.5*(15*r.^6-20*r.^4+6*r.^2).*sin(2*t); + y(:,25)=7^0.5*(20*r.^6-30*r.^4+12*r.^2-1); + y(:,26)=14^0.5*(15*r.^6-20*r.^4+6*r.^2).*cos(2*t); + y(:,27)=14^0.5*(6*r.^6-5*r.^4).*cos(4*t); + y(:,28)=14^0.5*(r.^6.*cos(6*t)); + y(:,29)=4*r.^7.*sin(7*t); + y(:,30)=4*(7*r.^7-6*r.^5).*sin(5*t); + y(:,31)=4*(21*r.^7-30*r.^5+10*r.^3).*sin(3*t); + 
y(:,32)=4*(35*r.^7-60*r.^5+30*r.^3-4*r).*sin(t); + y(:,33)=4*(35*r.^7-60*r.^5+30*r.^3-4*r).*cos(t); + y(:,34)=4*(21*r.^7-30*r.^5+10*r.^3).*cos(3*t); + y(:,35)=4*(7*r.^7-6*r.^5).*cos(5*t); + y(:,36)=4*r.^7.*cos(7*t); +end diff --git a/fractional/datasets.py b/fractional/datasets.py new file mode 100644 index 0000000..8d0adaa --- /dev/null +++ b/fractional/datasets.py @@ -0,0 +1,139 @@ +import numpy as np + + +training_Lu = np.float32(np.loadtxt("training_Lu.txt")) +training_u = np.float32(np.loadtxt("training_u.txt")) +training_y = np.float32(np.loadtxt("training_y.txt")) +training_alpha = np.float32(np.loadtxt("training_alpha.txt")) + +test_Lu = np.float32(np.loadtxt("test_Lu.txt")) +test_u = np.float32(np.loadtxt("test_u.txt")) +test_y = np.float32(np.loadtxt("test_y.txt")) +test_alpha = np.float32(np.loadtxt("test_alpha.txt")) + +test_Lu0 = np.float32(np.loadtxt("test_Lu0.txt")) +test_u0 = np.float32(np.loadtxt("test_u0.txt")) +test_y0 = np.float32(np.loadtxt("test_y0.txt")) +test_alpha0 = np.float32(np.loadtxt("test_alpha0.txt")) + +if len(training_y.shape) == 1: + training_y = training_y.reshape((-1, 1)) +if len(test_y.shape) == 1: + test_y = test_y.reshape((-1, 1)) +if len(training_alpha.shape) == 1: + training_alpha = training_alpha.reshape((-1, 1)) +if len(test_alpha.shape) == 1: + test_alpha = test_alpha.reshape((-1, 1)) +if len(test_y0.shape) == 1: + test_y0 = test_y0.reshape((-1, 1)) +if len(test_alpha0.shape) == 1: + test_alpha0 = test_alpha0.reshape((-1, 1)) +if len(test_u0.shape) == 1: + test_u0 = test_u0.reshape((-1, 1)) +if len(test_alpha0.shape) == 0: + test_alpha0 = np.array([[test_alpha0]], dtype=np.float32) +if len(test_Lu0.shape) == 1: + test_Lu0 = test_Lu0.reshape((-1, 1)) +# test_frac_Lap = np.float32(np.loadtxt('test_frac_Lap0.txt')) +# test_u = np.float32(np.loadtxt('test_u0.txt').reshape((-1,1))) +# print(test_u.shape) +# test_r_y = np.float32(np.loadtxt('test_r_y0.txt')).T +# test_t_y = np.float32(np.loadtxt('test_t_y0.txt')).T +# test_alpha = np.float32(np.loadtxt('test_alpha0.txt')) + +# training_y = np.concatenate((training_r_y.reshape((-1,1)), training_t_y.reshape((-1,1))),axis=1) +# test_y = np.concatenate((test_r_y.reshape((-1,1)), test_t_y.reshape((-1,1))),axis=1) + +N_X = training_u.shape[0] +N_U = training_u.shape[1] +N_Y = training_y.shape[0] +N_A = training_alpha.shape[0] + +n_x = test_u.shape[0] +n_u = test_u.shape[1] +n_y = test_y.shape[0] +n_a = test_alpha.shape[0] +# print(n_a, n_u) +n_x0 = test_u0.shape[0] +n_u0 = test_u0.shape[1] +n_y0 = test_y0.shape[0] +n_a0 = test_alpha0.shape[0] + +d = training_y.shape[1] + training_alpha.shape[1] + +counter = 0 +X_u_train = np.zeros((N_U * N_A * N_Y, N_X), dtype=np.float32) +X_y_train = np.zeros((N_U * N_A * N_Y, d), dtype=np.float32) +Y_train = np.zeros((N_U * N_A * N_Y, 1), dtype=np.float32) + +for i in range(N_A): + for j in range(N_U): + u_sample = training_u[:, j : (j + 1)].T + alpha_sample = np.array([[training_alpha[i, 0]]], dtype=np.float32) + U = np.tile(u_sample, (N_Y, 1)) + A = np.tile(alpha_sample, (N_Y, 1)) + index = i * N_U + j + X_u_train[(counter * N_Y) : ((counter + 1) * N_Y), :] = U + X_y_train[(counter * N_Y) : ((counter + 1) * N_Y), :] = np.concatenate( + (training_y, A), axis=1 + ) + Y_train[(counter * N_Y) : ((counter + 1) * N_Y), 0:1] = training_Lu[ + :, index : (index + 1) + ] + counter = counter + 1 + +counter = 0 +X_u_test = np.zeros((n_u * n_a * n_y, n_x), dtype=np.float32) +X_y_test = np.zeros((n_u * n_a * n_y, d), dtype=np.float32) +Y_test = np.zeros((n_u * n_a * n_y, 1), 
dtype=np.float32) + +for i in range(n_a): + for j in range(n_u): + u_sample = test_u[:, j : (j + 1)].T + alpha_sample = np.array([[test_alpha[i, 0]]], dtype=np.float32) + U = np.tile(u_sample, (n_y, 1)) + A = np.tile(alpha_sample, (n_y, 1)) + index = i * n_u + j + X_u_test[(counter * n_y) : ((counter + 1) * n_y), :] = U + X_y_test[(counter * n_y) : ((counter + 1) * n_y), :] = np.concatenate( + (test_y, A), axis=1 + ) + Y_test[(counter * n_y) : ((counter + 1) * n_y), 0:1] = test_Lu[ + :, index : (index + 1) + ] + counter = counter + 1 + +counter = 0 +X_u_test0 = np.zeros((n_u0 * n_a0 * n_y0, n_x), dtype=np.float32) +X_y_test0 = np.zeros((n_u0 * n_a0 * n_y0, d), dtype=np.float32) +Y_test0 = np.zeros((n_u0 * n_a0 * n_y0, 1), dtype=np.float32) + +for i in range(n_a0): + for j in range(n_u0): + u_sample0 = test_u0[:, j : (j + 1)].T + alpha_sample0 = np.array([[test_alpha0[i, 0]]], dtype=np.float32) + U0 = np.tile(u_sample0, (n_y0, 1)) + A0 = np.tile(alpha_sample0, (n_y0, 1)) + index = i * n_u0 + j + X_u_test0[(counter * n_y0) : ((counter + 1) * n_y0), :] = U0 + X_y_test0[(counter * n_y0) : ((counter + 1) * n_y0), :] = np.concatenate( + (test_y0, A0), axis=1 + ) + Y_test0[(counter * n_y0) : ((counter + 1) * n_y0), 0:1] = test_Lu0[ + :, index : (index + 1) + ] + counter = counter + 1 + +# X_u_train = np.loadtxt('X_u_train.txt') +# X_y_train = np.loadtxt('X_y_train.txt') +# Y_train = np.loadtxt('Y_train.txt') +data_path = "data/" +np.savez_compressed( + data_path + "train.npz", X_u_train=X_u_train, X_y_train=X_y_train, Y_train=Y_train +) +np.savez_compressed( + data_path + "test.npz", X_u_test=X_u_test, X_y_test=X_y_test, Y_test=Y_test +) +np.savez_compressed( + data_path + "test0.npz", X_u_test=X_u_test0, X_y_test=X_y_test0, Y_test=Y_test0 +) diff --git a/fractional/matrix_for_frac_Lap.m b/fractional/matrix_for_frac_Lap.m new file mode 100644 index 0000000..fd506a6 --- /dev/null +++ b/fractional/matrix_for_frac_Lap.m @@ -0,0 +1,91 @@ +function [pts_cell0, coeff_mat]=matrix_for_frac_Lap(y,alpha) + +M = size(y,1); +M_theta = 16; +[KESI,W]=GJ_generate(M_theta,0,0); + +lam_N = 8000; +index = (0:lam_N)'; +gk=wk(alpha,lam_N+10); +h = 1.0e-3; +pts_cell = cell(1,M); +coeff_cell = cell(1,M); + +parfor i=1:M + x0=y(i,1); + y0=y(i,2); + theta1=pi+pi*KESI; + W1=pi*W; + + vec = [x0-index*h*cos(theta1(1)) y0-index*h*sin(theta1(1))]; + id = find(sum(vec.^2,2)>1); + id0 = id(1)-1; + vec0 = vec(1:id0,:); + coeff0 = W1(1)*h^(-alpha)* gk(1:id0); + + + for k2=2:M_theta + vec = [x0-index*h*cos(theta1(k2)) y0-index*h*sin(theta1(k2))]; + id = find(sum(vec.^2,2)>1); + id0 = id(1)-1; + vec0 = [vec0; vec(1:id0,:)]; + coeff = W1(k2)*h^(-alpha)*gk(1:id0); + coeff0 = [coeff0; coeff]; + +% [t_vec, r_vec] = cart2pol(x_vec, y_vec); +% v = fun(r_vec,t_vec,coeff); +% s1(k2)=h^(-alpha)*(gk(1:id0)'*v); + end + + pts_cell{i} = vec0; + coeff_cell{i} = coeff0; +end + +max_size = 0; +for i=1:M + temp = size(pts_cell{i},1); + if temp>max_size + max_size = temp; + end +end + +coeff_mat = zeros(M, max_size); +pts_cell0 = cell(1,M); +for i =1:M + len = size(coeff_cell{i},1); + coeff_mat(i,1:len)=coeff_cell{i}; + pts_cell0{i} = [pts_cell{i}; zeros(max_size-len,2)]; +end + +% u_aux = zeros(max_size, M); +% for i=1:M +% len = size(pts_cell{i},1); +% u_aux(1:len,i) = u_exact(pts_cell{i},alpha); +% end +ratio=gamma((1-alpha)/2)*gamma((2+alpha)/2)/sqrt(pi)/2/pi; +coeff_mat = coeff_mat * ratio; +% f_appr = ratio*diag(coeff_mat*u_aux); +% +% f_exa = f_sour(y, alpha); +% +% err = norm(f_appr-f_exa)/norm(f_exa) + + +function y=wk(q,K) + 
y=zeros(K+1,1); + y(1)=1; + + parfor kk=1:K + b=1:kk; + a=-q+b-1; + y(kk+1)=prod(a./b); + end + + + function y=f_sour(x,alpha) + d=2; + y=2^alpha*gamma(alpha/2+2)*gamma((d+alpha)/2)*gamma(d/2)^(-1)*(1-(1+alpha/d)*(x(:,1).^2+x(:,2).^2)); + + + function y=u_exact(x,alpha) + y=(1-x(:,1).^2-x(:,2).^2+1.0e-15).^(1+alpha/2); \ No newline at end of file diff --git a/fractional/operator_Orthogonal_polynomials.m b/fractional/operator_Orthogonal_polynomials.m new file mode 100644 index 0000000..f588cb2 --- /dev/null +++ b/fractional/operator_Orthogonal_polynomials.m @@ -0,0 +1,37 @@ +function L_Phi = operator_Orthogonal_polynomials( Loc_u, operator_type, operator_paras, degree) + + if strcmp(operator_type, '1D_Caputo') + alpha = operator_paras(1); + M = size(Loc_u,1); + N = 1000; + pt_cell = cell(1,M); + coeff_mat = zeros(M, N+1); + + for i = 1:M + pt_cell{i}= linspace(-1, Loc_u(i),N+1)'; + coeff = zeros(1,N+1); + h = (Loc_u(i)+1)/N; + index = 0:(N-1); + cc = (N-index).^(1-alpha)-(N-index-1).^(1-alpha); + coeff(2:(N+1)) = cc; + coeff(1:N)=coeff(1:N) - cc; + coeff_mat(i,:) = coeff * h^(-alpha)/gamma(2-alpha); + end + L_Phi = zeros(M, degree+1); + for i = 1:M + Phi = Orthogonal_polynomials(pt_cell{i}, degree, 'unit_interval'); + L_Phi(i,:) = coeff_mat(i,:) * Phi; + end + + elseif strcmp(operator_type, '2D_fLap_disk') + alpha = operator_paras(1); + M = size(Loc_u,1); + [pt_cell, coeff_mat]=matrix_for_frac_Lap(Loc_u, alpha); + L_Phi = zeros(M, degree+1); + for i = 1:M + [t0,r0]=cart2pol(pt_cell{i}(:,1), pt_cell{i}(:,2)); + Phi = Orthogonal_polynomials([r0,t0], degree, 'unit_disk'); + L_Phi(i,:) = coeff_mat(i,:) * Phi; + end + end + \ No newline at end of file diff --git a/fractional/rootsofjacobi.m b/fractional/rootsofjacobi.m new file mode 100644 index 0000000..882e4c7 --- /dev/null +++ b/fractional/rootsofjacobi.m @@ -0,0 +1,140 @@ +function [root, poly,Tri_mat]=rootsofjacobi(N,varargin) +% ROOTSOFJACOBI Zeros of Jacobi polynomials, polynomial coefficients +% and the tridiagonal matrix corresponding to +% the three-term recursive relation of Jacobi polynomials. +% ROOT=ROOTSOFJACOBI(N,'Legendre') returns the zeros of Legendre +% polynomial of degree N>=2. +% +% ROOT=ROOTSOFJACOBI(N,'Chebyshev1') returns the zeros of Chebyshev +% polynomial of the first kind. +% +% ROOT=ROOTSOFJACOBI(N,'Chebyshev2') returns the zeros of Chebyshev +% polynomial of the second kind. +% +% ROOT=ROOTSOFJACOBI(N,miu,lamed) returns the zeros of Jacobi polynomial +% with double-precision parameters miu and lamed both larger than -1.0. +% +% [ROOT,POLY]=ROOTSOFJACOBI(...) returns the polynomial coefficients with +% the leading coefficient in POLY(1). +% +% [ROOT,POLY,TRI_MAT]=ROOTSOFJACOBI(...) returns the tridiagonal matrix +% that corresponds to the three-term recursive relation of Jacobi +% polynomials. + +% See also GJ_GENERATE, GJL_GENERATE. +% Ref: G.H. Golub and J.H. Welsch, "Calculation of Gauss quadrature", +% 1967. +% G.F. 
Pang, 27/08/2012 + + +if N<1 || abs(round(N)-N)>eps + error('************N must be a positive integer************') +end + + +if length(varargin)==2 && strcmp(class(varargin{1}),'double') && strcmp(class(varargin{2}),'double') + miu=varargin{1}; + lamed=varargin{2}; + C=ones(1,N+1); + if miu<=-1.0 || lamed<=-1.0 + error ('***********Miu and lamed must be in (-1,inf)***********') + end +elseif length(varargin)==1 && ischar(varargin{1}) + switch varargin{:} + case 'Legendre' + miu=0.; + lamed=miu; + % C is a constant factor relating Jacobi polynomials to some special + % orthogonal polynomials such as Legendre and Chebyshev + % polynomials. + C=ones(1,N+1); + case 'Chebyshev1' + miu=-0.5; + lamed=miu; + C=[1 2.^(2:2:2*N).*beta(2:(N+1),2:(N+1)).*(2*(1:N)+1)]; + case 'Chebyshev2' + miu=0.5; + lamed=0.5; + C=[1 2.^(2:2:2*N).*beta(3:(N+2),2:(N+1)).*(2*(1:N)+2)]; + otherwise + error(' Polynomial type must be in {Legendre, Chebyshev1, Chebyshev2}') + end +else + error('***********Improper arguments!************') +end + + +format long + +coeff_matrix=zeros(N+1,N+1); +coeff_matrix(1,N+1)=1; +coeff_matrix(2,(end-1):end)=conv([an(1,miu,lamed),bn(1,miu,lamed)],coeff_matrix(1,N+1)); + + +% Generate the coefficients of Jacobi polynomials from degree 2 to N using +% three-term recursive relation. +for i=3:N+1 + coeff_matrix(i,(end-i+1):end)= ... + conv([an(i-1,miu,lamed),bn(i-1,miu,lamed)],coeff_matrix(i-1,(end-i+2):end)) ... + -[0 0 cn(i-1,miu,lamed)*coeff_matrix(i-2,(end-i+3):end)]; +end + + +% Transform Jacobi polynomials to Legendre/Chebyshev polynomials. +for j=1:N+1 + coeff_matrix(j,:)=coeff_matrix(j,:)*C(j); +end +poly=coeff_matrix(end,:); + + +% Generate the tridiagonal matrix, zeros and poly. +A=zeros(1,N); +B=zeros(1,N-1); + for n=1:N + A(n)=-bn(n,miu,lamed)/an(n,miu,lamed); + end + + for n=1:N-1 + B(n)=(cn(n+1,miu,lamed)/an(n,miu,lamed)/an(n+1,miu,lamed))^0.5; + end +Tri_mat=diag(A)+diag(B,1)+diag(B,-1); + +% Obtain zeros by computing the eigenvalues of matrix. +root=sort(eig(Tri_mat)); + + + +%---Coefficients in three-term recursive relation------------------------- + +function y=an(n,miu,lamed) + if abs(miu+lamed+1)<eps % special case: miu+lamed=-1 + if n>=2 + y=(2*n-1)/n; + else + y=0.5; + end + else + y=(2*n+miu+lamed-1)*(2*n+miu+lamed)/(2*n*(n+miu+lamed)); + end + +%-------------------------------------------------------------------------- + +function y=bn(n,miu,lamed) + if abs(miu+lamed+1)<eps % special case: miu+lamed=-1 + if n>=2 + y=(miu^2-lamed^2)/n/(2*n-3); + else + y=(miu^2-lamed^2)*(-0.5); + end + elseif abs(abs(miu)-abs(lamed)) 1 else [self.T] + def solve(y): + u = interpolate.interp1d(x, y, kind='cubic', copy=False, assume_sorted=True) + return solve_ivp(lambda t, y: self.g(y, u(t), t), [0, self.T], self.s0, 'RK45', interval, max_step=0.05).y[0] + return np.vstack(list(map(solve, gps))) + +class AntideData(ODEData): + '''Data for learning the antiderivative operator. + g(y,u,t)=u. + ''' + def __init__(self, T, s0, sensor_in, sensor_out, length_scale, train_num, test_num): + super(AntideData, self).__init__(T, s0, sensor_in, sensor_out, length_scale, train_num, test_num) + + def g(self, y, u, t): + return u + +class PendData(ODEData): + '''Data for learning the gravity pendulum. + g(y,u,t)=[y[1], -np.sin(y[0]) + u]. 
+ ''' + def __init__(self, T, s0, sensor_in, sensor_out, length_scale, train_num, test_num): + super(PendData, self).__init__(T, s0, sensor_in, sensor_out, length_scale, train_num, test_num) + + def g(self, y, u, t): + return [y[1], -np.sin(y[0]) + u] \ No newline at end of file diff --git a/seq2seq/learner/__init__.py b/seq2seq/learner/__init__.py new file mode 100755 index 0000000..b47d84b --- /dev/null +++ b/seq2seq/learner/__init__.py @@ -0,0 +1,16 @@ +""" +@author: jpzxshi +""" +from . import nn +from . import integrator +from .brain import Brain +from .data import Data +from .nn import Module + +__all__ = [ + 'nn', + 'integrator', + 'Brain', + 'Data', + 'Module', +] \ No newline at end of file diff --git a/seq2seq/learner/brain.py b/seq2seq/learner/brain.py new file mode 100755 index 0000000..6c14d9f --- /dev/null +++ b/seq2seq/learner/brain.py @@ -0,0 +1,162 @@ +""" +@author: jpzxshi +""" +import os +import time +import numpy as np +import torch + +from .nn import LossNN +from .utils import timing, cross_entropy_loss + +class Brain: + '''Runner based on torch. + ''' + brain = None + + @classmethod + def Init(cls, data, net, criterion, optimizer, lr, iterations, batch_size=None, + print_every=1000, save=False, callback=None, dtype='float', device='cpu'): + cls.brain = cls(data, net, criterion, optimizer, lr, iterations, batch_size, + print_every, save, callback, dtype, device) + + @classmethod + def Run(cls): + cls.brain.run() + + @classmethod + def Restore(cls): + cls.brain.restore() + + @classmethod + def Output(cls, data=True, best_model=True, loss_history=True, info=None, path=None, **kwargs): + cls.brain.output(data, best_model, loss_history, info, path, **kwargs) + + @classmethod + def Loss_history(cls): + return cls.brain.loss_history + + @classmethod + def Encounter_nan(cls): + return cls.brain.encounter_nan + + @classmethod + def Best_model(cls): + return cls.brain.best_model + + def __init__(self, data, net, criterion, optimizer, lr, iterations, batch_size, + print_every, save, callback, dtype, device): + self.data = data + self.net = net + self.criterion = criterion + self.optimizer = optimizer + self.lr = lr + self.iterations = iterations + self.batch_size = batch_size + self.print_every = print_every + self.save = save + self.callback = callback + self.dtype = dtype + self.device = device + + self.loss_history = None + self.encounter_nan = False + self.best_model = None + + self.__optimizer = None + self.__criterion = None + + @timing + def run(self): + self.__init_brain() + print('Training...', flush=True) + loss_history = [] + for i in range(self.iterations + 1): + if self.batch_size is not None: + mask = np.random.choice(self.data.X_train.size(0), self.batch_size, replace=False) + loss = self.__criterion(self.net(self.data.X_train[mask]), self.data.y_train[mask]) + else: + loss = self.__criterion(self.net(self.data.X_train), self.data.y_train) + if i % self.print_every == 0 or i == self.iterations: + loss_test = self.__criterion(self.net(self.data.X_test), self.data.y_test) + loss_history.append([i, loss.item(), loss_test.item()]) + print('{:<9}Train loss: {:<25}Test loss: {:<25}'.format(i, loss.item(), loss_test.item()), flush=True) + if torch.any(torch.isnan(loss)): + self.encounter_nan = True + print('Encountering nan, stop training', flush=True) + return None + if self.save: + if not os.path.exists('model'): os.mkdir('model') + torch.save(self.net, 'model/model{}.pkl'.format(i)) + if self.callback is not None: + to_stop = self.callback(self.data, self.net) + if 
to_stop: break + if i < self.iterations: + self.__optimizer.zero_grad() + loss.backward() + self.__optimizer.step() + self.loss_history = np.array(loss_history) + print('Done!', flush=True) + return self.loss_history + + def restore(self): + if self.loss_history is not None and self.save == True: + best_loss_index = np.argmin(self.loss_history[:, 1]) + iteration = int(self.loss_history[best_loss_index, 0]) + loss_train = self.loss_history[best_loss_index, 1] + loss_test = self.loss_history[best_loss_index, 2] + print('Best model at iteration {}:'.format(iteration), flush=True) + print('Train loss:', loss_train, 'Test loss:', loss_test, flush=True) + self.best_model = torch.load('model/model{}.pkl'.format(iteration)) + else: + raise RuntimeError('restore before running or without saved models') + return self.best_model + + def output(self, data, best_model, loss_history, info, path, **kwargs): + if path is None: + path = './outputs/' + time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time())) + if not os.path.isdir(path): os.makedirs(path) + if data: + np.save(path + '/X_train', self.data.X_train_np) + np.save(path + '/y_train', self.data.y_train_np) + np.save(path + '/X_test', self.data.X_test_np) + np.save(path + '/y_test', self.data.y_test_np) + if best_model: + torch.save(self.best_model, path + '/model_best.pkl') + if loss_history: + np.savetxt(path + '/loss.txt', self.loss_history) + if info is not None: + with open(path + '/info.txt', 'w') as f: + for item in info: + f.write('{}: {}\n'.format(item[0], str(item[1]))) + for key, arg in kwargs.items(): + np.savetxt(path + '/' + key + '.txt', arg) + + def __init_brain(self): + self.loss_history = None + self.encounter_nan = False + self.best_model = None + self.data.device = self.device + self.data.dtype = self.dtype + self.net.device = self.device + self.net.dtype = self.dtype + self.__init_optimizer() + self.__init_criterion() + + def __init_optimizer(self): + if self.optimizer == 'adam': + self.__optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr) + else: + raise NotImplementedError + + def __init_criterion(self): + if isinstance(self.net, LossNN): + self.__criterion = self.net.criterion + if self.criterion is not None: + raise Warning('loss-oriented neural network has already implemented its loss function') + elif self.criterion == 'MSE': + self.__criterion = torch.nn.MSELoss() + elif self.criterion == 'CrossEntropy': + self.__criterion = cross_entropy_loss + else: + raise NotImplementedError \ No newline at end of file diff --git a/seq2seq/learner/data/__init__.py b/seq2seq/learner/data/__init__.py new file mode 100755 index 0000000..1cd12f7 --- /dev/null +++ b/seq2seq/learner/data/__init__.py @@ -0,0 +1,8 @@ +""" +@author: jpzxshi +""" +from .data import Data + +__all__ = [ + 'Data', +] diff --git a/seq2seq/learner/data/data.py b/seq2seq/learner/data/data.py new file mode 100755 index 0000000..568912d --- /dev/null +++ b/seq2seq/learner/data/data.py @@ -0,0 +1,126 @@ +""" +@author: jpzxshi +""" +import numpy as np +import torch + +class Data: + '''Standard data format. 
+ ''' + def __init__(self): + self.X_train = None + self.y_train = None + self.X_test = None + self.y_test = None + + self.__device = None + self.__dtype = None + + @property + def device(self): + return self.__device + + @property + def dtype(self): + return self.__dtype + + @device.setter + def device(self, d): + if d == 'cpu': + self.__to_cpu() + elif d == 'gpu': + self.__to_gpu() + else: + raise ValueError + self.__device = d + + @dtype.setter + def dtype(self, d): + if d == 'float': + self.__to_float() + elif d == 'double': + self.__to_double() + else: + raise ValueError + self.__dtype = d + + @property + def Device(self): + if self.__device == 'cpu': + return torch.device('cpu') + elif self.__device == 'gpu': + return torch.device('cuda') + + @property + def Dtype(self): + if self.__dtype == 'float': + return torch.float32 + elif self.__dtype == 'double': + return torch.float64 + + @property + def dim(self): + if isinstance(self.X_train, np.ndarray): + return self.X_train.shape[-1] + elif isinstance(self.X_train, torch.Tensor): + return self.X_train.size(-1) + + @property + def K(self): + if isinstance(self.y_train, np.ndarray): + return self.y_train.shape[-1] + elif isinstance(self.y_train, torch.Tensor): + return self.y_train.size(-1) + + @property + def X_train_np(self): + return Data.to_np(self.X_train) + + @property + def y_train_np(self): + return Data.to_np(self.y_train) + + @property + def X_test_np(self): + return Data.to_np(self.X_test) + + @property + def y_test_np(self): + return Data.to_np(self.y_test) + + @staticmethod + def to_np(d): + if isinstance(d, np.ndarray) or d is None: + return d + elif isinstance(d, torch.Tensor): + return d.cpu().detach().numpy() + else: + raise ValueError + + def __to_cpu(self): + for d in ['X_train', 'y_train', 'X_test', 'y_test']: + if isinstance(getattr(self, d), np.ndarray): + setattr(self, d, torch.DoubleTensor(getattr(self, d))) + elif isinstance(getattr(self, d), torch.Tensor): + setattr(self, d, getattr(self, d).cpu()) + + def __to_gpu(self): + for d in ['X_train', 'y_train', 'X_test', 'y_test']: + if isinstance(getattr(self, d), np.ndarray): + setattr(self, d, torch.cuda.DoubleTensor(getattr(self, d))) + elif isinstance(getattr(self, d), torch.Tensor): + setattr(self, d, getattr(self, d).cuda()) + + def __to_float(self): + if self.device is None: + raise RuntimeError('device is not set') + for d in ['X_train', 'y_train', 'X_test', 'y_test']: + if isinstance(getattr(self, d), torch.Tensor): + setattr(self, d, getattr(self, d).float()) + + def __to_double(self): + if self.device is None: + raise RuntimeError('device is not set') + for d in ['X_train', 'y_train', 'X_test', 'y_test']: + if isinstance(getattr(self, d), torch.Tensor): + setattr(self, d, getattr(self, d).double()) \ No newline at end of file diff --git a/seq2seq/learner/integrator/__init__.py b/seq2seq/learner/integrator/__init__.py new file mode 100755 index 0000000..10812fc --- /dev/null +++ b/seq2seq/learner/integrator/__init__.py @@ -0,0 +1,8 @@ +""" +@author: jpzxshi +""" +from . 
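The `device` and `dtype` setters above are the only conversion points: arrays are assigned as NumPy, then setting `device` first and `dtype` second moves everything to torch tensors of the requested precision. A hypothetical usage sketch, assuming the `Data` class above is in scope (`ToyData` is a made-up example):

```
import numpy as np

class ToyData(Data):
    def __init__(self):
        super(ToyData, self).__init__()
        self.X_train = np.random.rand(16, 3)
        self.y_train = np.random.rand(16, 1)
        self.X_test = np.random.rand(4, 3)
        self.y_test = np.random.rand(4, 1)

data = ToyData()
data.device = 'cpu'   # numpy arrays -> torch double tensors on CPU
data.dtype = 'float'  # then cast to float32
print(data.X_train.dtype)  # torch.float32
```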
import hamiltonian + +__all__ = [ + 'hamiltonian', +] diff --git a/seq2seq/learner/integrator/hamiltonian/__init__.py b/seq2seq/learner/integrator/hamiltonian/__init__.py new file mode 100755 index 0000000..1a8816a --- /dev/null +++ b/seq2seq/learner/integrator/hamiltonian/__init__.py @@ -0,0 +1,8 @@ +""" +@author: jpzxshi +""" +from .stormer_verlet import SV + +__all__ = [ + 'SV', +] \ No newline at end of file diff --git a/seq2seq/learner/integrator/hamiltonian/stormer_verlet.py b/seq2seq/learner/integrator/hamiltonian/stormer_verlet.py new file mode 100755 index 0000000..f7e9684 --- /dev/null +++ b/seq2seq/learner/integrator/hamiltonian/stormer_verlet.py @@ -0,0 +1,92 @@ +""" +@author: jpzxshi +""" +import numpy as np +import torch + +from ...utils import grad + +class SV: + '''Stormer-Verlet scheme. + ''' + def __init__(self, H, dH, iterations=10, order=4, N=1): + ''' + H: H(x) or None + dH: dp,dq=dH(p,q) or None + ``iterations`` is encouraged to be 1 if H is separable. + ''' + self.H = H + self.dH = dH + self.iterations = iterations + self.order = order + self.N = N + + def __sv2(self, x, h): + '''Order 2. + x: np.ndarray or torch.Tensor of shape [dim] or [num, dim]. + h: int + ''' + dim = x.shape[-1] if isinstance(x, np.ndarray) else x.size(-1) + d = int(dim / 2) + p0, q0 = (x[..., :d], x[..., d:]) + p1, q1 = p0, q0 + if callable(self.dH): + for _ in range(self.iterations): + p1 = p0 - h / 2 * self.dH(p1, q0)[1] + q1 = q0 + h / 2 * self.dH(p1, q0)[0] + p2, q2 = p1, q1 + for _ in range(self.iterations): + q2 = q1 + h / 2 * self.dH(p1, q2)[0] + p2 = p1 - h / 2 * self.dH(p1, q2)[1] + return np.hstack([p2, q2]) if isinstance(x, np.ndarray) else torch.cat([p2, q2], dim=-1) + elif isinstance(x, torch.Tensor): + for _ in range(self.iterations): + x = torch.cat([p1, q0], dim=-1).requires_grad_(True) + dH = grad(self.H(x), x, create_graph=False) + p1 = p0 - h / 2 * dH[..., d:] + q1 = q0 + h / 2 * dH[..., :d] + p2, q2 = p1, q1 + for _ in range(self.iterations): + x = torch.cat([p1, q2], dim=-1).requires_grad_(True) + dH = grad(self.H(x), x, create_graph=False) + q2 = q1 + h / 2 * dH[..., :d] + p2 = p1 - h / 2 * dH[..., d:] + return torch.cat([p2, q2], dim=-1) + else: + raise ValueError + + def __sv4(self, x, h): + '''Order 4. 
+ ''' + r1 = 1 / (2 - 2 ** (1 / 3)) + r2 = - 2 ** (1 / 3) / (2 - 2 ** (1 / 3)) + return self.__sv2(self.__sv2(self.__sv2(x, r1 * h), r2 * h), r1 * h) + + def __sv6(self, x, h): + '''Order 6 + ''' + r1 = 1 / (2 - 2 ** (1 / 5)) + r2 = - 2 ** (1 / 5) / (2 - 2 ** (1 / 5)) + return self.__sv4(self.__sv4(self.__sv4(x, r1 * h), r2 * h), r1 * h) + + def solve(self, x, h): + if self.order == 2: + solver = self.__sv2 + elif self.order == 4: + solver = self.__sv4 + elif self.order == 6: + solver = self.__sv6 + else: + raise NotImplementedError + for _ in range(self.N): + x = solver(x, h / self.N) + return x + + def flow(self, x, h, steps): + dim = x.shape[-1] if isinstance(x, np.ndarray) else x.size(-1) + size = len(x.shape) if isinstance(x, np.ndarray) else len(x.size()) + X = [x] + for i in range(steps): + X.append(self.solve(X[-1], h)) + shape = [steps + 1, dim] if size == 1 else [-1, steps + 1, dim] + return np.hstack(X).reshape(shape) if isinstance(x, np.ndarray) else torch.cat(X, dim=-1).view(shape) \ No newline at end of file diff --git a/seq2seq/learner/nn/__init__.py b/seq2seq/learner/nn/__init__.py new file mode 100755 index 0000000..45f9f90 --- /dev/null +++ b/seq2seq/learner/nn/__init__.py @@ -0,0 +1,26 @@ +""" +@author: jpzxshi +""" +from .module import Module +from .module import StructureNN +from .module import LossNN +from .fnn import FNN +from .hnn import HNN +from .sympnet import LASympNet +from .sympnet import GSympNet +from .seq2seq import S2S +from .deeponet import DeepONet + +__all__ = [ + 'Module', + 'StructureNN', + 'LossNN', + 'FNN', + 'HNN', + 'LASympNet', + 'GSympNet', + 'S2S', + 'DeepONet', +] + + diff --git a/seq2seq/learner/nn/deeponet.py b/seq2seq/learner/nn/deeponet.py new file mode 100755 index 0000000..ca0361d --- /dev/null +++ b/seq2seq/learner/nn/deeponet.py @@ -0,0 +1,56 @@ +""" +@author: jpzxshi +""" +import torch +import torch.nn as nn + +from .module import StructureNN +from .fnn import FNN + +class DeepONet(StructureNN): + '''Deep operator network. 
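The `r1`/`r2` coefficients above are the standard "triple jump" composition, which lifts the order-2 scheme to order 4 (and 4 to 6). A quick sanity check on the harmonic oscillator H(p, q) = (p^2 + q^2)/2, whose exact flow is a rotation; this assumes the `SV` class above is in scope, and passes `dH` directly with one iteration since H is separable:

```
import numpy as np

sv = SV(None, lambda p, q: (p, q), iterations=1, order=4, N=1)
x0 = np.array([0.0, 1.0])      # (p, q) = (0, 1)
h, steps = 0.1, 100
traj = sv.flow(x0, h, steps)   # shape (steps + 1, 2)
t = h * steps
exact = np.array([-np.sin(t), np.cos(t)])
print(np.abs(traj[-1] - exact).max())  # error shrinks as O(h^4)
```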
+ Input: [batch size, branch_dim + trunk_dim] + Output: [batch size, 1] + ''' + def __init__(self, branch_dim, trunk_dim, branch_depth=2, trunk_depth=3, width=50, + activation='relu', initializer='Glorot normal'): + super(DeepONet, self).__init__() + self.branch_dim = branch_dim + self.trunk_dim = trunk_dim + self.branch_depth = branch_depth + self.trunk_depth = trunk_depth + self.width = width + self.activation = activation + self.initializer = initializer + + self.modus = self.__init_modules() + self.params = self.__init_params() + self.__initialize() + + def forward(self, x): + x_branch, x_trunk = x[..., :self.branch_dim], x[..., self.branch_dim:] + x_branch = self.modus['Branch'](x_branch) + for i in range(1, self.trunk_depth): + x_trunk = self.modus['TrActM{}'.format(i)](self.modus['TrLinM{}'.format(i)](x_trunk)) + return torch.sum(x_branch * x_trunk, dim=-1, keepdim=True) + self.params['bias'] + + def __init_modules(self): + modules = nn.ModuleDict() + modules['Branch'] = FNN(self.branch_dim, self.width, self.branch_depth, self.width, + self.activation, self.initializer) + modules['TrLinM1'] = nn.Linear(self.trunk_dim, self.width) + modules['TrActM1'] = self.Act + for i in range(2, self.trunk_depth): + modules['TrLinM{}'.format(i)] = nn.Linear(self.width, self.width) + modules['TrActM{}'.format(i)] = self.Act + return modules + + def __init_params(self): + params = nn.ParameterDict() + params['bias'] = nn.Parameter(torch.zeros([1])) + return params + + def __initialize(self): + for i in range(1, self.trunk_depth): + self.weight_init_(self.modus['TrLinM{}'.format(i)].weight) + nn.init.constant_(self.modus['TrLinM{}'.format(i)].bias, 0) \ No newline at end of file diff --git a/seq2seq/learner/nn/fnn.py b/seq2seq/learner/nn/fnn.py new file mode 100755 index 0000000..56376b3 --- /dev/null +++ b/seq2seq/learner/nn/fnn.py @@ -0,0 +1,53 @@ +""" +@author: jpzxshi +""" +import torch.nn as nn + +from .module import StructureNN + +class FNN(StructureNN): + '''Fully connected neural networks. 
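`forward` above implements the DeepONet combination: the dot product of the branch embedding of the sensed input function and the trunk embedding of the query location, plus a scalar bias. A shape-check sketch, assuming the `DeepONet` class above is in scope (the random batch is illustrative):

```
import torch

net = DeepONet(branch_dim=100, trunk_dim=1, width=50)
u_sensors = torch.rand(32, 100)  # 32 input functions sampled at 100 sensors
y_query = torch.rand(32, 1)      # one query coordinate per function
out = net(torch.cat([u_sensors, y_query], dim=-1))
print(out.shape)                 # torch.Size([32, 1])
```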
+ ''' + def __init__(self, ind, outd, layers=2, width=50, activation='relu', initializer='default', softmax=False): + super(FNN, self).__init__() + self.ind = ind + self.outd = outd + self.layers = layers + self.width = width + self.activation = activation + self.initializer = initializer + self.softmax = softmax + + self.modus = self.__init_modules() + self.__initialize() + + def forward(self, x): + for i in range(1, self.layers): + LinM = self.modus['LinM{}'.format(i)] + NonM = self.modus['NonM{}'.format(i)] + x = NonM(LinM(x)) + x = self.modus['LinMout'](x) + if self.softmax: + x = nn.functional.softmax(x, dim=-1) + return x + + def __init_modules(self): + modules = nn.ModuleDict() + if self.layers > 1: + modules['LinM1'] = nn.Linear(self.ind, self.width) + modules['NonM1'] = self.Act + for i in range(2, self.layers): + modules['LinM{}'.format(i)] = nn.Linear(self.width, self.width) + modules['NonM{}'.format(i)] = self.Act + modules['LinMout'] = nn.Linear(self.width, self.outd) + else: + modules['LinMout'] = nn.Linear(self.ind, self.outd) + + return modules + + def __initialize(self): + for i in range(1, self.layers): + self.weight_init_(self.modus['LinM{}'.format(i)].weight) + nn.init.constant_(self.modus['LinM{}'.format(i)].bias, 0) + self.weight_init_(self.modus['LinMout'].weight) + nn.init.constant_(self.modus['LinMout'].bias, 0) \ No newline at end of file diff --git a/seq2seq/learner/nn/hnn.py b/seq2seq/learner/nn/hnn.py new file mode 100755 index 0000000..017e61e --- /dev/null +++ b/seq2seq/learner/nn/hnn.py @@ -0,0 +1,53 @@ +""" +@author: jpzxshi +""" +import numpy as np +import torch + +from .module import LossNN +from .fnn import FNN +from ..integrator.hamiltonian import SV +from ..utils import lazy_property, grad + +class HNN(LossNN): + '''Hamiltonian neural networks. + ''' + def __init__(self, dim, layers=3, width=30, activation='tanh', initializer='orthogonal', integrator='midpoint'): + super(HNN, self).__init__() + self.dim = dim + self.layers = layers + self.width = width + self.activation = activation + self.initializer = initializer + self.integrator = integrator + + self.modus = self.__init_modules() + + def criterion(self, x0h, x1): + x0, h = (x0h[..., :-1], x0h[..., -1:]) + return self.__integrator_loss(x0, x1, h) + + def predict(self, x0, h, steps=1, keepinitx=False, returnnp=False): + N = max(int(h * 10), 1) + solver = SV(self.modus['H'], None, iterations=10, order=4, N=N) + res = solver.flow(x0, h, steps) if keepinitx else solver.flow(x0, h, steps)[..., 1:, :].squeeze() + return res.cpu().detach().numpy() if returnnp else res + + @lazy_property + def J(self): + d = int(self.dim / 2) + res = np.eye(self.dim, k=d) - np.eye(self.dim, k=-d) + return torch.tensor(res, dtype=self.Dtype, device=self.Device) + + def __init_modules(self): + modules = torch.nn.ModuleDict() + modules['H'] = FNN(self.dim, 1, self.layers, self.width, self.activation, self.initializer) + return modules + + def __integrator_loss(self, x0, x1, h): + if self.integrator == 'midpoint': + mid = ((x0 + x1) / 2).requires_grad_(True) + gradH = grad(self.modus['H'](mid), mid) + return torch.nn.MSELoss()((x1 - x0) / h, gradH @ self.J) + else: + raise NotImplementedError \ No newline at end of file diff --git a/seq2seq/learner/nn/module.py b/seq2seq/learner/nn/module.py new file mode 100755 index 0000000..05bfe99 --- /dev/null +++ b/seq2seq/learner/nn/module.py @@ -0,0 +1,135 @@ +""" +@author: jpzxshi +""" +import abc +import torch + +class Module(torch.nn.Module): + '''Standard module format. 
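For reference, the residual minimized by `criterion` above is the implicit-midpoint discretization of Hamilton's equations; with the row-vector convention used in the code, `gradH @ J` yields exactly (-dH/dq, dH/dp):

```
\frac{x_{n+1} - x_n}{h} \approx \nabla H\!\Big(\frac{x_n + x_{n+1}}{2}\Big)\,J,
\qquad
J = \begin{pmatrix} 0 & I_d \\ -I_d & 0 \end{pmatrix},
\qquad
\nabla H\,J = \Big(-\frac{\partial H}{\partial q},\ \frac{\partial H}{\partial p}\Big).
```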
+ ''' + def __init__(self): + super(Module, self).__init__() + self.activation = None + self.initializer = None + + self.__device = None + self.__dtype = None + + @property + def device(self): + return self.__device + + @property + def dtype(self): + return self.__dtype + + @device.setter + def device(self, d): + if d == 'cpu': + self.cpu() + elif d == 'gpu': + self.cuda() + else: + raise ValueError + self.__device = d + + @dtype.setter + def dtype(self, d): + if d == 'float': + self.to(torch.float) + elif d == 'double': + self.to(torch.double) + else: + raise ValueError + self.__dtype = d + + @property + def Device(self): + if self.__device == 'cpu': + return torch.device('cpu') + elif self.__device == 'gpu': + return torch.device('cuda') + + @property + def Dtype(self): + if self.__dtype == 'float': + return torch.float32 + elif self.__dtype == 'double': + return torch.float64 + + @property + def act(self): + if self.activation == 'sigmoid': + return torch.sigmoid + elif self.activation == 'relu': + return torch.relu + elif self.activation == 'tanh': + return torch.tanh + elif self.activation == 'elu': + return torch.elu + else: + raise NotImplementedError + + @property + def Act(self): + if self.activation == 'sigmoid': + return torch.nn.Sigmoid() + elif self.activation == 'relu': + return torch.nn.ReLU() + elif self.activation == 'tanh': + return torch.nn.Tanh() + elif self.activation == 'elu': + return torch.nn.ELU() + else: + raise NotImplementedError + + @property + def weight_init_(self): + if self.initializer == 'He normal': + return torch.nn.init.kaiming_normal_ + elif self.initializer == 'He uniform': + return torch.nn.init.kaiming_uniform_ + elif self.initializer == 'Glorot normal': + return torch.nn.init.xavier_normal_ + elif self.initializer == 'Glorot uniform': + return torch.nn.init.xavier_uniform_ + elif self.initializer == 'orthogonal': + return torch.nn.init.orthogonal_ + elif self.initializer == 'default': + if self.activation == 'relu': + return torch.nn.init.kaiming_normal_ + elif self.activation == 'tanh': + return torch.nn.init.orthogonal_ + else: + return lambda x: None + else: + raise NotImplementedError + +class StructureNN(Module): + '''Structure-oriented neural network used as a general map based on designing architecture. + ''' + def __init__(self): + super(StructureNN, self).__init__() + + def predict(self, x, returnnp=False): + if not isinstance(x, torch.Tensor): + x = torch.tensor(x, dtype=self.Dtype, device=self.Device) + return self(x).cpu().detach().numpy() if returnnp else self(x) + +class LossNN(Module, abc.ABC): + '''Loss-oriented neural network used as an algorithm based on designing loss. + ''' + def __init__(self): + super(LossNN, self).__init__() + + #@final + def forward(self, x): + return x + + @abc.abstractmethod + def criterion(self, X, y): + pass + + @abc.abstractmethod + def predict(self): + pass \ No newline at end of file diff --git a/seq2seq/learner/nn/seq2seq.py b/seq2seq/learner/nn/seq2seq.py new file mode 100755 index 0000000..752a66d --- /dev/null +++ b/seq2seq/learner/nn/seq2seq.py @@ -0,0 +1,62 @@ +""" +@author: jpzxshi +""" +import torch + +from .module import StructureNN + +class S2S(StructureNN): + '''Seq2seq model. 
+ Input: [batch size, len_in, dim_in] + Output: [batch size, len_out, dim_out] + ''' + def __init__(self, dim_in, len_in, dim_out, len_out, hidden_size=10, cell='LSTM'): + super(S2S, self).__init__() + self.dim_in = dim_in + self.len_in = len_in + self.dim_out = dim_out + self.len_out = len_out + self.hidden_size = hidden_size + self.cell = cell + self.encoder = self.__init_encoder() + self.decoder = self.__init_decoder() + self.att_weights = self.__init_att_weights() + self.out = self.__init_out() + + def forward(self, x): + to_squeeze = True if len(x.size()) == 2 else False + if to_squeeze: + x = x.view(1, self.len_in, self.dim_in) + zeros = torch.zeros([1, x.size(0), self.hidden_size], dtype=x.dtype, device=x.device) + init_state = (zeros, zeros) if self.cell == 'LSTM' else zeros + x, _ = self.encoder(x, init_state) + x = torch.softmax(self.att_weights, dim=1) @ x + x, _ = self.decoder(x, init_state) + x = self.out(x) + return x.squeeze(0) if to_squeeze else x + + def __init_encoder(self): + if self.cell == 'RNN': + return torch.nn.RNN(self.dim_in, self.hidden_size, batch_first=True) + elif self.cell == 'LSTM': + return torch.nn.LSTM(self.dim_in, self.hidden_size, batch_first=True) + elif self.cell == 'GRU': + return torch.nn.GRU(self.dim_in, self.hidden_size, batch_first=True) + else: + raise NotImplementedError + + def __init_decoder(self): + if self.cell == 'RNN': + return torch.nn.RNN(self.hidden_size, self.hidden_size, batch_first=True) + elif self.cell == 'LSTM': + return torch.nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True) + elif self.cell == 'GRU': + return torch.nn.GRU(self.hidden_size, self.hidden_size, batch_first=True) + else: + raise NotImplementedError + + def __init_att_weights(self): + return torch.nn.Parameter(torch.zeros([self.len_out, self.len_in])) + + def __init_out(self): + return torch.nn.Linear(self.hidden_size, self.dim_out) \ No newline at end of file diff --git a/seq2seq/learner/nn/sympnet.py b/seq2seq/learner/nn/sympnet.py new file mode 100755 index 0000000..1bd631f --- /dev/null +++ b/seq2seq/learner/nn/sympnet.py @@ -0,0 +1,190 @@ +""" +@author: jpzxshi +""" +import torch +import torch.nn as nn + +from .module import Module, StructureNN + +class LinearModule(Module): + '''Linear symplectic module. + ''' + def __init__(self, dim, layers): + super(LinearModule, self).__init__() + self.dim = dim + self.layers = layers + + self.params = self.__init_params() + + def forward(self, pqh): + p, q, h = pqh + for i in range(self.layers): + S = self.params['S{}'.format(i + 1)] + if i % 2 == 0: + p = p + q @ (S + S.t()) * h + else: + q = p @ (S + S.t()) * h + q + return p + self.params['bp'] * h, q + self.params['bq'] * h + + def __init_params(self): + '''Si is distributed N(0, 0.01), and b is set to zero. + ''' + d = int(self.dim / 2) + params = nn.ParameterDict() + for i in range(self.layers): + params['S{}'.format(i + 1)] = nn.Parameter((torch.randn([d, d]) * 0.01).requires_grad_(True)) + params['bp'] = nn.Parameter(torch.zeros([d]).requires_grad_(True)) + params['bq'] = nn.Parameter(torch.zeros([d]).requires_grad_(True)) + return params + +class ActivationModule(Module): + '''Activation symplectic module. 
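The learned `att_weights` above act as a soft selection over encoder states, mapping an input sequence of length `len_in` to a decoder input of length `len_out`. A shape-check sketch using the antiderivative settings from `seq2seq_main.py` below, assuming `S2S` is in scope:

```
import torch

net = S2S(dim_in=1, len_in=100, dim_out=1, len_out=100, hidden_size=5, cell='GRU')
u = torch.rand(32, 100, 1)  # batch of input sequences
s = net(u)
print(s.shape)              # torch.Size([32, 100, 1])
```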
+ ''' + def __init__(self, dim, activation, mode): + super(ActivationModule, self).__init__() + self.dim = dim + self.activation = activation + self.mode = mode + + self.params = self.__init_params() + + def forward(self, pqh): + p, q, h = pqh + if self.mode == 'up': + return p + self.act(q) * self.params['a'] * h, q + elif self.mode == 'low': + return p, self.act(p) * self.params['a'] * h + q + else: + raise ValueError + + def __init_params(self): + d = int(self.dim / 2) + params = nn.ParameterDict() + params['a'] = nn.Parameter((torch.randn([d]) * 0.01).requires_grad_(True)) + return params + +class GradientModule(Module): + '''Gradient symplectic module. + ''' + def __init__(self, dim, width, activation, mode): + super(GradientModule, self).__init__() + self.dim = dim + self.width = width + self.activation = activation + self.mode = mode + + self.params = self.__init_params() + + def forward(self, pqh): + p, q, h = pqh + if self.mode == 'up': + gradH = (self.act(q @ self.params['K'] + self.params['b']) * self.params['a']) @ self.params['K'].t() + return p + gradH * h, q + elif self.mode == 'low': + gradH = (self.act(p @ self.params['K'] + self.params['b']) * self.params['a']) @ self.params['K'].t() + return p, gradH * h + q + else: + raise ValueError + + def __init_params(self): + d = int(self.dim / 2) + params = nn.ParameterDict() + params['K'] = nn.Parameter((torch.randn([d, self.width]) * 0.01).requires_grad_(True)) + params['a'] = nn.Parameter((torch.randn([self.width]) * 0.01).requires_grad_(True)) + params['b'] = nn.Parameter(torch.zeros([self.width]).requires_grad_(True)) + return params + +class SympNet(StructureNN): + def __init__(self): + super(SympNet, self).__init__() + self.dim = None + + def predict(self, xh, steps=1, keepinitx=False, returnnp=False): + dim = xh.size(-1) + size = len(xh.size()) + if dim == self.dim: + pred = [xh] + for _ in range(steps): + pred.append(self(pred[-1])) + else: + x0, h = xh[..., :-1], xh[..., -1:] + pred = [x0] + for _ in range(steps): + pred.append(self(torch.cat([pred[-1], h], dim=-1))) + if keepinitx: + steps = steps + 1 + else: + pred = pred[1:] + res = torch.cat(pred, dim=-1).view([-1, steps, self.dim][2 - size:]) + return res.cpu().detach().numpy() if returnnp else res + +class LASympNet(SympNet): + '''LA-SympNet. + Input: [num, dim] or [num, dim + 1] + Output: [num, dim] + ''' + def __init__(self, dim, layers=3, sublayers=2, activation='sigmoid'): + super(LASympNet, self).__init__() + self.dim = dim + self.layers = layers + self.sublayers = sublayers + self.activation = activation + + self.modus = self.__init_modules() + + def forward(self, pqh): + d = int(self.dim / 2) + if pqh.size(-1) == self.dim + 1: + p, q, h = pqh[..., :d], pqh[..., d:-1], pqh[..., -1:] + elif pqh.size(-1) == self.dim: + p, q, h = pqh[..., :d], pqh[..., d:], torch.ones_like(pqh[..., -1:]) + else: + raise ValueError + for i in range(self.layers - 1): + LinM = self.modus['LinM{}'.format(i + 1)] + ActM = self.modus['ActM{}'.format(i + 1)] + p, q = ActM([*LinM([p, q, h]), h]) + return torch.cat(self.modus['LinMout']([p, q, h]), dim=-1) + + def __init_modules(self): + modules = nn.ModuleDict() + for i in range(self.layers - 1): + modules['LinM{}'.format(i + 1)] = LinearModule(self.dim, self.sublayers) + mode = 'up' if i % 2 == 0 else 'low' + modules['ActM{}'.format(i + 1)] = ActivationModule(self.dim, self.activation, mode) + modules['LinMout'] = LinearModule(self.dim, self.sublayers) + return modules + +class GSympNet(SympNet): + '''G-SympNet. 
+ Input: [num, dim] or [num, dim + 1] + Output: [num, dim] + ''' + def __init__(self, dim, layers=3, width=20, activation='sigmoid'): + super(GSympNet, self).__init__() + self.dim = dim + self.layers = layers + self.width = width + self.activation = activation + + self.modus = self.__init_modules() + + def forward(self, pqh): + d = int(self.dim / 2) + if pqh.size(-1) == self.dim + 1: + p, q, h = pqh[..., :d], pqh[..., d:-1], pqh[..., -1:] + elif pqh.size(-1) == self.dim: + p, q, h = pqh[..., :d], pqh[..., d:], torch.ones_like(pqh[..., -1:]) + else: + raise ValueError + for i in range(self.layers): + GradM = self.modus['GradM{}'.format(i + 1)] + p, q = GradM([p, q, h]) + return torch.cat([p, q], dim=-1) + + def __init_modules(self): + modules = nn.ModuleDict() + for i in range(self.layers): + mode = 'up' if i % 2 == 0 else 'low' + modules['GradM{}'.format(i + 1)] = GradientModule(self.dim, self.width, self.activation, mode) + return modules \ No newline at end of file diff --git a/seq2seq/learner/utils.py b/seq2seq/learner/utils.py new file mode 100755 index 0000000..c190d53 --- /dev/null +++ b/seq2seq/learner/utils.py @@ -0,0 +1,62 @@ +""" +@author: jpzxshi +""" +from functools import wraps +import time + +import numpy as np +import torch + +# +# Useful tools. +# +def timing(func): + @wraps(func) + def wrapper(*args, **kwargs): + t = time.time() + result = func(*args, **kwargs) + print('\'' + func.__name__ + '\'' + ' took {} s'.format(time.time() - t)) + return result + return wrapper + +class lazy_property: + def __init__(self, func): + self.func = func + + def __get__(self, instance, cls): + val = self.func(instance) + setattr(instance, self.func.__name__, val) + return val + +# +# Numpy tools. +# +def softmax(x): + e_x = np.exp(x - np.max(x, axis=-1, keepdims=True)) + return e_x / np.sum(e_x, axis=-1, keepdims=True) + +# +# Torch tools. +# +def cross_entropy_loss(y_pred, y_label): + if y_pred.size() == y_label.size(): + return torch.mean(-torch.sum(torch.log_softmax(y_pred, dim=-1) * y_label, dim=-1)) + else: + return torch.nn.CrossEntropyLoss()(y_pred, y_label.long()) + +def grad(y, x, create_graph=True, keepdim=False): + ''' + y: [N, Ny] or [Ny] + x: [N, Nx] or [Nx] + Return dy/dx ([N, Ny, Nx] or [Ny, Nx]). 
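Because every module above is a symplectic map, the whole network is one too, and it can be iterated as a discrete-time flow via `predict`. A hypothetical usage sketch (random initial states; the step size defaults to 1 when no `h` column is appended to the input):

```
import torch

net = GSympNet(dim=2, layers=3, width=20)
x0 = torch.rand(16, 2)   # (p, q) pairs
x1 = net(x0)             # one step of the learned map
traj = net.predict(x0, steps=5, keepinitx=True, returnnp=True)
print(x1.shape, traj.shape)  # torch.Size([16, 2]) (16, 6, 2)
```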
+ ''' + N = y.size(0) if len(y.size()) == 2 else 1 + Ny = y.size(-1) + Nx = x.size(-1) + z = torch.ones_like(y[..., 0]) + dy = [] + for i in range(Ny): + dy.append(torch.autograd.grad(y[..., i], x, grad_outputs=z, create_graph=create_graph)[0]) + shape = np.array([N, Ny])[2-len(y.size()):] + shape = list(shape) if keepdim else list(shape[shape > 1]) + return torch.cat(dy, dim=-1).view(shape + [Nx]) \ No newline at end of file diff --git a/seq2seq/seq2seq_main.py b/seq2seq/seq2seq_main.py new file mode 100755 index 0000000..6402b09 --- /dev/null +++ b/seq2seq/seq2seq_main.py @@ -0,0 +1,92 @@ +""" +@author: jpzxshi +""" +import learner as ln +from data import AntideData, PendData + +def antiderivative(): + device = 'gpu' # 'cpu' or 'gpu' + # data + T = 1 + s0 = [0] + sensor_in = 100 + sensor_out = 100 + length_scale = 0.2 + train_num = 1000 + test_num = 10000 + # seq2seq + cell = 'GRU' # 'RNN', 'LSTM' or 'GRU' + hidden_size = 5 + # training + lr = 0.001 + iterations = 50000 + print_every = 1000 + + data = AntideData(T, s0, sensor_in, sensor_out, length_scale, train_num, test_num) + net = ln.nn.S2S(data.dim, sensor_in, data.K, sensor_out, hidden_size, cell) + args = { + 'data': data, + 'net': net, + 'criterion': 'MSE', + 'optimizer': 'adam', + 'lr': lr, + 'iterations': iterations, + 'batch_size': None, + 'print_every': print_every, + 'save': True, + 'callback': None, + 'dtype': 'float', + 'device': device + } + + ln.Brain.Init(**args) + ln.Brain.Run() + ln.Brain.Restore() + ln.Brain.Output() + +def pendulum(): + device = 'gpu' # 'cpu' or 'gpu' + # data + T = 3 + s0 = [0, 0] + sensor_in = 100 + sensor_out = 100 + length_scale = 0.2 + train_num = 1000 + test_num = 10000 + # seq2seq + cell = 'GRU' # 'RNN', 'LSTM' or 'GRU' + hidden_size = 5 + # training + lr = 0.001 + iterations = 100000 + print_every = 1000 + + data = PendData(T, s0, sensor_in, sensor_out, length_scale, train_num, test_num) + net = ln.nn.S2S(data.dim, sensor_in, data.K, sensor_out, hidden_size, cell) + args = { + 'data': data, + 'net': net, + 'criterion': 'MSE', + 'optimizer': 'adam', + 'lr': lr, + 'iterations': iterations, + 'batch_size': None, + 'print_every': print_every, + 'save': True, + 'callback': None, + 'dtype': 'float', + 'device': device + } + + ln.Brain.Init(**args) + ln.Brain.Run() + ln.Brain.Restore() + ln.Brain.Output() + +def main(): + antiderivative() + #pendulum() + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/src/ADR_solver.py b/src/ADR_solver.py new file mode 100644 index 0000000..f65aaa3 --- /dev/null +++ b/src/ADR_solver.py @@ -0,0 +1,81 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import matplotlib.pyplot as plt +import numpy as np + + +def solve_ADR(xmin, xmax, tmin, tmax, k, v, g, dg, f, u0, Nx, Nt): + """Solve 1D + u_t = (k(x) u_x)_x - v(x) u_x + g(u) + f(x, t) + with zero boundary condition. 
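A quick check of the `grad` utility above on y = (x0*x1, x0 + x1), whose Jacobian at each sample is [[x1, x0], [1, 1]] (the toy function is mine, not from the repo):

```
import torch

x = torch.rand(5, 2).requires_grad_(True)
y = torch.stack([x[:, 0] * x[:, 1], x[:, 0] + x[:, 1]], dim=-1)
J = grad(y, x)   # grad as defined above; result shape [5, 2, 2]
print(J.shape)
```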
+ """ + + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + h = x[1] - x[0] + dt = t[1] - t[0] + h2 = h ** 2 + + D1 = np.eye(Nx, k=1) - np.eye(Nx, k=-1) + D2 = -2 * np.eye(Nx) + np.eye(Nx, k=-1) + np.eye(Nx, k=1) + D3 = np.eye(Nx - 2) + k = k(x) + M = -np.diag(D1 @ k) @ D1 - 4 * np.diag(k) @ D2 + m_bond = 8 * h2 / dt * D3 + M[1:-1, 1:-1] + v = v(x) + v_bond = 2 * h * np.diag(v[1:-1]) @ D1[1:-1, 1:-1] + 2 * h * np.diag( + v[2:] - v[: Nx - 2] + ) + mv_bond = m_bond + v_bond + c = 8 * h2 / dt * D3 - M[1:-1, 1:-1] - v_bond + f = f(x[:, None], t) + + u = np.zeros((Nx, Nt)) + u[:, 0] = u0(x) + for i in range(Nt - 1): + gi = g(u[1:-1, i]) + dgi = dg(u[1:-1, i]) + h2dgi = np.diag(4 * h2 * dgi) + A = mv_bond - h2dgi + b1 = 8 * h2 * (0.5 * f[1:-1, i] + 0.5 * f[1:-1, i + 1] + gi) + b2 = (c - h2dgi) @ u[1:-1, i].T + u[1:-1, i + 1] = np.linalg.solve(A, b1 + b2) + return x, t, u + + +def main(): + xmin, xmax = -1, 1 + tmin, tmax = 0, 1 + k = lambda x: x ** 2 - x ** 2 + 1 + v = lambda x: np.ones_like(x) + g = lambda u: u ** 3 + dg = lambda u: 3 * u ** 2 + f = ( + lambda x, t: np.exp(-t) * (1 + x ** 2 - 2 * x) + - (np.exp(-t) * (1 - x ** 2)) ** 3 + ) + u0 = lambda x: (x + 1) * (1 - x) + u_true = lambda x, t: np.exp(-t) * (1 - x ** 2) + + # xmin, xmax = 0, 1 + # tmin, tmax = 0, 1 + # k = lambda x: np.ones_like(x) + # v = lambda x: np.zeros_like(x) + # g = lambda u: u ** 2 + # dg = lambda u: 2 * u + # f = lambda x, t: x * (1 - x) + 2 * t - t ** 2 * (x - x ** 2) ** 2 + # u0 = lambda x: np.zeros_like(x) + # u_true = lambda x, t: t * x * (1 - x) + + Nx, Nt = 100, 100 + x, t, u = solve_ADR(xmin, xmax, tmin, tmax, k, v, g, dg, f, u0, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + plt.plot(x, u) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/src/ADVD_solver.py b/src/ADVD_solver.py new file mode 100755 index 0000000..a1b5877 --- /dev/null +++ b/src/ADVD_solver.py @@ -0,0 +1,78 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt +import seaborn as sns + + +def solve_ADVD(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt): + """Solve + u_t + u_x - D * u_xx = 0 + u(x, 0) = V(x) + """ + # Crank-Nicholson + D = 0.1 + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + h = x[1] - x[0] + dt = t[1] - t[0] + lam = dt / h + mu = dt / h ** 2 + u = np.zeros([Nx, Nt]) + u[:, 0] = V(x) + + I = np.eye(Nx - 1) + I1 = np.roll(I, 1, axis=0) + I2 = np.roll(I, -1, axis=0) + A = (1 + D * mu) * I - (lam / 4 + D * mu / 2) * I1 + (lam / 4 - D * mu / 2) * I2 + B = 2 * I - A + C = np.linalg.solve(A, B) + + for n in range(Nt - 1): + u[1:, n + 1] = C @ u[1:, n] + u[0, :] = u[-1, :] + + return x, t, u + + +def main(): + xmin, xmax = 0, 1 + tmin, tmax = 0, 1 + V = lambda x: np.sin(2 * np.pi * x) # 1 - 2 * x#np.sin(2 * np.pi * x) + f = None + g = None + D = 0.1 + + u_true = lambda x, t: np.exp(-4 * np.pi ** 2 * D * t) * np.sin( + 2 * np.pi * (x - t) + ) # V(np.sin(np.pi * (x - t)) ** 2) + + Nx, Nt = 100, 991 + x, t, u = solve_ADVD(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + print(np.average(abs(u - u_true(x[:, None], t)))) + # diff = u - u_true(x[:, None], t) + # plt.plot(x, u) + # plt.show() + + u_true = u_true(x[:, None], t)[:, 0:991:10] + u = u[:, 0:991:10] + error = abs(u - u_true) + axis = plt.subplot(111) + sns.heatmap(error, linewidths=0.00, ax=axis, cmap="rainbow") + xlabel = [format(i, ".1f") for i in np.linspace(0, 
1, num=11)] + ylabel = [format(i, ".1f") for i in np.linspace(0, 1, num=11)] + axis.set_xticks(range(0, 101, 10)) + axis.set_xticklabels(xlabel) + axis.set_yticks(range(0, 101, 10)) + axis.set_yticklabels(ylabel) + axis.set_xlabel("t") + axis.set_ylabel("x") + axis.set_title(r"Error", fontdict={"fontsize": 30}, loc="left") + + +if __name__ == "__main__": + main() diff --git a/src/CVC_solver.py b/src/CVC_solver.py new file mode 100755 index 0000000..f9ebd96 --- /dev/null +++ b/src/CVC_solver.py @@ -0,0 +1,187 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import matplotlib.pyplot as plt + + +def solve_CVC(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt): + """Solve + u_t + a(x) * u_x = 0 + """ + + # Case I: Analytical solution for a(x)=1, u(x,0)=V(x) (V,V' periodic) + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + u = V((x[:, None] - t) % 1) + + # Case II: Wendroff for a(x)=1, u(x,0)=f(x), u(0,t)=g(t) (f(0)=g(0)) + """ + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + h = x[1] - x[0] + dt = t[1] - t[0] + lam = dt / h + u = np.zeros([Nx, Nt]) + u[:, 0] = f(x) + u[0, :] = g(t) + + r = (1 - lam) / (1 + lam) + K = np.eye(Nx - 1, k=0) + K_temp = np.eye(Nx - 1, k=0) + Trans = np.eye(Nx - 1, k=-1) + for _ in range(Nx - 2): + K_temp = (-r) * (Trans @ K_temp) + K += K_temp + D = r * np.eye(Nx - 1, k=0) + np.eye(Nx - 1, k=-1) + + for n in range(Nt - 1): + b = np.zeros(Nx - 1) + b[0] = g(n * dt) - r * g((n + 1) * dt) + u[1:, n + 1] = K @ (D @ u[1:, n] + b) + """ + + # Case III: Wendroff for a(x)=1+0.1*V(x), u(x,0)=f(x), u(0,t)=g(t) (f(0)=g(0)) + """ + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + h = x[1] - x[0] + dt = t[1] - t[0] + lam = dt / h + v = 1 + 0.1 * V(x) + u = np.zeros([Nx, Nt]) + u[:, 0] = f(x) + u[0, :] = g(t) + a = (v[:-1] + v[1:]) / 2 + k = (1 - a * lam) / (1 + a * lam) + K = np.eye(Nx - 1, k=0) + K_temp = np.eye(Nx - 1, k=0) + Trans = np.eye(Nx - 1, k=-1) + for _ in range(Nx - 2): + K_temp = (-k[:, None]) * (Trans @ K_temp) + K += K_temp + D = np.diag(k) + np.eye(Nx - 1, k=-1) + + for n in range(Nt - 1): + b = np.zeros(Nx - 1) + b[0] = g(n * dt) - k[0] * g((n + 1) * dt) + u[1:, n + 1] = K @ (D @ u[1:, n] + b) + """ + + # Case IV: Wendroff for a(x)=1+0.1*(V(x)+V(1-x))/2, u(x,0)=f(x) (f,f' periodic) + """ + x = np.linspace(xmin, xmax, Nx) + t = np.linspace(tmin, tmax, Nt) + h = x[1] - x[0] + dt = t[1] - t[0] + lam = dt / h + v = 1 + 0.1 * (V(x) + V(x)[::-1]) / 2 + u = np.zeros([Nx, Nt]) + u[:, 0] = f(x) + + a = (v[:-1] + v[1:]) / 2 + I = np.eye(Nx - 1) + Ir = np.roll(I, 1, axis=0) + D = lam * a[:, None] * (I - Ir) + A = I + Ir + D + B = I + Ir - D + + for n in range(Nt - 1): + u[1:, n + 1] = np.linalg.solve(A, B @ u[1:, n]) + u[0, :] = u[-1, :] + """ + + return x, t, u + + +def main(): + # Case I: Analytical solution for a(x)=1, u(x,0)=V(x) (V,V' periodic) + xmin, xmax = 0, 1 + tmin, tmax = 0, 1 + V = lambda x: np.sin(2 * np.pi * x) + f = None + g = None + + u_true = lambda x, t: V(x - t) + + Nx, Nt = 100, 100 + x, t, u = solve_CVC(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + print(np.average(abs(u - u_true(x[:, None], t)))) + + # Case II: Wendroff for a(x)=1, u(x,0)=V(x), u(0,t)=0 (V(0)=0) + """ + xmin, xmax = 0, 1 + tmin, tmax = 0, 1 + V = None + f = lambda x: (2 * np.pi * x) ** 5 + g = lambda t: (2 * np.pi * (-t)) ** 5 + + u_true = lambda x, t: (2 * np.pi * (x - t)) ** 5 + + Nx, Nt = 100, 100 
+ x, t, u = solve_CVC(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + print(np.average(abs(u - u_true(x[:, None], t)))) + """ + + # Case III: Wendroff for a(x)=1+0.1*V(x), u(x,0)=f(x), u(0,t)=g(t) (f(0)=g(0)) + """ + vel = 1 + xmin, xmax = 0, 1 + tmin, tmax = 0, 1 + V = lambda x: np.ones_like(x) * vel + f = lambda x: np.sin(2 * np.pi * x) + g = lambda t: np.sin(2 * np.pi * (-(1 + 0.1 * vel) * t)) + + u_true = lambda x, t: np.sin(2 * np.pi * (x - (1 + 0.1 * vel) * t)) + + Nx, Nt = 100, 100 + x, t, u = solve_CVC(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + print(np.average(abs(u - u_true(x[:, None], t)))) + """ + + # Case IV: Wendroff for a(x)=1+0.1*(V(x)+V(1-x))/2, u(x,0)=f(x) (f,f' periodic) + """ + vel = 1 + xmin, xmax = 0, 1 + tmin, tmax = 0, 1 + V = lambda x: np.ones_like(x) * vel + f = lambda x: np.sin(2 * np.pi * x) + g = lambda t: np.sin(2 * np.pi * (-(1 + 0.1 * vel) * t)) + + u_true = lambda x, t: np.sin(2 * np.pi * (x - (1 + 0.1 * vel) * t)) + + Nx, Nt = 100, 100 + x, t, u = solve_CVC(xmin, xmax, tmin, tmax, f, g, V, Nx, Nt) + + print(np.max(abs(u - u_true(x[:, None], t)))) + print(np.average(abs(u - u_true(x[:, None], t)))) + """ + + # plot + u_true = u_true(x[:, None], t) + error = abs(u - u_true) + axis = plt.subplot(111) + plt.imshow(error, cmap="rainbow", vmin=0) + plt.colorbar() + xlabel = [format(i, ".1f") for i in np.linspace(0, 1, num=11)] + ylabel = [format(i, ".1f") for i in np.linspace(0, 1, num=11)] + axis.set_xticks(range(0, 101, 10)) + axis.set_xticklabels(xlabel) + axis.set_yticks(range(0, 101, 10)) + axis.set_yticklabels(ylabel) + axis.set_xlabel("t") + axis.set_ylabel("x") + axis.set_title(r"Error", fontdict={"fontsize": 30}, loc="left") + + return error + + +if __name__ == "__main__": + error = main() diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..a5dd21f --- /dev/null +++ b/src/config.py @@ -0,0 +1 @@ +processes = 4 diff --git a/src/deeponet_dataset.py b/src/deeponet_dataset.py new file mode 100644 index 0000000..ee5089e --- /dev/null +++ b/src/deeponet_dataset.py @@ -0,0 +1,63 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +import deepxde as dde +from utils import mean_squared_error_outlier, safe_test, trim_to_65535 + + +def run(m, net, lr, epochs): + d = np.load("train.npz") + X_train, y_train = (d["X_train0"], d["X_train1"]), d["y_train"] + d = np.load("test.npz") + X_test, y_test = (d["X_test0"], d["X_test1"]), d["y_test"] + + X_test_trim = trim_to_65535(X_test)[0] + y_test_trim = trim_to_65535(y_test)[0] + data = dde.data.OpDataSet( + X_train=X_train, y_train=y_train, X_test=X_test_trim, y_test=y_test_trim + ) + + model = dde.Model(data, net) + model.compile("adam", lr=lr, metrics=[mean_squared_error_outlier]) + checker = dde.callbacks.ModelCheckpoint( + "model/model.ckpt", save_better_only=True, period=1000 + ) + losshistory, train_state = model.train(epochs=epochs, callbacks=[checker]) + dde.saveplot(losshistory, train_state, issave=False, isplot=True) + + model.restore("model/model.ckpt-" + str(train_state.best_step), verbose=1) + safe_test(model, data, X_test, y_test) + + for i in range(10): + d = np.load("example{}.npz".format(i)) + X_test, y_test = (d["X_test0"], d["X_test1"]), d["y_test"] + safe_test(model, data, X_test, y_test, fname="example{}.dat".format(i)) + + +def main(): + # Pathwise solution + # m = 100 + # epochs = 50000 + # 
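`run()` in `deeponet_dataset.py` above expects the datasets as compressed `.npz` files with fixed key names: branch inputs (function values at the sensors), trunk inputs (query coordinates), and operator values. A sketch of the layout (the random arrays and shapes are placeholders):

```
import numpy as np

X0 = np.random.rand(1000, 240).astype(np.float32)  # branch inputs: m sensors
X1 = np.random.rand(1000, 1).astype(np.float32)    # trunk inputs: dim_x coordinates
y = np.random.rand(1000, 1).astype(np.float32)
np.savez_compressed("train.npz", X_train0=X0, X_train1=X1, y_train=y)
```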
dim_x = 6 + # Statistical solution + m = 240 + epochs = 20000 + dim_x = 1 + lr = 0.001 + net = dde.maps.OpNN( + [m, 100, 100], + [dim_x, 100, 100], + "relu", + "Glorot normal", + use_bias=True, + stacked=False, + ) + + run(m, net, lr, epochs) + + +if __name__ == "__main__": + main() diff --git a/src/deeponet_pde.py b/src/deeponet_pde.py new file mode 100644 index 0000000..61259bc --- /dev/null +++ b/src/deeponet_pde.py @@ -0,0 +1,285 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import itertools + +import numpy as np +import tensorflow as tf + +import deepxde as dde +from spaces import FinitePowerSeries, FiniteChebyshev, GRF +from system import LTSystem, ODESystem, DRSystem, CVCSystem, ADVDSystem +from utils import merge_values, trim_to_65535, mean_squared_error_outlier, safe_test + + +def test_u_lt(nn, system, T, m, model, data, u, fname): + """Test Legendre transform""" + sensors = np.linspace(-1, 1, num=m) + sensor_value = u(sensors) + s = system.eval_s(sensor_value) + ns = np.arange(system.npoints_output)[:, None] + X_test = [np.tile(sensor_value, (system.npoints_output, 1)), ns] + y_test = s + if nn != "opnn": + X_test = merge_values(X_test) + y_pred = model.predict(data.transform_inputs(X_test)) + np.savetxt("test/u_" + fname, sensor_value) + np.savetxt("test/s_" + fname, np.hstack((ns, y_test, y_pred))) + + +def test_u_ode(nn, system, T, m, model, data, u, fname, num=100): + """Test ODE""" + sensors = np.linspace(0, T, num=m)[:, None] + sensor_values = u(sensors) + x = np.linspace(0, T, num=num)[:, None] + X_test = [np.tile(sensor_values.T, (num, 1)), x] + y_test = system.eval_s_func(u, x) + if nn != "opnn": + X_test = merge_values(X_test) + y_pred = model.predict(data.transform_inputs(X_test)) + np.savetxt(fname, np.hstack((x, y_test, y_pred))) + print("L2relative error:", dde.metrics.l2_relative_error(y_test, y_pred)) + + +def test_u_dr(nn, system, T, m, model, data, u, fname): + """Test Diffusion-reaction""" + sensors = np.linspace(0, 1, num=m) + sensor_value = u(sensors) + s = system.eval_s(sensor_value) + xt = np.array(list(itertools.product(range(m), range(system.Nt)))) + xt = xt * [1 / (m - 1), T / (system.Nt - 1)] + X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt] + y_test = s.reshape([m * system.Nt, 1]) + if nn != "opnn": + X_test = merge_values(X_test) + y_pred = model.predict(data.transform_inputs(X_test)) + np.savetxt(fname, np.hstack((xt, y_test, y_pred))) + + +def test_u_cvc(nn, system, T, m, model, data, u, fname): + """Test Advection""" + sensors = np.linspace(0, 1, num=m) + sensor_value = u(sensors) + s = system.eval_s(sensor_value) + xt = np.array(list(itertools.product(range(m), range(system.Nt)))) + xt = xt * [1 / (m - 1), T / (system.Nt - 1)] + X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt] + y_test = s.reshape([m * system.Nt, 1]) + if nn != "opnn": + X_test = merge_values(X_test) + y_pred = model.predict(data.transform_inputs(X_test)) + np.savetxt("test/u_" + fname, sensor_value) + np.savetxt("test/s_" + fname, np.hstack((xt, y_test, y_pred))) + + +def test_u_advd(nn, system, T, m, model, data, u, fname): + """Test Advection-diffusion""" + sensors = np.linspace(0, 1, num=m) + sensor_value = u(sensors) + s = system.eval_s(sensor_value) + xt = np.array(list(itertools.product(range(m), range(system.Nt)))) + xt = xt * [1 / (m - 1), T / (system.Nt - 1)] + X_test = [np.tile(sensor_value, (m * system.Nt, 1)), xt] + y_test = s.reshape([m * system.Nt, 1]) + if nn != "opnn": + 
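The `test_u_*` helpers above all build their query set the same way: integer index pairs from `itertools.product`, rescaled to the space-time domain, with the sensed input tiled once per query point. A minimal sketch of the grid construction:

```
import itertools
import numpy as np

m, Nt, T = 5, 4, 1.0
xt = np.array(list(itertools.product(range(m), range(Nt))))
xt = xt * [1 / (m - 1), T / (Nt - 1)]
print(xt.shape)  # (20, 2): every sensor location paired with every time step
```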
X_test = merge_values(X_test) + y_pred = model.predict(data.transform_inputs(X_test)) + np.savetxt("test/u_" + fname, sensor_value) + np.savetxt("test/s_" + fname, np.hstack((xt, y_test, y_pred))) + + +def lt_system(npoints_output): + """Legendre transform""" + return LTSystem(npoints_output) + + +def ode_system(T): + """ODE""" + + def g(s, u, x): + # Antiderivative + return u + # Nonlinear ODE + # return -s**2 + u + # Gravity pendulum + # k = 1 + # return [s[1], - k * np.sin(s[0]) + u] + + s0 = [0] + # s0 = [0, 0] # Gravity pendulum + return ODESystem(g, s0, T) + + +def dr_system(T, npoints_output): + """Diffusion-reaction""" + D = 0.01 + k = 0.01 + Nt = 100 + return DRSystem(D, k, T, Nt, npoints_output) + + +def cvc_system(T, npoints_output): + """Advection""" + f = None + g = None + Nt = 100 + return CVCSystem(f, g, T, Nt, npoints_output) + + +def advd_system(T, npoints_output): + """Advection-diffusion""" + f = None + g = None + Nt = 100 + return ADVDSystem(f, g, T, Nt, npoints_output) + + +def run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test): + # space_test = GRF(1, length_scale=0.1, N=1000, interp="cubic") + + X_train, y_train = system.gen_operator_data(space, m, num_train) + X_test, y_test = system.gen_operator_data(space, m, num_test) + if nn != "opnn": + X_train = merge_values(X_train) + X_test = merge_values(X_test) + + # np.savez_compressed("train.npz", X_train0=X_train[0], X_train1=X_train[1], y_train=y_train) + # np.savez_compressed("test.npz", X_test0=X_test[0], X_test1=X_test[1], y_test=y_test) + # return + + # d = np.load("train.npz") + # X_train, y_train = (d["X_train0"], d["X_train1"]), d["y_train"] + # d = np.load("test.npz") + # X_test, y_test = (d["X_test0"], d["X_test1"]), d["y_test"] + + X_test_trim = trim_to_65535(X_test)[0] + y_test_trim = trim_to_65535(y_test)[0] + if nn == "opnn": + data = dde.data.OpDataSet( + X_train=X_train, y_train=y_train, X_test=X_test_trim, y_test=y_test_trim + ) + else: + data = dde.data.DataSet( + X_train=X_train, y_train=y_train, X_test=X_test_trim, y_test=y_test_trim + ) + + model = dde.Model(data, net) + model.compile("adam", lr=lr, metrics=[mean_squared_error_outlier]) + checker = dde.callbacks.ModelCheckpoint( + "model/model.ckpt", save_better_only=True, period=1000 + ) + losshistory, train_state = model.train(epochs=epochs, callbacks=[checker]) + print("# Parameters:", np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])) + dde.saveplot(losshistory, train_state, issave=True, isplot=True) + + model.restore("model/model.ckpt-" + str(train_state.best_step), verbose=1) + safe_test(model, data, X_test, y_test) + + tests = [ + (lambda x: x, "x.dat"), + (lambda x: np.sin(np.pi * x), "sinx.dat"), + (lambda x: np.sin(2 * np.pi * x), "sin2x.dat"), + (lambda x: x * np.sin(2 * np.pi * x), "xsin2x.dat"), + ] + for u, fname in tests: + if problem == "lt": + test_u_lt(nn, system, T, m, model, data, u, fname) + elif problem == "ode": + test_u_ode(nn, system, T, m, model, data, u, fname) + elif problem == "dr": + test_u_dr(nn, system, T, m, model, data, u, fname) + elif problem == "cvc": + test_u_cvc(nn, system, T, m, model, data, u, fname) + elif problem == "advd": + test_u_advd(nn, system, T, m, model, data, u, fname) + + if problem == "lt": + features = space.random(10) + sensors = np.linspace(0, 2, num=m)[:, None] + u = space.eval_u(features, sensors) + for i in range(u.shape[0]): + test_u_lt(nn, system, T, m, model, data, lambda x: u[i], str(i) + ".dat") + + if problem == "cvc": + features = 
space.random(10) + sensors = np.linspace(0, 1, num=m)[:, None] + # Case I Input: V(sin^2(pi*x)) + u = space.eval_u(features, np.sin(np.pi * sensors) ** 2) + # Case II Input: x*V(x) + # u = sensors.T * space.eval_u(features, sensors) + # Case III/IV Input: V(x) + # u = space.eval_u(features, sensors) + for i in range(u.shape[0]): + test_u_cvc(nn, system, T, m, model, data, lambda x: u[i], str(i) + ".dat") + + if problem == "advd": + features = space.random(10) + sensors = np.linspace(0, 1, num=m)[:, None] + u = space.eval_u(features, np.sin(np.pi * sensors) ** 2) + for i in range(u.shape[0]): + test_u_advd(nn, system, T, m, model, data, lambda x: u[i], str(i) + ".dat") + + +def main(): + # Problems: + # - "lt": Legendre transform + # - "ode": Antiderivative, Nonlinear ODE, Gravity pendulum + # - "dr": Diffusion-reaction + # - "cvc": Advection + # - "advd": Advection-diffusion + problem = "ode" + T = 1 + if problem == "lt": + npoints_output = 20 + system = lt_system(npoints_output) + elif problem == "ode": + system = ode_system(T) + elif problem == "dr": + npoints_output = 100 + system = dr_system(T, npoints_output) + elif problem == "cvc": + npoints_output = 100 + system = cvc_system(T, npoints_output) + elif problem == "advd": + npoints_output = 100 + system = advd_system(T, npoints_output) + + # Function space + # space = FinitePowerSeries(N=100, M=1) + # space = FiniteChebyshev(N=20, M=1) + # space = GRF(2, length_scale=0.2, N=2000, interp="cubic") # "lt" + space = GRF(1, length_scale=0.2, N=1000, interp="cubic") + # space = GRF(T, length_scale=0.2, N=1000 * T, interp="cubic") + + # Hyperparameters + m = 100 + num_train = 10000 + num_test = 100000 + lr = 0.001 + epochs = 50000 + + # Network + nn = "opnn" + activation = "relu" + initializer = "Glorot normal" # "He normal" or "Glorot normal" + dim_x = 1 if problem in ["ode", "lt"] else 2 + if nn == "opnn": + net = dde.maps.OpNN( + [m, 40, 40], + [dim_x, 40, 40], + activation, + initializer, + use_bias=True, + stacked=False, + ) + elif nn == "fnn": + net = dde.maps.FNN([m + dim_x] + [100] * 2 + [1], activation, initializer) + elif nn == "resnet": + net = dde.maps.ResNet(m + dim_x, 1, 128, 2, activation, initializer) + + run(problem, system, space, T, m, nn, net, lr, epochs, num_train, num_test) + + +if __name__ == "__main__": + main() diff --git a/src/sde.py b/src/sde.py new file mode 100644 index 0000000..df481ef --- /dev/null +++ b/src/sde.py @@ -0,0 +1,369 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import matplotlib.pyplot as plt +import numpy as np +from pathos.pools import ProcessPool +from sklearn import gaussian_process as gp + +import config +from spaces import GRF, GRF_KL +from utils import eig, make_triple, timing, trapz + + +def KL(): + l = 0.2 + N = 1000 + kernel = gp.kernels.RBF(length_scale=l) + # kernel = gp.kernels.Matern(length_scale=l, nu=0.5) # AE + # kernel = gp.kernels.Matern(length_scale=l, nu=2.5) + + eigval, eigfun = eig(kernel, 10, N, eigenfunction=True) + print(eigval) + + variance = 0.999 + s = np.cumsum(eigval) + idx = np.nonzero(s > variance)[0][1] + print(idx + 1) + + x = np.linspace(0, 1, num=N) + plt.plot(x, eigfun[:, 0]) + plt.plot(x, eigfun[:, idx - 1]) + plt.plot(x, eigfun[:, idx]) + plt.show() + + +class GRFs(object): + def __init__( + self, T, kernel, length_scale_min, length_scale_max, N=100, interp="linear" + ): + self.T = T + self.kernel = kernel + self.length_scale_min = length_scale_min + self.length_scale_max = 
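The `GRFs` family above samples a random length scale per training function and delegates the actual sampling to `spaces.GRF`/`spaces.GRF_KL`. For intuition, a generic sketch of drawing one zero-mean Gaussian random field path with an RBF kernel; this is a stand-in, not the repo's `spaces.GRF` implementation:

```
import numpy as np
from sklearn import gaussian_process as gp

T, N, l = 1.0, 100, 0.2
x = np.linspace(0, T, num=N)[:, None]
K = gp.kernels.RBF(length_scale=l)(x)          # N x N covariance matrix
L = np.linalg.cholesky(K + 1e-13 * np.eye(N))  # jitter for numerical stability
sample = L @ np.random.randn(N)                # one GRF sample on the grid
```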
length_scale_max + self.N = N + self.interp = interp + + def random(self, n): + return (self.length_scale_max - self.length_scale_min) * np.random.rand( + n, 1 + ) + self.length_scale_min + + def eval_u_one(self, l, sensors, M): + grf = GRF( + self.T, kernel=self.kernel, length_scale=l[0], N=self.N, interp=self.interp + ) + us = grf.random(M) + ys = grf.eval_u(us, sensors) + return np.ravel(ys) + + def eval_u(self, ls, sensors, M): + return np.vstack([self.eval_u_one(l, sensors, M) for l in ls]) + + def eval_KL_bases(self, ls, sensors, M): + def helper(l): + grf = GRF_KL( + self.T, + kernel=self.kernel, + length_scale=l[0], + num_eig=M, + N=self.N, + interp=self.interp, + ) + return np.ravel(grf.bases(sensors)) + + p = ProcessPool(nodes=config.processes) + return np.vstack(p.map(helper, ls)) + + +class SODESystem(object): + def __init__(self, T, y0, Nx=None, npoints_output=None): + """Stochastic ODE""" + self.T = T + self.y0 = y0 + self.Nx = Nx + self.npoints_output = npoints_output + + @timing + def gen_operator_data(self, space, Nx, M, num, representation): + print("Generating operator data...", flush=True) + features = space.random(num) + sensors = np.linspace(0, self.T, num=Nx)[:, None] + if representation == "samples": + X = space.eval_u(features, sensors, M) + elif representation == "KL": + X = space.eval_KL_bases(features, sensors, M) + t = self.T * np.random.rand(num)[:, None] + y = self.eval_s(features, t) + return [X, t], y + + @timing + def gen_example_data(self, space, l, Nx, M, representation, num=100): + print("Generating example operator data...", flush=True) + features = np.full((num, 1), l) + sensors = np.linspace(0, self.T, num=Nx)[:, None] + if representation == "samples": + X = space.eval_u(features, sensors, M) + elif representation == "KL": + X = space.eval_KL_bases(features, sensors, M) + t = np.linspace(0, self.T, num=num)[:, None] + y = self.eval_s(features, t) + return [X, t], y + + def eval_s(self, features, t): + sigma2 = 2 * features * t + 2 * features ** 2 * (np.exp(-t / features) - 1) + # mean + y = self.y0 * np.exp(1 / 2 * sigma2) + # 2nd moment + # y = self.y0**2 * np.exp(2 * sigma2) + # 3rd moment + # y = self.y0**3 * np.exp(9/2 * sigma2) + # 4th moment + # y = self.y0**4 * np.exp(8 * sigma2) + return y + + @timing + def gen_operator_data_path(self, space, Nx, M, num): + print("Generating operator data...", flush=True) + features = space.random(num) + t = np.linspace(0, self.T, num=self.Nx)[:, None] + bases = space.eval_KL_bases(features, t, M) + rv = np.random.randn(num, M) + # rv = np.clip(rv, -3.1, 3.1) + p = ProcessPool(nodes=config.processes) + s_values = np.array(p.map(self.eval_s_path, bases, rv)) + + sensors = np.linspace(0, self.T, num=Nx)[:, None] + sensor_values = space.eval_KL_bases(features, sensors, M) + sensor_values = np.hstack((sensor_values, rv)) + res = [ + make_triple(sensor_values[i], t, s_values[i], self.npoints_output) + for i in range(num) + ] + res = np.vstack(res) + m = Nx * M + return [res[:, :m], res[:, m:-1]], res[:, -1:] + + @timing + def gen_example_data_path(self, space, l, Nx, M): + print("Generating operator data...", flush=True) + features = np.full((1, 1), l) + t = np.linspace(0, self.T, num=self.Nx)[:, None] + bases = space.eval_KL_bases(features, t, M) + rv = np.random.randn(1, M) + # rv = np.clip(rv, -3.1, 3.1) + s_values = self.eval_s_path(bases[0], rv[0]) + + sensors = np.linspace(0, self.T, num=Nx)[:, None] + sensor_value = space.eval_KL_bases(features, sensors, M) + return ( + [ + np.tile(sensor_value, (self.Nx, 
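`eval_s` above uses closed-form moments: for y(t) = y0 exp(int_0^t k ds) with k a zero-mean GRF, the exponent is Gaussian, so each moment is a log-normal expectation. Assuming the exponential ("AE") kernel exp(-|s-r|/l) used for this system in `main()`, the exponent's variance is:

```
\sigma^2(t) = \int_0^t\!\!\int_0^t e^{-|s-r|/\ell}\,ds\,dr
            = 2\ell t + 2\ell^2\big(e^{-t/\ell} - 1\big),
\qquad
\mathbb{E}\big[y_0\,e^{\int_0^t k\,ds}\big] = y_0\,e^{\sigma^2(t)/2},
```

which is exactly `sigma2` and the mean branch in the code, with `features` holding the length scales.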
1)), + np.hstack((np.tile(rv, (self.Nx, 1)), t)), + ], + s_values[:, None], + ) + + def eval_s_path(self, bases, rv): + bases = bases.reshape((-1, self.Nx)) + k = np.dot(rv, bases) + h = self.T / (self.Nx - 1) + K = trapz(k, h) + return self.y0 * np.exp(K) + + +class SPDESystem(object): + def __init__(self, T, f, Nx, M, npoints_output): + """Stochastic PDE""" + self.T = T + self.f = f + self.Nx = Nx + self.M = M + self.npoints_output = npoints_output + + def random_process(self, gp): + # return np.exp(gp) + return np.exp(0.1 * gp) + + @timing + def gen_operator_data(self, space, Nx, M, num, representation): + print("Generating operator data...", flush=True) + features = space.random(num) + # Generate outputs + x = np.linspace(0, self.T, num=self.Nx)[:, None] + sensor_values = self.random_process(space.eval_u(features, x, self.M)) # exp(b) + p = ProcessPool(nodes=config.processes) + s_values = np.array(p.map(self.eval_s, sensor_values)) + + # Generate inputs + sensors = np.linspace(0, self.T, num=Nx)[:, None] + if representation == "samples": + sensor_values = self.random_process(space.eval_u(features, sensors, M)) + elif representation == "KL": + sensor_values = space.eval_KL_bases(features, sensors, M) + # sensor_values = self.random_process(sensor_values) + res = [ + make_triple(sensor_values[i], x, s_values[i], self.npoints_output) + for i in range(num) + ] + res = np.vstack(res) + m = sensor_values.shape[1] + return [res[:, :m], res[:, m:-1]], res[:, -1:] + + @timing + def gen_example_data(self, space, l, Nx, M, representation): + print("Generating example operator data...", flush=True) + features = np.full((1, 1), l) + t = np.linspace(0, self.T, num=self.Nx)[:, None] + sensor_values = self.random_process(space.eval_u(features, t, self.M)) + s_value = self.eval_s(sensor_values) + + sensors = np.linspace(0, self.T, num=Nx)[:, None] + if representation == "samples": + sensor_value = self.random_process(space.eval_u(features, sensors, M)) + elif representation == "KL": + sensor_value = space.eval_KL_bases(features, sensors, M) + # sensor_value = self.random_process(sensor_value) + return [np.tile(sensor_value, (self.Nx, 1)), t], s_value[:, None] + + def eval_s(self, sensor_value): + h = self.T / (self.Nx - 1) + sensor_value = sensor_value.reshape((self.M, self.Nx)) + tmp = 1 / sensor_value # exp(-b) + v1 = trapz(tmp, h) + tmp *= self.f * np.linspace(0, self.T, num=self.Nx) + v2 = trapz(tmp, h) + C = 1 / v1[:, -1:] * v2[:, -1:] + v = C * v1 - v2 + return np.mean(v, axis=0) + # return np.std(v, axis=0) + # return np.mean(v ** 3, axis=0) + # skewness + # mean, std = np.mean(v, axis=0), np.std(v, axis=0) + # std[0], std[-1] = 1, 1 + # return (np.mean(v ** 3, axis=0) - 3 * mean * std ** 2 - mean ** 3) / (std ** 3 + 1e-13) + # return (np.mean(v ** 3, axis=0) - 3 * mean * std ** 2 - mean ** 3) / std ** 3 + # res = np.mean((v / std) ** 3, axis=0) - 3 * mean / std - (mean / std) ** 3 + # res[0], res[-1] = res[1], res[-2] + # return res + # kurtosis + # return np.mean((v - mean) ** 4, axis=0) / (std ** 4 + 1e-13) + + @timing + def gen_operator_data_path(self, space, Nx, M, num): + print("Generating operator data...", flush=True) + features = space.random(num) + x = np.linspace(0, self.T, num=self.Nx)[:, None] + bases = space.eval_KL_bases(features, x, M) + rv = np.random.randn(num, M) + # rv = np.clip(rv, -3.1, 3.1) + p = ProcessPool(nodes=config.processes) + s_values = np.array(p.map(self.eval_s_path, bases, rv)) + + sensors = np.linspace(0, self.T, num=Nx)[:, None] + sensor_values = 
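`SPDESystem.eval_s` above evaluates what appears to be the closed-form solution of the underlying elliptic problem: with diffusion coefficient e^{b(x)} (the exponentiated GRF) and constant forcing f, integrating twice and enforcing the boundary conditions gives:

```
-\big(e^{b(x)}\,u'(x)\big)' = f,\quad u(0) = u(T) = 0
\;\Rightarrow\;
u(x) = C\int_0^x e^{-b(s)}\,ds - \int_0^x f\,s\,e^{-b(s)}\,ds,
\qquad
C = \frac{\int_0^T f\,s\,e^{-b(s)}\,ds}{\int_0^T e^{-b(s)}\,ds},
```

with `v1`, `v2`, and `C` in the code as the trapezoidal approximations of these integrals, and the statistics taken over the M samples of b.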
+
+    @timing
+    def gen_operator_data_path(self, space, Nx, M, num):
+        print("Generating operator data...", flush=True)
+        features = space.random(num)
+        x = np.linspace(0, self.T, num=self.Nx)[:, None]
+        bases = space.eval_KL_bases(features, x, M)
+        rv = np.random.randn(num, M)
+        # rv = np.clip(rv, -3.1, 3.1)
+        p = ProcessPool(nodes=config.processes)
+        s_values = np.array(p.map(self.eval_s_path, bases, rv))
+
+        sensors = np.linspace(0, self.T, num=Nx)[:, None]
+        sensor_values = space.eval_KL_bases(features, sensors, M)
+        # sensor_values = self.random_process(sensor_values)
+        sensor_values = np.hstack((sensor_values, rv))
+        res = [
+            make_triple(sensor_values[i], x, s_values[i], self.npoints_output)
+            for i in range(num)
+        ]
+        res = np.vstack(res)
+        m = Nx * M
+        return [res[:, :m], res[:, m:-1]], res[:, -1:]
+
+    @timing
+    def gen_example_data_path(self, space, l, Nx, M):
+        print("Generating operator data...", flush=True)
+        features = np.full((1, 1), l)
+        t = np.linspace(0, self.T, num=self.Nx)[:, None]
+        bases = space.eval_KL_bases(features, t, M)
+        rv = np.random.randn(1, M)
+        # rv = np.clip(rv, -3.1, 3.1)
+        s_values = self.eval_s_path(bases[0], rv[0])
+
+        sensors = np.linspace(0, self.T, num=Nx)[:, None]
+        sensor_value = space.eval_KL_bases(features, sensors, M)
+        # sensor_value = self.random_process(sensor_value)
+        return (
+            [
+                np.tile(sensor_value, (self.Nx, 1)),
+                np.hstack((np.tile(rv, (self.Nx, 1)), t)),
+            ],
+            s_values[:, None],
+        )
+
+    def eval_s_path(self, bases, rv):
+        bases = bases.reshape((-1, self.Nx))
+        b = np.dot(rv, bases)
+        h = self.T / (self.Nx - 1)
+        tmp = np.exp(-b)
+        v1 = trapz(tmp, h)
+        tmp *= self.f * np.linspace(0, self.T, num=self.Nx)
+        v2 = trapz(tmp, h)
+        C = 1 / v1[-1] * v2[-1]
+        v = C * v1 - v2
+        return v
+
+
+def main():
+    # KL()
+    # return
+
+    # SODE statistical averages
+    # system = SODESystem(1, 1)
+    # representation = "samples"
+    # space = GRFs(1, "AE", 1, 2, N=10, interp="linear")
+    # Nx = 10
+    # M = 10
+    # representation = "KL"
+    # space = GRFs(1, "AE", 1, 2, N=100, interp="linear")
+    # Nx = 20
+    # M = 5
+    # X, y = system.gen_operator_data(space, Nx, M, 1000000, representation)
+    # np.savez_compressed("train.npz", X_train0=X[0], X_train1=X[1], y_train=y)
+    # X, y = system.gen_operator_data(space, Nx, M, 1000000, representation)
+    # np.savez_compressed("test.npz", X_test0=X[0], X_test1=X[1], y_test=y)
+    # X, y = system.gen_example_data(space, 1.5, Nx, M, representation, num=100)
+    # np.savez_compressed("example.npz", X_test0=X[0], X_test1=X[1], y_test=y)
+
+    # SPDE statistical averages
+    system = SPDESystem(1, 10, 100, 20000, 10)
+    space = GRFs(1, "RBF", 0.2, 2, N=100, interp="linear")
+    # representation = "samples"
+    # Nx = 10
+    # M = 10
+    representation = "KL"
+    Nx = 30
+    M = 8
+    X, y = system.gen_operator_data(space, Nx, M, 1000, representation)
+    np.savez_compressed("train.npz", X_train0=X[0], X_train1=X[1], y_train=y)
+    X, y = system.gen_operator_data(space, Nx, M, 1000, representation)
+    np.savez_compressed("test.npz", X_test0=X[0], X_test1=X[1], y_test=y)
+    for i in range(10):
+        X, y = system.gen_example_data(space, 0.2 + 0.2 * i, Nx, M, representation)
+        np.savez_compressed(
+            "example{}.npz".format(i), X_test0=X[0], X_test1=X[1], y_test=y
+        )
+    return
+
+    # SODE/SPDE pathwise solution
+    system = SODESystem(1, 1, Nx=100, npoints_output=100)
+    # system = SPDESystem(1, 10, 100, None, 100)
+    space = GRFs(1, "RBF", 1, 2, N=100, interp="linear")
+    Nx = 20
+    M = 5
+    X, y = system.gen_operator_data_path(space, Nx, M, 10000)
+    np.savez_compressed("train.npz", X_train0=X[0], X_train1=X[1], y_train=y)
+    X, y = system.gen_operator_data_path(space, Nx, M, 10000)
+    np.savez_compressed("test.npz", X_test0=X[0], X_test1=X[1], y_test=y)
+    for i in range(10):
+        X, y = system.gen_example_data_path(space, 1.5, Nx, M)
+        np.savez_compressed(
+            "example{}.npz".format(i), X_test0=X[0], X_test1=X[1], y_test=y
+        )
+
+
+if __name__ == "__main__":
+    main()
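
For orientation, a minimal sketch (not part of the patch) of how the datasets written by `main()` above can be loaded; the key names follow the `np.savez_compressed` calls, everything else is illustrative:

```python
import numpy as np

# Load the operator dataset written by sde.py.
d = np.load("train.npz")
X_branch, X_trunk, y = d["X_train0"], d["X_train1"], d["y_train"]
# X_branch: input-function representation (samples or KL bases) per data point,
# X_trunk: query location(s), y: target operator value.
print(X_branch.shape, X_trunk.shape, y.shape)
```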
diff --git a/src/spaces.py b/src/spaces.py
new file mode 100644
index 0000000..2734a13
--- /dev/null
+++ b/src/spaces.py
@@ -0,0 +1,162 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import matplotlib.pyplot as plt
+import numpy as np
+from pathos.pools import ProcessPool
+from scipy import linalg, interpolate
+from sklearn import gaussian_process as gp
+
+import config
+from utils import eig
+
+
+class FinitePowerSeries:
+    def __init__(self, N=100, M=1):
+        self.N = N
+        self.M = M
+
+    def random(self, n):
+        return 2 * self.M * np.random.rand(n, self.N) - self.M
+
+    def eval_u_one(self, a, x):
+        return np.dot(a, x ** np.arange(self.N))
+
+    def eval_u(self, a, sensors):
+        mat = np.ones((self.N, len(sensors)))
+        for i in range(1, self.N):
+            mat[i] = np.ravel(sensors ** i)
+        return np.dot(a, mat)
+
+
+class FiniteChebyshev:
+    def __init__(self, N=100, M=1):
+        self.N = N
+        self.M = M
+
+    def random(self, n):
+        return 2 * self.M * np.random.rand(n, self.N) - self.M
+
+    def eval_u_one(self, a, x):
+        return np.polynomial.chebyshev.chebval(2 * x - 1, a)
+
+    def eval_u(self, a, sensors):
+        return np.polynomial.chebyshev.chebval(2 * np.ravel(sensors) - 1, a.T)
+
+
+class GRF(object):
+    def __init__(self, T, kernel="RBF", length_scale=1, N=1000, interp="cubic"):
+        self.N = N
+        self.interp = interp
+        self.x = np.linspace(0, T, num=N)[:, None]
+        if kernel == "RBF":
+            K = gp.kernels.RBF(length_scale=length_scale)
+        elif kernel == "AE":
+            K = gp.kernels.Matern(length_scale=length_scale, nu=0.5)
+        self.K = K(self.x)
+        self.L = np.linalg.cholesky(self.K + 1e-13 * np.eye(self.N))
+
+    def random(self, n):
+        """Generate `n` random feature vectors.
+        """
+        u = np.random.randn(self.N, n)
+        return np.dot(self.L, u).T
+
+    def eval_u_one(self, y, x):
+        """Compute the function value at `x` for the feature `y`.
+        """
+        if self.interp == "linear":
+            return np.interp(x, np.ravel(self.x), y)
+        f = interpolate.interp1d(
+            np.ravel(self.x), y, kind=self.interp, copy=False, assume_sorted=True
+        )
+        return f(x)
+
+    def eval_u(self, ys, sensors):
+        """For a list of functions represented by the features `ys`,
+        compute the function values at the locations `sensors`.
+        """
+        if self.interp == "linear":
+            return np.vstack([np.interp(sensors, np.ravel(self.x), y).T for y in ys])
+        p = ProcessPool(nodes=config.processes)
+        res = p.map(
+            lambda y: interpolate.interp1d(
+                np.ravel(self.x), y, kind=self.interp, copy=False, assume_sorted=True
+            )(sensors).T,
+            ys,
+        )
+        return np.vstack(list(res))
+
+
+class GRF_KL(object):
+    def __init__(
+        self, T, kernel="RBF", length_scale=1, num_eig=10, N=100, interp="cubic"
+    ):
+        if not np.isclose(T, 1):
+            raise ValueError("Only support T = 1.")
+
+        self.num_eig = num_eig
+        if kernel == "RBF":
+            kernel = gp.kernels.RBF(length_scale=length_scale)
+        elif kernel == "AE":
+            kernel = gp.kernels.Matern(length_scale=length_scale, nu=0.5)
+        eigval, eigvec = eig(kernel, num_eig, N, eigenfunction=True)
+        eigvec *= eigval ** 0.5
+        x = np.linspace(0, T, num=N)
+        self.eigfun = [
+            interpolate.interp1d(x, y, kind=interp, copy=False, assume_sorted=True)
+            for y in eigvec.T
+        ]
+
+    def bases(self, sensors):
+        return np.array([np.ravel(f(sensors)) for f in self.eigfun])
+
+    def random(self, n):
+        """Generate `n` random feature vectors.
+        """
+        return np.random.randn(n, self.num_eig)
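+
+    # The eigenfunctions in `self.eigfun` are pre-scaled by sqrt(eigenvalue) in
+    # __init__, so a feature vector y of i.i.d. N(0, 1) coefficients represents
+    # the truncated Karhunen-Loeve expansion
+    #     u(x) = sum_i y_i * sqrt(lambda_i) * phi_i(x),
+    # which is what `eval_u_one` and `eval_u` below evaluate.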
+ """ + eigfun = [f(x) for f in self.eigfun] + return np.sum(eigfun * y) + + def eval_u(self, ys, sensors): + """For a list of functions represented by `ys`, + compute a list of a list of function values at a list `sensors`. + """ + eigfun = np.array([np.ravel(f(sensors)) for f in self.eigfun]) + return np.dot(ys, eigfun) + + +def space_samples(space, T): + features = space.random(100000) + sensors = np.linspace(0, T, num=1000) + u = space.eval_u(features, sensors[:, None]) + + plt.plot(sensors, np.mean(u, axis=0), "k") + plt.plot(sensors, np.std(u, axis=0), "k--") + plt.plot(sensors, np.cov(u.T)[0], "k--") + plt.plot(sensors, np.exp(-0.5 * sensors ** 2 / 0.2 ** 2)) + for ui in u[:3]: + plt.plot(sensors, ui) + plt.show() + + +def main(): + # space = FinitePowerSeries(N=100, M=1) + # space = FiniteChebyshev(N=20, M=1) + # space = GRF(1, length_scale=0.2, N=1000, interp="cubic") + # space = GRF_KL(1, length_scale=0.2, num_eig=10, N=100, interp="cubic") + # space_samples(space, 1) + + space1 = GRF(1, length_scale=0.1, N=100, interp="cubic") + space2 = GRF(1, length_scale=1, N=100, interp="cubic") + W2 = np.trace(space1.K + space2.K - 2 * linalg.sqrtm(space1.K @ space2.K)) ** 0.5 / 100 ** 0.5 + print(W2) + + +if __name__ == "__main__": + main() diff --git a/src/system.py b/src/system.py new file mode 100644 index 0000000..eef6c69 --- /dev/null +++ b/src/system.py @@ -0,0 +1,321 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from pathos.pools import ProcessPool +from scipy import interpolate +from scipy.integrate import solve_ivp +from scipy.special import legendre + +import config +from ADR_solver import solve_ADR +from ADVD_solver import solve_ADVD +from CVC_solver import solve_CVC +from utils import timing + + +class LTSystem(object): + def __init__(self, npoints_output): + """Legendre transform J_n{f(x)}. + + Args: + npoints_output: For a input function, choose n=0,1,2,...,`npoints_output`-1 as data. + """ + self.npoints_output = npoints_output + + @timing + def gen_operator_data(self, space, m, num): + """For each input function, generate `npoints_output` data, so the total number N = num x npoints_output. + """ + print("Generating operator data...", flush=True) + features = space.random(num) + sensors = np.linspace(0, 2, num=m)[:, None] + sensor_values = space.eval_u(features, sensors) + + sensor_values_tile = np.tile(sensor_values, (1, self.npoints_output)).reshape( + [-1, m] + ) + ns = np.tile(np.arange(self.npoints_output)[:, None], (num, 1)) + s_values = np.vstack(list(map(self.eval_s, sensor_values))) + return [sensor_values_tile, ns], s_values + + def eval_s(self, sensor_value): + """Compute J_n{f(x)} for a `sensor_value` of `f` with n=0,1,...,'npoints_output'-1. 
+ """ + x = np.linspace(-1, 1, num=10000) + samplings = interpolate.interp1d( + np.linspace(-1, 1, len(sensor_value)), sensor_value, kind="cubic" + )(x) + ns = np.arange(self.npoints_output) + ys = np.vstack(list(map(lambda n: legendre(n)(x), ns))) + + return np.sum((samplings * ys)[:, 1:], axis=1, keepdims=True) * (x[1] - x[0]) + + +class ODESystem(object): + def __init__(self, g, s0, T): + self.g = g + self.s0 = s0 + self.T = T + + @timing + def gen_operator_data(self, space, m, num): + print("Generating operator data...", flush=True) + features = space.random(num) + sensors = np.linspace(0, self.T, num=m)[:, None] + sensor_values = space.eval_u(features, sensors) + x = self.T * np.random.rand(num)[:, None] + y = self.eval_s_space(space, features, x) + return [sensor_values, x], y + + def eval_s_space(self, space, features, x): + """For a list of functions in `space` represented by `features` + and a list `x`, compute the corresponding list of outputs. + """ + + def f(feature, xi): + return self.eval_s(lambda t: space.eval_u_one(feature, t), xi[0]) + + p = ProcessPool(nodes=config.processes) + res = p.map(f, features, x) + return np.array(list(res)) + + def eval_s_func(self, u, x): + """For an input function `u` and a list `x`, compute the corresponding list of outputs. + """ + res = map(lambda xi: self.eval_s(u, xi[0]), x) + return np.array(list(res)) + + def eval_s(self, u, tf): + """Compute `s`(`tf`) for an input function `u`. + """ + + def f(t, y): + return self.g(y, u(t), t) + + sol = solve_ivp(f, [0, tf], self.s0, method="RK45") + return sol.y[0, -1:] + + +class DRSystem(object): + def __init__(self, D, k, T, Nt, npoints_output): + """Diffusion-reaction on the domain [0, 1] x [0, T]. + + Args: + T: Time [0, T]. + Nt: Nt in FDM + npoints_output: For a input function, randomly choose these many points from the solver output as data + """ + self.D = D + self.k = k + self.T = T + self.Nt = Nt + self.npoints_output = npoints_output + + @timing + def gen_operator_data(self, space, m, num): + """For each input function, generate `npoints_output` data, so the total number N = num x npoints_output. + """ + print("Generating operator data...", flush=True) + features = space.random(num) + sensors = np.linspace(0, 1, num=m)[:, None] + sensor_values = space.eval_u(features, sensors) + # p = ProcessPool(nodes=config.processes) + # s_values = p.map(self.eval_s, sensor_values) + s_values = map(self.eval_s, sensor_values) + res = np.vstack(list(map(self.eval_s_sampling, sensor_values, s_values))) + return [res[:, :m], res[:, m:-1]], res[:, -1:] + + def eval_s_sampling(self, sensor_value, s): + """Given a `sensor_value` of `u` and the corresponding solution `s`, generate the + sampling outputs. + """ + m = sensor_value.shape[0] + x = np.random.randint(m, size=self.npoints_output) + t = np.random.randint(self.Nt, size=self.npoints_output) + xt = np.hstack([x[:, None], t[:, None]]) * [1 / (m - 1), self.T / (self.Nt - 1)] + y = s[x][range(self.npoints_output), t][:, None] + return np.hstack([np.tile(sensor_value, (self.npoints_output, 1)), xt, y]) + + def eval_s(self, sensor_value): + """Compute s(x, t) over m * Nt points for a `sensor_value` of `u`. 
+ """ + return solve_ADR( + 0, + 1, + 0, + self.T, + lambda x: self.D * np.ones_like(x), + lambda x: np.zeros_like(x), + lambda u: self.k * u ** 2, + lambda u: 2 * self.k * u, + lambda x, t: np.tile(sensor_value[:, None], (1, len(t))), + lambda x: np.zeros_like(x), + len(sensor_value), + self.Nt, + )[2] + + +class CVCSystem(object): + def __init__(self, f, g, T, Nt, npoints_output): + """Advection on the domain [0, 1] x [0, T]. + + Args: + T: Time [0, T]. + Nt: Nt in FDM + npoints_output: For a input function, randomly choose these many points from the solver output as data + """ + self.f = f + self.g = g + self.T = T + self.Nt = Nt + self.npoints_output = npoints_output + + @timing + def gen_operator_data(self, space, m, num): + """For each input function, generate `npoints_output` data, so the total number N = num x npoints_output. + """ + print("Generating operator data...", flush=True) + features = space.random(num) + sensors = np.linspace(0, 1, num=m)[:, None] + # Case I Input: V(sin^2(pi*x)) + sensor_values = space.eval_u(features, np.sin(np.pi * sensors) ** 2) + # Case II Input: x*V(x) + # sensor_values = sensors.T * space.eval_u(features, sensors) + # Case III/IV Input: V(x) + # sensor_values = space.eval_u(features, sensors) + # p = ProcessPool(nodes=config.processes) + # s_values = np.array(p.map(self.eval_s, sensor_values)) + s_values = np.array(list(map(self.eval_s, sensor_values))) + res = np.vstack(list(map(self.eval_s_sampling, sensor_values, s_values))) + return [res[:, :m], res[:, m:-1]], res[:, -1:] + + def eval_s_sampling(self, sensor_value, s): + """Given a `sensor_value` of `u` and the corresponding solution `s`, generate the + sampling outputs. + """ + m = sensor_value.shape[0] + x = np.random.randint(m, size=self.npoints_output) + t = np.random.randint(self.Nt, size=self.npoints_output) + xt = np.hstack([x[:, None], t[:, None]]) * [1 / (m - 1), self.T / (self.Nt - 1)] + y = s[x][range(self.npoints_output), t][:, None] + return np.hstack([np.tile(sensor_value, (self.npoints_output, 1)), xt, y]) + + def eval_s(self, sensor_value): + """Compute s(x, t) over m * Nt points for a `sensor_value` of `u`. + """ + # Case I: Analytical solution for a(x)=1, u(x,0)=V(x) (V,V' periodic) + return solve_CVC( + 0, + 1, + 0, + self.T, + self.f, + self.g, + interpolate.interp1d( + np.linspace(0, 1, len(sensor_value)), sensor_value, kind="cubic" + ), + len(sensor_value), + self.Nt, + )[2] + # Case II: Wendroff for a(x)=1, u(x,0)=f(x), u(0,t)=g(t) (f(0)=g(0)) + """ + return solve_CVC( + 0, + 1, + 0, + self.T, + lambda x: sensor_value, + lambda t: np.zeros_like(t), + None, + len(sensor_value), + self.Nt, + )[2] + """ + # Case III: Wendroff for a(x)=1+0.1*V(x), u(x,0)=f(x), u(0,t)=g(t) (f(0)=g(0)) + """ + return solve_CVC( + 0, + 1, + 0, + self.T, + lambda x: x ** 2, + lambda t: np.sin(np.pi * t), + lambda x: sensor_value, + len(sensor_value), + self.Nt, + )[2] + """ + # Case IV: Wendroff for a(x)=1+0.1*(V(x)+V(1-x))/2, u(x,0)=f(x) (f,f' periodic) + """ + return solve_CVC( + 0, + 1, + 0, + self.T, + lambda x: np.sin(2 * np.pi * x), + None, + lambda x: sensor_value, + len(sensor_value), + self.Nt, + )[2] + """ + + +class ADVDSystem(object): + def __init__(self, f, g, T, Nt, npoints_output): + """Advection-diffusion on the domain [0, 1] x [0, T]. + + Args: + T: Time [0, T]. 
+
+class ADVDSystem(object):
+    def __init__(self, f, g, T, Nt, npoints_output):
+        """Advection-diffusion on the domain [0, 1] x [0, T].
+
+        Args:
+            T: Time [0, T].
+            Nt: Number of time steps in the finite difference solver.
+            npoints_output: For an input function, randomly choose this many points from the solver output as data.
+        """
+        self.f = f
+        self.g = g
+        self.T = T
+        self.Nt = Nt
+        self.npoints_output = npoints_output
+
+    @timing
+    def gen_operator_data(self, space, m, num):
+        """For each input function, generate `npoints_output` data points, so the total number is N = num x npoints_output.
+        """
+        print("Generating operator data...", flush=True)
+        features = space.random(num)
+        sensors = np.linspace(0, 1, num=m)[:, None]
+        # Input: V(sin^2(pi*x))
+        sensor_values = space.eval_u(features, np.sin(np.pi * sensors) ** 2)
+        # p = ProcessPool(nodes=config.processes)
+        # s_values = np.array(p.map(self.eval_s, sensor_values))
+        s_values = np.array(list(map(self.eval_s, sensor_values)))
+        res = np.vstack(list(map(self.eval_s_sampling, sensor_values, s_values)))
+        return [res[:, :m], res[:, m:-1]], res[:, -1:]
+
+    def eval_s_sampling(self, sensor_value, s):
+        """Given a `sensor_value` of `u` and the corresponding solution `s`, generate the
+        sampled outputs.
+        """
+        m = sensor_value.shape[0]
+        x = np.random.randint(m, size=self.npoints_output)
+        t = np.random.randint(self.Nt, size=self.npoints_output)
+        xt = np.hstack([x[:, None], t[:, None]]) * [1 / (m - 1), self.T / (self.Nt - 1)]
+        y = s[x][range(self.npoints_output), t][:, None]
+        return np.hstack([np.tile(sensor_value, (self.npoints_output, 1)), xt, y])
+
+    def eval_s(self, sensor_value):
+        """Compute s(x, t) over m * Nt points for a `sensor_value` of `u`.
+        """
+        # Solve on a 10x finer time grid, then subsample back to Nt steps.
+        Nt_pc = (self.Nt - 1) * 10 + 1
+        return solve_ADVD(
+            0,
+            1,
+            0,
+            self.T,
+            self.f,
+            self.g,
+            lambda x: sensor_value,
+            len(sensor_value),
+            Nt_pc,
+        )[2][:, 0:Nt_pc:10]

diff --git a/src/utils.py b/src/utils.py
new file mode 100644
index 0000000..4f9af3a
--- /dev/null
+++ b/src/utils.py
@@ -0,0 +1,117 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import sys
+import time
+from functools import wraps
+
+import numpy as np
+
+
+def timing(f):
+    """Decorator for measuring the execution time of methods."""
+
+    @wraps(f)
+    def wrapper(*args, **kwargs):
+        ts = time.time()
+        result = f(*args, **kwargs)
+        te = time.time()
+        print("%r took %f s\n" % (f.__name__, te - ts))
+        sys.stdout.flush()
+        return result
+
+    return wrapper
+
+
+def merge_values(values):
+    return np.hstack(values) if isinstance(values, (list, tuple)) else values
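+
+
+# Example (illustrative): merge_values([np.ones((2, 3)), np.zeros((2, 1))]) gives
+# an array of shape (2, 4), while a plain ndarray is passed through unchanged.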
+def trim_to_65535(x):
+    """Split `x` into a first batch of at most 65535 elements and the remainder.
+
+    TensorFlow produces incorrect outputs when the batch size is larger than 65535:
+    https://github.com/tensorflow/tensorflow/issues/9870
+    https://github.com/tensorflow/tensorflow/issues/13869
+    """
+    N = 65535
+    if isinstance(x, (list, tuple)):
+        return (x[0][:N], x[1][:N]), (x[0][N:], x[1][N:])
+    return x[:N], x[N:]
+
+
+def mean_squared_error_outlier(y_true, y_pred):
+    # Discard the largest 0.1% of the squared errors as outliers.
+    error = np.ravel((y_true - y_pred) ** 2)
+    error = np.sort(error)[: -len(error) // 1000]
+    return np.mean(error)
+
+
+def safe_test(model, data, X_test, y_test, fname=None):
+    def is_nonempty(X):
+        return len(X[0]) > 0 if isinstance(X, (list, tuple)) else len(X) > 0
+
+    y_pred = []
+    X = X_test
+    while is_nonempty(X):
+        X_add, X = trim_to_65535(X)
+        y_pred.append(model.predict(data.transform_inputs(X_add)))
+    y_pred = np.vstack(y_pred)
+    error = np.mean((y_test - y_pred) ** 2)
+    print("Test MSE: {}".format(error))
+    error = mean_squared_error_outlier(y_test, y_pred)
+    print("Test MSE w/o outliers: {}\n".format(error))
+
+    if fname is not None:
+        np.savetxt(fname, np.hstack((X_test[1], y_test, y_pred)))
+
+
+def eig(kernel, num, Nx, eigenfunction=True):
+    """Compute the eigenvalues and eigenfunctions of a kernel on [0, 1].
+    """
+    h = 1 / (Nx - 1)
+    c = kernel(np.linspace(0, 1, num=Nx)[:, None])[0] * h
+    A = np.empty((Nx, Nx))
+    for i in range(Nx):
+        A[i, i:] = c[: Nx - i]
+        A[i, i::-1] = c[: i + 1]
+    A[:, 0] *= 0.5
+    A[:, -1] *= 0.5
+
+    if not eigenfunction:
+        return np.flipud(np.sort(np.real(np.linalg.eigvals(A))))[:num]
+
+    eigval, eigvec = np.linalg.eig(A)
+    eigval, eigvec = np.real(eigval), np.real(eigvec)
+    idx = np.flipud(np.argsort(eigval))[:num]
+    eigval, eigvec = eigval[idx], eigvec[:, idx]
+    for i in range(num):
+        eigvec[:, i] /= np.trapz(eigvec[:, i] ** 2, dx=h) ** 0.5
+    return eigval, eigvec
+
+
+def trapz(y, dx):
+    """Integrate [y(x1), y(x2), ...] or [[y1(x1), y1(x2), ...], [y2(x1), y2(x2), ...], ...]
+    using the composite trapezoidal rule.
+
+    Return: [I1(x1)=0, I1(x2), ...] or [[I1(x1)=0, I1(x2), ...], [I2(x1)=0, I2(x2), ...], ...],
+    i.e., the cumulative integral from x1 up to each grid point.
+    """
+    if len(y.shape) == 1:
+        left = np.cumsum(y)[:-1]
+        right = np.cumsum(y[1:])
+        return np.hstack(([0], (left + right) / 2 * dx))
+    left = np.cumsum(y, axis=1)[:, :-1]
+    right = np.cumsum(y[:, 1:], axis=1)
+    return np.hstack((np.zeros((len(y), 1)), (left + right) / 2 * dx))
+
+
+def make_triple(sensor_value, x, y, num):
+    """For a `sensor_value` of `u`, a list of locations `x` and the corresponding solution `y`,
+    generate a dataset of `num` triples (u, x_i, y_i).
+
+    sensor_value: 1d array
+    x: 2d array, N x d
+    y: 1d array
+    """
+    idx = np.random.choice(len(x), size=num, replace=False)
+    x = x[idx]
+    y = y[idx][:, None]
+    return np.hstack([np.tile(sensor_value, (num, 1)), x, y])
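
As a quick sanity check of the helpers above (a standalone sketch, not part of the patch): `trapz` returns the cumulative trapezoidal integral, and `make_triple` packs (input function, location, output) rows for DeepONet training.

```python
import numpy as np

from utils import make_triple, trapz

# Cumulative integral of y = x on [0, 1] with 3 grid points: expect [0, 0.125, 0.5].
y = np.array([0.0, 0.5, 1.0])
print(trapz(y, dx=0.5))  # [0.    0.125 0.5  ]

# Pack 5 random (u, x, y) training triples from a solution sampled at 10 locations.
sensor_value = np.random.rand(20)       # u at 20 sensors
x = np.linspace(0, 1, num=10)[:, None]  # 10 candidate locations
s = np.random.rand(10)                  # solution values at those locations
triples = make_triple(sensor_value, x, s, num=5)
print(triples.shape)  # (5, 22) = 20 sensors + 1 location + 1 output
```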