Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
pjyazdian authored Feb 7, 2024
1 parent b1e9d7e commit 8e2f45d
Show file tree
Hide file tree
Showing 22 changed files with 6,286 additions and 6,286 deletions.
4,008 changes: 2,004 additions & 2,004 deletions scripts/model/Autoencoder_VQVAE_model.py

Large diffs are not rendered by default.

776 changes: 388 additions & 388 deletions scripts/model/Autoencoder_model.py

Large diffs are not rendered by default.

1,450 changes: 725 additions & 725 deletions scripts/model/DAE_model.py

Large diffs are not rendered by default.

1,680 changes: 840 additions & 840 deletions scripts/model/Helper_models.py

Large diffs are not rendered by default.

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added scripts/model/__pycache__/tcn.cpython-38.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added scripts/model/__pycache__/vocab.cpython-38.pyc
Binary file not shown.
Binary file added scripts/model/__pycache__/vocab.cpython-39.pyc
Binary file not shown.
74 changes: 37 additions & 37 deletions scripts/model/autoencoder_backup.py
Original file line number Diff line number Diff line change
@@ -1,38 +1,38 @@
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import math


class DAE_Network(nn.Module):
def __init__(self, motion_dim, latent_dim):
super(DAE_Network, self).__init__()
print("init");
self.encoder = nn.Sequential(
nn.Linear(motion_dim, latent_dim),
nn.Tanh(),
# nn.Linear(motion_dim-10, motion_dim-30),
# nn.Tanh(),
)

self.decoder = nn.Sequential(
# nn.Linear(motion_dim-30, motion_dim-10),
# nn.Tanh(),
nn.Linear(latent_dim, motion_dim),

)

def forward(self, x):
# print("_________________")
# print(self.encoder)
# print(x.shape)
# print("_________________")
x = torch.squeeze(x)
# print(x.shape)
x = self.encoder(x)
# print("Encoded", x.shape)
x = self.decoder(x)
x = torch.unsqueeze(x, 2)
# print("Decoder", x.shape)
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import math


class DAE_Network(nn.Module):
    """Single-layer denoising autoencoder for motion feature vectors.

    The encoder is one Tanh-activated linear projection from ``motion_dim``
    down to ``latent_dim``; the decoder is one linear projection back up to
    ``motion_dim`` (no output activation, so reconstructions are unbounded).
    """

    def __init__(self, motion_dim, latent_dim):
        """Build the encoder/decoder pair.

        Args:
            motion_dim: Dimensionality of one input motion frame.
            latent_dim: Size of the compressed latent representation.
        """
        super(DAE_Network, self).__init__()
        print("init")  # debug trace kept from the original implementation
        self.encoder = nn.Sequential(
            nn.Linear(motion_dim, latent_dim),
            nn.Tanh(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, motion_dim),
        )

    def forward(self, x):
        """Encode then decode ``x``, restoring a trailing singleton dim.

        Args:
            x: Input tensor; presumably shaped ``(batch, motion_dim, 1)``
               given the squeeze/unsqueeze pair — TODO confirm with callers.

        Returns:
            Reconstruction with a singleton dimension re-inserted at axis 2.
        """
        # NOTE(review): bare squeeze() drops *every* size-1 dimension, so a
        # batch of size 1 would also lose its batch axis — consider
        # torch.squeeze(x, 2) once the input shape is confirmed.
        x = torch.squeeze(x)
        x = self.encoder(x)
        x = self.decoder(x)
        x = torch.unsqueeze(x, 2)
        return x
Loading

0 comments on commit 8e2f45d

Please sign in to comment.