
new net
enricobu96 committed Apr 14, 2024
1 parent 82fdc9e commit 84828a7
Showing 4 changed files with 84 additions and 20 deletions.
mACHINE-LEARNINGS/classes/MemeDataset.py (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ def __init__(self, data_dir, transform=None):
for path in self.img_path:
classes.add(os.path.basename(os.path.dirname(path)))
self.labels = {cls: i for i, cls in enumerate(sorted(list(classes)))}
self.resize = Resize(size = (640, 640))
self.resize = Resize(size = (160, 160))

def __len__(self):
return len(self.img_path)
mACHINE-LEARNINGS/eval.py (3 changes: 2 additions & 1 deletion)
@@ -23,7 +23,7 @@ def eval(image):
- Load all data: train, test, validation
"""
image = read_image(image, ImageReadMode.RGB).float()
resize = Resize(size = (640, 640))
resize = Resize(size = (160, 160))
image = resize(image)

"""
@@ -40,6 +40,7 @@ def eval(image):
image = image.unsqueeze(0)
output = model(image.to(device))
prediction = output.data
print(prediction.item())
print('This meme is', cls[0 if prediction.item() < .5 else 1])


mACHINE-LEARNINGS/models/DankCNN.py (87 changes: 74 additions & 13 deletions)
@@ -6,31 +6,92 @@ class DankCNN(nn.Module):

def __init__(self, dropout=False):
super(DankCNN, self).__init__()
self.act = nn.ReLU()

self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=4)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=4)
self.linear_layer1 = nn.Linear(758912, 128)
self.linear_layer2 = nn.Linear(128, 96)
self.linear_layer3 = nn.Linear(96, 1)
self.dropout = nn.Dropout(p=.3)
self.dropout_flag = dropout
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) #todo

self.linear_layer1 = nn.Linear(36992, 512)
self.linear_layer2 = nn.Linear(512, 256)
self.linear_layer3 = nn.Linear(256, 128)
self.linear_layer4 = nn.Linear(128, 64)
self.linear_layer5 = nn.Linear(64, 32)
self.linear_layer6 = nn.Linear(32, 1)


# self.linear_layer3 = nn.Linear(4096, 1024)
# self.linear_layer4 = nn.Linear(1024, 128)
# self.linear_layer5 = nn.Linear(128, 32)
# self.linear_layer6 = nn.Linear(32, 1)




def forward(self, x):

x = self.conv1(x)
x = F.relu(x)
x = self.pool(x)
x = self.act(x)
x = self.pool1(x)

x = self.conv2(x)
x = F.relu(x)
x = self.act(x)
x = self.pool2(x)

x = self.conv3(x)
x = F.relu(x)
x = self.act(x)
x = torch.flatten(x, 1)

x = self.linear_layer1(x)
x = F.relu(x)
x = self.act(x)
x = self.linear_layer2(x)
x = F.relu(x)
x = self.dropout(x) if self.dropout_flag else x
x = self.act(x)
x = self.linear_layer3(x)
x = self.act(x)
x = self.linear_layer4(x)
x = self.act(x)
x = self.linear_layer5(x)
x = self.act(x)
x = self.linear_layer6(x)
# x = self.act(x)

return F.sigmoid(x)



# # Creating a CNN-based image classifier.
class ImageClassifier(nn.Module):
def __init__(self):
super().__init__()
self.conv_layer_1 = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.MaxPool2d(2))
self.conv_layer_2 = nn.Sequential(
nn.Conv2d(64, 512, 3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(512),
nn.MaxPool2d(2))
self.conv_layer_3 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.BatchNorm2d(512),
nn.MaxPool2d(2))
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=512*3*3, out_features=2))
def forward(self, x: torch.Tensor):
x = self.conv_layer_1(x)
x = self.conv_layer_2(x)
x = self.conv_layer_3(x)
x = self.conv_layer_3(x)
x = self.conv_layer_3(x)
x = self.conv_layer_3(x)
x = self.classifier(x)
return x
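
Side note on the size change in DankCNN: with the Resize target dropping from 640x640 to 160x160 in MemeDataset.py and eval.py, the flattened output of conv1 -> pool1 -> conv2 -> pool2 -> conv3 shrinks from 758912 to 36992 features, which matches the switch from nn.Linear(758912, 128) to nn.Linear(36992, 512). A minimal sketch of that arithmetic (not part of the commit; conv_out and flattened_features are illustrative helpers, and pool3 is ignored because forward() never applies it):

def conv_out(size, kernel, stride=1):
    # Spatial size after an unpadded convolution or pooling layer.
    return (size - kernel) // stride + 1

def flattened_features(side):
    side = conv_out(side, 4)            # conv1, kernel 4
    side = conv_out(side, 3, stride=2)  # pool1, kernel 3, stride 2
    side = conv_out(side, 4)            # conv2, kernel 4
    side = conv_out(side, 2, stride=2)  # pool2, kernel 2, stride 2
    side = conv_out(side, 4)            # conv3, kernel 4
    return 32 * side * side             # conv3 has 32 output channels

print(flattened_features(160))  # 36992  -> new nn.Linear(36992, 512)
print(flattened_features(640))  # 758912 -> old nn.Linear(758912, 128)
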
mACHINE-LEARNINGS/train.py (12 changes: 7 additions & 5 deletions)
@@ -42,16 +42,18 @@ def execute(train_set_size, batch_size, lr, epochs, is_verbose, weight_decay):
"train": DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True),
"test": DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)
}

"""
MODEL INITIALIZATION
- optimizer: RMSprop (the Adam + weight decay variant is kept commented out as an alternative)
- loss function: hinge embedding loss (binary cross entropy is kept commented out as an alternative)
"""
model = DankCNN()
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
loss_function = torch.nn.BCELoss()
# optimizer = torch.optim.Adam(model.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
optimizer = torch.optim.RMSprop(model.parameters(), lr=.1, alpha=.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
# loss_function = torch.nn.BCELoss()
loss_function = torch.nn.HingeEmbeddingLoss()

"""
TRAINING PHASE
@@ -76,8 +78,8 @@ def execute(train_set_size, batch_size, lr, epochs, is_verbose, weight_decay):
acc += (label == predictions).sum()/len(label)

if IS_VERBOSE:
print('Training: Epoch %d - Batch %d/%d: Loss: %.4f' %
(epoch+1, batch_num+1, len(dataloaders["train"]), train_loss / (batch_num + 1)))
print('Training: Epoch %d - Batch 🌭/%d: Loss: %.4f' %
(epoch+1, len(dataloaders["train"]), train_loss / (batch_num + 1)))

n_batch = batch_num

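
One caveat on the loss swap in train.py: torch.nn.HingeEmbeddingLoss expects targets of -1/+1, while MemeDataset produces 0/1 class labels and eval.py still thresholds the sigmoid output at 0.5, as BCELoss assumes. If the hinge loss is kept, the 0/1 labels would need to be remapped, roughly along these lines (an illustrative sketch, not part of the commit; the tensors below are stand-ins for the real batch):

import torch

loss_function = torch.nn.HingeEmbeddingLoss()

def to_hinge_targets(label):
    # Map 0/1 dataset labels to the -1/+1 targets HingeEmbeddingLoss expects.
    return label.float() * 2 - 1

output = torch.rand(8, 1)            # stand-in for model(image.to(device))
label = torch.randint(0, 2, (8, 1))  # stand-in for the 0/1 labels from the DataLoader
loss = loss_function(output, to_hinge_targets(label))
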
