gradientDescent.py
import torch
import random
'''
Usage:
    backend = MLBackend()                # create a new backend object
    backend.dataLoad(float[12], float)   # give the backend the user's response to a profile after every like/dislike
    backend.getProfile()                 # returns a profile as a list of 12 floats (float[12])
'''
class MLBackend:
    def __init__(self):
        self.batch_size = 10
        self.parameter_size = 12
        self.sample_size = 100
        # Define the model: a single linear layer mapping a 12-float profile to a scalar rating
        self.model = torch.nn.Linear(self.parameter_size, 1)
        # Define the loss function and optimizer
        self.loss_fn = torch.nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.1)
        # Accumulated (profile, rating) training pairs, pre-shaped so torch.cat works in dataLoad
        self.x = torch.empty((0, self.parameter_size))
        self.y = torch.empty((0, 1))
        self.initTrain()
        self.profileCache = self.initProfileGenerate()
        self.pageNum = 0
    def initTrain(self):
        for i in range(self.batch_size):
            newInput, newResponse = self.generateTrainingData()
            self.dataLoad(newInput, newResponse)

    def dotProduct(self, list1, list2):
        return sum(x * y for x, y in zip(list1, list2))
    def clearCache(self):
        self.x = torch.empty((0, self.parameter_size))
        self.y = torch.empty((0, 1))

    # Append one (profile, rating) pair; retrain once a full batch has accumulated
    def dataLoad(self, newInput, newResponse):
        self.x = torch.cat((self.x, torch.tensor([newInput])), 0)
        self.y = torch.cat((self.y, torch.tensor([[newResponse]])), 0)
        if self.y.size()[0] >= self.batch_size:
            self.updateGD()
    def updateGD(self):
        print('Gradient Descent Updated')
        # Training loop: several gradient steps over the accumulated batch
        for i in range(self.batch_size):
            # Forward pass
            y_pred = self.model(self.x)
            loss = self.loss_fn(y_pred, self.y)
            # Backward pass
            self.optimizer.zero_grad()
            loss.backward()
            # Update weights
            self.optimizer.step()
        self.clearCache()
    def generateTrainingData(self):
        # Generate one random profile and a random rating as placeholder training data
        newInput = [random.random() for j in range(self.parameter_size)]
        # Interactive alternative: newResponse = float(input('Response: '))
        newResponse = random.random()
        return newInput, newResponse
    def initProfileGenerate(self):
        newUserParameters = []
        for i in range(self.batch_size):
            singleUser = []
            for j in range(self.parameter_size):
                singleUser.append(random.random())
            newUserParameters.append(singleUser)
        return newUserParameters
    def recommendProfile(self):
        print('New Profiles Recommended')
        # Generate sample_size candidate profiles
        newUserParameters = []
        for i in range(self.sample_size // self.batch_size):
            newUserParameters += self.initProfileGenerate()
        # Score each candidate with the learned weights and bias (the weights
        # themselves, not their gradients) and append the score to the profile
        weights = self.model.weight[0].detach()
        bias = self.model.bias.detach()
        for i in range(self.sample_size):
            rating = self.dotProduct(weights, newUserParameters[i]).item() + bias.item()
            newUserParameters[i].append(rating)
        # Sort by score (highest first), keep the top batch_size profiles,
        # and strip the appended score so each profile is 12 floats again
        newUserParameters = sorted(newUserParameters, key=lambda x: x[-1], reverse=True)
        newUserParameters = [profile[:self.parameter_size] for profile in newUserParameters[:self.batch_size]]
        return newUserParameters
    def getProfile(self):
        # Return the next cached profile; refresh the recommendations once the cache is exhausted
        profile = self.profileCache[self.pageNum]
        self.pageNum += 1
        if self.pageNum >= self.batch_size:
            self.profileCache = self.recommendProfile()
            self.pageNum = 0
        return profile
if __name__ == '__main__':
    backEnd = MLBackend()
    for i in range(32):
        backEnd.dataLoad(backEnd.getProfile(), 1.0)
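
    # A minimal sketch, assuming ratings come from a simulated user rather than
    # the constant 1.0 above; 'simulatedRating' is an illustrative name, not
    # part of the module's API.
    for i in range(backEnd.batch_size):
        profile = backEnd.getProfile()
        simulatedRating = random.random()  # stand-in for a real like/dislike signal
        backEnd.dataLoad(profile, simulatedRating)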