adding docstrings to programs
hfaghihi15 committed Nov 1, 2023
1 parent e43524e commit 1b2fb00
Showing 3 changed files with 382 additions and 16 deletions.
69 changes: 66 additions & 3 deletions domiknows/program/model/lossModel.py
@@ -14,6 +14,29 @@ class LossModel(torch.nn.Module):
def __init__(self, graph,
tnorm='P',
sample = False, sampleSize = 0, sampleGlobalLoss = False, device='auto'):
"""
This function initializes a LossModel object with the given parameters and sets up the
necessary variables and constraints.
:param graph: The `graph` parameter is an object that represents the logical constraints of a
graph. It contains information about the nodes, edges, and constraints of the graph
:param tnorm: The `tnorm` parameter specifies the type of t-norm to be used in the model.
T-norms are a family of binary operations that are used to model logical conjunction (AND) in
fuzzy logic. The default value is 'P', which stands for the product t-norm, defaults to P
(optional)
:param sample: The `sample` parameter is a boolean flag that determines whether to use sampling
during training. If set to `True`, the model will use sampling to estimate the loss function. If
set to `False`, the model will not use sampling and will use the exact loss function, defaults
to False (optional)
:param sampleSize: The `sampleSize` parameter determines the size of the sample used for
training. It specifies the number of samples that will be randomly selected from the dataset for
each training iteration, defaults to 0 (optional)
:param sampleGlobalLoss: The parameter `sampleGlobalLoss` is a boolean flag that determines
whether to sample the global loss during training. If `sampleGlobalLoss` is set to `True`, the
global loss will be sampled. Otherwise, it will not be sampled, defaults to False (optional)
:param device: The `device` parameter specifies the device (CPU or GPU) on which the model will
be trained and evaluated. It can take the following values:, defaults to auto (optional)
"""
super().__init__()
self.graph = graph
self.build = True
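For reference, the product t-norm selected by tnorm='P' replaces boolean AND with multiplication over truth values in [0, 1]. A minimal standalone sketch of the idea; the helper name product_tnorm is illustrative, not part of DomiKnows:

import torch

def product_tnorm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Fuzzy conjunction under the product t-norm: T(a, b) = a * b.
    return a * b

a = torch.tensor(0.9)       # fuzzy truth value of one literal
b = torch.tensor(0.5)       # fuzzy truth value of another literal
print(product_tnorm(a, b))  # tensor(0.4500)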
@@ -59,6 +82,13 @@ def reset(self):
self.loss.reset()

def get_lmbd(self, key):
"""
The function `get_lmbd` returns a clamped value from a dictionary based on a given key.
:param key: The key parameter is used to access a specific value in the lmbd dictionary
:return: the value of `self.lmbd[self.lmbd_index[key]]` after clamping it to a maximum value of
`self.lmbd_p[self.lmbd_index[key]]`.
"""
return self.lmbd[self.lmbd_index[key]].clamp(max=self.lmbd_p[self.lmbd_index[key]])
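For illustration, the clamp caps each multiplier at its per-constraint ceiling. A standalone sketch with made-up values; the names lmbd and lmbd_p mirror the attributes above but are assumptions:

import torch

lmbd = torch.tensor([0.7, 1.5, 3.0])    # learned multipliers, one per constraint
lmbd_p = torch.tensor([1.0, 1.0, 2.0])  # per-constraint upper bounds
i = 1                                   # index resolved from the constraint key
print(lmbd[i].clamp(max=lmbd_p[i]))     # tensor(1.)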

def forward(self, builder, build=None):
@@ -96,18 +126,32 @@ def forward(self, builder, build=None):
# (*out, datanode, builder)
return lmbd_loss, datanode, builder

# The `PrimalDualModel` class is a subclass of `LossModel` that implements a primal-dual optimization
# algorithm.
class PrimalDualModel(LossModel):
logger = logging.getLogger(__name__)

def __init__(self, graph, tnorm='P', device='auto'):
"""
The above function is the constructor for a class that initializes an object with a graph,
tnorm, and device parameters.
:param graph: The `graph` parameter is the input graph that the coding assistant is being
initialized with. It represents the structure of the graph and can be used to perform various
operations on the graph, such as adding or removing nodes and edges, calculating node and edge
properties, and traversing the graph
:param tnorm: The tnorm parameter is used to specify the type of t-norm to be used in the graph.
A t-norm is a binary operation that generalizes the concept of conjunction (logical AND) to
fuzzy logic. The 'P' value for tnorm indicates that the product t-norm should, defaults to P
(optional)
:param device: The `device` parameter specifies the device on which the computations will be
performed. It can take the following values:, defaults to auto (optional)
"""
super().__init__(graph, tnorm=tnorm, device=device)
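As context for the class comment above, a primal-dual scheme alternates a primal step (minimizing the task loss plus multiplier-weighted constraint violations over the model weights) with a dual step (raising the multipliers on violated constraints). A schematic sketch of that loop, not the DomiKnows implementation; every name here is illustrative:

import torch

theta = torch.nn.Parameter(torch.randn(4))  # primal variables (model weights)
lmbd = torch.zeros(2)                       # dual variables (constraint multipliers)

def task_loss(t):
    return (t ** 2).sum()                   # stand-in for the supervised loss

def violations(t):
    # Stand-in for per-constraint violation degrees (0 means satisfied).
    return torch.stack([t.sum().abs(), (t[0] - 1).abs()])

opt = torch.optim.SGD([theta], lr=0.1)
for _ in range(100):
    loss = task_loss(theta) + (lmbd * violations(theta)).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()                              # primal step: descend in theta
    with torch.no_grad():
        lmbd += 0.01 * violations(theta)    # dual step: ascend in lmbd
        lmbd.clamp_(min=0)                  # keep multipliers non-negative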

class SampleLossModel(torch.nn.Module):
logger = logging.getLogger(__name__)

# def __init__(self, graph, sample = False, sampleSize = 0, sampleGlobalLoss = False):
# super().__init__(graph, sample=sample, sampleSize=sampleSize, sampleGlobalLoss=sampleGlobalLoss)

def __init__(self, graph,
tnorm='P',
sample = False, sampleSize = 0, sampleGlobalLoss = False, device='auto'):
@@ -148,12 +192,31 @@ def reset(self):
self.loss.reset()

def get_lmbd(self, key):
"""
The function `get_lmbd` returns the value of `self.lmbd` at the index specified by
`self.lmbd_index[key]`, ensuring that the value is non-negative.
:param key: The `key` parameter is used to access a specific element in the `self.lmbd` list. It
is used as an index to retrieve the corresponding value from the list
:return: the value of `self.lmbd[self.lmbd_index[key]]`.
"""
if self.lmbd[self.lmbd_index[key]] < 0:
with torch.no_grad():
self.lmbd[self.lmbd_index[key]] = 0
return self.lmbd[self.lmbd_index[key]]
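The guard above is the usual projection step that keeps dual multipliers non-negative. The same idea in isolation; the tensor values are illustrative:

import torch

lmbd = torch.nn.Parameter(torch.tensor([-0.2, 0.8]))  # dual multipliers
with torch.no_grad():      # modify in place without recording gradients
    lmbd.clamp_(min=0)     # project negative entries back to zero
print(lmbd)                # Parameter containing: tensor([0.0000, 0.8000], ...)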

def forward(self, builder, build=None):
"""
The `forward` function calculates the loss for a PrimalDualModel using a DataNodeBuilder and
returns the loss value, the DataNode, and the builder.
:param builder: The `builder` parameter is an instance of the `DataNodeBuilder` class. It is
used to create a batch root data node and retrieve a data node
:param build: The `build` parameter is an optional argument that specifies whether the
`DataNodeBuilder` should be invoked or not. If `build` is `None`, then the value of `self.build`
is used. If `build` is `True`, then the `createBatchRootDN()` method
:return: three values: lmbd_loss, datanode, and builder.
"""
if build is None:
build = self.build
self.iter_step += 1
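Based on the docstring, a hypothetical call site; builder is assumed to be a populated DataNodeBuilder and model an instance of one of the classes above:

# Hypothetical usage; none of these variable names come from the diff.
lmbd_loss, datanode, builder = model(builder)  # runs forward(builder)
lmbd_loss.backward()  # assuming lmbd_loss is a scalar constraint loss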
