Skip to content

Commit

Permalink
Merge branch 'staging' into no-reregister-wallet
Browse files Browse the repository at this point in the history
  • Loading branch information
ifrit98 committed Oct 26, 2023
2 parents b868941 + b6847d3 commit 0ea3f43
Show file tree
Hide file tree
Showing 5 changed files with 21 additions and 24 deletions.
2 changes: 0 additions & 2 deletions neurons/validators/validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@
OpenAssistantRewardModel,
ReciprocateRewardModel,
RelevanceRewardModel,
MockRewardModel,
DahoasRewardModel,
DiversityRewardModel,
PromptRewardModel,
Expand Down Expand Up @@ -190,7 +189,6 @@ def __init__(self):
MockRewardModel(RewardModelType.nsfw.value),
]
bt.logging.debug(str(self.reward_functions))
self.blacklist = MockRewardModel(RewardModelType.blacklist.value)
else:
self.reward_weights = torch.tensor(
[
Expand Down
22 changes: 20 additions & 2 deletions prompting/validators/mock.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,14 @@
import bittensor as bt
from prompting.validators.prompts import FirewallPrompt, FollowupPrompt, AnswerPrompt
from prompting.validators.gating import BaseGatingModel
from prompting.validators.reward import BaseRewardModel
from typing import List


class MockGatingModel(BaseGatingModel):
def __init__(self, num_uids: int):
super(MockGatingModel, self).__init__()
# super(MockGatingModel, self).__init__()

self.num_uids = num_uids
self.linear = torch.nn.Linear(256, 10)

Expand All @@ -45,7 +46,24 @@ def resync(
pass


class MockRewardModel(torch.nn.Module):
class MockRewardModel(BaseRewardModel):
    """Stand-in reward model for tests: scores every completion with 1.0.

    Carries empty question/answer blacklists so callers that consult them
    see no filtered items.
    """

    @property
    def name(self) -> str:
        """Identifier reported for this reward model (the configured mock name)."""
        return self.mock_name

    def __init__(self, mock_name: str = "MockReward"):
        super().__init__()
        self.mock_name = mock_name
        # Empty blacklists: the mock never filters anything.
        self.question_blacklist = []
        self.answer_blacklist = []

    def apply(
        self, prompt: str, completion: List[str], name: str
    ) -> "tuple[torch.FloatTensor, torch.FloatTensor]":
        """Return a constant reward of 1.0 per completion.

        Returns the pair (rewards, normalized_rewards), matching the
        base class's apply, which returns filled rewards plus their
        normalized counterpart; for the mock both tensors are identical.
        (Fixed: the annotation previously claimed a single FloatTensor.)
        """
        # torch.ones is the direct way to build a constant-1 float vector.
        mock_reward = torch.ones(len(completion), dtype=torch.float32)
        return mock_reward, mock_reward

    def reset(self):
        """No internal state to clear; return self so calls can be chained."""
        return self

def reward(
self,
completions_with_prompt: List[str],
Expand Down
1 change: 0 additions & 1 deletion prompting/validators/reward/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from .reciprocate import ReciprocateRewardModel
from .relevance import RelevanceRewardModel
from .reward import BaseRewardModel
from .reward import MockRewardModel
from .dahoas import DahoasRewardModel
from .diversity import DiversityRewardModel
from .prompt import PromptRewardModel
Expand Down
17 changes: 0 additions & 17 deletions prompting/validators/reward/reward.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,20 +137,3 @@ def apply(

# Return the filled rewards.
return filled_rewards, filled_rewards_normalized


class MockRewardModel(BaseRewardModel):
    """Stand-in reward model for tests: scores every completion with 1.0."""

    @property
    def name(self) -> str:
        """Identifier reported for this reward model (the configured mock name)."""
        return self.mock_name

    def __init__(self, mock_name: str = "MockReward"):
        super().__init__()
        self.mock_name = mock_name

    def apply(
        self, prompt: str, completion: List[str], name: str
    ) -> "tuple[torch.FloatTensor, torch.FloatTensor]":
        """Return a constant reward of 1.0 per completion.

        Returns the pair (rewards, normalized_rewards), matching
        BaseRewardModel.apply, which returns the filled rewards plus their
        normalized counterpart; for the mock both tensors are identical.
        (Fixed: the annotation previously claimed a single FloatTensor.)
        """
        # torch.ones is the direct way to build a constant-1 float vector.
        mock_reward = torch.ones(len(completion), dtype=torch.float32)
        return mock_reward, mock_reward

    def reset(self):
        """No internal state to clear; return self so calls can be chained."""
        return self
3 changes: 1 addition & 2 deletions prompting/validators/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
import bittensor as bt
import prompting.validators as validators
from prompting.validators.misc import ttl_get_block
from prompting.validators.reward import MockRewardModel


def should_reinit_wandb(self):
Expand All @@ -49,7 +48,7 @@ def init_wandb(self, reinit=False):
if self.config.neuron.use_custom_gating_model:
tags.append("custom_gating_model")
for fn in self.reward_functions:
if not isinstance(fn, MockRewardModel):
if not self.config.neuron.mock_reward_models:
tags.append(str(fn.name))
if self.config.neuron.disable_set_weights:
tags.append("disable_set_weights")
Expand Down

0 comments on commit 0ea3f43

Please sign in to comment.