Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Offline tests #38

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -140,4 +140,5 @@ cython_debug/
# Custom stuff
local-search/
.idea/
test-venv/
test-venv/
student_solutions/
2 changes: 2 additions & 0 deletions local_search/problems/graph_coloring_problem/goals/goal.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@
from local_search.problems.graph_coloring_problem.models.edge import Edge
from local_search.problems.graph_coloring_problem.state import GraphColoringState

# Do we want to have TODOs on this class? It teaches students nothing about algorithms; it only makes them familiar with the problem.
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Zastanawiam się nad tym, czy chcemy żeby studenci implementowali Goal?

Według mnie to by było fajne gdyby oprócz tego TODO było jeszcze kilka dotyczących architektury solvery, bo wtedy to by pozwoliło:

  1. lepiej zrozumieć jak działa nasz solver
  2. pokazało by im jak można podchodzić do implementacji rzeczywistych solverów.

Natomiast teraz większość TODO dotyczy algorytmów i to jak na mnie jest trochę zbędne.



class GraphColoringGoal(Goal, ABC):
"""
Expand Down
5 changes: 4 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
from setuptools import setup


setup(
name='Local search',
version='1.0.0',
py_modules=['local_search'],
install_requires=[
'numpy',
'rich',
'click',
'pygame',
'Pillow',
"mpmath"
'mpmath',
'pytest'
],
entry_points='''
[console_scripts]
Expand Down
1 change: 1 addition & 0 deletions tests/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
This folder contains scripts that allow you to test the TODOs.
Empty file added tests/__init__.py
Empty file.
109 changes: 109 additions & 0 deletions tests/sum_problem.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
from __future__ import annotations
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Przeniosłem ten problem z tego repozytorium


import random
from abc import ABC
from dataclasses import dataclass
from typing import Iterable, Type, Generator

from local_search.problems import State, Problem
from local_search.problems.base import Move
from local_search.problems.base.goal import Goal, GoalType
from local_search.problems.base.move_generator import MoveGenerator


@dataclass
class SumProblemState(State):
    """A state of the sum-decomposition problem: two integer components ``a`` and ``b``."""
    a: int
    b: int

    def __str__(self) -> str:
        return f"{self.a} + {self.b}"

    def __eq__(self, other):
        if not isinstance(other, SumProblemState):
            return False
        return (self.a, self.b) == (other.a, other.b)

    @staticmethod
    def suboptimal_state(sum: int = 100) -> SumProblemState:
        """Return a deliberately non-optimal quarter/three-quarters split of ``sum``."""
        first_component = int(0.25 * sum)
        return SumProblemState(first_component, sum - first_component)

    @staticmethod
    def optimal_state(goal_type: GoalType, sum: int = 100) -> SumProblemState:
        """Return the optimal split: an even split when minimizing, all-in-one otherwise."""
        first_component = int(0.5 * sum) if goal_type == GoalType.MIN else 0
        return SumProblemState(first_component, sum - first_component)


class SumProblemGoal(Goal, ABC):
    """Common objective for the sum problem: the sum of the squared components."""

    def objective_for(self, state: SumProblemState) -> int:
        """Return ``a^2 + b^2`` for the given state."""
        return state.a * state.a + state.b * state.b

    def human_readable_objective_for(self, state: SumProblemState) -> str:
        """Render the numeric objective as a plain string."""
        return str(self.objective_for(state))


class Maximize(SumProblemGoal):
    """Goal variant that maximizes the sum of squared components."""

    def type(self) -> GoalType:
        return GoalType.MAX


class Minimize(SumProblemGoal):
    """Goal variant that minimizes the sum of squared components."""

    def type(self) -> GoalType:
        return GoalType.MIN


@dataclass
class SumProblemMoveGenerator(MoveGenerator):
    """Yields moves that shift the first component within a small window around its value."""
    sum: int

    def available_moves(self, state: SumProblemState) -> Generator[Move[SumProblemState], None, None]:
        # NOTE(review): the window is asymmetric — candidates run from a-2 up to
        # a+1 inclusive (range excludes the upper bound) and include the current
        # value of ``a`` itself (a no-op move). Confirm this is intentional.
        lower = max(0, state.a - 2)
        upper = min(self.sum, state.a + 2)
        for candidate in range(lower, upper):
            yield SumProblemMove(candidate, self.sum)


@dataclass
class SumProblemMove(Move):
    """Move that sets the first component to ``new_a``; the second becomes the remainder."""
    new_a: int
    sum: int

    def make(self) -> SumProblemState:
        """Materialize the state produced by this move."""
        remainder = self.sum - self.new_a
        return SumProblemState(self.new_a, remainder)


class SumProblem(Problem):
    """
    Demonstration of a simple problem, where the goal is to decompose :param sum: into components.
    """

    def __init__(self, sum: int, goal: SumProblemGoal):
        self.sum = sum
        self.goal = goal
        self.move_generator = SumProblemMoveGenerator(self.sum)

    def random_state(self) -> SumProblemState:
        """Draw a uniformly random decomposition of ``sum`` into two components."""
        first_component = random.randrange(self.sum + 1)
        return SumProblemState(first_component, self.sum - first_component)

    @staticmethod
    def get_available_move_generation_strategies() -> Iterable[str]:
        return ["MockMoveGenerator"]

    @staticmethod
    def get_available_goals() -> Iterable[str]:
        return ["MockGoalMin", "MockGoalMax"]

    @staticmethod
    def from_benchmark(**kwargs) -> SumProblem:
        raise NotImplementedError(
            f"{SumProblem.__name__} cannot be created from benchmark")

    @classmethod
    def from_dict(cls: Type[SumProblem], **kwargs) -> SumProblem:
        raise NotImplementedError(
            f"{SumProblem.__name__} cannot be created from dict")

    def next_states_from(self, state: SumProblemState) -> Iterable[SumProblemState]:
        """Apply every available move to ``state`` and yield the resulting states."""
        yield from (move.make() for move in self.move_generator.available_moves(state))

73 changes: 73 additions & 0 deletions tests/test_graph_coloring_goal.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
from typing import List

import pytest

from local_search.problems.graph_coloring_problem.goals import MinColors
from local_search.problems.graph_coloring_problem.goals.goal import GraphColoringGoal
from local_search.problems.graph_coloring_problem.models.edge import Edge
from local_search.problems.graph_coloring_problem.models.vertex import Vertex
from local_search.problems.graph_coloring_problem.state import GraphColoringState

# Adjacency mapping: vertex id -> set of neighbour ids.
# BUG FIX: was `dict[int, dict[int]]` — a malformed generic (dict takes a key
# AND a value type) and the wrong container: fixtures store neighbours as sets.
GraphDict = dict[int, set[int]]


def create_edges(graph: GraphDict) -> List[Edge]:
    """Flatten an adjacency mapping into a flat list of Edge objects."""
    return [Edge(source, target)
            for source, targets in graph.items()
            for target in targets]


@pytest.fixture()
def graph():
    # Symmetric adjacency mapping (undirected test graph): vertex -> neighbours.
    return {0: {4, 5, 8}, 1: {4, 6}, 2: {4, 5}, 3: {6}, 4: {0, 1, 2}, 5: {0, 2}, 6: {1, 3}, 7: {8}, 8: {0, 7}}


def create_goal(graph: GraphDict) -> GraphColoringGoal:
    """Build a MinColors goal over the edges of ``graph``."""
    return MinColors(create_edges(graph), len(graph))


def create_graph_coloring_state(graph: GraphDict, num_colors: int):
    """Colour the vertices cyclically: vertex ``i`` gets colour ``i % num_colors``."""
    coloring = [Vertex(index, index % num_colors) for index in range(len(graph))]
    return GraphColoringState(coloring)


@pytest.mark.parametrize('num_colors', [5, 3, 1])
def test_num_colors(graph: GraphDict, num_colors: int):
    """``_num_colors`` should count the distinct colours used by the cyclic colouring."""
    goal = create_goal(graph)
    state = create_graph_coloring_state(graph, num_colors)
    # BUG FIX: the assertion used to recompute goal._num_colors(state) instead
    # of reusing the already-computed actual_num_colors.
    actual_num_colors = goal._num_colors(state)
    assert num_colors == actual_num_colors, f'expected {num_colors} colors, got {actual_num_colors}\n' \
                                            f'\t- state {state},'


@pytest.mark.parametrize('num_colors, expected', [
    (5, [2, 2, 0, 0, 0, 0, 0, 0, 0]),
    (3, [2, 2, 2, 0, 0, 0, 0, 0, 0]),
    (1, [18, 0, 0, 0, 0, 0, 0, 0, 0])
])
def test_bad_edges(graph: GraphDict, num_colors: int, expected: list[int]):
    """``_bad_edges`` should report the conflicting-edge count per colour class."""
    goal = create_goal(graph)
    state = create_graph_coloring_state(graph, num_colors)
    actual = goal._bad_edges(state)
    assert actual == expected, f'expected {expected} bad edges, got {actual}\n' \
                               f'\t- state {state},'


@pytest.mark.parametrize('num_colors, expected', [
    (5, [2, 2, 2, 2, 1, 0, 0, 0, 0]),
    (3, [3, 3, 3, 0, 0, 0, 0, 0, 0]),
    (1, [9, 0, 0, 0, 0, 0, 0, 0, 0])
])
def test_color_classes(graph: GraphDict, num_colors: int, expected: list[int]):
    """``_color_classes`` should report how many vertices carry each colour."""
    goal = create_goal(graph)
    state = create_graph_coloring_state(graph, num_colors)
    actual = goal._color_classes(state)
    assert actual == expected, f'expected {expected} color classes, got {actual}\n' \
                               f'\t- state {state},'
85 changes: 85 additions & 0 deletions tests/test_graph_coloring_kempe_chain.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
from typing import List, Dict, Set

import pytest

from local_search.problems.graph_coloring_problem.goals import MinColors
from local_search.problems.graph_coloring_problem.models.edge import Edge
from local_search.problems.graph_coloring_problem.models.vertex import Vertex
from local_search.problems.graph_coloring_problem.moves.kempe_chain import KempeChainMove
from local_search.problems.graph_coloring_problem.state import GraphColoringState


@pytest.fixture()
def start_index():
    # The vertex whose recolouring triggers the Kempe chain in every test.
    return 0


@pytest.fixture()
def graph() -> dict[int, Set[int]]:
    # Symmetric adjacency mapping (undirected test graph): vertex -> neighbours.
    return {0: {4, 5, 8}, 1: {4, 6}, 2: {4, 5}, 3: {6}, 4: {0, 1, 2}, 5: {0, 2}, 6: {1, 3}, 7: {8}, 8: {0, 7}}


@pytest.fixture()
def old_state(graph) -> GraphColoringState:
    """Initial colouring: vertices 0-3 get colour 0, 4-7 get colour 1, 8 gets colour 2."""
    return GraphColoringState(
        [Vertex(vertex_id, vertex_id // 4) for vertex_id in range(len(graph))])


@pytest.fixture()
def new_state(old_state, start_index, new_color) -> GraphColoringState:
    # Mutates the ``old_state`` fixture in place and returns the same object —
    # this models the colouring right after the start vertex has been recoloured.
    old_state.coloring[start_index].color = new_color
    return old_state


@pytest.fixture()
def student_move(graph, old_state, start_index, new_color):
    # The (student-implemented) Kempe chain move under test.
    return KempeChainMove(graph, old_state, start_index, new_color)


@pytest.fixture()
def edges(graph: Dict[int, Set[int]]) -> List[Edge]:
    """All edge records derived from the adjacency mapping."""
    return [Edge(source, target)
            for source, targets in graph.items()
            for target in targets]


@pytest.mark.parametrize('new_color', [3, 5])
def test_kempe_chain_should_have_result_with_no_conflicts(student_move, new_state, edges):
    """After the chain runs, no edge may join two vertices of the same colour."""
    student_move._kempe_chain(new_state.coloring)
    goal = MinColors(edges, len(student_move.graph))
    bad_edges = goal._bad_edges(new_state)
    n_bad_edges = sum(bad_edges)
    assert n_bad_edges == 0, f"there are still {n_bad_edges} conflicts after kempe chain\n" \
                             f"\t- bad edges: {bad_edges}\n" \
                             f"\t- state: {new_state}\n" \
                             f"\t- graph: {student_move.graph}\n"


@pytest.mark.parametrize('new_color', [3, 5])
def test_kempe_chain_should_solve_direct_conflicts(student_move, new_state):
    """No neighbour of the recoloured vertex 0 may share its colour."""
    student_move._kempe_chain(new_state.coloring)
    root_color = new_state.coloring[0].color
    for neighbour in student_move.graph[0]:
        assert new_state.coloring[neighbour].color != root_color, \
            f"kempe chain fails to correctly fix direct coloring conflict\n" \
            f"\t- state: {new_state}\n" \
            f"\t- graph: {student_move.graph}\n"


@pytest.mark.parametrize('new_color', [2])
def test_kempe_chain_should_solve_indirect_conflicts(student_move, new_state):
    """Vertices further along the chain must be recoloured too, not just direct neighbours."""
    student_move._kempe_chain(new_state.coloring)
    expected_colors = {1: 0, 3: 0, 6: 1}
    chain_resolved = all(new_state.coloring[vertex].color == color
                         for vertex, color in expected_colors.items())
    assert chain_resolved, f"kempe chain fails to fix indirect coloring conflicts:\n" \
                           f"\t- state: {new_state}\n" \
                           f"\t- graph: {student_move.graph}\n"


@pytest.mark.parametrize('new_color', [2])
def test_kempe_chain_should_handle_cycles(student_move, new_state):
    # Vertex 2 is reachable from vertex 0 along two paths (0-4-2 and 0-5-2),
    # so the chain visits it through a cycle; it must still end up recoloured
    # to 0 exactly once — presumably this guards against infinite revisiting.
    student_move._kempe_chain(new_state.coloring)
    assert new_state.coloring[2].color == 0, f"kempe chain doesn't handle correctly cycles in the graph\n" \
                                             f"\t- state: {new_state}\n" \
                                             f"\t- graph: {student_move.graph}\n"
60 changes: 60 additions & 0 deletions tests/test_hill_climbing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
import pytest
from local_search.algorithms.hill_climbing.best_choice_hill_climbing import BestChoiceHillClimbing
from local_search.algorithms.hill_climbing.hill_climbing import HillClimbing, DEFAULT_CONFIG
from local_search.algorithms.hill_climbing.random_choice_hill_climbing import RandomChoiceHillClimbing
from local_search.algorithms.hill_climbing.worst_choice_hill_climbing import WorstChoiceHillClimbing
from tests.sum_problem import SumProblem, SumProblemGoal, Maximize, Minimize, SumProblemState


# TODO: should be here?
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Przeniosłem testy z tego repo.

Nie widzę żeby w kodzie były jakiekolwiek metody dotyczące hill climbingu, ale myślę, że można by było je dodać skoro testy już są.

PROBLEM_SIZE = 100


@pytest.fixture
def goals() -> list[SumProblemGoal]:
    # Both goal directions, so every test exercises MIN and MAX behaviour.
    return [Minimize(), Maximize()]


def test_best_choice_hill_climbing_should_find_the_best_neighbour(goals: list[SumProblemGoal]):
    """Best-choice climbing must return the neighbour with the maximal improvement."""
    solver = BestChoiceHillClimbing(DEFAULT_CONFIG)
    for goal in goals:
        state = SumProblemState.suboptimal_state(PROBLEM_SIZE)
        next_state, problem = get_climbing_results_for_a_mock_problem(
            solver, goal, state)
        assert problem.improvement(next_state, state) > 0, \
            "algorithm returns a state that's not better than the previous " \
            f"one (goal type: {goal.type()})"

        # Recompute the best improving neighbour independently and compare objectives.
        improving_states = [candidate
                            for candidate in problem.next_states_from(state)
                            if problem.improvement(candidate, state) > 0]
        expected_state = max(improving_states,
                             key=lambda candidate: problem.improvement(candidate, state))
        assert problem.objective_for(next_state) == problem.objective_for(expected_state), \
            "algorithm does return an improving state, but it's not the best " \
            f"(goal type: {goal.type()})"


def test_random_choice_hill_climbing_should_find_the_random_improving_neighbour(goals):
    """Random-choice climbing must never worsen the state and must be stochastic."""
    solver = RandomChoiceHillClimbing(DEFAULT_CONFIG)
    for goal in goals:
        state = SumProblemState.suboptimal_state(PROBLEM_SIZE)

        next_state, problem = get_climbing_results_for_a_mock_problem(
            solver, goal, state)
        # BUG FIX: the last message fragment was a plain string, so the
        # "{goal.type()}" placeholder was printed literally; it is now an f-string.
        assert problem.improvement(next_state, state) >= 0, \
            f"algorithm returns a state that's worse than " \
            f"the previous " \
            f"one (goal type: {goal.type()})"

        # 100 climbs from the same state should not all land on one objective value.
        next_values = {problem.objective_for(solver._climb_the_hill(problem, state))
                       for _ in range(100)}
        # BUG FIX: removed the stray extra ")" from the failure message.
        assert len(next_values) > 1, \
            f"algorithm is deterministic, always returns the same state, while it should be random " \
            f"(goal type: {goal.type()})"


def get_climbing_results_for_a_mock_problem(solver: HillClimbing, goal: SumProblemGoal, state: SumProblemState):
    """Run one climbing step of ``solver`` on a fresh SumProblem.

    Returns the resulting state together with the problem instance it was
    produced on, failing the test if the solver yields ``None``.
    """
    problem = SumProblem(PROBLEM_SIZE, goal)
    climbed_state = solver._climb_the_hill(problem, state)
    assert climbed_state is not None, "algorithm returns None instead of a state"
    return climbed_state, problem
Loading