agent.py
import gym
import collections

# Written against the classic gym API (pre-0.26): reset() returns an
# observation and step() returns (obs, reward, done, info).
GAMMA = 0.9  # discount factor for future rewards

class Agent:
    """Tabular agent that learns rewards and transitions from experience
    and estimates state values via value iteration."""

    def __init__(self, env_name):
        self.env = gym.make(env_name)
        self.state = self.env.reset()
        # reward table: (state, action, new_state) -> immediate reward
        self.rewards = collections.defaultdict(float)
        # transition table: (state, action) -> Counter of observed new states
        self.transits = collections.defaultdict(collections.Counter)
        # value table: state -> estimated value V(s)
        self.values = collections.defaultdict(float)

    def play_n_random_steps(self, count):
        # Gather experience from random actions to populate the reward
        # and transition tables; no value updates happen here.
        for _ in range(count):
            action = self.env.action_space.sample()
            new_state, reward, is_done, _ = self.env.step(action)
            self.rewards[(self.state, action, new_state)] = reward
            self.transits[(self.state, action)][new_state] += 1
            self.state = self.env.reset() if is_done else new_state

    def calc_action_value(self, state, action):
        # Estimate Q(s, a) from observed transition frequencies:
        # Q(s, a) = sum_s' p(s' | s, a) * (r(s, a, s') + GAMMA * V(s'))
        target_counts = self.transits[(state, action)]
        total = sum(target_counts.values())
        action_value = 0.0
        for target_state, count in target_counts.items():
            reward = self.rewards[(state, action, target_state)]
            action_value += (count / total) * (reward + GAMMA * self.values[target_state])
        return action_value

    def select_action(self, state):
        # Greedy policy: pick the action with the highest estimated Q-value.
        best_action, best_value = None, None
        for action in range(self.env.action_space.n):
            action_value = self.calc_action_value(state, action)
            if best_value is None or best_value < action_value:
                best_value = action_value
                best_action = action
        return best_action

    def play_episode(self, env):
        # Play one full episode greedily on the given env, recording
        # rewards and transitions along the way.
        total_reward = 0.0
        state = env.reset()
        while True:
            action = self.select_action(state)
            new_state, reward, is_done, _ = env.step(action)
            self.rewards[(state, action, new_state)] = reward
            self.transits[(state, action)][new_state] += 1
            total_reward += reward
            if is_done:
                break
            state = new_state
        return total_reward

    def value_iteration(self):
        # One sweep of value iteration: V(s) <- max_a Q(s, a) for every state.
        for state in range(self.env.observation_space.n):
            state_values = [self.calc_action_value(state, action)
                            for action in range(self.env.action_space.n)]
            self.values[state] = max(state_values)
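

# Usage sketch (not part of the original file): a minimal training loop of the
# kind this Agent is built for. The environment name, step count, episode
# count, and reward threshold below are illustrative assumptions, not values
# taken from this file.
if __name__ == "__main__":
    agent = Agent("FrozenLake-v0")  # assumed env; any discrete gym env works
    test_env = gym.make("FrozenLake-v0")
    iteration = 0
    while True:
        iteration += 1
        agent.play_n_random_steps(100)  # gather fresh random experience
        agent.value_iteration()         # sweep V(s) once over all states
        # Evaluate the greedy policy over a batch of test episodes.
        reward = sum(agent.play_episode(test_env) for _ in range(20)) / 20
        print("Iteration %d: mean reward %.3f" % (iteration, reward))
        if reward > 0.80:               # illustrative stopping threshold
            print("Solved in %d iterations" % iteration)
            break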