forked from rasmusbergpalm/hebbian-evolution
wrappers.py
import gym
import numpy as np
from scipy import stats


class EnvReward(gym.Wrapper):
    """Appends the last reward to the observation vector."""

    def __init__(self, env, limit=False):
        super().__init__(env)  # gym.Wrapper already stores env as self.env
        self.mean = 0.2
        self.std = 0.1
        self.norm = stats.norm(self.mean, self.std)
        # One extra dimension for the appended reward (24-dim obs + 1).
        self.observation_space = gym.spaces.Box(
            float('-inf'), float('inf'), shape=(25,), dtype=np.float32
        )
        self.limit = limit

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Optional squashing of each obs component into (0, 1), kept for reference:
        # obs[i] = (1 + obs[i] / (1 + abs(obs[i]))) * 0.5
        # Alternative Gaussian reward shaping, kept for reference:
        # reward = self.norm.pdf(obs[2]) * reward
        obs = np.append(obs, reward)
        return obs, reward, done, info


class SpeedLimitReward(gym.Wrapper):
    """Scales the reward by a Gaussian centred on a target speed and
    appends the shaped reward to the observation vector."""

    def __init__(self, env, speed):
        super().__init__(env)
        self.mean = speed
        self.std = 0.1
        self.norm = stats.norm(self.mean, self.std)
        self.observation_space = gym.spaces.Box(
            float('-inf'), float('inf'), shape=(25,), dtype=np.float32
        )

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Scale the reward by how close obs[2] (a velocity component) is
        # to the target speed; the Gaussian peaks at self.mean.
        reward = self.norm.pdf(obs[2]) * reward
        # Optional clipping to [-1, 1], kept for reference:
        # reward = max(-1, min(reward, 1))
        obs = np.append(obs, reward)
        return obs, reward, done, info
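

# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal run loop, assuming an older gym version with the 4-tuple step API
# (as the wrappers above use) and BipedalWalker-v3, whose 24-dim observation
# plus the appended reward matches the declared 25-dim space and whose obs[2]
# is the hull's horizontal velocity. Note that reset() is not overridden, so
# the very first observation keeps the base 24 dimensions.
if __name__ == "__main__":
    env = SpeedLimitReward(gym.make("BipedalWalker-v3"), speed=0.2)
    obs = env.reset()
    done, total = False, 0.0
    while not done:
        # Random actions, just to exercise the wrapped step() path.
        obs, reward, done, info = env.step(env.action_space.sample())
        total += reward
    print("episode return under the speed-limited reward:", total)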