-
Notifications
You must be signed in to change notification settings - Fork 47
/
Copy pathcartpole_swing_up.py
executable file
·226 lines (170 loc) · 8.8 KB
/
cartpole_swing_up.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from envs.dflex_env import DFlexEnv
import math
import torch
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import dflex as df
import numpy as np
np.set_printoptions(precision=5, linewidth=256, suppress=True)
try:
from pxr import Usd
except ModuleNotFoundError:
print("No pxr package")
from utils import load_utils as lu
from utils import torch_utils as tu
class CartPoleSwingUpEnv(DFlexEnv):
    """Cart-pole swing-up task simulated with dflex.

    Each environment is a cart on a rail with a pole attached by a revolute
    joint. The pole starts hanging straight down (angle = -pi) and the agent
    applies a horizontal force to the cart to swing the pole up and balance it.

    Observation (5): [x, xdot, sin(theta), cos(theta), theta_dot]
    Action (1): force on the cart, clipped to [-1, 1] then scaled by
    ``action_strength``.
    Reward: negative quadratic cost on pole angle/velocity, cart
    position/velocity, and (optionally) action magnitude.
    """

    def __init__(self, render=False, device='cuda:0', num_envs=1024, seed=0, episode_length=240, no_grad=True, stochastic_init=False, MM_caching_frequency = 1, early_termination = False):
        num_obs = 5
        num_act = 1

        super(CartPoleSwingUpEnv, self).__init__(num_envs, num_obs, num_act, episode_length, MM_caching_frequency, seed, no_grad, render, device)

        self.stochastic_init = stochastic_init
        self.early_termination = early_termination

        self.init_sim()

        # action parameters: scale applied to the clipped [-1, 1] action
        self.action_strength = 1000.

        # loss related: per-term penalty coefficients used in calculateReward
        self.pole_angle_penalty = 1.0
        self.pole_velocity_penalty = 0.1
        self.cart_position_penalty = 0.05
        self.cart_velocity_penalty = 0.1
        self.cart_action_penalty = 0.0

        #-----------------------
        # set up Usd renderer
        if (self.visualize):
            # ensure the output directory exists; Usd.Stage.CreateNew fails
            # if the parent directory is missing
            os.makedirs("outputs", exist_ok=True)
            self.stage = Usd.Stage.CreateNew("outputs/" + "CartPoleSwingUp_" + str(self.num_envs) + ".usd")

            self.renderer = df.render.UsdRenderer(self.model, self.stage)
            self.renderer.draw_points = True
            self.renderer.draw_springs = True
            self.renderer.draw_shapes = True

            self.render_time = 0.0

    def init_sim(self):
        """Build the dflex model: one cart-pole URDF instance per environment.

        Also records the initial generalized coordinates so reset() can
        restore them.
        """
        self.builder = df.sim.ModelBuilder()

        # 60 Hz control step, integrated with 4 physics substeps
        self.dt = 1. / 60.
        self.sim_substeps = 4
        self.sim_dt = self.dt

        # spread environments apart only when rendering, so they are
        # distinguishable in the USD output
        if self.visualize:
            self.env_dist = 1.0
        else:
            self.env_dist = 0.0

        self.num_joint_q = 2   # per env: [cart position, pole angle]
        self.num_joint_qd = 2  # per env: [cart velocity, pole angular velocity]

        asset_folder = os.path.join(os.path.dirname(__file__), 'assets')
        for i in range(self.num_environments):
            lu.urdf_load(self.builder,
                         os.path.join(asset_folder, 'cartpole.urdf'),
                         df.transform((0.0, 2.5, 0.0 + self.env_dist * i), df.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi*0.5)),
                         floating=False,
                         shape_kd=1e4,
                         limit_kd=1.)
            # start with the pole hanging straight down
            self.builder.joint_q[i * self.num_joint_q + 1] = -math.pi

        self.model = self.builder.finalize(self.device)
        self.model.ground = False
        self.model.gravity = torch.tensor((0.0, -9.81, 0.0), dtype = torch.float, device = self.device)

        self.integrator = df.sim.SemiImplicitIntegrator()

        self.state = self.model.state()
        # snapshot of the initial coordinates, used as the deterministic
        # reset target in reset()
        self.start_joint_q = self.state.joint_q.clone()
        self.start_joint_qd = self.state.joint_qd.clone()

    def render(self, mode = 'human'):
        """Append the current state to the USD stage; save it every 40 frames."""
        if self.visualize:
            self.render_time += self.dt
            self.renderer.update(self.state, self.render_time)

            if (self.num_frames == 40):
                try:
                    self.stage.Save()
                except Exception:
                    # best-effort save: a failed flush should not kill training,
                    # but a bare except would also swallow KeyboardInterrupt
                    print('USD save error')

                self.num_frames -= 40

    def step(self, actions):
        """Advance every environment by one control step.

        Args:
            actions: tensor reshapeable to (num_envs, num_actions); values are
                clipped to [-1, 1] before being scaled to joint forces.

        Returns:
            (obs_buf, rew_buf, reset_buf, extras) in the usual gym-like layout.
            When gradients are enabled, ``extras`` carries the pre-reset
            observations and termination flags needed by differentiable RL.
        """
        with df.ScopedTimer("simulate", active=False, detailed=False):
            actions = actions.view((self.num_envs, self.num_actions))

            actions = torch.clip(actions, -1., 1.)
            self.actions = actions

            # only the cart (first) DOF is actuated
            self.state.joint_act.view(self.num_envs, -1)[:, 0:1] = actions * self.action_strength

            self.state = self.integrator.forward(self.model, self.state, self.sim_dt, self.sim_substeps, self.MM_caching_frequency)
            self.sim_time += self.sim_dt

        self.reset_buf = torch.zeros_like(self.reset_buf)

        self.progress_buf += 1
        self.num_frames += 1

        self.calculateObservations()
        self.calculateReward()

        if not self.no_grad:
            # keep a copy of the observations from before any env is reset,
            # so gradient-based algorithms see the true terminal state
            self.obs_buf_before_reset = self.obs_buf.clone()
            self.extras = {
                'obs_before_reset': self.obs_buf_before_reset,
                'episode_end': self.termination_buf
                }

        env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)

        with df.ScopedTimer("reset", active=False, detailed=False):
            if len(env_ids) > 0:
                self.reset(env_ids)

        with df.ScopedTimer("render", active=False, detailed=False):
            self.render()

        return self.obs_buf, self.rew_buf, self.reset_buf, self.extras

    def reset(self, env_ids=None, force_reset=True):
        """Reset the given environments to the start state.

        Args:
            env_ids: indices of environments to reset; if None and
                ``force_reset`` is True, all environments are reset.
            force_reset: only consulted when ``env_ids`` is None.

        Returns:
            The refreshed observation buffer.
        """
        if env_ids is None:
            if force_reset:
                env_ids = torch.arange(self.num_envs, dtype=torch.long, device=self.device)

        if env_ids is not None:
            # clone first so the in-place write below does not modify a tensor
            # that is part of the previous computation graph
            self.state.joint_q = self.state.joint_q.clone()
            self.state.joint_qd = self.state.joint_qd.clone()

            # fixed start state
            self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = self.start_joint_q.view(-1, self.num_joint_q)[env_ids, :].clone()
            self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = self.start_joint_qd.view(-1, self.num_joint_qd)[env_ids, :].clone()

            if self.stochastic_init:
                # perturb positions by +-pi/2 and velocities by +-0.25
                self.state.joint_q.view(self.num_envs, -1)[env_ids, :] = \
                    self.state.joint_q.view(self.num_envs, -1)[env_ids, :] \
                    + np.pi * (torch.rand(size=(len(env_ids), self.num_joint_q), device=self.device) - 0.5)

                self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] = \
                    self.state.joint_qd.view(self.num_envs, -1)[env_ids, :] \
                    + 0.5 * (torch.rand(size=(len(env_ids), self.num_joint_qd), device=self.device) - 0.5)

            self.progress_buf[env_ids] = 0

            self.calculateObservations()

        return self.obs_buf

    '''
    cut off the gradient from the current state to previous states
    '''
    def clear_grad(self):
        """Detach the simulation state from the computation graph.

        Keeps the current coordinates/forces but rebuilds the state object so
        no autograd history links it to earlier timesteps.
        """
        with torch.no_grad(): # TODO: check with Miles
            current_joint_q = self.state.joint_q.clone()
            current_joint_qd = self.state.joint_qd.clone()
            current_joint_act = self.state.joint_act.clone()
            self.state = self.model.state()
            self.state.joint_q = current_joint_q
            self.state.joint_qd = current_joint_qd
            self.state.joint_act = current_joint_act

    '''
    This function starts collecting a new trajectory from the current states but cut off the computation graph to the previous states.
    It has to be called every time the algorithm starts an episode and return the observation vectors
    '''
    def initialize_trajectory(self):
        """Start a new trajectory from the current (detached) state.

        Returns:
            The observation buffer for the new trajectory.
        """
        self.clear_grad()
        self.calculateObservations()
        return self.obs_buf

    def calculateObservations(self):
        """Populate ``obs_buf`` with [x, xdot, sin(theta), cos(theta), theta_dot]."""
        x = self.state.joint_q.view(self.num_envs, -1)[:, 0:1]
        theta = self.state.joint_q.view(self.num_envs, -1)[:, 1:2]
        xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0:1]
        theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1:2]

        # observations: [x, xdot, sin(theta), cos(theta), theta_dot]
        self.obs_buf = torch.cat([x, xdot, torch.sin(theta), torch.cos(theta), theta_dot], dim = -1)

    def calculateReward(self):
        """Populate ``rew_buf`` with the negative quadratic cost and flag
        environments whose episode length has been reached in ``reset_buf``."""
        x = self.state.joint_q.view(self.num_envs, -1)[:, 0]
        # normalize the angle so the upright position (0) is the cost minimum
        theta = tu.normalize_angle(self.state.joint_q.view(self.num_envs, -1)[:, 1])
        xdot = self.state.joint_qd.view(self.num_envs, -1)[:, 0]
        theta_dot = self.state.joint_qd.view(self.num_envs, -1)[:, 1]

        self.rew_buf = -torch.pow(theta, 2.) * self.pole_angle_penalty \
                    - torch.pow(theta_dot, 2.) * self.pole_velocity_penalty \
                    - torch.pow(x, 2.) * self.cart_position_penalty \
                    - torch.pow(xdot, 2.) * self.cart_velocity_penalty \
                    - torch.sum(self.actions ** 2, dim = -1) * self.cart_action_penalty

        # reset agents that have exhausted the episode length
        self.reset_buf = torch.where(self.progress_buf > self.episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)