commit a7ecf50a00
parent 4569c0a6ed
@@ -282,7 +282,6 @@ class sprint(gym.Env):
         Draw.clear_all()
         self.player.terminate()


     def generate_random_target(self, position):
         while True:
             angle = np.random.uniform(0, 2 * np.pi)
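The context above samples a uniformly random angle to place a target around a position. A minimal sketch of that idea, assuming a hypothetical fixed radius; the actual validity check inside the `while True` loop is not visible in this hunk:

import numpy as np

def generate_random_target(position, radius=10.0):
    # Hypothetical sketch: pick a uniformly random direction and place the
    # target `radius` meters away; the real method presumably loops until
    # the sampled target passes some check (e.g. staying inside the field).
    angle = np.random.uniform(0, 2 * np.pi)
    return position + radius * np.array([np.cos(angle), np.sin(angle)])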
@@ -312,7 +311,7 @@ class sprint(gym.Env):
         self.walk_distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
         # exponential moving average
-        self.act = 0.8 * self.act + 0.2 * action * action_mult * 0.7
+        self.act = 0.6 * self.act + 0.4 * action

         # execute Step behavior to extract the target positions of each leg (we will override these targets)
         lfy, lfz, rfy, rfz = self.step_generator.get_target_positions(self.step_counter == 0, self.STEP_DUR,
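The changed line swaps the 0.8/0.2 exponential moving average (with an extra 0.7 gain and an action_mult factor) for a 0.6/0.4 one, so self.act tracks the raw policy action faster and settles at the full action magnitude instead of 0.7 of it. A quick sketch of both filters' step response, assuming action_mult = 1 and a constant raw action of 1.0 (illustrative values only):

act_old, act_new = 0.0, 0.0
for _ in range(10):
    act_old = 0.8 * act_old + 0.2 * 1.0 * 0.7  # old filter: slower, steady state 0.7
    act_new = 0.6 * act_new + 0.4 * 1.0        # new filter: faster, steady state 1.0
print(act_old, act_new)  # ~0.62 vs ~0.99 after 10 steps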
@@ -363,9 +362,9 @@ class Train(Train_Base):
     def train(self, args):

         # --------------------------------------- Learning parameters
-        n_envs = min(12, os.cpu_count())
+        n_envs = min(10, os.cpu_count())
         n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
-        minibatch_size = 64  # should be a factor of (n_steps_per_env * n_envs)
+        minibatch_size = 256  # should be a factor of (n_steps_per_env * n_envs)
         total_steps = 50000000
         learning_rate = 3e-4
         folder_name = f'Sprint_R{self.robot_type}'
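Per the inline comment, minibatch_size should divide the rollout buffer size n_steps_per_env * n_envs. With the new values, a machine with at least 10 cores fills a 10240-sample buffer that 256 splits into 40 minibatches per pass. A sanity check in the same spirit:

import os

n_envs = min(10, os.cpu_count())
n_steps_per_env = 1024
minibatch_size = 256

buffer_size = n_steps_per_env * n_envs      # 10240 when n_envs == 10
assert buffer_size % minibatch_size == 0    # minibatch must divide the buffer
print(buffer_size // minibatch_size)        # 40 minibatches per pass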
@@ -322,7 +322,9 @@ class sprint(gym.Env):
         self.sync()
         self.step_counter += 1
         obs = self.observe()
-        reward = r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2
+        direction_error = abs(self.walk_rel_orientation)
+        direction_error = min(direction_error, 10)
+        reward = (r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2) * (1 - direction_error / 10)
         if self.player.behavior.is_ready("Get_Up"):
             self.terminal = True

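The reworked reward keeps the forward-minus-lateral velocity term but scales it by (1 - direction_error / 10), where direction_error is the absolute relative orientation to the target, clipped at 10. Speed in the wrong heading thus earns proportionally less, and a fully misaligned robot earns nothing. A standalone sketch, with vx and vy standing in for r.loc_torso_velocity[0] and [1] (illustrative values, not from the codebase):

def shaped_reward(vx, vy, walk_rel_orientation):
    # Forward velocity minus a lateral penalty, attenuated by heading error.
    direction_error = min(abs(walk_rel_orientation), 10)
    return (vx - vy * 0.2) * (1 - direction_error / 10)

print(shaped_reward(1.0, 0.0, 0.0))   # 1.0: perfectly aligned, full reward
print(shaped_reward(1.0, 0.0, 5.0))   # 0.5: halfway misaligned
print(shaped_reward(1.0, 0.0, 12.0))  # 0.0: error clipped at 10, no reward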