From a7ecf50a005d6b1d2523d843f42966d9d3caf58c Mon Sep 17 00:00:00 2001
From: MagDish <2717360869@qq.com>
Date: Tue, 19 Nov 2024 20:38:15 +0800
Subject: [PATCH] new

---
 scripts/gyms/Sprint.py      | 7 +++----
 scripts/gyms/sprint_demo.py | 4 +++-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/scripts/gyms/Sprint.py b/scripts/gyms/Sprint.py
index fd0cbe9..139a89f 100644
--- a/scripts/gyms/Sprint.py
+++ b/scripts/gyms/Sprint.py
@@ -282,7 +282,6 @@ class sprint(gym.Env):
         Draw.clear_all()
         self.player.terminate()
 
-
     def generate_random_target(self, position):
         while True:
             angle = np.random.uniform(0, 2 * np.pi)
@@ -312,7 +311,7 @@ class sprint(gym.Env):
         self.walk_distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
         # exponential moving average
-        self.act = 0.8 * self.act + 0.2 * action * action_mult * 0.7
+        self.act = 0.6 * self.act + 0.4 * action
 
         # execute Step behavior to extract the target positions of each leg (we will override these targets)
         lfy, lfz, rfy, rfz = self.step_generator.get_target_positions(self.step_counter == 0, self.STEP_DUR,
@@ -363,9 +362,9 @@ class Train(Train_Base):
     def train(self, args):
 
         # --------------------------------------- Learning parameters
-        n_envs = min(12, os.cpu_count())
+        n_envs = min(10, os.cpu_count())
         n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
-        minibatch_size = 64  # should be a factor of (n_steps_per_env * n_envs)
+        minibatch_size = 256  # should be a factor of (n_steps_per_env * n_envs)
         total_steps = 50000000
         learning_rate = 3e-4
         folder_name = f'Sprint_R{self.robot_type}'
diff --git a/scripts/gyms/sprint_demo.py b/scripts/gyms/sprint_demo.py
index 92b0d2f..9c2c293 100644
--- a/scripts/gyms/sprint_demo.py
+++ b/scripts/gyms/sprint_demo.py
@@ -322,7 +322,9 @@ class sprint(gym.Env):
         self.sync()
         self.step_counter += 1
         obs = self.observe()
-        reward = r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2
+        direction_error = abs(self.walk_rel_orientation)
+        direction_error = min(direction_error, 10)
+        reward = (r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2) * (1 - direction_error / 10)
 
         if self.player.behavior.is_ready("Get_Up"):
             self.terminal = True
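The two behavioral changes in this patch are the faster action smoothing in Sprint.py (EMA coefficients move from 0.8/0.2 with an extra action_mult * 0.7 scaling to 0.6/0.4 on the raw action) and the direction-weighted reward in sprint_demo.py. Below is a minimal standalone sketch of those two formulas, outside the patch; the function and variable names (smooth_action, shaped_reward, forward_vel, lateral_vel, rel_orientation) are illustrative and not taken from the codebase.

# Standalone sketch of the two formulas changed by this patch.
# Names are assumptions for illustration, not the codebase's API.
import numpy as np

def smooth_action(prev_act: np.ndarray, action: np.ndarray) -> np.ndarray:
    # Exponential moving average with the patch's new coefficients:
    # more weight (0.4) on the fresh action, no extra scaling factor.
    return 0.6 * prev_act + 0.4 * action

def shaped_reward(forward_vel: float, lateral_vel: float, rel_orientation: float) -> float:
    # Base term as in the original reward line: forward speed minus a
    # small penalty on lateral speed.
    base = forward_vel - 0.2 * lateral_vel
    # Direction error is the magnitude of the relative walk orientation,
    # clipped at 10; the reward is scaled linearly down to 0 at the clip.
    direction_error = min(abs(rel_orientation), 10)
    return base * (1 - direction_error / 10)

if __name__ == "__main__":
    act = smooth_action(np.zeros(3), np.array([0.5, -0.2, 0.1]))
    print(act)                            # [ 0.2  -0.08  0.04]
    print(shaped_reward(0.8, 0.1, 2.5))   # 0.585

The shaping factor keeps the full velocity reward only when the agent is heading at the target and removes it entirely once the orientation error reaches the clip value, which is what the three added lines in sprint_demo.py compute inline.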