From 38969462804128e40ea3a95db62175a2c22fa594 Mon Sep 17 00:00:00 2001
From: MagDish <2717360869@qq.com>
Date: Fri, 22 Nov 2024 11:29:16 +0800
Subject: [PATCH] new sprint_demo.py

---
 scripts/gyms/sprint_demo.py | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)

diff --git a/scripts/gyms/sprint_demo.py b/scripts/gyms/sprint_demo.py
index 20975de..9666d0a 100644
--- a/scripts/gyms/sprint_demo.py
+++ b/scripts/gyms/sprint_demo.py
@@ -1,6 +1,6 @@
 import math
 import random
-
+import time
 from agent.Base_Agent import Base_Agent as Agent
 from behaviors.custom.Step.Step import Step
 from world.commons.Draw import Draw
@@ -79,6 +79,7 @@ class sprint(gym.Env):
         self.player.scom.unofficial_move_ball((0, 0, 0))
 
         self.gait: Step_Generator = self.player.behavior.get_custom_behavior_object("Walk").env.step_generator
+        self.last_target_update_time = time.time()
 
     def observe(self, init=False):
         r = self.player.world.robot
@@ -278,12 +279,29 @@ class sprint(gym.Env):
         Draw.clear_all()
         self.player.terminate()
 
 
+    def change_target(self):
+        original_angle = M.vector_angle(self.walk_rel_target)
+        random_angle_delta = np.random.uniform(-10, 10)
+        new_angle = (original_angle + np.radians(random_angle_delta)) * 3 * math.sin(time.time())
+
+        new_walk_rel_target = np.array([
+            np.cos(new_angle) * self.walk_distance,
+            np.sin(new_angle) * self.walk_distance
+        ])
+
+        self.walk_rel_target = new_walk_rel_target
+
     def step(self, action):
         r = self.player.world.robot
         w = self.player.world
 
+        current_time = time.time()
+        if current_time - self.last_target_update_time > 1:
+            self.change_target()
+            self.last_target_update_time = current_time
+
         internal_dist = np.linalg.norm(self.internal_target)
         action_mult = 1 if internal_dist > 0.2 else (0.7 / 0.2) * internal_dist + 0.3
 
         self.walk_rel_target = M.rotate_2d_vec(
@@ -292,7 +310,7 @@ class sprint(gym.Env):
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
 
         # exponential moving average
-        self.act = 0.6 * self.act + 0.4 * action
+        self.act = 0.7 * self.act + 0.3 * action_mult * 0.7
 
         # execute Step behavior to extract the target positions of each leg (we will override these targets)
         lfy, lfz, rfy, rfz = self.step_generator.get_target_positions(self.step_counter == 0, self.STEP_DUR,
@@ -322,22 +340,22 @@ class sprint(gym.Env):
         self.sync()
         self.step_counter += 1
         obs = self.observe()
+        robot_speed = r.loc_torso_velocity[0]
         direction_error = abs(self.walk_rel_orientation)
         direction_error = min(direction_error, 10)
-        reward = (r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2) * (1 - direction_error / 10)
+        reward = robot_speed * (1 - direction_error / 10)
         if self.player.behavior.is_ready("Get_Up"):
             self.terminal = True
-
-        elif w.time_local_ms - self.reset_time > 7500 * 2:
+        elif w.time_local_ms - self.reset_time > 30000:
             self.terminal = True
         elif r.loc_torso_position[0] > 14.5:
             self.terminal = True
             reward += 500
+        elif r.loc_torso_position[0] > 0:
+            reward += 3 * r.loc_torso_position[0]
         else:
             self.terminal = False
         return obs, reward, self.terminal, {}
-
-
 class Train(Train_Base):
     def __init__(self, script) -> None:
         super().__init__(script)
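
Note: the sketch below is a minimal, standalone illustration of the two mechanics this patch introduces: a walk target that is re-randomized roughly once per second (change_target plus the wall-clock check added to step), and the simplified speed/heading reward with a forward-progress bonus. The MovingTargetDemo class, the vector_angle helper, the walk_distance default, and shaped_reward are hypothetical stand-ins for the environment's own fields and the codebase's M.vector_angle utility (assumed here to work in radians so the trigonometry stays self-consistent); this is not the actual sprint_demo.py code.

import math
import time
import numpy as np


def vector_angle(vec):
    # Stand-in for M.vector_angle: heading of a 2D vector, in radians.
    return math.atan2(vec[1], vec[0])


class MovingTargetDemo:
    # Hypothetical container mirroring only the fields the patch touches.

    def __init__(self, walk_distance=5.0):
        self.walk_distance = walk_distance                     # assumed fixed target distance
        self.walk_rel_target = np.array([walk_distance, 0.0])  # target straight ahead initially
        self.last_target_update_time = time.time()

    def change_target(self):
        # Perturb the current heading by a random offset of up to +/-10 degrees,
        # then scale the result by a slow sinusoid of wall-clock time, as the patch does.
        original_angle = vector_angle(self.walk_rel_target)
        random_angle_delta = np.random.uniform(-10, 10)
        new_angle = (original_angle + np.radians(random_angle_delta)) * 3 * math.sin(time.time())

        # The target keeps its distance; only its direction changes.
        self.walk_rel_target = np.array([
            np.cos(new_angle) * self.walk_distance,
            np.sin(new_angle) * self.walk_distance,
        ])

    def maybe_update_target(self):
        # Re-randomize the target at most once per second, mirroring the check added to step().
        now = time.time()
        if now - self.last_target_update_time > 1:
            self.change_target()
            self.last_target_update_time = now


def shaped_reward(robot_speed, walk_rel_orientation, torso_x):
    # Mirrors the reward added by the patch: forward speed scaled down by heading error,
    # plus a bonus proportional to field progress (the env also adds +500 at x > 14.5).
    direction_error = min(abs(walk_rel_orientation), 10)
    reward = robot_speed * (1 - direction_error / 10)
    if torso_x > 14.5:
        reward += 500
    elif torso_x > 0:
        reward += 3 * torso_x
    return reward


if __name__ == "__main__":
    demo = MovingTargetDemo()
    for _ in range(3):
        demo.maybe_update_target()
        print(demo.walk_rel_target, shaped_reward(0.5, vector_angle(demo.walk_rel_target), 2.0))
        time.sleep(1.1)

One design detail worth noting: both the patch and this sketch key the target-update interval to wall-clock time (time.time()), while the episode timeout uses the simulation clock (w.time_local_ms), so the effective target-update rate depends on how fast the simulation runs relative to real time.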