From e8cdff3a43587fd222654f6b5dd6ce3513e61e21 Mon Sep 17 00:00:00 2001
From: MagDish <2717360869@qq.com>
Date: Wed, 13 Nov 2024 19:28:43 +0800
Subject: [PATCH] new

---
 scripts/gyms/sprint.py | 49 +++++++++++++++---------------------
 1 file changed, 17 insertions(+), 32 deletions(-)

diff --git a/scripts/gyms/sprint.py b/scripts/gyms/sprint.py
index 1d45f9f..3de0924 100644
--- a/scripts/gyms/sprint.py
+++ b/scripts/gyms/sprint.py
@@ -56,6 +56,7 @@ class sprint(gym.Env):
 
         self.walk_rel_orientation = None
         self.walk_rel_target = None
+        self.walk_target = None
         self.walk_distance = None
         self.act = np.zeros(16, np.float32) # memory variable
 
@@ -232,10 +233,12 @@ class sprint(gym.Env):
         w = self.player.world
         t = w.time_local_ms
         self.reset_time = t
-
-        distance = 15 - r.loc_head_position[0]
-        self.walk_rel_target = (15, self.Gen_player_pos[1])
+        self.generate_random_target()
+        distance = np.linalg.norm(self.walk_target[:2] - self.Gen_player_pos[:2])
         self.walk_distance = distance
+        self.walk_rel_target = M.rotate_2d_vec(
+            (self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]),
+            -r.imu_torso_orientation)
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target)
 
         for _ in range(25):
@@ -279,40 +282,24 @@ class sprint(gym.Env):
         Draw.clear_all()
         self.player.terminate()
 
-    def change_target(self):
-        original_angle = self.walk_rel_orientation
+    def generate_random_target(self, x_range=(-12, 12), y_range=(-9, 9)):
 
-        orientations = random.choice([-1, 1])
-        random_angle_delta = orientations * np.random.uniform(10, 75) # 单位是度
-
-        # 新的角度
-        new_angle = original_angle + np.radians(random_angle_delta) # 转换为弧度
-
-        # 计算新的目标向量,保持原来的距离不变
-        new_walk_rel_target = np.array([
-            15,
-            np.sin(new_angle) * self.walk_distance * 3
-        ])
-
-        # 保存新的目标向量
-        self.walk_rel_target = new_walk_rel_target
+        x = np.random.uniform(x_range[0], x_range[1])
+        y = np.random.uniform(y_range[0], y_range[1])
+        self.walk_target = np.array([x, y])
 
 
     def step(self, action):
         r = (self. player.world.robot)
         w = self.player.world
-        current_time = time.time()
-        if current_time - self.last_target_update_time > 2:
-            self.change_target()
-            self.last_target_update_time = current_time
 
         internal_dist = np.linalg.norm(self.internal_target)
         action_mult = 1 if internal_dist > 0.2 else (0.7 / 0.2) * internal_dist + 0.3
 
         self.walk_rel_target = M.rotate_2d_vec(
-            (15 - r.loc_head_position[0], self.Gen_player_pos[1] - r.loc_head_position[1]), -r.imu_torso_orientation)
-        self.walk_distance = np.linalg.norm(self.walk_rel_target)
+            (self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]), -r.imu_torso_orientation)
+        self.walk_distance = np.linalg.norm(self.walk_target[:2] - self.Gen_player_pos[:2])
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
 
         # exponential moving average
 
@@ -349,17 +336,15 @@ class sprint(gym.Env):
         robot_speed = r.loc_torso_velocity[0]
         direction_error = abs(self.walk_rel_orientation)
         direction_error = min(direction_error, 10)
-        reward = robot_speed * (1 - direction_error / 10)
-        if np.linalg.norm(r.loc_torso_position[:2] - self.walk_rel_target,ord='fro') < 1:
-            reward += 50
+        reward = robot_speed * (3 - 0.5 * direction_error / 10)
+        if self.walk_distance < 1:
+            reward += 20
+            self.generate_random_target()
 
         if self.player.behavior.is_ready("Get_Up"):
             self.terminal = True
-        elif w.time_local_ms - self.reset_time > 40000:
+        elif w.time_local_ms - self.reset_time > 100000:
             self.terminal = True
-        elif r.loc_torso_position[0] > 14.5:
-            self.terminal = True
-            reward += 500
         else:
             self.terminal = False
         return obs, reward, self.terminal, {}