get_path_to_target

This commit is contained in:
Her-darling 2024-11-16 17:17:44 +08:00
parent 2859bdcce2
commit 2fd1f8c3ef

View File

@@ -232,10 +232,10 @@ class sprint(gym.Env):
         t = w.time_local_ms
         self.reset_time = t
-        self.target = np.array([3, 0])
+        self.target = np.array([10, 0])
         self.walk_rel_target = self.path_manager.get_path_to_target(target=self.target)[0]
         self.walk_distance = self.path_manager.get_path_to_target(target=self.target)[2]
-        self.walk_rel_orientation = self.path_manager.get_path_to_target(target=self.target)[1]
+        self.walk_rel_orientation = self.path_manager.get_path_to_target(target=self.target)[1] - r.imu_torso_orientation
         for _ in range(25):
             self.player.scom.unofficial_beam(self.Gen_player_pos, 0)  # beam player continuously (floating above ground)
@@ -302,7 +302,7 @@ class sprint(gym.Env):
         # self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.5
         self.walk_rel_target = self.path_manager.get_path_to_target(target=self.target)[0]
         self.walk_distance = self.path_manager.get_path_to_target(target=self.target)[2]
-        self.walk_rel_orientation = self.path_manager.get_path_to_target(target=self.target)[1]
+        self.walk_rel_orientation = self.path_manager.get_path_to_target(target=self.target)[1] - r.imu_torso_orientation
         # exponential moving average
         self.act = 0.8 * self.act + 0.2 * action * action_mult * 0.7
@@ -334,14 +334,9 @@ class sprint(gym.Env):
         self.sync()
         self.step_counter += 1
         obs = self.observe()
-        unit_vector = (self.walk_rel_target - r.loc_head_position[:2]) / np.linalg.norm(self.walk_rel_target - r.loc_head_position[:2])
-        if np.linalg.norm(r.loc_torso_velocity[:2]) != 0:
-            cos_theta = np.dot(unit_vector, r.loc_torso_velocity[:2]) / (
-                np.linalg.norm(unit_vector) * np.linalg.norm(r.loc_torso_velocity[:2]))
-        else:
-            cos_theta = 0
-        reward = np.linalg.norm(r.loc_torso_velocity[:2]) * cos_theta * 0.2
+        direction_error = abs(self.walk_rel_orientation)
+        direction_error = min(direction_error, 10)
+        reward = np.linalg.norm(r.loc_torso_velocity[:2])**2 * (1 - direction_error/10) * 0.1
         if np.linalg.norm(self.target - r.loc_head_position[:2]) < 0.3:
             reward += 50
             self.generate_random_target(self.target)