@@ -1,6 +1,6 @@
 import math
 import random
+import time
 from agent.Base_Agent import Base_Agent as Agent
 from behaviors.custom.Step.Step import Step
 from world.commons.Draw import Draw
@@ -79,6 +79,7 @@ class sprint(gym.Env):
         self.player.scom.unofficial_move_ball((0, 0, 0))
 
         self.gait: Step_Generator = self.player.behavior.get_custom_behavior_object("Walk").env.step_generator
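+        # wall-clock time of the last walk-target change, consumed by step() below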
+        self.last_target_update_time = time.time()
 
     def observe(self, init=False):
         r = self.player.world.robot
@@ -278,12 +279,29 @@ class sprint(gym.Env):
         Draw.clear_all()
         self.player.terminate()
 
+    def change_target(self):
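+        # rotate the walk target by a random offset of up to ±10 degrees, keeping
+        # the same distance (assumes M.vector_angle returns radians to match np.cos/np.sin)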
+        original_angle = M.vector_angle(self.walk_rel_target)
+        random_angle_delta = np.random.uniform(-10, 10)
+        new_angle = original_angle + np.radians(random_angle_delta)
+        new_walk_rel_target = np.array([
+            np.cos(new_angle) * self.walk_distance,
+            np.sin(new_angle) * self.walk_distance
+        ])
+        self.walk_rel_target = new_walk_rel_target
+
     def step(self, action):
         r = self.player.world.robot
         w = self.player.world
 
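+        # roughly once per second of wall-clock time, re-randomize the relative walk target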
+        current_time = time.time()
+        if current_time - self.last_target_update_time > 1:
+            self.change_target()
+            self.last_target_update_time = current_time
+
         internal_dist = np.linalg.norm(self.internal_target)
         action_mult = 1 if internal_dist > 0.2 else (0.7 / 0.2) * internal_dist + 0.3
         self.walk_rel_target = M.rotate_2d_vec(
@@ -292,7 +310,7 @@ class sprint(gym.Env):
         self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
 
         # exponential moving average
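+        # action_mult attenuates commands near the internal target; the trailing 0.7 acts as a fixed gain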
-        self.act = 0.6 * self.act + 0.4 * action
+        self.act = 0.7 * self.act + 0.3 * action * action_mult * 0.7
 
         # execute Step behavior to extract the target positions of each leg (we will override these targets)
         lfy, lfz, rfy, rfz = self.step_generator.get_target_positions(self.step_counter == 0, self.STEP_DUR,
@@ -322,22 +340,22 @@ class sprint(gym.Env):
         self.sync()
         self.step_counter += 1
         obs = self.observe()
 
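+        # reward forward torso speed, attenuated linearly as the heading error approaches its cap of 10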
+        robot_speed = r.loc_torso_velocity[0]
         direction_error = abs(self.walk_rel_orientation)
         direction_error = min(direction_error, 10)
-        reward = (r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2) * (1 - direction_error / 10)
+        reward = robot_speed * (1 - direction_error / 10)
 
         if self.player.behavior.is_ready("Get_Up"):
             self.terminal = True
-        elif w.time_local_ms - self.reset_time > 30000:
+        elif w.time_local_ms - self.reset_time > 7500 * 2:
             self.terminal = True
         elif r.loc_torso_position[0] > 14.5:
             self.terminal = True
             reward += 500
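+        # shaping: while the episode continues and the robot is past x=0, add a bonus proportional to progress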
+        elif r.loc_torso_position[0] > 0:
+            reward += 3 * r.loc_torso_position[0]
         else:
             self.terminal = False
 
         return obs, reward, self.terminal, {}
 
 
 class Train(Train_Base):
     def __init__(self, script) -> None:
         super().__init__(script)