best

parent 5dbbf571d1
commit 57b1496ff6
@@ -1,6 +1,6 @@
import math
import random

import time
from agent.Base_Agent import Base_Agent as Agent
from behaviors.custom.Step.Step import Step
from world.commons.Draw import Draw
@@ -11,6 +11,7 @@ from scripts.commons.Train_Base import Train_Base
from time import sleep
import os, gym
import numpy as np
from math_ops.Math_Ops import Math_Ops as U
from math_ops.Math_Ops import Math_Ops as M
from behaviors.custom.Step.Step_Generator import Step_Generator

@@ -40,7 +41,6 @@ class sprint(gym.Env):
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
self.step_counter = 0 # to limit episode size
self.ik = self.player.inv_kinematics
self.target = None

# Step behavior defaults
self.STEP_DUR = 10
@@ -53,9 +53,10 @@ class sprint(gym.Env):
max_ankle_z = nao_specs[5] * 1.8
self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)
self.DEFAULT_ARMS = np.array([-90, -90, 8, 8, 90, 90, 70, 70], np.float32)
self.path_manager = self.player.path_manager

self.walk_rel_orientation = None
self.walk_rel_target = None
self.walk_target = None
self.walk_distance = None
self.act = np.zeros(16, np.float32) # memory variable

@@ -79,6 +80,7 @@ class sprint(gym.Env):
self.player.scom.unofficial_move_ball((0, 0, 0))

self.gait: Step_Generator = self.player.behavior.get_custom_behavior_object("Walk").env.step_generator
self.last_target_update_time = time.time()

def observe(self, init=False):
r = self.player.world.robot
@@ -231,11 +233,13 @@ class sprint(gym.Env):
w = self.player.world
t = w.time_local_ms
self.reset_time = t

self.target = np.array([10, 0])
self.walk_rel_target = self.path_manager.get_path_to_target(target=self.target)[0]
self.walk_distance = self.path_manager.get_path_to_target(target=self.target)[2]
self.walk_rel_orientation = (self.path_manager.get_path_to_target(target=self.target)[1]) * 0.3
self.generate_random_target(self.Gen_player_pos[:2])
distance = np.linalg.norm(self.walk_target[:2] - self.Gen_player_pos[:2])
self.walk_distance = distance
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]),
-r.imu_torso_orientation)
self.walk_rel_orientation = M.vector_angle(self.walk_rel_target)

for _ in range(25):
self.player.scom.unofficial_beam(self.Gen_player_pos, 0) # beam player continuously (floating above ground)
@@ -278,15 +282,16 @@ class sprint(gym.Env):
Draw.clear_all()
self.player.terminate()


def generate_random_target(self, position, x_range=(-15, 15), y_range=(-10, 10)):
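# rejection-sample a new target: keep drawing (x, y) until it lies at least 10 units from the given position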
while True:
x = np.random.uniform(x_range[0], x_range[1])
y = np.random.uniform(y_range[0], y_range[1])

if np.linalg.norm(np.array([x, y]) - position[:2]) >= 10:
if np.linalg.norm(np.array([x, y]) - position) >= 10:
break
self.target = np.array([x, y])

self.walk_target = np.array([x, y])
def step(self, action):

r = (self.
@@ -295,14 +300,16 @@ class sprint(gym.Env):
w = self.player.world
internal_dist = np.linalg.norm(self.internal_target)
action_mult = 1 if internal_dist > 0.2 else (0.7 / 0.2) * internal_dist + 0.3
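# scale the action down linearly (from 1.0 at distance 0.2 to 0.3 at 0) when the internal step target is closer than 0.2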
# self.walk_rel_target = M.rotate_2d_vec(
# (self.target[0] - r.loc_head_position[0], self.target[1] - r.loc_head_position[1]),
# -r.imu_torso_orientation)
# self.walk_distance = np.linalg.norm(self.walk_rel_target)
# self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.5
self.walk_rel_target = self.path_manager.get_path_to_target(target=self.target)[0]
self.walk_distance = self.path_manager.get_path_to_target(target=self.target)[2]
self.walk_rel_orientation = (self.path_manager.get_path_to_target(target=self.target)[1] - r.imu_torso_orientation) * 0.3
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]), -r.imu_torso_orientation)
self.walk_distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
if self.walk_distance <= 0.5:
self.generate_random_target(r.loc_head_position[:2])
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]),
-r.imu_torso_orientation)
self.walk_distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3
# exponential moving average
self.act = 0.8 * self.act + 0.2 * action * action_mult * 0.7

@@ -334,22 +341,18 @@ class sprint(gym.Env):
self.sync()
self.step_counter += 1
obs = self.observe()
robot_speed = np.linalg.norm(r.loc_torso_velocity[:2])
direction_error = abs(self.walk_rel_orientation)
direction_error = min(direction_error, 10)
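# cap the heading error at 10 so the (1 - direction_error / 10) reward factor below stays within [0, 1]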
reward = np.linalg.norm(r.loc_torso_velocity[:2])**2 * (1 - direction_error/10) * 0.1
if np.linalg.norm(self.target - r.loc_head_position[:2]) < 0.3:
reward += 50
self.generate_random_target(self.target)

reward = robot_speed**2 * (1 - direction_error / 10) * 0.2
if self.walk_distance < 0.5:
reward += 10

if self.player.behavior.is_ready("Get_Up"):
self.terminal = True
elif w.time_local_ms - self.reset_time > 25000 * 2:
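# timeout: 25000 * 2 = 50 000 ms of local simulation time (~50 s per episode)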
self.terminal = True
else:
self.terminal = False
return obs, reward, self.terminal, {}

class Train(Train_Base):
def __init__(self, script) -> None:
super().__init__(script)

@@ -1,6 +1,6 @@
import math
import random
import time

from agent.Base_Agent import Base_Agent as Agent
from behaviors.custom.Step.Step import Step
from world.commons.Draw import Draw
@@ -37,7 +37,6 @@ class sprint(gym.Env):
self.robot_type = r_type
self.kick_ori = 0
self.terminal = False
self.distance = None
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
self.step_counter = 0 # to limit episode size
@@ -57,7 +56,6 @@ class sprint(gym.Env):

self.walk_rel_orientation = None
self.walk_rel_target = None
self.walk_target = None
self.walk_distance = None
self.act = np.zeros(16, np.float32) # memory variable

@@ -81,7 +79,6 @@ class sprint(gym.Env):
self.player.scom.unofficial_move_ball((0, 0, 0))

self.gait: Step_Generator = self.player.behavior.get_custom_behavior_object("Walk").env.step_generator
self.last_target_update_time = time.time()

def observe(self, init=False):
r = self.player.world.robot
@@ -234,12 +231,10 @@ class sprint(gym.Env):
w = self.player.world
t = w.time_local_ms
self.reset_time = t
self.generate_random_target(self.Gen_player_pos[:2])
distance = np.linalg.norm(self.walk_target[:2] - self.Gen_player_pos[:2])

distance = 15 - r.loc_head_position[0]
self.walk_rel_target = (15, self.Gen_player_pos[1])
self.walk_distance = distance
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]),
-r.imu_torso_orientation)
self.walk_rel_orientation = M.vector_angle(self.walk_rel_target)

for _ in range(25):
@@ -283,16 +278,6 @@ class sprint(gym.Env):
Draw.clear_all()
self.player.terminate()


def generate_random_target(self, position, x_range=(-15, 15), y_range=(-10, 10)):
while True:
x = np.random.uniform(x_range[0], x_range[1])
y = np.random.uniform(y_range[0], y_range[1])

if np.linalg.norm(np.array([x, y]) - position) >= 10:
break
self.walk_target = np.array([x, y])

def step(self, action):

r = (self.
@@ -302,17 +287,10 @@ class sprint(gym.Env):
internal_dist = np.linalg.norm(self.internal_target)
action_mult = 1 if internal_dist > 0.2 else (0.7 / 0.2) * internal_dist + 0.3
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]), -r.imu_torso_orientation)
self.distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
(15 - r.loc_head_position[0], self.Gen_player_pos[1] - r.loc_head_position[1]), -r.imu_torso_orientation)
self.walk_distance = np.linalg.norm(self.walk_rel_target)
if self.distance <= 0.5:
self.generate_random_target(r.loc_head_position[:2])
self.walk_rel_target = M.rotate_2d_vec(
(self.walk_target[0] - r.loc_head_position[0], self.walk_target[1] - r.loc_head_position[1]),
-r.imu_torso_orientation)
self.distance = np.linalg.norm(self.walk_target - r.loc_head_position[:2])
self.walk_distance = np.linalg.norm(self.walk_rel_target)
self.walk_rel_orientation = M.vector_angle(self.walk_rel_target) * 0.3

# exponential moving average
self.act = 0.8 * self.act + 0.2 * action * action_mult * 0.7

@@ -344,18 +322,20 @@ class sprint(gym.Env):
self.sync()
self.step_counter += 1
obs = self.observe()
robot_speed = np.linalg.norm(r.loc_torso_velocity[:2])
direction_error = abs(self.walk_rel_orientation)
direction_error = min(direction_error, 10)
reward = robot_speed * (1 - direction_error / 10) * 0.6
if self.distance < 0.5:
reward += 10

reward = r.loc_torso_velocity[0] - r.loc_torso_velocity[1] * 0.2
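# reward forward (x) torso velocity minus 0.2 * lateral (y) torso velocity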
if self.player.behavior.is_ready("Get_Up"):
self.terminal = True

elif w.time_local_ms - self.reset_time > 7500 * 2:
self.terminal = True
elif r.loc_torso_position[0] > 14.5:
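# reached the far end of the field (x > 14.5, consistent with x_range=(-15, 15)): terminate the episode and grant a large bonus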
self.terminal = True
reward += 500
else:
self.terminal = False
return obs, reward, self.terminal, {}


class Train(Train_Base):
def __init__(self, script) -> None:
super().__init__(script)