Init gym
372
scripts/gyms/Dribble.py
Normal file
@@ -0,0 +1,372 @@
|
||||
import math
|
||||
|
||||
from agent.Base_Agent import Base_Agent as Agent
|
||||
from world.commons.Draw import Draw
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
from scripts.commons.Server import Server
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from time import sleep
|
||||
import os, gym
|
||||
import numpy as np
|
||||
from math_ops.Math_Ops import Math_Ops as M
|
||||
from behaviors.custom.Step.Step_Generator import Step_Generator
|
||||
import random
|
||||
|
||||
|
||||
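'''
Objective:
Learn to dribble the ball towards a target direction
(the reward is the ball speed projected onto the desired dribble direction)
----------
- class Dribble: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''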
class Dribble(gym.Env):
|
||||
def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
|
||||
self.abs_ori = 75
|
||||
self.ball_dist_hip_center_2d = 0
|
||||
self.ball_dist_hip_center = None
|
||||
self.internal_rel_orientation = None
|
||||
self.dribble_speed = 1
|
||||
self.gym_last_internal_abs_ori = None
|
||||
self.internal_target_vel = None
|
||||
self.Gen_player_pos = None
|
||||
self.internal_target = None
|
||||
self.values_l = None
|
||||
self.values_r = None
|
||||
self.reset_time = None
|
||||
self.behavior = None
|
||||
self.robot_type = r_type
|
||||
self.kick_ori = 0
|
||||
self.terminal = False
|
||||
|
||||
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, True)
|
||||
|
||||
self.step_counter = 0
|
||||
self.ik = self.player.inv_kinematics
|
||||
self.dribble_rel_orientation = 0
|
||||
|
||||
self.STEP_DUR = 8
|
||||
self.STEP_Z_SPAN = 0.02
|
||||
self.STEP_Z_MAX = 0.70
|
||||
nao_specs = self.ik.NAO_SPECS
|
||||
self.leg_length = nao_specs[1] + nao_specs[3]
|
||||
feet_y_dev = nao_specs[0] * 1.12
|
||||
sample_time = self.player.world.robot.STEPTIME
|
||||
max_ankle_z = nao_specs[5]
|
||||
self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)
|
||||
self.DEFAULT_ARMS = np.array([-90, -90, 8, 8, 90, 90, 70, 70], np.float32)
|
||||
|
||||
self.act = np.zeros(16, np.float32)
|
||||
self.path_manager = self.player.path_manager
|
||||
|
||||
obs_size = 76
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = gym.spaces.Box(
|
||||
low=np.full(obs_size, -np.inf, np.float32),
|
||||
high=np.full(obs_size, np.inf, np.float32),
|
||||
dtype=np.float32
|
||||
)
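# Observation layout (filled in observe()):
#   [0] step counter, [1:3] head height and z-velocity, [3:5] torso roll/pitch,
#   [5:8] gyroscope, [8:11] accelerometer, [11:23] foot resistance perceptors (lf, rf),
#   [23:43] joint positions, [43:63] joint speeds, [63:67] Step_Generator state,
#   [67:70] ball velocity rel. to hip center, [70:73] ball position rel. to hip center,
#   [73] ball distance, [74:76] internal relative target orientation and its rate of change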
|
||||
|
||||
self.ball_x_center = 0.21
|
||||
self.ball_y_center = -0.045
|
||||
|
||||
MAX = np.finfo(np.float32).max
|
||||
self.no_of_actions = act_size = 16
|
||||
self.action_space = gym.spaces.Box(
|
||||
low=np.full(act_size, -MAX, np.float32),
|
||||
high=np.full(act_size, MAX, np.float32),
|
||||
dtype=np.float32
|
||||
)
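# Action layout (applied in step()): a[0:3]/a[3:6] left/right ankle position offsets,
# a[6:9]/a[9:12] left/right foot rotation offsets, a[12:16] arm joint offsets,
# all added as residuals on top of the Step_Generator targets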
|
||||
|
||||
self.player.scom.unofficial_move_ball((0, 0, 0.042))
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
|
||||
def observe(self, init=False, virtual_ball=False):
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
|
||||
if init:
|
||||
self.step_counter = 0
|
||||
self.act = np.zeros(16, np.float32)
|
||||
|
||||
self.obs[0] = min(self.step_counter, 12 * 8) / 100
|
||||
self.obs[1] = r.loc_head_z * 3
|
||||
self.obs[2] = r.loc_head_z_vel / 2
|
||||
self.obs[3] = r.imu_torso_roll / 15
|
||||
self.obs[4] = r.imu_torso_pitch / 15
|
||||
self.obs[5:8] = r.gyro / 100
|
||||
self.obs[8:11] = r.acc / 10
|
||||
|
||||
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
|
||||
self.obs[23:43] = r.joints_position[2:22] / 100
|
||||
self.obs[43:63] = r.joints_speed[2:22] / 6.1395
|
||||
|
||||
if init:
|
||||
self.obs[63] = 1
|
||||
self.obs[64] = 1
|
||||
self.obs[65] = 0
|
||||
self.obs[66] = 0
|
||||
else:
|
||||
self.obs[63] = self.step_generator.external_progress
|
||||
self.obs[64] = float(self.step_generator.state_is_left_active)
|
||||
self.obs[65] = float(not self.step_generator.state_is_left_active)
|
||||
self.obs[66] = math.sin(self.step_generator.state_current_ts / self.step_generator.ts_per_step * math.pi)
|
||||
|
||||
ball_rel_hip_center = self.ik.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
|
||||
self.ball_dist_hip_center = np.linalg.norm(ball_rel_hip_center)
|
||||
ball_rel_torso_xy = w.ball_rel_torso_cart_pos[:2]
|
||||
self.ball_dist_hip_center_2d = np.linalg.norm(ball_rel_torso_xy)
|
||||
|
||||
if init:
|
||||
self.obs[67:70] = (0, 0, 0)
|
||||
elif w.ball_is_visible:
|
||||
self.obs[67:70] = (ball_rel_hip_center - self.obs[70:73]) * 10
|
||||
|
||||
self.obs[70:73] = ball_rel_hip_center
|
||||
self.obs[73] = self.ball_dist_hip_center * 2
|
||||
|
||||
if virtual_ball:
|
||||
self.obs[67:74] = (0, 0, 0, 0.05, 0, -0.175, 0.36)
|
||||
|
||||
MAX_ROTATION_DIFF = 20
|
||||
MAX_ROTATION_DIST = 80
|
||||
|
||||
if init:
|
||||
self.internal_rel_orientation = 0
|
||||
self.internal_target_vel = 0
|
||||
self.gym_last_internal_abs_ori = r.imu_torso_orientation
|
||||
|
||||
if w.vision_is_up_to_date:
|
||||
previous_internal_rel_orientation = np.copy(self.internal_rel_orientation)
|
||||
internal_ori_diff = np.clip(
|
||||
M.normalize_deg(self.dribble_rel_orientation - self.internal_rel_orientation),
|
||||
-MAX_ROTATION_DIFF, MAX_ROTATION_DIFF
|
||||
)
|
||||
self.internal_rel_orientation = np.clip(
|
||||
M.normalize_deg(self.internal_rel_orientation + internal_ori_diff),
|
||||
-MAX_ROTATION_DIST, MAX_ROTATION_DIST
|
||||
)
|
||||
self.internal_target_vel = self.internal_rel_orientation - previous_internal_rel_orientation
|
||||
self.gym_last_internal_abs_ori = self.internal_rel_orientation + r.imu_torso_orientation
|
||||
|
||||
self.obs[74] = self.internal_rel_orientation / MAX_ROTATION_DIST
|
||||
self.obs[75] = self.internal_target_vel / MAX_ROTATION_DIFF
|
||||
|
||||
return self.obs
|
||||
|
||||
def execute_ik(self, l_pos, l_rot, r_pos, r_rot):
|
||||
r = self.player.world.robot
|
||||
|
||||
indices, self.values_l, error_codes = self.ik.leg(l_pos, l_rot, True, dynamic_pose=False)
|
||||
r.set_joints_target_position_direct(indices, self.values_l, harmonize=False)
|
||||
|
||||
indices, self.values_r, error_codes = self.ik.leg(r_pos, r_rot, False, dynamic_pose=False)
|
||||
r.set_joints_target_position_direct(indices, self.values_r, harmonize=False)
|
||||
|
||||
def sync(self):
|
||||
r = self.player.world.robot
|
||||
self.player.scom.commit_and_send(r.get_command())
|
||||
self.player.scom.receive()
|
||||
|
||||
def reset(self):
|
||||
self.abs_ori = 45
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
|
||||
Gen_ball_pos = [-9, 0, 0]
|
||||
a = random.uniform(0, 3)
|
||||
b = random.uniform(-3, 3)
|
||||
self.Gen_player_pos = (Gen_ball_pos[0] - a, b, 0.5)
|
||||
self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
|
||||
|
||||
self.step_counter = 0
|
||||
self.behavior = self.player.behavior
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
self.reset_time = t
|
||||
self.b_rel = w.ball_rel_torso_cart_pos[:2]
|
||||
self.path_manager = self.player.path_manager
|
||||
|
||||
for _ in range(25):
|
||||
self.player.scom.unofficial_beam(self.Gen_player_pos, 0)
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
self.player.scom.unofficial_beam(self.Gen_player_pos, 0)
|
||||
r.joints_target_speed[0] = 0.01
|
||||
self.sync()
|
||||
|
||||
for _ in range(7):
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
while True:
|
||||
self.b_rel = w.ball_rel_torso_cart_pos[:2]
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
|
||||
if 0.25 > np.linalg.norm(self.b_rel) and w.ball_is_visible:
|
||||
break
|
||||
else:
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
|
||||
reset_walk = self.behavior.previous_behavior != "Walk"
|
||||
rel_target = self.b_rel - 0.25 * M.normalize_vec(self.b_rel)
|
||||
rel_ori = M.vector_angle(self.b_rel)
|
||||
dist = max(0.08, float(np.linalg.norm(rel_target)) * 0.7)
|
||||
self.behavior.execute_sub_behavior("Walk", reset_walk, rel_target, False, rel_ori, False, dist)
|
||||
self.sync()
|
||||
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
return self.observe(True)
|
||||
|
||||
def render(self, mode='human', close=False):
|
||||
return
|
||||
|
||||
def close(self):
|
||||
Draw.clear_all()
|
||||
self.player.terminate()
|
||||
|
||||
def step(self, action):
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = self.step_counter
|
||||
dribble_target = (15, random.choice([5, -5]))
|
||||
if w.ball_abs_pos[1] > 5:
|
||||
dribble_target = (15, -5)
|
||||
if w.ball_abs_pos[1] < -5:
|
||||
dribble_target = (15, 5)
|
||||
|
||||
self.dribble_rel_orientation = self.path_manager.get_dribble_path(optional_2d_target=dribble_target)[1]
|
||||
|
||||
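# exponential moving average: smooth the raw policy action and scale it by the dribble speed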
self.act = 0.75 * self.act + 0.25 * action * 0.7 * 0.95 * self.dribble_speed
|
||||
|
||||
lfy, lfz, rfy, rfz = self.step_generator.get_target_positions(
|
||||
self.step_counter == 0, self.STEP_DUR, self.STEP_Z_SPAN, self.leg_length * self.STEP_Z_MAX
|
||||
)
|
||||
|
||||
a = self.act
|
||||
l_ankle_pos = (a[0] * 0.025 - 0.01, a[1] * 0.01 + lfy, a[2] * 0.01 + lfz)
|
||||
r_ankle_pos = (a[3] * 0.025 - 0.01, a[4] * 0.01 + rfy, a[5] * 0.01 + rfz)
|
||||
l_foot_rot = a[6:9] * (2, 2, 3)
|
||||
r_foot_rot = a[9:12] * (2, 2, 3)
|
||||
|
||||
l_foot_rot[2] = max(0, l_foot_rot[2] + 18.3)
|
||||
r_foot_rot[2] = min(0, r_foot_rot[2] - 18.3)
|
||||
|
||||
arms = np.copy(self.DEFAULT_ARMS)
|
||||
|
||||
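# arm motion: sinusoidal forward/backward swing driven by the step counter, plus a fixed inward offset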
amplitude = 15
|
||||
period = 25
|
||||
forward_arm_movement = amplitude * math.sin(2 * math.pi * t / period)
|
||||
arms[0] += forward_arm_movement
|
||||
arms[1] += forward_arm_movement
|
||||
|
||||
inward_arm_movement = 10
|
||||
arms[2] += inward_arm_movement
|
||||
arms[3] -= inward_arm_movement
|
||||
|
||||
arms[0:4] += a[12:16] * 4
|
||||
|
||||
self.execute_ik(l_ankle_pos, l_foot_rot, r_ankle_pos, r_foot_rot)
|
||||
r.set_joints_target_position_direct(slice(14, 22), arms, harmonize=False)
|
||||
self.sync()
|
||||
self.step_counter += 1
|
||||
|
||||
obs = self.observe()
|
||||
|
||||
angle_rad = np.radians(self.gym_last_internal_abs_ori)
|
||||
unit_vector = np.array([np.cos(angle_rad), np.sin(angle_rad)])
|
||||
|
||||
if np.linalg.norm(w.ball_cheat_abs_vel[:2]) != 0:
|
||||
cos_theta = np.dot(unit_vector, w.ball_cheat_abs_vel[:2]) / (
|
||||
np.linalg.norm(unit_vector) * np.linalg.norm(w.ball_cheat_abs_vel[:2])
|
||||
)
|
||||
else:
|
||||
cos_theta = 0
|
||||
|
||||
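# reward: ball speed projected onto the desired dribble direction (zeroed below when the
# ball drifts away from the feet or the torso tilts too much)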
reward = np.linalg.norm(w.ball_cheat_abs_vel) * cos_theta
|
||||
|
||||
if abs(w.ball_rel_torso_cart_pos[1]) > 0.02:
|
||||
reward = 0
|
||||
|
||||
if self.ball_dist_hip_center_2d < 0.115 or self.ball_dist_hip_center_2d > 0.2:
|
||||
reward = 0
|
||||
|
||||
if abs(r.imu_torso_roll) > 10 or abs(r.imu_torso_pitch) > 10:
|
||||
reward = 0
|
||||
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.terminal = True
|
||||
elif (w.time_local_ms - self.reset_time > 7500 * 15 or
|
||||
np.linalg.norm(w.ball_cheat_abs_pos[:2] - r.loc_head_position[:2]) > 0.25 or
|
||||
not w.is_ball_abs_pos_from_vision):
|
||||
self.terminal = True
|
||||
else:
|
||||
self.terminal = False
|
||||
|
||||
return obs, reward, self.terminal, {}
|
||||
|
||||
|
||||
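# ------------------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the training pipeline): stepping the
# environment manually with a random policy. It assumes a local rcssserver3d instance is
# already running; the IP and ports below are placeholders.
#
#   env = Dribble("127.0.0.1", 3100, 3200, r_type=1, enable_draw=False)
#   obs = env.reset()
#   for _ in range(100):
#       obs, reward, done, _ = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()
#   env.close()
# ------------------------------------------------------------------------------------------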
class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
n_envs = min(8, os.cpu_count())
|
||||
n_steps_per_env = 128
|
||||
minibatch_size = 64
|
||||
total_steps = 500000000
|
||||
learning_rate = 3e-4
|
||||
folder_name = f'Dribble_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
def init_env(i_env):
|
||||
def thunk():
|
||||
return Dribble(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type, False)
|
||||
|
||||
return thunk
|
||||
|
||||
servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1)
|
||||
|
||||
env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
|
||||
eval_env = SubprocVecEnv([init_env(n_envs)])
|
||||
|
||||
try:
|
||||
if "model_file" in args:
|
||||
model = PPO.load(
|
||||
args["model_file"], env=env, device="cuda", n_envs=n_envs,
|
||||
n_steps=n_steps_per_env, batch_size=minibatch_size, learning_rate=learning_rate
|
||||
)
|
||||
else:
|
||||
model = PPO(
|
||||
"MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate, device="cuda"
|
||||
)
|
||||
|
||||
model_path = self.learn_model(
|
||||
model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
|
||||
backup_env_file=__file__
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1)
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
def test(self, args):
|
||||
server = Server(self.server_p - 1, self.monitor_p, 1)
|
||||
env = Dribble(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
|
||||
model = PPO.load(args["model_file"], env=env)
|
||||
|
||||
try:
|
||||
self.export_model(args["model_file"], args["model_file"] + ".pkl", False)
|
||||
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
env.close()
|
||||
server.kill()
|
||||
324
scripts/gyms/Kick_10M.py
Normal file
@@ -0,0 +1,324 @@
|
||||
from random import random
|
||||
|
||||
from agent.Base_Agent import Base_Agent as Agent
|
||||
from behaviors.custom.Step.Step import Step
|
||||
from world.commons.Draw import Draw
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
from scripts.commons.Server import Server
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from time import sleep
|
||||
import os, gym
|
||||
import numpy as np
|
||||
from math_ops.Math_Ops import Math_Ops as U
|
||||
from math_ops.Math_Ops import Math_Ops as M
|
||||
from behaviors.custom.Step.Step_Generator import Step_Generator
|
||||
|
||||
'''
Objective:
Learn a kick that sends the ball roughly 10 m forward, after approaching it with the Walk behavior
----------
- class Kick_10M: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''
|
||||
|
||||
|
||||
class Kick_10M(gym.Env):
|
||||
def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
|
||||
self.lock_flag = False
|
||||
self.sleep = 0
|
||||
self.reset_time = None
|
||||
self.behavior = None
|
||||
self.path_manager = None
|
||||
self.bias_dir = None
|
||||
self.robot_type = r_type
|
||||
self.kick_ori = 0
|
||||
self.terminal = False
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.ball_pos = np.array([0, 0, 0])
|
||||
|
||||
self.step_obj: Step = self.player.behavior.get_custom_behavior_object("Step") # Step behavior object
|
||||
|
||||
# State space
|
||||
obs_size = 63
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = gym.spaces.Box(low=np.full(obs_size, -np.inf, np.float32),
|
||||
high=np.full(obs_size, np.inf, np.float32), dtype=np.float32)
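# Observation layout (filled in observe()): [0] step counter, [1:3] head height/z-velocity,
# [3:5] torso roll/pitch, [5:8] gyroscope, [8:11] accelerometer, [11:23] foot resistance
# perceptors, [23:39] joint positions, [39:55] joint speeds, [55:61] ball position (and its
# change) relative to the hip center, [61] ball distance, [62] kick orientation error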
|
||||
|
||||
# Action space
|
||||
MAX = np.finfo(np.float32).max
|
||||
self.no_of_actions = act_size = 16
|
||||
self.action_space = gym.spaces.Box(low=np.full(act_size, -MAX, np.float32),
|
||||
high=np.full(act_size, MAX, np.float32), dtype=np.float32)
|
||||
|
||||
# Place ball far away to keep landmarks in FoV (head follows ball while using Step behavior)
|
||||
self.player.scom.unofficial_move_ball((14, 0, 0.042))
|
||||
|
||||
self.ball_x_center = 0.21
|
||||
self.ball_y_center = -0.045
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
self.player.scom.unofficial_move_ball((0, 0, 0))
|
||||
|
||||
def observe(self, init=False):
|
||||
w = self.player.world
|
||||
r = self.player.world.robot
|
||||
|
||||
if init:
|
||||
self.step_counter = 0
|
||||
self.obs[0] = self.step_counter / 20
|
||||
self.obs[1] = r.loc_head_z * 3
|
||||
self.obs[2] = r.loc_head_z_vel / 2
|
||||
self.obs[3] = r.imu_torso_roll / 15
|
||||
self.obs[4] = r.imu_torso_pitch / 15
|
||||
self.obs[5:8] = r.gyro / 100
|
||||
self.obs[8:11] = r.acc / 10
|
||||
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[23:39] = r.joints_position[2:18] / 100
|
||||
self.obs[39:55] = r.joints_speed[2:18] / 6.1395
|
||||
ball_rel_hip_center = self.player.inv_kinematics.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
|
||||
if init:
|
||||
self.obs[55:58] = (0, 0, 0)
|
||||
elif w.ball_is_visible:
|
||||
self.obs[55:58] = (ball_rel_hip_center - self.obs[58:61]) * 10
|
||||
self.obs[58:61] = ball_rel_hip_center
|
||||
self.obs[61] = np.linalg.norm(ball_rel_hip_center) * 2
|
||||
self.obs[62] = U.normalize_deg(self.kick_ori - r.imu_torso_orientation) / 30
|
||||
'''
|
||||
Expected observations for walking parameters/state (example):
|
||||
Time step R 0 1 2 0 1 2 3 4
|
||||
Progress 1 0 .5 1 0 .25 .5 .75 1
|
||||
Left leg active T F F F T T T T T
|
||||
Parameters A A A B B B B B C
|
||||
Example note: (A) has a step duration of 3ts, (B) has a step duration of 5ts
|
||||
'''
|
||||
return self.obs
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
r = self.player.world.robot
|
||||
self.player.scom.commit_and_send(r.get_command())
|
||||
self.player.scom.receive()
|
||||
|
||||
def reset(self):
|
||||
# print("reset")
|
||||
'''
|
||||
Reset and stabilize the robot
|
||||
Note: for some behaviors it would be better to reduce stabilization or add noise
|
||||
'''
|
||||
self.lock_flag = False
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
Gen_ball_pos = [random() * 5 - 9, random() * 6 - 3, 0]
|
||||
Gen_player_pos = (random() * 3 + Gen_ball_pos[0], random() * 3 + Gen_ball_pos[1], 0.5)
|
||||
self.ball_pos = np.array(Gen_ball_pos)
|
||||
self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
|
||||
self.sleep = 0
|
||||
self.step_counter = 0
|
||||
self.behavior = self.player.behavior
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
self.path_manager = self.player.path_manager
|
||||
gait: Step_Generator = self.behavior.get_custom_behavior_object("Walk").env.step_generator
|
||||
self.reset_time = t
|
||||
|
||||
for _ in range(25):
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0) # beam player continuously (floating above ground)
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
# beam player to ground
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0)
|
||||
r.joints_target_speed[0] = 0.01  # move head to trigger physics update (rcssserver3d bug when no joint is moving)
|
||||
self.sync()
|
||||
|
||||
# stabilize on ground
|
||||
for _ in range(7):
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
# walk to ball
|
||||
while w.time_local_ms - self.reset_time <= 50000:
|
||||
direction = 0
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
self.bias_dir = [0.09, 0.1, 0.14, 0.08, 0.05][r.type]
|
||||
biased_dir = M.normalize_deg(direction + self.bias_dir) # add bias to rectify direction
|
||||
ang_diff = abs(
|
||||
M.normalize_deg(biased_dir - r.loc_torso_orientation)) # the reset was learned with loc, not IMU
|
||||
|
||||
next_pos, next_ori, dist_to_final_target = self.path_manager.get_path_to_ball(
|
||||
x_ori=biased_dir, x_dev=-self.ball_x_center, y_dev=-self.ball_y_center, torso_ori=biased_dir)
|
||||
if (w.ball_last_seen > t - w.VISUALSTEP_MS and ang_diff < 5 and
|
||||
t - w.ball_abs_pos_last_update < 100 and # ball absolute location is recent
|
||||
dist_to_final_target < 0.025 and  # close enough to the final kick position
|
||||
not gait.state_is_left_active and gait.state_current_ts == 2): # to avoid kicking immediately without preparation & stability
|
||||
break
|
||||
else:
|
||||
dist = max(0.07, dist_to_final_target)
|
||||
reset_walk = self.behavior.previous_behavior != "Walk" # reset walk if it wasn't the previous behavior
|
||||
self.behavior.execute_sub_behavior("Walk", reset_walk, next_pos, True, next_ori, True,
|
||||
dist) # target, is_target_abs, ori, is_ori_abs, distance
|
||||
|
||||
self.sync()
|
||||
|
||||
# memory variables
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
|
||||
return self.observe(True)
|
||||
|
||||
def render(self, mode='human', close=False):
|
||||
return
|
||||
|
||||
def close(self):
|
||||
Draw.clear_all()
|
||||
self.player.terminate()
|
||||
|
||||
def step(self, action):
|
||||
r = self.player.world.robot
|
||||
b = self.player.world.ball_abs_pos
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
if self.step_counter < 5:
|
||||
r.joints_target_speed[2:18] = action * [4, 4, 1, 1, 0.2, 0.2, 0.2, 0.2, 5, 5, 3.5, 3.5, 1, 1, 1, 1]
|
||||
r.joints_target_speed[12] += 1
|
||||
r.joints_target_speed[6] += 3
|
||||
r.joints_target_speed[5] -= 1
|
||||
r.joints_target_speed[8] += 3
|
||||
r.joints_target_speed[9] -= 5
|
||||
else:
|
||||
r.joints_target_speed[2:18] = action * [7.03, 7.03, 3, 3, 7.03, 7.03, 7, 7, 7, 7, 2, 2, 1, 1, 1, 1]
|
||||
r.joints_target_speed[6] -= 5
|
||||
r.joints_target_speed[7] += 6
|
||||
r.joints_target_speed[9] += 6
|
||||
r.joints_target_speed[11] -= 0.7
|
||||
r.set_joints_target_position_direct([0, 1], np.array([0, -44], float), False)
|
||||
|
||||
self.sync() # run simulation step
|
||||
self.step_counter += 1
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
|
||||
# terminal state: the robot is falling or timeout
|
||||
if self.step_counter > 12:
|
||||
obs = self.observe()
|
||||
self.player.scom.unofficial_beam((-14.5, 0, 0.51), 0)  # beam the player out of the way while the kick outcome is measured
|
||||
waiting_steps = 0
|
||||
high = 0
|
||||
while waiting_steps < 650:  # wait additional simulation steps so the kick outcome can be measured
|
||||
if w.ball_cheat_abs_pos[2] > high:
|
||||
high = w.ball_cheat_abs_pos[2]
|
||||
self.sync()  # keep advancing the simulation
|
||||
waiting_steps += 1
|
||||
dis = np.linalg.norm(self.ball_pos - w.ball_cheat_abs_pos)
|
||||
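# reward peaks when the ball travels about 10 m, with penalties for lateral deviation
# and for a peak ball height away from 1.5 m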
reward = 10 - abs(10 - dis) - abs(w.ball_cheat_abs_pos[1] - self.ball_pos[1]) - abs(1.5-high) * 3
|
||||
# print(reward)
|
||||
self.terminal = True
|
||||
|
||||
else:
|
||||
obs = self.observe()
|
||||
reward = 0
|
||||
self.terminal = False
|
||||
|
||||
return obs, reward, self.terminal, {}
|
||||
|
||||
|
||||
class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
|
||||
# --------------------------------------- Learning parameters
|
||||
n_envs = min(14, os.cpu_count())
|
||||
n_steps_per_env = 128 # RolloutBuffer is of size (n_steps_per_env * n_envs)
|
||||
minibatch_size = 64 # should be a factor of (n_steps_per_env * n_envs)
|
||||
total_steps = 30000000
|
||||
learning_rate = 3e-5
|
||||
folder_name = f'Kick_10M_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
# print("Model path:", model_path)
|
||||
|
||||
# --------------------------------------- Run algorithm
|
||||
def init_env(i_env):
|
||||
def thunk():
|
||||
return Kick_10M(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type, False)
|
||||
|
||||
return thunk
|
||||
|
||||
servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1) # include 1 extra server for testing
|
||||
|
||||
env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
|
||||
eval_env = SubprocVecEnv([init_env(n_envs)])
|
||||
|
||||
try:
|
||||
if "model_file" in args: # retrain
|
||||
model = PPO.load(args["model_file"], env=env, device="cuda", n_envs=n_envs, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate)
|
||||
else: # train new model
|
||||
model = PPO("MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env, batch_size=minibatch_size,
|
||||
learning_rate=learning_rate, device="cuda")
|
||||
|
||||
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20,
|
||||
backup_env_file=__file__)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1) # wait for child processes
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
def test(self, args):
|
||||
# Uses different server and monitor ports
|
||||
server = Server(self.server_p - 1, self.monitor_p, 1)
|
||||
env = Kick_10M(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
|
||||
model = PPO.load(args["model_file"], env=env)
|
||||
|
||||
try:
|
||||
self.export_model(args["model_file"], args["model_file"] + ".pkl",
|
||||
False) # Export to pkl to create custom behavior
|
||||
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
env.close()
|
||||
server.kill()
|
||||
326
scripts/gyms/Kick_15M.py
Normal file
@@ -0,0 +1,326 @@
|
||||
from random import random
|
||||
from random import gauss
|
||||
from agent.Base_Agent import Base_Agent as Agent
|
||||
from behaviors.custom.Step.Step import Step
|
||||
from world.commons.Draw import Draw
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
from scripts.commons.Server import Server
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from time import sleep
|
||||
import os, gym
|
||||
import numpy as np
|
||||
from math_ops.Math_Ops import Math_Ops as U
|
||||
from math_ops.Math_Ops import Math_Ops as M
|
||||
from behaviors.custom.Step.Step_Generator import Step_Generator
|
||||
|
||||
'''
Objective:
Learn a long-distance kick, after approaching the ball with the Walk behavior
----------
- class Fxxk_WHUAI: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''
|
||||
|
||||
|
||||
class Fxxk_WHUAI(gym.Env):
|
||||
def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
|
||||
self.lock_flag = False
|
||||
self.sleep = 0
|
||||
self.reset_time = None
|
||||
self.behavior = None
|
||||
self.path_manager = None
|
||||
self.bias_dir = None
|
||||
self.robot_type = r_type
|
||||
self.kick_ori = 0
|
||||
self.terminal = False
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.ball_pos = np.array([0, 0, 0])
|
||||
|
||||
self.step_obj: Step = self.player.behavior.get_custom_behavior_object("Step") # Step behavior object
|
||||
|
||||
# State space
|
||||
obs_size = 63
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = gym.spaces.Box(low=np.full(obs_size, -np.inf, np.float32),
|
||||
high=np.full(obs_size, np.inf, np.float32), dtype=np.float32)
|
||||
|
||||
# Action space
|
||||
MAX = np.finfo(np.float32).max
|
||||
self.no_of_actions = act_size = 16
|
||||
self.action_space = gym.spaces.Box(low=np.full(act_size, -MAX, np.float32),
|
||||
high=np.full(act_size, MAX, np.float32), dtype=np.float32)
|
||||
|
||||
# Place ball far away to keep landmarks in FoV (head follows ball while using Step behavior)
|
||||
self.player.scom.unofficial_move_ball((14, 0, 0.042))
|
||||
|
||||
self.ball_x_center = 0.21
|
||||
self.ball_y_center = -0.045
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
self.player.scom.unofficial_move_ball((0, 0, 0))
|
||||
|
||||
def observe(self, init=False):
|
||||
w = self.player.world
|
||||
r = self.player.world.robot
|
||||
|
||||
if init:
|
||||
self.step_counter = 0
|
||||
self.obs[0] = self.step_counter / 20
|
||||
self.obs[1] = r.loc_head_z * 3
|
||||
self.obs[2] = r.loc_head_z_vel / 2
|
||||
self.obs[3] = r.imu_torso_roll / 15
|
||||
self.obs[4] = r.imu_torso_pitch / 15
|
||||
self.obs[5:8] = r.gyro / 100
|
||||
self.obs[8:11] = r.acc / 10
|
||||
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[23:39] = r.joints_position[2:18] / 100
|
||||
self.obs[39:55] = r.joints_speed[2:18] / 6.1395
|
||||
ball_rel_hip_center = self.player.inv_kinematics.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
|
||||
if init:
|
||||
self.obs[55:58] = (0, 0, 0)
|
||||
elif w.ball_is_visible:
|
||||
self.obs[55:58] = (ball_rel_hip_center - self.obs[58:61]) * 10
|
||||
self.obs[58:61] = ball_rel_hip_center
|
||||
self.obs[61] = np.linalg.norm(ball_rel_hip_center) * 2
|
||||
self.obs[62] = U.normalize_deg(self.kick_ori - r.imu_torso_orientation) / 30
|
||||
'''
|
||||
Expected observations for walking parameters/state (example):
|
||||
Time step R 0 1 2 0 1 2 3 4
|
||||
Progress 1 0 .5 1 0 .25 .5 .75 1
|
||||
Left leg active T F F F T T T T T
|
||||
Parameters A A A B B B B B C
|
||||
Example note: (A) has a step duration of 3ts, (B) has a step duration of 5ts
|
||||
'''
|
||||
return self.obs
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
r = self.player.world.robot
|
||||
self.player.scom.commit_and_send(r.get_command())
|
||||
self.player.scom.receive()
|
||||
|
||||
def reset(self):
|
||||
# print("reset")
|
||||
'''
|
||||
Reset and stabilize the robot
|
||||
Note: for some behaviors it would be better to reduce stabilization or add noise
|
||||
'''
|
||||
self.lock_flag = False
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
Gen_ball_pos = [random() * 5 - 9, random() * 6 - 3, 0]
|
||||
Gen_player_pos = (random() * 3 + Gen_ball_pos[0], random() * 3 + Gen_ball_pos[1], 0.5)
|
||||
Gen_player_rot = random() * 360
|
||||
self.ball_pos = np.array(Gen_ball_pos)
|
||||
self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
|
||||
self.sleep = 0
|
||||
self.step_counter = 0
|
||||
self.behavior = self.player.behavior
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
self.path_manager = self.player.path_manager
|
||||
gait: Step_Generator = self.behavior.get_custom_behavior_object("Walk").env.step_generator
|
||||
self.reset_time = t
|
||||
|
||||
for _ in range(25):
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, Gen_player_rot) # beam player continuously (floating above ground)
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
# beam player to ground
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, Gen_player_rot)
|
||||
r.joints_target_speed[0] = 0.01  # move head to trigger physics update (rcssserver3d bug when no joint is moving)
|
||||
self.sync()
|
||||
|
||||
# stabilize on ground
|
||||
for _ in range(7):
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
delta_diff = gauss(1, 0.1)
|
||||
# walk to ball
|
||||
while w.time_local_ms - self.reset_time <= 50000:
|
||||
direction = 0
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
self.bias_dir = [0.09, 0.1, 0.14, 0.08, 0.05][r.type]
|
||||
biased_dir = M.normalize_deg(direction + self.bias_dir) # add bias to rectify direction
|
||||
ang_diff = abs(
|
||||
M.normalize_deg(biased_dir - r.loc_torso_orientation)) # the reset was learned with loc, not IMU
|
||||
|
||||
next_pos, next_ori, dist_to_final_target = self.path_manager.get_path_to_ball(
|
||||
x_ori=biased_dir, x_dev=-self.ball_x_center, y_dev=-self.ball_y_center, torso_ori=biased_dir)
|
||||
if (w.ball_last_seen > t - w.VISUALSTEP_MS and ang_diff < 5 + delta_diff and
|
||||
t - w.ball_abs_pos_last_update < 100 and # ball absolute location is recent
|
||||
dist_to_final_target < 0.018 and  # close enough to the final kick position
|
||||
not gait.state_is_left_active and gait.state_current_ts == 2): # to avoid kicking immediately without preparation & stability
|
||||
break
|
||||
else:
|
||||
dist = max(0.07, dist_to_final_target)
|
||||
reset_walk = self.behavior.previous_behavior != "Walk" # reset walk if it wasn't the previous behavior
|
||||
self.behavior.execute_sub_behavior("Walk", reset_walk, next_pos, True, next_ori, True,
|
||||
dist) # target, is_target_abs, ori, is_ori_abs, distance
|
||||
|
||||
self.sync()
|
||||
|
||||
# memory variables
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
|
||||
return self.observe(True)
|
||||
|
||||
def render(self, mode='human', close=False):
|
||||
return
|
||||
|
||||
def close(self):
|
||||
Draw.clear_all()
|
||||
self.player.terminate()
|
||||
|
||||
def step(self, action):
|
||||
r = self.player.world.robot
|
||||
b = self.player.world.ball_abs_pos
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
if self.step_counter < 6:
|
||||
r.joints_target_speed[2:18] = action * [2.1, 2.1, 1.1, 1.1, 0.21, 0.21, 0.21, 0.21, 2.2, 2.2, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]
|
||||
r.joints_target_speed[12] += 1
|
||||
r.joints_target_speed[6] += 3
|
||||
r.joints_target_speed[5] -= 1
|
||||
r.joints_target_speed[8] += 3
|
||||
r.joints_target_speed[9] -= 5
|
||||
else:
|
||||
r.joints_target_speed[2:18] = action * [7.03, 7.03, 3, 3, 7.03, 7.03, 7, 7, 7, 7, 2, 2, 1, 1, 1, 1]
|
||||
r.joints_target_speed[6] -= 2.5
|
||||
r.joints_target_speed[7] += 2.5
|
||||
r.joints_target_speed[9] += 2.5
|
||||
r.joints_target_speed[11] -= 0.7
|
||||
r.set_joints_target_position_direct([0, 1], np.array([0, -44], float), False)
|
||||
|
||||
self.sync() # run simulation step
|
||||
self.step_counter += 1
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
|
||||
# terminal state: the robot is falling or timeout
|
||||
if self.step_counter > 16:
|
||||
obs = self.observe()
|
||||
self.player.scom.unofficial_beam((-14.5, 0, 0.51), 0)  # beam the player out of the way while the kick outcome is measured
|
||||
waiting_steps = 0
|
||||
high = 0
|
||||
while waiting_steps < 650:  # wait additional simulation steps so the kick outcome can be measured
|
||||
if w.ball_cheat_abs_pos[2] > high:
|
||||
high = w.ball_cheat_abs_pos[2]
|
||||
self.sync()  # keep advancing the simulation
|
||||
waiting_steps += 1
|
||||
dis = np.linalg.norm(self.ball_pos - w.ball_cheat_abs_pos)
|
||||
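# reward grows with the kick distance and the peak ball height, minus the lateral deviation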
reward = dis - abs(w.ball_cheat_abs_pos[1] - self.ball_pos[1]) + high*0.2
|
||||
# print(reward)
|
||||
self.terminal = True
|
||||
|
||||
else:
|
||||
obs = self.observe()
|
||||
reward = 0
|
||||
self.terminal = False
|
||||
|
||||
return obs, reward, self.terminal, {}
|
||||
|
||||
|
||||
class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
|
||||
# --------------------------------------- Learning parameters
|
||||
n_envs = min(14, os.cpu_count())
|
||||
n_steps_per_env = 128 # RolloutBuffer is of size (n_steps_per_env * n_envs)
|
||||
minibatch_size = 64 # should be a factor of (n_steps_per_env * n_envs)
|
||||
total_steps = 30000000
|
||||
learning_rate = 3e-5
|
||||
folder_name = f'Kick_15M_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
# print("Model path:", model_path)
|
||||
|
||||
# --------------------------------------- Run algorithm
|
||||
def init_env(i_env):
|
||||
def thunk():
|
||||
return Fxxk_WHUAI(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type, False)
|
||||
|
||||
return thunk
|
||||
|
||||
servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1) # include 1 extra server for testing
|
||||
|
||||
env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
|
||||
eval_env = SubprocVecEnv([init_env(n_envs)])
|
||||
|
||||
try:
|
||||
if "model_file" in args: # retrain
|
||||
model = PPO.load(args["model_file"], env=env, device="cuda", n_envs=n_envs, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate)
|
||||
else: # train new model
|
||||
model = PPO("MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env, batch_size=minibatch_size,
|
||||
learning_rate=learning_rate, device="cuda")
|
||||
|
||||
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20,
|
||||
backup_env_file=__file__)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1) # wait for child processes
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
def test(self, args):
|
||||
# Uses different server and monitor ports
|
||||
server = Server(self.server_p - 1, self.monitor_p, 1)
|
||||
env = Fxxk_WHUAI(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
|
||||
model = PPO.load(args["model_file"], env=env)
|
||||
|
||||
try:
|
||||
self.export_model(args["model_file"], args["model_file"] + ".pkl",
|
||||
False) # Export to pkl to create custom behavior
|
||||
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
env.close()
|
||||
server.kill()
|
||||
322
scripts/gyms/Kick_20M.py
Normal file
@@ -0,0 +1,322 @@
|
||||
import math
|
||||
import pickle
|
||||
from random import random
|
||||
from random import randint
|
||||
from agent.Base_Agent import Base_Agent as Agent
|
||||
from behaviors.custom.Step.Step import Step
|
||||
from world.commons.Draw import Draw
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
from scripts.commons.Server import Server
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from time import sleep
|
||||
import os, gym
|
||||
import numpy as np
|
||||
from math_ops.Math_Ops import Math_Ops as U
|
||||
from math_ops.Math_Ops import Math_Ops as M
|
||||
from behaviors.custom.Step.Step_Generator import Step_Generator
|
||||
|
||||
'''
Objective:
Learn a kick by imitating a reference kick motion (loaded from the "kick_slot" file)
----------
- class Mimic: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''
|
||||
|
||||
|
||||
class Mimic(gym.Env):
|
||||
def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
|
||||
self.phase = 0
|
||||
self.init_step = 40
|
||||
self.lock_flag = False
|
||||
self.sleep = 0
|
||||
self.reset_time = None
|
||||
self.behavior = None
|
||||
self.path_manager = None
|
||||
self.bias_dir = None
|
||||
self.robot_type = r_type
|
||||
self.kick_ori = 0
|
||||
self.terminal = False
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.ball_pos = np.array([0, 0, 0])
|
||||
|
||||
self.step_obj: Step = self.player.behavior.get_custom_behavior_object("Step") # Step behavior object
|
||||
|
||||
# State space
|
||||
obs_size = 63
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = gym.spaces.Box(low=np.full(obs_size, -np.inf, np.float32),
|
||||
high=np.full(obs_size, np.inf, np.float32), dtype=np.float32)
|
||||
|
||||
# Action space
|
||||
MAX = np.finfo(np.float32).max
|
||||
self.no_of_actions = act_size = 16
|
||||
self.action_space = gym.spaces.Box(low=np.full(act_size, -MAX, np.float32),
|
||||
high=np.full(act_size, MAX, np.float32), dtype=np.float32)
|
||||
|
||||
# Place ball far away to keep landmarks in FoV (head follows ball while using Step behavior)
|
||||
self.player.scom.unofficial_move_ball((14, 0, 0.042))
|
||||
|
||||
self.ball_x_center = 0.19
|
||||
self.ball_y_center = -0.045
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
self.player.scom.unofficial_move_ball((0, 0, 0))
|
||||
|
||||
with open("kick_slot", 'rb') as f:
|
||||
self.obs_list = pickle.load(f)
|
||||
self.obs_list = np.array(self.obs_list)
|
||||
|
||||
def observe(self, init=False):
|
||||
w = self.player.world
|
||||
r = self.player.world.robot
|
||||
|
||||
if init:
|
||||
self.step_counter = 0
|
||||
self.obs[0] = self.step_counter / 20
|
||||
self.obs[1] = r.loc_head_z * 3
|
||||
self.obs[2] = r.loc_head_z_vel / 2
|
||||
self.obs[3] = r.imu_torso_roll / 15
|
||||
self.obs[4] = r.imu_torso_pitch / 15
|
||||
self.obs[5:8] = r.gyro / 100
|
||||
self.obs[8:11] = r.acc / 10
|
||||
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[23:39] = r.joints_position[2:18] / 100
|
||||
self.obs[39:55] = r.joints_speed[2:18] / 6.1395
|
||||
ball_rel_hip_center = self.player.inv_kinematics.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
|
||||
if init:
|
||||
self.obs[55:58] = (0, 0, 0)
|
||||
elif w.ball_is_visible:
|
||||
self.obs[55:58] = (ball_rel_hip_center - self.obs[58:61]) * 10
|
||||
self.obs[58:61] = ball_rel_hip_center
|
||||
self.obs[61] = np.linalg.norm(ball_rel_hip_center) * 2
|
||||
self.obs[62] = U.normalize_deg(self.kick_ori - r.imu_torso_orientation) / 30
|
||||
'''
|
||||
Expected observations for walking parameters/state (example):
|
||||
Time step R 0 1 2 0 1 2 3 4
|
||||
Progress 1 0 .5 1 0 .25 .5 .75 1
|
||||
Left leg active T F F F T T T T T
|
||||
Parameters A A A B B B B B C
|
||||
Example note: (A) has a step duration of 3ts, (B) has a step duration of 5ts
|
||||
'''
|
||||
return self.obs
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
r = self.player.world.robot
|
||||
self.player.scom.commit_and_send(r.get_command())
|
||||
self.player.scom.receive()
|
||||
|
||||
def reset(self):
|
||||
# print("reset")
|
||||
'''
|
||||
Reset and stabilize the robot
|
||||
Note: for some behaviors it would be better to reduce stabilization or add noise
|
||||
'''
|
||||
self.lock_flag = False
|
||||
self.phase = 0
|
||||
# self.init_step = randint(0,74)
|
||||
self.init_step = 0
|
||||
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
Gen_ball_pos = [random() * 5 - 9, random() * 6 - 3, 0]
|
||||
Gen_player_pos = (random() * 3 + Gen_ball_pos[0], random() * 3 + Gen_ball_pos[1], 0.5)
|
||||
self.ball_pos = np.array(Gen_ball_pos)
|
||||
self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
|
||||
self.sleep = 0
|
||||
self.step_counter = self.init_step // 2
|
||||
self.behavior = self.player.behavior
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
self.path_manager = self.player.path_manager
|
||||
gait: Step_Generator = self.behavior.get_custom_behavior_object("Walk").env.step_generator
|
||||
self.reset_time = t
|
||||
|
||||
for _ in range(25):
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0) # beam player continuously (floating above ground)
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
# beam player to ground
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0)
|
||||
r.joints_target_speed[0] = 0.01  # move head to trigger physics update (rcssserver3d bug when no joint is moving)
|
||||
self.sync()
|
||||
|
||||
# stabilize on ground
|
||||
for _ in range(7):
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
# walk to ball
|
||||
while w.time_local_ms - self.reset_time <= 50000:
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
if self.phase == 0:
|
||||
direction = 0
|
||||
self.bias_dir = [0.09, 0, 0.14, 0, 0.05][r.type]
|
||||
biased_dir = M.normalize_deg(direction + self.bias_dir) # add bias to rectify direction
|
||||
ang_diff = abs(
|
||||
M.normalize_deg(biased_dir - r.loc_torso_orientation)) # the reset was learned with loc, not IMU
|
||||
|
||||
next_pos, next_ori, dist_to_final_target = self.path_manager.get_path_to_ball(
|
||||
x_ori=biased_dir, x_dev=-self.ball_x_center, y_dev=-self.ball_y_center, torso_ori=biased_dir)
|
||||
|
||||
|
||||
if (w.ball_last_seen > t - w.VISUALSTEP_MS and ang_diff < 5 and
|
||||
t - w.ball_abs_pos_last_update < 100 and # ball absolute location is recent
|
||||
dist_to_final_target < 0.025 and  # close enough to the final kick position
|
||||
not gait.state_is_left_active and gait.state_current_ts == 2):
|
||||
self.behavior.execute_sub_behavior("Kick_Motion", True)
|
||||
if self.phase == self.init_step:
|
||||
break
|
||||
self.phase += 1
|
||||
|
||||
else:
|
||||
dist = max(0.07, dist_to_final_target)
|
||||
reset_walk = self.behavior.previous_behavior != "Walk" # reset walk if it wasn't the previous behavior
|
||||
self.behavior.execute_sub_behavior("Walk", reset_walk, next_pos, True, next_ori, True,
|
||||
dist) # target, is_target_abs, ori, is_ori_abs, distance
|
||||
else:
|
||||
self.phase += 1
|
||||
self.behavior.execute_sub_behavior("Kick_Motion", False)
|
||||
if self.phase == self.init_step:
|
||||
break
|
||||
|
||||
|
||||
self.sync()
|
||||
|
||||
# memory variables
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
|
||||
return self.observe(True)
|
||||
|
||||
def render(self, mode='human', close=False):
|
||||
return
|
||||
|
||||
def close(self):
|
||||
Draw.clear_all()
|
||||
self.player.terminate()
|
||||
|
||||
def step(self, action):
|
||||
r = self.player.world.robot
|
||||
b = self.player.world.ball_abs_pos
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
r.joints_target_speed[2:18] = action
|
||||
r.set_joints_target_position_direct([0, 1], np.array([0, -44], float), False)
|
||||
|
||||
self.sync() # run simulation step
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
|
||||
# terminal state: the robot is falling or timeout
|
||||
if self.step_counter > 42:
|
||||
obs = self.observe()
|
||||
self.player.scom.unofficial_beam((-14.5, 0, 0.51), 0)  # beam the player out of the way while the kick outcome is measured
|
||||
waiting_steps = 0
|
||||
high = 0
|
||||
while waiting_steps < 650:  # wait additional simulation steps so the kick outcome can be measured
|
||||
if w.ball_cheat_abs_pos[2] > high:
|
||||
high = w.ball_cheat_abs_pos[2]
|
||||
self.sync()  # keep advancing the simulation
|
||||
waiting_steps += 1
|
||||
dis = np.linalg.norm(self.ball_pos - w.ball_cheat_abs_pos)
|
||||
# reward = 0
|
||||
reward = dis - abs(w.ball_cheat_abs_pos[1] - self.ball_pos[1]) + high*0.2
|
||||
|
||||
# print(reward)
|
||||
self.terminal = True
|
||||
|
||||
else:
|
||||
obs = self.observe()
|
||||
# compute the difference between the current state and the reference motion
|
||||
ref = self.obs_list[self.step_counter*2]
|
||||
reward = self.cal_reward(ref, self.obs) * 0.005
|
||||
|
||||
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.terminal = True
|
||||
# weighted difference
|
||||
# reward = 0
|
||||
self.terminal = False
|
||||
self.step_counter += 1
|
||||
|
||||
|
||||
return obs, reward, self.terminal, {}
|
||||
|
||||
|
||||
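# imitation reward: negative weighted distance between the current observation and the
# reference frame (joint positions plus the remaining proprioceptive features)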
def cal_reward(self,ref,obs):
|
||||
reward = -0.5*np.linalg.norm(ref[23:39]*100 - obs[23:39]*100) -0.5*np.linalg.norm(ref[1:23]*30 - obs[1:23]*30)
|
||||
|
||||
reward -= 2*np.linalg.norm(ref[20:23] - obs[20:23])
|
||||
return reward
|
||||
|
||||
class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
|
||||
# --------------------------------------- Learning parameters
|
||||
n_envs = min(14, os.cpu_count())
|
||||
n_steps_per_env = 128 # RolloutBuffer is of size (n_steps_per_env * n_envs)
|
||||
minibatch_size = 64 # should be a factor of (n_steps_per_env * n_envs)
|
||||
total_steps = 30000000
|
||||
learning_rate = 3e-4
|
||||
folder_name = f'Kick_20M_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
# print("Model path:", model_path)
|
||||
|
||||
# --------------------------------------- Run algorithm
|
||||
def init_env(i_env):
|
||||
def thunk():
|
||||
return Mimic(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type, False)
|
||||
|
||||
return thunk
|
||||
|
||||
servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1) # include 1 extra server for testing
|
||||
|
||||
env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
|
||||
eval_env = SubprocVecEnv([init_env(n_envs)])
|
||||
|
||||
try:
|
||||
if "model_file" in args: # retrain
|
||||
model = PPO.load(args["model_file"], env=env, device="cuda", n_envs=n_envs, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate)
|
||||
else: # train new model
|
||||
model = PPO("MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env, batch_size=minibatch_size,
|
||||
learning_rate=learning_rate, device="cuda")
|
||||
|
||||
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20,
|
||||
backup_env_file=__file__)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1) # wait for child processes
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
def test(self, args):
|
||||
# Uses different server and monitor ports
|
||||
server = Server(self.server_p - 1, self.monitor_p, 1)
|
||||
env = Mimic(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
|
||||
model = PPO.load(args["model_file"], env=env)
|
||||
|
||||
try:
|
||||
self.export_model(args["model_file"], args["model_file"] + ".pkl",
|
||||
False) # Export to pkl to create custom behavior
|
||||
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
env.close()
|
||||
server.kill()
|
||||
359
scripts/gyms/Kick_3M.py
Normal file
@@ -0,0 +1,359 @@
|
||||
import gym
|
||||
import os
|
||||
from random import random
|
||||
from random import uniform
|
||||
from time import sleep
|
||||
|
||||
import numpy as np
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
|
||||
from agent.Base_Agent import Base_Agent as Agent
|
||||
from behaviors.custom.Step.Step import Step
|
||||
from behaviors.custom.Step.Step_Generator import Step_Generator
|
||||
from math_ops.Math_Ops import Math_Ops as M
|
||||
from math_ops.Math_Ops import Math_Ops as U
|
||||
from scripts.commons.Server import Server
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from world.commons.Draw import Draw
|
||||
|
||||
'''
Objective:
Learn a kick, after approaching the ball with the Walk behavior
----------
- class kick_moving_ball: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''
|
||||
|
||||
|
||||
class kick_moving_ball(gym.Env):
|
||||
def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
|
||||
self.isfallen = False
|
||||
self.last_ball_vec = np.array([0, 0, 0])
|
||||
self.lock_flag = False
|
||||
self.sleep = 0
|
||||
self.reset_time = None
|
||||
self.behavior = None
|
||||
self.path_manager = None
|
||||
self.bias_dir = None
|
||||
self.robot_type = r_type
|
||||
self.kick_ori = 0
|
||||
self.terminal = False
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.ball_pos = np.array([0, 0, 0])
|
||||
|
||||
self.step_obj: Step = self.player.behavior.get_custom_behavior_object("Step") # Step behavior object
|
||||
|
||||
# State space
|
||||
obs_size = 64
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = gym.spaces.Box(low=np.full(obs_size, -np.inf, np.float32),
|
||||
high=np.full(obs_size, np.inf, np.float32), dtype=np.float32)
|
||||
|
||||
# Action space
|
||||
MAX = np.finfo(np.float32).max
|
||||
self.no_of_actions = act_size = 16
|
||||
self.action_space = gym.spaces.Box(low=np.full(act_size, -MAX, np.float32),
|
||||
high=np.full(act_size, MAX, np.float32), dtype=np.float32)
|
||||
|
||||
# Place ball far away to keep landmarks in FoV (head follows ball while using Step behavior)
|
||||
self.player.scom.unofficial_move_ball((14, 0, 0.042))
|
||||
|
||||
self.ball_x_center = 0.21
|
||||
self.ball_y_center = -0.045
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
self.player.scom.unofficial_move_ball((0, 0, 0))
|
||||
|
||||
def observe(self, init=False):
|
||||
w = self.player.world
|
||||
r = self.player.world.robot
|
||||
|
||||
if init:
|
||||
self.step_counter = 0
|
||||
self.obs[0] = self.step_counter / 20
|
||||
self.obs[1] = r.loc_head_z * 3
|
||||
self.obs[2] = r.loc_head_z_vel / 2
|
||||
self.obs[3] = r.imu_torso_roll / 15
|
||||
self.obs[4] = r.imu_torso_pitch / 15
|
||||
self.obs[5:8] = r.gyro / 100
|
||||
self.obs[8:11] = r.acc / 10
|
||||
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
|
||||
self.obs[23:39] = r.joints_position[2:18] / 100
|
||||
self.obs[39:55] = r.joints_speed[2:18] / 6.1395
|
||||
ball_rel_hip_center = self.player.inv_kinematics.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
|
||||
if init:
|
||||
self.obs[55:58] = (0, 0, 0)
|
||||
elif w.ball_is_visible:
|
||||
self.obs[55:58] = (ball_rel_hip_center - self.obs[58:61]) * 10
|
||||
self.obs[58:61] = ball_rel_hip_center
|
||||
self.obs[61] = np.linalg.norm(ball_rel_hip_center) * 2
|
||||
self.obs[62] = U.normalize_deg(self.kick_ori - r.imu_torso_orientation) / 30
|
||||
self.obs[63] = 0.7
|
||||
|
||||
return self.obs
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
r = self.player.world.robot
|
||||
self.player.scom.commit_and_send(r.get_command())
|
||||
self.player.scom.receive()
|
||||
|
||||
def reset(self):
|
||||
# print("reset")
|
||||
'''
|
||||
Reset and stabilize the robot
|
||||
Note: for some behaviors it would be better to reduce stabilization or add noise
|
||||
'''
|
||||
self.isfallen = False
|
||||
self.lock_flag = False
|
||||
self.player.scom.unofficial_set_play_mode("PlayOn")
|
||||
# generate the ball position
|
||||
Gen_ball_pos = [random() * 5 - 9, random() * 6 - 3, 0]
|
||||
|
||||
Gen_player_pos = (
|
||||
uniform(-1.5, 1.5) + Gen_ball_pos[0],
|
||||
uniform(-1.5, 1.5) + Gen_ball_pos[1],
|
||||
0.5
|
||||
)
|
||||
self.ball_pos = np.array(Gen_ball_pos)
|
||||
self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
|
||||
self.sleep = 0
|
||||
self.step_counter = 0
|
||||
self.behavior = self.player.behavior
|
||||
r = self.player.world.robot
|
||||
w = self.player.world
|
||||
t = w.time_local_ms
|
||||
self.path_manager = self.player.path_manager
|
||||
gait: Step_Generator = self.behavior.get_custom_behavior_object("Walk").env.step_generator
|
||||
self.reset_time = t
|
||||
|
||||
for _ in range(25):
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0) # beam player continuously (floating above ground)
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
|
||||
# beam player to ground
|
||||
self.player.scom.unofficial_beam(Gen_player_pos, 0)
|
||||
r.joints_target_speed[
|
||||
0] = 0.01 # move head to trigger physics update (rcssserver3d bug when no joint is moving)
|
||||
self.sync()
|
||||
|
||||
# stabilize on ground
|
||||
for _ in range(7):
|
||||
self.player.behavior.execute("Zero_Bent_Knees")
|
||||
self.sync()
|
||||
# walk to ball
|
||||
while True and w.time_local_ms - self.reset_time <= 50000:
|
||||
direction = 0
|
||||
if self.player.behavior.is_ready("Get_Up"):
|
||||
self.player.behavior.execute_to_completion("Get_Up")
|
||||
self.bias_dir = [0.09, 0.1, 0.14, 0.08, 0.05][r.type]
|
||||
biased_dir = M.normalize_deg(direction + self.bias_dir) # add bias to rectify direction
|
||||
ang_diff = abs(
|
||||
M.normalize_deg(biased_dir - r.loc_torso_orientation)) # the reset was learned with loc, not IMU
|
||||
|
||||
next_pos, next_ori, dist_to_final_target = self.path_manager.get_path_to_ball(
|
||||
x_ori=biased_dir, x_dev=-self.ball_x_center, y_dev=-self.ball_y_center, torso_ori=biased_dir)
|
||||
|
||||
if (w.ball_last_seen > t - w.VISUALSTEP_MS and ang_diff < 13 and
|
||||
t - w.ball_abs_pos_last_update < 100 and # ball absolute location is recent
|
||||
dist_to_final_target < 0.1 and # if absolute ball position is updated
|
||||
not gait.state_is_left_active and gait.state_current_ts == 2): # to avoid kicking immediately without preparation & stability
|
||||
break
|
||||
else:
|
||||
dist = max(0.07, dist_to_final_target)
|
||||
reset_walk = self.behavior.previous_behavior != "Walk" # reset walk if it wasn't the previous behavior
|
||||
self.behavior.execute_sub_behavior("Walk", reset_walk, next_pos, True, next_ori, True,
|
||||
dist) # target, is_target_abs, ori, is_ori_abs, distance
|
||||
|
||||
self.sync()
|
||||
|
||||
# memory variables
|
||||
self.lastx = r.cheat_abs_pos[0]
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
|
||||
return self.observe(True)

    def render(self, mode='human', close=False):
        return

    def close(self):
        Draw.clear_all()
        self.player.terminate()

    def step(self, action):
        r = self.player.world.robot
        b = self.player.world.ball_abs_pos
        w = self.player.world
        t = w.time_local_ms
        if self.step_counter < 1:  # swing back
            r.joints_target_speed[2:18] = action * [2, 2, 1, 1, 0.2, 0.2, 0.2, 0.2, 2, 2, 1, 1, 1, 1, 1, 1]
            r.joints_target_speed[12] += 1
            r.joints_target_speed[6] += 3
            r.joints_target_speed[5] -= 1
            r.joints_target_speed[8] += 3
            r.joints_target_speed[9] -= 5
        else:  # swing forward
            r.joints_target_speed[2:18] = action * [6, 6, 3, 3, 6, 6, 6, 6, 6, 6, 2, 2, 1, 1, 1, 1]
            r.joints_target_speed[6] -= 3
            r.joints_target_speed[7] += 3
            r.joints_target_speed[9] += 3
        r.set_joints_target_position_direct([0, 1], np.array([0, -44], float), False)

        self.sync()  # run simulation step
        self.step_counter += 1
        self.lastx = r.cheat_abs_pos[0]

        # terminal state: the robot is falling or timeout
        if self.step_counter > 4:
            obs = self.observe()
            waiting_steps = 0
            target = (15 - random() * 30, 10 - random() * 20)
            # self.player.scom.unofficial_beam((-13.5, 0, 0.51), 0)
            while waiting_steps < 300:  # run extra simulation steps while walking towards a random target
                target_2d, _, distance_to_final_target = self.path_manager.get_path_to_target(
                    target, priority_unums=[], is_aggressive=False, timeout=3000)
                self.player.behavior.execute("Walk", target_2d, True, None, True,
                                             distance_to_final_target)  # Args: target, is_target_abs, ori, is_ori_abs, distance
                if self.player.behavior.is_ready("Get_Up"):
                    self.isfallen = True
                self.sync()  # keep running simulation steps
                waiting_steps += 1
            dis = np.linalg.norm(self.ball_pos - w.ball_cheat_abs_pos)
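
            # Rough reading of the shaping below: the reward peaks when the ball ends up about 3 m from
            # where it started with little lateral (y) deviation, and is zeroed if the robot fell or the
            # ball ended up behind its starting x position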
            reward = 10 - abs(3 - dis) - abs(w.ball_cheat_abs_pos[1] - self.ball_pos[1])
            if self.isfallen or w.ball_cheat_abs_pos[0] < self.ball_pos[0]:
                reward = 0
            # print(reward)
            self.terminal = True

        else:
            obs = self.observe()
            reward = 0
            self.terminal = False

        return obs, reward, self.terminal, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = min(14, os.cpu_count())
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 64  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-5
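        # Rollout buffer size is n_steps_per_env * n_envs (256 * 14 = 3584 when 14+ CPU cores are
        # available), which is a multiple of the minibatch size of 64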
        folder_name = f'kick_moving_ball_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        # print("Model path:", model_path)

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return kick_moving_ball(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type,
                                        False)

            return thunk

        servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cuda", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO("MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env, batch_size=minibatch_size,
                            learning_rate=learning_rate, device="cuda")

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 100,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server = Server(self.server_p - 1, self.monitor_p, 1)
        env = kick_moving_ball(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


'''
The learning process takes several hours.
A video with the results can be seen at:
https://imgur.com/a/dC2V6Et

Stats:
- Avg. reward: 7.7
- Avg. ep. length: 5.5s (episode is limited to 6s)
- Max. reward: 9.3 (speed: 1.55m/s)

State space:
- Composed of all joint positions + torso height
- Stage of the underlying Step behavior

Reward:
- Displacement in the x-axis (it can be negative)
- Note that cheat and visual data is only updated every 3 steps
'''
343
scripts/gyms/Kick_5M.py
Normal file
@@ -0,0 +1,343 @@
from random import random

from agent.Base_Agent import Base_Agent as Agent
from behaviors.custom.Step.Step import Step
from world.commons.Draw import Draw
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv
from scripts.commons.Server import Server
from scripts.commons.Train_Base import Train_Base
from time import sleep
import os, gym
import numpy as np
from math_ops.Math_Ops import Math_Ops as U
from math_ops.Math_Ops import Math_Ops as M
from behaviors.custom.Step.Step_Generator import Step_Generator

'''
Objective:
Learn how to kick the ball a short distance
----------
- class Short_Kick: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''


class Short_Kick(gym.Env):
    def __init__(self, ip, server_p, monitor_p, r_type, enable_draw) -> None:
        self.lock_flag = False
        self.sleep = 0
        self.reset_time = None
        self.behavior = None
        self.path_manager = None
        self.bias_dir = None
        self.robot_type = r_type
        self.kick_ori = 0
        self.terminal = False
        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.player = Agent(ip, server_p, monitor_p, 1, self.robot_type, "Gym", True, enable_draw)
        self.step_counter = 0  # to limit episode size
        self.ball_pos = np.array([0, 0, 0])

        self.step_obj: Step = self.player.behavior.get_custom_behavior_object("Step")  # Step behavior object

        # State space
        obs_size = 64
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = gym.spaces.Box(low=np.full(obs_size, -np.inf, np.float32),
                                                high=np.full(obs_size, np.inf, np.float32), dtype=np.float32)

        # Action space
        MAX = np.finfo(np.float32).max
        self.no_of_actions = act_size = 16
        self.action_space = gym.spaces.Box(low=np.full(act_size, -MAX, np.float32),
                                           high=np.full(act_size, MAX, np.float32), dtype=np.float32)

        # Place ball far away to keep landmarks in FoV (head follows ball while using Step behavior)
        self.player.scom.unofficial_move_ball((14, 0, 0.042))

        self.ball_x_center = 0.21
        self.ball_y_center = -0.045
        self.player.scom.unofficial_set_play_mode("PlayOn")
        self.player.scom.unofficial_move_ball((0, 0, 0))

    def observe(self, init=False):
        w = self.player.world
        r = self.player.world.robot

        if init:
            self.step_counter = 0
        self.obs[0] = self.step_counter / 20
        self.obs[1] = r.loc_head_z * 3
        self.obs[2] = r.loc_head_z_vel / 2
        self.obs[3] = r.imu_torso_roll / 15
        self.obs[4] = r.imu_torso_pitch / 15
        self.obs[5:8] = r.gyro / 100
        self.obs[8:11] = r.acc / 10
        self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
        self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10, 10, 10, 0.01, 0.01, 0.01)
        self.obs[23:39] = r.joints_position[2:18] / 100
        self.obs[39:55] = r.joints_speed[2:18] / 6.1395
        ball_rel_hip_center = self.player.inv_kinematics.torso_to_hip_transform(w.ball_rel_torso_cart_pos)
        if init:
            self.obs[55:58] = (0, 0, 0)
        elif w.ball_is_visible:
            self.obs[55:58] = (ball_rel_hip_center - self.obs[58:61]) * 10
        self.obs[58:61] = ball_rel_hip_center
        self.obs[61] = np.linalg.norm(ball_rel_hip_center) * 2
        self.obs[62] = U.normalize_deg(self.kick_ori - r.imu_torso_orientation) / 30
        self.obs[63] = 0.7
        '''
        Expected observations for walking parameters/state (example):
        Time step        R  0  1  2  0   1   2   3  4
        Progress         1  0 .5  1  0 .25  .5 .75  1
        Left leg active  T  F  F  F  T   T   T   T  T
        Parameters       A  A  A  B  B   B   B   B  C
        Example note: (A) has a step duration of 3ts, (B) has a step duration of 5ts
        '''
        return self.obs

    def sync(self):
        ''' Run a single simulation step '''
        r = self.player.world.robot
        self.player.scom.commit_and_send(r.get_command())
        self.player.scom.receive()

    def reset(self):
        # print("reset")
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        self.lock_flag = False
        self.player.scom.unofficial_set_play_mode("PlayOn")
        Gen_ball_pos = [random() * 5 - 9, random() * 6 - 3, 0]
        Gen_player_pos = (random() * 3 + Gen_ball_pos[0], random() * 3 + Gen_ball_pos[1], 0.5)
        self.ball_pos = np.array(Gen_ball_pos)
        self.player.scom.unofficial_move_ball((Gen_ball_pos[0], Gen_ball_pos[1], Gen_ball_pos[2]))
        self.sleep = 0
        self.step_counter = 0
        self.behavior = self.player.behavior
        r = self.player.world.robot
        w = self.player.world
        t = w.time_local_ms
        self.path_manager = self.player.path_manager
        gait: Step_Generator = self.behavior.get_custom_behavior_object("Walk").env.step_generator
        self.reset_time = t

        for _ in range(25):
            self.player.scom.unofficial_beam(Gen_player_pos, 0)  # beam player continuously (floating above ground)
            self.player.behavior.execute("Zero_Bent_Knees")
            self.sync()

        # beam player to ground
        self.player.scom.unofficial_beam(Gen_player_pos, 0)
        r.joints_target_speed[0] = 0.01  # move head to trigger physics update (rcssserver3d bug when no joint is moving)
        self.sync()

        # stabilize on ground
        for _ in range(7):
            self.player.behavior.execute("Zero_Bent_Knees")
            self.sync()
        # walk to ball
        while w.time_local_ms - self.reset_time <= 50000:
            direction = 0
            if self.player.behavior.is_ready("Get_Up"):
                self.player.behavior.execute_to_completion("Get_Up")
            self.bias_dir = [0.09, 0.1, 0.14, 0.08, 0.05][r.type]
            biased_dir = M.normalize_deg(direction + self.bias_dir)  # add bias to rectify direction
            ang_diff = abs(M.normalize_deg(biased_dir - r.loc_torso_orientation))  # the reset was learned with loc, not IMU

            next_pos, next_ori, dist_to_final_target = self.path_manager.get_path_to_ball(
                x_ori=biased_dir, x_dev=-self.ball_x_center, y_dev=-self.ball_y_center, torso_ori=biased_dir)
            if (w.ball_last_seen > t - w.VISUALSTEP_MS and ang_diff < 5 and
                    t - w.ball_abs_pos_last_update < 100 and  # ball absolute location is recent
                    dist_to_final_target < 0.035 and  # if absolute ball position is updated
                    not gait.state_is_left_active and gait.state_current_ts == 2):  # to avoid kicking immediately without preparation & stability
                break
            else:
                dist = max(0.07, dist_to_final_target)
                reset_walk = self.behavior.previous_behavior != "Walk"  # reset walk if it wasn't the previous behavior
                self.behavior.execute_sub_behavior("Walk", reset_walk, next_pos, True, next_ori, True,
                                                   dist)  # target, is_target_abs, ori, is_ori_abs, distance

            self.sync()

        # memory variables
        self.lastx = r.cheat_abs_pos[0]
        self.act = np.zeros(self.no_of_actions, np.float32)

        return self.observe(True)

    def render(self, mode='human', close=False):
        return

    def close(self):
        Draw.clear_all()
        self.player.terminate()

    def step(self, action):
        r = self.player.world.robot
        b = self.player.world.ball_abs_pos
        w = self.player.world
        t = w.time_local_ms
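        # The action multipliers below are phase-dependent: the first two steps use small values
        # (presumably a wind-up / back-swing), later steps use larger values for the kicking motion itself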
        if self.step_counter < 2:
            r.joints_target_speed[2:18] = action * [2, 2, 1, 1, 0.2, 0.2, 0.2, 0.2, 2, 2, 1, 1, 1, 1, 1, 1]
            r.joints_target_speed[12] += 1
            r.joints_target_speed[6] += 3
            r.joints_target_speed[5] -= 1
            r.joints_target_speed[8] += 3
            r.joints_target_speed[9] -= 5
        else:
            r.joints_target_speed[2:18] = action * [6, 6, 3, 3, 6, 6, 6, 6, 6, 6, 2, 2, 1, 1, 1, 1]
            r.joints_target_speed[6] -= 3
            r.joints_target_speed[7] += 3
            r.joints_target_speed[9] += 3
            r.joints_target_speed[11] -= 1
        r.set_joints_target_position_direct([0, 1], np.array([0, -44], float), False)

        self.sync()  # run simulation step
        self.step_counter += 1
        self.lastx = r.cheat_abs_pos[0]

        # terminal state: the robot is falling or timeout
        if self.step_counter > 13:
            obs = self.observe()
            self.player.scom.unofficial_beam((-14.5, 0, 0.51), 0)  # beam player out of the way while the ball keeps moving
            waiting_steps = 0
            while waiting_steps < 300:  # run extra simulation steps before computing the reward
                self.sync()  # keep running simulation steps
                waiting_steps += 1
            dis = np.linalg.norm(self.ball_pos - w.ball_cheat_abs_pos)
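            # The terms below roughly reward kicks that leave the ball about 5 m from its starting
            # position with little lateral (y) deviation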
            reward = 5 - abs(5 - dis) - abs(w.ball_cheat_abs_pos[1] - self.ball_pos[1])
            # print(reward)
            self.terminal = True

        else:
            obs = self.observe()
            reward = 0
            self.terminal = False

        return obs, reward, self.terminal, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = min(12, os.cpu_count())
        n_steps_per_env = 128  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 64  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-4
        folder_name = f'Short_Kick_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        # print("Model path:", model_path)

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return Short_Kick(self.ip, self.server_p + i_env, self.monitor_p_1000 + i_env, self.robot_type, False)

            return thunk

        servers = Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cuda", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO("MlpPolicy", env=env, verbose=1, n_steps=n_steps_per_env, batch_size=minibatch_size,
                            learning_rate=learning_rate, device="cuda")

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 200,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server = Server(self.server_p - 1, self.monitor_p, 1)
        env = Short_Kick(self.ip, self.server_p - 1, self.monitor_p, self.robot_type, True)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


'''
The learning process takes several hours.
A video with the results can be seen at:
https://imgur.com/a/dC2V6Et

Stats:
- Avg. reward: 7.7
- Avg. ep. length: 5.5s (episode is limited to 6s)
- Max. reward: 9.3 (speed: 1.55m/s)

State space:
- Composed of all joint positions + torso height
- Stage of the underlying Step behavior

Reward:
- Displacement in the x-axis (it can be negative)
- Note that cheat and visual data is only updated every 3 steps
'''
BIN
scripts/gyms/__pycache__/Dribble.cpython-313.pyc
Normal file
Binary file not shown.