Compare commits

...

5 Commits

Author  SHA1        Message                                              Date
xxh     a52cdff013  add no gui, no realtime mode, and train bash script  2026-03-21 08:53:31 -04:00
xxh     ec8b648a3b  stand stable 0.1 amendment and add some info         2026-03-21 06:46:55 -04:00
xxh     f99fae68f6  change model and policy                              2026-03-14 01:08:22 -04:00
xxh     294fe0bd79  restore                                              2026-03-13 21:44:59 -04:00
xxh     cf80becd17  restore                                              2026-03-13 21:38:44 -04:00
9 changed files with 429 additions and 173 deletions

.gitignore (vendored, 10 changed lines)

@@ -10,3 +10,13 @@ poetry.toml
 **/log/
 *.spec
 dist/
+*steps.zip
+*.pkl
+best_model.zip
+*.csv
+*.npz
+*.xml
+*.json
+*.yaml
+*.iml
+*.TXT

command.md (new file, 14 lines)

@@ -0,0 +1,14 @@
+Train (default)
+bash train.sh
+
+Test (real-time, with rendering)
+GYM_CPU_MODE=test GYM_CPU_TEST_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip GYM_CPU_TEST_FOLDER=scripts/gyms/logs/Walk_R0_005/ GYM_CPU_TEST_NO_RENDER=0 GYM_CPU_TEST_NO_REALTIME=0 bash train.sh
+
+Test (no rendering, non-real-time)
+GYM_CPU_MODE=test GYM_CPU_TEST_NO_RENDER=1 GYM_CPU_TEST_NO_REALTIME=1 bash train.sh
+
+Retrain (continue training)
+GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip bash train.sh
+
+Retrain with modified training hyperparameters
+GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_004/best_model.zip GYM_CPU_TRAIN_LR=2e-4 GYM_CPU_TRAIN_BATCH_SIZE=256 GYM_CPU_TRAIN_EPOCHS=8 bash train.sh
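All of these commands configure the run purely through environment variables that train.sh forwards and scripts/gyms/Walk.py reads back with os.environ.get. A minimal sketch of that read-with-default pattern; the helper names here are illustrative, not part of the repo:

```python
import os

# Illustrative helpers (not from the repo) showing the read-with-default
# pattern Walk.py uses for every GYM_CPU_* variable.
def env_float(name: str, default: str) -> float:
    return float(os.environ.get(name, default))

def env_flag(name: str, default: str = "0") -> bool:
    # "1" enables the flag; anything else leaves it off.
    return os.environ.get(name, default) == "1"

if __name__ == "__main__":
    lr = env_float("GYM_CPU_TRAIN_LR", "3e-4")
    no_render = env_flag("GYM_CPU_TEST_NO_RENDER")
    print(f"lr={lr}, no_render={no_render}")
```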

[file path not shown]

@@ -1,5 +1,6 @@
 import logging
 import socket
+import time
 from select import select
 from communication.world_parser import WorldParser
@@ -10,15 +11,27 @@ class Server:
     def __init__(self, host: str, port: int, world_parser: WorldParser):
         self.world_parser: WorldParser = world_parser
         self.__host: str = host
-        self.__port: str = port
-        self.__socket: socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        self.__port: int = port
+        self.__socket: socket.socket = self._create_socket()
         self.__send_buff = []
         self.__rcv_buffer_size = 1024
         self.__rcv_buffer = bytearray(self.__rcv_buffer_size)

+    def _create_socket(self) -> socket.socket:
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+        return sock
+
     def connect(self) -> None:
         logger.info("Connecting to server at %s:%d...", self.__host, self.__port)
+        # Always reconnect with a fresh socket object.
+        try:
+            self.__socket.close()
+        except OSError:
+            pass
+        self.__socket = self._create_socket()
         while True:
             try:
                 self.__socket.connect((self.__host, self.__port))
@@ -27,12 +40,19 @@
                 logger.error(
                     "Connection refused. Make sure the server is running and listening on {self.__host}:{self.__port}."
                 )
+                time.sleep(0.05)
         logger.info(f"Server connection established to {self.__host}:{self.__port}.")

     def shutdown(self) -> None:
-        self.__socket.close()
-        self.__socket.shutdown(socket.SHUT_RDWR)
+        try:
+            self.__socket.shutdown(socket.SHUT_RDWR)
+        except OSError:
+            pass
+        try:
+            self.__socket.close()
+        except OSError:
+            pass

     def send_immediate(self, msg: str) -> None:
         """

[file path not shown]

@@ -1,9 +1,10 @@
 import subprocess
 import os
+import time

 class Server():
-    def __init__(self, first_server_p, first_monitor_p, n_servers) -> None:
+    def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
         try:
             import psutil
             self.check_running_servers(psutil, first_server_p, first_monitor_p, n_servers)
@@ -17,21 +18,32 @@
         # makes it easier to kill test servers without affecting train servers
         cmd = "rcssservermj"
+        render_arg = "--no-render" if no_render else ""
+        realtime_arg = "--no-realtime" if no_realtime else ""
         for i in range(n_servers):
             port = first_server_p + i
             mport = first_monitor_p + i
-            server_cmd = f"{cmd} --aport {port} --mport {mport} --no-render --no-realtime"
-            self.rcss_processes.append(
-                subprocess.Popen(
-                    server_cmd.split(),
-                    stdout=subprocess.DEVNULL,
-                    stderr=subprocess.STDOUT,
-                    start_new_session=True
-                )
-            )
+            server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
+            proc = subprocess.Popen(
+                server_cmd.split(),
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.STDOUT,
+                start_new_session=True
+            )
+            # Avoid startup storm when launching many servers at once.
+            time.sleep(0.03)
+            rc = proc.poll()
+            if rc is not None:
+                raise RuntimeError(
+                    f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
+                )
+            self.rcss_processes.append(proc)

     def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
         ''' Check if any server is running on chosen ports '''
         found = False
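The launch loop above fails fast by polling each child shortly after spawning it: `Popen.poll()` returns `None` while the process is alive and its exit code once it has died, so a server that crashes on startup is reported immediately rather than surfacing later as a connection reset. A self-contained sketch on a Unix system, using `false` as a stand-in for a crashing server:

```python
import subprocess
import time

# Fail-fast launch check, as above: poll() is None while the child runs
# and becomes the exit code once it has terminated. `false` stands in
# for a server that crashes on startup (Unix only).
proc = subprocess.Popen(["false"], start_new_session=True)
time.sleep(0.1)  # give the child a moment to fail
rc = proc.poll()
if rc is not None:
    print(f"child exited early (code={rc})")  # prints: code=1
```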

[file path not shown]

@@ -171,8 +171,8 @@ class Train_Base():
             ep_reward += reward
             ep_length += 1

-            if enable_FPS_control:  # control simulation speed (using non blocking user input)
-                self.control_fps(select.select([sys.stdin], [], [], 0)[0])
+            # if enable_FPS_control:  # control simulation speed (using non blocking user input)
+            #     self.control_fps(select.select([sys.stdin], [], [], 0)[0])

             if done:
                 obs, _ = env.reset()

scripts/gyms/Walk.py (Normal file → Executable file, 387 changed lines)

@@ -7,7 +7,8 @@ from random import random
 from random import uniform
 from stable_baselines3 import PPO
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.monitor import Monitor
+from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
 import gymnasium as gym
 from gymnasium import spaces
@@ -50,6 +51,7 @@ class WalkEnv(gym.Env):
         self.waypoint_index = 0
         self.route_completed = False
         self.debug_every_n_steps = 5
+        self.enable_debug_joint_status = False
         self.calibrate_nominal_from_neutral = True
         self.auto_calibrate_train_sim_flip = True
         self.nominal_calibrated_once = False
@@ -57,7 +59,7 @@
         self._target_hz = 0.0
         self._target_dt = 0.0
         self._last_sync_time = None
-        target_hz_env = 24
+        target_hz_env = 0
         if target_hz_env:
             try:
                 self._target_hz = float(target_hz_env)
@@ -80,8 +82,8 @@
         action_dim = len(self.Player.robot.ROBOT_MOTORS)
         self.no_of_actions = action_dim
         self.action_space = spaces.Box(
-            low=-1.0,
-            high=1.0,
+            low=-10.0,
+            high=10.0,
             shape=(action_dim,),
             dtype=np.float32
         )
@@ -114,18 +116,17 @@
                 0.0,
             ]
         )
-        self.reference_joint_nominal_position = self.joint_nominal_position.copy()
+        self.joint_nominal_position = np.zeros(self.no_of_actions)
         self.train_sim_flip = np.array(
             [
                 1.0,   # 0: Head_yaw (he1)
                 -1.0,  # 1: Head_pitch (he2)
                 1.0,   # 2: Left_Shoulder_Pitch (lae1)
                 -1.0,  # 3: Left_Shoulder_Roll (lae2)
-                1.0,   # 4: Left_Elbow_Pitch (lae3)
+                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                 1.0,   # 5: Left_Elbow_Yaw (lae4)
                 -1.0,  # 6: Right_Shoulder_Pitch (rae1)
-                1.0,   # 7: Right_Shoulder_Roll (rae2)
+                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                 1.0,   # 8: Right_Elbow_Pitch (rae3)
                 1.0,   # 9: Right_Elbow_Yaw (rae4)
                 1.0,   # 10: Waist (te1)
@@ -144,15 +145,51 @@
             ]
         )
-        self.scaling_factor = 0.5
+        self.scaling_factor = 0.3
+        # self.scaling_factor = 1

+        # Small reset perturbations for robustness training.
+        self.enable_reset_perturb = True
+        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
+        self.reset_joint_noise_rad = 0.015
+        self.reset_perturb_steps = 3
+        self.reset_recover_steps = 8

         self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
+        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
         self.previous_pos = np.array([0.0, 0.0])  # Track previous position

         self.Player.server.connect()
         # sleep(2.0)  # Longer wait for connection to establish completely
         self.Player.server.send_immediate(
             f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
         )
+        self.start_time = time.time()

+    def _reconnect_server(self):
+        try:
+            self.Player.server.shutdown()
+        except Exception:
+            pass
+        self.Player.server.connect()
+        self.Player.server.send_immediate(
+            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
+        )

+    def _safe_receive_world_update(self, retries=1):
+        last_exc = None
+        for attempt in range(retries + 1):
+            try:
+                self.Player.server.receive()
+                self.Player.world.update()
+                return
+            except (ConnectionResetError, OSError) as exc:
+                last_exc = exc
+                if attempt >= retries:
+                    raise
+                self._reconnect_server()
+        if last_exc is not None:
+            raise last_exc

     def debug_log(self, message):
         print(message)
@@ -163,45 +200,6 @@
         except OSError:
             pass

-    def calibrate_train_sim_flip_from_neutral(self, neutral_joint_positions):
-        updated_flip = self.train_sim_flip.copy()
-        changed = []
-        for idx, (reference_value, observed_value) in enumerate(
-            zip(self.reference_joint_nominal_position, neutral_joint_positions)
-        ):
-            if idx >= 10:
-                continue
-            if abs(reference_value) < 0.15 or abs(observed_value) < 0.15:
-                continue
-            inferred_flip = 1.0 if np.sign(reference_value) == np.sign(observed_value) else -1.0
-            if updated_flip[idx] != inferred_flip:
-                changed.append((idx, updated_flip[idx], inferred_flip))
-            updated_flip[idx] = inferred_flip
-        self.train_sim_flip = updated_flip
-        if changed:
-            self.debug_log(
-                "[FlipDebug] "
-                f"changes={[(idx, old, new) for idx, old, new in changed]}"
-            )
-
-    def is_reliable_neutral_pose(self, neutral_joint_positions):
-        leg_positions = neutral_joint_positions[11:]
-        leg_norm = float(np.linalg.norm(leg_positions))
-        leg_max = float(np.max(np.abs(leg_positions)))
-        height = float(self.Player.world.global_position[2])
-        reliable = (
-            leg_norm > 0.8
-            and leg_max > 0.35
-            and 0.12 < height < 0.8
-        )
-        return reliable, leg_norm, leg_max, height
-
     def observe(self, init=False):
         """Get the current observation."""
@@ -260,8 +258,7 @@ class WalkEnv(gym.Env):
     def sync(self):
         ''' Run a single simulation step '''
-        self.Player.server.receive()
-        self.Player.world.update()
+        self._safe_receive_world_update(retries=1)
         self.Player.robot.commit_motor_targets_pd()
         self.Player.server.send()

         if self._target_dt > 0.0:
@@ -299,6 +296,7 @@ class WalkEnv(gym.Env):
f"err_norm={float(np.linalg.norm(joint_error)):.4f} " f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
f"fallen={self.Player.world.global_position[2] < 0.3}" f"fallen={self.Player.world.global_position[2] < 0.3}"
) )
print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
     def reset(self, seed=None, options=None):
         '''
@@ -310,9 +308,9 @@ class WalkEnv(gym.Env):
         if seed is not None:
             np.random.seed(seed)

-        length1 = np.random.uniform(10, 20)  # randomize target distance
-        length2 = np.random.uniform(10, 20)  # randomize target distance
-        length3 = np.random.uniform(10, 20)  # randomize target distance
+        length1 = 2  # fixed distance to the first target
+        length2 = np.random.uniform(0.6, 1)  # randomize target distance
+        length3 = np.random.uniform(0.6, 1)  # randomize target distance
         angle2 = np.random.uniform(-30, 30)  # randomize initial orientation
         angle3 = np.random.uniform(-30, 30)  # randomize target direction
@@ -320,70 +318,72 @@ class WalkEnv(gym.Env):
         self.waypoint_index = 0
         self.route_completed = False
         self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
+        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
         self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
         self.walk_cycle_step = 0

         # Randomize the beam target position and heading to add training diversity
         beam_x = (random() - 0.5) * 10
         beam_y = (random() - 0.5) * 10
+        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)
         for _ in range(5):
-            self.Player.server.receive()
-            self.Player.world.update()
+            self._safe_receive_world_update(retries=2)
             self.Player.robot.commit_motor_targets_pd()
-            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=0)
+            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
             self.Player.server.send()

         # Run the Neutral skill to completion, giving the robot enough time to stand stably at the beam position
         finished_count = 0
-        for _ in range(10):
+        for _ in range(50):
             finished = self.Player.skills_manager.execute("Neutral")
             self.sync()
             if finished:
                 finished_count += 1
-                if finished_count >= 3:  # assume 3 consecutive completions count as success
+                if finished_count >= 20:  # assume 20 consecutive completions count as success
                     break

-        # neutral_joint_positions = np.deg2rad(
-        #     [self.Player.robot.motor_positions[motor] for motor in self.Player.robot.ROBOT_MOTORS]
-        # )
-        # reliable_neutral, neutral_leg_norm, neutral_leg_max, neutral_height = self.is_reliable_neutral_pose(neutral_joint_positions)
-        # if self.auto_calibrate_train_sim_flip and reliable_neutral and not self.flip_calibrated_once:
-        #     self.calibrate_train_sim_flip_from_neutral(neutral_joint_positions)
-        #     self.flip_calibrated_once = True
-        # if self.calibrate_nominal_from_neutral and reliable_neutral and not self.nominal_calibrated_once:
-        #     self.joint_nominal_position = neutral_joint_positions * self.train_sim_flip
-        #     self.nominal_calibrated_once = True
-        # self.debug_log(
-        #     "[ResetDebug] "
-        #     f"neutral_pos={np.round(self.Player.world.global_position, 3).tolist()} "
-        #     f"shoulders={np.round(neutral_joint_positions[2:10], 3).tolist()} "
-        #     f"legs={np.round(neutral_joint_positions[11:], 3).tolist()} "
-        #     f"flip={self.train_sim_flip.tolist()} "
-        #     f"nominal_legs={np.round(self.joint_nominal_position[11:], 3).tolist()} "
-        #     f"calibrated_once={(self.flip_calibrated_once, self.nominal_calibrated_once)} "
-        #     f"reliable_neutral={reliable_neutral} "
-        #     f"leg_norm={neutral_leg_norm:.3f} leg_max={neutral_leg_max:.3f} height={neutral_height:.3f}"
-        # )
-        # reset_action_noise = np.random.uniform(-0.015, 0.015, size=(len(self.Player.robot.ROBOT_MOTORS),))
-        # self.target_joint_positions = (self.joint_nominal_position + reset_action_noise) * self.train_sim_flip
-        # for idx, target in enumerate(self.target_joint_positions):
-        #     r.set_motor_target_position(
-        #         r.ROBOT_MOTORS[idx], target*180/math.pi, kp=25, kd=0.6
-        #     )
+        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
+            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
+            # Perturb waist + lower body only (10:), keep head/arms stable.
+            perturb_action[10:] = np.random.uniform(
+                -self.reset_joint_noise_rad,
+                self.reset_joint_noise_rad,
+                size=(self.no_of_actions - 10,)
+            )
+            for _ in range(self.reset_perturb_steps):
+                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
+                for idx, target in enumerate(target_joint_positions):
+                    r.set_motor_target_position(
+                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
+                    )
+                self.sync()
+            for i in range(self.reset_recover_steps):
+                # Linearly fade perturbation to help policy start from near-neutral.
+                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
+                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
+                for idx, target in enumerate(target_joint_positions):
+                    r.set_motor_target_position(
+                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
+                    )
+                self.sync()

         # memory variables
+        self.sync()
         self.initial_position = np.array(self.Player.world.global_position[:2])
         self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
         self.act = np.zeros(self.no_of_actions, np.float32)

-        point1 = self.initial_position + np.array([length1, 0])
+        # Build target in the robot's current forward direction instead of fixed global +x.
+        heading_deg = float(r.global_orientation_euler[2])
+        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
+        point1 = self.initial_position + forward_offset
         point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
         point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
-        self.point_list = [point1, point2, point3]
+        self.point_list = [point1]
         self.target_position = self.point_list[self.waypoint_index]
+        self.initial_height = self.Player.world.global_position[2]

         return self.observe(True), {}
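The recovery loop above fades the perturbation linearly: with `reset_recover_steps = 8`, alpha steps down from 0.875 to exactly 0, so the final recovery step commands the unperturbed nominal pose. The schedule, computed in isolation:

```python
import numpy as np

# The fade schedule from the recovery loop: alpha decreases linearly and
# reaches exactly 0 on the last step, returning to the nominal pose.
reset_recover_steps = 8
alphas = [1.0 - (i + 1) / reset_recover_steps for i in range(reset_recover_steps)]
print(np.round(alphas, 3))  # [0.875 0.75 0.625 0.5 0.375 0.25 0.125 0.]
```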
@@ -391,83 +391,123 @@ class WalkEnv(gym.Env):
             return

     def compute_reward(self, previous_pos, current_pos, action):
-        velocity = current_pos - previous_pos
-        velocity_magnitude = np.linalg.norm(velocity)
-        direction_to_target = self.target_position - current_pos
-        prev_direction_to_target = self.target_position - previous_pos
-        distance_to_target = np.linalg.norm(direction_to_target)
-        prev_distance_to_target = np.linalg.norm(prev_direction_to_target)
-        progress_reward = np.clip((prev_distance_to_target - distance_to_target) * 30.0, -2.0, 4.0)
-        velocity_in_m_per_sec = velocity_magnitude / 0.05
-        speed_reward = np.clip(velocity_in_m_per_sec * 1.5, 0.0, 1.5)
-        if velocity_magnitude > 1e-4 and distance_to_target > 1e-4:
-            directional_alignment = np.dot(velocity, direction_to_target) / (velocity_magnitude * distance_to_target)
-            directional_alignment = np.clip(directional_alignment, -1.0, 1.0)
-            direction_reward = max(0.0, directional_alignment)
-        else:
-            direction_reward = 0.0
-        alive_bonus = 0.05
-        height = self.Player.world.global_position[2]
-        if 0.45 <= height <= 1.2:
-            height_reward = 1.5
-        else:
-            height_reward = -6.0
-        motionless_penalty = -1.5 if velocity_magnitude < 0.003 else 0.0
-        waypoint_bonus = 0.0
-        if distance_to_target < 0.5:
-            waypoint_bonus = 25.0
-            if self.waypoint_index < len(self.point_list) - 1:
-                self.waypoint_index += 1
-                self.target_position = self.point_list[self.waypoint_index]
-            else:
-                waypoint_bonus = 100.0
-                self.route_completed = True
-        action_magnitude = np.linalg.norm(action[11:])
-        action_penalty = -0.08 * action_magnitude
-        tilt_penalty = -0.2 * np.linalg.norm(self.Player.robot.gyroscope[:2]) / 100.0
-        return (
-            progress_reward
-            + speed_reward
-            + direction_reward
-            + alive_bonus
-            + height_reward
-            + motionless_penalty
-            + waypoint_bonus
-            + action_penalty
-            + tilt_penalty
-        )
+        height = float(self.Player.world.global_position[2])
+        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
+        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
+        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
+        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
+        ang_vel_mag = float(np.linalg.norm(ang_vel))
+
+        is_fallen = height < 0.3
+        if is_fallen:
+            # remain = max(0, 800 - self.step_counter)
+            # return -8.0 - 0.01 * remain
+            return -1.0
+
+        # # target direction
+        # to_target = self.target_position - current_pos
+        # dist_to_target = float(np.linalg.norm(to_target))
+        # if dist_to_target < 0.5:
+        #     return 15.0
+        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
+        # delta_pos = current_pos - previous_pos
+        # forward_step = float(np.dot(delta_pos, forward_dir))
+        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))
+
+        # reward terms
+        # progress_reward = 2 * forward_step
+        # lateral_penalty = -0.1 * lateral_step
+        alive_bonus = 2.0
+        # action_penalty = -0.01 * float(np.linalg.norm(action))
+        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))
+        posture_penalty = -0.3 * tilt_mag
+        ang_vel_penalty = -0.02 * ang_vel_mag
+        target_height = self.initial_height
+        height_error = height - target_height
+        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable
+
+        # # near the top of compute_reward, add a height-rate computation
+        # if not hasattr(self, 'last_height'):
+        #     self.last_height = height
+        #     self.last_height_time = self.step_counter  # optional, for time intervals
+        # height_rate = height - self.last_height  # positive = rising, negative = falling
+        # self.last_height = height
+        # penalize height drops (negative rate of change)
+        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; positive -height_rate is the drop magnitude
+
+        # # inside compute_reward
+        # if self.step_counter > 50:
+        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
+        #     novelty = float(np.linalg.norm(action - avg_prev_action))
+        #     exploration_bonus = 0.05 * novelty
+        # else:
+        #     exploration_bonus = 0
+        # self.prev_action_history[self.history_idx] = action
+        # self.history_idx = (self.history_idx + 1) % 50
+
+        total = (
+            # progress_reward +
+            alive_bonus +
+            # lateral_penalty +
+            # action_penalty +
+            smoothness_penalty +
+            posture_penalty
+            + ang_vel_penalty
+            + height_penalty
+            # + exploration_bonus
+            # + height_down_penalty
+        )
+
+        if time.time() - self.start_time >= 1200:
+            self.start_time = time.time()
+            print(
+                # f"progress_reward:{progress_reward:.4f}",
+                # f"lateral_penalty:{lateral_penalty:.4f}",
+                # f"action_penalty:{action_penalty:.4f}",
+                f"height_penalty:{height_penalty:.4f}",
+                f"smoothness_penalty:{smoothness_penalty:.4f},",
+                f"posture_penalty:{posture_penalty:.4f}",
+                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
+                # f"height_down_penalty:{height_down_penalty:.4f}",
+                # f"exploration_bonus:{exploration_bonus:.4f}"
+            )
+        return total
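The new posture term works by rotating the world gravity vector into the body frame: the horizontal magnitude of the result equals the sine of the tilt angle, so `tilt_mag` is 0 when upright and 1 when lying flat. A standalone check with scipy; pitching about y is an arbitrary choice for illustration:

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

# Rotate world gravity into the body frame; the horizontal component's
# magnitude is sin(tilt angle): 0 when upright, 1 when lying flat.
for tilt_deg in (0.0, 10.0, 90.0):
    orientation = R.from_euler("y", tilt_deg, degrees=True)  # body pitched by tilt_deg
    projected_gravity = orientation.inv().apply([0.0, 0.0, -1.0])
    tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
    print(f"tilt={tilt_deg:5.1f} deg -> tilt_mag={tilt_mag:.3f}")
```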
     def step(self, action):
         r = self.Player.robot
         self.previous_action = action

         self.target_joint_positions = (
-            self.joint_nominal_position
-            + self.scaling_factor * action
+            # self.joint_nominal_position +
+            self.scaling_factor * action
         )
         self.target_joint_positions *= self.train_sim_flip

         for idx, target in enumerate(self.target_joint_positions):
             r.set_motor_target_position(
-                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
+                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
             )
-        self.previous_action = action

         self.sync()  # run simulation step
         self.step_counter += 1

-        # if self.step_counter % self.debug_every_n_steps == 0:
-        #     self.debug_joint_status()
+        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
+            self.debug_joint_status()

         current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
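Worked numbers for the action path in `step()` after this change: the policy's action (now bounded at ±10) is multiplied by `scaling_factor = 0.3` to get a joint target in radians, sign-corrected by `train_sim_flip`, then converted to degrees for the motor API. Note that a saturated action maps to ±3 rad, well past typical joint limits, so the effective range is set by the PD controller and the joints rather than the box bounds. A sketch with two illustrative joints:

```python
import math
import numpy as np

# Action -> joint-target pipeline as in step(): scale, flip signs, convert
# to degrees for set_motor_target_position.
scaling_factor = 0.3
action = np.array([1.0, -2.5])   # two illustrative joint commands
flip = np.array([1.0, -1.0])     # per-joint sign convention
target_rad = scaling_factor * action * flip
target_deg = target_rad * 180 / math.pi
print(target_rad)  # [0.3  0.75]
print(target_deg)  # approximately [17.19 42.97]
```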
@@ -476,6 +516,7 @@ class WalkEnv(gym.Env):
         # Update previous position
         self.previous_pos = current_pos.copy()
+        self.last_action_for_reward = action.copy()

         # Fall detection and penalty
         is_fallen = self.Player.world.global_position[2] < 0.3
@@ -494,13 +535,14 @@ class Train(Train_Base):
     def train(self, args):
         # --------------------------------------- Learning parameters
-        n_envs = 8  # Reduced from 8 to decrease CPU/network pressure during init
+        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
         if n_envs < 1:
             raise ValueError("GYM_CPU_N_ENVS must be >= 1")
-        n_steps_per_env = 512  # RolloutBuffer is of size (n_steps_per_env * n_envs)
-        minibatch_size = 128   # should be a factor of (n_steps_per_env * n_envs)
+        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
+        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
+        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
         total_steps = 30000000
-        learning_rate = 3e-4
+        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
         folder_name = f'Walk_R{self.robot_type}'
         model_path = f'./scripts/gyms/logs/{folder_name}/'
@@ -508,31 +550,38 @@ class Train(Train_Base):
print(f"Using {n_envs} parallel environments") print(f"Using {n_envs} parallel environments")
# --------------------------------------- Run algorithm # --------------------------------------- Run algorithm
def init_env(i_env): def init_env(i_env, monitor=False):
def thunk(): def thunk():
return WalkEnv(self.ip, self.server_p + i_env) env = WalkEnv(self.ip, self.server_p + i_env)
if monitor:
env = Monitor(env)
return env
return thunk return thunk
server_log_dir = os.path.join(model_path, "server_logs") server_log_dir = os.path.join(model_path, "server_logs")
os.makedirs(server_log_dir, exist_ok=True) os.makedirs(server_log_dir, exist_ok=True)
servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1) # include 1 extra server for testing servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True) # include 1 extra server for testing
# Wait for servers to start # Wait for servers to start
print(f"Starting {n_envs + 1} rcssservermj servers...") print(f"Starting {n_envs + 1} rcssservermj servers...")
if server_warmup_sec > 0:
print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
sleep(server_warmup_sec)
print("Servers started, creating environments...") print("Servers started, creating environments...")
env = SubprocVecEnv([init_env(i) for i in range(n_envs)]) env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
eval_env = SubprocVecEnv([init_env(n_envs)]) # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
try: try:
# Custom policy network architecture # Custom policy network architecture
policy_kwargs = dict( policy_kwargs = dict(
net_arch=dict( net_arch=dict(
pi=[256, 256, 128], # Policy network: 3 layers pi=[512, 256, 128], # Policy network: 3 layers
vf=[256, 256, 128] # Value network: 3 layers vf=[512, 256, 128] # Value network: 3 layers
), ),
activation_fn=__import__('torch.nn', fromlist=['ReLU']).ReLU, activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
) )
if "model_file" in args: # retrain if "model_file" in args: # retrain
@@ -548,14 +597,17 @@ class Train(Train_Base):
                 learning_rate=learning_rate,
                 device="cpu",
                 policy_kwargs=policy_kwargs,
-                ent_coef=0.01,   # Entropy coefficient for exploration
-                clip_range=0.2,  # PPO clipping parameter
+                ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),     # Entropy coefficient for exploration
+                clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
                 gae_lambda=0.95,  # GAE lambda
-                gamma=0.99  # Discount factor
+                gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
+                target_kl=0.03,
+                n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
+                # tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
             )

             model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
-                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
+                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20,
                                           backup_env_file=__file__)
         except KeyboardInterrupt:
             sleep(1)  # wait for child processes
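The comment on `minibatch_size` encodes a real Stable-Baselines3 constraint: PPO slices a rollout buffer of `n_steps * n_envs` transitions into minibatches, so the batch size should divide that product evenly. A quick check with this commit's defaults:

```python
import os

# Sanity check of the buffer/minibatch relationship, using the defaults
# introduced by this commit.
n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))
minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))

buffer_size = n_steps_per_env * n_envs  # 256 * 20 = 5120
assert buffer_size % minibatch_size == 0, (
    f"batch_size {minibatch_size} must divide rollout buffer {buffer_size}"
)
print(f"buffer={buffer_size}, minibatches per epoch={buffer_size // minibatch_size}")
```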
@@ -572,7 +624,16 @@ class Train(Train_Base):
         # Uses different server and monitor ports
         server_log_dir = os.path.join(args["folder_dir"], "server_logs")
         os.makedirs(server_log_dir, exist_ok=True)
-        server = Train_Server(self.server_p - 1, self.monitor_p, 1, log_dir=server_log_dir)
+        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
+        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"
+        server = Train_Server(
+            self.server_p - 1,
+            self.monitor_p,
+            1,
+            no_render=test_no_render,
+            no_realtime=test_no_realtime,
+        )

         env = WalkEnv(self.ip, self.server_p - 1)
         model = PPO.load(args["model_file"], env=env)
@@ -603,4 +664,16 @@ if __name__ == "__main__":
     )

     trainer = Train(script_args)
-    trainer.train({"model_file": "scripts/gyms/logs/Walk_R0_000/model_245760_steps.zip"})
+    run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()
+    if run_mode == "test":
+        test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
+        test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
+        trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
+    else:
+        retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
+        if retrain_model:
+            trainer.train({"model_file": retrain_model})
+        else:
+            trainer.train({})

Binary file not shown.

train.sh (new executable file, 126 lines)

@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# ------------------------------
+# Resource limits (cgroup v2 + systemd-run)
+# ------------------------------
+# Notes:
+# 1) This script puts the training process into a transient systemd scope
+#    with CPU/memory caps.
+# 2) Only this training run is limited; no permanent system settings change.
+# 3) Every variable below can be overridden from the environment, i.e. you
+#    can set it inline before the command.
+#
+# CPU core budget (default 20).
+# Keep the default if your machine is budgeted for 20 cores.
+CORES="${CORES:-20}"
+
+# CPU utilization percentage (default 95).
+# Multiplied by CORES to produce CPUQuota.
+# Example: CORES=20, UTIL_PERCENT=95 -> CPUQuota=1900% (about 19 cores).
+UTIL_PERCENT="${UTIL_PERCENT:-95}"
+CPU_QUOTA="$((CORES * UTIL_PERCENT))%"
+
+# Memory ceiling (default 28G).
+# Lower it to 16G, 24G, etc. to keep training from exhausting system memory.
+MEMORY_MAX="${MEMORY_MAX:-28G}"
+
+# ------------------------------
+# Training runtime parameters (read by scripts/gyms/Walk.py)
+# ------------------------------
+# Run mode: train or test.
+GYM_CPU_MODE="${GYM_CPU_MODE:-train}"
+
+# Number of parallel environments: more usually means higher throughput,
+# but also more server connection instability.
+# Start around 8-12 and raise to 16/20 once things are stable.
+GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS:-20}"
+
+# Server warmup time (seconds):
+# wait after batch-launching rcssserver before creating the SubprocVecEnv;
+# this lowers the chance of ConnectionReset/EOFError.
+GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC:-10}"
+
+# Training-only parameters.
+GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV:-256}"
+GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE:-512}"
+GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR:-1e-4}"
+GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF:-0.03}"
+GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE:-0.13}"
+GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA:-0.95}"
+GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS:-5}"
+GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL:-}"
+
+# Test-only parameters.
+GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL:-scripts/gyms/logs/Walk_R0_004/best_model.zip}"
+GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER:-scripts/gyms/logs/Walk_R0_004/}"
+
+# Testing defaults to real-time with rendering: both default to 0.
+# Set to 1 to disable the corresponding capability.
+GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER:-0}"
+GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME:-0}"
+
+# Python interpreter selection policy:
+# 1) Prefer a manually supplied PYTHON_BIN.
+# 2) Then the active conda environment (CONDA_PREFIX/bin/python).
+# 3) Then fall back to the default mujoco environment path.
+# 4) Finally try the system python / python3.
+DEFAULT_PYTHON="/home/solren/Downloads/Anaconda/envs/mujoco/bin/python"
+CONDA_PYTHON="${CONDA_PREFIX:-}/bin/python"
+
+# Safety guard: do not run with sudo.
+# Reason: sudo can desynchronize the conda environment from the user session,
+# losing the python path and hiding the systemd --user session.
+if [[ "${EUID}" -eq 0 ]]; then
+    echo "Do not run this script with sudo; run as your normal user in conda env 'mujoco'."
+    exit 1
+fi
+
+# Resolve the Python executable to use.
+if [[ -n "${PYTHON_BIN:-}" ]]; then
+    PYTHON_EXEC="${PYTHON_BIN}"
+elif [[ -n "${CONDA_PREFIX:-}" && -x "${CONDA_PYTHON}" ]]; then
+    PYTHON_EXEC="${CONDA_PYTHON}"
+elif [[ -x "${DEFAULT_PYTHON}" ]]; then
+    PYTHON_EXEC="${DEFAULT_PYTHON}"
+elif command -v python >/dev/null 2>&1; then
+    PYTHON_EXEC="$(command -v python)"
+elif command -v python3 >/dev/null 2>&1; then
+    PYTHON_EXEC="$(command -v python3)"
+else
+    echo "No Python executable found. Set PYTHON_BIN=/abs/path/to/python and retry."
+    exit 1
+fi
+
+# Absolute directory of this script, for locating modules/relative paths later.
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
+# Print the effective configuration for troubleshooting and reproducibility.
+echo "Starting training with limits: CPU=${CPU_QUOTA}, Memory=${MEMORY_MAX}"
+echo "Mode: ${GYM_CPU_MODE}"
+echo "Runtime knobs: GYM_CPU_N_ENVS=${GYM_CPU_N_ENVS}, GYM_CPU_SERVER_WARMUP_SEC=${GYM_CPU_SERVER_WARMUP_SEC}"
+echo "Using Python: ${PYTHON_EXEC}"
+if [[ -n "${CONDA_DEFAULT_ENV:-}" ]]; then
+    echo "Detected conda env: ${CONDA_DEFAULT_ENV}"
+fi
+
+# Run the resource-limited training process via systemd-run --user --scope:
+# - CPUQuota:  total CPU quota
+# - MemoryMax: memory ceiling
+# - env ...  : explicitly pass training parameters to the Python process
+# - python -m scripts.gyms.Walk: start the training entry point as a module
+systemd-run --user --scope \
+    -p CPUQuota="${CPU_QUOTA}" \
+    -p MemoryMax="${MEMORY_MAX}" \
+    env \
+    GYM_CPU_MODE="${GYM_CPU_MODE}" \
+    GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS}" \
+    GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC}" \
+    GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV}" \
+    GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE}" \
+    GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR}" \
+    GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF}" \
+    GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE}" \
+    GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA}" \
+    GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS}" \
+    GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL}" \
+    GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL}" \
+    GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER}" \
+    GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER}" \
+    GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME}" \
+    "${PYTHON_EXEC}" -m scripts.gyms.Walk

[file path not shown]

@@ -47,6 +47,7 @@ class World:
         self.their_team_players: list[OtherRobot] = [OtherRobot(is_teammate=False) for _ in
                                                      range(self.MAX_PLAYERS_PER_TEAM)]
         self.field: Field = self.__initialize_field(field_name=field_name)
+        self.WORLD_STEPTIME: float = 0.005  # Time step of the world in seconds

     def update(self) -> None:
         """