Fix joint-state parsing and buffered sending; rework the Walk gym

徐学颢
2026-03-12 19:59:07 +08:00
parent 0e402c2b4c
commit 092fb521e1
4 changed files with 309 additions and 118 deletions

View File

@@ -98,8 +98,12 @@ class Walk(Behavior):
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
radian_joint_positions = np.deg2rad(list(robot.motor_positions.values()))
radian_joint_speeds = np.deg2rad(list(robot.motor_speeds.values()))
radian_joint_positions = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
radian_joint_speeds = np.deg2rad(
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
)
qpos_qvel_previous_action = np.vstack(
(
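For context (not part of the commit): the hunk above replaces dict.values() iteration with explicit indexing by robot.ROBOT_MOTORS. Python dicts iterate in insertion order, so the old code tied the observation layout to whatever order the server first reported each joint. A minimal sketch of the failure mode, with hypothetical joint names and assuming ROBOT_MOTORS is the canonical ordering:

import numpy as np

ROBOT_MOTORS = ["he1", "he2", "lle1"]  # assumed canonical policy ordering
motor_positions = {"lle1": 30.0, "he1": 0.0, "he2": 5.0}  # keys in server arrival order

by_values = np.deg2rad(list(motor_positions.values()))                 # [0.524, 0.0, 0.087]
by_canonical = np.deg2rad([motor_positions[m] for m in ROBOT_MOTORS])  # [0.0, 0.087, 0.524]

assert not np.allclose(by_values, by_canonical)  # same joints, different feature slots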

View File

@@ -50,10 +50,13 @@ class Server:
"""
Send all committed messages
"""
if len(select([self.__socket], [], [], 0.0)[0]) == 0:
if not self.__send_buff:
return
if len(select([self.__socket], [], [], 0.0)[0]) != 0:
logger.debug("Socket is readable while sending; keeping full-duplex command send.")
self.send_immediate("".join(self.__send_buff))
else:
logger.info("Server_Comm.py: Received a new packet while thinking!")
self.__send_buff = []
def commit(self, msg: str) -> None:
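For context on the rewritten guard: select with a zero timeout is a non-blocking poll that only reports whether a recv would return immediately; since TCP is full-duplex, pending inbound data is no reason to withhold an outbound send, which is why the new code merely logs and sends anyway. A standalone sketch of the pattern, with a hypothetical socket and buffer (not the repo's Server class):

import socket
from select import select

def send_pending(sock: socket.socket, buff: list) -> None:
    if not buff:
        return  # nothing committed, nothing to send
    # Zero-timeout select: informational only; readability concerns recv, not send.
    readable, _, _ = select([sock], [], [], 0.0)
    if readable:
        print("inbound data pending; sending anyway (full duplex)")
    sock.sendall("".join(buff).encode())
    buff.clear()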

View File

@@ -1,4 +1,5 @@
import logging
import os
import re
import numpy as np
from scipy.spatial.transform import Rotation as R
@@ -7,6 +8,16 @@ from utils.math_ops import MathOps
from world.commons.play_mode import PlayModeEnum
logger = logging.getLogger()
DEBUG_LOG_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
def _debug_log(message: str) -> None:
print(message)
try:
with open(DEBUG_LOG_FILE, "a", encoding="utf-8") as f:
f.write(message + "\n")
except OSError:
pass
class WorldParser:
@@ -14,6 +25,36 @@ class WorldParser:
from agent.base_agent import Base_Agent # type hinting
self.agent: Base_Agent = agent
self._hj_debug_prints = 0
def _normalize_motor_name(self, motor_name: str) -> str:
alias_map = {
"q_hj1": "he1",
"q_hj2": "he2",
"q_laj1": "lae1",
"q_laj2": "lae2",
"q_laj3": "lae3",
"q_laj4": "lae4",
"q_raj1": "rae1",
"q_raj2": "rae2",
"q_raj3": "rae3",
"q_raj4": "rae4",
"q_wj1": "te1",
"q_tj1": "te1",
"q_llj1": "lle1",
"q_llj2": "lle2",
"q_llj3": "lle3",
"q_llj4": "lle4",
"q_llj5": "lle5",
"q_llj6": "lle6",
"q_rlj1": "rle1",
"q_rlj2": "rle2",
"q_rlj3": "rle3",
"q_rlj4": "rle4",
"q_rlj5": "rle5",
"q_rlj6": "rle6",
}
return alias_map.get(motor_name, motor_name)
def parse(self, message: str) -> None:
perception_dict: dict = self.__sexpression_to_dict(message)
@@ -51,9 +92,29 @@ class WorldParser:
robot = self.agent.robot
robot.motor_positions = {h["n"]: h["ax"] for h in perception_dict["HJ"]}
hj_states = perception_dict["HJ"] if isinstance(perception_dict["HJ"], list) else [perception_dict["HJ"]]
robot.motor_speeds = {h["n"]: h["vx"] for h in perception_dict["HJ"]}
if self._hj_debug_prints < 5:
names = [joint_state.get("n", "<missing>") for joint_state in hj_states]
normalized_names = [self._normalize_motor_name(name) for name in names]
matched_names = [name for name in names if name in robot.motor_positions]
matched_normalized_names = [name for name in normalized_names if name in robot.motor_positions]
# _debug_log(
# "[ParserDebug] "
# f"hj_count={len(hj_states)} "
# f"sample_names={names[:8]} "
# f"normalized_sample={normalized_names[:8]} "
# f"matched={len(matched_names)}/{len(names)} "
# f"matched_normalized={len(matched_normalized_names)}/{len(normalized_names)}"
# )
self._hj_debug_prints += 1
for joint_state in hj_states:
motor_name = self._normalize_motor_name(joint_state["n"])
if motor_name in robot.motor_positions:
robot.motor_positions[motor_name] = joint_state["ax"]
if motor_name in robot.motor_speeds:
robot.motor_speeds[motor_name] = joint_state["vx"]
world._global_cheat_position = np.array(perception_dict["pos"]["p"])
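The alias table above maps SimSpark-style hinge-joint names (q_hj1, q_llj4, ...) onto the robot's internal motor names, and the guarded update means only pre-registered motors are ever written. A small usage sketch with a trimmed map and simplified perception data:

alias_map = {"q_hj1": "he1", "q_llj4": "lle4"}  # excerpt of the table above

def normalize(name: str) -> str:
    return alias_map.get(name, name)  # unknown names pass through unchanged

motor_positions = {"he1": 0.0, "lle4": 0.0}  # canonical motors, pre-registered
hj_states = [{"n": "q_hj1", "ax": 1.5}, {"n": "q_xyz9", "ax": 9.9}]

for joint in hj_states:
    name = normalize(joint["n"])
    if name in motor_positions:  # unrecognized joints are silently dropped
        motor_positions[name] = joint["ax"]

print(motor_positions)  # {'he1': 1.5, 'lle4': 0.0}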

View File

@@ -15,7 +15,7 @@ from gymnasium import spaces
from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server
from agent import Agent
from agent.base_agent import Base_Agent
from utils.math_ops import MathOps
from scipy.spatial.transform import Rotation as R
@@ -34,7 +34,7 @@ class WalkEnv(gym.Env):
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
self.Player = player = Agent(
self.Player = player = Base_Agent(
team_name="Gym",
number=1,
host=ip,
@@ -49,9 +49,16 @@ class WalkEnv(gym.Env):
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
self.isfallen = False
self.waypoint_index = 0
self.route_completed = False
self.debug_every_n_steps = 5
self.calibrate_nominal_from_neutral = True
self.auto_calibrate_train_sim_flip = True
self.nominal_calibrated_once = False
self.flip_calibrated_once = False
# State space
# Original observation size: 78
obs_size = 78
self.obs = np.zeros(obs_size, np.float32)
self.observation_space = spaces.Box(
@@ -70,6 +77,8 @@ class WalkEnv(gym.Env):
dtype=np.float32
)
# Neutral pose
self.joint_nominal_position = np.array(
[
0.0,
@@ -97,31 +106,33 @@ class WalkEnv(gym.Env):
0.0,
]
)
self.reference_joint_nominal_position = self.joint_nominal_position.copy()
self.train_sim_flip = np.array(
[
1.0,
-1.0,
1.0,
-1.0,
-1.0,
1.0,
-1.0,
-1.0,
1.0,
1.0,
1.0,
1.0,
-1.0,
-1.0,
1.0,
1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
1.0, # 0: Head_yaw (he1)
-1.0, # 1: Head_pitch (he2)
1.0, # 2: Left_Shoulder_Pitch (lae1)
-1.0, # 3: Left_Shoulder_Roll (lae2)
1.0, # 4: Left_Elbow_Pitch (lae3)
1.0, # 5: Left_Elbow_Yaw (lae4)
-1.0, # 6: Right_Shoulder_Pitch (rae1)
1.0, # 7: Right_Shoulder_Roll (rae2)
1.0, # 8: Right_Elbow_Pitch (rae3)
1.0, # 9: Right_Elbow_Yaw (rae4)
1.0, # 10: Waist (te1)
1.0, # 11: Left_Hip_Pitch (lle1)
-1.0, # 12: Left_Hip_Roll (lle2)
-1.0, # 13: Left_Hip_Yaw (lle3)
1.0, # 14: Left_Knee_Pitch (lle4)
1.0, # 15: Left_Ankle_Pitch (lle5)
-1.0, # 16: Left_Ankle_Roll (lle6)
-1.0, # 17: Right_Hip_Pitch (rle1)
-1.0, # 18: Right_Hip_Roll (rle2)
-1.0, # 19: Right_Hip_Yaw (rle3)
-1.0, # 20: Right_Knee_Pitch (rle4)
-1.0, # 21: Right_Ankle_Pitch (rle5)
-1.0, # 22: Right_Ankle_Roll (rle6)
]
)
@@ -135,6 +146,54 @@ class WalkEnv(gym.Env):
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
)
def debug_log(self, message):
print(message)
try:
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
with open(log_path, "a", encoding="utf-8") as f:
f.write(message + "\n")
except OSError:
pass
def calibrate_train_sim_flip_from_neutral(self, neutral_joint_positions):
updated_flip = self.train_sim_flip.copy()
changed = []
for idx, (reference_value, observed_value) in enumerate(
zip(self.reference_joint_nominal_position, neutral_joint_positions)
):
if idx >= 10:
continue
if abs(reference_value) < 0.15 or abs(observed_value) < 0.15:
continue
inferred_flip = 1.0 if np.sign(reference_value) == np.sign(observed_value) else -1.0
if updated_flip[idx] != inferred_flip:
changed.append((idx, updated_flip[idx], inferred_flip))
updated_flip[idx] = inferred_flip
self.train_sim_flip = updated_flip
if changed:
self.debug_log(
"[FlipDebug] "
f"changes={[(idx, old, new) for idx, old, new in changed]}"
)
def is_reliable_neutral_pose(self, neutral_joint_positions):
leg_positions = neutral_joint_positions[11:]
leg_norm = float(np.linalg.norm(leg_positions))
leg_max = float(np.max(np.abs(leg_positions)))
height = float(self.Player.world.global_position[2])
reliable = (
leg_norm > 0.8
and leg_max > 0.35
and 0.12 < height < 0.8
)
return reliable, leg_norm, leg_max, height
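The calibration idea in the two methods above: for joints whose reference and observed neutral angles are both comfortably non-zero (at least 0.15 rad), matching signs imply a flip of +1.0 and opposite signs imply -1.0; indices 10 and up (waist and legs) are skipped, and the neutral pose is only trusted when the reliability check passes. A worked toy example under the same thresholds:

import numpy as np

reference = np.array([0.30, -0.40, 0.05])  # reference neutral angles (rad)
observed = np.array([-0.31, -0.38, 0.20])  # measured neutral pose (rad)

flip = np.ones_like(reference)
for i, (ref, obs) in enumerate(zip(reference, observed)):
    if abs(ref) < 0.15 or abs(obs) < 0.15:
        continue  # near-zero angles: sign too noisy to trust
    flip[i] = 1.0 if np.sign(ref) == np.sign(obs) else -1.0

print(flip)  # [-1.  1.  1.] -> joint 0 mirrored, joint 2 left unchanged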
def observe(self, init=False):
@@ -143,8 +202,6 @@ class WalkEnv(gym.Env):
world = self.Player.world
# Safety check: ensure data is available
if not robot.motor_positions or not robot.motor_speeds:
return np.zeros(78, dtype=np.float32)
# Compute the target velocity
raw_target = self.target_position - world.global_position[:2]
@@ -163,8 +220,12 @@ class WalkEnv(gym.Env):
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
# Joint state
radian_joint_positions = np.deg2rad(list(robot.motor_positions.values()))
radian_joint_speeds = np.deg2rad(list(robot.motor_speeds.values()))
radian_joint_positions = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
radian_joint_speeds = np.deg2rad(
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
)
qpos_qvel_previous_action = np.concatenate([
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
@@ -179,6 +240,7 @@ class WalkEnv(gym.Env):
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
# Assemble the observation
observation = np.concatenate([
qpos_qvel_previous_action,
@@ -187,6 +249,8 @@ class WalkEnv(gym.Env):
projected_gravity,
])
observation = np.clip(observation, -10.0, 10.0)
return observation.astype(np.float32)
@@ -197,11 +261,36 @@ class WalkEnv(gym.Env):
self.Player.robot.commit_motor_targets_pd()
self.Player.server.send()
def debug_joint_status(self):
robot = self.Player.robot
actual_joint_positions = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
target_joint_positions = getattr(
self,
'target_joint_positions',
np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
)
joint_error = actual_joint_positions - target_joint_positions
leg_slice = slice(11, None)
self.debug_log(
"[WalkDebug] "
f"step={self.step_counter} "
f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
f"target_xy={np.round(self.target_position, 3).tolist()} "
f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
f"fallen={self.Player.world.global_position[2] < 0.3}"
)
def reset(self, seed=None, options=None):
'''
Reset and stabilize the robot
Note: for some behaviors it would be better to reduce stabilization or add noise
'''
r = self.Player.robot
super().reset(seed=seed)
if seed is not None:
np.random.seed(seed)
@@ -209,41 +298,76 @@ class WalkEnv(gym.Env):
length1 = np.random.uniform(10, 20) # randomize target distance
length2 = np.random.uniform(10, 20) # randomize target distance
length3 = np.random.uniform(10, 20) # randomize target distance
angle1 = np.random.uniform(-30, 30) # randomize target direction
angle2 = np.random.uniform(-30, 30) # randomize initial orientation
angle3 = np.random.uniform(-30, 30) # randomize target direction
self.step_counter = 0
self.waypoint_index = 0
self.route_completed = False
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.previous_pos = np.array([0.0, 0.0]) # Initialize for first step
self.walk_cycle_step = 0
# beam player to ground
self.Player.server.commit_beam(
pos2d=((random()-1) * 5, (random()-1) * 5), # randomize initial position
rotation=0,
)
# Randomize the beam target position and orientation to diversify training
beam_x = (random() - 0.5) * 10
beam_y = (random() - 0.5) * 10
# Wait until first valid world timestamp is available
for _ in range(7):
self.sync()
if self.Player.world.server_time is not None:
break
for _ in range(5):
self.Player.server.receive()
self.Player.world.update()
self.Player.robot.commit_motor_targets_pd()
self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=0)
self.Player.server.send()
# Execute Neutral skill until it finishes
for _ in range(7):
# Run the Neutral skill to completion so the robot can stand stably at the beam position
finished_count = 0
for _ in range(20):
finished = self.Player.skills_manager.execute("Neutral")
self.sync()
if finished:
finished_count += 1
if finished_count >= 2: # require two completions before treating the stand as settled
break
# neutral_joint_positions = np.deg2rad(
# [self.Player.robot.motor_positions[motor] for motor in self.Player.robot.ROBOT_MOTORS]
# )
# reliable_neutral, neutral_leg_norm, neutral_leg_max, neutral_height = self.is_reliable_neutral_pose(neutral_joint_positions)
# if self.auto_calibrate_train_sim_flip and reliable_neutral and not self.flip_calibrated_once:
# self.calibrate_train_sim_flip_from_neutral(neutral_joint_positions)
# self.flip_calibrated_once = True
# if self.calibrate_nominal_from_neutral and reliable_neutral and not self.nominal_calibrated_once:
# self.joint_nominal_position = neutral_joint_positions * self.train_sim_flip
# self.nominal_calibrated_once = True
# self.debug_log(
# "[ResetDebug] "
# f"neutral_pos={np.round(self.Player.world.global_position, 3).tolist()} "
# f"shoulders={np.round(neutral_joint_positions[2:10], 3).tolist()} "
# f"legs={np.round(neutral_joint_positions[11:], 3).tolist()} "
# f"flip={self.train_sim_flip.tolist()} "
# f"nominal_legs={np.round(self.joint_nominal_position[11:], 3).tolist()} "
# f"calibrated_once={(self.flip_calibrated_once, self.nominal_calibrated_once)} "
# f"reliable_neutral={reliable_neutral} "
# f"leg_norm={neutral_leg_norm:.3f} leg_max={neutral_leg_max:.3f} height={neutral_height:.3f}"
# )
# reset_action_noise = np.random.uniform(-0.015, 0.015, size=(len(self.Player.robot.ROBOT_MOTORS),))
# self.target_joint_positions = (self.joint_nominal_position + reset_action_noise) * self.train_sim_flip
# for idx, target in enumerate(self.target_joint_positions):
# r.set_motor_target_position(
# r.ROBOT_MOTORS[idx], target*180/math.pi, kp=25, kd=0.6
# )
# memory variables
self.initial_position = np.array(self.Player.world.global_position[:2])
self.previous_pos = self.initial_position.copy() # Critical: set to actual position
self.act = np.zeros(self.no_of_actions, np.float32)
point1 = self.initial_position + MathOps.rotate_2d_vec(np.array([length1, 0]), angle1, is_rad=False)
point1 = self.initial_position + np.array([length1, 0])
point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
self.point_list = [point1, point2, point3]
@@ -254,69 +378,77 @@ class WalkEnv(gym.Env):
def render(self, mode='human', close=False):
return
def close(self):
self.Player.shutdown() # close server connection and cleanup
def compute_reward(self, previous_pos, current_pos):
"""
Reward function focused on forward progress and stability
"""
# 1. Progress reward: must move toward target
distance_before = np.linalg.norm(self.target_position - previous_pos)
distance_after = np.linalg.norm(self.target_position - current_pos)
progress = distance_before - distance_after
# Heavily reward forward progress, punish backward movement
if progress > 0:
progress_reward = progress * 20.0 # Strong reward for closing distance
else:
progress_reward = progress * 30.0 # Even stronger penalty for going backward
# 2. Absolute speed reward: reward any movement toward goal
movement_magnitude = np.linalg.norm(current_pos - previous_pos)
def compute_reward(self, previous_pos, current_pos, action):
velocity = current_pos - previous_pos
velocity_magnitude = np.linalg.norm(velocity)
direction_to_target = self.target_position - current_pos
if np.linalg.norm(direction_to_target) > 0.01:
direction_to_target = direction_to_target / np.linalg.norm(direction_to_target)
movement_vector = current_pos - previous_pos
# Dot product: reward movement in target direction
directional_alignment = np.dot(movement_vector, direction_to_target)
speed_reward = max(0, directional_alignment) * 10.0
else:
speed_reward = 0.0
prev_direction_to_target = self.target_position - previous_pos
distance_to_target = np.linalg.norm(direction_to_target)
prev_distance_to_target = np.linalg.norm(prev_direction_to_target)
progress_reward = np.clip((prev_distance_to_target - distance_to_target) * 30.0, -2.0, 4.0)
velocity_in_m_per_sec = velocity_magnitude / 0.05
speed_reward = np.clip(velocity_in_m_per_sec * 1.5, 0.0, 1.5)
if velocity_magnitude > 1e-4 and distance_to_target > 1e-4:
directional_alignment = np.dot(velocity, direction_to_target) / (velocity_magnitude * distance_to_target)
directional_alignment = np.clip(directional_alignment, -1.0, 1.0)
direction_reward = max(0.0, directional_alignment)
else:
direction_reward = 0.0
alive_bonus = 0.05
# 3. Height maintenance: encourage upright posture
height = self.Player.world.global_position[2]
if height > 0.40:
height_reward = 0.5
elif height > 0.30:
height_reward = 0.0
if 0.45 <= height <= 1.2:
height_reward = 1.5
else:
height_reward = -0.5
height_reward = -6.0
motionless_penalty = -1.5 if velocity_magnitude < 0.003 else 0.0
# 4. Waypoint bonuses
waypoint_bonus = 0.0
if distance_after < 0.8:
waypoint_bonus = 20.0
if distance_to_target < 0.5:
waypoint_bonus = 25.0
if self.waypoint_index < len(self.point_list) - 1:
self.waypoint_index += 1
self.target_position = self.point_list[self.waypoint_index]
else:
waypoint_bonus = 50.0 # Final waypoint
waypoint_bonus = 100.0
self.route_completed = True
return progress_reward + speed_reward + height_reward + waypoint_bonus
action_magnitude = np.linalg.norm(action[11:])
action_penalty = -0.08 * action_magnitude
tilt_penalty = -0.2 * np.linalg.norm(self.Player.robot.gyroscope[:2]) / 100.0
return (
progress_reward
+ speed_reward
+ direction_reward
+ alive_bonus
+ height_reward
+ motionless_penalty
+ waypoint_bonus
+ action_penalty
+ tilt_penalty
)
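To make the new shaping concrete, here is one hypothetical 50 ms control step (the 0.05 s implied by the speed divisor) in which the robot moves 2 cm straight toward a still-distant target while upright; all numbers are illustrative:

progress_reward = min(max(0.02 * 30.0, -2.0), 4.0)  # 0.60
speed_reward = min((0.02 / 0.05) * 1.5, 1.5)        # 0.60 (0.4 m/s)
direction_reward = 1.0                              # perfectly aligned
alive_bonus = 0.05
height_reward = 1.5                                 # 0.45 <= height <= 1.2
motionless_penalty = 0.0                            # 0.02 > 0.003
action_penalty = -0.08 * 1.0                        # assumed ||action[11:]|| = 1.0
tilt_penalty = -0.2 * 20.0 / 100.0                  # assumed 20 deg/s gyro norm
total = (progress_reward + speed_reward + direction_reward + alive_bonus
         + height_reward + motionless_penalty + action_penalty + tilt_penalty)
print(round(total, 2))  # 3.63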
def step(self, action):
r = self.Player.robot
target_joint_positions = (
self.joint_nominal_position + self.scaling_factor * action
)
target_joint_positions *= self.train_sim_flip
self.previous_action = action
for idx, target in enumerate(target_joint_positions):
self.target_joint_positions = (
self.joint_nominal_position
+ self.scaling_factor * action
)
self.target_joint_positions *= self.train_sim_flip
for idx, target in enumerate(self.target_joint_positions):
r.set_motor_target_position(
r.ROBOT_MOTORS[idx], target*180/math.pi, kp=25, kd=0.6
)
@@ -327,30 +459,23 @@ class WalkEnv(gym.Env):
self.sync() # run simulation step
self.step_counter += 1
# if self.step_counter % self.debug_every_n_steps == 0:
# self.debug_joint_status()
current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
# Compute reward based on movement from previous step
reward = self.compute_reward(self.previous_pos, current_pos)
reward = self.compute_reward(self.previous_pos, current_pos, action)
# Penalty for standing still or minimal movement
movement = np.linalg.norm(current_pos - self.previous_pos)
if movement < 0.005: # Less than 5mm = basically standing
reward -= 2.0
# Small action penalty to encourage efficiency
action_magnitude = np.linalg.norm(action)
reward -= action_magnitude * 0.01
# Update previous position
self.previous_pos = current_pos.copy()
# Fall detection and penalty
is_fallen = self.Player.world.global_position[2] < 0.25
if is_fallen:
reward -= 15.0
is_fallen = self.Player.world.global_position[2] < 0.3
# terminal state: the robot has fallen, timed out, or completed the route
terminated = is_fallen or self.step_counter > 500 or self.waypoint_index >= len(self.point_list)
terminated = is_fallen or self.step_counter > 800 or self.route_completed
truncated = False
return self.observe(), reward, terminated, truncated, {}
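With termination now driven entirely by fall, timeout, or route completion, the env follows the standard Gymnasium five-tuple contract. A driver loop would look roughly like this (host and port are placeholders, and an (obs, info) reset return is assumed):

env = WalkEnv("127.0.0.1", 3100)  # placeholder constructor args
obs, info = env.reset(seed=0)
done = False
while not done:
    action = env.action_space.sample()  # random policy, purely for illustration
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()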
@@ -367,13 +492,13 @@ class Train(Train_Base):
def train(self, args):
#--------------------------------------- Learning parameters
n_envs = 4 # Reduced from 8 to decrease CPU/network pressure during init
n_envs = 8 # Restored to 8 parallel environments
n_steps_per_env = 512 # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = 64 # should be a factor of (n_steps_per_env * n_envs)
total_steps = 30000000
learning_rate = 3e-4
folder_name = f'Walk_R{self.robot_type}'
model_path = f'./mujococodebase/scripts/gyms/logs/{folder_name}/'
model_path = f'./scripts/gyms/logs/{folder_name}/'
print(f"Model path: {model_path}")
print(f"Using {n_envs} parallel environments")
@@ -438,7 +563,7 @@ class Train(Train_Base):
# Uses different server and monitor ports
server = Train_Server( self.server_p-1, self.monitor_p, 1 )
env = WalkEnv( self.ip, self.server_p-1, self.monitor_p, self.robot_type, True )
env = WalkEnv( self.ip, self.server_p-1 )
model = PPO.load( args["model_file"], env=env )
try:
@@ -468,5 +593,3 @@ if __name__ == "__main__":
trainer = Train(script_args)
trainer.train({})