Stand stable 0.1 amendment; add some info
@@ -50,6 +50,7 @@ class WalkEnv(gym.Env):
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
@@ -57,7 +58,7 @@ class WalkEnv(gym.Env):
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 1000
        target_hz_env = 0
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
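The three fields above suggest that sync() can pace the control loop to a wall-clock rate whenever target_hz_env is non-zero (the new value 0 disables pacing). A minimal sketch of such a throttle, assuming sync() invokes it once per step and that _target_dt holds 1/_target_hz; the helper name is illustrative, not from this commit:

import time

def _pace_to_target_hz(self):
    # Sleep just long enough to keep the loop near self._target_hz.
    if self._target_hz <= 0.0:
        return  # pacing disabled (target_hz_env = 0)
    now = time.monotonic()
    if self._last_sync_time is not None:
        remaining = self._target_dt - (now - self._last_sync_time)
        if remaining > 0.0:
            time.sleep(remaining)
    self._last_sync_time = time.monotonic()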
@@ -114,7 +115,6 @@ class WalkEnv(gym.Env):
                0.0,
            ]
        )
        self.reference_joint_nominal_position = self.joint_nominal_position.copy()
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
@@ -144,9 +144,16 @@ class WalkEnv(gym.Env):
            ]
        )

        self.scaling_factor = 0.5
        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
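With scaling_factor = 0.3 and an action space presumably bounded to [-1, 1], each joint target can deviate from its nominal position by at most 0.3 rad (about 17.2 degrees). A quick illustration of that bound (the [-1, 1] Box bound is an assumption, not visible in this hunk):

import numpy as np

scaling_factor = 0.3
action = np.array([1.0, -1.0, 0.5])    # sample action components in [-1, 1]
offsets = scaling_factor * action      # radians added to the nominal pose
print(np.rad2deg(offsets))             # approx [ 17.19 -17.19 8.59 ] degrees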
@@ -155,6 +162,7 @@ class WalkEnv(gym.Env):
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def debug_log(self, message):
        print(message)
@@ -165,45 +173,6 @@ class WalkEnv(gym.Env):
        except OSError:
            pass

    def calibrate_train_sim_flip_from_neutral(self, neutral_joint_positions):
        updated_flip = self.train_sim_flip.copy()
        changed = []

        for idx, (reference_value, observed_value) in enumerate(
            zip(self.reference_joint_nominal_position, neutral_joint_positions)
        ):
            if idx >= 10:
                continue
            if abs(reference_value) < 0.15 or abs(observed_value) < 0.15:
                continue

            inferred_flip = 1.0 if np.sign(reference_value) == np.sign(observed_value) else -1.0
            if updated_flip[idx] != inferred_flip:
                changed.append((idx, updated_flip[idx], inferred_flip))
                updated_flip[idx] = inferred_flip

        self.train_sim_flip = updated_flip

        if changed:
            self.debug_log(
                "[FlipDebug] "
                f"changes={[(idx, old, new) for idx, old, new in changed]}"
            )

    def is_reliable_neutral_pose(self, neutral_joint_positions):
        leg_positions = neutral_joint_positions[11:]
        leg_norm = float(np.linalg.norm(leg_positions))
        leg_max = float(np.max(np.abs(leg_positions)))
        height = float(self.Player.world.global_position[2])

        reliable = (
            leg_norm > 0.8
            and leg_max > 0.35
            and 0.12 < height < 0.8
        )

        return reliable, leg_norm, leg_max, height

    def observe(self, init=False):

        """Get the current observation."""
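The flip calibration above boils down to comparing the signs of sufficiently large reference and observed joint angles. A standalone illustration of that rule with made-up values:

import numpy as np

reference = np.array([0.60, -0.40, 0.05])    # nominal joint angles (rad)
observed = np.array([-0.58, -0.41, 0.20])    # measured neutral pose (rad)

for idx, (ref, obs) in enumerate(zip(reference, observed)):
    if abs(ref) < 0.15 or abs(obs) < 0.15:
        continue  # signs near zero are unreliable, skip (joint 2 here)
    flip = 1.0 if np.sign(ref) == np.sign(obs) else -1.0
    print(idx, flip)  # 0 -1.0 (signs differ), 1 1.0 (signs agree)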
@@ -301,6 +270,7 @@ class WalkEnv(gym.Env):
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
@@ -312,9 +282,9 @@ class WalkEnv(gym.Env):
        if seed is not None:
            np.random.seed(seed)

        length1 = np.random.uniform(10, 20)  # randomize target distance
        length2 = np.random.uniform(10, 20)  # randomize target distance
        length3 = np.random.uniform(10, 20)  # randomize target distance
        length1 = 2  # fixed target distance
        length2 = np.random.uniform(0.6, 1)  # randomize target distance
        length3 = np.random.uniform(0.6, 1)  # randomize target distance
        angle2 = np.random.uniform(-30, 30)  # randomize initial orientation
        angle3 = np.random.uniform(-30, 30)  # randomize target direction

@@ -329,64 +299,66 @@ class WalkEnv(gym.Env):
        # Randomize the beam target position and orientation to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=0)
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill to completion, giving the robot enough time to stand stably at the beam position
        finished_count = 0
        for _ in range(10):
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
            if finished_count >= 3:  # assume 3 consecutive completions count as success
            if finished_count >= 20:  # assume 20 consecutive completions count as success
                break

        # neutral_joint_positions = np.deg2rad(
        #     [self.Player.robot.motor_positions[motor] for motor in self.Player.robot.ROBOT_MOTORS]
        # )
        # reliable_neutral, neutral_leg_norm, neutral_leg_max, neutral_height = self.is_reliable_neutral_pose(neutral_joint_positions)
        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

        # if self.auto_calibrate_train_sim_flip and reliable_neutral and not self.flip_calibrated_once:
        #     self.calibrate_train_sim_flip_from_neutral(neutral_joint_positions)
        #     self.flip_calibrated_once = True
        # if self.calibrate_nominal_from_neutral and reliable_neutral and not self.nominal_calibrated_once:
        #     self.joint_nominal_position = neutral_joint_positions * self.train_sim_flip
        #     self.nominal_calibrated_once = True
        # self.debug_log(
        #     "[ResetDebug] "
        #     f"neutral_pos={np.round(self.Player.world.global_position, 3).tolist()} "
        #     f"shoulders={np.round(neutral_joint_positions[2:10], 3).tolist()} "
        #     f"legs={np.round(neutral_joint_positions[11:], 3).tolist()} "
        #     f"flip={self.train_sim_flip.tolist()} "
        #     f"nominal_legs={np.round(self.joint_nominal_position[11:], 3).tolist()} "
        #     f"calibrated_once={(self.flip_calibrated_once, self.nominal_calibrated_once)} "
        #     f"reliable_neutral={reliable_neutral} "
        #     f"leg_norm={neutral_leg_norm:.3f} leg_max={neutral_leg_max:.3f} height={neutral_height:.3f}"
        # )
            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # reset_action_noise = np.random.uniform(-0.015, 0.015, size=(len(self.Player.robot.ROBOT_MOTORS),))
        # self.target_joint_positions = (self.joint_nominal_position + reset_action_noise) * self.train_sim_flip

        # for idx, target in enumerate(self.target_joint_positions):
        #     r.set_motor_target_position(
        #         r.ROBOT_MOTORS[idx], target*180/math.pi, kp=25, kd=0.6
        #     )
            for i in range(self.reset_recover_steps):
                # Linearly fade the perturbation to help the policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to the actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        point1 = self.initial_position + np.array([length1, 0])
        # Build the target in the robot's current forward direction instead of the fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1, point2, point3]
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}
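MathOps.rotate_2d_vec presumably applies a standard 2-D rotation, so point1 lands length1 metres ahead of the robot's current heading rather than along global +x. A NumPy equivalent of that construction (illustrative only; not the project's MathOps):

import numpy as np

def rotate_2d_vec(vec, angle_deg):
    # Counter-clockwise 2-D rotation by angle_deg.
    a = np.deg2rad(angle_deg)
    rot = np.array([[np.cos(a), -np.sin(a)],
                    [np.sin(a), np.cos(a)]])
    return rot @ vec

initial_position = np.array([0.0, 0.0])
heading_deg = 90.0  # robot facing global +y
point1 = initial_position + rotate_2d_vec(np.array([2.0, 0.0]), heading_deg)
print(np.round(point1, 3))  # [0. 2.] -> 2 m straight ahead of the robot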
@@ -394,89 +366,99 @@ class WalkEnv(gym.Env):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        eps = 1e-6
        dt = 0.05

        velocity = current_pos - previous_pos
        speed_step = float(np.linalg.norm(velocity))
        speed = speed_step / dt

        direction_to_target = self.target_position - current_pos
        prev_direction_to_target = self.target_position - previous_pos
        distance_to_target = float(np.linalg.norm(direction_to_target))
        prev_distance_to_target = float(np.linalg.norm(prev_direction_to_target))

        # Progress toward waypoint (secondary signal)
        progress = prev_distance_to_target - distance_to_target
        progress_reward = np.clip(progress * 2.0, -1.5, 2.5)

        # Forward speed and lateral drift
        forward_dir = direction_to_target / max(distance_to_target, eps)
        forward_speed = float(np.dot(velocity, forward_dir)) / dt
        target_speed = 1.0
        speed_error = forward_speed - target_speed
        speed_reward = 3.0 * math.exp(-1.5 * (speed_error ** 2))

        lateral_vec = velocity - forward_dir * np.dot(velocity, forward_dir)
        lateral_speed = float(np.linalg.norm(lateral_vec)) / dt
        lateral_penalty = -0.6 * np.clip(lateral_speed, 0.0, 2.0)
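The speed term is a Gaussian bump that peaks at target_speed, so both standing still and overshooting are discounted smoothly. A few evaluated points of the exact expression used above:

import math

def speed_reward(forward_speed, target_speed=1.0):
    # Gaussian-shaped tracking reward, maximal (3.0) exactly at target_speed.
    return 3.0 * math.exp(-1.5 * (forward_speed - target_speed) ** 2)

print(round(speed_reward(1.0), 3))  # 3.0
print(round(speed_reward(0.5), 3))  # 2.062
print(round(speed_reward(0.0), 3))  # 0.669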
        # Heading alignment (small shaping term)
        if speed_step > 1e-4 and distance_to_target > 1e-4:
            directional_alignment = np.dot(velocity, direction_to_target) / (speed_step * distance_to_target)
            directional_alignment = float(np.clip(directional_alignment, -1.0, 1.0))
            direction_reward = max(0.0, directional_alignment) * 0.3
        else:
            direction_reward = 0.0

        alive_bonus = 0.05

        # Height and posture
        height = float(self.Player.world.global_position[2])
        if 0.8 <= height <= 1.05:
            height_reward = 1.0
        elif 0.40 <= height <= 1.20:
            height_reward = -1.0
        else:
            height_reward = -6.0

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        posture_penalty = -2.2 * (tilt_mag ** 2)
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))
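Projecting gravity into the body frame turns orientation into a scalar tilt: upright gives projected_gravity near (0, 0, -1) and tilt_mag near 0, while a pitch of theta gives tilt_mag = sin(theta). A quick check with scipy, assuming _global_cheat_orientation is an xyzw quaternion as scipy's from_quat expects:

import numpy as np
from scipy.spatial.transform import Rotation as R

pitch_deg = 20.0
body = R.from_euler("y", pitch_deg, degrees=True)  # body pitched 20 degrees
projected_gravity = body.inv().apply(np.array([0.0, 0.0, -1.0]))
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
print(round(tilt_mag, 3), round(float(np.sin(np.deg2rad(pitch_deg))), 3))  # 0.342 0.342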
        motionless_penalty = -1.5 if speed < 0.1 else 0.0
        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # Waypoint bonus
        waypoint_bonus = 0.0
        if distance_to_target < 0.5:
            waypoint_bonus = 25.0
            if self.waypoint_index < len(self.point_list) - 1:
                self.waypoint_index += 1
                self.target_position = self.point_list[self.waypoint_index]
            else:
                waypoint_bonus = 100.0
                self.route_completed = True

        # Effort + smoothness
        action_magnitude = float(np.linalg.norm(action[11:]))
        action_penalty = -0.05 * action_magnitude
        action_delta = action - self.last_action_for_reward
        smoothness_penalty = -0.02 * float(np.linalg.norm(action_delta[11:]))
        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * (tilt_mag)
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the start of compute_reward, add a height rate-of-change calculation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for timing intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height drops (negative rate of change)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; a positive -height_rate is the drop magnitude

        # # In compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

        return (
            progress_reward
            + speed_reward
            + lateral_penalty
            + direction_reward
            + alive_bonus
            + height_reward
            + posture_penalty
            + motionless_penalty
            + waypoint_bonus
            + action_penalty
            + smoothness_penalty
        )

    def step(self, action):
@@ -484,21 +466,23 @@ class WalkEnv(gym.Env):
        self.previous_action = action

        self.target_joint_positions = (
            self.joint_nominal_position
            + self.scaling_factor * action
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        # if self.step_counter % self.debug_every_n_steps == 0:
        #     self.debug_joint_status()
        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
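set_motor_target_position evidently takes degrees (hence target * 180 / math.pi) and per-motor PD gains; raising kp from 25 to 40 and kd from 0.6 to 1.0 stiffens tracking. The torque law such a controller typically implements is sketched below (an assumption about the simulator, purely illustrative):

def pd_torque(target_deg, position_deg, velocity_deg, kp=40.0, kd=1.0):
    # Classic PD position control: stiffness on the error, damping on velocity.
    return kp * (target_deg - position_deg) - kd * velocity_deg

print(pd_torque(10.0, 0.0, 5.0))  # 395.0 (units depend on the simulator)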
@@ -529,10 +513,10 @@ class Train(Train_Base):
        n_envs = 20  # Increased from 8; watch CPU/network pressure during init
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 512  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 128  # should be a factor of (n_steps_per_env * n_envs)
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 2e-4
        learning_rate = 1e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

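As the comments note, minibatch_size must divide the rollout buffer. With the new values the buffer holds 20 envs x 256 steps = 5120 samples, giving 10 minibatches per epoch; a quick sanity check:

n_envs = 20
n_steps_per_env = 256
minibatch_size = 512

rollout_size = n_envs * n_steps_per_env
assert rollout_size % minibatch_size == 0, "minibatch_size must divide the rollout buffer"
print(rollout_size, rollout_size // minibatch_size)  # 5120 10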
@@ -580,10 +564,12 @@ class Train(Train_Base):
            learning_rate=learning_rate,
            device="cpu",
            policy_kwargs=policy_kwargs,
            # ent_coef=0.01,  # Entropy coefficient for exploration
            # clip_range=0.2,  # PPO clipping parameter
            # gae_lambda=0.95,  # GAE lambda
            # gamma=0.99  # Discount factor
            ent_coef=0.03,  # Entropy coefficient for exploration
            clip_range=0.13,  # PPO clipping parameter
            gae_lambda=0.95,  # GAE lambda
            gamma=0.95,  # Discount factor
            target_kl=0.03,
            n_epochs=5
        )
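For context, a minimal sketch of how these keyword arguments map onto a Stable-Baselines3 PPO constructor (env and policy_kwargs assumed defined elsewhere; only the hyperparameters visible in this hunk come from the commit):

from stable_baselines3 import PPO

model = PPO(
    "MlpPolicy",
    env,                  # vectorized training env (assumed)
    n_steps=256,          # per-env rollout length
    batch_size=512,       # minibatch size
    learning_rate=1e-4,
    ent_coef=0.03,        # stronger exploration than the 0.0 default
    clip_range=0.13,      # tighter clipping than the 0.2 default
    gae_lambda=0.95,
    gamma=0.95,           # effective horizon of roughly 1/(1-gamma) = 20 steps
    target_kl=0.03,       # stop an update epoch early if the policy drifts too far
    n_epochs=5,
    device="cpu",
)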

        model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
@@ -635,6 +621,6 @@ if __name__ == "__main__":
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_003/best_model.zip",
    #               "folder_dir": "Walk_R0_003",})
    trainer.train({"model_file": "scripts/gyms/logs/Walk_R0_004/best_model.zip"})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_004/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_004/",})
BIN  scripts/gyms/logs/stand_stable_0.1.zip  (new file, binary not shown)