From 7f7ec781c5ae77fedd5bf41676356fb4cd9cd635 Mon Sep 17 00:00:00 2001
From: ChenXi
Date: Sun, 22 Mar 2026 21:11:46 -0400
Subject: [PATCH] Add weighting function, change the reward logic

---
 rl_game/get_up/config/ppo_cfg.yaml  |   2 +-
 rl_game/get_up/config/t1_env_cfg.py | 178 ++++++++++++----------
 rl_game/get_up/env/t1_env.py        |   6 +-
 rl_game/get_up/train.py             |   2 +-
 4 files changed, 84 insertions(+), 104 deletions(-)

diff --git a/rl_game/get_up/config/ppo_cfg.yaml b/rl_game/get_up/config/ppo_cfg.yaml
index 1d047de..26fb73f 100644
--- a/rl_game/get_up/config/ppo_cfg.yaml
+++ b/rl_game/get_up/config/ppo_cfg.yaml
@@ -39,7 +39,7 @@ params:
     reward_shaper:
       scale_value: 1.0
     normalize_advantage: True
-    gamma: 0.96
+    gamma: 0.98
     tau: 0.95
     learning_rate: 3e-4
     lr_schedule: adaptive
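The gamma bump above (0.96 -> 0.98) is easiest to read through the discount's effective horizon, roughly 1/(1 - gamma): getting up is a long motion, and doubling the horizon lets the terminal stand-up bonus propagate back to the early roll-over steps. A quick illustrative check (not part of the patch):

for gamma in (0.96, 0.98):
    # rule-of-thumb horizon, not an exact statement about credit assignment
    print(f"gamma={gamma}: credit spreads over roughly {1.0 / (1.0 - gamma):.0f} steps")
# gamma=0.96 -> ~25 steps, gamma=0.98 -> ~50 steps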
diff --git a/rl_game/get_up/config/t1_env_cfg.py b/rl_game/get_up/config/t1_env_cfg.py
index c8ff606..bafe95d 100644
--- a/rl_game/get_up/config/t1_env_cfg.py
+++ b/rl_game/get_up/config/t1_env_cfg.py
@@ -26,87 +26,98 @@ def standing_with_feet_reward(
     force_threshold: float = 20.0,
     max_v_z: float = 0.5
 ) -> torch.Tensor:
-    # Guard: look up the body indices from the scene safely
+    """Final height goal: head up, pelvis up, steady foot contact forces"""
     head_idx, _ = env.scene["robot"].find_bodies("H2")
     pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")
 
-    # 1. Height reward: use a more stable normalization, clamped to [0, 1]
     curr_head_h = env.scene["robot"].data.body_state_w[:, head_idx[0], 2]
     curr_pelvis_h = env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2]
 
-    # Use a sigmoid or a simple min-max mapping to avoid dividing by tiny values
+    # Normalized height scores
     head_score = torch.clamp(curr_head_h / min_head_height, 0.0, 1.2)
     pelvis_score = torch.clamp(curr_pelvis_h / min_pelvis_height, 0.0, 1.2)
     height_reward = (head_score + pelvis_score) / 2.0
 
-    # 2. Foot contact force: guard against NaNs
+    # Foot contact-force check
     contact_sensor = env.scene.sensors.get(sensor_cfg.name)
-    # The sensor may not be initialized on some steps, so check for None
     if contact_sensor is None:
         return torch.zeros(env.num_envs, device=env.device)
 
     foot_forces_z = torch.sum(contact_sensor.data.net_forces_w[:, :, 2], dim=-1)
-    # Clip huge impact forces so the sigmoid input stays bounded
-    foot_forces_z = torch.clamp(foot_forces_z, 0.0, 500.0)
     force_weight = torch.sigmoid((foot_forces_z - force_threshold) / 5.0)
 
-    # 3. Vertical-velocity penalty: use a smoother penalty
+    # Vertical-velocity penalty (discourages unstable hopping)
     root_vel_z = env.scene["robot"].data.root_lin_vel_w[:, 2]
     vel_penalty = torch.exp(-torch.abs(root_vel_z) / max_v_z)
 
-    # Combine: height * stability
     return height_reward * (0.5 + 0.5 * force_weight * vel_penalty)
 
 
-def universal_arm_support_reward(
+def arm_tuck_incremental_reward(
     env: ManagerBasedRLEnv,
-    sensor_cfg: SceneEntityCfg,
-    height_threshold: float = 0.60,
-    min_force: float = 15.0
+    pitch_threshold: float = 1.4,
+    shaping_weight: float = 0.2
 ) -> torch.Tensor:
+    """Incremental arm-tuck reward: reward motion toward flexion, big bonus once the threshold is reached"""
+    joint_names = ["Left_Elbow_Pitch", "Right_Elbow_Pitch"]
+    joint_ids, _ = env.scene["robot"].find_joints(joint_names)
+
+    elbow_pos = env.scene["robot"].data.joint_pos[:, joint_ids]
+    elbow_vel = env.scene["robot"].data.joint_vel[:, joint_ids]
+
+    # 1. Velocity shaping: small reward while flexing (positive velocity), penalty while extending
+    avg_vel = torch.mean(elbow_vel, dim=-1)
+    shaping_reward = torch.tanh(avg_vel) * shaping_weight
+
+    # 2. Threshold trigger: once the arms are fully tucked, pay a steady static bonus
+    is_tucked = torch.all(elbow_pos > pitch_threshold, dim=-1).float()
+    goal_bonus = is_tucked * 1.5
+
+    return shaping_reward + goal_bonus
+
+
+def dynamic_getup_strategy_reward(env: ManagerBasedRLEnv) -> torch.Tensor:
     """
-    Generic arm-support reward: covers both sit-up support from supine and push-up from prone.
-    Logic: reward whenever the arms push upward while the body is moving up.
+    State-machine reward switching:
+    - Supine: focus on rolling over + tucking the arms.
+    - Prone: focus on pushing off the ground to stand up.
     """
-    # 1. Read the contact-sensor data
-    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
-    if contact_sensor is None:
-        return torch.zeros(env.num_envs, device=env.device)
+    # Projected gravity: a Z component > 0 means the robot is on its back (supine)
+    gravity_z = env.scene["robot"].data.projected_gravity_b[:, 2]
 
-    # Total vertical force (world Z) on all configured arm/hand links
-    # net_forces_w shape: (num_envs, num_bodies, 3)
-    arm_forces_z = contact_sensor.data.net_forces_w[:, :, 2]
-    # Take the max (or mean) over contact points as the support strength
-    max_arm_force = torch.max(arm_forces_z, dim=-1)[0]
+    # State masks
+    is_on_back = (gravity_z > 0.2).float()
+    is_on_belly = (gravity_z < -0.2).float()
+    is_transition = (1.0 - is_on_back - is_on_belly)
 
-    # 2. Read the state data
-    pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")
-    pelvis_pos_z = env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2]
-    root_vel_z = env.scene["robot"].data.root_lin_vel_w[:, 2]
+    # 1. Flip shaping: drive gravity_z toward -1.0 (prone)
+    flip_shaping = torch.clamp(-gravity_z, min=-1.0, max=1.0)
 
-    # 3. Compute the reward terms
-    # A. Force reward: encourage the hands to push on the ground with more than min_force
-    # tanh normalization keeps huge forces from blowing up the reward (NaN risk)
-    force_reward = torch.tanh(torch.clamp(max_arm_force - min_force, min=0.0) / 50.0)
+    # 2. Arm tuck
+    tuck_rew = arm_tuck_incremental_reward(env)
 
-    # B. Velocity gating: the support reward only scales up while the robot is actually rising
-    # This prevents reward hacking by pressing the hands into the ground while lying flat
-    velocity_factor = torch.clamp(root_vel_z, min=0.0, max=2.0)
+    # 3. Push-off (reuses the old logic, minus the internal height fade; gating is now handled by the state machine)
+    contact_sensor = env.scene.sensors.get("contact_sensor")
+    max_arm_force = torch.zeros(env.num_envs, device=env.device)
+    if contact_sensor is not None:
+        # Assumed to be the contact forces on the arm/hand links
+        arm_forces_z = contact_sensor.data.net_forces_w[:, :, 2]
+        max_arm_force = torch.max(arm_forces_z, dim=-1)[0]
 
-    # C. Relaxed posture check:
-    # Instead of checking whether the hands are under the pelvis, check whether they are doing work
-    # A large enough contact force counts as supporting
-    is_supporting = (max_arm_force > min_force).float()
+    push_rew = torch.tanh(torch.clamp(max_arm_force - 15.0, min=0.0) / 40.0)
 
-    # 4. Staged fade-out (curriculum)
-    # Once the pelvis rises above height_threshold (0.6 m), the reward fades out linearly
-    # This forces the robot to end up balancing on its legs instead of keeping a hand on the ground
-    height_fade = torch.clamp((height_threshold - pelvis_pos_z) / 0.15, min=0.0, max=1.0)
+    # --- Dynamic weight composition ---
+    # Supine zone: flip (8.0) + tuck (4.0)
+    back_strategy = is_on_back * (8.0 * flip_shaping + 4.0 * tuck_rew)
 
-    # Final combination
-    # Logic: force * (1 + vertical velocity) * height fade
-    total_reward = force_reward * (1.0 + 2.0 * velocity_factor) * is_supporting * height_fade
+    # Prone zone: push-off (25.0) + keep the arms tucked (1.0)
+    # Push-off is weighted far above flipping so the robot prefers to stay prone and attempt to stand
+    belly_strategy = is_on_belly * (25.0 * push_rew + 1.0 * tuck_rew)
+
+    # Transition zone
+    trans_strategy = is_transition * (4.0 * flip_shaping + 10.0 * push_rew + 2.0 * tuck_rew)
+
+    return back_strategy + belly_strategy + trans_strategy
 
-    return total_reward
 
 def is_standing_still(
     env: ManagerBasedRLEnv,
@@ -126,7 +137,6 @@ def is_standing_still(
     gravity_error = torch.norm(env.scene["robot"].data.projected_gravity_b[:, :2], dim=-1)
     root_vel_norm = torch.norm(env.scene["robot"].data.root_lin_vel_w, dim=-1)
 
-    # Criteria: head high enough AND pelvis high enough AND small tilt AND low velocity
     is_stable_now = (
         (current_head_h > min_head_height) &
         (current_pelvis_h > min_pelvis_height) &
@@ -143,21 +153,17 @@ def is_standing_still(
     return env.extras["stable_timer"] > standing_time
 
 
+# --- 2. Configuration classes ---
 T1_JOINT_NAMES = [
-    'AAHead_yaw', 'Head_pitch',
-
     'Left_Shoulder_Pitch', 'Left_Shoulder_Roll', 'Left_Elbow_Pitch', 'Left_Elbow_Yaw',
     'Right_Shoulder_Pitch', 'Right_Shoulder_Roll', 'Right_Elbow_Pitch', 'Right_Elbow_Yaw',
-    'Waist',
-
     'Left_Hip_Pitch', 'Right_Hip_Pitch', 'Left_Hip_Roll', 'Right_Hip_Roll',
     'Left_Hip_Yaw', 'Right_Hip_Yaw', 'Left_Knee_Pitch', 'Right_Knee_Pitch',
     'Left_Ankle_Pitch', 'Right_Ankle_Pitch', 'Left_Ankle_Roll', 'Right_Ankle_Roll'
-
 ]
@@ -186,14 +192,13 @@ class T1EventCfg:
         params={
             "asset_cfg": SceneEntityCfg("robot"),
             "pose_range": {
-                "roll": (-1.57, 1.57),  # lying on either side
+                "roll": (-1.57, 1.57),
                "pitch": tuple(numpy.array([1.4, 1.6], dtype=np.float32) * random.choice([-1 , 1])),  # supine / prone
-                "yaw": (-3.14, 3.14),  # full yaw randomization
+                "yaw": (-3.14, 3.14),
                "x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.3, 0.4),
            },
-            "velocity_range": {},
        },
        mode="reset",
    )
@@ -201,61 +206,41 @@ class T1EventCfg:
 
 @configclass
 class T1ActionCfg:
-    """Key change: lower the action scale for smoother motion and a stronger damping effect"""
     joint_pos = JointPositionActionCfg(
-        asset_name="robot",
-        joint_names=T1_JOINT_NAMES,
-        scale=0.5,
-        use_default_offset=True
+        asset_name="robot", joint_names=T1_JOINT_NAMES, scale=0.5, use_default_offset=True
     )
 
 
 @configclass
 class T1GetUpRewardCfg:
-    # 1. Basic posture reward (guides the body upright)
-    upright = RewTerm(func=mdp.flat_orientation_l2, weight=2.0)
+    # --- 1. Adaptive strategy reward (switches between flipping over, arm tucking and pushing off) ---
+    adaptive_strategy = RewTerm(
+        func=dynamic_getup_strategy_reward,
+        weight=1.0  # sub-weights are applied inside the function
+    )
 
-    # 2. Conditional height reward: both head and pelvis must be high, with the feet on the ground
+    # --- 2. Core height objective (kept at the highest priority) ---
     height_with_feet = RewTerm(
         func=standing_with_feet_reward,
-        weight=20.0,  # raised: this is the core guidance term
+        weight=15.0,
         params={
-            "min_head_height": 1.10,
+            "min_head_height": 1.1,
             "min_pelvis_height": 0.7,
             "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_foot_link"]),
-            "force_threshold": 20.0,
+            "force_threshold": 30.0,
            "max_v_z": 0.3
        }
    )
 
-    # 3. Arm-support reward: helps during the lift-off-the-ground phase
-    arm_push_support = RewTerm(
-        func=universal_arm_support_reward,
-        weight=15.0,  # raised sharply (3.0 -> 15.0) so it drives the initial push
-        params={
-            "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_hand_link", "AL3", "AR3"]),
-            "height_threshold": 0.65,  # keep encouraging the arms until the trunk rises above ~0.6 m
-            "min_force": 8.0  # triggers at a small push force
-        }
-    )
+    # --- 3. Auxiliary constraints and penalties ---
+    upright = RewTerm(func=mdp.flat_orientation_l2, weight=1.0)
+    joint_limits = RewTerm(func=mdp.joint_pos_limits, weight=-20.0, params={"asset_cfg": SceneEntityCfg("robot")})
+    action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
 
-    # 4. Joint-limit penalty (new: avoids numerical issues when joints slam into their limits)
-    joint_limits = RewTerm(
-        func=mdp.joint_pos_limits,
-        weight=-1.0,
-        params={"asset_cfg": SceneEntityCfg("robot")}
-    )
-
-    # 5. Time penalty (forces efficiency)
-    time_penalty = RewTerm(
-        func=mdp.is_alive,
-        weight=-1.2
-    )
-
-    # 6. Final success jackpot
-    is_success = RewTerm(
+    # --- 4. Success reward ---
+    is_success_bonus = RewTerm(
         func=is_standing_still,
-        weight=800.0,
+        weight=1000.0,
         params={
            "min_head_height": 1.05,
            "min_pelvis_height": 0.75,
@@ -268,11 +253,6 @@ class T1GetUpRewardCfg:
 
 @configclass
 class T1GetUpTerminationsCfg:
     time_out = DoneTerm(func=mdp.time_out)
-
-    # Failure check: reset when the trunk tilts more than 45 degrees
-    #base_crash = DoneTerm(func=mdp.bad_orientation, params={"limit_angle": 0.785})
-
-    # Success check: both heights reached + stable
     standing_success = DoneTerm(
         func=is_standing_still,
         params={
@@ -287,7 +267,7 @@ class T1GetUpTerminationsCfg:
 
 @configclass
 class T1EnvCfg(ManagerBasedRLEnvCfg):
-    scene = T1SceneCfg(num_envs=8192, env_spacing=2.5)  # run the 5090 at full throttle
+    scene = T1SceneCfg(num_envs=8192, env_spacing=2.5)
 
     def __post_init__(self):
         super().__post_init__()
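The heart of the new reward logic above is dynamic_getup_strategy_reward, which gates three shaping terms by the projected-gravity Z component. Below is a self-contained sketch of just that blending, with placeholder tensors standing in for the env-dependent tuck/push terms (illustrative only, not part of the patch):

import torch

def blend_strategies(gravity_z, flip, tuck, push):
    # Same masks and weights as dynamic_getup_strategy_reward in t1_env_cfg.py
    is_on_back = (gravity_z > 0.2).float()    # supine
    is_on_belly = (gravity_z < -0.2).float()  # prone
    is_transition = 1.0 - is_on_back - is_on_belly

    back = is_on_back * (8.0 * flip + 4.0 * tuck)
    belly = is_on_belly * (25.0 * push + 1.0 * tuck)
    trans = is_transition * (4.0 * flip + 10.0 * push + 2.0 * tuck)
    return back + belly + trans

g = torch.tensor([0.9, -0.9, 0.0])    # supine, prone, on its side
flip = torch.clamp(-g, -1.0, 1.0)     # the patch's flip-shaping term
tuck = torch.tensor([0.2, 0.2, 0.2])  # placeholder arm-tuck reward
push = torch.tensor([0.0, 0.8, 0.4])  # placeholder push-off reward
print(blend_strategies(g, flip, tuck, push))  # the supine total is negative, which pushes the policy to roll over

Because the masks partition the environments into supine, prone, and transition zones, each environment only ever sees one set of strategy weights per step.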
diff --git a/rl_game/get_up/env/t1_env.py b/rl_game/get_up/env/t1_env.py
index 9dd1cf0..37d5e7a 100644
--- a/rl_game/get_up/env/t1_env.py
+++ b/rl_game/get_up/env/t1_env.py
@@ -51,10 +51,10 @@ class T1SceneCfg(InteractiveSceneCfg):
         actuators={
             "t1_joints": ImplicitActuatorCfg(
                 joint_names_expr=[".*"],
-                effort_limit=400.0,
+                effort_limit=800.0,  # doubled so the motors have enough torque
                 velocity_limit=20.0,
-                stiffness=150.0,
-                damping=5.0,
+                stiffness=500.0,  # key: raised from 150 into the 500-800 range
+                damping=40.0,     # key: raised from 5 into the 30-50 range to suppress jitter
             ),
         },
     )
diff --git a/rl_game/get_up/train.py b/rl_game/get_up/train.py
index ddda5fe..f7e344b 100644
--- a/rl_game/get_up/train.py
+++ b/rl_game/get_up/train.py
@@ -9,7 +9,7 @@ from isaaclab.app import AppLauncher
 
 # 1. Configure the launch arguments
 parser = argparse.ArgumentParser(description="Train T1 robot to Get-Up with RL-Games.")
-parser.add_argument("--num_envs", type=int, default=16384, help="4096 parallel envs is usually enough for the get-up task")
+parser.add_argument("--num_envs", type=int, default=8192, help="4096 parallel envs is usually enough for the get-up task")
 parser.add_argument("--task", type=str, default="Isaac-T1-GetUp-v0", help="Task ID")
 parser.add_argument("--seed", type=int, default=42, help="Random seed")
 AppLauncher.add_app_launcher_args(parser)
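The actuator change in t1_env.py is easier to judge with a rough PD model: in Isaac Lab, the stiffness and damping of ImplicitActuatorCfg act approximately as the position and velocity gains of the joint drive, with the commanded torque capped by effort_limit (the drive is resolved inside the physics engine, so treat this as an approximation). A sketch of the implied torque budget (illustrative only, not part of the patch):

import torch

def approx_drive_torque(q_target, q, qdot, stiffness, damping, effort_limit):
    # Rough PD drive with torque saturation; the real implicit drive is solved by PhysX
    tau = stiffness * (q_target - q) - damping * qdot
    return torch.clamp(tau, -effort_limit, effort_limit)

# 0.5 rad position error at 2 rad/s joint speed, old gains vs. new gains:
err, vel = torch.tensor(0.5), torch.tensor(2.0)
print(approx_drive_torque(err, torch.tensor(0.0), vel, 150.0, 5.0, 400.0))   # ~65 N·m
print(approx_drive_torque(err, torch.tensor(0.0), vel, 500.0, 40.0, 800.0))  # ~170 N·m

Raising damping together with stiffness keeps the much stiffer drive from ringing, which is consistent with the jitter comment in the diff.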