From d78fdeda0dd3876c09b5010833555edca3b94504 Mon Sep 17 00:00:00 2001
From: ChenXi
Date: Sat, 21 Mar 2026 07:00:49 -0400
Subject: [PATCH] change reward function

---
 rl_game/get_up/config/t1_env_cfg.py | 174 +++++----------------------
 1 file changed, 27 insertions(+), 147 deletions(-)

diff --git a/rl_game/get_up/config/t1_env_cfg.py b/rl_game/get_up/config/t1_env_cfg.py
index 2998b30..5f13245 100644
--- a/rl_game/get_up/config/t1_env_cfg.py
+++ b/rl_game/get_up/config/t1_env_cfg.py
@@ -23,46 +23,30 @@ def standing_with_feet_reward(
     min_head_height: float,
     min_pelvis_height: float,
     sensor_cfg: SceneEntityCfg,
-    force_threshold: float = 30.0,
-    max_v_z: float = 0.25
+    force_threshold: float = 20.0,
+    max_v_z: float = 0.5
 ) -> torch.Tensor:
-    """
-    Height reward with a smooth switch:
-      low height  -> pure height guidance
-      high height -> height + foot contact force + velocity constraint
-    """
-    # 1. Fetch the basic state
+
     head_idx, _ = env.scene["robot"].find_bodies("H2")
     pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")
-    current_head_h = env.scene["robot"].data.body_state_w[:, head_idx[0], 2]
-    current_pelvis_h = env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2]
-    # 2. Base height score (0.0 - 1.0)
-    head_score = torch.clamp(current_head_h / min_head_height, max=1.0)
-    pelvis_score = torch.clamp(current_pelvis_h / min_pelvis_height, max=1.0)
-    combined_height_score = (head_score + pelvis_score) / 2.0
+    curr_head_h = torch.clamp(env.scene["robot"].data.body_state_w[:, head_idx[0], 2], 0.0, 2.0)
+    curr_pelvis_h = torch.clamp(env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2], 0.0, 2.0)
+
+    head_score = torch.tanh(curr_head_h / (min_head_height + 1e-6) * 2.0)
+    pelvis_score = torch.tanh(curr_pelvis_h / (min_pelvis_height + 1e-6) * 2.0)
+    height_reward = (head_score + pelvis_score) / 2.0
 
-    # 3. Foot contact-force check
     contact_sensor = env.scene.sensors.get(sensor_cfg.name)
     foot_forces_z = torch.sum(contact_sensor.data.net_forces_w[:, :, 2], dim=-1)
-    is_feet_on_ground = (foot_forces_z > force_threshold).float()
-
-    # 4. Velocity penalty (suppress erratic jumping)
+    force_weight = torch.sigmoid((foot_forces_z - force_threshold) / 5.0)
     root_vel_z = env.scene["robot"].data.root_lin_vel_w[:, 2]
-    vel_penalty_factor = torch.exp(-4.0 * torch.clamp(torch.abs(root_vel_z) - max_v_z, min=0.0))
+    vel_penalty = torch.exp(-2.0 * torch.clamp(torch.abs(root_vel_z) - max_v_z, min=0.0))
 
-    # --- Core logic switch ---
-    # Define a "transition height" (e.g., the pelvis reaching 0.4 m)
-    transition_h = 0.4
-
-    # While the height is low: pure height reward to induce upward motion
-    low_height_reward = combined_height_score
-
-    # Once the height is higher: combined reward (height * velocity limit * feet must touch the ground)
-    high_height_reward = combined_height_score * vel_penalty_factor * is_feet_on_ground
-
-    return torch.where(current_pelvis_h < transition_h, low_height_reward, high_height_reward)
+    influence_weight = torch.clamp((curr_pelvis_h - 0.2) / 0.4, min=0.0, max=1.0)
+    combined_reward = height_reward * ((1.0 - influence_weight) + influence_weight * force_weight * vel_penalty)
+    return combined_reward
 
 def arm_push_up_reward(
     env: ManagerBasedRLEnv,
@@ -105,18 +89,6 @@ def arm_push_up_reward(
         pushing_up_bonus, torch.zeros_like(pushing_up_bonus))
 
-
-def linear_head_height_reward(env: ManagerBasedRLEnv, target_height: float, base_height: float = 0.15) -> torch.Tensor:
-    """
-    Linear incremental reward for lifting the head from the ground to the target height
-    """
-    head_idx, _ = env.scene["robot"].find_bodies("H2")
-    current_head_h = env.scene["robot"].data.body_state_w[:, head_idx[0], 2]
-
-    # Compute the lift relative to the ground and normalize it to 0-1
-    reward = (current_head_h - base_height) / (target_height - base_height)
-    return torch.clamp(reward, min=0.0, max=1.0)
-
 def is_standing_still(
     env: ManagerBasedRLEnv,
     min_head_height: float,
@@ -152,51 +124,6 @@ def is_standing_still(
     return env.extras["stable_timer"] > standing_time
 
-def feet_airtime_penalty_local(
-    env: ManagerBasedRLEnv,
-    sensor_cfg: SceneEntityCfg,
-    threshold: float = 1.0
-) -> torch.Tensor:
-    """
-    Custom airtime-penalty logic:
-    If the net vertical force on the feet is below the threshold, the feet have left the ground.
-    Returns a tensor that is 1.0 while airborne and 0.0 while grounded.
-    """
-    # 1. Get the sensor object
-    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
-
-    if contact_sensor is None:
-        # If the sensor is not found, return all zeros to avoid a crash
-        return torch.zeros(env.num_envs, device=env.device)
-
-    # 2. Get the contact forces (num_envs, num_bodies_in_sensor, 3)
-    # Take the Z-axis force of every monitored body (left/right foot).
-    # If every foot force is below the threshold, the robot counts as fully airborne.
-    foot_forces_z = contact_sensor.data.net_forces_w[:, :, 2]
-    is_in_air = torch.all(foot_forces_z < threshold, dim=-1)
-
-    return is_in_air.float()
-
-def root_vel_z_l2_local(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
-    # Heavily penalize positive Z velocity (shooting upward)
-    vel_z = env.scene[asset_cfg.name].data.root_lin_vel_w[:, 2]
-    return torch.square(torch.clamp(vel_z, min=0.0))
-
-def joint_pos_rel_l2_local(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
-    # Deviation from the default joint positions (num_envs, num_joints)
-    rel_pos = mdp.joint_pos_rel(env, asset_cfg)
-    # Sum of squares (L2)
-    return torch.sum(torch.square(rel_pos), dim=-1)
-
-def strict_feet_contact_reward(env: ManagerBasedRLEnv, sensor_cfg: SceneEntityCfg) -> torch.Tensor:
-    """If the feet are off the ground, give a large negative score to force the robot to find a support point."""
-    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
-    # Unstable as soon as either foot carries no force
-    foot_forces_z = contact_sensor.data.net_forces_w[:, :, 2]
-    all_feet_cond = torch.min(foot_forces_z, dim=-1)[0] > 5.0  # both feet must carry at least 5 N
-
-    return (~all_feet_cond).float()  # 1 means a violation
-
 # --- 2. Configuration classes ---
 
 T1_JOINT_NAMES = [
@@ -263,19 +190,20 @@ class T1GetUpRewardCfg:
     # 2. Conditional height reward: dual height check (head + pelvis), feet must be on the ground
     height_with_feet = RewTerm(
         func=standing_with_feet_reward,
-        weight=50.0,  # core guidance term, higher weight
+        weight=25.0,  # core guidance term
         params={
             "min_head_height": 1.10,
-            "min_pelvis_height": 0.65,
+            "min_pelvis_height": 0.7,
             "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_foot_link"]),
-            "force_threshold": 10.0
+            "force_threshold": 20.0,
+            "max_v_z": 0.3
         }
     )
 
     # 3. Arm push-up reward: assists the phase of getting off the ground
     arm_push_support = RewTerm(
         func=arm_push_up_reward,
-        weight=20.0,  # raised substantially (from 3.0 to 15.0) so it drives the initial push
+        weight=15.0,  # raised substantially (from 3.0 to 15.0) so it drives the initial push
         params={
             "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_hand_link", "AL3", "AR3"]),
             "height_threshold": 0.6,  # encourage arm pushing until the trunk rises to 0.6 m
         }
     )
 
-    # 4. Guide the robot to "look up" and lift its head
-    head_lift = RewTerm(
-        func=linear_head_height_reward,
-        weight=15.0,
-        params={"target_height": 1.1, "base_height": 0.15}
-    )
-    # 5. Penalty terms
-    undesired_contacts = RewTerm(
-        func=mdp.undesired_contacts,
-        weight=-2.0,
-        params={
-            "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=["H2", "Trunk"]),
-            # Note: arm-related links must be excluded here, otherwise pushing with the arms is penalized as well
-            "threshold": 1.0
-        }
-    )
-
-    # 6. Suppress jumping: heavily penalize upward velocity
-    root_vel_z_penalty = RewTerm(
-        func=root_vel_z_l2_local,
-        weight=-1.0,  # larger negative weight
+    # 4. Joint-limit penalty (new: keeps joints from slamming into their limits and causing numerical issues)
+    joint_limits = RewTerm(
+        func=mdp.joint_pos_limits,
+        weight=-1.0,
         params={"asset_cfg": SceneEntityCfg("robot")}
     )
 
-    # 7. Suppress airtime (airtime penalty)
-    feet_airtime = RewTerm(
-        func=strict_feet_contact_reward,
-        weight=-5.0,  # large weight: one jump loses more than standing up gains
-        params={"sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_foot_link"])}
-    )
-
-    joint_vel_penalty = RewTerm(
-        func=mdp.joint_vel_l2,
-        weight=-0.01,  # penalize overly fast joint motion
-        params={"asset_cfg": SceneEntityCfg("robot")}
-    )
-
-    action_rate = RewTerm(
-        func=mdp.action_rate_l2,
-        weight=-0.05,  # penalize abrupt action changes for smoother, less explosive motion
-    )
-
-    # Penalize trunk roll and pitch angular velocity
-    base_ang_vel_penalty = RewTerm(
-        func=lambda env, asset_cfg: torch.norm(mdp.base_ang_vel(env, asset_cfg), dim=-1),
-        weight=-0.1,
-        params={"asset_cfg": SceneEntityCfg("robot")}
-    )
-
-    joint_deviation = RewTerm(
-        func=joint_pos_rel_l2_local,
-        weight=0.1,  # keep the weight low; it only encourages the robot to move
-        params={"asset_cfg": SceneEntityCfg("robot")}
-    )
-
-    # 7. Final success bonus
+    # 5. Final success bonus
     is_success = RewTerm(
-        func=lambda env, keys: env.termination_manager.get_term(keys),
-        weight=300.0,
+        func=lambda env, keys: env.termination_manager.get_term(keys).float(),
+        weight=50.0,
         params={"keys": "standing_success"}
     )
 
@@ -382,4 +262,4 @@ class T1EnvCfg(ManagerBasedRLEnvCfg):
     actions = T1ActionCfg()
 
     episode_length_s = 6.0
-    decimation = 2
\ No newline at end of file
+    decimation = 4
\ No newline at end of file
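
Reviewer note: a minimal, standalone sketch of how the blended reward in the new
standing_with_feet_reward behaves. It only reproduces the blending math from this patch;
the tensor values below (heights, foot forces, vertical velocity) are made-up examples,
not simulator output, so it runs without Isaac Lab and shows that a low pelvis height
yields pure height guidance while a higher pelvis gates the reward on foot contact and
vertical speed.

    import torch

    # Assumed example inputs for three envs: lying, mid-rise, standing.
    curr_head_h = torch.tensor([0.20, 0.70, 1.15])    # head height [m]
    curr_pelvis_h = torch.tensor([0.15, 0.45, 0.72])  # pelvis height [m]
    foot_forces_z = torch.tensor([0.0, 60.0, 250.0])  # summed vertical foot force [N]
    root_vel_z = torch.tensor([0.05, 0.40, 0.02])     # vertical base velocity [m/s]

    min_head_height, min_pelvis_height = 1.10, 0.7    # targets from the reward config
    force_threshold, max_v_z = 20.0, 0.3              # thresholds from the reward config

    # Height guidance: saturating tanh scores averaged over head and pelvis.
    head_score = torch.tanh(curr_head_h / (min_head_height + 1e-6) * 2.0)
    pelvis_score = torch.tanh(curr_pelvis_h / (min_pelvis_height + 1e-6) * 2.0)
    height_reward = (head_score + pelvis_score) / 2.0

    # Soft gates: foot contact force (sigmoid around the threshold) and an
    # exponential penalty on vertical speed above max_v_z.
    force_weight = torch.sigmoid((foot_forces_z - force_threshold) / 5.0)
    vel_penalty = torch.exp(-2.0 * torch.clamp(torch.abs(root_vel_z) - max_v_z, min=0.0))

    # Blend: no gating below a 0.2 m pelvis height, full gating above 0.6 m.
    influence_weight = torch.clamp((curr_pelvis_h - 0.2) / 0.4, min=0.0, max=1.0)
    combined_reward = height_reward * ((1.0 - influence_weight) + influence_weight * force_weight * vel_penalty)
    print(combined_reward)  # highest for the standing pose, lowest for the lying pose

With these example numbers the lying env keeps roughly its raw height score (about 0.38),
the mid-rise env is partially gated by its excess vertical speed, and the standing env scores
close to 1.0; the old hard switch at a 0.4 m pelvis height is replaced by this continuous blend.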