198 lines
6.5 KiB
Python
198 lines
6.5 KiB
Python
import torch
|
||
from isaaclab.assets import ArticulationCfg
|
||
from isaaclab.envs import ManagerBasedRLEnvCfg, ManagerBasedRLEnv
|
||
from isaaclab.managers import ObservationGroupCfg as ObsGroup
|
||
from isaaclab.managers import ObservationTermCfg as ObsTerm
|
||
from isaaclab.managers import RewardTermCfg as RewTerm
|
||
from isaaclab.managers import TerminationTermCfg as DoneTerm
|
||
from isaaclab.managers import EventTermCfg as EventTerm
|
||
from isaaclab.envs.mdp import JointPositionActionCfg
|
||
from isaaclab.managers import SceneEntityCfg
|
||
from isaaclab.utils import configclass
|
||
from rl_game.get_up.env.t1_env import T1SceneCfg
|
||
|
||
import isaaclab.envs.mdp as mdp
|
||
|
||
|
||
# --- 1. Custom MDP logic functions (must be defined before the config classes) ---
|
||
|
||
def is_standing_still(
    env: "ManagerBasedRLEnv",
    minimum_height: float,
    max_angle_error: float,
    standing_time: float,
    timer_key: str = "stable_timer",
) -> torch.Tensor:
    """Check whether each robot has stood stably for a minimum duration.

    A robot counts as "stable now" when its root height exceeds
    ``minimum_height`` and the norm of the xy components of the body-frame
    projected gravity (a proxy for tilt) is below ``max_angle_error``.
    A per-env timer accumulates control-step time while stable and resets
    to zero otherwise.

    Args:
        env: The running environment instance.
        minimum_height: Root z-position (world frame) required to count as standing.
        max_angle_error: Upper bound on the xy projected-gravity norm
            (roughly sin(tilt angle)) to count as upright.
        standing_time: Seconds of uninterrupted stability required.
        timer_key: Key under which the timer tensor is stored in ``env.extras``.
            Defaults to ``"stable_timer"`` (backward compatible). Pass distinct
            keys if several terms use this function with different thresholds;
            otherwise they would share — and corrupt — a single timer.

    Returns:
        Boolean tensor of shape (num_envs,): True where the robot has been
        stable for longer than ``standing_time``.
    """
    # Current root height (world z) and tilt proxy (xy projected gravity).
    current_height = env.scene["robot"].data.root_pos_w[:, 2]
    gravity_error = torch.norm(env.scene["robot"].data.projected_gravity_b[:, :2], dim=-1)

    # Instantaneous stability check.
    is_stable_now = (current_height > minimum_height) & (gravity_error < max_angle_error)

    # Lazily create the per-env timer buffer.
    # NOTE(review): the timer is not explicitly cleared on episode reset; it
    # self-corrects one step later because a freshly reset (lying) robot is
    # unstable, but confirm this is acceptable for all reset poses.
    if timer_key not in env.extras:
        env.extras[timer_key] = torch.zeros(env.num_envs, device=env.device)

    # Accumulate one control step while stable, reset to zero otherwise.
    # Control-step length = physics_dt * decimation.
    dt = env.physics_dt * env.cfg.decimation
    env.extras[timer_key] = torch.where(
        is_stable_now,
        env.extras[timer_key] + dt,
        torch.zeros_like(env.extras[timer_key]),
    )

    # Success once the timer exceeds the required duration.
    return env.extras[timer_key] > standing_time
def get_success_reward(env: ManagerBasedRLEnv, term_keys: str) -> torch.Tensor:
    """Reward signal that mirrors a termination term.

    Looks up the termination term named ``term_keys`` in the environment's
    termination manager and returns its per-env value, so a reward term can
    pay out exactly when that (success) termination fires.
    """
    manager = env.termination_manager
    return manager.get_term(term_keys)
def root_lin_vel_norm(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """L2 norm of the asset's root linear velocity, per environment.

    Reads the world-frame root linear velocity of ``asset_cfg.name``
    — shape (num_envs, 3) — and collapses it to a (num_envs,) magnitude.
    """
    asset = env.scene[asset_cfg.name]
    return asset.data.root_lin_vel_w.norm(dim=-1)
def root_ang_vel_norm(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor:
    """L2 norm of the asset's root angular velocity, per environment.

    Same contract as :func:`root_lin_vel_norm`, but for the world-frame
    angular velocity of the root body.
    """
    ang_vel = env.scene[asset_cfg.name].data.root_ang_vel_w
    return torch.linalg.vector_norm(ang_vel, dim=-1)
# --- 2. Configuration classes ---
|
||
|
||
@configclass
class T1ObservationCfg:
    """Observation-space configuration for the get-up task."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations fed to the policy, concatenated into a single vector."""

        concatenate_terms = True
        enable_corruption = False  # no observation noise/corruption

        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
        projected_gravity = ObsTerm(func=mdp.projected_gravity)
        # Height information is essential for getting up.
        # NOTE(review): mdp.root_pos_w presumably returns the full (x, y, z)
        # root position, not only the height — confirm that is intended.
        root_height = ObsTerm(func=mdp.root_pos_w)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel = ObsTerm(func=mdp.joint_vel_rel)
        actions = ObsTerm(func=mdp.last_action)

    # Single observation group named "policy".
    policy = PolicyCfg()
@configclass
class T1EventCfg:
    """Reset-time randomization: drop the robot in a random fallen pose
    (on its back, front, or side) by sampling the root orientation."""

    reset_robot_rotation = EventTerm(
        func=mdp.reset_root_state_uniform,
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            # NOTE(review): ranges are presumably offsets applied on top of
            # the asset's default root state — confirm against the event impl.
            "pose_range": {
                # Roll/pitch up to ±90 deg and any yaw => lying poses.
                "roll": (-1.57, 1.57),
                "pitch": (-1.57, 1.57),
                "yaw": (-3.14, 3.14),
                # No positional randomization.
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                "z": (0.0, 0.0),
            },
            # Empty dict => no velocity randomization.
            "velocity_range": {},
        },
        mode="reset",
    )
@configclass
class T1ActionCfg:
    """Action space: position targets for every joint."""

    joint_pos = JointPositionActionCfg(
        asset_name="robot",
        # Regex matching all joints of the robot.
        joint_names=[".*"],
        # Policy output is scaled by this factor before being applied.
        scale=0.5,
        # Targets are offsets from the default joint positions.
        use_default_offset=True
    )
@configclass
class T1GetUpRewardCfg:
    """Reward terms: progress shaping, stability, and a terminal success bonus."""

    # 1. Progress shaping: penalize being below standing height.
    # FIX: mdp.root_height_below_minimum returns 1 while the root is BELOW
    # minimum_height, so the original +15.0 weight rewarded staying down
    # (contradicting the stated intent "higher is better"). A negative
    # weight turns it into a "get above 0.65 m" incentive.
    height_progress = RewTerm(
        func=mdp.root_height_below_minimum,
        weight=-15.0,
        params={"minimum_height": 0.65}
    )

    # 2. Time penalty: encourage standing up quickly.
    time_penalty = RewTerm(func=mdp.is_alive, weight=-0.5)

    # 3. Posture: keep the torso vertical.
    # FIX: mdp.flat_orientation_l2 grows with tilt (L2 of the xy projected
    # gravity), so the original +2.0 weight rewarded tilting; the weight
    # must be negative to reward an upright base.
    upright = RewTerm(func=mdp.flat_orientation_l2, weight=-2.0)

    # 4. Stability: damp root linear velocity once standing.
    root_static = RewTerm(
        func=root_lin_vel_norm,  # local helper defined above
        weight=-1.5,
        params={"asset_cfg": SceneEntityCfg("robot")}
    )

    # Angular-velocity stability.
    root_ang_static = RewTerm(
        func=root_ang_vel_norm,  # local helper defined above
        weight=-0.5,
        params={"asset_cfg": SceneEntityCfg("robot")}
    )

    # 5. Terminal bonus: +500 while the "standing_success" termination fires.
    is_success = RewTerm(
        func=get_success_reward,  # local helper defined above
        weight=500.0,
        params={"term_keys": "standing_success"}  # must match the attribute name in the terminations cfg
    )

    # 6. Smoothness penalties.
    action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
    joint_vel = RewTerm(func=mdp.joint_vel_l2, weight=-0.001)
@configclass
class T1GetUpTerminationsCfg:
    """Termination conditions: the episode ends once the robot stands stably."""

    # Failure: base tilted more than ~1.0 rad.
    # NOTE(review): episodes start with the robot lying down (roll/pitch
    # randomized up to ±1.57 rad, spawn height 0.2 m), so this term likely
    # fires immediately after reset and prevents the robot from ever
    # attempting to get up — confirm whether it should be removed or its
    # limit widened for this task.
    base_crash = DoneTerm(func=mdp.bad_orientation, params={"limit_angle": 1.0})

    # Success: height and uprightness thresholds held for 1.0 s
    # (see is_standing_still above for the exact criterion).
    standing_success = DoneTerm(
        func=is_standing_still,
        params={
            "minimum_height": 0.63,
            "max_angle_error": 0.15,
            "standing_time": 1.0
        }
    )
@configclass
class T1EnvCfg(ManagerBasedRLEnvCfg):
    """Top-level environment configuration for the T1 get-up task."""

    # Scene with massively parallel environments.
    scene = T1SceneCfg(num_envs=16384, env_spacing=2.5)

    def __post_init__(self) -> None:
        super().__post_init__()
        # Spawn low to the ground; combined with the random-rotation reset
        # event this makes the robot start "fallen" and learn to get up.
        self.scene.robot.init_state.pos = (0.0, 0.0, 0.2)

    # Manager configurations (defined earlier in this module).
    observations = T1ObservationCfg()
    rewards = T1GetUpRewardCfg()
    terminations = T1GetUpTerminationsCfg()
    events = T1EventCfg()
    actions = T1ActionCfg()

    episode_length_s = 5.0  # forced reset after 5 seconds
    decimation = 4  # physics steps per control step