# Source: Gym_GPU/rl_game/get_up/config/t1_env_cfg.py
import random

import numpy
import numpy as np
import torch
from isaaclab.assets import ArticulationCfg
from isaaclab.envs import ManagerBasedRLEnvCfg, ManagerBasedRLEnv
from isaaclab.envs.mdp import JointPositionActionCfg
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import RewardTermCfg as RewTerm
from isaaclab.managers import SceneEntityCfg
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.utils import configclass
import isaaclab.envs.mdp as mdp

from rl_game.get_up.env.t1_env import T1SceneCfg
# --- 1. Custom MDP logic functions ---

def standing_with_feet_reward(
    env: ManagerBasedRLEnv,
    min_head_height: float,
    min_pelvis_height: float,
    sensor_cfg: SceneEntityCfg,
    force_threshold: float = 20.0,
    max_v_z: float = 0.5
) -> torch.Tensor:
    """Reward standing tall with the feet loaded and low vertical speed.

    Combines a height score (head + pelvis, tanh-saturated around the target
    heights), a foot contact-force gate, and a vertical-velocity penalty.
    The contact/velocity factors are blended in only once the pelvis has
    risen above ~0.2 m, so the early "getting off the floor" phase is driven
    purely by height.

    Args:
        env: The manager-based RL environment.
        min_head_height: Target head height (m) used to normalize the head score.
        min_pelvis_height: Target pelvis height (m) used to normalize the pelvis score.
        sensor_cfg: Scene entity config naming the foot contact sensor.
        force_threshold: Vertical foot force (N) around which the contact gate transitions.
        max_v_z: Vertical root speed (m/s) above which the velocity penalty applies.

    Returns:
        Per-environment reward tensor of shape (num_envs,).
    """
    head_idx, _ = env.scene["robot"].find_bodies("H2")
    pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")

    # Height scores: clamp to a sane [0, 2] m range, then saturate with tanh.
    curr_head_h = torch.clamp(env.scene["robot"].data.body_state_w[:, head_idx[0], 2], 0.0, 2.0)
    curr_pelvis_h = torch.clamp(env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2], 0.0, 2.0)
    head_score = torch.tanh(curr_head_h / (min_head_height + 1e-6) * 2.0)
    pelvis_score = torch.tanh(curr_pelvis_h / (min_pelvis_height + 1e-6) * 2.0)
    height_reward = (head_score + pelvis_score) / 2.0

    # BUGFIX: guard against a missing sensor (consistent with arm_push_up_reward);
    # the original dereferenced a possible None and would raise AttributeError.
    # Fall back to the pure height reward instead of crashing.
    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
    if contact_sensor is None:
        return height_reward

    # Total vertical foot force, passed through a sigmoid gate around the threshold.
    foot_forces_z = torch.sum(contact_sensor.data.net_forces_w[:, :, 2], dim=-1)
    force_weight = torch.sigmoid((foot_forces_z - force_threshold) / 5.0)

    # Penalize vertical root speed beyond max_v_z (discourages "jumping" upright).
    root_vel_z = env.scene["robot"].data.root_lin_vel_w[:, 2]
    vel_penalty = torch.exp(-2.0 * torch.clamp(torch.abs(root_vel_z) - max_v_z, min=0.0))

    # Ramp the contact/velocity factors in as the pelvis rises from 0.2 m to 0.6 m.
    influence_weight = torch.clamp((curr_pelvis_h - 0.2) / 0.4, min=0.0, max=1.0)
    combined_reward = height_reward * ((1.0 - influence_weight) + influence_weight * force_weight * vel_penalty)

    return combined_reward
def arm_push_up_reward(
    env: ManagerBasedRLEnv,
    sensor_cfg: SceneEntityCfg,
    height_threshold: float = 0.5,
    min_force: float = 20.0
) -> torch.Tensor:
    """Reward pushing off the ground with the arms while the trunk is still low.

    1. Encourages vertical arm reaction forces above ``min_force``.
    2. Adds a bonus when the arms push while the root is moving upward.
    The reward is zeroed once the trunk rises past ``height_threshold``.
    """
    # Arm contact sensor; silently no-op if it is not present in the scene.
    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
    if contact_sensor is None:
        return torch.zeros(env.num_envs, device=env.device)

    # 1. Strongest vertical (z) reaction force among the monitored arm bodies.
    vertical_forces = contact_sensor.data.net_forces_w[:, :, 2]
    strongest_push = vertical_forces.max(dim=-1)[0]
    # Normalized force reward: linear ramp over [min_force, min_force + 80 N].
    base_reward = torch.clamp((strongest_push - min_force) / 80.0, min=0.0, max=1.0)

    # 2. Trunk height and vertical root velocity.
    trunk_idx, _ = env.scene["robot"].find_bodies("Trunk")
    trunk_height = env.scene["robot"].data.body_state_w[:, trunk_idx[0], 2]
    upward_speed = env.scene["robot"].data.root_lin_vel_w[:, 2]

    # 3. Synergy bonus: arms pushing AND root rising -> scale with upward speed.
    is_pushing_up = (strongest_push > min_force) & (upward_speed > 0.05)
    boosted = base_reward * (1.0 + upward_speed * 2.0)
    reward = torch.where(is_pushing_up, boosted, base_reward)

    # Pay out only while the trunk is still below the threshold (push-up phase).
    still_low = trunk_height < height_threshold
    return torch.where(still_low, reward, torch.zeros_like(reward))
def is_standing_still(
    env: ManagerBasedRLEnv,
    min_head_height: float,
    min_pelvis_height: float,
    max_angle_error: float,
    standing_time: float,
    velocity_threshold: float = 0.15
) -> torch.Tensor:
    """Success check: head and pelvis high enough, trunk upright, body at rest.

    Each env must satisfy all instantaneous conditions continuously for
    ``standing_time`` seconds (tracked via a per-env timer stored in
    ``env.extras``) before this returns True for it.
    """
    robot = env.scene["robot"]
    head_idx, _ = robot.find_bodies("H2")
    pelvis_idx, _ = robot.find_bodies("Trunk")

    head_height = robot.data.body_state_w[:, head_idx[0], 2]
    pelvis_height = robot.data.body_state_w[:, pelvis_idx[0], 2]

    # Tilt = magnitude of gravity projected into the body x-y plane (0 when vertical).
    tilt = torch.norm(robot.data.projected_gravity_b[:, :2], dim=-1)
    speed = torch.norm(robot.data.root_lin_vel_w, dim=-1)

    # All four instantaneous conditions must hold simultaneously.
    stable_now = (
        (head_height > min_head_height)
        & (pelvis_height > min_pelvis_height)
        & (tilt < max_angle_error)
        & (speed < velocity_threshold)
    )

    # Accumulate time while stable; zero the timer the moment stability breaks.
    timer = env.extras.setdefault(
        "stable_timer", torch.zeros(env.num_envs, device=env.device)
    )
    step_dt = env.physics_dt * env.cfg.decimation
    timer = torch.where(stable_now, timer + step_dt, torch.zeros_like(timer))
    env.extras["stable_timer"] = timer
    return timer > standing_time
# --- 2. Configuration classes ---
# Actuated leg joints (hip pitch/roll/yaw, knee, ankle pitch/roll for both legs).
T1_JOINT_NAMES = [
    'Left_Hip_Pitch', 'Right_Hip_Pitch', 'Left_Hip_Roll', 'Right_Hip_Roll',
    'Left_Hip_Yaw', 'Right_Hip_Yaw', 'Left_Knee_Pitch', 'Right_Knee_Pitch',
    'Left_Ankle_Pitch', 'Right_Ankle_Pitch', 'Left_Ankle_Roll', 'Right_Ankle_Roll'
]
@configclass
class T1ObservationCfg:
    """Observation specification for the T1 get-up task."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Policy observations, concatenated into one flat vector.

        NOTE: term declaration order defines the observation layout — do not reorder.
        """
        concatenate_terms = True
        # Root-frame base velocities and projected gravity direction.
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
        projected_gravity = ObsTerm(func=mdp.projected_gravity)
        # World-frame root position (gives the policy height awareness).
        root_pos = ObsTerm(func=mdp.root_pos_w)
        # Leg joint positions/velocities relative to defaults.
        joint_pos = ObsTerm(func=mdp.joint_pos_rel,
                            params={"asset_cfg": SceneEntityCfg("robot", joint_names=T1_JOINT_NAMES)})
        joint_vel = ObsTerm(func=mdp.joint_vel_rel,
                            params={"asset_cfg": SceneEntityCfg("robot", joint_names=T1_JOINT_NAMES)})
        # Previously applied action.
        actions = ObsTerm(func=mdp.last_action)

    policy = PolicyCfg()
@configclass
class T1EventCfg:
    """Reset events: drop the robot in a randomized lying-down pose."""

    reset_robot_rotation = EventTerm(
        func=mdp.reset_root_state_uniform,
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "pose_range": {
                "roll": (-1.57, 1.57),   # side-lying variations
                # BUGFIX: the original used
                #   tuple(numpy.array([1.4, 1.6]) * random.choice([-1, 1]))
                # which is evaluated ONCE at import time, so every reset in a
                # training run shared the same pitch sign (always supine or
                # always prone). A symmetric range lets the per-reset uniform
                # sampler cover both lying orientations.
                "pitch": (-1.6, 1.6),    # supine ... prone
                "yaw": (-3.14, 3.14),    # any heading
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                # Small drop height so the robot settles onto the ground.
                "z": (0.1, 0.2),
            },
            "velocity_range": {},
        },
        mode="reset",
    )
@configclass
class T1ActionCfg:
    """Key tweak: lower the action scale to smooth motions (stronger effective damping)."""
    joint_pos = JointPositionActionCfg(
        asset_name="robot",
        joint_names=T1_JOINT_NAMES,
        # Actions are offsets around the default joint pose, scaled by 0.5.
        scale=0.5,
        use_default_offset=True
    )
@configclass
class T1GetUpRewardCfg:
    """Reward terms for the T1 get-up task."""

    # 1. Base posture reward (guides the body toward upright).
    upright = RewTerm(func=mdp.flat_orientation_l2, weight=10.0)

    # 2. Conditional height reward: head AND pelvis must rise, with feet loaded.
    height_with_feet = RewTerm(
        func=standing_with_feet_reward,
        weight=25.0,  # core shaping signal, hence the large weight
        params={
            "min_head_height": 1.10,
            "min_pelvis_height": 0.7,
            "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_foot_link"]),
            "force_threshold": 20.0,
            "max_v_z": 0.3
        }
    )

    # 3. Arm push-off reward: helps the robot leave the ground in the early phase.
    arm_push_support = RewTerm(
        func=arm_push_up_reward,
        weight=15.0,  # raised from 3.0 so pushing off becomes a viable strategy
        params={
            "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_hand_link", "AL3", "AR3"]),
            "height_threshold": 0.6,  # encourage arm use until the trunk reaches 0.6 m
            "min_force": 10.0  # triggers once the arms exert at least 10 N
        }
    )

    # 4. Joint-limit penalty (prevents joints slamming into limits and causing
    # numerical issues).
    joint_limits = RewTerm(
        func=mdp.joint_pos_limits,
        weight=-1.0,
        params={"asset_cfg": SceneEntityCfg("robot")}
    )

    # 5. Success jackpot: paid on the step the standing_success termination fires.
    # BUGFIX: replaced the config-level lambda (not picklable and opaque to the
    # manager's introspection) with Isaac Lab's built-in mdp.is_terminated_term,
    # which returns the selected non-timeout termination signal as a float.
    is_success = RewTerm(
        func=mdp.is_terminated_term,
        weight=50.0,
        params={"term_keys": "standing_success"}
    )
@configclass
class T1GetUpTerminationsCfg:
    """Termination conditions for the T1 get-up task."""

    time_out = DoneTerm(func=mdp.time_out)
    # Failure check (currently disabled): reset when the trunk tilts past 45 degrees.
    #base_crash = DoneTerm(func=mdp.bad_orientation, params={"limit_angle": 0.785})
    # Success check: both heights reached + upright + still for standing_time seconds.
    standing_success = DoneTerm(
        func=is_standing_still,
        params={
            "min_head_height": 1.05,
            "min_pelvis_height": 0.75,
            "max_angle_error": 0.15,
            "standing_time": 0.8,
            "velocity_threshold": 0.15
        }
    )
@configclass
class T1EnvCfg(ManagerBasedRLEnvCfg):
    """Top-level env config wiring the scene, MDP managers, and timing together."""

    # Large parallel batch for high-throughput GPU training.
    scene = T1SceneCfg(num_envs=16384, env_spacing=2.5)

    def __post_init__(self):
        super().__post_init__()
        # Spawn slightly above ground so the robot settles into a lying pose.
        self.scene.robot.init_state.pos = (0.0, 0.0, 0.2)

    observations = T1ObservationCfg()
    rewards = T1GetUpRewardCfg()
    terminations = T1GetUpTerminationsCfg()
    events = T1EventCfg()
    actions = T1ActionCfg()

    # Episode length in seconds; policy acts every `decimation` physics steps.
    episode_length_s = 6.0
    decimation = 4