Compare commits
37 Commits
| SHA1 |
|---|
| af42087bd8 |
| 7f7ec781c5 |
| a642274fa6 |
| 20c961936d |
| 0315b4cb99 |
| 616dd06e78 |
| 2e2d68a933 |
| f7c8e6e325 |
| a8199fd056 |
| 0e70d34e81 |
| 905e998596 |
| 4833ba33c8 |
| fd8238dc41 |
| 72a22bd78a |
| d78fdeda0d |
| 6d2ad9846a |
| 1fbc9dccac |
| 49da77db51 |
| c0088ebac3 |
| 00d3be8e7a |
| ad2255bc18 |
| 14f2151014 |
| 31a9fa9965 |
| 2ae7210062 |
| 9cfc127694 |
| af3ba4704f |
| 5df147b0b1 |
| 6ca671dce5 |
| d4089b103e |
| 118d39f4bc |
| fdfd962fbc |
| 08d1bb539b |
| 9f3ec9d67a |
| 4933567ef8 |
| c1e3d9382f |
| 6510cb0bfc |
| 4b0b1fac8d |
rl_game/get_up/__init__.py (new file, 13 lines)

```python
import gymnasium as gym

# Import the environment configuration (added in this diff under rl_game/get_up/config/)
from rl_game.get_up.config.t1_env_cfg import T1EnvCfg

# Register the environment with Gymnasium
gym.register(
    id="Isaac-T1-GetUp-v0",
    entry_point="isaaclab.envs:ManagerBasedRLEnv",  # Isaac Lab's unified RL environment entry point
    kwargs={
        "cfg": T1EnvCfg(),
    },
)
```
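Once this package is imported, the environment is reachable through the standard Gymnasium API. A minimal consumption sketch (the Isaac Sim app must already be running via `AppLauncher`, as `train.py` below does; the `num_envs` value here is illustrative):

```python
import gymnasium as gym

import rl_game.get_up  # noqa: F401  (importing the package runs the gym.register call above)

# Mirrors the gym.make call in train.py; requires AppLauncher to have started the simulator first.
env = gym.make("Isaac-T1-GetUp-v0", num_envs=64)
obs, info = env.reset()
```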
rl_game/get_up/asset/t1/T1_locomotion_base.usd (new binary file, not shown)

rl_game/get_up/asset/t1/T1_locomotion_physics_lab.usd (new binary file, not shown)
rl_game/get_up/config/ppo_cfg.yaml (new file, 60 lines)

```yaml
params:
  seed: 42
  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: False
    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0.5
        fixed_sigma: False
    mlp:
      units: [512, 256, 128]
      activation: relu
      d2rl: False
      initializer:
        name: default

  config:
    name: T1_Walking  # overridden to "T1_GetUp" by train.py
    env_name: rlgym  # the Isaac Lab wrapper registered in train.py
    multi_gpu: False
    ppo: True
    mixed_precision: True
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: 8192  # number of robots trained in parallel
    reward_shaper:
      scale_value: 1.0
    normalize_advantage: True
    gamma: 0.98
    tau: 0.95
    learning_rate: 3e-4
    lr_schedule: adaptive
    kl_threshold: 0.015
    score_to_win: 20000
    max_epochs: 500
    save_best_after: 50
    save_frequency: 100
    grad_norm: 1.0
    entropy_coef: 0.005
    truncate_grads: True
    bounds_loss_coef: 0.001
    e_clip: 0.2
    horizon_length: 256
    minibatch_size: 65536
    mini_epochs: 4
    critic_coef: 1
    clip_value: True
```
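One sanity check worth doing on these numbers: rl_games builds its rollout batch as `num_actors * horizon_length` and, as far as I know, requires `minibatch_size` to divide it evenly. A quick check with the values above:

```python
num_actors = 8192
horizon_length = 256
minibatch_size = 65536

batch_size = num_actors * horizon_length  # 2,097,152 transitions per rollout
assert batch_size % minibatch_size == 0
print(batch_size // minibatch_size)       # 32 minibatches per mini-epoch
```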
rl_game/get_up/config/t1_env_cfg.py (new file, 284 lines)

```python
import random

import numpy as np
import torch
from isaaclab.assets import ArticulationCfg
from isaaclab.envs import ManagerBasedRLEnvCfg, ManagerBasedRLEnv
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import RewardTermCfg as RewTerm
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.envs.mdp import JointPositionActionCfg
from isaaclab.managers import SceneEntityCfg
from isaaclab.utils import configclass
from rl_game.get_up.env.t1_env import T1SceneCfg
import isaaclab.envs.mdp as mdp


# --- 1. Custom MDP logic functions ---

def standing_with_feet_reward(
    env: ManagerBasedRLEnv,
    min_head_height: float,
    min_pelvis_height: float,
    sensor_cfg: SceneEntityCfg,
    force_threshold: float = 20.0,
    max_v_z: float = 0.5,
) -> torch.Tensor:
    """Ultimate height objective: head high, pelvis high, feet loaded and stable."""
    head_idx, _ = env.scene["robot"].find_bodies("H2")
    pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")

    curr_head_h = env.scene["robot"].data.body_state_w[:, head_idx[0], 2]
    curr_pelvis_h = env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2]

    # Normalized height scores
    head_score = torch.clamp(curr_head_h / min_head_height, 0.0, 1.2)
    pelvis_score = torch.clamp(curr_pelvis_h / min_pelvis_height, 0.0, 1.2)
    height_reward = (head_score + pelvis_score) / 2.0

    # Foot contact-force check
    contact_sensor = env.scene.sensors.get(sensor_cfg.name)
    if contact_sensor is None:
        return torch.zeros(env.num_envs, device=env.device)

    foot_forces_z = torch.sum(contact_sensor.data.net_forces_w[:, :, 2], dim=-1)
    force_weight = torch.sigmoid((foot_forces_z - force_threshold) / 5.0)

    # Vertical-velocity penalty (discourages unstable jumping)
    root_vel_z = env.scene["robot"].data.root_lin_vel_w[:, 2]
    vel_penalty = torch.exp(-torch.abs(root_vel_z) / max_v_z)

    return height_reward * (0.5 + 0.5 * force_weight * vel_penalty)


def arm_tuck_incremental_reward(
    env: ManagerBasedRLEnv,
    pitch_threshold: float = 1.4,
    shaping_weight: float = 0.2,
) -> torch.Tensor:
    """Incremental arm-tuck reward: encourage motion toward flexion, with a large bonus past the threshold."""
    joint_names = ["Left_Elbow_Pitch", "Right_Elbow_Pitch"]
    joint_ids, _ = env.scene["robot"].find_joints(joint_names)

    elbow_pos = env.scene["robot"].data.joint_pos[:, joint_ids]
    elbow_vel = env.scene["robot"].data.joint_vel[:, joint_ids]

    # 1. Velocity shaping: a small reward while flexing (positive velocity), a penalty while extending
    avg_vel = torch.mean(elbow_vel, dim=-1)
    shaping_reward = torch.tanh(avg_vel) * shaping_weight

    # 2. Threshold trigger: once fully tucked, grant a steady static bonus
    is_tucked = torch.all(elbow_pos > pitch_threshold, dim=-1).float()
    goal_bonus = is_tucked * 1.5

    return shaping_reward + goal_bonus


def dynamic_getup_strategy_reward(env: ManagerBasedRLEnv) -> torch.Tensor:
    """
    State-machine reward switching:
    - Supine (on back): focus on rolling over + tucking the arms.
    - Prone (on belly): focus on pushing off the ground to stand.
    """
    # Projected gravity: a Z component > 0 means the robot is supine (on its back)
    gravity_z = env.scene["robot"].data.projected_gravity_b[:, 2]

    # State masks
    is_on_back = (gravity_z > 0.2).float()
    is_on_belly = (gravity_z < -0.2).float()
    is_transition = 1.0 - is_on_back - is_on_belly

    # 1. Roll-over shaping: drives gravity_z toward -1.0
    flip_shaping = torch.clamp(-gravity_z, min=-1.0, max=1.0)

    # 2. Arm tuck
    tuck_rew = arm_tuck_incremental_reward(env)

    # 3. Push-off (reuses the original logic minus its internal height decay; the state machine handles that now)
    contact_sensor = env.scene.sensors.get("contact_sensor")
    max_arm_force = torch.zeros(env.num_envs, device=env.device)
    if contact_sensor is not None:
        # Contact forces assumed to come from the arm/hand links
        # (the sensor covers every body, so this max is not actually restricted to the arms)
        arm_forces_z = contact_sensor.data.net_forces_w[:, :, 2]
        max_arm_force = torch.max(arm_forces_z, dim=-1)[0]

    push_rew = torch.tanh(torch.clamp(max_arm_force - 15.0, min=0.0) / 40.0)

    # --- Dynamic weight blending ---
    # Supine zone: roll over (8.0) + tuck (4.0)
    back_strategy = is_on_back * (8.0 * flip_shaping + 4.0 * tuck_rew)

    # Prone zone: push off (25.0) + keep arms tucked (1.0)
    # Pushing is weighted far above rolling so the robot prefers to stay prone and attempt to stand
    belly_strategy = is_on_belly * (25.0 * push_rew + 1.0 * tuck_rew)

    # Transition zone
    trans_strategy = is_transition * (4.0 * flip_shaping + 10.0 * push_rew + 2.0 * tuck_rew)

    return back_strategy + belly_strategy + trans_strategy


def is_standing_still(
    env: ManagerBasedRLEnv,
    min_head_height: float,
    min_pelvis_height: float,
    max_angle_error: float,
    standing_time: float,
    velocity_threshold: float = 0.15,
) -> torch.Tensor:
    """Success test: both heights reached + trunk vertical + whole body at rest."""
    head_idx, _ = env.scene["robot"].find_bodies("H2")
    pelvis_idx, _ = env.scene["robot"].find_bodies("Trunk")

    current_head_h = env.scene["robot"].data.body_state_w[:, head_idx[0], 2]
    current_pelvis_h = env.scene["robot"].data.body_state_w[:, pelvis_idx[0], 2]

    gravity_error = torch.norm(env.scene["robot"].data.projected_gravity_b[:, :2], dim=-1)
    root_vel_norm = torch.norm(env.scene["robot"].data.root_lin_vel_w, dim=-1)

    is_stable_now = (
        (current_head_h > min_head_height)
        & (current_pelvis_h > min_pelvis_height)
        & (gravity_error < max_angle_error)
        & (root_vel_norm < velocity_threshold)
    )

    if "stable_timer" not in env.extras:
        env.extras["stable_timer"] = torch.zeros(env.num_envs, device=env.device)

    # Accumulate time while stable; reset the timer the moment stability is lost
    dt = env.physics_dt * env.cfg.decimation
    env.extras["stable_timer"] = torch.where(
        is_stable_now,
        env.extras["stable_timer"] + dt,
        torch.zeros_like(env.extras["stable_timer"]),
    )

    return env.extras["stable_timer"] > standing_time
```
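The state machine above is easiest to sanity-check with concrete numbers. A minimal, self-contained sketch in plain PyTorch (no Isaac Lab needed; the gravity and force samples are made up for illustration):

```python
import torch

# Hypothetical projected-gravity Z values for four robots:
# fully supine, supine-ish, inside the transition band, fully prone.
gravity_z = torch.tensor([0.95, 0.30, 0.00, -0.95])

is_on_back = (gravity_z > 0.2).float()    # -> [1., 1., 0., 0.]
is_on_belly = (gravity_z < -0.2).float()  # -> [0., 0., 0., 1.]
is_transition = 1.0 - is_on_back - is_on_belly  # -> [0., 0., 1., 0.]

# Roll-over shaping peaks at +1 when gravity_z reaches -1 (fully prone)
flip_shaping = torch.clamp(-gravity_z, min=-1.0, max=1.0)
print(flip_shaping)  # tensor([-0.9500, -0.3000,  0.0000,  0.9500])

# Push-off gate: zero below 15 N of contact force, saturating toward 1
arm_force = torch.tensor([0.0, 20.0, 60.0, 200.0])
push_rew = torch.tanh(torch.clamp(arm_force - 15.0, min=0.0) / 40.0)
print(push_rew)  # tensor([0.0000, 0.1244, 0.8093, 0.9998])
```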
t1_env_cfg.py, continued:

```python
# --- 2. Configuration classes ---

T1_JOINT_NAMES = [
    'AAHead_yaw', 'Head_pitch',
    'Left_Shoulder_Pitch', 'Left_Shoulder_Roll', 'Left_Elbow_Pitch', 'Left_Elbow_Yaw',
    'Right_Shoulder_Pitch', 'Right_Shoulder_Roll', 'Right_Elbow_Pitch', 'Right_Elbow_Yaw',
    'Waist',
    'Left_Hip_Pitch', 'Right_Hip_Pitch', 'Left_Hip_Roll', 'Right_Hip_Roll',
    'Left_Hip_Yaw', 'Right_Hip_Yaw', 'Left_Knee_Pitch', 'Right_Knee_Pitch',
    'Left_Ankle_Pitch', 'Right_Ankle_Pitch', 'Left_Ankle_Roll', 'Right_Ankle_Roll',
]


@configclass
class T1ObservationCfg:
    @configclass
    class PolicyCfg(ObsGroup):
        concatenate_terms = True
        base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
        base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
        projected_gravity = ObsTerm(func=mdp.projected_gravity)
        root_pos = ObsTerm(func=mdp.root_pos_w)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel,
                            params={"asset_cfg": SceneEntityCfg("robot", joint_names=T1_JOINT_NAMES)})
        joint_vel = ObsTerm(func=mdp.joint_vel_rel,
                            params={"asset_cfg": SceneEntityCfg("robot", joint_names=T1_JOINT_NAMES)})
        actions = ObsTerm(func=mdp.last_action)

    policy = PolicyCfg()


@configclass
class T1EventCfg:
    reset_robot_rotation = EventTerm(
        func=mdp.reset_root_state_uniform,
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "pose_range": {
                "roll": (-1.57, 1.57),
                # Supine/prone start; the sign is drawn once, at config build time
                # (see the sketch after this class)
                "pitch": tuple(np.array([1.4, 1.6], dtype=np.float32) * random.choice([-1, 1])),
                "yaw": (-3.14, 3.14),
                "x": (0.0, 0.0),
                "y": (0.0, 0.0),
                "z": (0.3, 0.4),
            },
            "velocity_range": {},
        },
        mode="reset",
    )
```
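One subtlety worth flagging in `T1EventCfg`: the `pitch` range is computed while the config class body is evaluated, so `random.choice` draws the supine-versus-prone sign exactly once per process, not once per reset. A standalone illustration (plain Python, nothing Isaac-specific):

```python
import random
import numpy as np

# This expression runs exactly once, when the config is built;
# every environment reset then reuses the same frozen range.
pitch_range = tuple(np.array([1.4, 1.6], dtype=np.float32) * random.choice([-1, 1]))
print(pitch_range)  # either (1.4, 1.6) or (-1.4, -1.6)

# Note the negative draw yields (min > max); if the consumer expects an
# ordered (low, high) pair, the sorted form would be (-1.6, -1.4).
```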
t1_env_cfg.py, continued:

```python
@configclass
class T1ActionCfg:
    joint_pos = JointPositionActionCfg(
        asset_name="robot", joint_names=T1_JOINT_NAMES, scale=0.5, use_default_offset=True
    )


@configclass
class T1GetUpRewardCfg:
    # --- 1. Dynamic strategy reward (switches between roll-over, arm-tuck, and push-off) ---
    adaptive_strategy = RewTerm(
        func=dynamic_getup_strategy_reward,
        weight=1.0,  # fine-grained weights live inside the function
    )

    # --- 2. Core height objective (highest priority) ---
    height_with_feet = RewTerm(
        func=standing_with_feet_reward,
        weight=15.0,
        params={
            "min_head_height": 1.1,
            "min_pelvis_height": 0.7,
            "sensor_cfg": SceneEntityCfg("contact_sensor", body_names=[".*_foot_link"]),
            "force_threshold": 30.0,
            "max_v_z": 0.3,
        },
    )

    # --- 3. Auxiliary constraints and penalties ---
    # Note: flat_orientation_l2 grows as the trunk tilts; a negative weight is the
    # usual convention if this term is meant as an uprightness penalty.
    upright = RewTerm(func=mdp.flat_orientation_l2, weight=1.0)
    joint_limits = RewTerm(func=mdp.joint_pos_limits, weight=-20.0, params={"asset_cfg": SceneEntityCfg("robot")})
    action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.01)

    # --- 4. Success bonus ---
    is_success_bonus = RewTerm(
        func=is_standing_still,
        weight=1000.0,
        params={
            "min_head_height": 1.05,
            "min_pelvis_height": 0.75,
            "max_angle_error": 0.3,
            "standing_time": 0.2,
            "velocity_threshold": 0.5,
        },
    )
```
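For scale intuition on the 1000.0 success bonus: Isaac Lab's reward manager multiplies each term's value by the control-step dt when accumulating rewards, so the per-step contribution is far smaller than the raw weight suggests. A back-of-the-envelope check (the `physics_dt` value is an assumption; this diff never sets it):

```python
physics_dt = 1.0 / 200.0   # assumed simulator step; not specified anywhere in this diff
decimation = 4             # from T1EnvCfg below
step_dt = physics_dt * decimation

# While is_standing_still returns True (value 1.0), the term contributes weight * value * dt:
print(1000.0 * 1.0 * step_dt)  # 20.0 reward per control step
```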
t1_env_cfg.py, continued (terminations and the top-level env config):

```python
@configclass
class T1GetUpTerminationsCfg:
    time_out = DoneTerm(func=mdp.time_out)
    standing_success = DoneTerm(
        func=is_standing_still,
        params={
            "min_head_height": 1.05,
            "min_pelvis_height": 0.75,
            "max_angle_error": 0.3,
            "standing_time": 0.2,
            "velocity_threshold": 0.5,
        },
    )


@configclass
class T1EnvCfg(ManagerBasedRLEnvCfg):
    scene = T1SceneCfg(num_envs=8192, env_spacing=2.5)

    observations = T1ObservationCfg()
    rewards = T1GetUpRewardCfg()
    terminations = T1GetUpTerminationsCfg()
    events = T1EventCfg()
    actions = T1ActionCfg()

    episode_length_s = 6.0
    decimation = 4

    def __post_init__(self):
        super().__post_init__()
        # Lower the spawn height set in T1SceneCfg (0.4) to 0.2
        self.scene.robot.init_state.pos = (0.0, 0.0, 0.2)
```
rl_game/get_up/env/t1_env.py (new file, vendored, 74 lines)

```python
from isaaclab.assets import ArticulationCfg, AssetBaseCfg
from isaaclab.scene import InteractiveSceneCfg
from isaaclab.sensors import ContactSensorCfg
from isaaclab.utils import configclass
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab import sim as sim_utils

import os

_DEMO_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
T1_USD_PATH = os.path.join(_DEMO_DIR, "asset", "t1", "T1_locomotion_physics_lab.usd")


@configclass
class T1SceneCfg(InteractiveSceneCfg):
    """Final revision: fully resolves the 'Unknown asset config type' error."""

    # 1. Ground: define the physics material directly inside spawn
    ground = AssetBaseCfg(
        prim_path="/World/ground",
        spawn=sim_utils.GroundPlaneCfg(
            physics_material=sim_utils.RigidBodyMaterialCfg(
                static_friction=1.0,
                dynamic_friction=1.0,
                restitution=0.3,
                friction_combine_mode="average",
                restitution_combine_mode="average",
            )
        ),
    )

    # 2. Robot
    robot = ArticulationCfg(
        prim_path="{ENV_REGEX_NS}/Robot",
        spawn=sim_utils.UsdFileCfg(
            usd_path=T1_USD_PATH,
            activate_contact_sensors=True,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(
                disable_gravity=False,
                max_depenetration_velocity=10.0,
            ),
            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
                enabled_self_collisions=True,
                solver_position_iteration_count=8,
                solver_velocity_iteration_count=4,
            ),
        ),
        init_state=ArticulationCfg.InitialStateCfg(
            pos=(0.0, 0.0, 0.4),  # drop height (overridden to 0.2 in T1EnvCfg.__post_init__)
            joint_pos={".*": 0.0},
        ),
        actuators={
            "t1_joints": ImplicitActuatorCfg(
                joint_names_expr=[".*"],
                effort_limit=800.0,   # doubled so the motors have enough authority
                velocity_limit=20.0,
                stiffness=500.0,      # key change: raised from 150 into the 500-800 range
                damping=40.0,         # key change: raised from 5 into the 30-50 range to suppress jitter
            ),
        },
    )

    contact_sensor = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*",
        update_period=0.0,
        history_length=3,
    )

    # 3. Lighting
    light = AssetBaseCfg(
        prim_path="/World/light",
        spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
    )

# Body names in the T1 articulation:
# ['Trunk', 'H1', 'H2', 'AL1', 'AL2', 'AL3', 'left_hand_link', 'AR1', 'AR2', 'AR3', 'right_hand_link', 'Waist', 'Hip_Pitch_Left', 'Hip_Roll_Left', 'Hip_Yaw_Left', 'Shank_Left', 'Ankle_Cross_Left', 'left_foot_link', 'Hip_Pitch_Right', 'Hip_Roll_Right', 'Hip_Yaw_Right', 'Shank_Right', 'Ankle_Cross_Right', 'right_foot_link']
```
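The stiffness and damping values set here act as PD gains on the position targets produced by `JointPositionActionCfg`. A simplified sketch of the drive law (the real implicit actuator is solved inside PhysX, so this is an approximation, not the library's code path):

```python
import torch

def pd_torque(q_target: torch.Tensor, q: torch.Tensor, qd: torch.Tensor,
              kp: float = 500.0, kd: float = 40.0, effort_limit: float = 800.0) -> torch.Tensor:
    """PD drive using the stiffness/damping/effort_limit values configured above."""
    tau = kp * (q_target - q) - kd * qd
    return torch.clamp(tau, -effort_limit, effort_limit)

# One joint with 0.5 rad of position error while moving at 2 rad/s:
print(pd_torque(torch.tensor(0.5), torch.tensor(0.0), torch.tensor(2.0)))
# tensor(170.) -> 500 * 0.5 - 40 * 2 = 170, well inside the 800 limit
```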
rl_game/get_up/train.py (new file, 101 lines)

```python
import sys
import os
import argparse

# Make this script's directory importable so `config.t1_env_cfg` resolves
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from isaaclab.app import AppLauncher

# 1. Launch arguments
parser = argparse.ArgumentParser(description="Train T1 robot to Get-Up with RL-Games.")
parser.add_argument("--num_envs", type=int, default=8192, help="4096 parallel envs is plenty for the get-up task")
parser.add_argument("--task", type=str, default="Isaac-T1-GetUp-v0", help="Task ID")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# 2. Start the simulator (must happen before importing any other isaaclab modules)
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

import torch
import gymnasium as gym
import yaml
from isaaclab_rl.rl_games import RlGamesVecEnvWrapper
from rl_games.torch_runner import Runner
from rl_games.common import env_configurations, vecenv

# Import the environment config class (config/t1_env_cfg.py, class T1EnvCfg)
from config.t1_env_cfg import T1EnvCfg

# 3. Register the environment
gym.register(
    id="Isaac-T1-GetUp-v0",
    entry_point="isaaclab.envs:ManagerBasedRLEnv",
    kwargs={
        "cfg": T1EnvCfg(),  # loads the random-rotation reset, rewards, and time-out termination configured above
    },
)


def main():
    # --- Handle retraining ---
    # The path can be hard-coded here, or exposed via an extra --checkpoint argparse option
    checkpoint_path = os.path.join(os.path.dirname(__file__), "logs/T1_GetUp/nn/T1_GetUp.pth")
    # Resume only if a trained model already exists
    should_retrain = os.path.exists(checkpoint_path)

    env = gym.make("Isaac-T1-GetUp-v0", num_envs=args_cli.num_envs)

    # Note: rl_device must be set to args_cli.device (usually 'cuda:0')
    wrapped_env = RlGamesVecEnvWrapper(
        env,
        rl_device=args_cli.device,
        clip_obs=5.0,
        clip_actions=1.0,
    )

    vecenv.register('as_is', lambda config_name, num_actors, **kwargs: wrapped_env)

    env_configurations.register('rlgym', {
        'vecenv_type': 'as_is',
        'env_creator': lambda **kwargs: wrapped_env,
    })

    config_path = os.path.join(os.path.dirname(__file__), "config", "ppo_cfg.yaml")
    with open(config_path, "r") as f:
        rl_config = yaml.safe_load(f)

    # Logging directory and experiment name
    rl_game_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "."))
    log_dir = os.path.join(rl_game_dir, "logs")
    rl_config['params']['config']['train_dir'] = log_dir
    rl_config['params']['config']['name'] = "T1_GetUp"

    # --- Key step: inject the checkpoint path ---
    if should_retrain:
        print(f"[INFO]: Found a pretrained model, resuming training from {checkpoint_path}...")
        # rl_games reads load_path from the config to resume training
        rl_config['params']['config']['load_path'] = checkpoint_path
    else:
        print("[INFO]: No pretrained model found, training from scratch.")

    # Run training
    runner = Runner()
    runner.load(rl_config)

    runner.run({
        "train": True,
        "play": False,
        # A specific checkpoint can also be forced here
        "checkpoint": checkpoint_path if should_retrain else None,
        "vec_env": wrapped_env,
    })

    simulation_app.close()


if __name__ == "__main__":
    main()
```