Training example for T1

This commit is contained in:
2026-03-15 20:14:06 -04:00
parent 571b4283c7
commit f0a5f8f4b7
8 changed files with 357 additions and 0 deletions

View File

@@ -0,0 +1,60 @@
params:
seed: 42
algo:
name: a2c_continuous
model:
name: continuous_a2c_logstd
network:
name: actor_critic
separate: False
space:
continuous:
mu_activation: None
sigma_activation: None
mu_init:
name: default
sigma_init:
name: const_initializer
val: 0
fixed_sigma: True
mlp:
units: [512, 256, 128]
activation: elu
d2rl: False
initializer:
name: default
config:
name: T1_Walking
    env_name: rlgym # Isaac Lab wrapper
multi_gpu: False
ppo: True
mixed_precision: True
normalize_input: True
normalize_value: True
value_bootstrap: True
    num_actors: 16384 # number of robots trained in parallel
reward_shaper:
scale_value: 1.0
normalize_advantage: True
gamma: 0.99
tau: 0.95
learning_rate: 3e-4
lr_schedule: adaptive
kl_threshold: 0.008
score_to_win: 20000
max_epochs: 5000
save_best_after: 50
save_frequency: 100
grad_norm: 1.0
entropy_coef: 0.01
truncate_grads: True
bounds_loss_coef: 0.0
e_clip: 0.2
horizon_length: 128
minibatch_size: 32768
mini_epochs: 5
critic_coef: 2
clip_value: True

View File

@@ -0,0 +1,94 @@
from isaaclab.envs import ManagerBasedRLEnvCfg
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import RewardTermCfg as RewTerm
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.envs.mdp import JointPositionActionCfg
import isaaclab.envs.mdp as mdp
from isaaclab.utils import configclass
from rl_game.demo.env.t1_env import T1SceneCfg
@configclass
class T1ObservationCfg:
"""观察值空间配置容器"""
@configclass
class PolicyCfg(ObsGroup):
concatenate_terms = True
enable_corruption = False
# ⬅️ 2. 修改点:直接使用 mdp.函数名,不要引号
base_lin_vel = ObsTerm(func=mdp.base_lin_vel)
base_ang_vel = ObsTerm(func=mdp.base_ang_vel)
projected_gravity = ObsTerm(func=mdp.projected_gravity)
joint_pos = ObsTerm(func=mdp.joint_pos_rel)
joint_vel = ObsTerm(func=mdp.joint_vel_rel)
actions = ObsTerm(func=mdp.last_action)
policy = PolicyCfg()
@configclass
class T1ActionCfg:
    """Action-space configuration."""

    # Joint-position targets produced by the policy network.
    joint_pos = JointPositionActionCfg(
        asset_name="robot",        # must match the robot entity name in the scene config
        joint_names=[".*"],        # regex: actuate every joint
        scale=0.5,                 # scales the raw network output
        use_default_offset=True,   # actions are offsets from the default joint angles (init_state)
    )
@configclass
class T1TerminationsCfg:
    """Termination conditions: when an environment instance gets reset."""

    # 1. Fall detection: reset when the trunk drops below 0.35 m
    #    (assuming the T1 pelvis sits around 0.7 m when standing).
    base_height_too_low = DoneTerm(
        func=mdp.root_height_below_minimum,
        params={"minimum_height": 0.35},
    )
    # 2. Episode time limit (timeout).
    time_out = DoneTerm(func=mdp.time_out)
@configclass
class T1CommandsCfg:
    """Command configuration: target base velocities the robot must track."""

    base_velocity = mdp.UniformVelocityCommandCfg(
        asset_name="robot",
        # Resample the command at a fixed 10 s interval.
        resampling_time_range=(10.0, 10.0),
        ranges=mdp.UniformVelocityCommandCfg.Ranges(
            lin_vel_x=(0.5, 1.5),    # forward speed target
            lin_vel_y=(0.0, 0.0),    # no lateral motion
            ang_vel_z=(-0.1, 0.1),   # small heading corrections only
        ),
    )
@configclass
class T1RewardCfg:
    """Reward configuration: encourage tracking the commanded velocity,
    penalize tilting and jerky actions."""

    # Velocity-tracking reward (exponential kernel over the planar velocity error).
    track_lin_vel_xy_exp = RewTerm(
        func=mdp.track_lin_vel_xy_exp,
        weight=1.0,
        params={
            "std": 0.5,
            "command_name": "base_velocity",
        },
    )
    # Upright-posture penalty. flat_orientation_l2 measures deviation of the
    # base from a flat (gravity-aligned) orientation, so the weight must be
    # NEGATIVE to act as the penalty the original comment describes; the
    # original +0.1 would have rewarded tilting. Sign now matches the
    # action_rate_l2 penalty below.
    upright = RewTerm(func=mdp.flat_orientation_l2, weight=-0.1)
    # Action-smoothness penalty.
    action_rate_l2 = RewTerm(func=mdp.action_rate_l2, weight=-0.01)
@configclass
class T1EnvCfg(ManagerBasedRLEnvCfg):
    """Top-level environment configuration for T1 walking."""

    # Scene: 16384 parallel environments spaced 2.5 m apart.
    scene = T1SceneCfg(num_envs=16384, env_spacing=2.5)

    # MDP components.
    observations = T1ObservationCfg()
    rewards = T1RewardCfg()
    terminations = T1TerminationsCfg()
    actions = T1ActionCfg()
    commands = T1CommandsCfg()

    # Episode length in seconds.
    episode_length_s = 20.0
    # Control decimation: simulation frequency / control frequency.
    decimation = 6