Update Walk.py and the trained models for the Walk gym environment

This commit is contained in:
xxh
2026-04-15 04:47:56 -04:00
parent 43067000db
commit 29adc4b3df
4 changed files with 1159 additions and 51 deletions

View File

@@ -7,7 +7,7 @@ from random import random
from random import uniform
from itertools import count
from stable_baselines3 import PPO
from stable_baselines3 import PPO, TD3, DDPG, SAC, A2C
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
@@ -66,6 +66,7 @@ class WalkEnv(gym.Env):
self._last_sync_time = None
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.zeros(3, dtype=np.float32)
self._speed_smoothing = 0.85
self._fallback_dt = 0.02
target_hz_env = 0
@@ -125,7 +126,7 @@ class WalkEnv(gym.Env):
0.0, # 22: Right_Ankle_Roll (rle6)
]
)
self.joint_nominal_position = np.zeros(self.no_of_actions)
# self.joint_nominal_position = np.zeros(self.no_of_actions)
self.train_sim_flip = np.array(
[
1.0, # 0: Head_yaw (he1)
@@ -175,7 +176,7 @@ class WalkEnv(gym.Env):
self.reset_perturb_steps = 4
self.reset_recover_steps = 8
self.reward_smoothness_scale = 0.06
self.reward_smoothness_scale = 0.03
self.reward_smoothness_cap = 0.45
self.reward_forward_stability_gate = 0.35
self.reward_forward_tilt_hard_threshold = 0.50
@@ -192,17 +193,17 @@ class WalkEnv(gym.Env):
self.in_place_drift_penalty_scale = 1.20
self.waypoint_reach_distance = 0.3
self.num_waypoints = 1
self.exploration_start_steps = 80
self.exploration_scale = 0.08
self.exploration_cap = 0.25
self.exploration_start_steps = 40
self.exploration_scale = 0.012
self.exploration_cap = 0.2
self.exploration_target_novelty = 1.0
self.exploration_sigma = 0.7
self.reward_stride_swing_scale = 0.20
self.reward_stride_phase_scale = 0.18
self.reward_knee_drive_scale = 0.10
self.reward_knee_lift_scale = 0.12
self.reward_knee_lift_target = 0.95
self.reward_knee_lift_shortfall_scale = 0.20
self.reward_knee_lift_target = 0.15
self.reward_knee_lift_shortfall_scale = 0.05
self.reward_knee_overbend_threshold = 0.60
self.reward_knee_overbend_scale = 0.35
self.reward_hip_lift_scale = 0.12
@@ -218,12 +219,32 @@ class WalkEnv(gym.Env):
self.knee_phase_max_hold_frames = 28
self.knee_phase_hold_penalty_scale = 0.18
self.reward_stride_cap = 0.80
self.reward_knee_explore_scale = 0.10
self.reward_knee_explore_delta_scale = 0.12
self.reward_knee_explore_cap = 0.55
self.reward_hip_pitch_explore_scale = 0.14
self.reward_hip_pitch_explore_delta_scale = 0.10
self.reward_hip_pitch_explore_cap = 0.45
self.reward_knee_explore_scale = 0.03
self.reward_knee_explore_delta_scale = 0.03
self.reward_knee_explore_cap = 0.10
self.reward_hip_pitch_explore_scale = 0.07
self.reward_hip_pitch_explore_delta_scale = 0.07
self.reward_hip_pitch_explore_cap = 0.10
self.reward_progress_scale = 18
self.reward_survival_scale = 0.5
self.reward_idle_penalty_scale = 0.6
self.reward_accel_penalty_scale = 0.08
self.reward_accel_penalty_cap = 0.40
self.reward_accel_abs_limit = 13.5
self.reward_accel_abs_penalty_scale = 0.05
self.reward_accel_abs_penalty_cap = 0.40
self.reward_heading_align_scale = 0.28
self.reward_heading_error_scale = 0.05
self.reward_progress_tilt_gate_start = 0.28
self.reward_progress_knee_gate_min = 0.16
self.reward_progress_hip_gate_over = 0.18
self.reward_progress_gate_floor = 0.25
self.reward_knee_straight_threshold = 0.18
self.reward_knee_straight_penalty_scale = 0.45
self.reward_hip_overextend_threshold = 0.9
self.reward_hip_overextend_penalty_scale = 1
self.reward_leg_stretch_penalty_scale = 0.35
self.reward_stretch_lean_combo_scale = 0.55
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
@@ -410,6 +431,10 @@ class WalkEnv(gym.Env):
self._reward_debug_steps_left = 0
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.array(
getattr(self.Player.robot, "accelerometer", np.zeros(3)),
dtype=np.float32,
)
# Randomize the beam target position and heading to increase training diversity
beam_x = (random() - 0.5) * 10
@@ -503,9 +528,9 @@ class WalkEnv(gym.Env):
curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
dist_delta = prev_dist_to_target - curr_dist_to_target
is_fallen = height < 0.55
is_fallen = height < 0.45
if is_fallen:
return -20.0
return -2.0
joint_pos = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
@@ -519,44 +544,82 @@ class WalkEnv(gym.Env):
right_knee_flex = abs(float(joint_pos[20]))
avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex)
max_leg_roll = 1 # prevent a leg-splits posture
split_penalty = -0.6 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
max_leg_roll = 0.5 # prevent a leg-splits posture
split_penalty = -0.12 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
left_hip_yaw = -float(joint_pos[13])
right_hip_yaw = float(joint_pos[19])
min_leg_separation = 0.04 # minimum leg separation (prevents the legs from getting too close together)
inward_penalty = -0.175 * min(0.0, left_hip_roll-min_leg_separation) + -0.175 * min(0.0, right_hip_roll-min_leg_separation) # penalize excessive inward collapse of the left/right legs
inward_penalty = 0.3 * min(0.0, (left_hip_roll-min_leg_separation)) + 0.3 * min(0.0, (right_hip_roll-min_leg_separation)) # penalize excessive inward collapse of the left/right legs
# Check ankle roll angles to prevent excessive eversion or inversion
max_ankle_roll = 0.15 # maximum allowed ankle roll angle
# Penalize excessive ankle eversion/inversion (absolute value too large)
ankle_roll_penalty = -0.35 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
ankle_roll_penalty = -0.12 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
# Penalize the unstable posture where the two ankle rolls point in opposite directions
ankle_roll_cross_penalty = -0.2 * max(0.0, -(left_ankle_roll * right_ankle_roll))
ankle_roll_cross_penalty = -0.12 * max(0.0, -(left_ankle_roll * right_ankle_roll))
# Penalize excessive rotation of the left and right thighs separately
max_hip_yaw = 0.5 # maximum allowed hip yaw angle
left_hip_yaw_penalty = -0.25 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.25 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
max_hip_yaw = 0.2 # maximum allowed hip yaw angle
left_hip_yaw_penalty = -0.6 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.6 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
target_vec = self.target_position - current_pos
target_dist = float(np.linalg.norm(target_vec))
if target_dist > 1e-6:
target_heading = math.atan2(float(target_vec[1]), float(target_vec[0]))
robot_heading = math.radians(float(robot.global_orientation_euler[2]))
heading_error = self._wrap_to_pi(target_heading - robot_heading)
heading_align_reward = self.reward_heading_align_scale * math.cos(heading_error)
heading_error_penalty = -self.reward_heading_error_scale * abs(heading_error)
else:
heading_align_reward = 0.0
heading_error_penalty = 0.0
# Forward-progress reward (distance delta) with anti-stuck shaping.
progress_reward = 18.0 * dist_delta
survival_reward = 0.03
smoothness_penalty = -0.03 * float(np.linalg.norm(action - self.last_action_for_reward))
progress_reward_raw = self.reward_progress_scale * dist_delta
survival_reward = self.reward_survival_scale
smoothness_penalty = -self.reward_smoothness_scale * float(np.linalg.norm(action - self.last_action_for_reward))
step_displacement = float(np.linalg.norm(current_pos - previous_pos))
if self.step_counter > 30 and step_displacement < 0.008 and self.target_distance_wp > 0.3:
idle_penalty = -0.20
accel_signal = 0.0
accel_source = "imu_delta"
accel_now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
if accel_now.shape[0] >= 3:
# Use IMU acceleration delta to reduce gravity bias and punish abrupt bursts.
accel_signal = float(np.linalg.norm(accel_now[:3] - self._prev_accelerometer[:3]))
self._prev_accelerometer = accel_now
accel_penalty = -min(
self.reward_accel_penalty_cap,
self.reward_accel_penalty_scale * accel_signal,
)
accel_abs = float(np.linalg.norm(accel_now[:3])) if accel_now.shape[0] >= 3 else 0.0
accel_abs_over = max(0.0, accel_abs - self.reward_accel_abs_limit)
accel_abs_penalty = -min(
self.reward_accel_abs_penalty_cap,
self.reward_accel_abs_penalty_scale * accel_abs_over,
)
if self.step_counter > 30 and step_displacement < 0.015 and self.target_distance_wp > 0.3:
idle_penalty = -self.reward_idle_penalty_scale
else:
idle_penalty = 0.0
if self.step_counter > self.exploration_start_steps:
displacement_novelty = step_displacement / max(1e-6, self.stationary_step_eps)
exploration_bonus = min(
self.exploration_cap,
self.exploration_scale * max(0.0, displacement_novelty - self.exploration_target_novelty),
)
else:
exploration_bonus = 0.0
# Encourage active/varied knee motions early in training without dominating progress reward.
left_knee_act = float(action[14])
right_knee_act = float(action[20])
left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
if self.step_counter > 10:
@@ -569,8 +632,6 @@ class WalkEnv(gym.Env):
knee_explore_reward = 0.0
# Directly encourage observable knee flexion instead of only action exploration.
knee_lift_ratio = min(1.0, avg_knee_flex / max(1e-6, self.reward_knee_lift_target))
knee_lift_reward = self.reward_knee_lift_scale * knee_lift_ratio
knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max(
0.0, self.reward_knee_lift_target - avg_knee_flex
)
@@ -595,11 +656,52 @@ class WalkEnv(gym.Env):
arrival_bonus = self.target_distance_wp * 8 ## bonus for reaching the target waypoint
else:
arrival_bonus = 0.0
target_height = self.initial_height
height_error = height - target_height
height_error = height - target_height
height_penalty = -0.5 * (math.exp(15*abs(height_error))-1)
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
# Gate progress reward when posture quality is poor.
# Important: include leg overextension so upright torso cannot exploit progress reward.
tilt_excess = max(0.0, tilt_mag - self.reward_progress_tilt_gate_start)
knee_gate_excess = max(0.0, self.reward_progress_knee_gate_min - avg_knee_flex)
left_hip_pitch = float(joint_pos[11])
right_hip_pitch = float(joint_pos[17])
left_hip_over = max(0.0, abs(left_hip_pitch) - self.reward_hip_overextend_threshold)
right_hip_over = max(0.0, abs(right_hip_pitch) - self.reward_hip_overextend_threshold)
hip_over_mean = 0.5 * (left_hip_over + right_hip_over)
hip_gate_excess = max(0.0, hip_over_mean - self.reward_progress_hip_gate_over)
posture_gate = 1.0 - 1.2 * tilt_excess - 2.0 * knee_gate_excess - 1.8 * hip_gate_excess
posture_gate = float(np.clip(posture_gate, self.reward_progress_gate_floor, 1.0))
progress_reward = progress_reward_raw * posture_gate
knee_straight_penalty = -self.reward_knee_straight_penalty_scale * max(
0.0, self.reward_knee_straight_threshold - avg_knee_flex
)
hip_overextend_penalty = -self.reward_hip_overextend_penalty_scale * (left_hip_over + right_hip_over)
# Penalize over-stretched legs even if torso stays upright.
stretch_amount = left_hip_over + right_hip_over
straight_amount = max(0.0, self.reward_knee_straight_threshold - avg_knee_flex)
leg_stretch_penalty = -self.reward_leg_stretch_penalty_scale * stretch_amount * (1.0 + 2.5 * straight_amount)
# Keep extra combo penalty, but no longer vanish when torso is upright.
stretch_lean_combo_penalty = -self.reward_stretch_lean_combo_scale * (0.5 + tilt_mag) * stretch_amount * (1.0 + 3.0 * straight_amount)
posture_penalty = -0.6 * (tilt_mag)
total = (
progress_reward
+ survival_reward
+ smoothness_penalty
+ accel_penalty
+ accel_abs_penalty
+ idle_penalty
+ split_penalty
+ inward_penalty
@@ -607,11 +709,19 @@ class WalkEnv(gym.Env):
+ ankle_roll_cross_penalty
+ left_hip_yaw_penalty
+ right_hip_yaw_penalty
+ knee_explore_reward
# + knee_lift_reward
+ heading_align_reward
+ heading_error_penalty
# + knee_straight_penalty
+ hip_overextend_penalty
+ leg_stretch_penalty
+ stretch_lean_combo_penalty
# + exploration_bonus
# + knee_explore_reward
# + knee_lift_shortfall_penalty
+ hip_pitch_explore_reward
# + hip_pitch_explore_reward
+ arrival_bonus
+ height_penalty
+ posture_penalty
)
now = time.time()
@@ -625,6 +735,11 @@ class WalkEnv(gym.Env):
f"progress_reward:{progress_reward:.4f},"
f"survival_reward:{survival_reward:.4f},"
f"smoothness_penalty:{smoothness_penalty:.4f},"
f"accel_penalty:{accel_penalty:.4f},"
f"accel_source:{accel_source},"
f"accel_signal:{accel_signal:.4f},"
f"accel_abs:{accel_abs:.4f},"
f"accel_abs_penalty:{accel_abs_penalty:.4f},"
f"idle_penalty:{idle_penalty:.4f},"
f"split_penalty:{split_penalty:.4f},"
f"inward_penalty:{inward_penalty:.4f},"
@@ -632,10 +747,20 @@ class WalkEnv(gym.Env):
f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
f"heading_align_reward:{heading_align_reward:.4f},"
f"heading_error_penalty:{heading_error_penalty:.4f},"
f"knee_straight_penalty:{knee_straight_penalty:.4f},"
f"hip_overextend_penalty:{hip_overextend_penalty:.4f},"
f"leg_stretch_penalty:{leg_stretch_penalty:.4f},"
f"stretch_lean_combo_penalty:{stretch_lean_combo_penalty:.4f},"
f"posture_gate:{posture_gate:.4f},"
f"progress_reward_raw:{progress_reward_raw:.4f},"
# f"exploration_bonus:{exploration_bonus:.4f},"
f"height_penalty:{height_penalty:.4f},"
# f"knee_explore_reward:{knee_explore_reward:.4f},"
# f"knee_lift_reward:{knee_lift_reward:.4f},"
f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
f"posture_penalty:{posture_penalty:.4f},"
# f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
# f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
f"arrival_bonus:{arrival_bonus:.4f},"
f"total:{total:.4f}"
)
@@ -661,14 +786,14 @@ class WalkEnv(gym.Env):
action[9] = np.clip(action[9], 8, 2)
action[10] = np.clip(action[10], -0.6, 0.6)
# Boost knee command range so policy can produce visible knee flexion earlier.
# action[14] = np.clip(action[14] * 1.1, -10.0, 10.0)
# action[20] = np.clip(action[20] * 1.1, -10.0, 10.0)
action[14] = np.clip(action[14], 0, 10.0)
action[20] = np.clip(action[20], -10.0, 0)
# action[14] = 1 # the correct left knee sign
# action[20] = -1 # the correct right knee sign
# action[11] = 1
# action[17] = 1
# action[12] = -0.01
# action[18] = 0.01
# action[11] = 2
# action[17] = -2
# action[12] = -1
# action[18] = 1
# action[13] = -1.0
# action[19] = 1.0
self.previous_action = action.copy()
@@ -717,7 +842,7 @@ class WalkEnv(gym.Env):
self.target_position = self.point_list[self.waypoint_index]
# Fall detection and penalty
is_fallen = self.Player.world.global_position[2] < 0.55
is_fallen = self.Player.world.global_position[2] < 0.45
# terminal state: the robot is falling or timeout
terminated = is_fallen or self.step_counter > 800 or self.route_completed
@@ -737,7 +862,7 @@ class Train(Train_Base):
server_warmup_sec = 3.0
n_steps_per_env = 256 # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = 512 # should be a factor of (n_steps_per_env * n_envs)
total_steps = 30000000
total_steps = 90000000
learning_rate = 2e-4
ent_coef = 0.035
clip_range = 0.2