Loosen action constraints to allow more exploration, and encourage active/varied knee motions early in training without letting the new shaping terms dominate the progress reward.

xxh
2026-04-11 01:31:51 -04:00
parent 5a7952a91c
commit 387336b35a
3 changed files with 98 additions and 799 deletions
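A quick sanity check of the new shaping term (an illustrative sketch, not code from this commit; the constants mirror the reward_knee_explore_* fields and the 18.0 progress scale introduced below): even a large knee action magnitude and swing yields a capped bonus that stays comparable to, but below, a typical per-step progress reward.

def knee_explore_bonus(knee_mag, knee_delta, scale=0.10, delta_scale=0.12, cap=0.55):
    # Same form as the term added in compute_reward below.
    return min(cap, scale * knee_mag + delta_scale * knee_delta)

# e.g. knee_explore_bonus(2.0, 1.0) = min(0.55, 0.20 + 0.12) = 0.32,
# versus progress_reward = 18.0 * dist_delta = 0.36 for a 2 cm step.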


@@ -164,8 +164,8 @@ class WalkEnv(gym.Env):
self.enable_reset_perturb = False
self.reset_beam_yaw_range_deg = 180.0
self.reset_target_bearing_range_deg = 0.0
self.reset_target_distance_min = 3.0
self.reset_target_distance_max = 5.0
self.reset_target_distance_min = 5
self.reset_target_distance_max = 10
if self.reset_target_distance_min > self.reset_target_distance_max:
self.reset_target_distance_min, self.reset_target_distance_max = (
self.reset_target_distance_max,
@@ -218,6 +218,9 @@ class WalkEnv(gym.Env):
self.knee_phase_max_hold_frames = 28
self.knee_phase_hold_penalty_scale = 0.18
self.reward_stride_cap = 0.80
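# Knee-exploration shaping weights (used in compute_reward below): a term on
# knee action magnitude, a term on step-to-step knee action change, and a hard
# cap so the combined bonus cannot dominate the progress reward.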
self.reward_knee_explore_scale = 0.10
self.reward_knee_explore_delta_scale = 0.12
self.reward_knee_explore_cap = 0.55
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
@@ -466,6 +469,7 @@ class WalkEnv(gym.Env):
for i in range(self.num_waypoints):
# Each waypoint is placed further along the path
target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
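# Remember the most recently sampled waypoint distance; compute_reward reads it
# to gate idle_penalty and to scale arrival_bonus.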
self.target_distance_wp = target_distance_wp
target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
target_offset = MathOps.rotate_2d_vec(
@@ -490,26 +494,93 @@ class WalkEnv(gym.Env):
def compute_reward(self, previous_pos, current_pos, action):
height = float(self.Player.world.global_position[2])
is_fallen = height < 0.55
if is_fallen:
return -20.0
robot = self.Player.robot
prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
dist_delta = prev_dist_to_target - curr_dist_to_target
is_fallen = height < 0.55
if is_fallen:
return -20.0
joint_pos = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
) * self.train_sim_flip
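# The sign flips below mirror the left-side joints into the right-side
# convention, so the symmetric thresholds that follow apply to both legs.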
left_hip_roll = -float(joint_pos[12])
right_hip_roll = float(joint_pos[18])
left_ankle_roll = -float(joint_pos[16])
right_ankle_roll = float(joint_pos[22])
max_leg_roll = 1  # maximum allowed hip roll (rad); prevents split poses
split_penalty = -0.6 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
left_hip_yaw = -float(joint_pos[13])
right_hip_yaw = float(joint_pos[19])
min_leg_separation = 0.04  # minimum leg separation (keeps the legs from pressing together)
inward_penalty = 0.175 * min(0.0, left_hip_roll - min_leg_separation) + 0.175 * min(0.0, right_hip_roll - min_leg_separation)  # penalize excessive inward collapse of either leg
# Ankle roll checks: prevent excessive eversion or inversion.
max_ankle_roll = 0.15  # maximum allowed ankle roll angle (rad)
# Penalize excessive ankle eversion/inversion (absolute roll too large).
ankle_roll_penalty = -0.35 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
# Penalize the two ankles rolling in opposite directions (an unstable stance).
ankle_roll_cross_penalty = -0.2 * max(0.0, -(left_ankle_roll * right_ankle_roll))
# Penalize excessive yaw of each hip separately.
max_hip_yaw = 1  # maximum allowed hip yaw angle (rad)
left_hip_yaw_penalty = -0.25 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.25 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
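# All posture penalties above are zero or negative; they only ever subtract
# from the shaped reward.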
# Forward-progress reward (distance delta) with anti-stuck shaping.
progress_reward = 22.0 * dist_delta
survival_reward = 0.02
smoothness_penalty = -0.015 * float(np.linalg.norm(action - self.last_action_for_reward))
progress_reward = 18.0 * dist_delta
survival_reward = 0.03
smoothness_penalty = -0.025 * float(np.linalg.norm(action - self.last_action_for_reward))
step_displacement = float(np.linalg.norm(current_pos - previous_pos))
if self.step_counter > 30 and step_displacement < 0.006:
idle_penalty = -0.06
if self.step_counter > 30 and step_displacement < 0.008 and self.target_distance_wp > 0.3:
idle_penalty = -0.20
else:
idle_penalty = 0.0
total = progress_reward + survival_reward + smoothness_penalty + idle_penalty
# Encourage active/varied knee motions early in training without dominating progress reward.
left_knee_act = -float(action[14])
right_knee_act = float(action[20])
left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
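# Skip the bonus during the first few control steps so reset transients are
# not rewarded; the cap bounds whatever the policy earns afterwards.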
if self.step_counter > 10:
knee_explore_reward = min(
self.reward_knee_explore_cap,
self.reward_knee_explore_scale * knee_action_mag
+ self.reward_knee_explore_delta_scale * knee_action_delta,
)
else:
knee_explore_reward = 0.0
if curr_dist_to_target < 0.3:
arrival_bonus = self.target_distance_wp * 8  # bonus for reaching the waypoint
else:
arrival_bonus = 0.0
total = (
progress_reward
+ survival_reward
+ smoothness_penalty
+ idle_penalty
+ split_penalty
+ inward_penalty
+ ankle_roll_penalty
+ ankle_roll_cross_penalty
+ left_hip_yaw_penalty
+ right_hip_yaw_penalty
+ knee_explore_reward
+ arrival_bonus
)
now = time.time()
if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
@@ -523,6 +594,14 @@ class WalkEnv(gym.Env):
f"survival_reward:{survival_reward:.4f},"
f"smoothness_penalty:{smoothness_penalty:.4f},"
f"idle_penalty:{idle_penalty:.4f},"
f"split_penalty:{split_penalty:.4f},"
f"inward_penalty:{inward_penalty:.4f},"
f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
f"knee_explore_reward:{knee_explore_reward:.4f},"
f"arrival_bonus:{arrival_bonus:.4f},"
f"total:{total:.4f}"
)
return total
@@ -535,16 +614,10 @@ class WalkEnv(gym.Env):
max_action_delta = 0.5  # Limit how much the action can change from the previous step to encourage smoother motions.
if self.previous_action is not None:
action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
action[0:2] = 0
action[3] = 4
action[7] = -4
action[2] = 0
action[6] = 0
action[4] = 0
action[5] = -5
action[8] = 0
action[9] = 5
action[10] = 0
# Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist.
action[0:2] = np.clip(action[0:2], -2.0, 2.0)
action[2:10] = np.clip(action[2:10], -7.0, 7.0)
action[10] = np.clip(action[10], -3.0, 3.0)
action[11] = np.clip(action[11], -6, 6)
action[17] = np.clip(action[17], -6, 6)
# action[11] = 1
@@ -615,7 +688,7 @@ class Train(Train_Base):
def train(self, args):
# --------------------------------------- Learning parameters
n_envs = 12
n_envs = 20
server_warmup_sec = 3.0
n_steps_per_env = 256 # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = 512 # should be a factor of (n_steps_per_env * n_envs)
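# Sanity check: the buffer now holds n_steps_per_env * n_envs = 256 * 20 = 5120
# samples, and 5120 / 512 = 10, so minibatch_size still divides it evenly.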
@@ -629,7 +702,7 @@ class Train(Train_Base):
monitor_train_env = False
eval_freq_mult = 60
save_freq_mult = 60
eval_eps = 3
eval_eps = 7
folder_name = f'Walk_R{self.robot_type}'
model_path = f'./scripts/gyms/logs/{folder_name}/'