From 239fbf4087e6e08ec31958b44499b981ae783f28 Mon Sep 17 00:00:00 2001 From: xxh Date: Sat, 11 Apr 2026 04:15:26 -0400 Subject: [PATCH] fix knee action sign --- scripts/gyms/Walk.py | 67 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/scripts/gyms/Walk.py b/scripts/gyms/Walk.py index 7759489..9bee12b 100755 --- a/scripts/gyms/Walk.py +++ b/scripts/gyms/Walk.py @@ -154,7 +154,7 @@ class WalkEnv(gym.Env): ] ) - self.scaling_factor = 0.3 + self.scaling_factor = 0.5 # self.scaling_factor = 1 # Encourage a minimum lateral stance so the policy avoids feet overlap. @@ -221,6 +221,9 @@ class WalkEnv(gym.Env): self.reward_knee_explore_scale = 0.10 self.reward_knee_explore_delta_scale = 0.12 self.reward_knee_explore_cap = 0.55 + self.reward_hip_pitch_explore_scale = 0.14 + self.reward_hip_pitch_explore_delta_scale = 0.10 + self.reward_hip_pitch_explore_cap = 0.45 self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS)) self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS)) @@ -512,6 +515,9 @@ class WalkEnv(gym.Env): left_ankle_roll = -float(joint_pos[16]) right_ankle_roll = float(joint_pos[22]) + left_knee_flex = abs(float(joint_pos[14])) + right_knee_flex = abs(float(joint_pos[20])) + avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex) max_leg_roll = 1 # 防止劈叉姿势 split_penalty = -0.6 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll) @@ -532,14 +538,14 @@ class WalkEnv(gym.Env): ankle_roll_cross_penalty = -0.2 * max(0.0, -(left_ankle_roll * right_ankle_roll)) # 分别惩罚左右大腿过度转动 - max_hip_yaw = 1 # 最大允许的yaw角度 + max_hip_yaw = 0.5 # 最大允许的yaw角度 left_hip_yaw_penalty = -0.25 * max(0.0, abs(left_hip_yaw) - max_hip_yaw) right_hip_yaw_penalty = -0.25 * max(0.0, abs(right_hip_yaw) - max_hip_yaw) # Forward-progress reward (distance delta) with anti-stuck shaping. 
progress_reward = 18.0 * dist_delta survival_reward = 0.03 - smoothness_penalty = -0.025 * float(np.linalg.norm(action - self.last_action_for_reward)) + smoothness_penalty = -0.03 * float(np.linalg.norm(action - self.last_action_for_reward)) step_displacement = float(np.linalg.norm(current_pos - previous_pos)) if self.step_counter > 30 and step_displacement < 0.008 and self.target_distance_wp > 0.3: idle_penalty = -0.20 @@ -547,8 +553,8 @@ class WalkEnv(gym.Env): idle_penalty = 0.0 # Encourage active/varied knee motions early in training without dominating progress reward. - left_knee_act = -float(action[14]) - right_knee_act = float(action[20]) + left_knee_act = float(action[14]) + right_knee_act = -float(action[20]) left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14])) right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20])) knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act)) @@ -562,6 +568,29 @@ class WalkEnv(gym.Env): else: knee_explore_reward = 0.0 + # Directly encourage observable knee flexion instead of only action exploration. + knee_lift_ratio = min(1.0, avg_knee_flex / max(1e-6, self.reward_knee_lift_target)) + knee_lift_reward = self.reward_knee_lift_scale * knee_lift_ratio + knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max( + 0.0, self.reward_knee_lift_target - avg_knee_flex + ) + + # Encourage hip-pitch exploration to improve forward stride generation. 
+ left_hip_pitch_act = float(action[11]) + right_hip_pitch_act = float(action[17]) + left_hip_pitch_delta = abs(left_hip_pitch_act - float(self.last_action_for_reward[11])) + right_hip_pitch_delta = abs(right_hip_pitch_act - float(self.last_action_for_reward[17])) + hip_pitch_action_mag = 0.5 * (abs(left_hip_pitch_act) + abs(right_hip_pitch_act)) + hip_pitch_action_delta = 0.5 * (left_hip_pitch_delta + right_hip_pitch_delta) + if self.step_counter > 10: + hip_pitch_explore_reward = min( + self.reward_hip_pitch_explore_cap, + self.reward_hip_pitch_explore_scale * hip_pitch_action_mag + + self.reward_hip_pitch_explore_delta_scale * hip_pitch_action_delta, + ) + else: + hip_pitch_explore_reward = 0.0 + if curr_dist_to_target < 0.3: arrival_bonus = self.target_distance_wp * 8 ## 奖励到达目标点 else: @@ -579,6 +608,9 @@ class WalkEnv(gym.Env): + left_hip_yaw_penalty + right_hip_yaw_penalty + knee_explore_reward + # + knee_lift_reward + # + knee_lift_shortfall_penalty + + hip_pitch_explore_reward + arrival_bonus ) @@ -600,7 +632,10 @@ class WalkEnv(gym.Env): f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f}," f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f}," f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f}," - f"knee_explore_reward:{knee_explore_reward:.4f}," + # f"knee_explore_reward:{knee_explore_reward:.4f}," + # f"knee_lift_reward:{knee_lift_reward:.4f}," + f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f}," + f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f}," f"arrival_bonus:{arrival_bonus:.4f}," f"total:{total:.4f}" ) @@ -615,11 +650,19 @@ class WalkEnv(gym.Env): if self.previous_action is not None: action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta) # Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist. 
- action[0:2] = np.clip(action[0:2], -2.0, 2.0) - action[2:10] = np.clip(action[2:10], -7.0, 7.0) - action[10] = np.clip(action[10], -3.0, 3.0) - action[11] = np.clip(action[11], -6, 6) - action[17] = np.clip(action[17], -6, 6) + action[0:2] = 0 + action[3] = np.clip(action[3], 3, 5) + action[7] = np.clip(action[7], -5, -3) + action[2] = np.clip(action[2], -6, 6) + action[6] = np.clip(action[6], -6, 6) + action[4] = 0 + action[5] = np.clip(action[5], -8, -2) + action[8] = 0 + action[9] = np.clip(action[9], 2, 8) + action[10] = np.clip(action[10], -0.6, 0.6) + # Boost knee command range so policy can produce visible knee flexion earlier. + # action[14] = np.clip(action[14] * 1.1, -10.0, 10.0) + # action[20] = np.clip(action[20] * 1.1, -10.0, 10.0) # action[11] = 1 # action[17] = 1 # action[12] = -0.01 @@ -694,7 +737,7 @@ class Train(Train_Base): minibatch_size = 512 # should be a factor of (n_steps_per_env * n_envs) total_steps = 30000000 learning_rate = 2e-4 - ent_coef = 0.08 + ent_coef = 0.035 clip_range = 0.2 gamma = 0.97 n_epochs = 3