Update scripts and upload models for the turn-around gym

xxh
2026-04-01 07:48:36 -04:00
parent 6ffc9452f9
commit 28e7eb0692
20 changed files with 12657 additions and 62 deletions


@@ -158,11 +158,23 @@ class WalkEnv(gym.Env):
        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = False
-       self.reset_beam_yaw_range_deg = 45  # randomize the target direction fully to encourage learning a real walk instead of a fixed gait
+       self.reset_beam_yaw_range_deg = float(os.environ.get("GYM_CPU_RESET_BEAM_YAW_RANGE_DEG", "180"))
+       self.reset_target_bearing_range_deg = float(os.environ.get("GYM_CPU_RESET_TARGET_BEARING_RANGE_DEG", "45"))
+       self.reset_target_distance_min = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MIN", "1.2"))
+       self.reset_target_distance_max = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MAX", "2.8"))
+       # Swap the bounds if they were supplied out of order, so the sampling range stays valid.
+       if self.reset_target_distance_min > self.reset_target_distance_max:
+           self.reset_target_distance_min, self.reset_target_distance_max = (
+               self.reset_target_distance_max,
+               self.reset_target_distance_min,
+           )
        self.reset_joint_noise_rad = 0.025
        self.reset_perturb_steps = 4
        self.reset_recover_steps = 8
+       self.reward_smoothness_scale = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_SCALE", "0.06"))
+       self.reward_smoothness_cap = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_CAP", "0.45"))
+       self.reward_head_toward_bonus = float(os.environ.get("GYM_CPU_REWARD_HEAD_TOWARD_BONUS", "0.7"))
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track the previous position
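Every new knob above follows the same read-a-float-from-the-environment pattern. A minimal sketch of that pattern as a standalone helper; the `env_float` name is hypothetical and not part of the commit:

```python
import os

def env_float(name: str, default: float) -> float:
    # Read a float tuning knob from the environment, falling back to a default.
    return float(os.environ.get(name, str(default)))

# Same defaults as the commit, including the min/max swap guard.
dist_min = env_float("GYM_CPU_RESET_TARGET_DISTANCE_MIN", 1.2)
dist_max = env_float("GYM_CPU_RESET_TARGET_DISTANCE_MAX", 2.8)
if dist_min > dist_max:
    dist_min, dist_max = dist_max, dist_min
```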
@@ -321,8 +333,8 @@ class WalkEnv(gym.Env):
        if seed is not None:
            np.random.seed(seed)
-       target_distance = np.random.uniform(1.2, 2.8)
-       target_bearing_deg = np.random.uniform(-180.0, 180.0)
+       target_distance = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
+       target_bearing_deg = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
        self.step_counter = 0
        self.waypoint_index = 0
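Reset sampling now draws from the configurable ranges rather than hard-coded constants. A sketch of the same sampling in isolation; the conversion to a target position is illustrative and not taken from the commit:

```python
import numpy as np

rng = np.random.default_rng(0)  # the commit seeds the global np.random; a local Generator is used here
target_distance = rng.uniform(1.2, 2.8)                 # meters, within [distance_min, distance_max]
target_bearing = np.deg2rad(rng.uniform(-45.0, 45.0))   # symmetric about the current heading
# Hypothetical conversion to a target point in the robot's frame.
target_xy = target_distance * np.array([np.cos(target_bearing), np.sin(target_bearing)])
```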
@@ -405,6 +417,7 @@ class WalkEnv(gym.Env):
        return

    def compute_reward(self, previous_pos, current_pos, action):
+       print(time.time(), self.step_counter)  # debug: wall-clock timing trace per step
        height = float(self.Player.world.global_position[2])
        robot = self.Player.robot
@@ -443,12 +456,31 @@ class WalkEnv(gym.Env):
        # Keep the reward simple: turn correctly, stay stable, avoid jerky actions.
        delta_action_norm = float(np.linalg.norm(action - self.last_action_for_reward))
-       smoothness_penalty = -0.1 * delta_action_norm
+       # Cap the smoothness penalty so it regularizes behavior without dominating the total reward.
+       smoothness_penalty = -min(self.reward_smoothness_cap, self.reward_smoothness_scale * delta_action_norm)
        posture_penalty = -0.45 * tilt_mag
        # Penalize roll/pitch rotational shake, but do not penalize yaw turning directly.
        ang_vel_penalty = -0.04 * rp_ang_vel_mag
        joint_pos = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        ) * self.train_sim_flip
+       left_hip_roll = float(joint_pos[12])
+       right_hip_roll = float(joint_pos[18])
+       left_ankle_roll = float(joint_pos[16])
+       right_ankle_roll = float(joint_pos[22])
+       hip_spread = left_hip_roll - right_hip_roll
+       ankle_spread = left_ankle_roll - right_ankle_roll
+       stance_metric = 0.6 * abs(hip_spread) + 0.4 * abs(ankle_spread)
+       # Penalize a narrow stance (feet too close together) and scissoring (a crossed-leg pattern).
+       stance_collapse_penalty = -4 * max(0.0, self.min_stance_rad - stance_metric)
+       cross_leg_penalty = -2.5 * max(0.0, -(hip_spread * ankle_spread))
        # Torso/lower-body linkage: reward coordinated turning, punish waist-only spinning.
        waist_speed = abs(float(joint_speed_rad[10]))
        lower_body_speed = float(np.mean(np.abs(joint_speed_rad[11:23])))
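The stance terms reduce four roll angles to one spread metric and two penalties. A self-contained sketch, assuming the diff's joint indexing (12/18 for left/right hip roll, 16/22 for left/right ankle roll) and an externally supplied `min_stance_rad`; the function name is illustrative:

```python
import numpy as np

def stance_penalties(joint_pos_rad: np.ndarray, min_stance_rad: float) -> tuple:
    """Narrow-stance and scissoring penalties from hip/ankle roll angles."""
    hip_spread = float(joint_pos_rad[12] - joint_pos_rad[18])
    ankle_spread = float(joint_pos_rad[16] - joint_pos_rad[22])
    # Weighted spread: hips dominate, ankles refine.
    stance_metric = 0.6 * abs(hip_spread) + 0.4 * abs(ankle_spread)
    stance_collapse = -4.0 * max(0.0, min_stance_rad - stance_metric)  # feet too close together
    cross_leg = -2.5 * max(0.0, -(hip_spread * ankle_spread))          # rolls with opposite signs
    return stance_collapse, cross_leg
```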
@@ -475,31 +507,56 @@ class WalkEnv(gym.Env):
        # Main heading objective: face the target direction.
        # heading_align_reward = 1.0 * math.cos(yaw_error)
+       abs_yaw_error = abs(yaw_error)
        # Reward reducing the heading error between consecutive steps.
        # if self.last_yaw_error is None:
        #     heading_progress_reward = 0.0
        # else:
        #     heading_progress_reward = 0.7 * (abs(self.last_yaw_error) - abs(yaw_error))
        # self.last_yaw_error = yaw_error
+       # Use a deadzone and a smaller gain to avoid high-frequency jitter near alignment.
+       if self.last_yaw_error is None:
+           heading_progress_reward = 0.0
+       else:
+           prev_abs_yaw_error = abs(self.last_yaw_error)
+           yaw_err_delta = prev_abs_yaw_error - abs_yaw_error
+           progress_gate = 1.0 if abs_yaw_error > math.radians(4.0) else 0.0
+           heading_progress_reward = 0.70 * progress_gate * yaw_err_delta
+           heading_progress_reward = float(np.clip(heading_progress_reward, -0.70, 0.70))
+       self.last_yaw_error = yaw_error
        yaw_rate = float(np.deg2rad(robot.gyroscope[2]))
        yaw_rate_abs = abs(yaw_rate)
-       abs_yaw_error = abs(yaw_error)
        turn_dir = float(np.sign(yaw_error))
        # Continuous turn shaping prevents a reward discontinuity near small heading errors.
        turn_gate = min(1.0, abs_yaw_error / math.radians(45.0))
-       turn_rate_reward = 0.45 * turn_gate * math.tanh(2.0 * turn_dir * yaw_rate)
-       head_toward_bonus = 1 if abs_yaw_error < math.radians(10.0) else 0
+       turn_rate_reward = 0.70 * turn_gate * math.tanh(2.0 * turn_dir * yaw_rate)
+       head_toward_bonus = self.reward_head_toward_bonus if abs_yaw_error < math.radians(8.0) else 0.0
+       # After roughly aligning with the target, prioritize standing stability over continued aggressive turning.
+       aligned_gate = max(0.0, 1.0 - abs_yaw_error / math.radians(18.0))
+       post_turn_ang_vel_penalty = -0.10 * aligned_gate * min(rp_ang_vel_mag, math.radians(60.0))
+       lower_body_speed_mag = float(np.mean(np.abs(joint_speed_rad[11:23])))
+       post_turn_pose_bonus = 0.30 * aligned_gate * math.exp(-tilt_mag / 0.20) * math.exp(-lower_body_speed_mag / 1.10)
+       # Keep the feet separated when aligned so the robot does not collapse its stance after turning.
+       aligned_stance_bonus = 0.20 * aligned_gate * min(1.0, stance_metric / max(self.min_stance_rad, 1e-4))
        # Once roughly aligned, damp yaw oscillation and reward keeping a stable stance.
-       anti_oscillation_penalty = -0.22 * yaw_rate_abs if abs_yaw_error < math.radians(12.0) else 0.0
-       stabilize_bonus = 0.35 if (
+       anti_oscillation_penalty = -0.08 * min(yaw_rate_abs, math.radians(35.0)) if abs_yaw_error < math.radians(7.0) else 0.0
+       stabilize_bonus = 0.45 if (
            abs_yaw_error < math.radians(8.0)
            and yaw_rate_abs < math.radians(10.0)
-           and tilt_mag < 0.22
+           and tilt_mag < 0.28
        ) else 0.0
-       alive_bonus = max(0.5, 1.5 * math.cos(yaw_error))  # Encourage facing the target, but give some baseline reward for not falling even before the robot faces the target.
+       # Improved: piecewise shaping with a smoother transition between segments.
+       if abs_yaw_error < math.radians(15.0):
+           alive_bonus = 2 * (1.0 - abs_yaw_error / math.radians(15.0)) ** 0.5  # the square root keeps small angles sensitive
+       else:
+           alive_bonus = max(0.1, 2 * (1.0 - (abs_yaw_error - math.radians(15.0)) / math.radians(75.0)))
        target_height = self.initial_height
-       height_error = height - target_height
+       # Improved: piecewise, so larger deviations are penalized more heavily.
+       height_error = height - target_height
+       if abs(height_error) < 0.04:
+           height_penalty = -2.5 * abs(height_error)  # small deviation: linear penalty
+       else:
+           height_penalty = -2.5 * 0.04 - 4.0 * (abs(height_error) - 0.04)  # large deviation: steeper slope
        total = (
            alive_bonus
@@ -510,11 +567,17 @@ class WalkEnv(gym.Env):
            + waist_only_turn_penalty
            + yaw_link_reward
            + head_toward_bonus
+           + heading_progress_reward
            + anti_oscillation_penalty
            + stabilize_bonus
+           + height_penalty
+           # + post_turn_ang_vel_penalty
+           # + post_turn_pose_bonus
+           # + aligned_stance_bonus
            # + heading_align_reward
-           # + heading_progress_reward
            + turn_rate_reward
+           # + stance_collapse_penalty
+           # + cross_leg_penalty
        )

        now = time.time()
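Taken together, the heading terms now in the total implement "progress with a deadzone, rate shaping with a gate". A condensed sketch of just those two terms, mirroring the diff's constants; the function name and signature are illustrative, and `math.copysign` stands in for `np.sign`:

```python
import math

def heading_shaping(yaw_error: float, last_yaw_error, yaw_rate: float) -> float:
    """Deadzoned heading-progress term plus gated turn-rate term."""
    abs_err = abs(yaw_error)
    progress = 0.0
    if last_yaw_error is not None and abs_err > math.radians(4.0):  # deadzone near alignment
        progress = 0.70 * (abs(last_yaw_error) - abs_err)
        progress = max(-0.70, min(0.70, progress))                  # clip to +/- 0.70
    turn_gate = min(1.0, abs_err / math.radians(45.0))              # fades shaping out near alignment
    turn_dir = math.copysign(1.0, yaw_error)
    turn_rate_term = 0.70 * turn_gate * math.tanh(2.0 * turn_dir * yaw_rate)
    return progress + turn_rate_term
```

Because the gate multiplies the tanh-shaped rate term, the shaping fades smoothly to zero as the heading error closes instead of switching off abruptly.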
@@ -524,23 +587,48 @@ class WalkEnv(gym.Env):
        if self._reward_debug_steps_left > 0:
            self._reward_debug_steps_left -= 1
-           print(
+           # print(
+           #     f"reward_debug: step={self.step_counter}, "
+           #     f"alive_bonus:{alive_bonus:.4f}, "
+           #     # f"heading_align_reward:{heading_align_reward:.4f}, "
+           #     # f"heading_progress_reward:{heading_progress_reward:.4f}, "
+           #     f"head_toward_bonus:{head_toward_bonus}, "
+           #     f"posture_penalty:{posture_penalty:.4f}, "
+           #     f"ang_vel_penalty:{ang_vel_penalty:.4f}, "
+           #     f"smoothness_penalty:{smoothness_penalty:.4f}, "
+           #     f"linkage_reward:{linkage_reward:.4f}, "
+           #     f"waist_only_turn_penalty:{waist_only_turn_penalty:.4f}, "
+           #     f"yaw_link_reward:{yaw_link_reward:.4f}, "
+           #     f"anti_oscillation_penalty:{anti_oscillation_penalty:.4f}, "
+           #     f"stabilize_bonus:{stabilize_bonus:.4f}, "
+           #     f"turn_rate_reward:{turn_rate_reward:.4f}, "
+           #     f"total:{total:.4f}"
+           # )
+           self.debug_log(
                f"reward_debug: step={self.step_counter}, "
                f"alive_bonus:{alive_bonus:.4f}, "
                # f"heading_align_reward:{heading_align_reward:.4f}, "
-               # f"heading_progress_reward:{heading_progress_reward:.4f}, "
+               f"heading_progress_reward:{heading_progress_reward:.4f}, "
                f"head_toward_bonus:{head_toward_bonus}, "
                f"posture_penalty:{posture_penalty:.4f}, "
                f"ang_vel_penalty:{ang_vel_penalty:.4f}, "
                f"smoothness_penalty:{smoothness_penalty:.4f}, "
                f"linkage_reward:{linkage_reward:.4f}, "
                f"waist_only_turn_penalty:{waist_only_turn_penalty:.4f}, "
                f"yaw_link_reward:{yaw_link_reward:.4f}, "
                f"anti_oscillation_penalty:{anti_oscillation_penalty:.4f}, "
                f"stabilize_bonus:{stabilize_bonus:.4f}, "
                f"turn_rate_reward:{turn_rate_reward:.4f}, "
+               f"height_penalty:{height_penalty:.4f}, "
+               # f"post_turn_ang_vel_penalty:{post_turn_ang_vel_penalty:.4f}, "
+               # f"post_turn_pose_bonus:{post_turn_pose_bonus:.4f}, "
+               f"aligned_stance_bonus:{aligned_stance_bonus:.4f}, "
+               f"stance_collapse_penalty:{stance_collapse_penalty:.4f}, "
+               f"cross_leg_penalty:{cross_leg_penalty:.4f}, "
                f"total:{total:.4f}"
            )

        return total
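The piecewise alive bonus and height penalty from the hunk above translate directly into standalone functions. A sketch for reference, preserving the diff's constants and behavior; the function names are illustrative:

```python
import math

def alive_bonus(abs_yaw_error: float) -> float:
    """Square-root ramp inside 15 degrees, linear falloff out to 90 degrees."""
    if abs_yaw_error < math.radians(15.0):
        return 2.0 * (1.0 - abs_yaw_error / math.radians(15.0)) ** 0.5
    return max(0.1, 2.0 * (1.0 - (abs_yaw_error - math.radians(15.0)) / math.radians(75.0)))

def height_penalty(height_error: float) -> float:
    """Linear penalty inside 4 cm of the target height, steeper slope beyond."""
    if abs(height_error) < 0.04:
        return -2.5 * abs(height_error)
    return -2.5 * 0.04 - 4.0 * (abs(height_error) - 0.04)
```

The height penalty is continuous at the 4 cm breakpoint (both branches give -0.1). The alive bonus is not: the first branch falls to 0 as the error approaches 15 degrees while the second branch starts at 2, so as written the "smooth transition" the comment promises does not hold at the boundary.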
@@ -549,7 +637,10 @@ class WalkEnv(gym.Env):
    def step(self, action):
        r = self.Player.robot
-       self.previous_action = action
+       # Limit how much the action can change from the previous step to encourage smoother motion.
+       max_action_delta = 0.1
+       if self.previous_action is not None:
+           action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
+       self.previous_action = action.copy()

        self.target_joint_positions = (
            # self.joint_nominal_position +
@@ -559,10 +650,10 @@ class WalkEnv(gym.Env):
        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
-               r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
+               r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.2
            )
-       self.previous_action = action
+       self.previous_action = action.copy()

        self.sync()  # run one simulation step
        self.step_counter += 1
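The per-step clamp added at the top of `step()` is a standard action rate limit. A minimal standalone version of the same idea; the function name is illustrative:

```python
import numpy as np

def rate_limit(action: np.ndarray, previous: np.ndarray, max_delta: float = 0.1) -> np.ndarray:
    """Clip each action component to within max_delta of the previous step's action."""
    if previous is None:
        return action
    return np.clip(action, previous - max_delta, previous + max_delta)
```

Since `__init__` initializes `previous_action` to zeros, the clamp also caps the very first action at `max_delta` per joint.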
@@ -664,11 +755,12 @@ class Train(Train_Base):
                gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
                # target_kl=0.03,
                n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
-               tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
+               tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/",
+               max_grad_norm=float(os.environ.get("GYM_CPU_TRAIN_MAX_GRAD_NORM", "0.5")),
            )
            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
-                                         eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=30,
+                                         eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=5,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
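For reference, a minimal sketch of the PPO construction these lines tune, using stable-baselines3. Only `gamma`, `n_epochs`, `max_grad_norm`, and `tensorboard_log` come from the diff; `"MlpPolicy"` and the surrounding scaffolding are assumptions about the training script:

```python
import os
from stable_baselines3 import PPO

def make_model(env, folder_name: str) -> PPO:
    # Hyperparameters not listed here fall back to stable-baselines3 defaults.
    return PPO(
        "MlpPolicy",  # assumed; the actual policy class is defined elsewhere in the repo
        env,
        gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),
        n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
        max_grad_norm=float(os.environ.get("GYM_CPU_TRAIN_MAX_GRAD_NORM", "0.5")),
        tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/",
    )
```

Dropping `eval_eps` from 30 to 5 trades noisier evaluation estimates for much faster checkpointing during training.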