Compare commits

7 Commits

9 changed files with 1490 additions and 304 deletions

.gitignore vendored
View File

@@ -20,3 +20,4 @@ best_model.zip
*.yaml
*.iml
*.TXT
events.out.tfevents.*

View File

@@ -1,14 +0,0 @@
Train (default)
bash train.sh
Test (realtime + rendering)
GYM_CPU_MODE=test GYM_CPU_TEST_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip GYM_CPU_TEST_FOLDER=scripts/gyms/logs/Walk_R0_005/ GYM_CPU_TEST_NO_RENDER=0 GYM_CPU_TEST_NO_REALTIME=0 bash train.sh
Test (no rendering, non-realtime)
GYM_CPU_MODE=test GYM_CPU_TEST_NO_RENDER=1 GYM_CPU_TEST_NO_REALTIME=1 bash train.sh
Retrain (continue training)
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip bash train.sh
Retrain with modified training hyperparameters
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_004/best_model.zip GYM_CPU_TRAIN_LR=2e-4 GYM_CPU_TRAIN_CLIP_RANGE=0.13 GYM_CPU_TRAIN_BATCH_SIZE=256 GYM_CPU_TRAIN_GAMMA=0.95 GYM_CPU_TRAIN_ENT_COEF=0.05 GYM_CPU_TRAIN_EPOCHS=8 bash train.sh

View File

@@ -1,4 +1,5 @@
import logging
import os
import socket
import time
from select import select
@@ -15,6 +16,11 @@ class Server:
self.__socket: socket.socket = self._create_socket()
self.__send_buff = []
self.__rcv_buffer_size = 1024
self.__rcv_buffer_default_size = 1024
self.__max_msg_size = 1048576
self.__shrink_threshold = 8192
self.__shrink_after_msgs = 200
self.__small_msg_streak = 0
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
def _create_socket(self) -> socket.socket:
@@ -105,6 +111,10 @@ class Server:
msg_size = int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)
# Guard against corrupted frame lengths that would trigger huge allocations.
if msg_size <= 0 or msg_size > self.__max_msg_size:
raise ConnectionResetError
if msg_size > self.__rcv_buffer_size:
self.__rcv_buffer_size = msg_size
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
@@ -120,6 +130,15 @@ class Server:
message=self.__rcv_buffer[:msg_size].decode()
)
if msg_size <= self.__shrink_threshold and self.__rcv_buffer_size > self.__rcv_buffer_default_size:
self.__small_msg_streak += 1
if self.__small_msg_streak >= self.__shrink_after_msgs:
self.__rcv_buffer_size = self.__rcv_buffer_default_size
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
self.__small_msg_streak = 0
else:
self.__small_msg_streak = 0
# Exit the loop when the socket has no more pending data
if len(select([self.__socket], [], [], 0.0)[0]) == 0:
break
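Side note (not part of the diff): the receive path above assumes length-prefixed framing, i.e. a 4-byte big-endian unsigned length followed by the payload, growing the buffer on demand and shrinking it back to the 1024-byte default after 200 consecutive messages of at most 8192 bytes. A minimal sketch of the matching sender-side framing, under that assumption:

import socket

def send_framed(sock: socket.socket, message: str) -> None:
    payload = message.encode()
    # 4-byte big-endian unsigned length header, then the payload itself.
    header = len(payload).to_bytes(4, byteorder="big", signed=False)
    sock.sendall(header + payload)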

View File

@@ -1,9 +1,14 @@
import subprocess
import os
import time
import threading
class Server():
WATCHDOG_ENABLED = True
WATCHDOG_INTERVAL_SEC = 30.0
WATCHDOG_RSS_MB_LIMIT = 800
def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
try:
import psutil
@@ -14,6 +19,10 @@ class Server():
self.first_server_p = first_server_p
self.n_servers = n_servers
self.rcss_processes = []
self._server_specs = []
self._watchdog_stop = threading.Event()
self._watchdog_lock = threading.Lock()
self._watchdog_thread = None
first_monitor_p = first_monitor_p + 100
# makes it easier to kill test servers without affecting train servers
@@ -23,38 +32,106 @@ class Server():
for i in range(n_servers):
port = first_server_p + i
mport = first_monitor_p + i
self._server_specs.append((port, mport, cmd, render_arg, realtime_arg))
proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
self.rcss_processes.append(proc)
server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
if self.WATCHDOG_ENABLED:
self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=True)
self._watchdog_thread.start()
proc = subprocess.Popen(
server_cmd.split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
start_new_session=True
def _spawn_server(self, port, mport, cmd, render_arg, realtime_arg):
server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
proc = subprocess.Popen(
server_cmd.split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
start_new_session=True
)
# Avoid startup storm when launching many servers at once.
time.sleep(0.03)
rc = proc.poll()
if rc is not None:
raise RuntimeError(
f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
)
# Avoid startup storm when launching many servers at once.
time.sleep(0.03)
return proc
rc = proc.poll()
if rc is not None:
raise RuntimeError(
f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
)
@staticmethod
def _pid_rss_mb(pid):
try:
with open(f"/proc/{pid}/status", "r", encoding="utf-8") as f:
for line in f:
if line.startswith("VmRSS:"):
parts = line.split()
if len(parts) >= 2:
# VmRSS is kB
return float(parts[1]) / 1024.0
except (FileNotFoundError, ProcessLookupError, PermissionError, OSError):
return 0.0
return 0.0
self.rcss_processes.append(proc)
def _restart_server_at_index(self, idx, reason):
port, mport, cmd, render_arg, realtime_arg = self._server_specs[idx]
old_proc = self.rcss_processes[idx]
try:
old_proc.terminate()
old_proc.wait(timeout=1.0)
except Exception:
try:
old_proc.kill()
except Exception:
pass
new_proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
self.rcss_processes[idx] = new_proc
print(
f"[ServerWatchdog] Restarted server idx={idx} port={port} monitor={mport} reason={reason}"
)
def _watchdog_loop(self):
while not self._watchdog_stop.wait(self.WATCHDOG_INTERVAL_SEC):
with self._watchdog_lock:
for i, proc in enumerate(self.rcss_processes):
rc = proc.poll()
if rc is not None:
self._restart_server_at_index(i, f"exited:{rc}")
continue
rss_mb = self._pid_rss_mb(proc.pid)
if rss_mb > self.WATCHDOG_RSS_MB_LIMIT:
self._restart_server_at_index(i, f"rss_mb:{rss_mb:.1f}")
def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
''' Check if any server is running on chosen ports '''
found = False
p_list = [p for p in psutil.process_iter() if p.cmdline() and "rcssservermj" in " ".join(p.cmdline())]
range1 = (first_server_p, first_server_p + n_servers)
range2 = (first_monitor_p, first_monitor_p + n_servers)
bad_processes = []
def safe_cmdline(proc):
try:
return proc.cmdline()
except (psutil.ZombieProcess, psutil.NoSuchProcess, psutil.AccessDenied, OSError):
return []
p_list = []
for p in psutil.process_iter():
cmdline = safe_cmdline(p)
if cmdline and "rcssservermj" in " ".join(cmdline):
p_list.append(p)
for p in p_list:
# currently ignoring remaining default port when only one of the ports is specified (uncommon scenario)
ports = [int(arg) for arg in p.cmdline()[1:] if arg.isdigit()]
cmdline = safe_cmdline(p)
if not cmdline:
continue
ports = [int(arg) for arg in cmdline[1:] if arg.isdigit()]
if len(ports) == 0:
ports = [60000, 60100] # default server ports (changing this is unlikely)
@@ -66,7 +143,7 @@ class Server():
print("\nThere are already servers running on the same port(s)!")
found = True
bad_processes.append(p)
print(f"Port(s) {','.join(conflicts)} already in use by \"{' '.join(p.cmdline())}\" (PID:{p.pid})")
print(f"Port(s) {','.join(conflicts)} already in use by \"{' '.join(cmdline)}\" (PID:{p.pid})")
if found:
print()
@@ -78,6 +155,9 @@ class Server():
return
def kill(self):
self._watchdog_stop.set()
if self._watchdog_thread is not None:
self._watchdog_thread.join(timeout=1.0)
for p in self.rcss_processes:
p.kill()
print(f"Killed {self.n_servers} rcssservermj processes starting at {self.first_server_p}")

View File

@@ -7,7 +7,7 @@ from random import random
from random import uniform
from itertools import count
from stable_baselines3 import PPO
from stable_baselines3 import PPO, TD3, DDPG, SAC, A2C
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
@@ -53,8 +53,8 @@ class WalkEnv(gym.Env):
self.route_completed = False
self.debug_every_n_steps = 5
self.enable_debug_joint_status = False
self.reward_debug_interval_sec = float(os.environ.get("GYM_CPU_REWARD_DEBUG_INTERVAL_SEC", "600"))
self.reward_debug_burst_steps = int(os.environ.get("GYM_CPU_REWARD_DEBUG_BURST_STEPS", "10"))
self.reward_debug_interval_sec = 600.0
self.reward_debug_burst_steps = 10
self._reward_debug_last_time = time.time()
self._reward_debug_steps_left = 0
self.calibrate_nominal_from_neutral = True
@@ -64,6 +64,11 @@ class WalkEnv(gym.Env):
self._target_hz = 0.0
self._target_dt = 0.0
self._last_sync_time = None
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.zeros(3, dtype=np.float32)
self._speed_smoothing = 0.85
self._fallback_dt = 0.02
target_hz_env = 0
if target_hz_env:
try:
@@ -121,7 +126,7 @@ class WalkEnv(gym.Env):
0.0, # 22: Right_Ankle_Roll (rle6)
]
)
self.joint_nominal_position = np.zeros(self.no_of_actions)
# self.joint_nominal_position = np.zeros(self.no_of_actions)
self.train_sim_flip = np.array(
[
1.0, # 0: Head_yaw (he1)
@@ -150,7 +155,7 @@ class WalkEnv(gym.Env):
]
)
self.scaling_factor = 0.3
self.scaling_factor = 0.5
# self.scaling_factor = 1
# Encourage a minimum lateral stance so the policy avoids feet overlap.
@@ -158,10 +163,10 @@ class WalkEnv(gym.Env):
# Small reset perturbations for robustness training.
self.enable_reset_perturb = False
self.reset_beam_yaw_range_deg = float(os.environ.get("GYM_CPU_RESET_BEAM_YAW_RANGE_DEG", "180"))
self.reset_target_bearing_range_deg = float(os.environ.get("GYM_CPU_RESET_TARGET_BEARING_RANGE_DEG", "120"))
self.reset_target_distance_min = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MIN", "1.2"))
self.reset_target_distance_max = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MAX", "2.8"))
self.reset_beam_yaw_range_deg = 180.0
self.reset_target_bearing_range_deg = 0.0
self.reset_target_distance_min = 5
self.reset_target_distance_max = 10
if self.reset_target_distance_min > self.reset_target_distance_max:
self.reset_target_distance_min, self.reset_target_distance_max = (
self.reset_target_distance_max,
@@ -171,14 +176,87 @@ class WalkEnv(gym.Env):
self.reset_perturb_steps = 4
self.reset_recover_steps = 8
self.reward_smoothness_scale = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_SCALE", "0.06"))
self.reward_smoothness_cap = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_CAP", "0.45"))
self.reward_head_toward_bonus = float(os.environ.get("GYM_CPU_REWARD_HEAD_TOWARD_BONUS", "1"))
self.reward_smoothness_scale = 0.03
self.reward_smoothness_cap = 0.45
self.reward_forward_stability_gate = 0.35
self.reward_forward_tilt_hard_threshold = 0.50
self.reward_forward_tilt_hard_scale = 0.20
self.reward_head_toward_bonus = 1.0
self.turn_stationary_radius = 0.2
self.turn_stationary_penalty_scale = 3.0
self.stationary_start_steps = 20
self.stationary_step_eps = 0.015
self.stationary_penalty_scale = 1.2
self.train_stage = "walk"
self.in_place_radius = 0.18
self.in_place_center_reward_scale = 0.60
self.in_place_drift_penalty_scale = 1.20
self.waypoint_reach_distance = 0.3
self.num_waypoints = 1
self.exploration_start_steps = 40
self.exploration_scale = 0.012
self.exploration_cap = 0.2
self.exploration_target_novelty = 1.0
self.exploration_sigma = 0.7
self.reward_stride_swing_scale = 0.20
self.reward_stride_phase_scale = 0.18
self.reward_knee_drive_scale = 0.10
self.reward_knee_lift_scale = 0.12
self.reward_knee_lift_target = 0.15
self.reward_knee_lift_shortfall_scale = 0.05
self.reward_knee_overbend_threshold = 0.60
self.reward_knee_overbend_scale = 0.35
self.reward_hip_lift_scale = 0.12
self.reward_hip_lift_target = 0.80
self.reward_knee_alternate_scale = 0.10
self.reward_knee_bilateral_scale = 0.16
self.reward_single_leg_penalty_scale = 0.22
self.reward_knee_phase_switch_scale = 0.14
self.knee_phase_deadband = 0.10
self.knee_phase_min_interval = 18
self.knee_phase_target_interval = 22
self.knee_phase_fast_switch_penalty_scale = 0.10
self.knee_phase_max_hold_frames = 28
self.knee_phase_hold_penalty_scale = 0.18
self.reward_stride_cap = 0.80
self.reward_knee_explore_scale = 0.03
self.reward_knee_explore_delta_scale = 0.03
self.reward_knee_explore_cap = 0.10
self.reward_hip_pitch_explore_scale = 0.07
self.reward_hip_pitch_explore_delta_scale = 0.07
self.reward_hip_pitch_explore_cap = 0.10
self.reward_progress_scale = 18
self.reward_survival_scale = 0.5
self.reward_idle_penalty_scale = 0.6
self.reward_accel_penalty_scale = 0.08
self.reward_accel_penalty_cap = 0.40
self.reward_accel_abs_limit = 13.5
self.reward_accel_abs_penalty_scale = 0.05
self.reward_accel_abs_penalty_cap = 0.40
self.reward_heading_align_scale = 0.28
self.reward_heading_error_scale = 0.05
self.reward_progress_tilt_gate_start = 0.28
self.reward_progress_knee_gate_min = 0.16
self.reward_progress_hip_gate_over = 0.18
self.reward_progress_gate_floor = 0.25
self.reward_knee_straight_threshold = 0.18
self.reward_knee_straight_penalty_scale = 0.45
self.reward_hip_overextend_threshold = 0.9
self.reward_hip_overextend_penalty_scale = 1
self.reward_leg_stretch_penalty_scale = 0.35
self.reward_stretch_lean_combo_scale = 0.55
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.action_history_len = 50
self.prev_action_history = np.zeros((self.action_history_len, self.no_of_actions), dtype=np.float32)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0]) # Track previous position
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.Player.server.connect()
# sleep(2.0) # Longer wait for connection to establish completely
self.Player.server.send_immediate(
@@ -341,10 +419,22 @@ class WalkEnv(gym.Env):
self.route_completed = False
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.prev_action_history.fill(0.0)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0]) # Initialize for first step
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.walk_cycle_step = 0
self._reward_debug_steps_left = 0
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.array(
getattr(self.Player.robot, "accelerometer", np.zeros(3)),
dtype=np.float32,
)
# Randomize the beam target position and heading to increase training diversity
beam_x = (random() - 0.5) * 10
@@ -399,16 +489,29 @@ class WalkEnv(gym.Env):
self.initial_position = np.array(self.Player.world.global_position[:2])
self.previous_pos = self.initial_position.copy() # Critical: set to actual position
self.act = np.zeros(self.no_of_actions, np.float32)
# Randomize global target bearing so policy must learn to rotate toward it first.
# Generate multiple waypoints along a path
heading_deg = float(r.global_orientation_euler[2])
target_offset = MathOps.rotate_2d_vec(
np.array([target_distance, 0.0]),
heading_deg + target_bearing_deg,
is_rad=False,
)
point1 = self.initial_position + target_offset
self.point_list = [point1]
self.point_list = []
current_point = self.initial_position.copy()
for i in range(self.num_waypoints):
# Each waypoint is placed further along the path
target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
self.target_distance_wp = target_distance_wp
target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
target_offset = MathOps.rotate_2d_vec(
np.array([target_distance_wp, 0.0]),
heading_deg + target_bearing_deg_wp,
is_rad=False,
)
next_point = current_point + target_offset
self.point_list.append(next_point)
current_point = next_point.copy()
self.target_position = self.point_list[self.waypoint_index]
if self.train_stage == "in_place":
self.target_position = self.initial_position.copy()
self.initial_height = self.Player.world.global_position[2]
return self.observe(True), {}
@@ -421,182 +524,205 @@ class WalkEnv(gym.Env):
height = float(self.Player.world.global_position[2])
robot = self.Player.robot
prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
dist_delta = prev_dist_to_target - curr_dist_to_target
joint_pos_rad = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
joint_speed_rad = np.deg2rad(
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
)
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
ang_vel = np.deg2rad(robot.gyroscope)
rp_ang_vel_mag = float(np.linalg.norm(ang_vel[:2]))
is_fallen = height < 0.55
is_fallen = height < 0.45
if is_fallen:
# remain = max(0, 800 - self.step_counter)
# return -8.0 - 0.01 * remain
return -20.0
if np.linalg.norm(current_pos - previous_pos) > 0.005:
position_penalty = -3 * float(np.linalg.norm(current_pos - previous_pos))
else:
position_penalty = 0.0
# Turn-to-target shaping.
to_target = self.target_position - current_pos
dist_to_target = float(np.linalg.norm(to_target))
if dist_to_target > 1e-6:
target_yaw = math.atan2(float(to_target[1]), float(to_target[0]))
else:
target_yaw = 0.0
robot_yaw = math.radians(float(robot.global_orientation_euler[2]))
yaw_error = target_yaw - robot_yaw
# Main heading objective: face the target direction.
# heading_align_reward = 1.0 * math.cos(yaw_error)
abs_yaw_error = abs(yaw_error)
alive_bonus = 2.0 * max(0.0, 1.0 - abs_yaw_error / math.pi)
head_toward_bonus = self.reward_head_toward_bonus if abs_yaw_error < math.radians(4.0) else 0.0
if self.last_yaw_error is None:
heading_progress_reward = 0.0
else:
prev_abs_yaw_error = abs(self.last_yaw_error)
yaw_err_delta = prev_abs_yaw_error - abs_yaw_error
progress_gate = 1.0 if abs_yaw_error > math.radians(4.0) else 0.0
heading_progress_reward = progress_gate * yaw_err_delta
heading_progress_reward = float(np.clip(heading_progress_reward, -1, 1))
self.last_yaw_error = yaw_error
# action_penalty = -0.01 * float(np.linalg.norm(action))
smoothness_penalty = -0.05 * float(np.linalg.norm(action - self.last_action_for_reward))
posture_penalty = -0.6 * tilt_mag
# Penalize roll/pitch rotational shake but do not penalize yaw turning directly.
ang_vel_penalty = -0.06 * rp_ang_vel_mag
return -2.0
joint_pos = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
) * self.train_sim_flip
left_hip_roll = float(joint_pos[12])
left_hip_roll = -float(joint_pos[12])
right_hip_roll = float(joint_pos[18])
left_hip_pitch = float(joint_pos[11])
right_hip_pitch = float(joint_pos[17])
left_ankle_roll = float(joint_pos[16])
left_ankle_roll = -float(joint_pos[16])
right_ankle_roll = float(joint_pos[22])
left_knee_flex = abs(float(joint_pos[14]))
right_knee_flex = abs(float(joint_pos[20]))
avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex)
max_leg_roll = 0.2 # prevent a splits posture
split_penalty = -0.8 * max(0.0, (-left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
left_hip_yaw = float(joint_pos[13])
max_leg_roll = 0.5 # prevent a splits posture
split_penalty = -0.12 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
left_hip_yaw = -float(joint_pos[13])
right_hip_yaw = float(joint_pos[19])
min_leg_separation = 0.05 # minimum leg separation (prevents the legs from getting too close)
# Penalize excessive leg adduction (legs pulled together), based on leg separation
leg_separation = -left_hip_roll + right_hip_roll
inward_penalty = -0.25 * max(0.0, (min_leg_separation - leg_separation) / min_leg_separation)
min_leg_separation = 0.04 # minimum leg separation (prevents the legs from getting too close)
inward_penalty = 0.3 * min(0.0, (left_hip_roll-min_leg_separation)) + 0.3 * min(0.0, (right_hip_roll-min_leg_separation)) # penalize excessive inward collapse of the left/right legs
# Check ankle roll angles to prevent excessive eversion or inversion
max_ankle_roll = 0.15 # maximum allowed ankle roll angle
# Penalize excessive ankle eversion/inversion (absolute value too large)
ankle_roll_penalty = -0.5 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
ankle_roll_penalty = -0.12 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
# Penalize the unstable posture where the two ankles roll in opposite directions
ankle_roll_cross_penalty = -0.3 * max(0.0, -(left_ankle_roll * right_ankle_roll))
ankle_roll_cross_penalty = -0.12 * max(0.0, -(left_ankle_roll * right_ankle_roll))
# Penalize excessive yaw rotation of each thigh separately
max_hip_yaw = 0.5 # maximum allowed yaw angle
left_hip_yaw_penalty = -0.4 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.4 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
# Smart crossed-leg penalty: only penalize while standing; allow crossed legs while turning
yaw_rate = float(np.deg2rad(robot.gyroscope[2]))
yaw_rate_abs = abs(yaw_rate)
max_hip_yaw = 0.2 # maximum allowed yaw angle
left_hip_yaw_penalty = -0.6 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.6 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
# Only penalize crossed legs when the turning rate is small (standing state)
cross_leg_gate = max(0.0, 1.0 - yaw_rate_abs / math.radians(8.0))
hip_yaw_cross_penalty = -1.0 * cross_leg_gate * max(0.0, -(left_hip_yaw * right_hip_yaw)) if left_hip_yaw > 0 and right_hip_yaw < 0 else 0.0
target_vec = self.target_position - current_pos
target_dist = float(np.linalg.norm(target_vec))
if target_dist > 1e-6:
target_heading = math.atan2(float(target_vec[1]), float(target_vec[0]))
robot_heading = math.radians(float(robot.global_orientation_euler[2]))
heading_error = self._wrap_to_pi(target_heading - robot_heading)
heading_align_reward = self.reward_heading_align_scale * math.cos(heading_error)
heading_error_penalty = -self.reward_heading_error_scale * abs(heading_error)
else:
heading_align_reward = 0.0
heading_error_penalty = 0.0
# Torso-lower-body linkage: reward coordinated turning, punish waist-only spinning.
waist_speed = abs(float(joint_speed_rad[10]))
lower_body_speed = float(np.mean(np.abs(joint_speed_rad[11:23])))
lower_body_follow_ratio = lower_body_speed / (waist_speed + 1e-4)
linkage_reward = 0.24 * min(1.0, lower_body_follow_ratio) * min(1.0, waist_speed / 1.2)
waist_only_turn_penalty = -0.20 * max(0.0, waist_speed - 1.35 * lower_body_speed)
# Forward-progress reward (distance delta) with anti-stuck shaping.
progress_reward_raw = self.reward_progress_scale * dist_delta
survival_reward = self.reward_survival_scale
smoothness_penalty = -self.reward_smoothness_scale * float(np.linalg.norm(action - self.last_action_for_reward))
step_displacement = float(np.linalg.norm(current_pos - previous_pos))
accel_signal = 0.0
accel_source = "imu_delta"
accel_now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
if accel_now.shape[0] >= 3:
# Use IMU acceleration delta to reduce gravity bias and punish abrupt bursts.
accel_signal = float(np.linalg.norm(accel_now[:3] - self._prev_accelerometer[:3]))
self._prev_accelerometer = accel_now
accel_penalty = -min(
self.reward_accel_penalty_cap,
self.reward_accel_penalty_scale * accel_signal,
)
accel_abs = float(np.linalg.norm(accel_now[:3])) if accel_now.shape[0] >= 3 else 0.0
accel_abs_over = max(0.0, accel_abs - self.reward_accel_abs_limit)
accel_abs_penalty = -min(
self.reward_accel_abs_penalty_cap,
self.reward_accel_abs_penalty_scale * accel_abs_over,
)
if self.step_counter > 30 and step_displacement < 0.015 and self.target_distance_wp > 0.3:
idle_penalty = -self.reward_idle_penalty_scale
else:
idle_penalty = 0.0
# Extra posture linkage in yaw joints to avoid decoupled torso twist.
waist_yaw = abs(float(joint_pos_rad[10]))
hip_yaw_mean = 0.5 * (abs(float(joint_pos_rad[13])) + abs(float(joint_pos_rad[19])))
yaw_link_reward = 0.12 * math.exp(-abs(waist_yaw - hip_yaw_mean) / 0.22)
if self.step_counter > self.exploration_start_steps:
displacement_novelty = step_displacement / max(1e-6, self.stationary_step_eps)
exploration_bonus = min(
self.exploration_cap,
self.exploration_scale * max(0.0, displacement_novelty - self.exploration_target_novelty),
)
else:
exploration_bonus = 0.0
# Encourage active/varied knee motions early in training without dominating progress reward.
left_knee_act = float(action[14])
right_knee_act = float(action[20])
left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
if self.step_counter > 10:
knee_explore_reward = min(
self.reward_knee_explore_cap,
self.reward_knee_explore_scale * knee_action_mag
+ self.reward_knee_explore_delta_scale * knee_action_delta,
)
else:
knee_explore_reward = 0.0
# Directly encourage observable knee flexion instead of only action exploration.
knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max(
0.0, self.reward_knee_lift_target - avg_knee_flex
)
# Encourage hip-pitch exploration to improve forward stride generation.
left_hip_pitch_act = float(action[11])
right_hip_pitch_act = float(action[17])
left_hip_pitch_delta = abs(left_hip_pitch_act - float(self.last_action_for_reward[11]))
right_hip_pitch_delta = abs(right_hip_pitch_act - float(self.last_action_for_reward[17]))
hip_pitch_action_mag = 0.5 * (abs(left_hip_pitch_act) + abs(right_hip_pitch_act))
hip_pitch_action_delta = 0.5 * (left_hip_pitch_delta + right_hip_pitch_delta)
if self.step_counter > 10:
hip_pitch_explore_reward = min(
self.reward_hip_pitch_explore_cap,
self.reward_hip_pitch_explore_scale * hip_pitch_action_mag
+ self.reward_hip_pitch_explore_delta_scale * hip_pitch_action_delta,
)
else:
hip_pitch_explore_reward = 0.0
if curr_dist_to_target < 0.3:
arrival_bonus = self.target_distance_wp * 8 ## reward for reaching the target point
else:
arrival_bonus = 0.0
target_height = self.initial_height
height_error = height - target_height
height_error = height - target_height
height_penalty = -(math.exp(12*abs(height_error))-1) if height_error > 0.04 else 0
height_penalty = -0.5 * (math.exp(15*abs(height_error))-1)
# # Near the start of compute_reward, add a height rate-of-change computation
# if not hasattr(self, 'last_height'):
# self.last_height = height
# self.last_height_time = self.step_counter # optional, used for time intervals
# height_rate = height - self.last_height # positive = rising, negative = falling
# self.last_height = height
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
# Penalize height decrease (negative rate of change)
# height_down_penalty = -5.0 * max(0, -height_rate) # coefficient is tunable; a positive -height_rate is the descent magnitude
# Gate progress reward when posture quality is poor.
# Important: include leg overextension so upright torso cannot exploit progress reward.
tilt_excess = max(0.0, tilt_mag - self.reward_progress_tilt_gate_start)
knee_gate_excess = max(0.0, self.reward_progress_knee_gate_min - avg_knee_flex)
left_hip_pitch = float(joint_pos[11])
right_hip_pitch = float(joint_pos[17])
left_hip_over = max(0.0, abs(left_hip_pitch) - self.reward_hip_overextend_threshold)
right_hip_over = max(0.0, abs(right_hip_pitch) - self.reward_hip_overextend_threshold)
hip_over_mean = 0.5 * (left_hip_over + right_hip_over)
# # In compute_reward
# if self.step_counter > 50:
# avg_prev_action = np.mean(self.prev_action_history, axis=0)
# novelty = float(np.linalg.norm(action - avg_prev_action))
# exploration_bonus = 0.05 * novelty
# else:
# exploration_bonus = 0
hip_gate_excess = max(0.0, hip_over_mean - self.reward_progress_hip_gate_over)
posture_gate = 1.0 - 1.2 * tilt_excess - 2.0 * knee_gate_excess - 1.8 * hip_gate_excess
posture_gate = float(np.clip(posture_gate, self.reward_progress_gate_floor, 1.0))
progress_reward = progress_reward_raw * posture_gate
# self.prev_action_history[self.history_idx] = action
# self.history_idx = (self.history_idx + 1) % 50
knee_straight_penalty = -self.reward_knee_straight_penalty_scale * max(
0.0, self.reward_knee_straight_threshold - avg_knee_flex
)
hip_overextend_penalty = -self.reward_hip_overextend_penalty_scale * (left_hip_over + right_hip_over)
# Penalize over-stretched legs even if torso stays upright.
stretch_amount = left_hip_over + right_hip_over
straight_amount = max(0.0, self.reward_knee_straight_threshold - avg_knee_flex)
leg_stretch_penalty = -self.reward_leg_stretch_penalty_scale * stretch_amount * (1.0 + 2.5 * straight_amount)
# Keep extra combo penalty, but no longer vanish when torso is upright.
stretch_lean_combo_penalty = -self.reward_stretch_lean_combo_scale * (0.5 + tilt_mag) * stretch_amount * (1.0 + 3.0 * straight_amount)
posture_penalty = -0.6 * (tilt_mag)
total = (
# progress_reward +
alive_bonus +
head_toward_bonus +
heading_progress_reward +
# lateral_penalty +
# action_penalty +
smoothness_penalty +
posture_penalty
+ ang_vel_penalty
+ height_penalty
+ ankle_roll_penalty
+ ankle_roll_cross_penalty
+ split_penalty
+ inward_penalty
# + leg_proximity_penalty
+ left_hip_yaw_penalty
+ right_hip_yaw_penalty
+ hip_yaw_cross_penalty
+ position_penalty
# + linkage_reward
# + waist_only_turn_penalty
# + yaw_link_reward
# + stance_collapse_penalty
# + hip_yaw_yaw_cross_penalty
# + stance_collapse_penalty
# + cross_leg_penalty
# + exploration_bonus
# + height_down_penalty
)
# print(height_error, height_penalty)
progress_reward
+ survival_reward
+ smoothness_penalty
+ accel_penalty
+ accel_abs_penalty
+ idle_penalty
+ split_penalty
+ inward_penalty
+ ankle_roll_penalty
+ ankle_roll_cross_penalty
+ left_hip_yaw_penalty
+ right_hip_yaw_penalty
+ heading_align_reward
+ heading_error_penalty
# + knee_straight_penalty
+ hip_overextend_penalty
+ leg_stretch_penalty
+ stretch_lean_combo_penalty
# + exploration_bonus
# + knee_explore_reward
# + knee_lift_shortfall_penalty
# + hip_pitch_explore_reward
+ arrival_bonus
+ height_penalty
+ posture_penalty
)
now = time.time()
if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
@@ -606,35 +732,38 @@ class WalkEnv(gym.Env):
if self._reward_debug_steps_left > 0:
self._reward_debug_steps_left -= 1
self.debug_log(
f"height_penalty:{height_penalty:.4f},"
f"progress_reward:{progress_reward:.4f},"
f"survival_reward:{survival_reward:.4f},"
f"smoothness_penalty:{smoothness_penalty:.4f},"
f"posture_penalty:{posture_penalty:.4f},"
f"heading_progress_reward:{heading_progress_reward:.4f},"
# f"stance_collapse_penalty:{stance_collapse_penalty:.4f},"
# f"cross_leg_penalty:{cross_leg_penalty:.4f},"
f"ang_vel_penalty:{ang_vel_penalty:.4f},"
f"accel_penalty:{accel_penalty:.4f},"
f"accel_source:{accel_source},"
f"accel_signal:{accel_signal:.4f},"
f"accel_abs:{accel_abs:.4f},"
f"accel_abs_penalty:{accel_abs_penalty:.4f},"
f"idle_penalty:{idle_penalty:.4f},"
f"split_penalty:{split_penalty:.4f},"
f"inward_penalty:{inward_penalty:.4f},"
f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
f"hip_yaw_cross_penalty:{hip_yaw_cross_penalty:.4f},"
f"inward_penalty:{inward_penalty:.4f},"
f"position_penalty:{position_penalty:.4f},"
# f"linkage_reward:{linkage_reward:.4f},"
# f"waist_only_turn_penalty:{waist_only_turn_penalty:.4f},"
# f"yaw_link_reward:{yaw_link_reward:.4f}"
# f"leg_proximity_penalty:{leg_proximity_penalty:.4f},"
# f"stance_collapse_penalty:{stance_collapse_penalty:.4f},"
# f"hip_yaw_yaw_cross_penalty:{hip_yaw_yaw_cross_penalty:.4f},"
# f"height_down_penalty:{height_down_penalty:.4f}",
# f"exploration_bonus:{exploration_bonus:.4f}"
f"alive_bonus:{alive_bonus:.4f},"
f"abs_yaw_error:{abs_yaw_error:.4f}"
f"heading_align_reward:{heading_align_reward:.4f},"
f"heading_error_penalty:{heading_error_penalty:.4f},"
f"knee_straight_penalty:{knee_straight_penalty:.4f},"
f"hip_overextend_penalty:{hip_overextend_penalty:.4f},"
f"leg_stretch_penalty:{leg_stretch_penalty:.4f},"
f"stretch_lean_combo_penalty:{stretch_lean_combo_penalty:.4f},"
f"posture_gate:{posture_gate:.4f},"
f"progress_reward_raw:{progress_reward_raw:.4f},"
# f"exploration_bonus:{exploration_bonus:.4f},"
f"height_penalty:{height_penalty:.4f},"
# f"knee_explore_reward:{knee_explore_reward:.4f},"
f"posture_penalty:{posture_penalty:.4f},"
# f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
# f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
f"arrival_bonus:{arrival_bonus:.4f},"
f"total:{total:.4f}"
)
# print(f"abs_yaw_error:{abs_yaw_error:.4f}")
)
return total
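Worked check of the new heading terms (illustrative numbers, not from the diff): with reward_heading_align_scale = 0.28 and reward_heading_error_scale = 0.05,

import math
heading_error = math.radians(60.0)
heading_align_reward = 0.28 * math.cos(heading_error)   # 0.14
heading_error_penalty = -0.05 * abs(heading_error)      # about -0.052

so a robot facing 60 degrees away from the target keeps only half of the alignment reward, while a perfectly aligned robot receives the full 0.28 and no penalty.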
@@ -645,20 +774,26 @@ class WalkEnv(gym.Env):
max_action_delta = 0.5  # Limit how much the action can change from the previous step to encourage smoother motions.
if self.previous_action is not None:
action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
# Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist.
action[0:2] = 0
action[3] = 4
action[7] = -4
action[2] = 0
action[6] = 0
action[3] = np.clip(action[3], 3, 5)
action[7] = np.clip(action[7], -5, -3)
action[2] = np.clip(action[2], -6, 6)
action[6] = np.clip(action[6], -6, 6)
action[4] = 0
action[5] = -5
action[5] = np.clip(action[5], -8, -2)
action[8] = 0
action[9] = 5
action[10] = 0
action[11] = np.clip(action[11], -0.7, 0.7)
action[17] = np.clip(action[17], -0.7, 0.7)
# action[12] = -1.0
# action[18] = 1.0
action[9] = np.clip(action[9], 2, 8)
action[10] = np.clip(action[10], -0.6, 0.6)
# Boost knee command range so policy can produce visible knee flexion earlier.
action[14] = np.clip(action[14], 0, 10.0)
action[20] = np.clip(action[20], -10.0, 0)
# action[14] = 1 # the correct left knee sign
# action[20] = -1 # the correct right knee sign
# action[11] = 2
# action[17] = -2
# action[12] = -1
# action[18] = 1
# action[13] = -1.0
# action[19] = 1.0
self.previous_action = action.copy()
@@ -671,7 +806,7 @@ class WalkEnv(gym.Env):
for idx, target in enumerate(self.target_joint_positions):
r.set_motor_target_position(
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=80, kd=4.67
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=60, kd=1.2
)
self.previous_action = action.copy()
@@ -684,16 +819,30 @@ class WalkEnv(gym.Env):
current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
if self.step_counter % 10 == 0:
self.previous_pos = current_pos.copy()
# Compute reward based on movement from previous step
reward = self.compute_reward(self.previous_pos, current_pos, action)
self.previous_pos = current_pos.copy()
self.prev_action_history[self.history_idx] = action.copy()
self.history_idx = (self.history_idx + 1) % self.action_history_len
self.last_action_for_reward = action.copy()
# Check if current waypoint is reached
if self.train_stage != "in_place":
dist_to_waypoint = float(np.linalg.norm(current_pos - self.target_position))
if dist_to_waypoint < self.waypoint_reach_distance:
# Move to next waypoint
self.waypoint_index += 1
if self.waypoint_index >= len(self.point_list):
# All waypoints completed
self.route_completed = True
else:
# Update target to next waypoint
self.target_position = self.point_list[self.waypoint_index]
# Fall detection and penalty
is_fallen = self.Player.world.global_position[2] < 0.55
is_fallen = self.Player.world.global_position[2] < 0.45
# terminal state: the robot is falling or timeout
terminated = is_fallen or self.step_counter > 800 or self.route_completed
@@ -709,15 +858,22 @@ class Train(Train_Base):
def train(self, args):
# --------------------------------------- Learning parameters
n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
if n_envs < 1:
raise ValueError("GYM_CPU_N_ENVS must be >= 1")
server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "512")) # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512")) # should be a factor of (n_steps_per_env * n_envs)
total_steps = 30000000
learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
folder_name = f'Turn_R{self.robot_type}'
n_envs = 20
server_warmup_sec = 3.0
n_steps_per_env = 256 # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = 512 # should be a factor of (n_steps_per_env * n_envs)
total_steps = 90000000
learning_rate = 2e-4
ent_coef = 0.035
clip_range = 0.2
gamma = 0.97
n_epochs = 3
enable_eval = True
monitor_train_env = False
eval_freq_mult = 60
save_freq_mult = 60
eval_eps = 7
folder_name = f'Walk_R{self.robot_type}'
model_path = f'./scripts/gyms/logs/{folder_name}/'
print(f"Model path: {model_path}")
@@ -733,22 +889,26 @@ class Train(Train_Base):
return thunk
server_log_dir = os.path.join(model_path, "server_logs")
os.makedirs(server_log_dir, exist_ok=True)
servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True) # include 1 extra server for testing
# Wait for servers to start
print(f"Starting {n_envs + 1} rcssservermj servers...")
if server_warmup_sec > 0:
print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
sleep(server_warmup_sec)
print("Servers started, creating environments...")
env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)], start_method="spawn")
# Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
env = None
eval_env = None
servers = None
try:
server_log_dir = os.path.join(model_path, "server_logs")
os.makedirs(server_log_dir, exist_ok=True)
servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True) # include 1 extra server for testing
# Wait for servers to start
print(f"Starting {n_envs + 1} rcssservermj servers...")
if server_warmup_sec > 0:
print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
sleep(server_warmup_sec)
print("Servers started, creating environments...")
env = SubprocVecEnv([init_env(i, monitor=monitor_train_env) for i in range(n_envs)], start_method="spawn")
# Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
if enable_eval:
eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
# Custom policy network architecture
policy_kwargs = dict(
net_arch=dict(
@@ -771,35 +931,39 @@ class Train(Train_Base):
learning_rate=learning_rate,
device="cpu",
policy_kwargs=policy_kwargs,
ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")), # Entropy coefficient for exploration
clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")), # PPO clipping parameter
ent_coef=ent_coef, # Entropy coefficient for exploration
clip_range=clip_range, # PPO clipping parameter
gae_lambda=0.95, # GAE lambda
gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")), # Discount factor
gamma=gamma, # Discount factor
# target_kl=0.03,
n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
n_epochs=n_epochs,
tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
)
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=7,
eval_freq=n_steps_per_env * max(1, eval_freq_mult),
save_freq=n_steps_per_env * max(1, save_freq_mult),
eval_eps=max(1, eval_eps),
backup_env_file=__file__)
except KeyboardInterrupt:
sleep(1) # wait for child processes
print("\nctrl+c pressed, aborting...\n")
servers.kill()
return
env.close()
eval_env.close()
servers.kill()
finally:
if env is not None:
env.close()
if eval_env is not None:
eval_env.close()
if servers is not None:
servers.kill()
def test(self, args):
# Uses different server and monitor ports
server_log_dir = os.path.join(args["folder_dir"], "server_logs")
os.makedirs(server_log_dir, exist_ok=True)
test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"
test_no_render = False
test_no_realtime = False
server = Train_Server(
self.server_p - 1,

Binary file not shown.

View File

@@ -0,0 +1,968 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform
from itertools import count
from stable_baselines3 import PPO, TD3, DDPG, SAC, A2C
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
import gymnasium as gym
from gymnasium import spaces
from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server
from agent.base_agent import Base_Agent
from utils.math_ops import MathOps
from scipy.spatial.transform import Rotation as R
'''
Objective:
Learn how to run forward using step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''
class WalkEnv(gym.Env):
def __init__(self, ip, server_p) -> None:
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
self.Player = player = Base_Agent(
team_name="Gym",
number=1,
host=ip,
port=server_p
)
self.robot_type = self.Player.robot
self.step_counter = 0 # to limit episode size
self.force_play_on = True
self.target_position = np.array([0.0, 0.0]) # target position in the x-y plane
self.initial_position = np.array([0.0, 0.0]) # initial position in the x-y plane
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
self.isfallen = False
self.waypoint_index = 0
self.route_completed = False
self.debug_every_n_steps = 5
self.enable_debug_joint_status = False
self.reward_debug_interval_sec = 600.0
self.reward_debug_burst_steps = 10
self._reward_debug_last_time = time.time()
self._reward_debug_steps_left = 0
self.calibrate_nominal_from_neutral = True
self.auto_calibrate_train_sim_flip = True
self.nominal_calibrated_once = False
self.flip_calibrated_once = False
self._target_hz = 0.0
self._target_dt = 0.0
self._last_sync_time = None
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.zeros(3, dtype=np.float32)
self._speed_smoothing = 0.85
self._fallback_dt = 0.02
target_hz_env = 0
if target_hz_env:
try:
self._target_hz = float(target_hz_env)
except ValueError:
self._target_hz = 0.0
if self._target_hz > 0.0:
self._target_dt = 1.0 / self._target_hz
# State space
# Raw observation size: 78
obs_size = 78
self.obs = np.zeros(obs_size, np.float32)
self.observation_space = spaces.Box(
low=-10.0,
high=10.0,
shape=(obs_size,),
dtype=np.float32
)
action_dim = len(self.Player.robot.ROBOT_MOTORS)
self.no_of_actions = action_dim
self.action_space = spaces.Box(
low=-10.0,
high=10.0,
shape=(action_dim,),
dtype=np.float32
)
# Neutral posture
self.joint_nominal_position = np.array(
[
0.0, # 0: Head_yaw (he1)
0.0, # 1: Head_pitch (he2)
0.0, # 2: Left_Shoulder_Pitch (lae1)
0.0, # 3: Left_Shoulder_Roll (lae2)
0.0, # 4: Left_Elbow_Pitch (lae3)
0.0, # 5: Left_Elbow_Yaw (lae4)
0.0, # 6: Right_Shoulder_Pitch (rae1)
0.0, # 7: Right_Shoulder_Roll (rae2)
0.0, # 8: Right_Elbow_Pitch (rae3)
0.0, # 9: Right_Elbow_Yaw (rae4)
0.0, # 10: Waist (te1)
0.0, # 11: Left_Hip_Pitch (lle1)
0.0, # 12: Left_Hip_Roll (lle2)
1.0, # 13: Left_Hip_Yaw (lle3)
0.0, # 14: Left_Knee_Pitch (lle4)
0.0, # 15: Left_Ankle_Pitch (lle5)
0.0, # 16: Left_Ankle_Roll (lle6)
0.0, # 17: Right_Hip_Pitch (rle1)
0.0, # 18: Right_Hip_Roll (rle2)
1.0, # 19: Right_Hip_Yaw (rle3)
0.0, # 20: Right_Knee_Pitch (rle4)
0.0, # 21: Right_Ankle_Pitch (rle5)
0.0, # 22: Right_Ankle_Roll (rle6)
]
)
# self.joint_nominal_position = np.zeros(self.no_of_actions)
self.train_sim_flip = np.array(
[
1.0, # 0: Head_yaw (he1)
-1.0, # 1: Head_pitch (he2)
1.0, # 2: Left_Shoulder_Pitch (lae1)
-1.0, # 3: Left_Shoulder_Roll (lae2)
-1.0, # 4: Left_Elbow_Pitch (lae3)
1.0, # 5: Left_Elbow_Yaw (lae4)
-1.0, # 6: Right_Shoulder_Pitch (rae1)
-1.0, # 7: Right_Shoulder_Roll (rae2)
1.0, # 8: Right_Elbow_Pitch (rae3)
1.0, # 9: Right_Elbow_Yaw (rae4)
1.0, # 10: Waist (te1)
1.0, # 11: Left_Hip_Pitch (lle1)
-1.0, # 12: Left_Hip_Roll (lle2)
-1.0, # 13: Left_Hip_Yaw (lle3)
1.0, # 14: Left_Knee_Pitch (lle4)
1.0, # 15: Left_Ankle_Pitch (lle5)
-1.0, # 16: Left_Ankle_Roll (lle6)
-1.0, # 17: Right_Hip_Pitch (rle1)
-1.0, # 18: Right_Hip_Roll (rle2)
-1.0, # 19: Right_Hip_Yaw (rle3)
-1.0, # 20: Right_Knee_Pitch (rle4)
-1.0, # 21: Right_Ankle_Pitch (rle5)
-1.0, # 22: Right_Ankle_Roll (rle6)
]
)
self.scaling_factor = 0.5
# self.scaling_factor = 1
# Encourage a minimum lateral stance so the policy avoids feet overlap.
self.min_stance_rad = 0.10
# Small reset perturbations for robustness training.
self.enable_reset_perturb = False
self.reset_beam_yaw_range_deg = 180.0
self.reset_target_bearing_range_deg = 0.0
self.reset_target_distance_min = 5
self.reset_target_distance_max = 10
if self.reset_target_distance_min > self.reset_target_distance_max:
self.reset_target_distance_min, self.reset_target_distance_max = (
self.reset_target_distance_max,
self.reset_target_distance_min,
)
self.reset_joint_noise_rad = 0.025
self.reset_perturb_steps = 4
self.reset_recover_steps = 8
self.reward_smoothness_scale = 0.03
self.reward_smoothness_cap = 0.45
self.reward_forward_stability_gate = 0.35
self.reward_forward_tilt_hard_threshold = 0.50
self.reward_forward_tilt_hard_scale = 0.20
self.reward_head_toward_bonus = 1.0
self.turn_stationary_radius = 0.2
self.turn_stationary_penalty_scale = 3.0
self.stationary_start_steps = 20
self.stationary_step_eps = 0.015
self.stationary_penalty_scale = 1.2
self.train_stage = "walk"
self.in_place_radius = 0.18
self.in_place_center_reward_scale = 0.60
self.in_place_drift_penalty_scale = 1.20
self.waypoint_reach_distance = 0.3
self.num_waypoints = 1
self.exploration_start_steps = 40
self.exploration_scale = 0.012
self.exploration_cap = 0.2
self.exploration_target_novelty = 1.0
self.exploration_sigma = 0.7
self.reward_stride_swing_scale = 0.20
self.reward_stride_phase_scale = 0.18
self.reward_knee_drive_scale = 0.10
self.reward_knee_lift_scale = 0.12
self.reward_knee_lift_target = 0.15
self.reward_knee_lift_shortfall_scale = 0.05
self.reward_knee_overbend_threshold = 0.60
self.reward_knee_overbend_scale = 0.35
self.reward_hip_lift_scale = 0.12
self.reward_hip_lift_target = 0.80
self.reward_knee_alternate_scale = 0.10
self.reward_knee_bilateral_scale = 0.16
self.reward_single_leg_penalty_scale = 0.22
self.reward_knee_phase_switch_scale = 0.14
self.knee_phase_deadband = 0.10
self.knee_phase_min_interval = 18
self.knee_phase_target_interval = 22
self.knee_phase_fast_switch_penalty_scale = 0.10
self.knee_phase_max_hold_frames = 28
self.knee_phase_hold_penalty_scale = 0.18
self.reward_stride_cap = 0.80
self.reward_knee_explore_scale = 0.03
self.reward_knee_explore_delta_scale = 0.03
self.reward_knee_explore_cap = 0.10
self.reward_hip_pitch_explore_scale = 0.07
self.reward_hip_pitch_explore_delta_scale = 0.07
self.reward_hip_pitch_explore_cap = 0.10
self.reward_progress_scale = 18
self.reward_survival_scale = 0.5
self.reward_idle_penalty_scale = 0.6
self.reward_accel_penalty_scale = 0.08
self.reward_accel_penalty_cap = 0.40
self.reward_accel_abs_limit = 13.5
self.reward_accel_abs_penalty_scale = 0.05
self.reward_accel_abs_penalty_cap = 0.40
self.reward_heading_align_scale = 0.28
self.reward_heading_error_scale = 0.05
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.action_history_len = 50
self.prev_action_history = np.zeros((self.action_history_len, self.no_of_actions), dtype=np.float32)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0]) # Track previous position
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.Player.server.connect()
# sleep(2.0) # Longer wait for connection to establish completely
self.Player.server.send_immediate(
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
)
self.start_time = time.time()
def _reconnect_server(self):
try:
self.Player.server.shutdown()
except Exception:
pass
self.Player.server.connect()
self.Player.server.send_immediate(
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
)
def _safe_receive_world_update(self, retries=1):
last_exc = None
for attempt in range(retries + 1):
try:
self.Player.server.receive()
self.Player.world.update()
return
except (ConnectionResetError, OSError) as exc:
last_exc = exc
if attempt >= retries:
raise
self._reconnect_server()
if last_exc is not None:
raise last_exc
def debug_log(self, message):
print(message)
try:
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
with open(log_path, "a", encoding="utf-8") as f:
f.write(message + "\n")
except OSError:
pass
@staticmethod
def _wrap_to_pi(angle_rad: float) -> float:
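"""Wrap an angle in radians into [-pi, pi); e.g. 3*pi/2 maps to -pi/2."""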
return (angle_rad + math.pi) % (2.0 * math.pi) - math.pi
def observe(self, init=False):
"""获取当前观测值"""
robot = self.Player.robot
world = self.Player.world
# Safety check: ensure data is available
# Compute the target velocity
raw_target = self.target_position - world.global_position[:2]
velocity = MathOps.rotate_2d_vec(
raw_target,
-robot.global_orientation_euler[2],
is_rad=False
)
# Compute the relative heading
rel_orientation = MathOps.vector_angle(velocity) * 0.3
rel_orientation = np.clip(rel_orientation, -0.25, 0.25)
velocity = np.concatenate([velocity, np.array([rel_orientation])])
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
# Joint state
radian_joint_positions = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
radian_joint_speeds = np.deg2rad(
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
)
qpos_qvel_previous_action = np.concatenate([
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
radian_joint_speeds / 110.0 * self.train_sim_flip,
self.previous_action / 10.0,
])
# Angular velocity
ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)
# Projected gravity direction
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
# Combine the observation components
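# Layout: 23 joint positions + 23 joint speeds + 23 previous actions + 3 gyro + 3 target velocity/heading + 3 projected gravity = 78 dims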
observation = np.concatenate([
qpos_qvel_previous_action,
ang_vel,
velocity,
projected_gravity,
])
observation = np.clip(observation, -10.0, 10.0)
return observation.astype(np.float32)
def sync(self):
''' Run a single simulation step '''
self._safe_receive_world_update(retries=1)
self.Player.robot.commit_motor_targets_pd()
self.Player.server.send()
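# Optional fixed-rate pacing: when _target_hz > 0, sleep so consecutive syncs are roughly 1/_target_hz seconds apart.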
if self._target_dt > 0.0:
now = time.time()
if self._last_sync_time is None:
self._last_sync_time = now
return
elapsed = now - self._last_sync_time
remaining = self._target_dt - elapsed
if remaining > 0.0:
time.sleep(remaining)
now = time.time()
self._last_sync_time = now
def debug_joint_status(self):
robot = self.Player.robot
actual_joint_positions = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
)
target_joint_positions = getattr(
self,
'target_joint_positions',
np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
)
joint_error = actual_joint_positions - target_joint_positions
leg_slice = slice(11, None)
self.debug_log(
"[WalkDebug] "
f"step={self.step_counter} "
f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
f"target_xy={np.round(self.target_position, 3).tolist()} "
f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
f"fallen={self.Player.world.global_position[2] < 0.3}"
)
print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
def reset(self, seed=None, options=None):
'''
Reset and stabilize the robot
Note: for some behaviors it would be better to reduce stabilization or add noise
'''
r = self.Player.robot
super().reset(seed=seed)
if seed is not None:
np.random.seed(seed)
target_distance = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
target_bearing_deg = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
self.step_counter = 0
self.waypoint_index = 0
self.route_completed = False
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.prev_action_history.fill(0.0)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0]) # Initialize for first step
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.walk_cycle_step = 0
self._reward_debug_steps_left = 0
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._prev_accelerometer = np.array(
getattr(self.Player.robot, "accelerometer", np.zeros(3)),
dtype=np.float32,
)
# Randomize the beam target position and heading to increase training diversity
beam_x = (random() - 0.5) * 10
beam_y = (random() - 0.5) * 10
beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)
for _ in range(5):
self._safe_receive_world_update(retries=2)
self.Player.robot.commit_motor_targets_pd()
self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
self.Player.server.send()
# Execute the Neutral skill until it finishes, giving the robot enough time to stand stably at the beam position
finished_count = 0
for _ in range(50):
finished = self.Player.skills_manager.execute("Neutral")
self.sync()
if finished:
finished_count += 1
if finished_count >= 20: # assume it must finish 20 times in a row to count as success
break
if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
# Perturb waist + lower body only (10:), keep head/arms stable.
perturb_action[10:] = np.random.uniform(
-self.reset_joint_noise_rad,
self.reset_joint_noise_rad,
size=(self.no_of_actions - 10,)
)
for _ in range(self.reset_perturb_steps):
target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
for idx, target in enumerate(target_joint_positions):
r.set_motor_target_position(
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
)
self.sync()
for i in range(self.reset_recover_steps):
# Linearly fade perturbation to help policy start from near-neutral.
alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
for idx, target in enumerate(target_joint_positions):
r.set_motor_target_position(
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
)
self.sync()
# memory variables
self.sync()
self.initial_position = np.array(self.Player.world.global_position[:2])
self.previous_pos = self.initial_position.copy() # Critical: set to actual position
self.act = np.zeros(self.no_of_actions, np.float32)
# Generate multiple waypoints along a path
heading_deg = float(r.global_orientation_euler[2])
self.point_list = []
current_point = self.initial_position.copy()
for i in range(self.num_waypoints):
# Each waypoint is placed further along the path
target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
self.target_distance_wp = target_distance_wp
target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
target_offset = MathOps.rotate_2d_vec(
np.array([target_distance_wp, 0.0]),
heading_deg + target_bearing_deg_wp,
is_rad=False,
)
next_point = current_point + target_offset
self.point_list.append(next_point)
current_point = next_point.copy()
self.target_position = self.point_list[self.waypoint_index]
if self.train_stage == "in_place":
self.target_position = self.initial_position.copy()
self.initial_height = self.Player.world.global_position[2]
return self.observe(True), {}
def render(self, mode='human', close=False):
return
def compute_reward(self, previous_pos, current_pos, action):
height = float(self.Player.world.global_position[2])
robot = self.Player.robot
prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
dist_delta = prev_dist_to_target - curr_dist_to_target
is_fallen = height < 0.55
if is_fallen:
return -2.0
joint_pos = np.deg2rad(
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
) * self.train_sim_flip
left_hip_roll = -float(joint_pos[12])
right_hip_roll = float(joint_pos[18])
left_ankle_roll = -float(joint_pos[16])
right_ankle_roll = float(joint_pos[22])
left_knee_flex = abs(float(joint_pos[14]))
right_knee_flex = abs(float(joint_pos[20]))
avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex)
max_leg_roll = 0.5 # prevent a splits posture
split_penalty = -0.12 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
left_hip_yaw = -float(joint_pos[13])
right_hip_yaw = float(joint_pos[19])
min_leg_separation = 0.04 # minimum leg separation (prevents the legs from getting too close)
inward_penalty = 0.3 * min(0.0, (left_hip_roll-min_leg_separation)) + 0.3 * min(0.0, (right_hip_roll-min_leg_separation)) # penalize excessive inward collapse of the left/right legs
# Check ankle roll angles to prevent excessive eversion or inversion
max_ankle_roll = 0.15 # maximum allowed ankle roll angle
# Penalize excessive ankle eversion/inversion (absolute value too large)
ankle_roll_penalty = -0.12 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
# Penalize the unstable posture where the two ankles roll in opposite directions
ankle_roll_cross_penalty = -0.12 * max(0.0, -(left_ankle_roll * right_ankle_roll))
# Penalize excessive yaw rotation of each thigh separately.
max_hip_yaw = 0.2 # maximum allowed hip yaw angle
left_hip_yaw_penalty = -0.6 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
right_hip_yaw_penalty = -0.6 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
target_vec = self.target_position - current_pos
target_dist = float(np.linalg.norm(target_vec))
if target_dist > 1e-6:
target_heading = math.atan2(float(target_vec[1]), float(target_vec[0]))
robot_heading = math.radians(float(robot.global_orientation_euler[2]))
heading_error = self._wrap_to_pi(target_heading - robot_heading)
heading_align_reward = self.reward_heading_align_scale * math.cos(heading_error)
heading_error_penalty = -self.reward_heading_error_scale * abs(heading_error)
else:
heading_align_reward = 0.0
heading_error_penalty = 0.0
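# With these two terms, a robot facing the waypoint (heading_error = 0) receives the
# full alignment reward and no error penalty; at 90 degrees off, the cosine term drops
# to zero while the linear penalty grows with |heading_error|.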
# Forward-progress reward (distance delta) with anti-stuck shaping.
progress_reward = self.reward_progress_scale * dist_delta
survival_reward = self.reward_survival_scale
smoothness_penalty = -self.reward_smoothness_scale * float(np.linalg.norm(action - self.last_action_for_reward))
step_displacement = float(np.linalg.norm(current_pos - previous_pos))
accel_signal = 0.0
accel_source = "imu_delta"
accel_now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
if accel_now.shape[0] >= 3:
# Use IMU acceleration delta to reduce gravity bias and punish abrupt bursts.
accel_signal = float(np.linalg.norm(accel_now[:3] - self._prev_accelerometer[:3]))
self._prev_accelerometer = accel_now
accel_penalty = -min(
self.reward_accel_penalty_cap,
self.reward_accel_penalty_scale * accel_signal,
)
accel_abs = float(np.linalg.norm(accel_now[:3])) if accel_now.shape[0] >= 3 else 0.0
accel_abs_over = max(0.0, accel_abs - self.reward_accel_abs_limit)
accel_abs_penalty = -min(
self.reward_accel_abs_penalty_cap,
self.reward_accel_abs_penalty_scale * accel_abs_over,
)
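# Together these two terms penalize both jerky changes in the measured acceleration and
# sustained readings above reward_accel_abs_limit, each capped so neither can dominate
# the progress reward.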
if self.step_counter > 30 and step_displacement < 0.015 and self.target_distance_wp > 0.3:
idle_penalty = -self.reward_idle_penalty_scale
else:
idle_penalty = 0.0
if self.step_counter > self.exploration_start_steps:
displacement_novelty = step_displacement / max(1e-6, self.stationary_step_eps)
exploration_bonus = min(
self.exploration_cap,
self.exploration_scale * max(0.0, displacement_novelty - self.exploration_target_novelty),
)
else:
exploration_bonus = 0.0
# Encourage active/varied knee motions early in training without dominating progress reward.
left_knee_act = float(action[14])
right_knee_act = float(action[20])
left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
if self.step_counter > 10:
knee_explore_reward = min(
self.reward_knee_explore_cap,
self.reward_knee_explore_scale * knee_action_mag
+ self.reward_knee_explore_delta_scale * knee_action_delta,
)
else:
knee_explore_reward = 0.0
# Directly encourage observable knee flexion instead of only action exploration.
knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max(
0.0, self.reward_knee_lift_target - avg_knee_flex
)
# Encourage hip-pitch exploration to improve forward stride generation.
left_hip_pitch_act = float(action[11])
right_hip_pitch_act = float(action[17])
left_hip_pitch_delta = abs(left_hip_pitch_act - float(self.last_action_for_reward[11]))
right_hip_pitch_delta = abs(right_hip_pitch_act - float(self.last_action_for_reward[17]))
hip_pitch_action_mag = 0.5 * (abs(left_hip_pitch_act) + abs(right_hip_pitch_act))
hip_pitch_action_delta = 0.5 * (left_hip_pitch_delta + right_hip_pitch_delta)
if self.step_counter > 10:
hip_pitch_explore_reward = min(
self.reward_hip_pitch_explore_cap,
self.reward_hip_pitch_explore_scale * hip_pitch_action_mag
+ self.reward_hip_pitch_explore_delta_scale * hip_pitch_action_delta,
)
else:
hip_pitch_explore_reward = 0.0
if curr_dist_to_target < 0.3:
arrival_bonus = self.target_distance_wp * 8 # bonus for reaching the target waypoint
else:
arrival_bonus = 0.0
target_height = self.initial_height
height_error = height - target_height
height_penalty = -0.5 * (math.exp(15*abs(height_error))-1)
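# Worked example: a 5 cm height error costs roughly -0.5 * (e^0.75 - 1) ≈ -0.56,
# while a 10 cm error already costs about -1.74, so the exponential keeps the torso
# close to its initial height.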
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
posture_penalty = -0.6 * (tilt_mag)
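# projected_gravity is the gravity direction expressed in the torso frame, so tilt_mag
# is 0 for an upright torso and approaches 1 when the robot is horizontal; the posture
# penalty therefore ranges from 0 down to about -0.6.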
total = (
progress_reward
+ survival_reward
+ smoothness_penalty
+ accel_penalty
+ accel_abs_penalty
+ idle_penalty
+ split_penalty
+ inward_penalty
+ ankle_roll_penalty
+ ankle_roll_cross_penalty
+ left_hip_yaw_penalty
+ right_hip_yaw_penalty
+ heading_align_reward
+ heading_error_penalty
# + exploration_bonus
# + knee_explore_reward
# + knee_lift_shortfall_penalty
# + hip_pitch_explore_reward
+ arrival_bonus
+ height_penalty
+ posture_penalty
)
now = time.time()
if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
self._reward_debug_last_time = now
self._reward_debug_steps_left = max(1, self.reward_debug_burst_steps)
if self._reward_debug_steps_left > 0:
self._reward_debug_steps_left -= 1
self.debug_log(
f"progress_reward:{progress_reward:.4f},"
f"survival_reward:{survival_reward:.4f},"
f"smoothness_penalty:{smoothness_penalty:.4f},"
f"accel_penalty:{accel_penalty:.4f},"
f"accel_source:{accel_source},"
f"accel_signal:{accel_signal:.4f},"
f"accel_abs:{accel_abs:.4f},"
f"accel_abs_penalty:{accel_abs_penalty:.4f},"
f"idle_penalty:{idle_penalty:.4f},"
f"split_penalty:{split_penalty:.4f},"
f"inward_penalty:{inward_penalty:.4f},"
f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
f"heading_align_reward:{heading_align_reward:.4f},"
f"heading_error_penalty:{heading_error_penalty:.4f},"
# f"exploration_bonus:{exploration_bonus:.4f},"
f"height_penalty:{height_penalty:.4f},"
# f"knee_explore_reward:{knee_explore_reward:.4f},"
f"posture_penalty:{posture_penalty:.4f},"
# f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
# f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
f"arrival_bonus:{arrival_bonus:.4f},"
f"total:{total:.4f}"
)
return total
def step(self, action):
r = self.Player.robot
max_action_delta = 0.5 # Limit how much the action can change from the previous step to encourage smoother motions.
if self.previous_action is not None:
action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
# Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist.
action[0:2] = 0
action[3] = np.clip(action[3], 3, 5)
action[7] = np.clip(action[7], -5, -3)
action[2] = np.clip(action[2], -6, 6)
action[6] = np.clip(action[6], -6, 6)
action[4] = 0
action[5] = np.clip(action[5], -8, -2)
action[8] = 0
action[9] = np.clip(action[9], 2, 8)
action[10] = np.clip(action[10], -0.6, 0.6)
# Boost knee command range so policy can produce visible knee flexion earlier.
action[14] = np.clip(action[14], 0, 10.0)
action[20] = np.clip(action[20], -10.0, 0)
# action[14] = 1 # the correct left knee sign
# action[20] = -1 # the correct right knee sign
# action[11] = 1
# action[17] = 1
# action[12] = -1
# action[18] = 1
# action[13] = -1.0
# action[19] = 1.0
self.previous_action = action.copy()
self.target_joint_positions = (
# self.joint_nominal_position +
self.scaling_factor * action
)
self.target_joint_positions *= self.train_sim_flip
for idx, target in enumerate(self.target_joint_positions):
r.set_motor_target_position(
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=60, kd=1.2
)
self.sync() # run simulation step
self.step_counter += 1
if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
self.debug_joint_status()
current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
# Compute reward based on movement from previous step
reward = self.compute_reward(self.previous_pos, current_pos, action)
self.previous_pos = current_pos.copy()
self.prev_action_history[self.history_idx] = action.copy()
self.history_idx = (self.history_idx + 1) % self.action_history_len
self.last_action_for_reward = action.copy()
# Check if current waypoint is reached
if self.train_stage != "in_place":
dist_to_waypoint = float(np.linalg.norm(current_pos - self.target_position))
if dist_to_waypoint < self.waypoint_reach_distance:
# Move to next waypoint
self.waypoint_index += 1
if self.waypoint_index >= len(self.point_list):
# All waypoints completed
self.route_completed = True
else:
# Update target to next waypoint
self.target_position = self.point_list[self.waypoint_index]
# Fall detection and penalty
is_fallen = self.Player.world.global_position[2] < 0.55
# terminal state: the robot is falling or timeout
terminated = is_fallen or self.step_counter > 800 or self.route_completed
truncated = False
return self.observe(), reward, terminated, truncated, {}
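# A minimal rollout sketch for sanity-checking this environment (a hypothetical snippet;
# it assumes an rcssservermj instance is already listening on the given port and mirrors
# the constructor call used in Train.train below):
#
#   env = WalkEnv("127.0.0.1", 3100)
#   obs, info = env.reset()
#   for _ in range(100):
#       obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
#       if terminated or truncated:
#           obs, info = env.reset()
#   env.close()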
class Train(Train_Base):
def __init__(self, script) -> None:
super().__init__(script)
def train(self, args):
# --------------------------------------- Learning parameters
n_envs = 20
server_warmup_sec = 3.0
n_steps_per_env = 256 # RolloutBuffer is of size (n_steps_per_env * n_envs)
minibatch_size = 512 # should be a factor of (n_steps_per_env * n_envs)
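# With the values above, the rollout buffer holds 256 * 20 = 5120 transitions,
# i.e. exactly 10 minibatches of 512 per optimization epoch.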
total_steps = 90000000
learning_rate = 2e-4
ent_coef = 0.035
clip_range = 0.2
gamma = 0.97
n_epochs = 3
enable_eval = True
monitor_train_env = False
eval_freq_mult = 60
save_freq_mult = 60
eval_eps = 7
folder_name = f'Walk_R{self.robot_type}'
model_path = f'./scripts/gyms/logs/{folder_name}/'
print(f"Model path: {model_path}")
print(f"Using {n_envs} parallel environments")
# --------------------------------------- Run algorithm
def init_env(i_env, monitor=False):
def thunk():
env = WalkEnv(self.ip, self.server_p + i_env)
if monitor:
env = Monitor(env)
return env
return thunk
env = None
eval_env = None
servers = None
try:
server_log_dir = os.path.join(model_path, "server_logs")
os.makedirs(server_log_dir, exist_ok=True)
servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True) # include 1 extra server for testing
# Wait for servers to start
print(f"Starting {n_envs + 1} rcssservermj servers...")
if server_warmup_sec > 0:
print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
sleep(server_warmup_sec)
print("Servers started, creating environments...")
env = SubprocVecEnv([init_env(i, monitor=monitor_train_env) for i in range(n_envs)], start_method="spawn")
# Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
if enable_eval:
eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
# Custom policy network architecture
from torch import nn # local import; stable-baselines3 already depends on torch
policy_kwargs = dict(
net_arch=dict(
pi=[512, 256, 128], # Policy network: 3 layers
vf=[512, 256, 128] # Value network: 3 layers
),
activation_fn=nn.ELU,
)
if "model_file" in args: # retrain
model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
batch_size=minibatch_size, learning_rate=learning_rate)
else: # train new model
model = PPO(
"MlpPolicy",
env=env,
verbose=1,
n_steps=n_steps_per_env,
batch_size=minibatch_size,
learning_rate=learning_rate,
device="cpu",
policy_kwargs=policy_kwargs,
ent_coef=ent_coef, # Entropy coefficient for exploration
clip_range=clip_range, # PPO clipping parameter
gae_lambda=0.95, # GAE lambda
gamma=gamma, # Discount factor
# target_kl=0.03,
n_epochs=n_epochs,
tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
)
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
eval_freq=n_steps_per_env * max(1, eval_freq_mult),
save_freq=n_steps_per_env * max(1, save_freq_mult),
eval_eps=max(1, eval_eps),
backup_env_file=__file__)
except KeyboardInterrupt:
sleep(1) # wait for child processes
print("\nctrl+c pressed, aborting...\n")
return
finally:
if env is not None:
env.close()
if eval_env is not None:
eval_env.close()
if servers is not None:
servers.kill()
def test(self, args):
# Uses different server and monitor ports
server_log_dir = os.path.join(args["folder_dir"], "server_logs")
os.makedirs(server_log_dir, exist_ok=True)
test_no_render = False
test_no_realtime = False
server = Train_Server(
self.server_p - 1,
self.monitor_p,
1,
no_render=test_no_render,
no_realtime=test_no_realtime,
)
env = WalkEnv(self.ip, self.server_p - 1)
model = PPO.load(args["model_file"], env=env)
try:
self.export_model(args["model_file"], args["model_file"] + ".pkl",
False) # Export to pkl to create custom behavior
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
except KeyboardInterrupt:
print()
env.close()
server.kill()
if __name__ == "__main__":
from types import SimpleNamespace
# Build default script arguments
script_args = SimpleNamespace(
args=SimpleNamespace(
i='127.0.0.1', # Server IP
p=3100, # Server port
m=3200, # Monitor port
r=0, # Robot type
t='Gym', # Team name
u=1 # Uniform number
)
)
trainer = Train(script_args)
run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()
if run_mode == "test":
test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Turn_R0_004/best_model.zip")
test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Turn_R0_004/")
trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
else:
retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
if retrain_model:
trainer.train({"model_file": retrain_model})
else:
trainer.train({})

View File

@@ -24,36 +24,14 @@ CPU_QUOTA="$((CORES * UTIL_PERCENT))%"
MEMORY_MAX="${MEMORY_MAX:-0}"
# ------------------------------
# Training run parameters (read by scripts/gyms/Walk.py)
# Slimmed-down run parameters (read by scripts/gyms/Walk.py)
# ------------------------------
# Run mode: train or test
# Keep only the most commonly used switches to avoid overly long environment-variable command lines.
GYM_CPU_MODE="${GYM_CPU_MODE:-train}"
# Number of parallel environments: larger values usually raise throughput, but also make OOM or unstable connections more likely.
# Default to the safer value of 12; raise it to 16/20 once runs are confirmed stable.
GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS:-12}"
# Server warmup time (seconds):
# after launching the rcssserver instances in bulk, wait a while before creating the SubprocVecEnv;
# this reduces the chance of ConnectionReset/EOFError.
GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC:-10}"
# Training-only parameters
GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV:-256}"
GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE:-512}"
GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR:-1e-4}"
GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF:-0.03}"
GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE:-0.13}"
GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA:-0.95}"
GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS:-5}"
GYM_CPU_TRAIN_STAGE="${GYM_CPU_TRAIN_STAGE:-walk}"
GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL:-}"
# Test-only parameters
GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL:-scripts/gyms/logs/Walk_R0_004/best_model.zip}"
GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER:-scripts/gyms/logs/Walk_R0_004/}"
# Tests default to real-time with rendering enabled: both values default to 0;
# set a value to 1 to disable the corresponding capability.
GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER:-0}"
GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME:-0}"
# Python interpreter selection strategy:
# 1) Prefer a manually supplied PYTHON_BIN
@@ -93,7 +71,7 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Print the effective configuration to ease troubleshooting and reproducing experiments.
echo "Starting training with limits: CPU=${CPU_QUOTA}, Memory=${MEMORY_MAX}"
echo "Mode: ${GYM_CPU_MODE}"
echo "Runtime knobs: GYM_CPU_N_ENVS=${GYM_CPU_N_ENVS}, GYM_CPU_SERVER_WARMUP_SEC=${GYM_CPU_SERVER_WARMUP_SEC}"
echo "Run knobs: GYM_CPU_MODE=${GYM_CPU_MODE}, GYM_CPU_TRAIN_STAGE=${GYM_CPU_TRAIN_STAGE}"
echo "Using Python: ${PYTHON_EXEC}"
if [[ -n "${CONDA_DEFAULT_ENV:-}" ]]; then
echo "Detected conda env: ${CONDA_DEFAULT_ENV}"
@@ -118,19 +96,9 @@ systemd-run --user --scope \
"${SYSTEMD_PROPS[@]}" \
env \
GYM_CPU_MODE="${GYM_CPU_MODE}" \
GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS}" \
GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC}" \
GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV}" \
GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE}" \
GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR}" \
GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF}" \
GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE}" \
GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA}" \
GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS}" \
GYM_CPU_TRAIN_STAGE="${GYM_CPU_TRAIN_STAGE}" \
GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL}" \
GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL}" \
GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER}" \
GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER}" \
GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME}" \
"${PYTHON_EXEC}" "-m" "scripts.gyms.Walk"

View File

@@ -47,7 +47,7 @@ class World:
self.their_team_players: list[OtherRobot] = [OtherRobot(is_teammate=False) for _ in
range(self.MAX_PLAYERS_PER_TEAM)]
self.field: Field = self.__initialize_field(field_name=field_name)
self.WORLD_STEPTIME: float = 0.005 # Time step of the world in seconds
self.WORLD_STEPTIME: float = 0.02 # Time step of the world in seconds
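# 0.02 s per world step corresponds to a 50 Hz update rate (the previous 0.005 s was 200 Hz).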
def update(self) -> None:
"""