Compare commits
2 Commits
28e7eb0692 ... c5ecbae1cf
| Author | SHA1 | Date |
|---|---|---|
| | c5ecbae1cf | |
| | 87e5c6d931 | |
command.md (14 lines deleted)
```
@@ -1,14 +0,0 @@
Training (default)
bash train.sh

Testing (real-time + display)
GYM_CPU_MODE=test GYM_CPU_TEST_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip GYM_CPU_TEST_FOLDER=scripts/gyms/logs/Walk_R0_005/ GYM_CPU_TEST_NO_RENDER=0 GYM_CPU_TEST_NO_REALTIME=0 bash train.sh

Testing (no display, non-real-time)
GYM_CPU_MODE=test GYM_CPU_TEST_NO_RENDER=1 GYM_CPU_TEST_NO_REALTIME=1 bash train.sh

retrain (continue training)
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip bash train.sh

retrain + modified training hyperparameters
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_004/best_model.zip GYM_CPU_TRAIN_LR=2e-4 GYM_CPU_TRAIN_CLIP_RANGE=0.13 GYM_CPU_TRAIN_BATCH_SIZE=256 GYM_CPU_TRAIN_GAMMA=0.95 GYM_CPU_TRAIN_ENT_COEF=0.05 GYM_CPU_TRAIN_EPOCHS=8 bash train.sh
```
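The deleted command.md documented the `GYM_CPU_*` environment-variable interface that later hunks in this compare strip out of Walk.py and train.sh. As a hedged sketch of how such knobs are typically consumed on the Python side (the override pattern appears verbatim in the old code below; anything beyond those names is illustrative):

```python
import os

# Sketch of the GYM_CPU_* override pattern: each knob has a literal
# default that an environment variable may override at launch time.
mode = os.environ.get("GYM_CPU_MODE", "train")                    # "train" or "test"
learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"  # "1" disables rendering

if mode == "test":
    model_zip = os.environ.get("GYM_CPU_TEST_MODEL", "")
    print(f"testing {model_zip} (render={not no_render})")
else:
    print(f"training with lr={learning_rate}")
```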
```
@@ -1,4 +1,5 @@
import logging
import os
import socket
import time
from select import select
@@ -15,6 +16,11 @@ class Server:
        self.__socket: socket.socket = self._create_socket()
        self.__send_buff = []
        self.__rcv_buffer_size = 1024
        self.__rcv_buffer_default_size = 1024
        self.__max_msg_size = 1048576
        self.__shrink_threshold = 8192
        self.__shrink_after_msgs = 200
        self.__small_msg_streak = 0
        self.__rcv_buffer = bytearray(self.__rcv_buffer_size)

    def _create_socket(self) -> socket.socket:
@@ -105,6 +111,10 @@ class Server:
        msg_size = int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)

        # Guard against corrupted frame lengths that would trigger huge allocations.
        if msg_size <= 0 or msg_size > self.__max_msg_size:
            raise ConnectionResetError

        if msg_size > self.__rcv_buffer_size:
            self.__rcv_buffer_size = msg_size
            self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
@@ -120,6 +130,15 @@ class Server:
            message=self.__rcv_buffer[:msg_size].decode()
        )

        if msg_size <= self.__shrink_threshold and self.__rcv_buffer_size > self.__rcv_buffer_default_size:
            self.__small_msg_streak += 1
            if self.__small_msg_streak >= self.__shrink_after_msgs:
                self.__rcv_buffer_size = self.__rcv_buffer_default_size
                self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
                self.__small_msg_streak = 0
        else:
            self.__small_msg_streak = 0

        # Exit when the socket has no more data to read.
        if len(select([self.__socket], [], [], 0.0)[0]) == 0:
            break
```
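Taken together, the hunks above grow the receive buffer to fit an oversized frame and shrink it back to the 1 KiB default after 200 consecutive small messages, so one large frame cannot pin memory forever; the streak resets whenever a frame above the 8 KiB threshold arrives. A self-contained sketch of that adaptive, length-prefixed receive loop under the same assumed wire format (4-byte big-endian length prefix; names here are illustrative, not the repository's `Server` class):

```python
import socket

def recv_exact(sock: socket.socket, buf: memoryview, n: int) -> None:
    """Read exactly n bytes into buf, raising on EOF."""
    got = 0
    while got < n:
        r = sock.recv_into(buf[got:n])
        if r == 0:
            raise ConnectionResetError
        got += r

def receive_frames(sock: socket.socket):
    size = default = 1024
    max_msg, shrink_at, shrink_after = 1_048_576, 8_192, 200
    buf, streak = bytearray(size), 0
    header = bytearray(4)
    while True:
        recv_exact(sock, memoryview(header), 4)
        msg_size = int.from_bytes(header, "big")
        if msg_size <= 0 or msg_size > max_msg:  # corrupted length prefix
            raise ConnectionResetError
        if msg_size > size:                      # grow to fit this frame
            size = msg_size
            buf = bytearray(size)
        recv_exact(sock, memoryview(buf), msg_size)
        yield bytes(buf[:msg_size])
        if msg_size <= shrink_at and size > default:
            streak += 1
            if streak >= shrink_after:           # sustained small traffic: shrink back
                size, buf, streak = default, bytearray(default), 0
        else:
            streak = 0
```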
```
@@ -1,9 +1,14 @@
import subprocess
import os
import time
import threading


class Server():
    WATCHDOG_ENABLED = True
    WATCHDOG_INTERVAL_SEC = 30.0
    WATCHDOG_RSS_MB_LIMIT = 2000.0

    def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
        try:
            import psutil
@@ -14,6 +19,10 @@ class Server():
        self.first_server_p = first_server_p
        self.n_servers = n_servers
        self.rcss_processes = []
        self._server_specs = []
        self._watchdog_stop = threading.Event()
        self._watchdog_lock = threading.Lock()
        self._watchdog_thread = None
        first_monitor_p = first_monitor_p + 100

        # makes it easier to kill test servers without affecting train servers
@@ -23,7 +32,15 @@ class Server():
        for i in range(n_servers):
            port = first_server_p + i
            mport = first_monitor_p + i
            self._server_specs.append((port, mport, cmd, render_arg, realtime_arg))
            proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
            self.rcss_processes.append(proc)

        if self.WATCHDOG_ENABLED:
            self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=True)
            self._watchdog_thread.start()

    def _spawn_server(self, port, mport, cmd, render_arg, realtime_arg):
        server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()

        proc = subprocess.Popen(
@@ -42,7 +59,52 @@ class Server():
                f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
            )

        self.rcss_processes.append(proc)
        return proc

    @staticmethod
    def _pid_rss_mb(pid):
        try:
            with open(f"/proc/{pid}/status", "r", encoding="utf-8") as f:
                for line in f:
                    if line.startswith("VmRSS:"):
                        parts = line.split()
                        if len(parts) >= 2:
                            # VmRSS is kB
                            return float(parts[1]) / 1024.0
        except (FileNotFoundError, ProcessLookupError, PermissionError, OSError):
            return 0.0
        return 0.0

    def _restart_server_at_index(self, idx, reason):
        port, mport, cmd, render_arg, realtime_arg = self._server_specs[idx]
        old_proc = self.rcss_processes[idx]
        try:
            old_proc.terminate()
            old_proc.wait(timeout=1.0)
        except Exception:
            try:
                old_proc.kill()
            except Exception:
                pass

        new_proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
        self.rcss_processes[idx] = new_proc
        print(
            f"[ServerWatchdog] Restarted server idx={idx} port={port} monitor={mport} reason={reason}"
        )

    def _watchdog_loop(self):
        while not self._watchdog_stop.wait(self.WATCHDOG_INTERVAL_SEC):
            with self._watchdog_lock:
                for i, proc in enumerate(self.rcss_processes):
                    rc = proc.poll()
                    if rc is not None:
                        self._restart_server_at_index(i, f"exited:{rc}")
                        continue

                    rss_mb = self._pid_rss_mb(proc.pid)
                    if rss_mb > self.WATCHDOG_RSS_MB_LIMIT:
                        self._restart_server_at_index(i, f"rss_mb:{rss_mb:.1f}")

    def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
        ''' Check if any server is running on chosen ports '''
@@ -78,6 +140,9 @@ class Server():
        return

    def kill(self):
        self._watchdog_stop.set()
        if self._watchdog_thread is not None:
            self._watchdog_thread.join(timeout=1.0)
        for p in self.rcss_processes:
            p.kill()
        print(f"Killed {self.n_servers} rcssservermj processes starting at {self.first_server_p}")
```
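The watchdog's memory check reads `VmRSS` directly from `/proc/<pid>/status`, avoiding a hard psutil dependency in the polling loop. A minimal standalone sanity check of that parsing (Linux-only; the function name is illustrative):

```python
import os

def rss_mb(pid: int) -> float:
    # VmRSS in /proc/<pid>/status is reported in kB; convert to MB.
    try:
        with open(f"/proc/{pid}/status", encoding="utf-8") as f:
            for line in f:
                if line.startswith("VmRSS:"):
                    return float(line.split()[1]) / 1024.0
    except OSError:
        return 0.0
    return 0.0

print(f"{rss_mb(os.getpid()):.1f} MB resident")  # expect a small non-zero value
```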
```
@@ -53,8 +53,8 @@ class WalkEnv(gym.Env):
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.reward_debug_interval_sec = float(os.environ.get("GYM_CPU_REWARD_DEBUG_INTERVAL_SEC", "600"))
        self.reward_debug_burst_steps = int(os.environ.get("GYM_CPU_REWARD_DEBUG_BURST_STEPS", "10"))
        self.reward_debug_interval_sec = 600.0
        self.reward_debug_burst_steps = 10
        self._reward_debug_last_time = time.time()
        self._reward_debug_steps_left = 0
        self.calibrate_nominal_from_neutral = True
@@ -64,6 +64,10 @@ class WalkEnv(gym.Env):
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        self._speed_estimate = 0.0
        self._speed_from_acc = 0.0
        self._speed_smoothing = 0.85
        self._fallback_dt = 0.02
        target_hz_env = 0
        if target_hz_env:
            try:
```
```
@@ -158,10 +162,10 @@ class WalkEnv(gym.Env):

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = False
        self.reset_beam_yaw_range_deg = float(os.environ.get("GYM_CPU_RESET_BEAM_YAW_RANGE_DEG", "180"))
        self.reset_target_bearing_range_deg = float(os.environ.get("GYM_CPU_RESET_TARGET_BEARING_RANGE_DEG", "120"))
        self.reset_target_distance_min = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MIN", "1.2"))
        self.reset_target_distance_max = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MAX", "2.8"))
        self.reset_beam_yaw_range_deg = 180.0
        self.reset_target_bearing_range_deg = 0.0
        self.reset_target_distance_min = 3.0
        self.reset_target_distance_max = 5.0
        if self.reset_target_distance_min > self.reset_target_distance_max:
            self.reset_target_distance_min, self.reset_target_distance_max = (
                self.reset_target_distance_max,
```
```
@@ -171,14 +175,61 @@ class WalkEnv(gym.Env):
        self.reset_perturb_steps = 4
        self.reset_recover_steps = 8

        self.reward_smoothness_scale = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_SCALE", "0.06"))
        self.reward_smoothness_cap = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_CAP", "0.45"))
        self.reward_head_toward_bonus = float(os.environ.get("GYM_CPU_REWARD_HEAD_TOWARD_BONUS", "1"))
        self.reward_smoothness_scale = 0.06
        self.reward_smoothness_cap = 0.45
        self.reward_forward_stability_gate = 0.35
        self.reward_forward_tilt_hard_threshold = 0.50
        self.reward_forward_tilt_hard_scale = 0.20
        self.reward_head_toward_bonus = 1.0
        self.turn_stationary_radius = 0.2
        self.turn_stationary_penalty_scale = 3.0
        self.stationary_start_steps = 20
        self.stationary_step_eps = 0.015
        self.stationary_penalty_scale = 1.2
        self.train_stage = "walk"
        self.in_place_radius = 0.18
        self.in_place_center_reward_scale = 0.60
        self.in_place_drift_penalty_scale = 1.20
        self.waypoint_reach_distance = 0.3
        self.num_waypoints = 1
        self.exploration_start_steps = 80
        self.exploration_scale = 0.08
        self.exploration_cap = 0.25
        self.exploration_target_novelty = 1.0
        self.exploration_sigma = 0.7
        self.reward_stride_swing_scale = 0.20
        self.reward_stride_phase_scale = 0.18
        self.reward_knee_drive_scale = 0.10
        self.reward_knee_lift_scale = 0.12
        self.reward_knee_lift_target = 0.95
        self.reward_knee_lift_shortfall_scale = 0.20
        self.reward_knee_overbend_threshold = 0.60
        self.reward_knee_overbend_scale = 0.35
        self.reward_hip_lift_scale = 0.12
        self.reward_hip_lift_target = 0.80
        self.reward_knee_alternate_scale = 0.10
        self.reward_knee_bilateral_scale = 0.16
        self.reward_single_leg_penalty_scale = 0.22
        self.reward_knee_phase_switch_scale = 0.14
        self.knee_phase_deadband = 0.10
        self.knee_phase_min_interval = 18
        self.knee_phase_target_interval = 22
        self.knee_phase_fast_switch_penalty_scale = 0.10
        self.knee_phase_max_hold_frames = 28
        self.knee_phase_hold_penalty_scale = 0.18
        self.reward_stride_cap = 0.80

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.action_history_len = 50
        self.prev_action_history = np.zeros((self.action_history_len, self.no_of_actions), dtype=np.float32)
        self.history_idx = 0
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.last_yaw_error = None
        self.prev_knee_balance = 0.0
        self.prev_knee_phase_sign = 0
        self.knee_phase_frames_since_switch = 0
        self.knee_phase_hold_frames = 0
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for connection to establish completely
        self.Player.server.send_immediate(
```
```
@@ -341,10 +392,18 @@ class WalkEnv(gym.Env):
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.prev_action_history.fill(0.0)
        self.history_idx = 0
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.last_yaw_error = None
        self.prev_knee_balance = 0.0
        self.prev_knee_phase_sign = 0
        self.knee_phase_frames_since_switch = 0
        self.knee_phase_hold_frames = 0
        self.walk_cycle_step = 0
        self._reward_debug_steps_left = 0
        self._speed_estimate = 0.0
        self._speed_from_acc = 0.0

        # Randomize the beam position and heading to increase training diversity.
        beam_x = (random() - 0.5) * 10
```
```
@@ -399,16 +458,28 @@ class WalkEnv(gym.Env):
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Randomize global target bearing so policy must learn to rotate toward it first.
        # Generate multiple waypoints along a path
        heading_deg = float(r.global_orientation_euler[2])
        self.point_list = []
        current_point = self.initial_position.copy()

        for i in range(self.num_waypoints):
            # Each waypoint is placed further along the path
            target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
            target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)

            target_offset = MathOps.rotate_2d_vec(
                np.array([target_distance, 0.0]),
                heading_deg + target_bearing_deg,
                np.array([target_distance_wp, 0.0]),
                heading_deg + target_bearing_deg_wp,
                is_rad=False,
            )
            point1 = self.initial_position + target_offset
            self.point_list = [point1]
            next_point = current_point + target_offset
            self.point_list.append(next_point)
            current_point = next_point.copy()

        self.target_position = self.point_list[self.waypoint_index]
        if self.train_stage == "in_place":
            self.target_position = self.initial_position.copy()
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}
```
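In this hunk the single-target lines (`target_distance`, `target_bearing_deg`, `point1`) appear to be the old code and the `_wp`/`next_point` lines the replacement, so each iteration chains a new waypoint onto the previous one. A hedged sketch of the resulting waypoint-chain sampling, with a plain 2-D rotation standing in for `MathOps.rotate_2d_vec` (whose exact signature is only inferred from the diff):

```python
import math
import numpy as np

def rotate_2d(vec: np.ndarray, angle_deg: float) -> np.ndarray:
    """Rotate a 2-D vector counter-clockwise by angle_deg (stand-in for MathOps.rotate_2d_vec)."""
    a = math.radians(angle_deg)
    c, s = math.cos(a), math.sin(a)
    return np.array([c * vec[0] - s * vec[1], s * vec[0] + c * vec[1]])

def sample_waypoints(start, heading_deg, n, dist_min=3.0, dist_max=5.0, bearing_range_deg=0.0):
    """Chain n waypoints, each offset from the previous one by a random distance/bearing."""
    points, current = [], np.asarray(start, dtype=float)
    for _ in range(n):
        dist = np.random.uniform(dist_min, dist_max)
        bearing = np.random.uniform(-bearing_range_deg, bearing_range_deg)
        current = current + rotate_2d(np.array([dist, 0.0]), heading_deg + bearing)
        points.append(current.copy())
    return points

print(sample_waypoints(np.zeros(2), heading_deg=0.0, n=1))  # one waypoint 3-5 m straight ahead
```

The defaults mirror the new literals above (`reset_target_bearing_range_deg = 0.0`, distances 3.0-5.0), so with one waypoint the target always lies directly along the robot's initial heading.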
```
@@ -419,184 +490,26 @@ class WalkEnv(gym.Env):

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])
        robot = self.Player.robot

        joint_pos_rad = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        joint_speed_rad = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(robot.gyroscope)
        rp_ang_vel_mag = float(np.linalg.norm(ang_vel[:2]))

        is_fallen = height < 0.55
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -20.0

        prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
        curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
        dist_delta = prev_dist_to_target - curr_dist_to_target

        if np.linalg.norm(current_pos - previous_pos) > 0.005:
            position_penalty = -3 * float(np.linalg.norm(current_pos - previous_pos))
        # Forward-progress reward (distance delta) with anti-stuck shaping.
        progress_reward = 22.0 * dist_delta
        survival_reward = 0.02
        smoothness_penalty = -0.015 * float(np.linalg.norm(action - self.last_action_for_reward))
        step_displacement = float(np.linalg.norm(current_pos - previous_pos))
        if self.step_counter > 30 and step_displacement < 0.006:
            idle_penalty = -0.06
        else:
            position_penalty = 0.0
            idle_penalty = 0.0

        # Turn-to-target shaping.
        to_target = self.target_position - current_pos
        dist_to_target = float(np.linalg.norm(to_target))
        if dist_to_target > 1e-6:
            target_yaw = math.atan2(float(to_target[1]), float(to_target[0]))
        else:
            target_yaw = 0.0

        robot_yaw = math.radians(float(robot.global_orientation_euler[2]))
        yaw_error = target_yaw - robot_yaw

        # Main heading objective: face the target direction.
        # heading_align_reward = 1.0 * math.cos(yaw_error)

        abs_yaw_error = abs(yaw_error)
        alive_bonus = 2.0 * max(0.0, 1.0 - abs_yaw_error / math.pi)
        head_toward_bonus = self.reward_head_toward_bonus if abs_yaw_error < math.radians(4.0) else 0.0

        if self.last_yaw_error is None:
            heading_progress_reward = 0.0
        else:
            prev_abs_yaw_error = abs(self.last_yaw_error)
            yaw_err_delta = prev_abs_yaw_error - abs_yaw_error
            progress_gate = 1.0 if abs_yaw_error > math.radians(4.0) else 0.0
            heading_progress_reward = progress_gate * yaw_err_delta
            heading_progress_reward = float(np.clip(heading_progress_reward, -1, 1))
        self.last_yaw_error = yaw_error

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.05 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.6 * tilt_mag
        # Penalize roll/pitch rotational shake but do not penalize yaw turning directly.
        ang_vel_penalty = -0.06 * rp_ang_vel_mag

        joint_pos = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        ) * self.train_sim_flip
        left_hip_roll = float(joint_pos[12])
        right_hip_roll = float(joint_pos[18])
        left_hip_pitch = float(joint_pos[11])
        right_hip_pitch = float(joint_pos[17])

        left_ankle_roll = float(joint_pos[16])
        right_ankle_roll = float(joint_pos[22])

        max_leg_roll = 0.2  # prevent a splits-like pose
        split_penalty = -0.8 * max(0.0, (-left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
        left_hip_yaw = float(joint_pos[13])
        right_hip_yaw = float(joint_pos[19])

        min_leg_separation = 0.05  # minimum leg separation (keeps the legs from pressing together)
        # Penalize excessive leg adduction, based on the gap between the two legs.
        leg_separation = -left_hip_roll + right_hip_roll
        inward_penalty = -0.25 * max(0.0, (min_leg_separation - leg_separation) / min_leg_separation)

        # Ankle-roll check: prevent excessive eversion or inversion.
        max_ankle_roll = 0.15  # maximum allowed ankle roll angle

        # Penalize over-everted/inverted ankles (absolute value too large).
        ankle_roll_penalty = -0.5 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)

        # Penalize the two ankles rolling in opposite directions (unstable pose).
        ankle_roll_cross_penalty = -0.3 * max(0.0, -(left_ankle_roll * right_ankle_roll))

        # Penalize excessive rotation of each thigh separately.
        max_hip_yaw = 0.5  # maximum allowed yaw angle
        left_hip_yaw_penalty = -0.4 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
        right_hip_yaw_penalty = -0.4 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
        # Gated crossed-leg penalty: only punish while standing; allow crossing while turning.
        yaw_rate = float(np.deg2rad(robot.gyroscope[2]))
        yaw_rate_abs = abs(yaw_rate)

        # Only penalize crossed legs when the turn rate is small (standing still).
        cross_leg_gate = max(0.0, 1.0 - yaw_rate_abs / math.radians(8.0))
        hip_yaw_cross_penalty = -1.0 * cross_leg_gate * max(0.0, -(left_hip_yaw * right_hip_yaw)) if left_hip_yaw > 0 and right_hip_yaw < 0 else 0.0

        # Torso-lower-body linkage: reward coordinated turning, punish waist-only spinning.
        waist_speed = abs(float(joint_speed_rad[10]))
        lower_body_speed = float(np.mean(np.abs(joint_speed_rad[11:23])))
        lower_body_follow_ratio = lower_body_speed / (waist_speed + 1e-4)
        linkage_reward = 0.24 * min(1.0, lower_body_follow_ratio) * min(1.0, waist_speed / 1.2)
        waist_only_turn_penalty = -0.20 * max(0.0, waist_speed - 1.35 * lower_body_speed)

        # Extra posture linkage in yaw joints to avoid decoupled torso twist.
        waist_yaw = abs(float(joint_pos_rad[10]))
        hip_yaw_mean = 0.5 * (abs(float(joint_pos_rad[13])) + abs(float(joint_pos_rad[19])))
        yaw_link_reward = 0.12 * math.exp(-abs(waist_yaw - hip_yaw_mean) / 0.22)

        target_height = self.initial_height
        height_error = height - target_height

        height_penalty = -(math.exp(12 * abs(height_error)) - 1) if height_error > 0.04 else 0

        # # Near the start of compute_reward, add a height rate-of-change computation:
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height drops (negative rate of change):
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; positive -height_rate is the drop magnitude

        # # In compute_reward:
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            head_toward_bonus +
            heading_progress_reward +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            + ankle_roll_penalty
            + ankle_roll_cross_penalty
            + split_penalty
            + inward_penalty
            # + leg_proximity_penalty
            + left_hip_yaw_penalty
            + right_hip_yaw_penalty
            + hip_yaw_cross_penalty
            + position_penalty
            # + linkage_reward
            # + waist_only_turn_penalty
            # + yaw_link_reward
            # + stance_collapse_penalty
            # + hip_yaw_yaw_cross_penalty
            # + cross_leg_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        # print(height_error, height_penalty)
        total = progress_reward + survival_reward + smoothness_penalty + idle_penalty

        now = time.time()
        if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
```
```
@@ -606,35 +519,12 @@ class WalkEnv(gym.Env):
        if self._reward_debug_steps_left > 0:
            self._reward_debug_steps_left -= 1
            self.debug_log(
                f"height_penalty:{height_penalty:.4f},"
                f"progress_reward:{progress_reward:.4f},"
                f"survival_reward:{survival_reward:.4f},"
                f"smoothness_penalty:{smoothness_penalty:.4f},"
                f"posture_penalty:{posture_penalty:.4f},"
                f"heading_progress_reward:{heading_progress_reward:.4f},"
                # f"stance_collapse_penalty:{stance_collapse_penalty:.4f},"
                # f"cross_leg_penalty:{cross_leg_penalty:.4f},"
                f"ang_vel_penalty:{ang_vel_penalty:.4f},"
                f"split_penalty:{split_penalty:.4f},"
                f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
                f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
                f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
                f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
                f"hip_yaw_cross_penalty:{hip_yaw_cross_penalty:.4f},"
                f"inward_penalty:{inward_penalty:.4f},"
                f"position_penalty:{position_penalty:.4f},"
                # f"linkage_reward:{linkage_reward:.4f},"
                # f"waist_only_turn_penalty:{waist_only_turn_penalty:.4f},"
                # f"yaw_link_reward:{yaw_link_reward:.4f}"
                # f"leg_proximity_penalty:{leg_proximity_penalty:.4f},"
                # f"hip_yaw_yaw_cross_penalty:{hip_yaw_yaw_cross_penalty:.4f},"
                # f"height_down_penalty:{height_down_penalty:.4f},"
                # f"exploration_bonus:{exploration_bonus:.4f}"
                f"alive_bonus:{alive_bonus:.4f},"
                f"abs_yaw_error:{abs_yaw_error:.4f},"
                f"idle_penalty:{idle_penalty:.4f},"
                f"total:{total:.4f}"
            )
        # print(f"abs_yaw_error:{abs_yaw_error:.4f}")
        return total
```
```
@@ -655,10 +545,12 @@ class WalkEnv(gym.Env):
        action[8] = 0
        action[9] = 5
        action[10] = 0
        action[11] = np.clip(action[11], -0.7, 0.7)
        action[17] = np.clip(action[17], -0.7, 0.7)
        # action[12] = -1.0
        # action[18] = 1.0
        action[11] = np.clip(action[11], -6, 6)
        action[17] = np.clip(action[17], -6, 6)
        # action[11] = 1
        # action[17] = 1
        # action[12] = -0.01
        # action[18] = 0.01
        # action[13] = -1.0
        # action[19] = 1.0
        self.previous_action = action.copy()
@@ -671,7 +563,7 @@ class WalkEnv(gym.Env):

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=80, kd=4.67
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=60, kd=1.2
            )

        self.previous_action = action.copy()
```
```
@@ -684,14 +576,28 @@ class WalkEnv(gym.Env):

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        if self.step_counter % 10 == 0:
            self.previous_pos = current_pos.copy()

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)
        self.previous_pos = current_pos.copy()

        self.prev_action_history[self.history_idx] = action.copy()
        self.history_idx = (self.history_idx + 1) % self.action_history_len

        self.last_action_for_reward = action.copy()

        # Check if current waypoint is reached
        if self.train_stage != "in_place":
            dist_to_waypoint = float(np.linalg.norm(current_pos - self.target_position))
            if dist_to_waypoint < self.waypoint_reach_distance:
                # Move to next waypoint
                self.waypoint_index += 1
                if self.waypoint_index >= len(self.point_list):
                    # All waypoints completed
                    self.route_completed = True
                else:
                    # Update target to next waypoint
                    self.target_position = self.point_list[self.waypoint_index]

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.55
```
```
@@ -709,15 +615,22 @@ class Train(Train_Base):
    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "512"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
        n_envs = 12
        server_warmup_sec = 3.0
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
        folder_name = f'Turn_R{self.robot_type}'
        learning_rate = 2e-4
        ent_coef = 0.08
        clip_range = 0.2
        gamma = 0.97
        n_epochs = 3
        enable_eval = True
        monitor_train_env = False
        eval_freq_mult = 30
        save_freq_mult = 20
        eval_eps = 3
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
```
```
@@ -733,6 +646,10 @@ class Train(Train_Base):

            return thunk

        env = None
        eval_env = None
        servers = None
        try:
            server_log_dir = os.path.join(model_path, "server_logs")
            os.makedirs(server_log_dir, exist_ok=True)
            servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing
```
```
@@ -744,11 +661,11 @@ class Train(Train_Base):
            sleep(server_warmup_sec)
            print("Servers started, creating environments...")

            env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)], start_method="spawn")
            env = SubprocVecEnv([init_env(i, monitor=monitor_train_env) for i in range(n_envs)], start_method="spawn")
            # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
            if enable_eval:
                eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

            try:
                # Custom policy network architecture
                policy_kwargs = dict(
                    net_arch=dict(
```
```
@@ -771,26 +688,30 @@ class Train(Train_Base):
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),  # Entropy coefficient for exploration
                    clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
                    ent_coef=ent_coef,  # Entropy coefficient for exploration
                    clip_range=clip_range,  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
                    gamma=gamma,  # Discount factor
                    # target_kl=0.03,
                    n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
                    n_epochs=n_epochs,
                    tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

                model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                              eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=7,
                                              eval_freq=n_steps_per_env * max(1, eval_freq_mult),
                                              save_freq=n_steps_per_env * max(1, save_freq_mult),
                                              eval_eps=max(1, eval_eps),
                                              backup_env_file=__file__)
            except KeyboardInterrupt:
                sleep(1)  # wait for child processes
                print("\nctrl+c pressed, aborting...\n")
                servers.kill()
                return

        finally:
            if env is not None:
                env.close()
            if eval_env is not None:
                eval_env.close()
            if servers is not None:
                servers.kill()

    def test(self, args):
```
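With the new literals, the rollout buffer holds n_steps_per_env × n_envs = 256 × 12 = 3072 transitions, which the minibatch size of 512 divides evenly, as the comment requires. A minimal sketch of a Stable-Baselines3 PPO setup with these hyperparameters (the environment here is a placeholder, not the repository's WalkEnv/SubprocVecEnv pipeline):

```python
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

# Placeholder vectorized env; the repository wraps its own WalkEnv instead.
env = make_vec_env("Pendulum-v1", n_envs=12)

model = PPO(
    "MlpPolicy",
    env,
    n_steps=256,      # per-env rollout length -> 256 * 12 = 3072 transitions
    batch_size=512,   # 3072 / 512 = 6 minibatches per epoch
    learning_rate=2e-4,
    ent_coef=0.08,
    clip_range=0.2,
    gae_lambda=0.95,
    gamma=0.97,
    n_epochs=3,
    device="cpu",
)
model.learn(total_timesteps=3072)  # one rollout-update cycle as a smoke test
```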
```
@@ -798,8 +719,8 @@ class Train(Train_Base):
        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"
        test_no_render = False
        test_no_realtime = False

        server = Train_Server(
            self.server_p - 1,
```
train.sh (42 lines changed)
```
@@ -24,36 +24,14 @@ CPU_QUOTA="$((CORES * UTIL_PERCENT))%"
MEMORY_MAX="${MEMORY_MAX:-0}"

# ------------------------------
# Training run parameters (read by scripts/gyms/Walk.py)
# Trimmed-down run parameters (read by scripts/gyms/Walk.py)
# ------------------------------
# Run mode: train or test
# Only the most common switches are kept, to avoid very long environment-variable commands.
GYM_CPU_MODE="${GYM_CPU_MODE:-train}"

# Number of parallel environments: larger values usually raise throughput,
# but also make OOM or unstable connections more likely.
# Default to a safer 12; raise to 16/20 once things prove stable.
GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS:-12}"
# Server warm-up time (seconds):
# after batch-starting rcssserver, wait a while before creating the SubprocVecEnv;
# this reduces the chance of ConnectionReset/EOFError.
GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC:-10}"

# Training-only parameters
GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV:-256}"
GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE:-512}"
GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR:-1e-4}"
GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF:-0.03}"
GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE:-0.13}"
GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA:-0.95}"
GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS:-5}"
GYM_CPU_TRAIN_STAGE="${GYM_CPU_TRAIN_STAGE:-walk}"
GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL:-}"

# Test-only parameters
GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL:-scripts/gyms/logs/Walk_R0_004/best_model.zip}"
GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER:-scripts/gyms/logs/Walk_R0_004/}"
# Tests default to real-time with rendering: both default to 0.
# Set a variable to 1 to disable the corresponding capability.
GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER:-0}"
GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME:-0}"

# Python interpreter selection policy:
# 1) prefer a PYTHON_BIN you pass in manually
@@ -93,7 +71,7 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Print the effective configuration, for troubleshooting and reproducing experiments.
echo "Starting training with limits: CPU=${CPU_QUOTA}, Memory=${MEMORY_MAX}"
echo "Mode: ${GYM_CPU_MODE}"
echo "Runtime knobs: GYM_CPU_N_ENVS=${GYM_CPU_N_ENVS}, GYM_CPU_SERVER_WARMUP_SEC=${GYM_CPU_SERVER_WARMUP_SEC}"
echo "Run knobs: GYM_CPU_MODE=${GYM_CPU_MODE}, GYM_CPU_TRAIN_STAGE=${GYM_CPU_TRAIN_STAGE}"
echo "Using Python: ${PYTHON_EXEC}"
if [[ -n "${CONDA_DEFAULT_ENV:-}" ]]; then
    echo "Detected conda env: ${CONDA_DEFAULT_ENV}"
@@ -118,19 +96,9 @@ systemd-run --user --scope \
    "${SYSTEMD_PROPS[@]}" \
    env \
    GYM_CPU_MODE="${GYM_CPU_MODE}" \
    GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS}" \
    GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC}" \
    GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV}" \
    GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE}" \
    GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR}" \
    GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF}" \
    GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE}" \
    GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA}" \
    GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS}" \
    GYM_CPU_TRAIN_STAGE="${GYM_CPU_TRAIN_STAGE}" \
    GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL}" \
    GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL}" \
    GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER}" \
    GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER}" \
    GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME}" \
    "${PYTHON_EXEC}" "-m" "scripts.gyms.Walk"
```
```
@@ -47,7 +47,7 @@ class World:
        self.their_team_players: list[OtherRobot] = [OtherRobot(is_teammate=False) for _ in
                                                     range(self.MAX_PLAYERS_PER_TEAM)]
        self.field: Field = self.__initialize_field(field_name=field_name)
        self.WORLD_STEPTIME: float = 0.005  # Time step of the world in seconds
        self.WORLD_STEPTIME: float = 0.02  # Time step of the world in seconds

    def update(self) -> None:
        """
```
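Changing WORLD_STEPTIME from 0.005 s to 0.02 s quadruples the simulated time that elapses per world update; assuming one control decision per update (an assumption, since the update loop is not shown here), the effective control rate drops from 200 Hz to 50 Hz:

```python
# Control frequency implied by each world time step (one action per step assumed).
for dt in (0.005, 0.02):
    print(f"step {dt:.3f} s -> {1.0 / dt:.0f} Hz")
# step 0.005 s -> 200 Hz
# step 0.020 s -> 50 Hz
```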