Compare commits: 5a7952a91c...Walk (6 commits)

| SHA1 |
|---|
| c9c4b35e89 |
| 2ff69d64be |
| 29adc4b3df |
| 43067000db |
| 239fbf4087 |
| 387336b35a |
@@ -7,7 +7,7 @@ import threading
 class Server():
     WATCHDOG_ENABLED = True
     WATCHDOG_INTERVAL_SEC = 30.0
-    WATCHDOG_RSS_MB_LIMIT = 2000.0
+    WATCHDOG_RSS_MB_LIMIT = 800
 
     def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
         try:
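The commit tightens the memory ceiling from 2000 MB to 800 MB. The watchdog body itself is not part of this hunk; for context, a hypothetical sketch of what an RSS watchdog built on these two constants might look like with psutil (everything except the two constants is an assumption):

```python
import os
import threading
import time

import psutil

WATCHDOG_INTERVAL_SEC = 30.0
WATCHDOG_RSS_MB_LIMIT = 800

def watchdog():
    # Poll this process's resident set size and flag runaway memory use.
    proc = psutil.Process(os.getpid())
    while True:
        rss_mb = proc.memory_info().rss / (1024 * 1024)
        if rss_mb > WATCHDOG_RSS_MB_LIMIT:
            print(f"watchdog: RSS {rss_mb:.0f} MB exceeds {WATCHDOG_RSS_MB_LIMIT} MB limit")
        time.sleep(WATCHDOG_INTERVAL_SEC)

threading.Thread(target=watchdog, daemon=True).start()
```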
@@ -109,14 +109,29 @@ class Server():
     def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
         ''' Check if any server is running on chosen ports '''
         found = False
-        p_list = [p for p in psutil.process_iter() if p.cmdline() and "rcssservermj" in " ".join(p.cmdline())]
         range1 = (first_server_p, first_server_p + n_servers)
         range2 = (first_monitor_p, first_monitor_p + n_servers)
         bad_processes = []
 
+        def safe_cmdline(proc):
+            try:
+                return proc.cmdline()
+            except (psutil.ZombieProcess, psutil.NoSuchProcess, psutil.AccessDenied, OSError):
+                return []
+
+        p_list = []
+        for p in psutil.process_iter():
+            cmdline = safe_cmdline(p)
+            if cmdline and "rcssservermj" in " ".join(cmdline):
+                p_list.append(p)
+
         for p in p_list:
             # currently ignoring remaining default port when only one of the ports is specified (uncommon scenario)
-            ports = [int(arg) for arg in p.cmdline()[1:] if arg.isdigit()]
+            cmdline = safe_cmdline(p)
+            if not cmdline:
+                continue
+
+            ports = [int(arg) for arg in cmdline[1:] if arg.isdigit()]
             if len(ports) == 0:
                 ports = [60000, 60100] # default server ports (changing this is unlikely)
 
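The point of `safe_cmdline` is that `psutil.Process.cmdline()` raises when a process exits, turns into a zombie, or denies access mid-iteration, which previously crashed the scan. The guarded scan can also be exercised standalone; a minimal sketch (only the process name `rcssservermj` comes from the diff, the rest is illustrative):

```python
import psutil

def safe_cmdline(proc):
    # Treat exited, zombie, or inaccessible processes as "no command line"
    # instead of letting the exception abort the whole scan.
    try:
        return proc.cmdline()
    except (psutil.ZombieProcess, psutil.NoSuchProcess, psutil.AccessDenied, OSError):
        return []

def find_running_servers(name="rcssservermj"):
    return [p for p in psutil.process_iter() if name in " ".join(safe_cmdline(p))]

if __name__ == "__main__":
    for p in find_running_servers():
        print(p.pid, " ".join(safe_cmdline(p)))
```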
@@ -128,7 +143,7 @@ class Server():
                 print("\nThere are already servers running on the same port(s)!")
             found = True
             bad_processes.append(p)
-            print(f"Port(s) {','.join(conflicts)} already in use by \"{' '.join(p.cmdline())}\" (PID:{p.pid})")
+            print(f"Port(s) {','.join(conflicts)} already in use by \"{' '.join(cmdline)}\" (PID:{p.pid})")
 
         if found:
             print()
@@ -7,7 +7,7 @@ from random import random
 from random import uniform
 from itertools import count
 
-from stable_baselines3 import PPO
+from stable_baselines3 import PPO, TD3, DDPG, SAC, A2C
 from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
 
@@ -66,6 +66,7 @@ class WalkEnv(gym.Env):
         self._last_sync_time = None
         self._speed_estimate = 0.0
         self._speed_from_acc = 0.0
+        self._prev_accelerometer = np.zeros(3, dtype=np.float32)
         self._speed_smoothing = 0.85
         self._fallback_dt = 0.02
         target_hz_env = 0
@@ -125,7 +126,7 @@ class WalkEnv(gym.Env):
                 0.0,  # 22: Right_Ankle_Roll (rle6)
             ]
         )
-        self.joint_nominal_position = np.zeros(self.no_of_actions)
+        # self.joint_nominal_position = np.zeros(self.no_of_actions)
         self.train_sim_flip = np.array(
             [
                 1.0,  # 0: Head_yaw (he1)
@@ -154,7 +155,7 @@ class WalkEnv(gym.Env):
             ]
         )
 
-        self.scaling_factor = 0.3
+        self.scaling_factor = 0.5
         # self.scaling_factor = 1
 
         # Encourage a minimum lateral stance so the policy avoids feet overlap.
@@ -164,8 +165,8 @@ class WalkEnv(gym.Env):
         self.enable_reset_perturb = False
         self.reset_beam_yaw_range_deg = 180.0
         self.reset_target_bearing_range_deg = 0.0
-        self.reset_target_distance_min = 3.0
-        self.reset_target_distance_max = 5.0
+        self.reset_target_distance_min = 1.5
+        self.reset_target_distance_max = 3.0
         if self.reset_target_distance_min > self.reset_target_distance_max:
             self.reset_target_distance_min, self.reset_target_distance_max = (
                 self.reset_target_distance_max,
@@ -175,12 +176,12 @@ class WalkEnv(gym.Env):
         self.reset_perturb_steps = 4
         self.reset_recover_steps = 8
 
-        self.reward_smoothness_scale = 0.06
+        self.reward_smoothness_scale = 0.03
         self.reward_smoothness_cap = 0.45
         self.reward_forward_stability_gate = 0.35
         self.reward_forward_tilt_hard_threshold = 0.50
         self.reward_forward_tilt_hard_scale = 0.20
-        self.reward_head_toward_bonus = 1.0
+        self.reward_head_toward_bonus = 0.8
         self.turn_stationary_radius = 0.2
         self.turn_stationary_penalty_scale = 3.0
         self.stationary_start_steps = 20
@@ -192,17 +193,17 @@ class WalkEnv(gym.Env):
         self.in_place_drift_penalty_scale = 1.20
         self.waypoint_reach_distance = 0.3
         self.num_waypoints = 1
-        self.exploration_start_steps = 80
-        self.exploration_scale = 0.08
-        self.exploration_cap = 0.25
+        self.exploration_start_steps = 40
+        self.exploration_scale = 0.012
+        self.exploration_cap = 0.2
         self.exploration_target_novelty = 1.0
         self.exploration_sigma = 0.7
         self.reward_stride_swing_scale = 0.20
         self.reward_stride_phase_scale = 0.18
         self.reward_knee_drive_scale = 0.10
         self.reward_knee_lift_scale = 0.12
-        self.reward_knee_lift_target = 0.95
-        self.reward_knee_lift_shortfall_scale = 0.20
+        self.reward_knee_lift_target = 0.15
+        self.reward_knee_lift_shortfall_scale = 0.05
         self.reward_knee_overbend_threshold = 0.60
         self.reward_knee_overbend_scale = 0.35
         self.reward_hip_lift_scale = 0.12
@@ -218,6 +219,38 @@ class WalkEnv(gym.Env):
         self.knee_phase_max_hold_frames = 28
         self.knee_phase_hold_penalty_scale = 0.18
         self.reward_stride_cap = 0.80
+        self.reward_knee_explore_scale = 0.03
+        self.reward_knee_explore_delta_scale = 0.03
+        self.reward_knee_explore_cap = 0.10
+        self.reward_hip_pitch_explore_scale = 0.03
+        self.reward_hip_pitch_explore_delta_scale = 0.03
+        self.reward_hip_pitch_explore_cap = 0.05
+        self.reward_progress_scale = 22.0
+        self.reward_survival_scale = 0.3
+        self.reward_idle_penalty_scale = 0.6
+        self.reward_accel_penalty_scale = 0.05
+        self.reward_accel_penalty_cap = 0.25
+        self.reward_accel_abs_limit = 13.5
+        self.reward_accel_abs_penalty_scale = 0.04
+        self.reward_accel_abs_penalty_cap = 0.30
+        self.reward_heading_align_scale = 0.28
+        self.reward_heading_error_scale = 0.05
+        self.reward_progress_tilt_gate_start = 0.20
+        self.reward_progress_knee_gate_min = 0.16
+        self.reward_progress_hip_gate_over = 0.18
+        self.reward_progress_gate_floor = 0.10
+        self.reward_height_deadband = 0.02
+        self.reward_height_penalty_scale = 10.0
+        self.reward_height_penalty_cap = 1.2
+        self.reward_forward_lean_threshold = 0.20
+        self.reward_forward_lean_penalty_scale = 0.9
+        self.reward_forward_lean_penalty_cap = 0.7
+        self.reward_knee_straight_threshold = 0.18
+        self.reward_knee_straight_penalty_scale = 0.70
+        self.reward_hip_overextend_threshold = 1.1
+        self.reward_hip_overextend_penalty_scale = 1.30
+        self.reward_leg_stretch_penalty_scale = 1.20
+        self.reward_stretch_lean_combo_scale = 1.40
 
         self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
         self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
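Most of the new reward terms these parameters feed follow a common `-min(cap, scale * signal)` shape, so any single penalty saturates instead of dominating the total. A minimal sketch of the pattern with this commit's accel-delta values:

```python
def capped_penalty(signal, scale, cap):
    # Grows linearly with the signal, but saturates at -cap so the term
    # can never swamp the progress reward.
    return -min(cap, scale * max(0.0, signal))

# With reward_accel_penalty_scale=0.05 and reward_accel_penalty_cap=0.25:
print(capped_penalty(2.0, scale=0.05, cap=0.25))   # -0.1  (linear region)
print(capped_penalty(50.0, scale=0.05, cap=0.25))  # -0.25 (saturated)
```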
@@ -404,6 +437,10 @@ class WalkEnv(gym.Env):
         self._reward_debug_steps_left = 0
         self._speed_estimate = 0.0
         self._speed_from_acc = 0.0
+        self._prev_accelerometer = np.array(
+            getattr(self.Player.robot, "accelerometer", np.zeros(3)),
+            dtype=np.float32,
+        )
 
         # Randomize the beam target position and heading to increase training diversity
         beam_x = (random() - 0.5) * 10
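Seeding `_prev_accelerometer` in reset keeps the first IMU-delta computation well-defined even before a sensor packet arrives, thanks to the `getattr(..., np.zeros(3))` fallback. A condensed sketch of how that seeded sample is consumed by the accel term in `compute_reward` below (the function name is illustrative):

```python
import numpy as np

def accel_delta(prev, robot):
    # Read the IMU defensively: the getattr default keeps this working
    # even on a robot object that has no "accelerometer" attribute yet.
    now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
    delta = float(np.linalg.norm(now[:3] - prev[:3])) if now.shape[0] >= 3 else 0.0
    return delta, now  # the caller stores `now` as the next step's `prev`
```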
@@ -466,6 +503,7 @@ class WalkEnv(gym.Env):
         for i in range(self.num_waypoints):
             # Each waypoint is placed further along the path
             target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
+            self.target_distance_wp = target_distance_wp
             target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
 
             target_offset = MathOps.rotate_2d_vec(
@@ -490,26 +528,223 @@ class WalkEnv(gym.Env):
 
     def compute_reward(self, previous_pos, current_pos, action):
         height = float(self.Player.world.global_position[2])
+        robot = self.Player.robot
-        is_fallen = height < 0.55
-        if is_fallen:
-            return -20.0
 
         prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
         curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
         dist_delta = prev_dist_to_target - curr_dist_to_target
 
+        is_fallen = height < 0.45
+        if is_fallen:
+            return -2.0
+
+        joint_pos = np.deg2rad(
+            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
+        ) * self.train_sim_flip
+        left_hip_roll = -float(joint_pos[12])
+        right_hip_roll = float(joint_pos[18])
+
+        left_ankle_roll = -float(joint_pos[16])
+        right_ankle_roll = float(joint_pos[22])
+        left_ankle_pitch = -float(joint_pos[15])
+        right_ankle_pitch = float(joint_pos[21])
+        left_knee_flex = abs(float(joint_pos[14]))
+        right_knee_flex = abs(float(joint_pos[20]))
+        avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex)
+
+        max_leg_roll = 0.5  # prevent a splits-like leg posture
+        split_penalty = -0.12 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
+        left_hip_yaw = -float(joint_pos[13])
+        right_hip_yaw = float(joint_pos[19])
+
+        min_leg_separation = 0.04  # minimum leg separation (keeps the legs from pressing together)
+        inward_penalty = 0.3 * min(0.0, (left_hip_roll-min_leg_separation)) + 0.3 * min(0.0, (right_hip_roll-min_leg_separation))  # penalize excessive inward rotation of either leg
+
+
+        # Ankle roll check: prevent excessive eversion or inversion
+        max_ankle_roll = 0.15  # maximum allowed ankle roll angle
+        max_ankle_pitch = 0.25  # maximum allowed ankle pitch angle
+
+        # Penalize excessive ankle eversion/inversion (absolute value too large)
+        ankle_roll_penalty = -0.12 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
+
+        # Penalize the ankles rolling in opposite directions (unstable posture)
+        ankle_roll_cross_penalty = -0.12 * max(0.0, -(left_ankle_roll * right_ankle_roll))
+
+        ankle_pitch_penalty = -0.12 * max(0.0, (abs(left_ankle_pitch) + abs(right_ankle_pitch) - 2 * max_ankle_pitch) / max_ankle_pitch)
+
+        # Penalize excessive yaw of each thigh separately
+        max_hip_yaw = 0.2  # maximum allowed yaw angle
+        left_hip_yaw_penalty = -0.6 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
+        right_hip_yaw_penalty = -0.6 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
+
+        target_vec = self.target_position - current_pos
+        target_dist = float(np.linalg.norm(target_vec))
+        if target_dist > 1e-6:
+            target_heading = math.atan2(float(target_vec[1]), float(target_vec[0]))
+            robot_heading = math.radians(float(robot.global_orientation_euler[2]))
+            heading_error = self._wrap_to_pi(target_heading - robot_heading)
+            heading_align_reward = self.reward_heading_align_scale * math.cos(heading_error)
+            heading_error_penalty = -self.reward_heading_error_scale * abs(heading_error)
+        else:
+            heading_align_reward = 0.0
+            heading_error_penalty = 0.0
+
         # Forward-progress reward (distance delta) with anti-stuck shaping.
-        progress_reward = 22.0 * dist_delta
-        survival_reward = 0.02
-        smoothness_penalty = -0.015 * float(np.linalg.norm(action - self.last_action_for_reward))
+        progress_reward_raw = self.reward_progress_scale * dist_delta
+        survival_reward = self.reward_survival_scale
+        smoothness_penalty = -self.reward_smoothness_scale * float(np.linalg.norm(action - self.last_action_for_reward))
         step_displacement = float(np.linalg.norm(current_pos - previous_pos))
-        if self.step_counter > 30 and step_displacement < 0.006:
-            idle_penalty = -0.06
+        accel_signal = 0.0
+        accel_source = "imu_delta"
+        accel_now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
+        if accel_now.shape[0] >= 3:
+            # Use IMU acceleration delta to reduce gravity bias and punish abrupt bursts.
+            accel_signal = float(np.linalg.norm(accel_now[:3] - self._prev_accelerometer[:3]))
+            self._prev_accelerometer = accel_now
+        accel_penalty = -min(
+            self.reward_accel_penalty_cap,
+            self.reward_accel_penalty_scale * accel_signal,
+        )
+        accel_abs = float(np.linalg.norm(accel_now[:3])) if accel_now.shape[0] >= 3 else 0.0
+        accel_abs_over = max(0.0, accel_abs - self.reward_accel_abs_limit)
+        accel_abs_penalty = -min(
+            self.reward_accel_abs_penalty_cap,
+            self.reward_accel_abs_penalty_scale * accel_abs_over,
+        )
+        if self.step_counter > 30 and step_displacement < 0.015 and self.target_distance_wp > 0.3:
+            idle_penalty = -self.reward_idle_penalty_scale
         else:
             idle_penalty = 0.0
 
-        total = progress_reward + survival_reward + smoothness_penalty + idle_penalty
+        if self.step_counter > self.exploration_start_steps:
+            displacement_novelty = step_displacement / max(1e-6, self.stationary_step_eps)
+            exploration_bonus = min(
+                self.exploration_cap,
+                self.exploration_scale * max(0.0, displacement_novelty - self.exploration_target_novelty),
+            )
+        else:
+            exploration_bonus = 0.0
+
+        # Encourage active/varied knee motions early in training without dominating progress reward.
+        left_knee_act = float(action[14])
+        right_knee_act = float(action[20])
+        left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
+        right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
+        knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
+        knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
+        if self.step_counter > 10:
+            knee_explore_reward = min(
+                self.reward_knee_explore_cap,
+                self.reward_knee_explore_scale * knee_action_mag
+                + self.reward_knee_explore_delta_scale * knee_action_delta,
+            )
+        else:
+            knee_explore_reward = 0.0
+
+        # Directly encourage observable knee flexion instead of only action exploration.
+        knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max(
+            0.0, self.reward_knee_lift_target - avg_knee_flex
+        )
+
+        # Encourage hip-pitch exploration to improve forward stride generation.
+        left_hip_pitch_act = float(action[11])
+        right_hip_pitch_act = float(action[17])
+        left_hip_pitch_delta = abs(left_hip_pitch_act - float(self.last_action_for_reward[11]))
+        right_hip_pitch_delta = abs(right_hip_pitch_act - float(self.last_action_for_reward[17]))
+        hip_pitch_action_mag = 0.5 * (abs(left_hip_pitch_act) + abs(right_hip_pitch_act))
+        hip_pitch_action_delta = 0.5 * (left_hip_pitch_delta + right_hip_pitch_delta)
+        if self.step_counter > 10:
+            hip_pitch_explore_reward = min(
+                self.reward_hip_pitch_explore_cap,
+                self.reward_hip_pitch_explore_scale * hip_pitch_action_mag
+                + self.reward_hip_pitch_explore_delta_scale * hip_pitch_action_delta,
+            )
+        else:
+            hip_pitch_explore_reward = 0.0
+
+        if curr_dist_to_target < 0.3:
+            arrival_bonus = self.target_distance_wp * 8  # reward for reaching the target point
+        else:
+            arrival_bonus = 0.0
+
+        target_height = self.initial_height
+        height_error = height - target_height
+        # Keep height shaping smooth and bounded to avoid exploding negatives.
+        height_dev = max(0.0, abs(height_error) - self.reward_height_deadband)
+        height_penalty = -min(
+            self.reward_height_penalty_cap,
+            self.reward_height_penalty_scale * (height_dev ** 2),
+        )
+
+        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
+        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
+        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
+        pitch_lean = abs(float(projected_gravity[0]))
+        forward_lean_excess = max(0.0, pitch_lean - self.reward_forward_lean_threshold)
+        forward_lean_penalty = -min(
+            self.reward_forward_lean_penalty_cap,
+            self.reward_forward_lean_penalty_scale * (forward_lean_excess ** 2),
+        )
+
+        # Gate progress reward when posture quality is poor.
+        # Important: include leg overextension so upright torso cannot exploit progress reward.
+        tilt_excess = max(0.0, tilt_mag - self.reward_progress_tilt_gate_start)
+        knee_gate_excess = max(0.0, self.reward_progress_knee_gate_min - avg_knee_flex)
+        left_hip_pitch = float(joint_pos[11])
+        right_hip_pitch = -float(joint_pos[17])
+        left_hip_over = max(0.0, abs(left_hip_pitch) - self.reward_hip_overextend_threshold)
+        right_hip_over = max(0.0, abs(right_hip_pitch) - self.reward_hip_overextend_threshold)
+        hip_over_mean = 0.5 * (left_hip_over + right_hip_over)
+
+        hip_gate_excess = max(0.0, hip_over_mean - self.reward_progress_hip_gate_over)
+        posture_gate = 1.0 - 1.2 * tilt_excess - 2.2 * knee_gate_excess - 2.4 * hip_gate_excess
+        posture_gate = float(np.clip(posture_gate, self.reward_progress_gate_floor, 1.0))
+        progress_reward = progress_reward_raw * posture_gate
+
+        knee_straight_penalty = -self.reward_knee_straight_penalty_scale * max(
+            0.0, self.reward_knee_straight_threshold - avg_knee_flex
+        )
+
+        hip_overextend_penalty = -self.reward_hip_overextend_penalty_scale * (left_hip_over + right_hip_over)
+
+        # Penalize over-stretched legs even if torso stays upright.
+        stretch_amount = left_hip_over + right_hip_over
+        straight_amount = max(0.0, self.reward_knee_straight_threshold - avg_knee_flex)
+        leg_stretch_penalty = -self.reward_leg_stretch_penalty_scale * stretch_amount * (1.0 + 2.5 * straight_amount)
+
+        # Keep extra combo penalty, but no longer vanish when torso is upright.
+        stretch_lean_combo_penalty = -self.reward_stretch_lean_combo_scale * (0.5 + tilt_mag) * stretch_amount * (1.0 + 3.0 * straight_amount)
+        posture_penalty = -0.6 * (tilt_mag)
+        total = (
+            progress_reward
+            + survival_reward
+            + smoothness_penalty
+            + accel_penalty
+            + accel_abs_penalty
+            + idle_penalty
+            + split_penalty
+            + inward_penalty
+            + ankle_roll_penalty
+            + ankle_pitch_penalty
+            + ankle_roll_cross_penalty
+            + left_hip_yaw_penalty
+            + right_hip_yaw_penalty
+            + heading_align_reward
+            + heading_error_penalty
+            + knee_straight_penalty
+            + hip_overextend_penalty
+            + leg_stretch_penalty
+            + stretch_lean_combo_penalty
+            # + exploration_bonus
+            # + knee_explore_reward
+            # + knee_lift_shortfall_penalty
+            + hip_pitch_explore_reward
+            + arrival_bonus
+            + height_penalty
+            + forward_lean_penalty
+            + posture_penalty
+        )
 
         now = time.time()
         if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
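The structural change in this version of `compute_reward` is that forward progress is multiplied by a posture-quality gate before entering the total, so tilted, straight-legged, or hip-overextended gaits cannot farm the progress term. A minimal numeric sketch of the gate arithmetic, using this commit's constants:

```python
import numpy as np

def posture_gate(tilt_mag, avg_knee_flex, hip_over_mean,
                 tilt_start=0.20, knee_min=0.16, hip_over=0.18, floor=0.10):
    # Same arithmetic as above: each posture defect eats into the gate,
    # clipped to [floor, 1.0] so some progress signal always survives.
    gate = (1.0
            - 1.2 * max(0.0, tilt_mag - tilt_start)
            - 2.2 * max(0.0, knee_min - avg_knee_flex)
            - 2.4 * max(0.0, hip_over_mean - hip_over))
    return float(np.clip(gate, floor, 1.0))

print(posture_gate(0.10, 0.30, 0.00))  # 1.0  -> progress fully rewarded
print(posture_gate(0.60, 0.02, 0.40))  # 0.1  -> clipped to the gate floor
```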
@@ -522,7 +757,35 @@ class WalkEnv(gym.Env):
                 f"progress_reward:{progress_reward:.4f},"
                 f"survival_reward:{survival_reward:.4f},"
                 f"smoothness_penalty:{smoothness_penalty:.4f},"
+                f"accel_penalty:{accel_penalty:.4f},"
+                f"accel_source:{accel_source},"
+                f"accel_signal:{accel_signal:.4f},"
+                f"accel_abs:{accel_abs:.4f},"
+                f"accel_abs_penalty:{accel_abs_penalty:.4f},"
                 f"idle_penalty:{idle_penalty:.4f},"
+                f"split_penalty:{split_penalty:.4f},"
+                f"inward_penalty:{inward_penalty:.4f},"
+                f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
+                f"ankle_pitch_penalty:{ankle_pitch_penalty:.4f},"
+                f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
+                f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
+                f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
+                f"heading_align_reward:{heading_align_reward:.4f},"
+                f"heading_error_penalty:{heading_error_penalty:.4f},"
+                f"knee_straight_penalty:{knee_straight_penalty:.4f},"
+                f"hip_overextend_penalty:{hip_overextend_penalty:.4f},"
+                f"leg_stretch_penalty:{leg_stretch_penalty:.4f},"
+                f"stretch_lean_combo_penalty:{stretch_lean_combo_penalty:.4f},"
+                f"posture_gate:{posture_gate:.4f},"
+                f"progress_reward_raw:{progress_reward_raw:.4f},"
+                # f"exploration_bonus:{exploration_bonus:.4f},"
+                f"height_penalty:{height_penalty:.4f},"
+                f"forward_lean_penalty:{forward_lean_penalty:.4f},"
+                # f"knee_explore_reward:{knee_explore_reward:.4f},"
+                f"posture_penalty:{posture_penalty:.4f},"
+                # f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
+                f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
+                f"arrival_bonus:{arrival_bonus:.4f},"
                 f"total:{total:.4f}"
             )
         return total
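The heading terms above rely on a `self._wrap_to_pi` helper that is referenced but not shown in this diff. A conventional implementation (an assumption, not necessarily the project's exact code) and the resulting alignment term:

```python
import math

def wrap_to_pi(angle):
    # Map any angle into (-pi, pi] so the difference of two headings is the
    # shortest signed rotation; cos(error) then peaks exactly when aligned.
    return math.atan2(math.sin(angle), math.cos(angle))

# Heading-alignment term as used above (scale 0.28 from this commit):
err = wrap_to_pi(math.radians(350) - math.radians(10))  # -20 deg, not 340 deg
print(math.degrees(err), 0.28 * math.cos(err))
```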
@@ -535,22 +798,26 @@ class WalkEnv(gym.Env):
         max_action_delta = 0.5  # Limit how much the action can change from the previous step to encourage smoother motions.
         if self.previous_action is not None:
             action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
+        # Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist.
         action[0:2] = 0
-        action[3] = 4
-        action[7] = -4
-        action[2] = 0
-        action[6] = 0
+        action[3] = np.clip(action[3], 3, 5)
+        action[7] = np.clip(action[7], -5, -3)
+        action[2] = np.clip(action[2], -6, 6)
+        action[6] = np.clip(action[6], -6, 6)
         action[4] = 0
-        action[5] = -5
+        action[5] = np.clip(action[5], -8, -2)
         action[8] = 0
-        action[9] = 5
-        action[10] = 0
-        action[11] = np.clip(action[11], -6, 6)
-        action[17] = np.clip(action[17], -6, 6)
-        # action[11] = 1
-        # action[17] = 1
-        # action[12] = -0.01
-        # action[18] = 0.01
+        action[9] = np.clip(action[9], 2, 8)
+        action[10] = np.clip(action[10], -0.6, 0.6)
+        # Boost knee command range so policy can produce visible knee flexion earlier.
+        action[14] = np.clip(action[14], 0, 10.0)
+        action[20] = np.clip(action[20], -10.0, 0)
+        # action[14] = 1  # the correct left knee sign
+        # action[20] = -1  # the correct right knee sign
+        action[11] = np.clip(action[11], -8, 8)
+        action[17] = np.clip(action[17], -8, 8)
+        # action[12] = -1
+        # action[18] = 1
         # action[13] = -1.0
         # action[19] = 1.0
         self.previous_action = action.copy()
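The first three lines of this hunk implement a simple per-step action rate limiter: each joint command may move at most `max_action_delta` away from the previous command, which bounds commanded joint velocity and smooths the gait. As a standalone sketch:

```python
import numpy as np

def rate_limit(action, previous_action, max_delta=0.5):
    # Keep each command within +/- max_delta of the last command.
    if previous_action is None:
        return action
    return np.clip(action, previous_action - max_delta, previous_action + max_delta)

prev = np.zeros(3)
print(rate_limit(np.array([2.0, -0.3, 0.1]), prev))  # [ 0.5 -0.3  0.1]
```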
@@ -599,7 +866,7 @@ class WalkEnv(gym.Env):
             self.target_position = self.point_list[self.waypoint_index]
 
         # Fall detection and penalty
-        is_fallen = self.Player.world.global_position[2] < 0.55
+        is_fallen = self.Player.world.global_position[2] < 0.45
 
         # terminal state: the robot is falling or timeout
         terminated = is_fallen or self.step_counter > 800 or self.route_completed
@@ -615,21 +882,21 @@ class Train(Train_Base):
     def train(self, args):
 
         # --------------------------------------- Learning parameters
-        n_envs = 12
+        n_envs = 20
         server_warmup_sec = 3.0
-        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
-        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
-        total_steps = 30000000
-        learning_rate = 2e-4
-        ent_coef = 0.08
+        n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
+        minibatch_size = 1024  # should be a factor of (n_steps_per_env * n_envs)
+        total_steps = 180000000
+        learning_rate = 3e-4
+        ent_coef = 0.05
         clip_range = 0.2
-        gamma = 0.97
-        n_epochs = 3
+        gamma = 0.99
+        n_epochs = 5
         enable_eval = True
         monitor_train_env = False
-        eval_freq_mult = 60
-        save_freq_mult = 60
-        eval_eps = 3
+        eval_freq_mult = 15
+        save_freq_mult = 15
+        eval_eps = 7
         folder_name = f'Walk_R{self.robot_type}'
         model_path = f'./scripts/gyms/logs/{folder_name}/'
 
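For orientation, these hyperparameters map onto stable-baselines3's PPO roughly as follows; the `WalkEnv` constructor arguments are omitted and `make_env` is a hypothetical factory, so this is a sketch rather than the repository's actual wiring:

```python
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

def make_env(rank):
    def _init():
        return WalkEnv()  # constructor args elided; see the class above
    return _init

env = SubprocVecEnv([make_env(i) for i in range(20)])  # n_envs = 20
model = PPO(
    "MlpPolicy", env,
    n_steps=1024,       # per-env rollout length -> buffer of 1024 * 20 = 20480
    batch_size=1024,    # must divide n_steps * n_envs (20480 / 1024 = 20)
    learning_rate=3e-4, ent_coef=0.05, clip_range=0.2,
    gamma=0.99, n_epochs=5,
)
model.learn(total_timesteps=180_000_000)
```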
BIN scripts/gyms/logs/Walk_version_0.10.zip (new file; binary file not shown)
@@ -7,7 +7,7 @@ from random import random
 from random import uniform
 from itertools import count
 
-from stable_baselines3 import PPO
+from stable_baselines3 import PPO, TD3, DDPG, SAC, A2C
 from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
 
@@ -66,6 +66,7 @@ class WalkEnv(gym.Env):
         self._last_sync_time = None
         self._speed_estimate = 0.0
         self._speed_from_acc = 0.0
+        self._prev_accelerometer = np.zeros(3, dtype=np.float32)
         self._speed_smoothing = 0.85
         self._fallback_dt = 0.02
         target_hz_env = 0
@@ -125,7 +126,7 @@ class WalkEnv(gym.Env):
                 0.0,  # 22: Right_Ankle_Roll (rle6)
             ]
         )
-        self.joint_nominal_position = np.zeros(self.no_of_actions)
+        # self.joint_nominal_position = np.zeros(self.no_of_actions)
         self.train_sim_flip = np.array(
             [
                 1.0,  # 0: Head_yaw (he1)
@@ -154,7 +155,7 @@ class WalkEnv(gym.Env):
             ]
         )
 
-        self.scaling_factor = 0.3
+        self.scaling_factor = 0.5
         # self.scaling_factor = 1
 
         # Encourage a minimum lateral stance so the policy avoids feet overlap.
@@ -164,8 +165,8 @@ class WalkEnv(gym.Env):
         self.enable_reset_perturb = False
         self.reset_beam_yaw_range_deg = 180.0
         self.reset_target_bearing_range_deg = 0.0
-        self.reset_target_distance_min = 3.0
-        self.reset_target_distance_max = 5.0
+        self.reset_target_distance_min = 5
+        self.reset_target_distance_max = 10
         if self.reset_target_distance_min > self.reset_target_distance_max:
             self.reset_target_distance_min, self.reset_target_distance_max = (
                 self.reset_target_distance_max,
@@ -175,7 +176,7 @@ class WalkEnv(gym.Env):
         self.reset_perturb_steps = 4
         self.reset_recover_steps = 8
 
-        self.reward_smoothness_scale = 0.06
+        self.reward_smoothness_scale = 0.03
         self.reward_smoothness_cap = 0.45
         self.reward_forward_stability_gate = 0.35
         self.reward_forward_tilt_hard_threshold = 0.50
@@ -192,17 +193,17 @@ class WalkEnv(gym.Env):
         self.in_place_drift_penalty_scale = 1.20
         self.waypoint_reach_distance = 0.3
         self.num_waypoints = 1
-        self.exploration_start_steps = 80
-        self.exploration_scale = 0.08
-        self.exploration_cap = 0.25
+        self.exploration_start_steps = 40
+        self.exploration_scale = 0.012
+        self.exploration_cap = 0.2
         self.exploration_target_novelty = 1.0
         self.exploration_sigma = 0.7
         self.reward_stride_swing_scale = 0.20
         self.reward_stride_phase_scale = 0.18
         self.reward_knee_drive_scale = 0.10
         self.reward_knee_lift_scale = 0.12
-        self.reward_knee_lift_target = 0.95
-        self.reward_knee_lift_shortfall_scale = 0.20
+        self.reward_knee_lift_target = 0.15
+        self.reward_knee_lift_shortfall_scale = 0.05
         self.reward_knee_overbend_threshold = 0.60
         self.reward_knee_overbend_scale = 0.35
         self.reward_hip_lift_scale = 0.12
@@ -218,6 +219,22 @@ class WalkEnv(gym.Env):
         self.knee_phase_max_hold_frames = 28
         self.knee_phase_hold_penalty_scale = 0.18
         self.reward_stride_cap = 0.80
+        self.reward_knee_explore_scale = 0.03
+        self.reward_knee_explore_delta_scale = 0.03
+        self.reward_knee_explore_cap = 0.10
+        self.reward_hip_pitch_explore_scale = 0.07
+        self.reward_hip_pitch_explore_delta_scale = 0.07
+        self.reward_hip_pitch_explore_cap = 0.10
+        self.reward_progress_scale = 18
+        self.reward_survival_scale = 0.5
+        self.reward_idle_penalty_scale = 0.6
+        self.reward_accel_penalty_scale = 0.08
+        self.reward_accel_penalty_cap = 0.40
+        self.reward_accel_abs_limit = 13.5
+        self.reward_accel_abs_penalty_scale = 0.05
+        self.reward_accel_abs_penalty_cap = 0.40
+        self.reward_heading_align_scale = 0.28
+        self.reward_heading_error_scale = 0.05
 
         self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
         self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
@@ -404,6 +421,10 @@ class WalkEnv(gym.Env):
         self._reward_debug_steps_left = 0
         self._speed_estimate = 0.0
         self._speed_from_acc = 0.0
+        self._prev_accelerometer = np.array(
+            getattr(self.Player.robot, "accelerometer", np.zeros(3)),
+            dtype=np.float32,
+        )
 
         # Randomize the beam target position and heading to increase training diversity
         beam_x = (random() - 0.5) * 10
@@ -466,6 +487,7 @@ class WalkEnv(gym.Env):
         for i in range(self.num_waypoints):
             # Each waypoint is placed further along the path
            target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
+            self.target_distance_wp = target_distance_wp
             target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)
 
             target_offset = MathOps.rotate_2d_vec(
@@ -490,26 +512,174 @@ class WalkEnv(gym.Env):
 
     def compute_reward(self, previous_pos, current_pos, action):
         height = float(self.Player.world.global_position[2])
+        robot = self.Player.robot
-        is_fallen = height < 0.55
-        if is_fallen:
-            return -20.0
 
         prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
         curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
         dist_delta = prev_dist_to_target - curr_dist_to_target
 
+        is_fallen = height < 0.55
+        if is_fallen:
+            return -2.0
+
+        joint_pos = np.deg2rad(
+            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
+        ) * self.train_sim_flip
+        left_hip_roll = -float(joint_pos[12])
+        right_hip_roll = float(joint_pos[18])
+
+        left_ankle_roll = -float(joint_pos[16])
+        right_ankle_roll = float(joint_pos[22])
+        left_knee_flex = abs(float(joint_pos[14]))
+        right_knee_flex = abs(float(joint_pos[20]))
+        avg_knee_flex = 0.5 * (left_knee_flex + right_knee_flex)
+
+        max_leg_roll = 0.5  # prevent a splits-like leg posture
+        split_penalty = -0.12 * max(0.0, (left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
+        left_hip_yaw = -float(joint_pos[13])
+        right_hip_yaw = float(joint_pos[19])
+
+        min_leg_separation = 0.04  # minimum leg separation (keeps the legs from pressing together)
+        inward_penalty = 0.3 * min(0.0, (left_hip_roll-min_leg_separation)) + 0.3 * min(0.0, (right_hip_roll-min_leg_separation))  # penalize excessive inward rotation of either leg
+
+
+        # Ankle roll check: prevent excessive eversion or inversion
+        max_ankle_roll = 0.15  # maximum allowed ankle roll angle
+
+        # Penalize excessive ankle eversion/inversion (absolute value too large)
+        ankle_roll_penalty = -0.12 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)
+
+        # Penalize the ankles rolling in opposite directions (unstable posture)
+        ankle_roll_cross_penalty = -0.12 * max(0.0, -(left_ankle_roll * right_ankle_roll))
+
+        # Penalize excessive yaw of each thigh separately
+        max_hip_yaw = 0.2  # maximum allowed yaw angle
+        left_hip_yaw_penalty = -0.6 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
+        right_hip_yaw_penalty = -0.6 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
+
+        target_vec = self.target_position - current_pos
+        target_dist = float(np.linalg.norm(target_vec))
+        if target_dist > 1e-6:
+            target_heading = math.atan2(float(target_vec[1]), float(target_vec[0]))
+            robot_heading = math.radians(float(robot.global_orientation_euler[2]))
+            heading_error = self._wrap_to_pi(target_heading - robot_heading)
+            heading_align_reward = self.reward_heading_align_scale * math.cos(heading_error)
+            heading_error_penalty = -self.reward_heading_error_scale * abs(heading_error)
+        else:
+            heading_align_reward = 0.0
+            heading_error_penalty = 0.0
+
         # Forward-progress reward (distance delta) with anti-stuck shaping.
-        progress_reward = 22.0 * dist_delta
-        survival_reward = 0.02
-        smoothness_penalty = -0.015 * float(np.linalg.norm(action - self.last_action_for_reward))
+        progress_reward = self.reward_progress_scale * dist_delta
+        survival_reward = self.reward_survival_scale
+        smoothness_penalty = -self.reward_smoothness_scale * float(np.linalg.norm(action - self.last_action_for_reward))
         step_displacement = float(np.linalg.norm(current_pos - previous_pos))
-        if self.step_counter > 30 and step_displacement < 0.006:
-            idle_penalty = -0.06
+        accel_signal = 0.0
+        accel_source = "imu_delta"
+        accel_now = np.array(getattr(robot, "accelerometer", np.zeros(3)), dtype=np.float32)
+        if accel_now.shape[0] >= 3:
+            # Use IMU acceleration delta to reduce gravity bias and punish abrupt bursts.
+            accel_signal = float(np.linalg.norm(accel_now[:3] - self._prev_accelerometer[:3]))
+            self._prev_accelerometer = accel_now
+        accel_penalty = -min(
+            self.reward_accel_penalty_cap,
+            self.reward_accel_penalty_scale * accel_signal,
+        )
+        accel_abs = float(np.linalg.norm(accel_now[:3])) if accel_now.shape[0] >= 3 else 0.0
+        accel_abs_over = max(0.0, accel_abs - self.reward_accel_abs_limit)
+        accel_abs_penalty = -min(
+            self.reward_accel_abs_penalty_cap,
+            self.reward_accel_abs_penalty_scale * accel_abs_over,
+        )
+        if self.step_counter > 30 and step_displacement < 0.015 and self.target_distance_wp > 0.3:
+            idle_penalty = -self.reward_idle_penalty_scale
         else:
             idle_penalty = 0.0
 
-        total = progress_reward + survival_reward + smoothness_penalty + idle_penalty
+        if self.step_counter > self.exploration_start_steps:
+            displacement_novelty = step_displacement / max(1e-6, self.stationary_step_eps)
+            exploration_bonus = min(
+                self.exploration_cap,
+                self.exploration_scale * max(0.0, displacement_novelty - self.exploration_target_novelty),
+            )
+        else:
+            exploration_bonus = 0.0
+
+        # Encourage active/varied knee motions early in training without dominating progress reward.
+        left_knee_act = float(action[14])
+        right_knee_act = float(action[20])
+        left_knee_delta = abs(left_knee_act - float(self.last_action_for_reward[14]))
+        right_knee_delta = abs(right_knee_act - float(self.last_action_for_reward[20]))
+        knee_action_mag = 0.5 * (abs(left_knee_act) + abs(right_knee_act))
+        knee_action_delta = 0.5 * (left_knee_delta + right_knee_delta)
+        if self.step_counter > 10:
+            knee_explore_reward = min(
+                self.reward_knee_explore_cap,
+                self.reward_knee_explore_scale * knee_action_mag
+                + self.reward_knee_explore_delta_scale * knee_action_delta,
+            )
+        else:
+            knee_explore_reward = 0.0
+
+        # Directly encourage observable knee flexion instead of only action exploration.
+        knee_lift_shortfall_penalty = -self.reward_knee_lift_shortfall_scale * max(
+            0.0, self.reward_knee_lift_target - avg_knee_flex
+        )
+
+        # Encourage hip-pitch exploration to improve forward stride generation.
+        left_hip_pitch_act = float(action[11])
+        right_hip_pitch_act = float(action[17])
+        left_hip_pitch_delta = abs(left_hip_pitch_act - float(self.last_action_for_reward[11]))
+        right_hip_pitch_delta = abs(right_hip_pitch_act - float(self.last_action_for_reward[17]))
+        hip_pitch_action_mag = 0.5 * (abs(left_hip_pitch_act) + abs(right_hip_pitch_act))
+        hip_pitch_action_delta = 0.5 * (left_hip_pitch_delta + right_hip_pitch_delta)
+        if self.step_counter > 10:
+            hip_pitch_explore_reward = min(
+                self.reward_hip_pitch_explore_cap,
+                self.reward_hip_pitch_explore_scale * hip_pitch_action_mag
+                + self.reward_hip_pitch_explore_delta_scale * hip_pitch_action_delta,
+            )
+        else:
+            hip_pitch_explore_reward = 0.0
+
+        if curr_dist_to_target < 0.3:
+            arrival_bonus = self.target_distance_wp * 8  # reward for reaching the target point
+        else:
+            arrival_bonus = 0.0
+
+        target_height = self.initial_height
+        height_error = height - target_height
+        height_error = height - target_height
+
+        height_penalty = -0.5 * (math.exp(15*abs(height_error))-1)
+
+        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
+        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
+        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
+        posture_penalty = -0.6 * (tilt_mag)
+        total = (
+            progress_reward
+            + survival_reward
+            + smoothness_penalty
+            + accel_penalty
+            + accel_abs_penalty
+            + idle_penalty
+            + split_penalty
+            + inward_penalty
+            + ankle_roll_penalty
+            + ankle_roll_cross_penalty
+            + left_hip_yaw_penalty
+            + right_hip_yaw_penalty
+            + heading_align_reward
+            + heading_error_penalty
+            # + exploration_bonus
+            # + knee_explore_reward
+            # + knee_lift_shortfall_penalty
+            # + hip_pitch_explore_reward
+            + arrival_bonus
+            + height_penalty
+            + posture_penalty
+        )
 
         now = time.time()
         if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
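Note that the height shaping here is an unbounded exponential, while the other version of `compute_reward` in this compare replaces it with a deadband plus a capped quadratic. A quick numeric comparison of the two forms (helper names are illustrative):

```python
import math

def height_penalty_exp(err):
    # This commit: unbounded exponential shaping.
    return -0.5 * (math.exp(15 * abs(err)) - 1)

def height_penalty_capped(err, deadband=0.02, scale=10.0, cap=1.2):
    # The other variant: deadband + quadratic, saturating at -cap.
    dev = max(0.0, abs(err) - deadband)
    return -min(cap, scale * dev ** 2)

for err in (0.01, 0.05, 0.20):
    print(err, round(height_penalty_exp(err), 3), round(height_penalty_capped(err), 3))
# 0.20 m of height error already costs about -9.5 exponentially,
# versus -0.324 in the capped form.
```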
@@ -522,7 +692,27 @@ class WalkEnv(gym.Env):
                 f"progress_reward:{progress_reward:.4f},"
                 f"survival_reward:{survival_reward:.4f},"
                 f"smoothness_penalty:{smoothness_penalty:.4f},"
+                f"accel_penalty:{accel_penalty:.4f},"
+                f"accel_source:{accel_source},"
+                f"accel_signal:{accel_signal:.4f},"
+                f"accel_abs:{accel_abs:.4f},"
+                f"accel_abs_penalty:{accel_abs_penalty:.4f},"
                 f"idle_penalty:{idle_penalty:.4f},"
+                f"split_penalty:{split_penalty:.4f},"
+                f"inward_penalty:{inward_penalty:.4f},"
+                f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
+                f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
+                f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
+                f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
+                f"heading_align_reward:{heading_align_reward:.4f},"
+                f"heading_error_penalty:{heading_error_penalty:.4f},"
+                # f"exploration_bonus:{exploration_bonus:.4f},"
+                f"height_penalty:{height_penalty:.4f},"
+                # f"knee_explore_reward:{knee_explore_reward:.4f},"
+                f"posture_penalty:{posture_penalty:.4f},"
+                # f"knee_lift_shortfall_penalty:{knee_lift_shortfall_penalty:.4f},"
+                # f"hip_pitch_explore_reward:{hip_pitch_explore_reward:.4f},"
+                f"arrival_bonus:{arrival_bonus:.4f},"
                 f"total:{total:.4f}"
             )
         return total
@@ -535,22 +725,26 @@ class WalkEnv(gym.Env):
         max_action_delta = 0.5  # Limit how much the action can change from the previous step to encourage smoother motions.
         if self.previous_action is not None:
             action = np.clip(action, self.previous_action - max_action_delta, self.previous_action + max_action_delta)
+        # Loosen upper-body constraints: keep motion bounded but no longer hard-lock head/arms/waist.
         action[0:2] = 0
-        action[3] = 4
-        action[7] = -4
-        action[2] = 0
-        action[6] = 0
+        action[3] = np.clip(action[3], 3, 5)
+        action[7] = np.clip(action[7], -5, -3)
+        action[2] = np.clip(action[2], -6, 6)
+        action[6] = np.clip(action[6], -6, 6)
         action[4] = 0
-        action[5] = -5
+        action[5] = np.clip(action[5], -8, -2)
         action[8] = 0
-        action[9] = 5
-        action[10] = 0
-        action[11] = np.clip(action[11], -6, 6)
-        action[17] = np.clip(action[17], -6, 6)
+        action[9] = np.clip(action[9], 2, 8)
+        action[10] = np.clip(action[10], -0.6, 0.6)
+        # Boost knee command range so policy can produce visible knee flexion earlier.
+        action[14] = np.clip(action[14], 0, 10.0)
+        action[20] = np.clip(action[20], -10.0, 0)
+        # action[14] = 1  # the correct left knee sign
+        # action[20] = -1  # the correct right knee sign
         # action[11] = 1
         # action[17] = 1
-        # action[12] = -0.01
-        # action[18] = 0.01
+        # action[12] = -1
+        # action[18] = 1
         # action[13] = -1.0
         # action[19] = 1.0
         self.previous_action = action.copy()
@@ -615,21 +809,21 @@ class Train(Train_Base):
     def train(self, args):
 
         # --------------------------------------- Learning parameters
-        n_envs = 12
+        n_envs = 20
         server_warmup_sec = 3.0
         n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
         minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
-        total_steps = 30000000
+        total_steps = 90000000
         learning_rate = 2e-4
-        ent_coef = 0.08
+        ent_coef = 0.035
         clip_range = 0.2
         gamma = 0.97
         n_epochs = 3
         enable_eval = True
         monitor_train_env = False
-        eval_freq_mult = 30
-        save_freq_mult = 20
-        eval_eps = 3
+        eval_freq_mult = 60
+        save_freq_mult = 60
+        eval_eps = 7
         folder_name = f'Walk_R{self.robot_type}'
         model_path = f'./scripts/gyms/logs/{folder_name}/'
 
BIN scripts/gyms/logs/Walk_version_0.2.0.zip (new file; binary file not shown)