train Walk and tackle the memory leak issue.
@@ -53,8 +53,8 @@ class WalkEnv(gym.Env):
self.route_completed = False
self.debug_every_n_steps = 5
self.enable_debug_joint_status = False
self.reward_debug_interval_sec = float(os.environ.get("GYM_CPU_REWARD_DEBUG_INTERVAL_SEC", "600"))
self.reward_debug_burst_steps = int(os.environ.get("GYM_CPU_REWARD_DEBUG_BURST_STEPS", "10"))
self.reward_debug_interval_sec = 600.0
self.reward_debug_burst_steps = 10
self._reward_debug_last_time = time.time()
self._reward_debug_steps_left = 0
self.calibrate_nominal_from_neutral = True
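The two debug knobs above work as a time-gated burst: at most once every `reward_debug_interval_sec` seconds, the env opens a window of `reward_debug_burst_steps` verbose steps (the `_reward_debug_last_time` / `_reward_debug_steps_left` counters further down in this diff implement exactly that). A minimal standalone sketch of the pattern, with a hypothetical `BurstLogger` name:

```python
import time

class BurstLogger:
    """Log verbosely for a fixed number of steps, at most once per interval."""

    def __init__(self, interval_sec: float = 600.0, burst_steps: int = 10):
        self.interval_sec = interval_sec
        self.burst_steps = burst_steps
        self._last_time = time.time()
        self._steps_left = 0

    def should_log(self) -> bool:
        now = time.time()
        # Re-arm the burst window once the interval has elapsed.
        if self.interval_sec > 0 and now - self._last_time >= self.interval_sec:
            self._last_time = now
            self._steps_left = self.burst_steps
        # Consume one verbose step from the current window, if any.
        if self._steps_left > 0:
            self._steps_left -= 1
            return True
        return False
```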
@@ -64,6 +64,10 @@ class WalkEnv(gym.Env):
self._target_hz = 0.0
self._target_dt = 0.0
self._last_sync_time = None
self._speed_estimate = 0.0
self._speed_from_acc = 0.0
self._speed_smoothing = 0.85
self._fallback_dt = 0.02
target_hz_env = 0
if target_hz_env:
try:
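`_speed_smoothing = 0.85` suggests the speed estimate is an exponential moving average of per-step velocity samples. The update rule itself is outside this hunk, so the following is only a sketch of the usual form, not the committed code:

```python
def smooth_speed(prev_estimate: float, raw_speed: float, smoothing: float = 0.85) -> float:
    # Standard EMA: a high `smoothing` keeps most of the old estimate,
    # so noisy single-step speed readings are damped.
    return smoothing * prev_estimate + (1.0 - smoothing) * raw_speed

# e.g. a 0.5 m/s estimate nudged by a noisy 0.9 m/s sample -> 0.56 m/s
print(smooth_speed(0.5, 0.9))
```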
@@ -158,10 +162,10 @@ class WalkEnv(gym.Env):

# Small reset perturbations for robustness training.
self.enable_reset_perturb = False
self.reset_beam_yaw_range_deg = float(os.environ.get("GYM_CPU_RESET_BEAM_YAW_RANGE_DEG", "180"))
self.reset_target_bearing_range_deg = float(os.environ.get("GYM_CPU_RESET_TARGET_BEARING_RANGE_DEG", "120"))
self.reset_target_distance_min = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MIN", "1.2"))
self.reset_target_distance_max = float(os.environ.get("GYM_CPU_RESET_TARGET_DISTANCE_MAX", "2.8"))
self.reset_beam_yaw_range_deg = 180.0
self.reset_target_bearing_range_deg = 0.0
self.reset_target_distance_min = 3.0
self.reset_target_distance_max = 5.0
if self.reset_target_distance_min > self.reset_target_distance_max:
    self.reset_target_distance_min, self.reset_target_distance_max = (
        self.reset_target_distance_max,
@@ -171,14 +175,61 @@ class WalkEnv(gym.Env):
self.reset_perturb_steps = 4
self.reset_recover_steps = 8

self.reward_smoothness_scale = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_SCALE", "0.06"))
self.reward_smoothness_cap = float(os.environ.get("GYM_CPU_REWARD_SMOOTHNESS_CAP", "0.45"))
self.reward_head_toward_bonus = float(os.environ.get("GYM_CPU_REWARD_HEAD_TOWARD_BONUS", "1"))
self.reward_smoothness_scale = 0.06
self.reward_smoothness_cap = 0.45
self.reward_forward_stability_gate = 0.35
self.reward_forward_tilt_hard_threshold = 0.50
self.reward_forward_tilt_hard_scale = 0.20
self.reward_head_toward_bonus = 1.0
self.turn_stationary_radius = 0.2
self.turn_stationary_penalty_scale = 3.0
self.stationary_start_steps = 20
self.stationary_step_eps = 0.015
self.stationary_penalty_scale = 1.2
self.train_stage = "walk"
self.in_place_radius = 0.18
self.in_place_center_reward_scale = 0.60
self.in_place_drift_penalty_scale = 1.20
self.waypoint_reach_distance = 0.3
self.num_waypoints = 1
self.exploration_start_steps = 80
self.exploration_scale = 0.08
self.exploration_cap = 0.25
self.exploration_target_novelty = 1.0
self.exploration_sigma = 0.7
self.reward_stride_swing_scale = 0.20
self.reward_stride_phase_scale = 0.18
self.reward_knee_drive_scale = 0.10
self.reward_knee_lift_scale = 0.12
self.reward_knee_lift_target = 0.95
self.reward_knee_lift_shortfall_scale = 0.20
self.reward_knee_overbend_threshold = 0.60
self.reward_knee_overbend_scale = 0.35
self.reward_hip_lift_scale = 0.12
self.reward_hip_lift_target = 0.80
self.reward_knee_alternate_scale = 0.10
self.reward_knee_bilateral_scale = 0.16
self.reward_single_leg_penalty_scale = 0.22
self.reward_knee_phase_switch_scale = 0.14
self.knee_phase_deadband = 0.10
self.knee_phase_min_interval = 18
self.knee_phase_target_interval = 22
self.knee_phase_fast_switch_penalty_scale = 0.10
self.knee_phase_max_hold_frames = 28
self.knee_phase_hold_penalty_scale = 0.18
self.reward_stride_cap = 0.80

self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.action_history_len = 50
self.prev_action_history = np.zeros((self.action_history_len, self.no_of_actions), dtype=np.float32)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0])  # Track previous position
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.Player.server.connect()
# sleep(2.0)  # Longer wait for connection to establish completely
self.Player.server.send_immediate(
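The `knee_phase_*` knobs above (deadband, minimum and target switch intervals, fast-switch and hold penalties) read as a cadence regularizer on alternating knee lifts. The shaping code itself is not in this diff, so the following is only a plausible sketch of how such knobs could combine, with hypothetical names throughout:

```python
import numpy as np

def knee_phase_shaping(knee_balance: float, prev_sign: int, frames_since_switch: int,
                       deadband: float = 0.10, min_interval: int = 18,
                       target_interval: int = 22, max_hold: int = 28,
                       switch_scale: float = 0.14, fast_scale: float = 0.10,
                       hold_scale: float = 0.18):
    """Reward alternating knee phases at a steady cadence (illustrative only).

    knee_balance: left-minus-right knee flexion; its sign is the gait phase.
    Returns (reward, new_sign, new_frames_since_switch).
    """
    # Ignore tiny oscillations inside the deadband.
    sign = prev_sign if abs(knee_balance) < deadband else int(np.sign(knee_balance))
    reward = 0.0
    if sign != prev_sign and prev_sign != 0:
        if frames_since_switch < min_interval:
            # Switching faster than the minimum interval looks like jitter.
            reward -= fast_scale * (min_interval - frames_since_switch) / min_interval
        else:
            # Reward phase switches, best when close to the target cadence.
            reward += switch_scale * np.exp(-abs(frames_since_switch - target_interval) / target_interval)
        frames_since_switch = 0
    else:
        frames_since_switch += 1
        if frames_since_switch > max_hold:
            # Penalize freezing in one phase (single-leg stance held too long).
            reward -= hold_scale
    return reward, sign, frames_since_switch
```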
@@ -341,10 +392,18 @@ class WalkEnv(gym.Env):
self.route_completed = False
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
self.prev_action_history.fill(0.0)
self.history_idx = 0
self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
self.last_yaw_error = None
self.prev_knee_balance = 0.0
self.prev_knee_phase_sign = 0
self.knee_phase_frames_since_switch = 0
self.knee_phase_hold_frames = 0
self.walk_cycle_step = 0
self._reward_debug_steps_left = 0
self._speed_estimate = 0.0
self._speed_from_acc = 0.0

# Randomize the beam target position and orientation to increase training diversity
beam_x = (random() - 0.5) * 10
@@ -399,16 +458,28 @@ class WalkEnv(gym.Env):
self.initial_position = np.array(self.Player.world.global_position[:2])
self.previous_pos = self.initial_position.copy()  # Critical: set to the actual position
self.act = np.zeros(self.no_of_actions, np.float32)
# Randomize the global target bearing so the policy must learn to rotate toward it first.
# Generate multiple waypoints along a path
heading_deg = float(r.global_orientation_euler[2])
target_offset = MathOps.rotate_2d_vec(
    np.array([target_distance, 0.0]),
    heading_deg + target_bearing_deg,
    is_rad=False,
)
point1 = self.initial_position + target_offset
self.point_list = [point1]
self.point_list = []
current_point = self.initial_position.copy()

for i in range(self.num_waypoints):
    # Each waypoint is placed further along the path
    target_distance_wp = np.random.uniform(self.reset_target_distance_min, self.reset_target_distance_max)
    target_bearing_deg_wp = np.random.uniform(-self.reset_target_bearing_range_deg, self.reset_target_bearing_range_deg)

    target_offset = MathOps.rotate_2d_vec(
        np.array([target_distance_wp, 0.0]),
        heading_deg + target_bearing_deg_wp,
        is_rad=False,
    )
    next_point = current_point + target_offset
    self.point_list.append(next_point)
    current_point = next_point.copy()

self.target_position = self.point_list[self.waypoint_index]
if self.train_stage == "in_place":
    self.target_position = self.initial_position.copy()
self.initial_height = self.Player.world.global_position[2]

return self.observe(True), {}
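Each waypoint is chained from the previous one by rotating a forward vector [d, 0] by the robot's heading plus a sampled bearing. Assuming `MathOps.rotate_2d_vec` applies a standard counter-clockwise 2D rotation, it is equivalent to this small helper:

```python
import math
import numpy as np

def rotate_2d_vec(vec: np.ndarray, angle_deg: float) -> np.ndarray:
    """Rotate a 2D vector counter-clockwise by angle_deg (sketch of the assumed semantics)."""
    a = math.radians(angle_deg)
    rot = np.array([[math.cos(a), -math.sin(a)],
                    [math.sin(a),  math.cos(a)]])
    return rot @ vec

# e.g. a waypoint 3 m ahead, 30 deg left of a 90 deg heading:
waypoint_offset = rotate_2d_vec(np.array([3.0, 0.0]), 90.0 + 30.0)
```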
@@ -419,185 +490,27 @@ class WalkEnv(gym.Env):

def compute_reward(self, previous_pos, current_pos, action):
    height = float(self.Player.world.global_position[2])
    robot = self.Player.robot

    joint_pos_rad = np.deg2rad(
        [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
    )
    joint_speed_rad = np.deg2rad(
        [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
    )

    orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
    projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
    tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
    ang_vel = np.deg2rad(robot.gyroscope)
    rp_ang_vel_mag = float(np.linalg.norm(ang_vel[:2]))
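Rotating the world gravity vector into the torso frame and taking the norm of its x/y components gives a tilt magnitude equal to sin(tilt angle): 0 when upright, approaching 1 as the torso nears horizontal. A quick standalone check of that identity, assuming `R` is `scipy.spatial.transform.Rotation` as the `.from_quat(...).inv().apply(...)` calls suggest:

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

# Torso pitched 30 deg: the projected gravity's horizontal norm is sin(30 deg) = 0.5.
pitch = R.from_euler("y", 30, degrees=True)
projected_gravity = pitch.inv().apply([0.0, 0.0, -1.0])
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
print(round(tilt_mag, 3))  # 0.5
```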

    is_fallen = height < 0.55
    if is_fallen:
        # remain = max(0, 800 - self.step_counter)
        # return -8.0 - 0.01 * remain
        return -20.0

    if np.linalg.norm(current_pos - previous_pos) > 0.005:
        position_penalty = -3 * float(np.linalg.norm(current_pos - previous_pos))
        prev_dist_to_target = float(np.linalg.norm(self.target_position - previous_pos))
        curr_dist_to_target = float(np.linalg.norm(self.target_position - current_pos))
        dist_delta = prev_dist_to_target - curr_dist_to_target

        # Forward-progress reward (distance delta) with anti-stuck shaping.
        progress_reward = 22.0 * dist_delta
        survival_reward = 0.02
        smoothness_penalty = -0.015 * float(np.linalg.norm(action - self.last_action_for_reward))
        step_displacement = float(np.linalg.norm(current_pos - previous_pos))
        if self.step_counter > 30 and step_displacement < 0.006:
            idle_penalty = -0.06
        else:
            idle_penalty = 0.0
    else:
        # Default every term so the sum below never sees an unbound name.
        position_penalty = 0.0
        idle_penalty = 0.0
        progress_reward = 0.0
        survival_reward = 0.0
        smoothness_penalty = 0.0

    total = progress_reward + survival_reward + smoothness_penalty + idle_penalty

    # Turn-to-target shaping.
    to_target = self.target_position - current_pos
    dist_to_target = float(np.linalg.norm(to_target))
    if dist_to_target > 1e-6:
        target_yaw = math.atan2(float(to_target[1]), float(to_target[0]))
    else:
        target_yaw = 0.0

    robot_yaw = math.radians(float(robot.global_orientation_euler[2]))
    yaw_error = target_yaw - robot_yaw

    # Main heading objective: face the target direction.
    # heading_align_reward = 1.0 * math.cos(yaw_error)

    abs_yaw_error = abs(yaw_error)
    alive_bonus = 2.0 * max(0.0, 1.0 - abs_yaw_error / math.pi)
    head_toward_bonus = self.reward_head_toward_bonus if abs_yaw_error < math.radians(4.0) else 0.0

    if self.last_yaw_error is None:
        heading_progress_reward = 0.0
    else:
        prev_abs_yaw_error = abs(self.last_yaw_error)
        yaw_err_delta = prev_abs_yaw_error - abs_yaw_error
        progress_gate = 1.0 if abs_yaw_error > math.radians(4.0) else 0.0
        heading_progress_reward = progress_gate * yaw_err_delta
        heading_progress_reward = float(np.clip(heading_progress_reward, -1, 1))
    self.last_yaw_error = yaw_error
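One caveat with `target_yaw - robot_yaw`: a raw difference of two angles can exceed π in magnitude, which would understate alignment for headings that are actually close. If that matters here, the usual fix is to wrap the error into [-π, π]; a sketch:

```python
import math

def wrap_to_pi(angle_rad: float) -> float:
    # atan2 of (sin, cos) maps any angle to the equivalent one in [-pi, pi].
    return math.atan2(math.sin(angle_rad), math.cos(angle_rad))

print(wrap_to_pi(math.radians(350)))  # ~ -0.1745 rad (-10 deg), not +350 deg
```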

    # action_penalty = -0.01 * float(np.linalg.norm(action))
    smoothness_penalty = -0.05 * float(np.linalg.norm(action - self.last_action_for_reward))

    posture_penalty = -0.6 * tilt_mag
    # Penalize roll/pitch rotational shake but do not penalize yaw turning directly.
    ang_vel_penalty = -0.06 * rp_ang_vel_mag

    joint_pos = np.deg2rad(
        [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
    ) * self.train_sim_flip
    left_hip_roll = float(joint_pos[12])
    right_hip_roll = float(joint_pos[18])
    left_hip_pitch = float(joint_pos[11])
    right_hip_pitch = float(joint_pos[17])

    left_ankle_roll = float(joint_pos[16])
    right_ankle_roll = float(joint_pos[22])

    max_leg_roll = 0.2  # prevent a splits-like stance
    split_penalty = -0.8 * max(0.0, (-left_hip_roll + right_hip_roll - 2 * max_leg_roll) / max_leg_roll)
    left_hip_yaw = float(joint_pos[13])
    right_hip_yaw = float(joint_pos[19])

    min_leg_separation = 0.05  # minimum leg separation (keeps the legs from pressing together)
    # Penalize legs pulling too close together (adduction), based on the distance between them
    leg_separation = -left_hip_roll + right_hip_roll
    inward_penalty = -0.25 * max(0.0, (min_leg_separation - leg_separation) / min_leg_separation)

    # Ankle roll check: prevent excessive eversion or inversion
    max_ankle_roll = 0.15  # maximum allowed ankle roll angle

    # Penalize excessive ankle eversion/inversion (absolute value too large)
    ankle_roll_penalty = -0.5 * max(0.0, (abs(left_ankle_roll) + abs(right_ankle_roll) - 2 * max_ankle_roll) / max_ankle_roll)

    # Penalize the two ankles rolling in opposite directions (an unstable posture)
    ankle_roll_cross_penalty = -0.3 * max(0.0, -(left_ankle_roll * right_ankle_roll))

    # Penalize excessive left and right thigh rotation separately
    max_hip_yaw = 0.5  # maximum allowed yaw angle
    left_hip_yaw_penalty = -0.4 * max(0.0, abs(left_hip_yaw) - max_hip_yaw)
    right_hip_yaw_penalty = -0.4 * max(0.0, abs(right_hip_yaw) - max_hip_yaw)
    # Smart crossed-leg penalty: penalize only while standing; allow crossed legs while turning
    yaw_rate = float(np.deg2rad(robot.gyroscope[2]))
    yaw_rate_abs = abs(yaw_rate)

    # Penalize crossed legs only when the turning rate is low (i.e., standing)
    cross_leg_gate = max(0.0, 1.0 - yaw_rate_abs / math.radians(8.0))
    hip_yaw_cross_penalty = -1.0 * cross_leg_gate * max(0.0, -(left_hip_yaw * right_hip_yaw)) if left_hip_yaw > 0 and right_hip_yaw < 0 else 0.0
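Nearly all of these posture terms share the same hinge pattern: zero inside an allowed band, then a penalty growing linearly with the violation, i.e. `-k * max(0, x - threshold)`. A tiny helper makes that shape explicit (illustrative, not part of the commit):

```python
def hinge_penalty(value: float, threshold: float, scale: float) -> float:
    """Linear penalty for the amount by which `value` exceeds `threshold`."""
    return -scale * max(0.0, value - threshold)

# e.g. the hip-yaw terms above: -0.4 * max(0, |yaw| - 0.5)
left_hip_yaw_penalty = hinge_penalty(abs(0.8), 0.5, 0.4)   # -0.12
right_hip_yaw_penalty = hinge_penalty(abs(0.3), 0.5, 0.4)  # 0.0
```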

    # Torso-lower-body linkage: reward coordinated turning, punish waist-only spinning.
    waist_speed = abs(float(joint_speed_rad[10]))
    lower_body_speed = float(np.mean(np.abs(joint_speed_rad[11:23])))
    lower_body_follow_ratio = lower_body_speed / (waist_speed + 1e-4)
    linkage_reward = 0.24 * min(1.0, lower_body_follow_ratio) * min(1.0, waist_speed / 1.2)
    waist_only_turn_penalty = -0.20 * max(0.0, waist_speed - 1.35 * lower_body_speed)

    # Extra posture linkage in yaw joints to avoid decoupled torso twist.
    waist_yaw = abs(float(joint_pos_rad[10]))
    hip_yaw_mean = 0.5 * (abs(float(joint_pos_rad[13])) + abs(float(joint_pos_rad[19])))
    yaw_link_reward = 0.12 * math.exp(-abs(waist_yaw - hip_yaw_mean) / 0.22)

    target_height = self.initial_height
    height_error = height - target_height

    height_penalty = -(math.exp(12 * abs(height_error)) - 1) if height_error > 0.04 else 0

    # # Near the top of compute_reward, add a height rate-of-change calculation
    # if not hasattr(self, 'last_height'):
    #     self.last_height = height
    #     self.last_height_time = self.step_counter  # optional, for time intervals
    # height_rate = height - self.last_height  # positive = rising, negative = falling
    # self.last_height = height

    # Penalize height drops (negative rate of change)
    # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient is tunable; -height_rate is positive when the height drops

    # # Inside compute_reward
    # if self.step_counter > 50:
    #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
    #     novelty = float(np.linalg.norm(action - avg_prev_action))
    #     exploration_bonus = 0.05 * novelty
    # else:
    #     exploration_bonus = 0

    # self.prev_action_history[self.history_idx] = action
    # self.history_idx = (self.history_idx + 1) % 50

    total = (
        # progress_reward +
        alive_bonus +
        head_toward_bonus +
        heading_progress_reward +
        # lateral_penalty +
        # action_penalty +
        smoothness_penalty +
        posture_penalty
        + ang_vel_penalty
        + height_penalty
        + ankle_roll_penalty
        + ankle_roll_cross_penalty
        + split_penalty
        + inward_penalty
        # + leg_proximity_penalty
        + left_hip_yaw_penalty
        + right_hip_yaw_penalty
        + hip_yaw_cross_penalty
        + position_penalty
        # + linkage_reward
        # + waist_only_turn_penalty
        # + yaw_link_reward
        # + stance_collapse_penalty
        # + hip_yaw_yaw_cross_penalty
        # + cross_leg_penalty
        # + exploration_bonus
        # + height_down_penalty
    )
    # print(height_error, height_penalty)
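Because the burst logger below prints each term by hand, a common alternative is to accumulate the terms in a dict and derive both the sum and the log line from it in one place. A sketch of that refactor (not what the commit does), with made-up values:

```python
terms = {
    "alive_bonus": 1.6,
    "head_toward_bonus": 0.0,
    "smoothness_penalty": -0.03,
    "posture_penalty": -0.12,
}
total = sum(terms.values())
# One format string covers every term, so terms can't drift out of the log.
debug_line = ",".join(f"{k}:{v:.4f}" for k, v in terms.items()) + f",total:{total:.4f}"
print(debug_line)
```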

    now = time.time()
    if self.reward_debug_interval_sec > 0 and now - self._reward_debug_last_time >= self.reward_debug_interval_sec:
        self._reward_debug_last_time = now
@@ -606,35 +519,12 @@ class WalkEnv(gym.Env):
    if self._reward_debug_steps_left > 0:
        self._reward_debug_steps_left -= 1
        self.debug_log(
            f"height_penalty:{height_penalty:.4f},"
            f"progress_reward:{progress_reward:.4f},"
            f"survival_reward:{survival_reward:.4f},"
            f"smoothness_penalty:{smoothness_penalty:.4f},"
            f"posture_penalty:{posture_penalty:.4f},"
            f"heading_progress_reward:{heading_progress_reward:.4f},"
            # f"stance_collapse_penalty:{stance_collapse_penalty:.4f},"
            # f"cross_leg_penalty:{cross_leg_penalty:.4f},"
            f"ang_vel_penalty:{ang_vel_penalty:.4f},"
            f"split_penalty:{split_penalty:.4f},"
            f"ankle_roll_penalty:{ankle_roll_penalty:.4f},"
            f"ankle_roll_cross_penalty:{ankle_roll_cross_penalty:.4f},"
            f"left_hip_yaw_penalty:{left_hip_yaw_penalty:.4f},"
            f"right_hip_yaw_penalty:{right_hip_yaw_penalty:.4f},"
            f"hip_yaw_cross_penalty:{hip_yaw_cross_penalty:.4f},"
            f"inward_penalty:{inward_penalty:.4f},"
            f"position_penalty:{position_penalty:.4f},"
            # f"linkage_reward:{linkage_reward:.4f},"
            # f"waist_only_turn_penalty:{waist_only_turn_penalty:.4f},"
            # f"yaw_link_reward:{yaw_link_reward:.4f}"
            # f"leg_proximity_penalty:{leg_proximity_penalty:.4f},"

            # f"stance_collapse_penalty:{stance_collapse_penalty:.4f},"
            # f"hip_yaw_yaw_cross_penalty:{hip_yaw_yaw_cross_penalty:.4f},"
            # f"height_down_penalty:{height_down_penalty:.4f}",
            # f"exploration_bonus:{exploration_bonus:.4f}"
            f"alive_bonus:{alive_bonus:.4f},"
            f"abs_yaw_error:{abs_yaw_error:.4f},"
            f"idle_penalty:{idle_penalty:.4f},"
            f"total:{total:.4f}"
        )
        # print(f"abs_yaw_error:{abs_yaw_error:.4f}")

    return total

@@ -655,10 +545,12 @@ class WalkEnv(gym.Env):
action[8] = 0
action[9] = 5
action[10] = 0
action[11] = np.clip(action[11], -0.7, 0.7)
action[17] = np.clip(action[17], -0.7, 0.7)
# action[12] = -1.0
# action[18] = 1.0
action[11] = np.clip(action[11], -6, 6)
action[17] = np.clip(action[17], -6, 6)
# action[11] = 1
# action[17] = 1
# action[12] = -0.01
# action[18] = 0.01
# action[13] = -1.0
# action[19] = 1.0
self.previous_action = action.copy()
@@ -671,7 +563,7 @@ class WalkEnv(gym.Env):

for idx, target in enumerate(self.target_joint_positions):
    r.set_motor_target_position(
        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=80, kd=4.67
        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=60, kd=1.2
    )

self.previous_action = action.copy()
@@ -684,13 +576,27 @@ class WalkEnv(gym.Env):

current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

if self.step_counter % 10 == 0:
    self.previous_pos = current_pos.copy()

# Compute reward based on movement from previous step
reward = self.compute_reward(self.previous_pos, current_pos, action)
self.previous_pos = current_pos.copy()

self.prev_action_history[self.history_idx] = action.copy()
self.history_idx = (self.history_idx + 1) % self.action_history_len

self.last_action_for_reward = action.copy()

# Check if the current waypoint is reached
if self.train_stage != "in_place":
    dist_to_waypoint = float(np.linalg.norm(current_pos - self.target_position))
    if dist_to_waypoint < self.waypoint_reach_distance:
        # Move to the next waypoint
        self.waypoint_index += 1
        if self.waypoint_index >= len(self.point_list):
            # All waypoints completed
            self.route_completed = True
        else:
            # Update the target to the next waypoint
            self.target_position = self.point_list[self.waypoint_index]

# Fall detection and penalty
is_fallen = self.Player.world.global_position[2] < 0.55
@@ -709,15 +615,22 @@ class Train(Train_Base):
def train(self, args):

    # --------------------------------------- Learning parameters
    n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
    if n_envs < 1:
        raise ValueError("GYM_CPU_N_ENVS must be >= 1")
    server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
    n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "512"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
    minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
    n_envs = 12
    server_warmup_sec = 3.0
    n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
    minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
    total_steps = 30000000
    learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
    folder_name = f'Turn_R{self.robot_type}'
    learning_rate = 2e-4
    ent_coef = 0.08
    clip_range = 0.2
    gamma = 0.97
    n_epochs = 3
    enable_eval = True
    monitor_train_env = False
    eval_freq_mult = 30
    save_freq_mult = 20
    eval_eps = 3
    folder_name = f'Walk_R{self.robot_type}'
    model_path = f'./scripts/gyms/logs/{folder_name}/'

    print(f"Model path: {model_path}")
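With these values the rollout buffer holds n_steps_per_env * n_envs = 256 * 12 = 3072 transitions, which divides evenly into 512-sample minibatches (6 per epoch). A guard like the following catches misconfigurations early (illustrative, not in the commit):

```python
n_envs, n_steps_per_env, minibatch_size = 12, 256, 512
buffer_size = n_steps_per_env * n_envs  # 3072
assert buffer_size % minibatch_size == 0, (
    f"minibatch_size {minibatch_size} must divide the rollout buffer ({buffer_size})"
)
print(buffer_size // minibatch_size, "minibatches per epoch")  # 6
```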
@@ -733,22 +646,26 @@ class Train(Train_Base):

        return thunk

    server_log_dir = os.path.join(model_path, "server_logs")
    os.makedirs(server_log_dir, exist_ok=True)
    servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

    # Wait for servers to start
    print(f"Starting {n_envs + 1} rcssservermj servers...")
    if server_warmup_sec > 0:
        print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
        sleep(server_warmup_sec)
    print("Servers started, creating environments...")

    env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)], start_method="spawn")
    # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
    eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

    env = None
    eval_env = None
    servers = None
    try:
        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=monitor_train_env) for i in range(n_envs)], start_method="spawn")
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        if enable_eval:
            eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

        # Custom policy network architecture
        policy_kwargs = dict(
            net_arch=dict(
@@ -771,35 +688,39 @@ class Train(Train_Base):
            learning_rate=learning_rate,
            device="cpu",
            policy_kwargs=policy_kwargs,
            ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),  # Entropy coefficient for exploration
            clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
            ent_coef=ent_coef,  # Entropy coefficient for exploration
            clip_range=clip_range,  # PPO clipping parameter
            gae_lambda=0.95,  # GAE lambda
            gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
            gamma=gamma,  # Discount factor
            # target_kl=0.03,
            n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
            n_epochs=n_epochs,
            tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
        )

        model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                      eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=7,
                                      eval_freq=n_steps_per_env * max(1, eval_freq_mult),
                                      save_freq=n_steps_per_env * max(1, save_freq_mult),
                                      eval_eps=max(1, eval_eps),
                                      backup_env_file=__file__)
    except KeyboardInterrupt:
        sleep(1)  # wait for child processes
        print("\nctrl+c pressed, aborting...\n")
        servers.kill()
        return

    env.close()
    eval_env.close()
    servers.kill()
    finally:
        if env is not None:
            env.close()
        if eval_env is not None:
            eval_env.close()
        if servers is not None:
            servers.kill()
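The old path leaked whenever learn_model raised anything other than KeyboardInterrupt: the trailing env.close()/servers.kill() calls were simply skipped, leaving subprocesses and server processes alive, which is presumably the leak the commit message refers to. The try/finally rewrite runs the cleanup on every exit path. An equivalent pattern using contextlib, sketched here for comparison only with dummy stand-ins for the real resources:

```python
from contextlib import ExitStack

class _Dummy:
    """Stand-in for Train_Server / the vec envs, just for this sketch."""
    def __init__(self, name):
        self.name = name
    def close(self):
        print(f"closed {self.name}")
    def kill(self):
        print(f"killed {self.name}")

with ExitStack() as stack:
    servers = _Dummy("servers")
    stack.callback(servers.kill)  # registered first, runs last (LIFO)
    env = _Dummy("env")
    stack.callback(env.close)
    # ... training would run here; any exception still triggers both callbacks
print("done")  # output: closed env, killed servers, done
```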

def test(self, args):

    # Uses different server and monitor ports
    server_log_dir = os.path.join(args["folder_dir"], "server_logs")
    os.makedirs(server_log_dir, exist_ok=True)
    test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
    test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"
    test_no_render = False
    test_no_realtime = False

    server = Train_Server(
        self.server_p - 1,