Update Walk.py and the Walk model (version 0.2.0)

This commit is contained in:
xxh
2026-04-19 00:34:57 -04:00
parent 2ff69d64be
commit c9c4b35e89
2 changed files with 2 additions and 2 deletions

View File

@@ -247,7 +247,7 @@ class WalkEnv(gym.Env):
 self.reward_forward_lean_penalty_cap = 0.7
 self.reward_knee_straight_threshold = 0.18
 self.reward_knee_straight_penalty_scale = 0.70
-self.reward_hip_overextend_threshold = 0.95
+self.reward_hip_overextend_threshold = 1.1
 self.reward_hip_overextend_penalty_scale = 1.30
 self.reward_leg_stretch_penalty_scale = 1.20
 self.reward_stretch_lean_combo_scale = 1.40
@@ -886,7 +886,7 @@ class Train(Train_Base):
 server_warmup_sec = 3.0
 n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
 minibatch_size = 1024  # should be a factor of (n_steps_per_env * n_envs)
-total_steps = 18000000
+total_steps = 180000000
 learning_rate = 3e-4
 ent_coef = 0.05
 clip_range = 0.2

Binary file not shown.