Compare commits

5 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | fa6b3d644a |  |
|  | 5832cb7ba9 |  |
|  | 7e9b71e4fb |  |
|  | 978a064012 |  |
|  | 02afa3c1fc |  |
.gitignore (vendored, 12 changes)
```
@@ -10,13 +10,9 @@ poetry.toml
**/log/
*.spec
dist/
*steps.zip
*.pkl
best_model.zip
*.zip
*.csv
*.npz
*.xml
*.json
*.yaml
*.iml
*.TXT
*.xml
*.npz
*.pkl
```
```diff
@@ -98,12 +98,8 @@ class Walk(Behavior):
         velocity[0] = np.clip(velocity[0], -0.5, 0.5)
         velocity[1] = np.clip(velocity[1], -0.25, 0.25)
 
-        radian_joint_positions = np.deg2rad(
-            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
-        )
-        radian_joint_speeds = np.deg2rad(
-            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
-        )
+        radian_joint_positions = np.deg2rad(list(robot.motor_positions.values()))
+        radian_joint_speeds = np.deg2rad(list(robot.motor_speeds.values()))
 
         qpos_qvel_previous_action = np.vstack(
             (
```
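The refactor in this hunk swaps an explicit iteration over `ROBOT_MOTORS` for `dict.values()`, which is only equivalent if the dicts were populated in `ROBOT_MOTORS` order. A minimal sanity check of that assumption (a sketch, not part of the diff; `robot` stands for the `Walk` behavior's robot object):

```python
# Python dicts preserve insertion order, so list(d.values()) matches
# [d[k] for k in ROBOT_MOTORS] only if insertion order equals ROBOT_MOTORS order.
assert list(robot.motor_positions.keys()) == list(robot.ROBOT_MOTORS)
assert list(robot.motor_speeds.keys()) == list(robot.ROBOT_MOTORS)
```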
14
command.md
14
command.md
@@ -1,14 +0,0 @@
```
Training (default)
bash train.sh

Test (real-time, with rendering)
GYM_CPU_MODE=test GYM_CPU_TEST_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip GYM_CPU_TEST_FOLDER=scripts/gyms/logs/Walk_R0_005/ GYM_CPU_TEST_NO_RENDER=0 GYM_CPU_TEST_NO_REALTIME=0 bash train.sh

Test (no rendering, non-real-time)
GYM_CPU_MODE=test GYM_CPU_TEST_NO_RENDER=1 GYM_CPU_TEST_NO_REALTIME=1 bash train.sh

Retrain (continue training)
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_005/best_model.zip bash train.sh

Retrain with modified training hyperparameters
GYM_CPU_MODE=train GYM_CPU_TRAIN_MODEL=scripts/gyms/logs/Walk_R0_004/best_model.zip GYM_CPU_TRAIN_LR=2e-4 GYM_CPU_TRAIN_CLIP_RANGE=0.13 GYM_CPU_TRAIN_BATCH_SIZE=256 GYM_CPU_TRAIN_GAMMA=0.95 GYM_CPU_TRAIN_ENT_COEF=0.05 GYM_CPU_TRAIN_EPOCHS=8 bash train.sh
```
```diff
@@ -1,4 +1,5 @@
 import logging
 import os
 import socket
 import time
 from select import select
@@ -15,6 +16,11 @@ class Server:
         self.__socket: socket.socket = self._create_socket()
         self.__send_buff = []
         self.__rcv_buffer_size = 1024
+        self.__rcv_buffer_default_size = 1024
+        self.__max_msg_size = 1048576
+        self.__shrink_threshold = 8192
+        self.__shrink_after_msgs = 200
+        self.__small_msg_streak = 0
         self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
 
     def _create_socket(self) -> socket.socket:
@@ -105,6 +111,10 @@ class Server:
 
         msg_size = int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)
 
+        # Guard against corrupted frame lengths that would trigger huge allocations.
+        if msg_size <= 0 or msg_size > self.__max_msg_size:
+            raise ConnectionResetError
+
         if msg_size > self.__rcv_buffer_size:
             self.__rcv_buffer_size = msg_size
             self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
@@ -120,6 +130,15 @@
                 message=self.__rcv_buffer[:msg_size].decode()
             )
 
+            if msg_size <= self.__shrink_threshold and self.__rcv_buffer_size > self.__rcv_buffer_default_size:
+                self.__small_msg_streak += 1
+                if self.__small_msg_streak >= self.__shrink_after_msgs:
+                    self.__rcv_buffer_size = self.__rcv_buffer_default_size
+                    self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
+                    self.__small_msg_streak = 0
+            else:
+                self.__small_msg_streak = 0
+
             # Exit if the socket has no more data
             if len(select([self.__socket], [], [], 0.0)[0]) == 0:
                 break
```
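The new guard assumes each frame starts with a 4-byte big-endian, unsigned length prefix, as read by `int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)`. A minimal sketch of the matching sender side under that framing assumption (`send_framed` is a hypothetical helper, not part of this diff):

```python
import socket

def send_framed(sock: socket.socket, payload: bytes, max_msg_size: int = 1_048_576) -> None:
    # Prefix the payload with its length as a 4-byte big-endian unsigned int,
    # mirroring the receiver's int.from_bytes(buf[:4], byteorder="big", signed=False).
    if not 0 < len(payload) <= max_msg_size:
        raise ValueError("payload size outside the receiver's accepted range")
    sock.sendall(len(payload).to_bytes(4, byteorder="big", signed=False) + payload)
```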
readme.md (15 changes)
@@ -74,6 +74,21 @@ poetry run ./build_binary.sh <team-name>

Once binary generation is finished, the result will be inside the build folder, as ```<team-name>.tar.gz```

### GYM

To use the gym, you need to install the following dependencies:
```bash
pip install gymnasium
pip install psutil
pip install stable-baselines3
```

Then, you can run the gym examples under the ```GYM_CPU``` folder:
```bash
python3 -m scripts.gyms.Walk  # Run the Walk gym example
# of course, you can run other gym examples
```

### Authors and acknowledgment
This project was developed and contributed by:
- **Chenxi Liu**
```diff
@@ -1,9 +1,14 @@
 import subprocess
 import os
 import time
+import threading
 
 
 class Server():
+    WATCHDOG_ENABLED = True
+    WATCHDOG_INTERVAL_SEC = 30.0
+    WATCHDOG_RSS_MB_LIMIT = 2000.0
+
     def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
         try:
             import psutil
@@ -14,6 +19,10 @@ class Server():
         self.first_server_p = first_server_p
         self.n_servers = n_servers
         self.rcss_processes = []
+        self._server_specs = []
+        self._watchdog_stop = threading.Event()
+        self._watchdog_lock = threading.Lock()
+        self._watchdog_thread = None
         first_monitor_p = first_monitor_p + 100
 
         # makes it easier to kill test servers without affecting train servers
@@ -23,26 +32,79 @@ class Server():
         for i in range(n_servers):
             port = first_server_p + i
             mport = first_monitor_p + i
-            server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
-
-            proc = subprocess.Popen(
-                server_cmd.split(),
-                stdout=subprocess.DEVNULL,
-                stderr=subprocess.STDOUT,
-                start_new_session=True
-            )
-
-            # Avoid startup storm when launching many servers at once.
-            time.sleep(0.03)
-
-            rc = proc.poll()
-            if rc is not None:
-                raise RuntimeError(
-                    f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
-                )
-
-            self.rcss_processes.append(proc)
+            self._server_specs.append((port, mport, cmd, render_arg, realtime_arg))
+            proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
+            self.rcss_processes.append(proc)
+
+        if self.WATCHDOG_ENABLED:
+            self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=True)
+            self._watchdog_thread.start()
+
+    def _spawn_server(self, port, mport, cmd, render_arg, realtime_arg):
+        server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
+
+        proc = subprocess.Popen(
+            server_cmd.split(),
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.STDOUT,
+            start_new_session=True
+        )
+
+        # Avoid startup storm when launching many servers at once.
+        time.sleep(0.03)
+
+        rc = proc.poll()
+        if rc is not None:
+            raise RuntimeError(
+                f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
+            )
+
+        return proc
+
+    @staticmethod
+    def _pid_rss_mb(pid):
+        try:
+            with open(f"/proc/{pid}/status", "r", encoding="utf-8") as f:
+                for line in f:
+                    if line.startswith("VmRSS:"):
+                        parts = line.split()
+                        if len(parts) >= 2:
+                            # VmRSS is kB
+                            return float(parts[1]) / 1024.0
+        except (FileNotFoundError, ProcessLookupError, PermissionError, OSError):
+            return 0.0
+        return 0.0
+
+    def _restart_server_at_index(self, idx, reason):
+        port, mport, cmd, render_arg, realtime_arg = self._server_specs[idx]
+        old_proc = self.rcss_processes[idx]
+        try:
+            old_proc.terminate()
+            old_proc.wait(timeout=1.0)
+        except Exception:
+            try:
+                old_proc.kill()
+            except Exception:
+                pass
+
+        new_proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
+        self.rcss_processes[idx] = new_proc
+        print(
+            f"[ServerWatchdog] Restarted server idx={idx} port={port} monitor={mport} reason={reason}"
+        )
+
+    def _watchdog_loop(self):
+        while not self._watchdog_stop.wait(self.WATCHDOG_INTERVAL_SEC):
+            with self._watchdog_lock:
+                for i, proc in enumerate(self.rcss_processes):
+                    rc = proc.poll()
+                    if rc is not None:
+                        self._restart_server_at_index(i, f"exited:{rc}")
+                        continue
+
+                    rss_mb = self._pid_rss_mb(proc.pid)
+                    if rss_mb > self.WATCHDOG_RSS_MB_LIMIT:
+                        self._restart_server_at_index(i, f"rss_mb:{rss_mb:.1f}")
+
     def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
         ''' Check if any server is running on chosen ports '''
@@ -78,6 +140,9 @@ class Server():
         return
 
     def kill(self):
+        self._watchdog_stop.set()
+        if self._watchdog_thread is not None:
+            self._watchdog_thread.join(timeout=1.0)
         for p in self.rcss_processes:
             p.kill()
         print(f"Killed {self.n_servers} rcssservermj processes starting at {self.first_server_p}")
```
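For context, a minimal driver sketch for this class, assuming only the constructor and `kill()` shown above; the port numbers are the defaults used elsewhere in this diff, and the training loop is elided:

```python
# Hypothetical usage: spin up 4 rcssservermj instances (the watchdog thread starts
# automatically), then tear everything down, which also stops the watchdog.
servers = Server(first_server_p=3100, first_monitor_p=3200, n_servers=4,
                 no_render=True, no_realtime=True)
try:
    pass  # run training against server ports 3100..3103
finally:
    servers.kill()
```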
@@ -1,302 +0,0 @@
```python
from itertools import zip_longest
from math import inf
import math
import numpy as np
import shutil


class UI():
    console_width = 80
    console_height = 24

    @staticmethod
    def read_particle(prompt, str_options, dtype=str, interval=[-inf, inf]):
        '''
        Read particle from user from a given dtype or from a str_options list

        Parameters
        ----------
        prompt : `str`
            prompt to show user before reading input
        str_options : `list`
            list of str options (in addition to dtype if dtype is not str)
        dtype : `class`
            if dtype is str, then user must choose a value from str_options, otherwise it can also send a dtype value
        interval : `list`
            [>=min,<max] interval for numeric dtypes

        Returns
        -------
        choice : `int` or dtype
            index of str_options (int) or value (dtype)
        is_str_option : `bool`
            True if `choice` is an index from str_options
        '''
        # Check if user has no choice
        if dtype is str and len(str_options) == 1:
            print(prompt, str_options[0], sep="")
            return 0, True
        elif dtype is int and interval[0] == interval[1]-1:
            print(prompt, interval[0], sep="")
            return interval[0], False

        while True:
            inp = input(prompt)
            if inp in str_options:
                return str_options.index(inp), True

            if dtype is not str:
                try:
                    inp = dtype(inp)
                    if inp >= interval[0] and inp < interval[1]:
                        return inp, False
                except:
                    pass

            print("Error: illegal input! Options:", str_options, f" or {dtype}" if dtype != str else "")

    @staticmethod
    def read_int(prompt, min, max):
        '''
        Read int from user in a given interval
        :param prompt: prompt to show user before reading input
        :param min: minimum input (inclusive)
        :param max: maximum input (exclusive)
        :return: choice
        '''
        while True:
            inp = input(prompt)
            try:
                inp = int(inp)
                assert inp >= min and inp < max
                return inp
            except:
                print(f"Error: illegal input! Choose number between {min} and {max-1}")

    @staticmethod
    def print_table(data, titles=None, alignment=None, cols_width=None, cols_per_title=None, margins=None, numbering=None, prompt=None):
        '''
        Print table

        Parameters
        ----------
        data : `list`
            list of columns, where each column is a list of items
        titles : `list`
            list of titles for each column, default is `None` (no titles)
        alignment : `list`
            list of alignments per column (excluding titles), default is `None` (left alignment for all cols)
        cols_width : `list`
            list of widths per column, default is `None` (fit to content)
            Positive values indicate a fixed column width
            Zero indicates that the column will fit its content
        cols_per_title : `list`
            maximum number of subcolumns per title, default is `None` (1 subcolumn per title)
        margins : `list`
            number of added leading and trailing spaces per column, default is `None` (margin=2 for all columns)
        numbering : `list`
            list of booleans per column, indicating whether to assign numbers to each option
        prompt : `str`
            the prompt string, if given, is printed after the table before reading input

        Returns
        -------
        index : `int`
            returns global index of selected item (relative to table)
        col_index : `int`
            returns local index of selected item (relative to column)
        column : `int`
            returns number of column of selected item (starts at 0)
        * if `numbering` or `prompt` are `None`, `None` is returned


        Example
        -------
        titles = ["Name","Age"]
        data = [["John","Graciete"], [30,50]]
        alignment = ["<","^"]     # 1st column is left-aligned, 2nd is centered
        cols_width = [10,5]       # 1st column's width=10, 2nd column's width=5
        margins = [3,3]
        numbering = [True,False]  # prints: [0-John,1-Graciete][30,50]
        prompt = "Choose a person:"
        '''

        # --------------------------------------------- parameters
        cols_no = len(data)

        if alignment is None:
            alignment = ["<"]*cols_no

        if cols_width is None:
            cols_width = [0]*cols_no

        if numbering is None:
            numbering = [False]*cols_no
            any_numbering = False
        else:
            any_numbering = True

        if margins is None:
            margins = [2]*cols_no

        # Fit column to content + margin, if required
        subcol = []  # subcolumn length and widths
        for i in range(cols_no):
            subcol.append([[], []])
            if cols_width[i] == 0:
                numbering_width = 4 if numbering[i] else 0
                if cols_per_title is None or cols_per_title[i] < 2:
                    cols_width[i] = max([len(str(item))+numbering_width for item in data[i]]) + margins[i]*2
                else:
                    subcol[i][0] = math.ceil(len(data[i])/cols_per_title[i])  # subcolumn maximum length
                    cols_per_title[i] = math.ceil(len(data[i])/subcol[i][0])  # reduce number of columns as needed
                    cols_width[i] = margins[i]*(1+cols_per_title[i]) - (1 if numbering[i] else 0)  # remove one if numbering, same as when printing
                    for j in range(cols_per_title[i]):
                        subcol_data_width = max([len(str(item))+numbering_width for item in data[i][j*subcol[i][0]:j*subcol[i][0]+subcol[i][0]]])
                        cols_width[i] += subcol_data_width    # add subcolumn data width to column width
                        subcol[i][1].append(subcol_data_width)  # save subcolumn data width

                if titles is not None:  # expand to accommodate titles if needed
                    cols_width[i] = max(cols_width[i], len(titles[i]) + margins[i]*2)

        if any_numbering:
            no_of_items = 0
            cumulative_item_per_col = [0]  # useful for getting the local index
            for i in range(cols_no):
                assert type(data[i]) == list, "In function 'print_table', 'data' must be a list of lists!"

                if numbering[i]:
                    data[i] = [f"{n+no_of_items:3}-{d}" for n, d in enumerate(data[i])]
                    no_of_items += len(data[i])
                cumulative_item_per_col.append(no_of_items)

        table_width = sum(cols_width)+cols_no-1

        # --------------------------------------------- col titles
        print(f'{"="*table_width}')
        if titles is not None:
            for i in range(cols_no):
                print(f'{titles[i]:^{cols_width[i]}}', end='|' if i < cols_no - 1 else '')
            print()
            for i in range(cols_no):
                print(f'{"-"*cols_width[i]}', end='+' if i < cols_no - 1 else '')
            print()

        # --------------------------------------------- merge subcolumns
        if cols_per_title is not None:
            for i, col in enumerate(data):
                if cols_per_title[i] < 2:
                    continue
                for k in range(subcol[i][0]):  # create merged items
                    col[k] = (" "*margins[i]).join(f'{col[item]:{alignment[i]}{subcol[i][1][subcol_idx]}}'
                                                   for subcol_idx, item in enumerate(range(k, len(col), subcol[i][0])))
                del col[subcol[i][0]:]  # delete repeated items

        # --------------------------------------------- col items
        for line in zip_longest(*data):
            for i, item in enumerate(line):
                l_margin = margins[i]-1 if numbering[i] else margins[i]  # adjust margins when there are numbered options
                item = "" if item is None else f'{" "*l_margin}{item}{" "*margins[i]}'  # add margins
                print(f'{item:{alignment[i]}{cols_width[i]}}', end='')
                if i < cols_no - 1:
                    print(end='|')
            print(end="\n")
        print(f'{"="*table_width}')

        # --------------------------------------------- prompt
        if prompt is None:
            return None

        if not any_numbering:
            print(prompt)
            return None

        index = UI.read_int(prompt, 0, no_of_items)

        for i, n in enumerate(cumulative_item_per_col):
            if index < n:
                return index, index-cumulative_item_per_col[i-1], i-1

        raise ValueError('Failed to catch illegal input')


    @staticmethod
    def print_list(data, numbering=True, prompt=None, divider=" | ", alignment="<", min_per_col=6):
        '''
        Print list - prints list, using as many columns as possible

        Parameters
        ----------
        data : `list`
            list of items
        numbering : `bool`
            assigns number to each option
        prompt : `str`
            the prompt string, if given, is printed after the table before reading input
        divider : `str`
            string that divides columns
        alignment : `str`
            f-string style alignment ( '<', '>', '^' )
        min_per_col : int
            avoid splitting columns with fewer items

        Returns
        -------
        item : `int`, item
            returns tuple with global index of selected item and the item object,
            or `None` (if `numbering` or `prompt` are `None`)

        '''

        WIDTH = shutil.get_terminal_size()[0]

        data_size = len(data)
        items = []
        items_len = []

        # --------------------------------------------- Add numbers, margins and divider
        for i in range(data_size):
            number = f"{i}-" if numbering else ""
            items.append(f"{divider}{number}{data[i]}")
            items_len.append(len(items[-1]))

        max_cols = np.clip((WIDTH+len(divider)) // min(items_len), 1, math.ceil(data_size/max(min_per_col, 1)))  # width + len(divider) because it is not needed in last col

        # --------------------------------------------- Check maximum number of columns, considering content width (min:1)
        for i in range(max_cols, 0, -1):
            cols_width = []
            cols_items = []
            table_width = 0
            a, b = divmod(data_size, i)
            for col in range(i):
                start = a*col + min(b, col)
                end = start+a+(1 if col < b else 0)
                cols_items.append(items[start:end])
                col_width = max(items_len[start:end])
                cols_width.append(col_width)
                table_width += col_width
            if table_width <= WIDTH+len(divider):
                break
        table_width -= len(divider)

        # --------------------------------------------- Print columns
        print("="*table_width)
        for row in range(math.ceil(data_size / i)):
            for col in range(i):
                content = cols_items[col][row] if len(cols_items[col]) > row else divider  # print divider when there are no items
                if col == 0:
                    l = len(divider)
                    print(end=f"{content[l:]:{alignment}{cols_width[col]-l}}")  # remove divider from 1st col
                else:
                    print(end=f"{content:{alignment}{cols_width[col]}}")
            print()
        print("="*table_width)

        # --------------------------------------------- Prompt
        if prompt is None:
            return None

        if numbering is None:
            return None
        else:
            idx = UI.read_int(prompt, 0, data_size)
            return idx, data[idx]
```
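For orientation, a short usage sketch of the two interactive helpers above, with hypothetical data; per the docstrings, `print_list` returns `(index, item)` and `print_table` returns `(index, col_index, column)` when numbering and a prompt are supplied:

```python
# Lay out options in as many terminal columns as fit, then read a numbered choice.
idx, behavior = UI.print_list(["Walk", "Kick", "Getup"], numbering=True,
                              prompt="Choose a behavior: ")

# Two-column table; only the first column is numbered and therefore selectable.
choice = UI.print_table([["John", "Graciete"], [30, 50]], titles=["Name", "Age"],
                        numbering=[True, False], prompt="Choose a person: ")
```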
@@ -1,705 +0,0 @@
```python
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform
from itertools import count

from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using the step primitive
----------
- class Basic_Run: implements an OpenAI custom gym
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])   # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array(
            [0.0, 0.0, 0.0, 1.4, 0.0, -0.4, 0.0, -1.4, 0.0, 0.4, 0.0,
             -0.4, 0.0, 0.0, 0.8, -0.4, 0.0, 0.4, 0.0, 0.0, -0.8, 0.4, 0.0]
        )
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Encourage a minimum lateral stance so the policy avoids feet overlap.
        self.min_stance_rad = 0.10

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.025
        self.reset_perturb_steps = 4
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for the connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def _reconnect_server(self):
        try:
            self.Player.server.shutdown()
        except Exception:
            pass

        self.Player.server.connect()
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )

    def _safe_receive_world_update(self, retries=1):
        last_exc = None
        for attempt in range(retries + 1):
            try:
                self.Player.server.receive()
                self.Player.world.update()
                return
            except (ConnectionResetError, OSError) as exc:
                last_exc = exc
                if attempt >= retries:
                    raise
                self._reconnect_server()
        if last_exc is not None:
            raise last_exc

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):
        """Return the current observation."""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Compute the target velocity
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Compute the relative direction
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

    def sync(self):
        ''' Run a single simulation step '''
        self._safe_receive_world_update(retries=1)
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2                           # randomize target distance
        length2 = np.random.uniform(0.6, 1)   # randomize target distance
        length3 = np.random.uniform(0.6, 1)   # randomize target distance
        angle2 = np.random.uniform(-30, 30)   # randomize initial orientation
        angle3 = np.random.uniform(-30, 30)   # randomize target direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and heading to increase training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill to completion, giving the robot enough time to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # assume 20 consecutive completions count as success
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])
        robot = self.Player.robot

        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.55
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Target direction
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * (tilt_mag)
        ang_vel_penalty = -0.02 * ang_vel_mag

        # Use simulator joint readings in training frame to shape lateral stance.
        joint_pos = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        ) * self.train_sim_flip
        left_hip_roll = float(joint_pos[12])
        right_hip_roll = float(joint_pos[18])
        left_ankle_roll = float(joint_pos[16])
        right_ankle_roll = float(joint_pos[22])

        hip_spread = left_hip_roll - right_hip_roll
        ankle_spread = left_ankle_roll - right_ankle_roll
        stance_metric = 0.6 * abs(hip_spread) + 0.4 * abs(ankle_spread)

        # Penalize narrow stance (feet too close) and scissoring (cross-leg pattern).
        stance_collapse_penalty = -4.0 * max(0.0, self.min_stance_rad - stance_metric)
        cross_leg_penalty = -1.2 * max(0.0, -(hip_spread * ankle_spread))

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the start of compute_reward, add a height-rate computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height decrease (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; positive -height_rate is the drop magnitude

        # # In compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            + stance_collapse_penalty
            + cross_leg_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 600:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f},",
                f"posture_penalty:{posture_penalty:.4f}",
                f"stance_collapse_penalty:{stance_collapse_penalty:.4f}",
                f"cross_leg_penalty:{cross_leg_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

    def step(self, action):

        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.55

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))      # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),        # Entropy coefficient for exploration
                    clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),     # PPO clipping parameter
                    gae_lambda=0.95,                                                         # GAE lambda
                    gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),              # Discount factor
                    # target_kl=0.03,
                    n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
                    tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=100,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"

        server = Train_Server(
            self.server_p - 1,
            self.monitor_p,
            1,
            no_render=test_no_render,
            no_realtime=test_no_realtime,
        )
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,         # Server port
            m=3200,         # Monitor port
            r=0,            # Robot type
            t='Gym',        # Team name
            u=1             # Uniform number
        )
    )

    trainer = Train(script_args)

    run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()

    if run_mode == "test":
        test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
        test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
        trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
    else:
        retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
        if retrain_model:
            trainer.train({"model_file": retrain_model})
        else:
            trainer.train({})
```
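As a pointer for readers of the deleted training script above, a minimal sketch of loading and querying a saved checkpoint with the same stable-baselines3 API; the path comes from this diff, and the zero observation is only a shape-correct placeholder for the 78-dim observation space:

```python
import numpy as np
from stable_baselines3 import PPO

model = PPO.load("scripts/gyms/logs/Walk_R0_005/best_model.zip", device="cpu")

obs = np.zeros(78, dtype=np.float32)  # placeholder, matching obs_size above
action, _ = model.predict(obs, deterministic=True)
print(action.shape)  # one target per entry in ROBOT_MOTORS
```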
@@ -1,624 +0,0 @@
|
||||
import os
|
||||
import numpy as np
|
||||
import math
|
||||
import time
|
||||
from time import sleep
|
||||
from random import random
|
||||
from random import uniform
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
|
||||
import gymnasium as gym
|
||||
from gymnasium import spaces
|
||||
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from scripts.commons.Server import Server as Train_Server
|
||||
|
||||
from agent.base_agent import Base_Agent
|
||||
from utils.math_ops import MathOps
|
||||
|
||||
from scipy.spatial.transform import Rotation as R
|
||||
|
||||
'''
|
||||
Objective:
|
||||
Learn how to run forward using step primitive
|
||||
----------
|
||||
- class Basic_Run: implements an OpenAI custom gym
|
||||
- class Train: implements algorithms to train a new model or test an existing model
|
||||
'''
|
||||
|
||||
|
||||
class WalkEnv(gym.Env):
|
||||
def __init__(self, ip, server_p) -> None:
|
||||
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.Player = player = Base_Agent(
|
||||
team_name="Gym",
|
||||
number=1,
|
||||
host=ip,
|
||||
port=server_p
|
||||
)
|
||||
self.robot_type = self.Player.robot
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.force_play_on = True
|
||||
|
||||
self.target_position = np.array([0.0, 0.0]) # target position in the x-y plane
|
||||
self.initial_position = np.array([0.0, 0.0]) # initial position in the x-y plane
|
||||
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
|
||||
self.isfallen = False
|
||||
self.waypoint_index = 0
|
||||
self.route_completed = False
|
||||
self.debug_every_n_steps = 5
|
||||
self.calibrate_nominal_from_neutral = True
|
||||
self.auto_calibrate_train_sim_flip = True
|
||||
self.nominal_calibrated_once = False
|
||||
self.flip_calibrated_once = False
|
||||
self._target_hz = 0.0
|
||||
self._target_dt = 0.0
|
||||
self._last_sync_time = None
|
||||
target_hz_env = 0
|
||||
if target_hz_env:
|
||||
try:
|
||||
self._target_hz = float(target_hz_env)
|
||||
except ValueError:
|
||||
self._target_hz = 0.0
|
||||
if self._target_hz > 0.0:
|
||||
self._target_dt = 1.0 / self._target_hz
|
||||
|
||||
# State space
|
||||
# 原始观测大小: 78
|
||||
obs_size = 78
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(obs_size,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
action_dim = len(self.Player.robot.ROBOT_MOTORS)
|
||||
self.no_of_actions = action_dim
|
||||
self.action_space = spaces.Box(
|
||||
low=-1.0,
|
||||
high=1.0,
|
||||
shape=(action_dim,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
# 中立姿态
|
||||
self.joint_nominal_position = np.array(
|
||||
[
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
1.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
-1.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
0.8,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
-0.8,
|
||||
0.4,
|
||||
0.0,
|
||||
]
|
||||
)
|
||||
self.joint_nominal_position = np.zeros(self.no_of_actions)
|
||||
self.train_sim_flip = np.array(
|
||||
[
|
||||
1.0, # 0: Head_yaw (he1)
|
||||
-1.0, # 1: Head_pitch (he2)
|
||||
1.0, # 2: Left_Shoulder_Pitch (lae1)
|
||||
-1.0, # 3: Left_Shoulder_Roll (lae2)
|
||||
-1.0, # 4: Left_Elbow_Pitch (lae3)
|
||||
1.0, # 5: Left_Elbow_Yaw (lae4)
|
||||
-1.0, # 6: Right_Shoulder_Pitch (rae1)
|
||||
-1.0, # 7: Right_Shoulder_Roll (rae2)
|
||||
1.0, # 8: Right_Elbow_Pitch (rae3)
|
||||
1.0, # 9: Right_Elbow_Yaw (rae4)
|
||||
1.0, # 10: Waist (te1)
|
||||
1.0, # 11: Left_Hip_Pitch (lle1)
|
||||
-1.0, # 12: Left_Hip_Roll (lle2)
|
||||
-1.0, # 13: Left_Hip_Yaw (lle3)
|
||||
1.0, # 14: Left_Knee_Pitch (lle4)
|
||||
1.0, # 15: Left_Ankle_Pitch (lle5)
|
||||
-1.0, # 16: Left_Ankle_Roll (lle6)
|
||||
-1.0, # 17: Right_Hip_Pitch (rle1)
|
||||
-1.0, # 18: Right_Hip_Roll (rle2)
|
||||
-1.0, # 19: Right_Hip_Yaw (rle3)
|
||||
-1.0, # 20: Right_Knee_Pitch (rle4)
|
||||
-1.0, # 21: Right_Ankle_Pitch (rle5)
|
||||
-1.0, # 22: Right_Ankle_Roll (rle6)
|
||||
]
|
||||
)
|
||||
|
||||
self.scaling_factor = 0.5
|
||||
# self.scaling_factor = 1
|
||||
|
||||
# Small reset perturbations for robustness training.
|
||||
self.enable_reset_perturb = False
|
||||
self.reset_beam_yaw_range_deg = 180 # randomize target direction fully to encourage learning a real walk instead of a fixed gait
|
||||
self.reset_joint_noise_rad = 0.035
|
||||
self.reset_perturb_steps = 5
|
||||
self.reset_recover_steps = 8
|
||||
|
||||
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.previous_pos = np.array([0.0, 0.0]) # Track previous position
|
||||
self.Player.server.connect()
|
||||
# sleep(2.0) # Longer wait for connection to establish completely
|
||||
self.Player.server.send_immediate(
|
||||
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
|
||||
)
|
||||
self.start_time = time.time()
|
||||
|
||||
def debug_log(self, message):
|
||||
print(message)
|
||||
try:
|
||||
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
|
||||
with open(log_path, "a", encoding="utf-8") as f:
|
||||
f.write(message + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def observe(self, init=False):
|
||||
|
||||
"""获取当前观测值"""
|
||||
robot = self.Player.robot
|
||||
world = self.Player.world
|
||||
|
||||
# Safety check: ensure data is available
|
||||
|
||||
# 计算目标速度
|
||||
raw_target = self.target_position - world.global_position[:2]
|
||||
velocity = MathOps.rotate_2d_vec(
|
||||
raw_target,
|
||||
-robot.global_orientation_euler[2],
|
||||
is_rad=False
|
||||
)
|
||||
|
||||
# 计算相对方向
|
||||
rel_orientation = MathOps.vector_angle(velocity) * 0.3
|
||||
rel_orientation = np.clip(rel_orientation, -0.25, 0.25)
|
||||
|
||||
velocity = np.concatenate([velocity, np.array([rel_orientation])])
|
||||
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
|
||||
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
|
||||
|
||||
# 关节状态
|
||||
radian_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
radian_joint_speeds = np.deg2rad(
|
||||
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
|
||||
qpos_qvel_previous_action = np.concatenate([
|
||||
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
|
||||
radian_joint_speeds / 110.0 * self.train_sim_flip,
|
||||
self.previous_action / 10.0,
|
||||
])
|
||||
|
||||
# 角速度
|
||||
ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)
|
||||
|
||||
# 投影的重力方向
|
||||
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
|
||||
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
|
||||
|
||||
# 组合观测
|
||||
observation = np.concatenate([
|
||||
qpos_qvel_previous_action,
|
||||
ang_vel,
|
||||
velocity,
|
||||
projected_gravity,
|
||||
])
|
||||
|
||||
observation = np.clip(observation, -10.0, 10.0)
|
||||
return observation.astype(np.float32)
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
self.Player.server.receive()
|
||||
self.Player.world.update()
|
||||
self.Player.robot.commit_motor_targets_pd()
|
||||
self.Player.server.send()
|
||||
if self._target_dt > 0.0:
|
||||
now = time.time()
|
||||
if self._last_sync_time is None:
|
||||
self._last_sync_time = now
|
||||
return
|
||||
elapsed = now - self._last_sync_time
|
||||
remaining = self._target_dt - elapsed
|
||||
if remaining > 0.0:
|
||||
time.sleep(remaining)
|
||||
now = time.time()
|
||||
self._last_sync_time = now
|
||||
|
||||
def debug_joint_status(self):
|
||||
robot = self.Player.robot
|
||||
actual_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
target_joint_positions = getattr(
|
||||
self,
|
||||
'target_joint_positions',
|
||||
np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
|
||||
)
|
||||
joint_error = actual_joint_positions - target_joint_positions
|
||||
leg_slice = slice(11, None)
|
||||
|
||||
self.debug_log(
|
||||
"[WalkDebug] "
|
||||
f"step={self.step_counter} "
|
||||
f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
|
||||
f"target_xy={np.round(self.target_position, 3).tolist()} "
|
||||
f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
|
||||
f"fallen={self.Player.world.global_position[2] < 0.3}"
|
||||
)
|
||||
print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
|
||||
|
||||
    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2                           # fixed distance to the first waypoint
        length2 = np.random.uniform(0.6, 1)   # randomize second waypoint distance
        length3 = np.random.uniform(0.6, 1)   # randomize third waypoint distance
        angle2 = np.random.uniform(-30, 30)   # randomize second waypoint direction (deg)
        angle3 = np.random.uniform(-30, 30)   # randomize third waypoint direction (deg)

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam target position and orientation to diversify training
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Execute the Neutral skill until it reports completion, giving the robot
        # enough time to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # require 20 completions before treating the stance as stable
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]  # point2/point3 are computed but only point1 is currently used
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}
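The waypoint construction at the end of `reset()` rotates a forward offset by the robot's heading. A plain-numpy sketch of the assumed semantics of `MathOps.rotate_2d_vec(..., is_rad=False)`, with made-up coordinates:

```python
import numpy as np

def rotate_2d(vec, angle_deg):
    # Counter-clockwise 2-D rotation; assumed semantics of MathOps.rotate_2d_vec.
    a = np.deg2rad(angle_deg)
    rot = np.array([[np.cos(a), -np.sin(a)],
                    [np.sin(a),  np.cos(a)]])
    return rot @ np.asarray(vec, dtype=float)

# Example: robot at (1, 2) heading 90 deg; the first waypoint 2 m "forward" lands at (1, 4).
point1 = np.array([1.0, 2.0]) + rotate_2d([2.0, 0.0], 90.0)
print(np.round(point1, 3))  # [1. 4.]
```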
    def render(self, mode='human', close=False):
        return
    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        # Fall detection (important!); the print is rate-limited, the penalty is not
        if height < 0.3:
            if time.time() - self.start_time > 1200:
                self.start_time = time.time()
                print("fall_penalty: -20")
            return -20.0

        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 0.1

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.05 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -1.0 * tilt_mag
        # ang_vel_penalty = -0.05 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height if abs(height - target_height) > 0.05 else 0.0
        height_penalty = -2.0 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for timing intervals
        # height_rate = height - self.last_height  # positive = rising, negative = sinking
        # self.last_height = height

        # Penalize height drops (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # tunable; -height_rate is positive when sinking

        # # In compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            # + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total
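To make the shaping concrete, here is the reward for one plausible non-fallen state, using the coefficients above (a standalone sketch with made-up sample values):

```python
# Made-up sample state: slight tilt, small height droop, modest action change.
tilt_mag = 0.1
height, target_height = 0.55, 0.62
action_delta_norm = 0.8

alive_bonus = 0.1
smoothness_penalty = -0.05 * action_delta_norm                     # -0.04
height_error = height - target_height if abs(height - target_height) > 0.05 else 0.0
height_penalty = -2.0 * abs(height_error)                          # -0.14
posture_penalty = -1.0 * tilt_mag                                  # -0.10
total = alive_bonus + smoothness_penalty + posture_penalty + height_penalty
print(f"{total:.4f}")  # -0.1800: posture and height terms dominate the alive bonus
```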
    def step(self, action):

        r = self.Player.robot

        self.target_joint_positions = (
            self.joint_nominal_position
            + self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        # self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot has fallen, timed out, or completed the route
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}
class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = 20  # number of parallel environments; lower to reduce CPU/network pressure during init
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 128    # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return WalkEnv(self.ip, self.server_p + i_env)

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            from torch import nn  # local import so the module loads without torch at import time

            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=nn.ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.005,    # Entropy coefficient for exploration
                    # clip_range=0.13, # PPO clipping parameter
                    gae_lambda=0.95,   # GAE lambda
                    gamma=0.99,        # Discount factor
                    # target_kl=0.03,
                    # n_epochs=5
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()
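As the parameter comments note, the rollout buffer holds `n_steps_per_env * n_envs` transitions and `minibatch_size` should divide it evenly. Checking the values above (standalone sketch):

```python
n_envs = 20
n_steps_per_env = 1024
minibatch_size = 128

buffer_size = n_envs * n_steps_per_env   # 20480 transitions per rollout
assert buffer_size % minibatch_size == 0, "minibatch_size must evenly divide the rollout buffer"
print(buffer_size // minibatch_size)     # 160 minibatches per epoch
```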
    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()
if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,         # Server port
            m=3200,         # Monitor port
            r=0,            # Robot type
            t='Gym',        # Team name
            u=1             # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_012/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_012/",})
@@ -1,625 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random, uniform

from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R
'''
Objective:
Learn how to run forward using step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''
class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])   # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # set to a positive rate to throttle sync() to real time
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture (note: overridden by the all-zeros assignment below)
        self.joint_nominal_position = np.array(
            [
                0.0, 0.0,                        # head
                0.0, 1.4, 0.0, -0.4,             # left arm
                0.0, -1.4, 0.0, 0.4,             # right arm
                0.0,                             # waist
                -0.4, 0.0, 0.0, 0.8, -0.4, 0.0,  # left leg
                0.4, 0.0, 0.0, -0.8, 0.4, 0.0,   # right leg
            ]
        )
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )

        self.scaling_factor = 0.1
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = False
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.035
        self.reset_perturb_steps = 5
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for the connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()
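The action bound of ±10 combined with `scaling_factor = 0.1` implies the policy's authority around the nominal pose (a standalone sketch using the values from `__init__` above):

```python
import math

action_high = 10.0      # action_space bound in WalkEnv.__init__
scaling_factor = 0.1
max_offset_rad = action_high * scaling_factor     # 1.0 rad of authority per joint
print(f"{math.degrees(max_offset_rad):.1f} deg")  # ~57.3 deg at action saturation
```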
    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass
    def observe(self, init=False):

        """Build the current observation vector"""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Compute the target velocity command in the robot frame
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Compute the relative direction
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Combine observations
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)
    def sync(self):
        ''' Run a single simulation step '''
        self.Player.server.receive()
        self.Player.world.update()
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now
    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2                           # fixed distance to the first waypoint
        length2 = np.random.uniform(0.6, 1)   # randomize second waypoint distance
        length3 = np.random.uniform(0.6, 1)   # randomize third waypoint distance
        angle2 = np.random.uniform(-30, 30)   # randomize second waypoint direction (deg)
        angle3 = np.random.uniform(-30, 30)   # randomize third waypoint direction (deg)

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam target position and orientation to diversify training
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Execute the Neutral skill until it reports completion, giving the robot
        # enough time to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # require 20 completions before treating the stance as stable
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]  # point2/point3 are computed but only point1 is currently used
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}
    def render(self, mode='human', close=False):
        return
    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            remain = max(0, 800 - self.step_counter)
            return -8.0 - 0.01 * remain

        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 0.3

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.03 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -1.0 * tilt_mag
        # ang_vel_penalty = -0.05 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height if abs(height - target_height) > 0.05 else 0.0
        height_penalty = -2.0 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for timing intervals
        # height_rate = height - self.last_height  # positive = rising, negative = sinking
        # self.last_height = height

        # Penalize height drops (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # tunable; -height_rate is positive when sinking

        # # In compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            # + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total
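The early-fall penalty above grows with the steps remaining in the episode, so an immediate fall costs more than one near the 800-step cap. Its range, worked out (standalone sketch):

```python
def fall_penalty(step_counter, max_steps=800):
    # Mirrors the shaping above: -8.0 base plus 0.01 per remaining step.
    remain = max(0, max_steps - step_counter)
    return -8.0 - 0.01 * remain

print(fall_penalty(0))    # -16.0: worst case, falling on the first step
print(fall_penalty(800))  # -8.0: falling at the episode cap
```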
    def step(self, action):

        r = self.Player.robot

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot has fallen, timed out, or completed the route
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}
class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = 20  # number of parallel environments; lower to reduce CPU/network pressure during init
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 128    # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return WalkEnv(self.ip, self.server_p + i_env)

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            from torch import nn  # local import so the module loads without torch at import time

            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=nn.ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.005,    # Entropy coefficient for exploration
                    # clip_range=0.13, # PPO clipping parameter
                    gae_lambda=0.95,   # GAE lambda
                    gamma=0.99,        # Discount factor
                    # target_kl=0.03,
                    # n_epochs=5
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()
if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,         # Server port
            m=3200,         # Monitor port
            r=0,            # Robot type
            t='Gym',        # Team name
            u=1             # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_000/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_000/",})
@@ -1,625 +0,0 @@
|
||||
import os
|
||||
import numpy as np
|
||||
import math
|
||||
import time
|
||||
from time import sleep
|
||||
from random import random
|
||||
from random import uniform
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv
|
||||
|
||||
import gymnasium as gym
|
||||
from gymnasium import spaces
|
||||
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from scripts.commons.Server import Server as Train_Server
|
||||
|
||||
from agent.base_agent import Base_Agent
|
||||
from utils.math_ops import MathOps
|
||||
|
||||
from scipy.spatial.transform import Rotation as R
|
||||
|
||||
'''
|
||||
Objective:
|
||||
Learn how to run forward using step primitive
|
||||
----------
|
||||
- class Basic_Run: implements an OpenAI custom gym
|
||||
- class Train: implements algorithms to train a new model or test an existing model
|
||||
'''
|
||||
|
||||
|
||||
class WalkEnv(gym.Env):
|
||||
def __init__(self, ip, server_p) -> None:
|
||||
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.Player = player = Base_Agent(
|
||||
team_name="Gym",
|
||||
number=1,
|
||||
host=ip,
|
||||
port=server_p
|
||||
)
|
||||
self.robot_type = self.Player.robot
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.force_play_on = True
|
||||
|
||||
self.target_position = np.array([0.0, 0.0]) # target position in the x-y plane
|
||||
self.initial_position = np.array([0.0, 0.0]) # initial position in the x-y plane
|
||||
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
|
||||
self.isfallen = False
|
||||
self.waypoint_index = 0
|
||||
self.route_completed = False
|
||||
self.debug_every_n_steps = 5
|
||||
self.enable_debug_joint_status = False
|
||||
self.calibrate_nominal_from_neutral = True
|
||||
self.auto_calibrate_train_sim_flip = True
|
||||
self.nominal_calibrated_once = False
|
||||
self.flip_calibrated_once = False
|
||||
self._target_hz = 0.0
|
||||
self._target_dt = 0.0
|
||||
self._last_sync_time = None
|
||||
target_hz_env = 0
|
||||
if target_hz_env:
|
||||
try:
|
||||
self._target_hz = float(target_hz_env)
|
||||
except ValueError:
|
||||
self._target_hz = 0.0
|
||||
if self._target_hz > 0.0:
|
||||
self._target_dt = 1.0 / self._target_hz
|
||||
|
||||
# State space
|
||||
# 原始观测大小: 78
|
||||
obs_size = 78
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(obs_size,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
action_dim = len(self.Player.robot.ROBOT_MOTORS)
|
||||
self.no_of_actions = action_dim
|
||||
self.action_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(action_dim,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
# 中立姿态
|
||||
self.joint_nominal_position = np.array(
|
||||
[
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
1.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
-1.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
0.8,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
-0.8,
|
||||
0.4,
|
||||
0.0,
|
||||
]
|
||||
)
|
||||
self.joint_nominal_position = np.zeros(self.no_of_actions)
|
||||
self.train_sim_flip = np.array(
|
||||
[
|
||||
1.0, # 0: Head_yaw (he1)
|
||||
-1.0, # 1: Head_pitch (he2)
|
||||
1.0, # 2: Left_Shoulder_Pitch (lae1)
|
||||
-1.0, # 3: Left_Shoulder_Roll (lae2)
|
||||
-1.0, # 4: Left_Elbow_Pitch (lae3)
|
||||
1.0, # 5: Left_Elbow_Yaw (lae4)
|
||||
-1.0, # 6: Right_Shoulder_Pitch (rae1)
|
||||
-1.0, # 7: Right_Shoulder_Roll (rae2)
|
||||
1.0, # 8: Right_Elbow_Pitch (rae3)
|
||||
1.0, # 9: Right_Elbow_Yaw (rae4)
|
||||
1.0, # 10: Waist (te1)
|
||||
1.0, # 11: Left_Hip_Pitch (lle1)
|
||||
-1.0, # 12: Left_Hip_Roll (lle2)
|
||||
-1.0, # 13: Left_Hip_Yaw (lle3)
|
||||
1.0, # 14: Left_Knee_Pitch (lle4)
|
||||
1.0, # 15: Left_Ankle_Pitch (lle5)
|
||||
-1.0, # 16: Left_Ankle_Roll (lle6)
|
||||
-1.0, # 17: Right_Hip_Pitch (rle1)
|
||||
-1.0, # 18: Right_Hip_Roll (rle2)
|
||||
-1.0, # 19: Right_Hip_Yaw (rle3)
|
||||
-1.0, # 20: Right_Knee_Pitch (rle4)
|
||||
-1.0, # 21: Right_Ankle_Pitch (rle5)
|
||||
-1.0, # 22: Right_Ankle_Roll (rle6)
|
||||
]
|
||||
)
|
||||
|
||||
self.scaling_factor = 0.1
|
||||
# self.scaling_factor = 1
|
||||
|
||||
# Small reset perturbations for robustness training.
|
||||
self.enable_reset_perturb = False
|
||||
self.reset_beam_yaw_range_deg = 180 # randomize target direction fully to encourage learning a real walk instead of a fixed gait
|
||||
self.reset_joint_noise_rad = 0.035
|
||||
self.reset_perturb_steps = 5
|
||||
self.reset_recover_steps = 8
|
||||
|
||||
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.previous_pos = np.array([0.0, 0.0]) # Track previous position
|
||||
self.Player.server.connect()
|
||||
# sleep(2.0) # Longer wait for connection to establish completely
|
||||
self.Player.server.send_immediate(
|
||||
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
|
||||
)
|
||||
self.start_time = time.time()
|
||||
|
||||
def debug_log(self, message):
|
||||
print(message)
|
||||
try:
|
||||
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
|
||||
with open(log_path, "a", encoding="utf-8") as f:
|
||||
f.write(message + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def observe(self, init=False):
|
||||
|
||||
"""获取当前观测值"""
|
||||
robot = self.Player.robot
|
||||
world = self.Player.world
|
||||
|
||||
# Safety check: ensure data is available
|
||||
|
||||
# 计算目标速度
|
||||
raw_target = self.target_position - world.global_position[:2]
|
||||
velocity = MathOps.rotate_2d_vec(
|
||||
raw_target,
|
||||
-robot.global_orientation_euler[2],
|
||||
is_rad=False
|
||||
)
|
||||
|
||||
# 计算相对方向
|
||||
rel_orientation = MathOps.vector_angle(velocity) * 0.3
|
||||
rel_orientation = np.clip(rel_orientation, -0.25, 0.25)
|
||||
|
||||
velocity = np.concatenate([velocity, np.array([rel_orientation])])
|
||||
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
|
||||
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
|
||||
|
||||
# 关节状态
|
||||
radian_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
radian_joint_speeds = np.deg2rad(
|
||||
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
|
||||
qpos_qvel_previous_action = np.concatenate([
|
||||
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
|
||||
radian_joint_speeds / 110.0 * self.train_sim_flip,
|
||||
self.previous_action / 10.0,
|
||||
])
|
||||
|
||||
# 角速度
|
||||
ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)
|
||||
|
||||
# 投影的重力方向
|
||||
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
|
||||
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
|
||||
|
||||
# 组合观测
|
||||
observation = np.concatenate([
|
||||
qpos_qvel_previous_action,
|
||||
ang_vel,
|
||||
velocity,
|
||||
projected_gravity,
|
||||
])
|
||||
|
||||
observation = np.clip(observation, -10.0, 10.0)
|
||||
return observation.astype(np.float32)
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
self.Player.server.receive()
|
||||
self.Player.world.update()
|
||||
self.Player.robot.commit_motor_targets_pd()
|
||||
self.Player.server.send()
|
||||
if self._target_dt > 0.0:
|
||||
now = time.time()
|
||||
if self._last_sync_time is None:
|
||||
self._last_sync_time = now
|
||||
return
|
||||
elapsed = now - self._last_sync_time
|
||||
remaining = self._target_dt - elapsed
|
||||
if remaining > 0.0:
|
||||
time.sleep(remaining)
|
||||
now = time.time()
|
||||
self._last_sync_time = now
|
||||
|
||||
def debug_joint_status(self):
|
||||
robot = self.Player.robot
|
||||
actual_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
target_joint_positions = getattr(
|
||||
self,
|
||||
'target_joint_positions',
|
||||
np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
|
||||
)
|
||||
joint_error = actual_joint_positions - target_joint_positions
|
||||
leg_slice = slice(11, None)
|
||||
|
||||
self.debug_log(
|
||||
"[WalkDebug] "
|
||||
f"step={self.step_counter} "
|
||||
f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
|
||||
f"target_xy={np.round(self.target_position, 3).tolist()} "
|
||||
f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
|
||||
f"fallen={self.Player.world.global_position[2] < 0.3}"
|
||||
)
|
||||
print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
|
||||
|
||||
def reset(self, seed=None, options=None):
|
||||
'''
|
||||
Reset and stabilize the robot
|
||||
Note: for some behaviors it would be better to reduce stabilization or add noise
|
||||
'''
|
||||
r = self.Player.robot
|
||||
super().reset(seed=seed)
|
||||
if seed is not None:
|
||||
np.random.seed(seed)
|
||||
|
||||
length1 = 2 # randomize target distance
|
||||
length2 = np.random.uniform(0.6, 1) # randomize target distance
|
||||
length3 = np.random.uniform(0.6, 1) # randomize target distance
|
||||
angle2 = np.random.uniform(-30, 30) # randomize initial orientation
|
||||
angle3 = np.random.uniform(-30, 30) # randomize target direction
|
||||
|
||||
self.step_counter = 0
|
||||
self.waypoint_index = 0
|
||||
self.route_completed = False
|
||||
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.previous_pos = np.array([0.0, 0.0]) # Initialize for first step
|
||||
self.walk_cycle_step = 0
|
||||
|
||||
# 随机 beam 目标位置和朝向,增加训练多样性
|
||||
beam_x = (random() - 0.5) * 10
|
||||
beam_y = (random() - 0.5) * 10
|
||||
beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)
|
||||
|
||||
for _ in range(5):
|
||||
self.Player.server.receive()
|
||||
self.Player.world.update()
|
||||
self.Player.robot.commit_motor_targets_pd()
|
||||
self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
|
||||
self.Player.server.send()
|
||||
|
||||
# 执行 Neutral 技能直到完成,给机器人足够时间在 beam 位置稳定站立
|
||||
finished_count = 0
|
||||
for _ in range(50):
|
||||
finished = self.Player.skills_manager.execute("Neutral")
|
||||
self.sync()
|
||||
if finished:
|
||||
finished_count += 1
|
||||
if finished_count >= 20: # 假设需要连续20次完成才算成功
|
||||
break
|
||||
|
||||
if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
|
||||
perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
|
||||
# Perturb waist + lower body only (10:), keep head/arms stable.
|
||||
perturb_action[10:] = np.random.uniform(
|
||||
-self.reset_joint_noise_rad,
|
||||
self.reset_joint_noise_rad,
|
||||
size=(self.no_of_actions - 10,)
|
||||
)
|
||||
|
||||
for _ in range(self.reset_perturb_steps):
|
||||
target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
|
||||
for idx, target in enumerate(target_joint_positions):
|
||||
r.set_motor_target_position(
|
||||
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
|
||||
)
|
||||
self.sync()
|
||||
|
||||
for i in range(self.reset_recover_steps):
|
||||
# Linearly fade perturbation to help policy start from near-neutral.
|
||||
alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
|
||||
target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
|
||||
for idx, target in enumerate(target_joint_positions):
|
||||
r.set_motor_target_position(
|
||||
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
|
||||
)
|
||||
self.sync()
|
||||
|
||||
# memory variables
|
||||
self.sync()
|
||||
self.initial_position = np.array(self.Player.world.global_position[:2])
|
||||
self.previous_pos = self.initial_position.copy() # Critical: set to actual position
|
||||
self.act = np.zeros(self.no_of_actions, np.float32)
|
||||
# Build target in the robot's current forward direction instead of fixed global +x.
|
||||
heading_deg = float(r.global_orientation_euler[2])
|
||||
forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
|
||||
point1 = self.initial_position + forward_offset
|
||||
point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
|
||||
point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
|
||||
self.point_list = [point1]
|
||||
self.target_position = self.point_list[self.waypoint_index]
|
||||
self.initial_height = self.Player.world.global_position[2]
|
||||
|
||||
return self.observe(True), {}
|
||||
|
||||
def render(self, mode='human', close=False):
|
||||
return
|
||||
|
||||
def compute_reward(self, previous_pos, current_pos, action):
|
||||
height = float(self.Player.world.global_position[2])
|
||||
|
||||
orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
|
||||
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
|
||||
tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
|
||||
ang_vel = np.deg2rad(self.Player.robot.gyroscope)
|
||||
ang_vel_mag = float(np.linalg.norm(ang_vel))
|
||||
|
||||
is_fallen = height < 0.3
|
||||
if is_fallen:
|
||||
remain = max(0, 800 - self.step_counter)
|
||||
return -8.0 - 0.01 * remain
|
||||
|
||||
|
||||
|
||||
# # 目标方向
|
||||
# to_target = self.target_position - current_pos
|
||||
# dist_to_target = float(np.linalg.norm(to_target))
|
||||
# if dist_to_target < 0.5:
|
||||
# return 15.0
|
||||
|
||||
# forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
|
||||
# delta_pos = current_pos - previous_pos
|
||||
# forward_step = float(np.dot(delta_pos, forward_dir))
|
||||
# lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))
|
||||
|
||||
# 奖励项
|
||||
# progress_reward = 2 * forward_step
|
||||
# lateral_penalty = -0.1 * lateral_step
|
||||
alive_bonus = 0.3
|
||||
|
||||
# action_penalty = -0.01 * float(np.linalg.norm(action))
|
||||
smoothness_penalty = -0.03 * float(np.linalg.norm(action - self.last_action_for_reward))
|
||||
|
||||
posture_penalty = -1.0 * (tilt_mag)
|
||||
# ang_vel_penalty = -0.05 * ang_vel_mag
|
||||
|
||||
target_height = self.initial_height
|
||||
height_error = height - target_height if abs(height - target_height) > 0.05 else 0.0
|
||||
height_penalty = -2.0 * abs(height_error) # 惩罚高度偏离,系数可调
|
||||
|
||||
# # 在 compute_reward 开头附近,添加高度变化率计算
|
||||
# if not hasattr(self, 'last_height'):
|
||||
# self.last_height = height
|
||||
# self.last_height_time = self.step_counter # 可选,用于时间间隔
|
||||
# height_rate = height - self.last_height # 正为上升,负为下降
|
||||
# self.last_height = height
|
||||
|
||||
# 惩罚高度下降(负变化率)
|
||||
# height_down_penalty = -5.0 * max(0, -height_rate) # 系数可调,-height_rate 为正表示下降幅度
|
||||
|
||||
# # 在 compute_reward 中
|
||||
# if self.step_counter > 50:
|
||||
# avg_prev_action = np.mean(self.prev_action_history, axis=0)
|
||||
# novelty = float(np.linalg.norm(action - avg_prev_action))
|
||||
# exploration_bonus = 0.05 * novelty
|
||||
# else:
|
||||
# exploration_bonus = 0
|
||||
|
||||
# self.prev_action_history[self.history_idx] = action
|
||||
# self.history_idx = (self.history_idx + 1) % 50
|
||||
|
||||
|
||||
total = (
|
||||
# progress_reward +
|
||||
alive_bonus +
|
||||
# lateral_penalty +
|
||||
# action_penalty +
|
||||
smoothness_penalty +
|
||||
posture_penalty
|
||||
# + ang_vel_penalty
|
||||
+ height_penalty
|
||||
# + exploration_bonus
|
||||
# + height_down_penalty
|
||||
)
|
||||
if time.time() - self.start_time >= 1200:
|
||||
self.start_time = time.time()
|
||||
print(
|
||||
# f"progress_reward:{progress_reward:.4f}",
|
||||
# f"lateral_penalty:{lateral_penalty:.4f}",
|
||||
# f"action_penalty:{action_penalty:.4f}"s,
|
||||
f"height_penalty:{height_penalty:.4f}",
|
||||
f"smoothness_penalty:{smoothness_penalty:.4f},",
|
||||
f"posture_penalty:{posture_penalty:.4f}",
|
||||
# f"ang_vel_penalty:{ang_vel_penalty:.4f}",
|
||||
# f"height_down_penalty:{height_down_penalty:.4f}",
|
||||
# f"exploration_bonus:{exploration_bonus:.4f}"
|
||||
)
|
||||
|
||||
return total
|
||||
|
||||
|
||||
|
||||
def step(self, action):
|
||||
|
||||
r = self.Player.robot
|
||||
self.previous_action = action
|
||||
|
||||
self.target_joint_positions = (
|
||||
# self.joint_nominal_position +
|
||||
self.scaling_factor * action
|
||||
)
|
||||
self.target_joint_positions *= self.train_sim_flip
|
||||
|
||||
for idx, target in enumerate(self.target_joint_positions):
|
||||
r.set_motor_target_position(
|
||||
r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
|
||||
)
|
||||
|
||||
self.previous_action = action
|
||||
|
||||
self.sync() # run simulation step
|
||||
self.step_counter += 1
|
||||
|
||||
if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
|
||||
self.debug_joint_status()
|
||||
|
||||
current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)
|
||||
|
||||
# Compute reward based on movement from previous step
|
||||
reward = self.compute_reward(self.previous_pos, current_pos, action)
|
||||
|
||||
# Update previous position
|
||||
self.previous_pos = current_pos.copy()
|
||||
self.last_action_for_reward = action.copy()
|
||||
|
||||
# Fall detection and penalty
|
||||
is_fallen = self.Player.world.global_position[2] < 0.3
|
||||
|
||||
# terminal state: the robot is falling or timeout
|
||||
terminated = is_fallen or self.step_counter > 800 or self.route_completed
|
||||
truncated = False
|
||||
|
||||
return self.observe(), reward, terminated, truncated, {}
|
||||
|
||||
|
||||
class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
|
||||
# --------------------------------------- Learning parameters
|
||||
n_envs = 20 # Reduced from 8 to decrease CPU/network pressure during init
|
||||
if n_envs < 1:
|
||||
raise ValueError("GYM_CPU_N_ENVS must be >= 1")
|
||||
n_steps_per_env = 1024 # RolloutBuffer is of size (n_steps_per_env * n_envs)
|
||||
minibatch_size = 128 # should be a factor of (n_steps_per_env * n_envs)
|
||||
total_steps = 30000000
|
||||
learning_rate = 3e-4
|
||||
folder_name = f'Walk_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
print(f"Model path: {model_path}")
|
||||
print(f"Using {n_envs} parallel environments")
|
||||
|
||||
# --------------------------------------- Run algorithm
|
||||
def init_env(i_env):
|
||||
def thunk():
|
||||
return WalkEnv(self.ip, self.server_p + i_env)
|
||||
|
||||
return thunk
|
||||
|
||||
server_log_dir = os.path.join(model_path, "server_logs")
|
||||
os.makedirs(server_log_dir, exist_ok=True)
|
||||
servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1) # include 1 extra server for testing
|
||||
|
||||
# Wait for servers to start
|
||||
print(f"Starting {n_envs + 1} rcssservermj servers...")
|
||||
print("Servers started, creating environments...")
|
||||
|
||||
env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
|
||||
eval_env = SubprocVecEnv([init_env(n_envs)])
|
||||
|
||||
try:
|
||||
# Custom policy network architecture
|
||||
policy_kwargs = dict(
|
||||
net_arch=dict(
|
||||
pi=[512, 256, 128], # Policy network: 3 layers
|
||||
vf=[512, 256, 128] # Value network: 3 layers
|
||||
),
|
||||
activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
|
||||
)
|
||||
|
||||
if "model_file" in args: # retrain
|
||||
model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate)
|
||||
else: # train new model
|
||||
model = PPO(
|
||||
"MlpPolicy",
|
||||
env=env,
|
||||
verbose=1,
|
||||
n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size,
|
||||
learning_rate=learning_rate,
|
||||
device="cpu",
|
||||
policy_kwargs=policy_kwargs,
|
||||
ent_coef=0.005, # Entropy coefficient for exploration
|
||||
# clip_range=0.13, # PPO clipping parameter
|
||||
gae_lambda=0.95, # GAE lambda
|
||||
gamma=0.99 , # Discount factor
|
||||
# target_kl=0.03,
|
||||
# n_epochs=5
|
||||
)
|
||||
|
||||
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
|
||||
backup_env_file=__file__)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1) # wait for child processes
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
def test(self, args):
|
||||
|
||||
# Uses different server and monitor ports
|
||||
server_log_dir = os.path.join(args["folder_dir"], "server_logs")
|
||||
os.makedirs(server_log_dir, exist_ok=True)
|
||||
server = Train_Server(self.server_p - 1, self.monitor_p, 1)
|
||||
env = WalkEnv(self.ip, self.server_p - 1)
|
||||
model = PPO.load(args["model_file"], env=env)
|
||||
|
||||
try:
|
||||
self.export_model(args["model_file"], args["model_file"] + ".pkl",
|
||||
False) # Export to pkl to create custom behavior
|
||||
self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
env.close()
|
||||
server.kill()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from types import SimpleNamespace
|
||||
|
||||
# 创建默认参数
|
||||
script_args = SimpleNamespace(
|
||||
args=SimpleNamespace(
|
||||
i='127.0.0.1', # Server IP
|
||||
p=3100, # Server port
|
||||
m=3200, # Monitor port
|
||||
r=0, # Robot type
|
||||
t='Gym', # Team name
|
||||
u=1 # Uniform number
|
||||
)
|
||||
)
|
||||
|
||||
trainer = Train(script_args)
|
||||
trainer.train({})
|
||||
# trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_000/best_model.zip",
|
||||
# "folder_dir": "scripts/gyms/logs/Walk_R0_000/",})
|
||||
@@ -1,625 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

import gymnasium as gym
from gymnasium import spaces

from torch import nn

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using the step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # disabled hook: set to a positive rate to enable real-time pacing in sync()
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz
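The `target_hz_env = 0` stub above, with its `float()` conversion guarded by `except ValueError`, reads like a leftover hook that once pulled the pacing rate from an environment variable. A sketch of how that hook could be wired; the variable name `GYM_CPU_TARGET_HZ` is a hypothetical placeholder, not something this repo is known to define:

```python
import os

# Hypothetical wiring for the real-time pacing stub (variable name is an assumption).
target_hz_env = os.environ.get("GYM_CPU_TARGET_HZ", "0")
try:
    target_hz = float(target_hz_env)
except ValueError:
    target_hz = 0.0
target_dt = 1.0 / target_hz if target_hz > 0.0 else 0.0  # 0.0 disables pacing
```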

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array([
            0.0, 0.0,                            # 0-1: head yaw/pitch
            0.0, 1.4, 0.0, -0.4,                 # 2-5: left arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0, -1.4, 0.0, 0.4,                 # 6-9: right arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0,                                 # 10: waist
            -0.4, 0.0, 0.0, 0.8, -0.4, 0.0,      # 11-16: left leg (hip p/r/y, knee, ankle p/r)
            0.4, 0.0, 0.0, -0.8, 0.4, 0.0,       # 17-22: right leg (hip p/r/y, knee, ankle p/r)
        ])
        # NOTE: the nominal posture above is immediately overridden with zeros.
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array([
            1.0, -1.0,                           # 0-1: Head_yaw (he1), Head_pitch (he2)
            1.0, -1.0, -1.0, 1.0,                # 2-5: Left_Shoulder_Pitch/Roll, Left_Elbow_Pitch/Yaw (lae1-4)
            -1.0, -1.0, 1.0, 1.0,                # 6-9: Right_Shoulder_Pitch/Roll, Right_Elbow_Pitch/Yaw (rae1-4)
            1.0,                                 # 10: Waist (te1)
            1.0, -1.0, -1.0, 1.0, 1.0, -1.0,     # 11-16: Left_Hip_Pitch/Roll/Yaw, Left_Knee_Pitch, Left_Ankle_Pitch/Roll (lle1-6)
            -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,  # 17-22: Right_Hip_Pitch/Roll/Yaw, Right_Knee_Pitch, Right_Ankle_Pitch/Roll (rle1-6)
        ])
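Since every entry of `train_sim_flip` is ±1, multiplying by it is an involution: the same vector converts joint angles from the simulator convention to the training convention and back. A minimal sketch with a toy 4-joint flip vector (not the robot's real one):

```python
import numpy as np

flip = np.array([1.0, -1.0, 1.0, -1.0])       # toy flip vector (entries are +/-1)
sim_angles = np.array([0.1, 0.2, -0.3, 0.4])  # angles in the simulator convention

train_angles = sim_angles * flip              # sim -> training convention
roundtrip = train_angles * flip               # training -> sim convention again
assert np.allclose(roundtrip, sim_angles)     # flip applied twice is the identity
```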

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.035
        self.reset_perturb_steps = 5
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for the connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):
        """Return the current observation vector."""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Target velocity command (robot frame)
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Relative direction to the target
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)
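The magic number `obs_size = 78` follows directly from the 23 motors enumerated in the `train_sim_flip` comments. A small accounting sketch:

```python
n_joints = 23  # len(ROBOT_MOTORS), per the index comments on train_sim_flip
obs_size = (
    n_joints      # normalized joint positions
    + n_joints    # normalized joint speeds
    + n_joints    # previous action / 10
    + 3           # clipped gyroscope angular velocity
    + 3           # velocity command (vx, vy, relative orientation)
    + 3           # projected gravity vector
)
assert obs_size == 78
```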

    def sync(self):
        ''' Run a single simulation step '''
        self.Player.server.receive()
        self.Player.world.update()
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now
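The pacing logic at the end of `sync()` is a plain elapsed-time rate limiter. The same idea isolated as a reusable helper that can be tested without a server (a sketch, not part of this repo):

```python
import time

class RateLimiter:
    """Sleep just enough to keep calls at or below `hz` per second (sketch)."""

    def __init__(self, hz: float):
        self.dt = 1.0 / hz if hz > 0.0 else 0.0
        self.last = None

    def wait(self):
        if self.dt <= 0.0:
            return  # pacing disabled
        now = time.time()
        if self.last is not None:
            remaining = self.dt - (now - self.last)
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
        self.last = now

limiter = RateLimiter(hz=50.0)
for _ in range(3):
    limiter.wait()  # iterations are spaced at least 20 ms apart
```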

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # fixed distance to the first waypoint
        length2 = np.random.uniform(0.6, 1)  # randomize second waypoint distance
        length3 = np.random.uniform(0.6, 1)  # randomize third waypoint distance
        angle2 = np.random.uniform(-30, 30)  # randomize direction change at the second waypoint
        angle3 = np.random.uniform(-30, 30)  # randomize direction change at the third waypoint

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and heading to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot time to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
            if finished_count >= 20:  # treat 20 "finished" flags as stable (note: the count is cumulative, not consecutive)
                break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade the perturbation to help the policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to the actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build the target in the robot's current forward direction instead of the fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]  # NOTE: point2/point3 are computed but not used yet
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}
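The recovery loop fades the perturbation linearly down to zero; with `reset_recover_steps = 8` the fade coefficients are easy to verify in isolation:

```python
reset_recover_steps = 8
alphas = [1.0 - (i + 1) / reset_recover_steps for i in range(reset_recover_steps)]
print(alphas)  # [0.875, 0.75, 0.625, 0.5, 0.375, 0.25, 0.125, 0.0]
```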

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            remain = max(0, 800 - self.step_counter)
            return -8.0 - 0.01 * remain

        # # Target direction
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 0.3

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.03 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -1.0 * tilt_mag
        ang_vel_penalty = -0.05 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -2.0 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate-of-change computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height drops (negative rate of change)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; a positive -height_rate is the drop magnitude

        # # Inside compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total
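When tuning a weighted sum like this, logging each term's share of the total makes it obvious which penalty dominates. A standalone sketch using this version's weights with made-up per-step magnitudes (the sample values are assumptions, not logged data):

```python
# Hypothetical per-step magnitudes, only to illustrate relative scale.
terms = {
    "alive_bonus": 0.3,
    "smoothness_penalty": -0.03 * 1.5,  # ~ ||action - last_action||
    "posture_penalty": -1.0 * 0.1,      # ~ tilt magnitude
    "ang_vel_penalty": -0.05 * 0.8,     # ~ ||gyro|| in rad/s
    "height_penalty": -2.0 * 0.02,      # ~ |height error| in meters
}
total = sum(terms.values())
scale = sum(abs(v) for v in terms.values())
for name, value in terms.items():
    print(f"{name:20s} {value:+.4f} ({abs(value) / scale:.0%} of magnitude)")
print(f"{'total':20s} {total:+.4f}")
```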

    def step(self, action):

        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute the reward based on movement since the previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update the previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot fell, timed out, or completed the route
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}
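Gymnasium ships an environment checker that validates the `reset`/`step` contract and the declared spaces. `WalkEnv.__init__` connects to a live server, so this only works with an rcssservermj instance already listening on the port (a sketch):

```python
from gymnasium.utils.env_checker import check_env

# Requires a running server on this port; __init__ blocks on connect() otherwise.
env = WalkEnv("127.0.0.1", 3100)
try:
    check_env(env, skip_render_check=True)  # raises if spaces or signatures are off
finally:
    env.close()
```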


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = 20  # number of parallel training environments (one server per env)
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 1024  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 128  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 1e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return WalkEnv(self.ip, self.server_p + i_env)

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=nn.ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.001,  # Entropy coefficient for exploration
                    # clip_range=0.13,  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=0.99,  # Discount factor
                    target_kl=0.03,
                    # n_epochs=5
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create a custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_000/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_000/",})
@@ -1,626 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

import gymnasium as gym
from gymnasium import spaces

from torch import nn

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using the step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # disabled hook: set to a positive rate to enable real-time pacing in sync()
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array([
            0.0, 0.0,                            # 0-1: head yaw/pitch
            0.0, 1.4, 0.0, -0.4,                 # 2-5: left arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0, -1.4, 0.0, 0.4,                 # 6-9: right arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0,                                 # 10: waist
            -0.4, 0.0, 0.0, 0.8, -0.4, 0.0,      # 11-16: left leg (hip p/r/y, knee, ankle p/r)
            0.4, 0.0, 0.0, -0.8, 0.4, 0.0,       # 17-22: right leg (hip p/r/y, knee, ankle p/r)
        ])
        # NOTE: the nominal posture above is immediately overridden with zeros.
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array([
            1.0, -1.0,                           # 0-1: Head_yaw (he1), Head_pitch (he2)
            1.0, -1.0, -1.0, 1.0,                # 2-5: Left_Shoulder_Pitch/Roll, Left_Elbow_Pitch/Yaw (lae1-4)
            -1.0, -1.0, 1.0, 1.0,                # 6-9: Right_Shoulder_Pitch/Roll, Right_Elbow_Pitch/Yaw (rae1-4)
            1.0,                                 # 10: Waist (te1)
            1.0, -1.0, -1.0, 1.0, 1.0, -1.0,     # 11-16: Left_Hip_Pitch/Roll/Yaw, Left_Knee_Pitch, Left_Ankle_Pitch/Roll (lle1-6)
            -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,  # 17-22: Right_Hip_Pitch/Roll/Yaw, Right_Knee_Pitch, Right_Ankle_Pitch/Roll (rle1-6)
        ])

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for the connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):
        """Return the current observation vector."""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Target velocity command (robot frame)
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Relative direction to the target
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

    def sync(self):
        ''' Run a single simulation step '''
        self.Player.server.receive()
        self.Player.world.update()
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # fixed distance to the first waypoint
        length2 = np.random.uniform(0.6, 1)  # randomize second waypoint distance
        length3 = np.random.uniform(0.6, 1)  # randomize third waypoint distance
        angle2 = np.random.uniform(-30, 30)  # randomize direction change at the second waypoint
        angle3 = np.random.uniform(-30, 30)  # randomize direction change at the third waypoint

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and heading to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot time to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
            if finished_count >= 20:  # treat 20 "finished" flags as stable (note: the count is cumulative, not consecutive)
                break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade the perturbation to help the policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to the actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build the target in the robot's current forward direction instead of the fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]  # NOTE: point2/point3 are computed but not used yet
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Target direction
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate-of-change computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height drops (negative rate of change)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; a positive -height_rate is the drop magnitude

        # # Inside compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

    def step(self, action):

        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute the reward based on movement since the previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update the previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot fell, timed out, or completed the route
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = 20  # number of parallel training environments (one server per env)
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return WalkEnv(self.ip, self.server_p + i_env)

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=nn.ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.05,  # Entropy coefficient for exploration
                    # clip_range=0.13,  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=0.95,  # Discount factor
                    target_kl=0.03,
                    n_epochs=5
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create a custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_000/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_000/",})
@@ -1,660 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using the step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # disabled hook: set to a positive rate to enable real-time pacing in sync()
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array([
            0.0, 0.0,                            # 0-1: head yaw/pitch
            0.0, 1.4, 0.0, -0.4,                 # 2-5: left arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0, -1.4, 0.0, 0.4,                 # 6-9: right arm (shoulder pitch/roll, elbow pitch/yaw)
            0.0,                                 # 10: waist
            -0.4, 0.0, 0.0, 0.8, -0.4, 0.0,      # 11-16: left leg (hip p/r/y, knee, ankle p/r)
            0.4, 0.0, 0.0, -0.8, 0.4, 0.0,       # 17-22: right leg (hip p/r/y, knee, ankle p/r)
        ])
        # NOTE: the nominal posture above is immediately overridden with zeros.
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array([
            1.0, -1.0,                           # 0-1: Head_yaw (he1), Head_pitch (he2)
            1.0, -1.0, -1.0, 1.0,                # 2-5: Left_Shoulder_Pitch/Roll, Left_Elbow_Pitch/Yaw (lae1-4)
            -1.0, -1.0, 1.0, 1.0,                # 6-9: Right_Shoulder_Pitch/Roll, Right_Elbow_Pitch/Yaw (rae1-4)
            1.0,                                 # 10: Waist (te1)
            1.0, -1.0, -1.0, 1.0, 1.0, -1.0,     # 11-16: Left_Hip_Pitch/Roll/Yaw, Left_Knee_Pitch, Left_Ankle_Pitch/Roll (lle1-6)
            -1.0, -1.0, -1.0, -1.0, -1.0, -1.0,  # 17-22: Right_Hip_Pitch/Roll/Yaw, Right_Knee_Pitch, Right_Ankle_Pitch/Roll (rle1-6)
        ])

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for the connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def _reconnect_server(self):
        try:
            self.Player.server.shutdown()
        except Exception:
            pass

        self.Player.server.connect()
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )

    def _safe_receive_world_update(self, retries=1):
        last_exc = None
        for attempt in range(retries + 1):
            try:
                self.Player.server.receive()
                self.Player.world.update()
                return
            except (ConnectionResetError, OSError) as exc:
                last_exc = exc
                if attempt >= retries:
                    raise
                self._reconnect_server()
        if last_exc is not None:  # defensive: the loop above always returns or raises
            raise last_exc
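`_safe_receive_world_update` hard-codes the reconnect-and-retry pattern for one call site. The same idea as a generic helper, sketched for illustration (not part of this repo):

```python
import functools

def with_reconnect(retries=1, reconnect=None, errors=(ConnectionResetError, OSError)):
    """Retry a flaky network call, invoking `reconnect` between attempts (sketch)."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            for attempt in range(retries + 1):
                try:
                    return fn(*args, **kwargs)
                except errors:
                    if attempt >= retries:
                        raise
                    if reconnect is not None:
                        reconnect()
        return wrapper
    return decorator
```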
|
||||
|
||||
def debug_log(self, message):
|
||||
print(message)
|
||||
try:
|
||||
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
|
||||
with open(log_path, "a", encoding="utf-8") as f:
|
||||
f.write(message + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def observe(self, init=False):
|
||||
|
||||
"""获取当前观测值"""
|
||||
robot = self.Player.robot
|
||||
world = self.Player.world
|
||||
|
||||
# Safety check: ensure data is available
|
||||
|
||||
# 计算目标速度
|
||||
raw_target = self.target_position - world.global_position[:2]
|
||||
velocity = MathOps.rotate_2d_vec(
|
||||
raw_target,
|
||||
-robot.global_orientation_euler[2],
|
||||
is_rad=False
|
||||
)
|
||||
|
||||
# 计算相对方向
|
||||
rel_orientation = MathOps.vector_angle(velocity) * 0.3
|
||||
rel_orientation = np.clip(rel_orientation, -0.25, 0.25)
|
||||
|
||||
velocity = np.concatenate([velocity, np.array([rel_orientation])])
|
||||
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
|
||||
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
|
||||
|
||||
# 关节状态
|
||||
radian_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
radian_joint_speeds = np.deg2rad(
|
||||
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
|
||||
qpos_qvel_previous_action = np.concatenate([
|
||||
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
|
||||
radian_joint_speeds / 110.0 * self.train_sim_flip,
|
||||
self.previous_action / 10.0,
|
||||
])
|
||||
|
||||
# 角速度
|
||||
ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)
|
||||
|
||||
# 投影的重力方向
|
||||
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
|
||||
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
|
||||
|
||||
# 组合观测
|
||||
observation = np.concatenate([
|
||||
qpos_qvel_previous_action,
|
||||
ang_vel,
|
||||
velocity,
|
||||
projected_gravity,
|
||||
])
|
||||
|
||||
observation = np.clip(observation, -10.0, 10.0)
|
||||
return observation.astype(np.float32)
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
self._safe_receive_world_update(retries=1)
|
||||
self.Player.robot.commit_motor_targets_pd()
|
||||
self.Player.server.send()
|
||||
if self._target_dt > 0.0:
|
||||
now = time.time()
|
||||
if self._last_sync_time is None:
|
||||
self._last_sync_time = now
|
||||
return
|
||||
elapsed = now - self._last_sync_time
|
||||
remaining = self._target_dt - elapsed
|
||||
if remaining > 0.0:
|
||||
time.sleep(remaining)
|
||||
now = time.time()
|
||||
self._last_sync_time = now
|
||||
|
||||
def debug_joint_status(self):
|
||||
robot = self.Player.robot
|
||||
actual_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
target_joint_positions = getattr(
|
||||
self,
|
||||
'target_joint_positions',
|
||||
np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
|
||||
)
|
||||
joint_error = actual_joint_positions - target_joint_positions
|
||||
leg_slice = slice(11, None)
|
||||
|
||||
self.debug_log(
|
||||
"[WalkDebug] "
|
||||
f"step={self.step_counter} "
|
||||
f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
|
||||
f"target_xy={np.round(self.target_position, 3).tolist()} "
|
||||
f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
|
||||
f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
|
||||
f"fallen={self.Player.world.global_position[2] < 0.3}"
|
||||
)
|
||||
print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")
|
||||
|
||||
    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # first-segment length (fixed)
        length2 = np.random.uniform(0.6, 1)  # randomize second-segment length
        length3 = np.random.uniform(0.6, 1)  # randomize third-segment length
        angle2 = np.random.uniform(-30, 30)  # randomize second-segment direction
        angle3 = np.random.uniform(-30, 30)  # randomize third-segment direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam target position and orientation to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot enough time
        # to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # require 20 consecutive finished frames before treating the stand as stable
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        # point2/point3 sketch a multi-waypoint route, but only point1 is used below.
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

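    # Geometry sketch (illustrative; assumes MathOps.rotate_2d_vec is a standard
    # counter-clockwise 2D rotation taking degrees when is_rad=False). Under that
    # assumption, the waypoint built in reset() lands length1 metres straight
    # ahead along the robot's heading:
    _theta_example = np.deg2rad(45.0)  # example heading of 45 degrees
    _rot_example = np.array([[np.cos(_theta_example), -np.sin(_theta_example)],
                             [np.sin(_theta_example), np.cos(_theta_example)]])
    _forward_example = _rot_example @ np.array([2.0, 0.0])  # length1 = 2 as in reset()
    assert np.isclose(np.linalg.norm(_forward_example), 2.0)  # rotation preserves length
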
    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate calculation:
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # # Penalize height drops (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; -height_rate > 0 measures the drop

        # # Inside compute_reward:
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

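    # Worked example (illustrative numbers only): for a near-nominal frame with
    # tilt_mag = 0.1, ang_vel_mag = 0.5 rad/s, |height_error| = 0.02 and an
    # action delta of norm 1.0, the active terms in compute_reward() combine to
    #   2.0 - 0.01*1.0 - 0.3*0.1 - 0.02*0.5 - 0.5*0.02 = 1.94
    _reward_example = 2.0 - 0.01 * 1.0 - 0.3 * 0.1 - 0.02 * 0.5 - 0.5 * 0.02
    assert abs(_reward_example - 1.94) < 1e-9
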
    def step(self, action):
        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 1e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,  # torch.nn.ELU without a module-level import
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.03,  # Entropy coefficient for exploration
                    clip_range=0.13,  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=0.95,  # Discount factor
                    target_kl=0.03,
                    n_epochs=5,
                    tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

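    # Buffer-size sanity check (illustrative): with the defaults in train(),
    # n_steps_per_env * n_envs = 256 * 20 = 5120 transitions per rollout, and
    # minibatch_size = 512 divides that evenly, as the "should be a factor"
    # comment requires.
    assert (256 * 20) % 512 == 0
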
    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Default script arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({"model_file": "scripts/gyms/logs/Walk_R0_004/best_model.zip"})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_004/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_004/",})
@@ -1,679 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using a step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Base_Agent args: team name, uniform number, server host, agent port
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # leftover from an env-var override; 0 disables rate limiting
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture (radians)
        self.joint_nominal_position = np.array(
            [
                0.0,
                0.0,
                0.0,
                1.4,
                0.0,
                -0.4,
                0.0,
                -1.4,
                0.0,
                0.4,
                0.0,
                -0.4,
                0.0,
                0.0,
                0.8,
                -0.4,
                0.0,
                0.4,
                0.0,
                0.0,
                -0.8,
                0.4,
                0.0,
            ]
        )
        # Override: train around an all-zero nominal posture (the table above is kept for reference).
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

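    def _check_joint_tables(self):
        # Hypothetical helper (illustrative sketch, not called anywhere): verify
        # that the per-joint tables defined in __init__ match the action
        # dimension before training starts.
        assert self.train_sim_flip.shape == (self.no_of_actions,)
        assert self.joint_nominal_position.shape == (self.no_of_actions,)
        assert len(self.Player.robot.ROBOT_MOTORS) == self.no_of_actions
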
    def _reconnect_server(self):
        try:
            self.Player.server.shutdown()
        except Exception:
            pass

        self.Player.server.connect()
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )

    def _safe_receive_world_update(self, retries=1):
        last_exc = None
        for attempt in range(retries + 1):
            try:
                self.Player.server.receive()
                self.Player.world.update()
                return
            except (ConnectionResetError, OSError) as exc:
                last_exc = exc
                if attempt >= retries:
                    raise
                self._reconnect_server()
        if last_exc is not None:
            raise last_exc

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):
        """Build the current observation vector."""
        robot = self.Player.robot
        world = self.Player.world

        # Target velocity command, expressed in the robot frame
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Relative heading to the target
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Gravity direction projected into the body frame
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

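    # Dimension accounting (illustrative): the observation assembled above is
    # 23 joint positions + 23 joint speeds + 23 previous actions
    # + 3 gyro terms + 3 target-velocity terms + 3 projected-gravity terms,
    # which matches obs_size = 78 declared in __init__.
    assert 23 * 3 + 3 + 3 + 3 == 78
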
    def sync(self):
        ''' Run a single simulation step '''
        self._safe_receive_world_update(retries=1)
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # first-segment length (fixed)
        length2 = np.random.uniform(0.6, 1)  # randomize second-segment length
        length3 = np.random.uniform(0.6, 1)  # randomize third-segment length
        angle2 = np.random.uniform(-30, 30)  # randomize second-segment direction
        angle3 = np.random.uniform(-30, 30)  # randomize third-segment direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam target position and orientation to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot enough time
        # to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # require 20 consecutive finished frames before treating the stand as stable
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        # point2/point3 sketch a multi-waypoint route, but only point1 is used below.
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate calculation:
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # # Penalize height drops (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; -height_rate > 0 measures the drop

        # # Inside compute_reward:
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 600:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total


    def step(self, action):
        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,  # torch.nn.ELU without a module-level import
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),  # Entropy coefficient for exploration
                    clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
                    target_kl=0.03,
                    n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
                    # tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

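    # Usage sketch (illustrative values only): every hyperparameter read via
    # os.environ.get in train() can be overridden before launching; unset
    # variables fall back to the defaults shown above.
    _example_overrides = {
        "GYM_CPU_N_ENVS": "8",
        "GYM_CPU_TRAIN_LR": "2e-4",
        "GYM_CPU_TRAIN_BATCH_SIZE": "256",
        "GYM_CPU_TRAIN_CLIP_RANGE": "0.13",
    }
    # os.environ.update(_example_overrides)  # uncomment to apply before calling train()
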
    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"

        server = Train_Server(
            self.server_p - 1,
            self.monitor_p,
            1,
            no_render=test_no_render,
            no_realtime=test_no_realtime,
        )
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Default script arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)

    run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()

    if run_mode == "test":
        test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
        test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
        trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
    else:
        retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
        if retrain_model:
            trainer.train({"model_file": retrain_model})
        else:
            trainer.train({})
@@ -1,679 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using a step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Base_Agent args: team name, uniform number, server host, agent port
        self.Player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0  # leftover from an env-var override; 0 disables rate limiting
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture (radians)
        self.joint_nominal_position = np.array(
            [
                0.0,
                0.0,
                0.0,
                1.4,
                0.0,
                -0.4,
                0.0,
                -1.4,
                0.0,
                0.4,
                0.0,
                -0.4,
                0.0,
                0.0,
                0.8,
                -0.4,
                0.0,
                0.4,
                0.0,
                0.0,
                -0.8,
                0.4,
                0.0,
            ]
        )
        # Override: train around an all-zero nominal posture (the table above is kept for reference).
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def _reconnect_server(self):
        try:
            self.Player.server.shutdown()
        except Exception:
            pass

        self.Player.server.connect()
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )

    def _safe_receive_world_update(self, retries=1):
        last_exc = None
        for attempt in range(retries + 1):
            try:
                self.Player.server.receive()
                self.Player.world.update()
                return
            except (ConnectionResetError, OSError) as exc:
                last_exc = exc
                if attempt >= retries:
                    raise
                self._reconnect_server()
        if last_exc is not None:
            raise last_exc

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):
        """Build the current observation vector."""
        robot = self.Player.robot
        world = self.Player.world

        # Target velocity command, expressed in the robot frame
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Relative heading to the target
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint state
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Gravity direction projected into the body frame
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

    def sync(self):
        ''' Run a single simulation step '''
        self._safe_receive_world_update(retries=1)
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now

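    # Timing note (illustrative): when _target_dt > 0, sync() acts as a simple
    # rate limiter; e.g. a 50 Hz target leaves a 20 ms budget per step, and the
    # sleep only triggers when the step finished early.
    assert abs(1.0 / 50.0 - 0.02) < 1e-12
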
    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # first-segment length (fixed)
        length2 = np.random.uniform(0.6, 1)  # randomize second-segment length
        length3 = np.random.uniform(0.6, 1)  # randomize third-segment length
        angle2 = np.random.uniform(-30, 30)  # randomize second-segment direction
        angle3 = np.random.uniform(-30, 30)  # randomize third-segment direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam target position and orientation to add training diversity
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot enough time
        # to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # require 20 consecutive finished frames before treating the stand as stable
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        # point2/point3 sketch a multi-waypoint route, but only point1 is used below.
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Direction to target
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -1 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate calculation:
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # # Penalize height drops (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # coefficient tunable; -height_rate > 0 measures the drop

        # # Inside compute_reward:
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total


    def step(self, action):
        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
|
||||
def __init__(self, script) -> None:
|
||||
super().__init__(script)
|
||||
|
||||
def train(self, args):
|
||||
|
||||
# --------------------------------------- Learning parameters
|
||||
n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
|
||||
if n_envs < 1:
|
||||
raise ValueError("GYM_CPU_N_ENVS must be >= 1")
|
||||
server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
|
||||
n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256")) # RolloutBuffer is of size (n_steps_per_env * n_envs)
|
||||
minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512")) # should be a factor of (n_steps_per_env * n_envs)
|
||||
total_steps = 30000000
|
||||
learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
|
||||
folder_name = f'Walk_R{self.robot_type}'
|
||||
model_path = f'./scripts/gyms/logs/{folder_name}/'
|
||||
|
||||
print(f"Model path: {model_path}")
|
||||
print(f"Using {n_envs} parallel environments")
|
||||
|
||||
        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])

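        # --- Hedged note (added for clarity; not in the original code) ---------
        # Port layout assumed by init_env/Train_Server above: env i connects to
        # server_p + i, so the n_envs training servers occupy ports
        # [server_p, server_p + n_envs - 1] and the extra eval env uses
        # server_p + n_envs.
        # ------------------------------------------------------------------------
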
        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
            )

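            # --- Hedged note (added for clarity; not in the original code) -----
            # The __import__ call above is just an inline way to grab
            # torch.nn.ELU without a top-level import; the conventional
            # equivalent would be:
            #
            #     import torch.nn as nn
            #     policy_kwargs = dict(net_arch=dict(pi=[512, 256, 128],
            #                                        vf=[512, 256, 128]),
            #                          activation_fn=nn.ELU)
            # --------------------------------------------------------------------
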
if "model_file" in args: # retrain
|
||||
model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size, learning_rate=learning_rate)
|
||||
else: # train new model
|
||||
model = PPO(
|
||||
"MlpPolicy",
|
||||
env=env,
|
||||
verbose=1,
|
||||
n_steps=n_steps_per_env,
|
||||
batch_size=minibatch_size,
|
||||
learning_rate=learning_rate,
|
||||
device="cpu",
|
||||
policy_kwargs=policy_kwargs,
|
||||
ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")), # Entropy coefficient for exploration
|
||||
clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")), # PPO clipping parameter
|
||||
gae_lambda=0.95, # GAE lambda
|
||||
gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")), # Discount factor
|
||||
target_kl=0.03,
|
||||
n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
|
||||
# tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
|
||||
)
|
||||
|
||||
model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
|
||||
eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=100,
|
||||
backup_env_file=__file__)
|
||||
except KeyboardInterrupt:
|
||||
sleep(1) # wait for child processes
|
||||
print("\nctrl+c pressed, aborting...\n")
|
||||
servers.kill()
|
||||
return
|
||||
|
||||
env.close()
|
||||
eval_env.close()
|
||||
servers.kill()
|
||||
|
||||
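    # --- Hedged note (added for clarity; not in the original code) ------------
    # PPO.load() applies extra keyword arguments as attribute overrides on the
    # restored model, which is how the retrain branch above swaps in new
    # n_steps / batch_size / learning_rate values without rebuilding the
    # policy network. Roughly equivalent (illustrative only):
    #
    #     model = PPO.load(args["model_file"], env=env, device="cpu")
    #     model.n_steps = n_steps_per_env
    #     model.batch_size = minibatch_size
    # ---------------------------------------------------------------------------
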
    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"

        server = Train_Server(
            self.server_p - 1,
            self.monitor_p,
            1,
            no_render=test_no_render,
            no_realtime=test_no_realtime,
        )
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
|
||||
from types import SimpleNamespace
|
||||
|
||||
# 创建默认参数
|
||||
script_args = SimpleNamespace(
|
||||
args=SimpleNamespace(
|
||||
i='127.0.0.1', # Server IP
|
||||
p=3100, # Server port
|
||||
m=3200, # Monitor port
|
||||
r=0, # Robot type
|
||||
t='Gym', # Team name
|
||||
u=1 # Uniform number
|
||||
)
|
||||
)
|
||||
|
||||
trainer = Train(script_args)
|
||||
|
||||
run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()
|
||||
|
||||
if run_mode == "test":
|
||||
test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
|
||||
test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
|
||||
trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
|
||||
else:
|
||||
retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
|
||||
if retrain_model:
|
||||
trainer.train({"model_file": retrain_model})
|
||||
else:
|
||||
trainer.train({})
|
||||
@@ -1,705 +0,0 @@
|
||||
import os
|
||||
import numpy as np
|
||||
import math
|
||||
import time
|
||||
from time import sleep
|
||||
from random import random
|
||||
from random import uniform
|
||||
from itertools import count
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.monitor import Monitor
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
|
||||
|
||||
import gymnasium as gym
|
||||
from gymnasium import spaces
|
||||
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from scripts.commons.Server import Server as Train_Server
|
||||
|
||||
from agent.base_agent import Base_Agent
|
||||
from utils.math_ops import MathOps
|
||||
|
||||
from scipy.spatial.transform import Rotation as R
|
||||
|
||||
'''
|
||||
Objective:
|
||||
Learn how to run forward using step primitive
|
||||
----------
|
||||
- class Basic_Run: implements an OpenAI custom gym
|
||||
- class Train: implements algorithms to train a new model or test an existing model
|
||||
'''
|
||||
|
||||
|
||||
class WalkEnv(gym.Env):
|
||||
def __init__(self, ip, server_p) -> None:
|
||||
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.Player = player = Base_Agent(
|
||||
team_name="Gym",
|
||||
number=1,
|
||||
host=ip,
|
||||
port=server_p
|
||||
)
|
||||
self.robot_type = self.Player.robot
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.force_play_on = True
|
||||
|
||||
self.target_position = np.array([0.0, 0.0]) # target position in the x-y plane
|
||||
self.initial_position = np.array([0.0, 0.0]) # initial position in the x-y plane
|
||||
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
|
||||
self.isfallen = False
|
||||
self.waypoint_index = 0
|
||||
self.route_completed = False
|
||||
self.debug_every_n_steps = 5
|
||||
self.enable_debug_joint_status = False
|
||||
self.calibrate_nominal_from_neutral = True
|
||||
self.auto_calibrate_train_sim_flip = True
|
||||
self.nominal_calibrated_once = False
|
||||
self.flip_calibrated_once = False
|
||||
self._target_hz = 0.0
|
||||
self._target_dt = 0.0
|
||||
self._last_sync_time = None
|
||||
target_hz_env = 0
|
||||
if target_hz_env:
|
||||
try:
|
||||
self._target_hz = float(target_hz_env)
|
||||
except ValueError:
|
||||
self._target_hz = 0.0
|
||||
if self._target_hz > 0.0:
|
||||
self._target_dt = 1.0 / self._target_hz
|
||||
|
||||
# State space
|
||||
# 原始观测大小: 78
|
||||
obs_size = 78
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(obs_size,),
|
||||
dtype=np.float32
|
||||
)
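
        # Observation layout (ROBOT_MOTORS holds 23 joints, matching
        # train_sim_flip below): 23 positions + 23 speeds + 23 previous
        # actions = 69, plus 3 gyro terms, 3 target-velocity terms and
        # 3 projected-gravity terms = 78.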

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array(
            [
                0.0,
                0.0,
                0.0,
                1.4,
                0.0,
                -0.4,
                0.0,
                -1.4,
                0.0,
                0.4,
                0.0,
                -0.4,
                0.0,
                0.0,
                0.8,
                -0.4,
                0.0,
                0.4,
                0.0,
                0.0,
                -0.8,
                0.4,
                0.0,
            ]
        )
        self.joint_nominal_position = np.zeros(self.no_of_actions)
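        # NOTE: the line above overrides the hand-tuned pose with zeros, so both
        # the observation offset in observe() and the reset pose use the zero
        # posture.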
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )
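        # Sign map between the training frame and the simulator frame: joint
        # readings are multiplied by train_sim_flip in observe(), and action
        # targets are multiplied by it again in step() before being sent.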

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Encourage a minimum lateral stance so the policy avoids feet overlap.
        self.min_stance_rad = 0.10

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = False
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def _reconnect_server(self):
        try:
            self.Player.server.shutdown()
        except Exception:
            pass

        self.Player.server.connect()
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )

    def _safe_receive_world_update(self, retries=1):
        last_exc = None
        for attempt in range(retries + 1):
            try:
                self.Player.server.receive()
                self.Player.world.update()
                return
            except (ConnectionResetError, OSError) as exc:
                last_exc = exc
                if attempt >= retries:
                    raise
                self._reconnect_server()
        if last_exc is not None:
            raise last_exc

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):

        """Get the current observation"""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Compute the target velocity
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Compute the relative orientation
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint states
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

    def sync(self):
        ''' Run a single simulation step '''
        self._safe_receive_world_update(retries=1)
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now
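        # With _target_hz hard-coded to 0 in __init__, _target_dt stays 0.0 and
        # this throttle is effectively disabled; sync() then runs as fast as
        # the server replies.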

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # randomize target distance
        length2 = np.random.uniform(0.6, 1)  # randomize target distance
        length3 = np.random.uniform(0.6, 1)  # randomize target distance
        angle2 = np.random.uniform(-30, 30)  # randomize initial orientation
        angle3 = np.random.uniform(-30, 30)  # randomize target direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and heading to diversify training
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot enough time
        # to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
            if finished_count >= 20:  # assume 20 completed ticks count as success
                break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
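        # point2/point3 are computed above but unused: point_list holds only
        # point1, so each episode tracks a single waypoint.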
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])
        robot = self.Player.robot

        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.55
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Target direction
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        # Use simulator joint readings in training frame to shape lateral stance.
        joint_pos = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        ) * self.train_sim_flip
        left_hip_roll = float(joint_pos[12])
        right_hip_roll = float(joint_pos[18])
        left_ankle_roll = float(joint_pos[16])
        right_ankle_roll = float(joint_pos[22])

        hip_spread = left_hip_roll - right_hip_roll
        ankle_spread = left_ankle_roll - right_ankle_roll
        stance_metric = 0.6 * abs(hip_spread) + 0.4 * abs(ankle_spread)

        # Penalize narrow stance (feet too close) and scissoring (cross-leg pattern).
        stance_collapse_penalty = -4.0 * max(0.0, self.min_stance_rad - stance_metric)
        cross_leg_penalty = -1.2 * max(0.0, -(hip_spread * ankle_spread))

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height decrease (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # tunable; positive -height_rate is the drop magnitude

        # # Inside compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            + stance_collapse_penalty
            + cross_leg_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
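        # The diagnostic print below is rate-limited to once per 600 s of wall
        # time in each environment process.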
        if time.time() - self.start_time >= 600:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f},",
                f"posture_penalty:{posture_penalty:.4f}",
                f"stance_collapse_penalty:{stance_collapse_penalty:.4f}",
                f"cross_leg_penalty:{cross_leg_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

    def step(self, action):

        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.55

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
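        # The eval env attaches to the extra (n_envs-indexed) server created
        # above; the "+ 1" in Train_Server reserves that server for evaluation.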

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),  # Entropy coefficient for exploration
                    clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
                    # target_kl=0.03,
                    n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
                    tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=100,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"

        server = Train_Server(
            self.server_p - 1,
            self.monitor_p,
            1,
            no_render=test_no_render,
            no_realtime=test_no_realtime,
        )
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)

    run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()

    if run_mode == "test":
        test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
        test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
        trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
    else:
        retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
        if retrain_model:
            trainer.train({"model_file": retrain_model})
        else:
            trainer.train({})
Binary file not shown.
@@ -1,626 +0,0 @@
import os
import numpy as np
import math
import time
from time import sleep
from random import random
from random import uniform

from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv

import gymnasium as gym
from gymnasium import spaces

from scripts.commons.Train_Base import Train_Base
from scripts.commons.Server import Server as Train_Server

from agent.base_agent import Base_Agent
from utils.math_ops import MathOps

from scipy.spatial.transform import Rotation as R

'''
Objective:
Learn how to run forward using step primitive
----------
- class WalkEnv: implements a custom Gymnasium environment
- class Train: implements algorithms to train a new model or test an existing model
'''


class WalkEnv(gym.Env):
    def __init__(self, ip, server_p) -> None:

        # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
        self.Player = player = Base_Agent(
            team_name="Gym",
            number=1,
            host=ip,
            port=server_p
        )
        self.robot_type = self.Player.robot
        self.step_counter = 0  # to limit episode size
        self.force_play_on = True

        self.target_position = np.array([0.0, 0.0])  # target position in the x-y plane
        self.initial_position = np.array([0.0, 0.0])  # initial position in the x-y plane
        self.target_direction = 0.0  # target direction in the x-y plane (relative to the robot's orientation)
        self.isfallen = False
        self.waypoint_index = 0
        self.route_completed = False
        self.debug_every_n_steps = 5
        self.enable_debug_joint_status = False
        self.calibrate_nominal_from_neutral = True
        self.auto_calibrate_train_sim_flip = True
        self.nominal_calibrated_once = False
        self.flip_calibrated_once = False
        self._target_hz = 0.0
        self._target_dt = 0.0
        self._last_sync_time = None
        target_hz_env = 0
        if target_hz_env:
            try:
                self._target_hz = float(target_hz_env)
            except ValueError:
                self._target_hz = 0.0
        if self._target_hz > 0.0:
            self._target_dt = 1.0 / self._target_hz

        # State space
        # Raw observation size: 78
        obs_size = 78
        self.obs = np.zeros(obs_size, np.float32)
        self.observation_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(obs_size,),
            dtype=np.float32
        )

        action_dim = len(self.Player.robot.ROBOT_MOTORS)
        self.no_of_actions = action_dim
        self.action_space = spaces.Box(
            low=-10.0,
            high=10.0,
            shape=(action_dim,),
            dtype=np.float32
        )

        # Neutral posture
        self.joint_nominal_position = np.array(
            [
                0.0,
                0.0,
                0.0,
                1.4,
                0.0,
                -0.4,
                0.0,
                -1.4,
                0.0,
                0.4,
                0.0,
                -0.4,
                0.0,
                0.0,
                0.8,
                -0.4,
                0.0,
                0.4,
                0.0,
                0.0,
                -0.8,
                0.4,
                0.0,
            ]
        )
        self.joint_nominal_position = np.zeros(self.no_of_actions)
        self.train_sim_flip = np.array(
            [
                1.0,   # 0: Head_yaw (he1)
                -1.0,  # 1: Head_pitch (he2)
                1.0,   # 2: Left_Shoulder_Pitch (lae1)
                -1.0,  # 3: Left_Shoulder_Roll (lae2)
                -1.0,  # 4: Left_Elbow_Pitch (lae3)
                1.0,   # 5: Left_Elbow_Yaw (lae4)
                -1.0,  # 6: Right_Shoulder_Pitch (rae1)
                -1.0,  # 7: Right_Shoulder_Roll (rae2)
                1.0,   # 8: Right_Elbow_Pitch (rae3)
                1.0,   # 9: Right_Elbow_Yaw (rae4)
                1.0,   # 10: Waist (te1)
                1.0,   # 11: Left_Hip_Pitch (lle1)
                -1.0,  # 12: Left_Hip_Roll (lle2)
                -1.0,  # 13: Left_Hip_Yaw (lle3)
                1.0,   # 14: Left_Knee_Pitch (lle4)
                1.0,   # 15: Left_Ankle_Pitch (lle5)
                -1.0,  # 16: Left_Ankle_Roll (lle6)
                -1.0,  # 17: Right_Hip_Pitch (rle1)
                -1.0,  # 18: Right_Hip_Roll (rle2)
                -1.0,  # 19: Right_Hip_Yaw (rle3)
                -1.0,  # 20: Right_Knee_Pitch (rle4)
                -1.0,  # 21: Right_Ankle_Pitch (rle5)
                -1.0,  # 22: Right_Ankle_Roll (rle6)
            ]
        )

        self.scaling_factor = 0.3
        # self.scaling_factor = 1

        # Small reset perturbations for robustness training.
        self.enable_reset_perturb = True
        self.reset_beam_yaw_range_deg = 180  # randomize target direction fully to encourage learning a real walk instead of a fixed gait
        self.reset_joint_noise_rad = 0.015
        self.reset_perturb_steps = 3
        self.reset_recover_steps = 8

        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Track previous position
        self.Player.server.connect()
        # sleep(2.0)  # Longer wait for connection to establish completely
        self.Player.server.send_immediate(
            f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
        )
        self.start_time = time.time()

    def debug_log(self, message):
        print(message)
        try:
            log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
            with open(log_path, "a", encoding="utf-8") as f:
                f.write(message + "\n")
        except OSError:
            pass

    def observe(self, init=False):

        """Get the current observation"""
        robot = self.Player.robot
        world = self.Player.world

        # Safety check: ensure data is available

        # Compute the target velocity
        raw_target = self.target_position - world.global_position[:2]
        velocity = MathOps.rotate_2d_vec(
            raw_target,
            -robot.global_orientation_euler[2],
            is_rad=False
        )

        # Compute the relative orientation
        rel_orientation = MathOps.vector_angle(velocity) * 0.3
        rel_orientation = np.clip(rel_orientation, -0.25, 0.25)

        velocity = np.concatenate([velocity, np.array([rel_orientation])])
        velocity[0] = np.clip(velocity[0], -0.5, 0.5)
        velocity[1] = np.clip(velocity[1], -0.25, 0.25)

        # Joint states
        radian_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        radian_joint_speeds = np.deg2rad(
            [robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
        )

        qpos_qvel_previous_action = np.concatenate([
            (radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
            radian_joint_speeds / 110.0 * self.train_sim_flip,
            self.previous_action / 10.0,
        ])

        # Angular velocity
        ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)

        # Projected gravity direction
        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))

        # Assemble the observation
        observation = np.concatenate([
            qpos_qvel_previous_action,
            ang_vel,
            velocity,
            projected_gravity,
        ])

        observation = np.clip(observation, -10.0, 10.0)
        return observation.astype(np.float32)

    def sync(self):
        ''' Run a single simulation step '''
        self.Player.server.receive()
        self.Player.world.update()
        self.Player.robot.commit_motor_targets_pd()
        self.Player.server.send()
        if self._target_dt > 0.0:
            now = time.time()
            if self._last_sync_time is None:
                self._last_sync_time = now
                return
            elapsed = now - self._last_sync_time
            remaining = self._target_dt - elapsed
            if remaining > 0.0:
                time.sleep(remaining)
                now = time.time()
            self._last_sync_time = now

    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2  # randomize target distance
        length2 = np.random.uniform(0.6, 1)  # randomize target distance
        length3 = np.random.uniform(0.6, 1)  # randomize target distance
        angle2 = np.random.uniform(-30, 30)  # randomize initial orientation
        angle3 = np.random.uniform(-30, 30)  # randomize target direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and heading to diversify training
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self.Player.server.receive()
            self.Player.world.update()
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it finishes, giving the robot enough time
        # to stand stably at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
            if finished_count >= 20:  # assume 20 completed ticks count as success
                break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])

        orientation_quat_inv = R.from_quat(self.Player.robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(self.Player.robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.3
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # # Target direction
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0

        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # # Near the top of compute_reward, add a height-rate computation
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = falling
        # self.last_height = height

        # Penalize height decrease (negative rate)
        # height_down_penalty = -5.0 * max(0, -height_rate)  # tunable; positive -height_rate is the drop magnitude

        # # Inside compute_reward
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0

        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            alive_bonus +
            # lateral_penalty +
            # action_penalty +
            smoothness_penalty +
            posture_penalty
            + ang_vel_penalty
            + height_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 1200:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f},",
                f"posture_penalty:{posture_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

    def step(self, action):

        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.previous_action = action

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update previous position
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection and penalty
        is_fallen = self.Player.world.global_position[2] < 0.3

        # terminal state: the robot is falling or timeout
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}


class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = 20  # reduced to decrease CPU/network pressure during init
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        n_steps_per_env = 256  # RolloutBuffer is of size (n_steps_per_env * n_envs)
        minibatch_size = 512  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30000000
        learning_rate = 3e-4
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")

        # --------------------------------------- Run algorithm
        def init_env(i_env):
            def thunk():
                return WalkEnv(self.ip, self.server_p + i_env)

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1)  # include 1 extra server for testing

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i) for i in range(n_envs)])
        eval_env = SubprocVecEnv([init_env(n_envs)])
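        # A one-element SubprocVecEnv still forks a separate worker process for
        # evaluation; it connects to the extra (n_envs-indexed) server above.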

        try:
            # Custom policy network architecture
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=__import__('torch.nn', fromlist=['ELU']).ELU,
            )

            if "model_file" in args:  # retrain
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=0.05,  # Entropy coefficient for exploration
                    # clip_range=0.13,  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=0.95,  # Discount factor
                    target_kl=0.03,
                    n_epochs=5
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 10, save_freq=n_steps_per_env * 10,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        server = Train_Server(self.server_p - 1, self.monitor_p, 1)
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Create default arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,  # Server port
            m=3200,  # Monitor port
            r=0,  # Robot type
            t='Gym',  # Team name
            u=1  # Uniform number
        )
    )

    trainer = Train(script_args)
    trainer.train({})
    # trainer.test({"model_file": "scripts/gyms/logs/Walk_R0_000/best_model.zip",
    #               "folder_dir": "scripts/gyms/logs/Walk_R0_000/",})
Binary file not shown.
@@ -1,705 +0,0 @@
|
||||
import os
|
||||
import numpy as np
|
||||
import math
|
||||
import time
|
||||
from time import sleep
|
||||
from random import random
|
||||
from random import uniform
|
||||
from itertools import count
|
||||
|
||||
from stable_baselines3 import PPO
|
||||
from stable_baselines3.common.monitor import Monitor
|
||||
from stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv
|
||||
|
||||
import gymnasium as gym
|
||||
from gymnasium import spaces
|
||||
|
||||
from scripts.commons.Train_Base import Train_Base
|
||||
from scripts.commons.Server import Server as Train_Server
|
||||
|
||||
from agent.base_agent import Base_Agent
|
||||
from utils.math_ops import MathOps
|
||||
|
||||
from scipy.spatial.transform import Rotation as R
|
||||
|
||||
'''
|
||||
Objective:
|
||||
Learn how to run forward using step primitive
|
||||
----------
|
||||
- class Basic_Run: implements an OpenAI custom gym
|
||||
- class Train: implements algorithms to train a new model or test an existing model
|
||||
'''
|
||||
|
||||
|
||||
class WalkEnv(gym.Env):
|
||||
def __init__(self, ip, server_p) -> None:
|
||||
|
||||
# Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw
|
||||
self.Player = player = Base_Agent(
|
||||
team_name="Gym",
|
||||
number=1,
|
||||
host=ip,
|
||||
port=server_p
|
||||
)
|
||||
self.robot_type = self.Player.robot
|
||||
self.step_counter = 0 # to limit episode size
|
||||
self.force_play_on = True
|
||||
|
||||
self.target_position = np.array([0.0, 0.0]) # target position in the x-y plane
|
||||
self.initial_position = np.array([0.0, 0.0]) # initial position in the x-y plane
|
||||
self.target_direction = 0.0 # target direction in the x-y plane (relative to the robot's orientation)
|
||||
self.isfallen = False
|
||||
self.waypoint_index = 0
|
||||
self.route_completed = False
|
||||
self.debug_every_n_steps = 5
|
||||
self.enable_debug_joint_status = False
|
||||
self.calibrate_nominal_from_neutral = True
|
||||
self.auto_calibrate_train_sim_flip = True
|
||||
self.nominal_calibrated_once = False
|
||||
self.flip_calibrated_once = False
|
||||
self._target_hz = 0.0
|
||||
self._target_dt = 0.0
|
||||
self._last_sync_time = None
|
||||
target_hz_env = 0
|
||||
if target_hz_env:
|
||||
try:
|
||||
self._target_hz = float(target_hz_env)
|
||||
except ValueError:
|
||||
self._target_hz = 0.0
|
||||
if self._target_hz > 0.0:
|
||||
self._target_dt = 1.0 / self._target_hz
|
||||
|
||||
# State space
|
||||
# 原始观测大小: 78
|
||||
obs_size = 78
|
||||
self.obs = np.zeros(obs_size, np.float32)
|
||||
self.observation_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(obs_size,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
action_dim = len(self.Player.robot.ROBOT_MOTORS)
|
||||
self.no_of_actions = action_dim
|
||||
self.action_space = spaces.Box(
|
||||
low=-10.0,
|
||||
high=10.0,
|
||||
shape=(action_dim,),
|
||||
dtype=np.float32
|
||||
)
|
||||
|
||||
# 中立姿态
|
||||
self.joint_nominal_position = np.array(
|
||||
[
|
||||
0.0,
|
||||
0.0,
|
||||
0.0,
|
||||
1.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
-1.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
0.8,
|
||||
-0.4,
|
||||
0.0,
|
||||
0.4,
|
||||
0.0,
|
||||
0.0,
|
||||
-0.8,
|
||||
0.4,
|
||||
0.0,
|
||||
]
|
||||
)
|
||||
self.joint_nominal_position = np.zeros(self.no_of_actions)
|
||||
self.train_sim_flip = np.array(
|
||||
[
|
||||
1.0, # 0: Head_yaw (he1)
|
||||
-1.0, # 1: Head_pitch (he2)
|
||||
1.0, # 2: Left_Shoulder_Pitch (lae1)
|
||||
-1.0, # 3: Left_Shoulder_Roll (lae2)
|
||||
-1.0, # 4: Left_Elbow_Pitch (lae3)
|
||||
1.0, # 5: Left_Elbow_Yaw (lae4)
|
||||
-1.0, # 6: Right_Shoulder_Pitch (rae1)
|
||||
-1.0, # 7: Right_Shoulder_Roll (rae2)
|
||||
1.0, # 8: Right_Elbow_Pitch (rae3)
|
||||
1.0, # 9: Right_Elbow_Yaw (rae4)
|
||||
1.0, # 10: Waist (te1)
|
||||
1.0, # 11: Left_Hip_Pitch (lle1)
|
||||
-1.0, # 12: Left_Hip_Roll (lle2)
|
||||
-1.0, # 13: Left_Hip_Yaw (lle3)
|
||||
1.0, # 14: Left_Knee_Pitch (lle4)
|
||||
1.0, # 15: Left_Ankle_Pitch (lle5)
|
||||
-1.0, # 16: Left_Ankle_Roll (lle6)
|
||||
-1.0, # 17: Right_Hip_Pitch (rle1)
|
||||
-1.0, # 18: Right_Hip_Roll (rle2)
|
||||
-1.0, # 19: Right_Hip_Yaw (rle3)
|
||||
-1.0, # 20: Right_Knee_Pitch (rle4)
|
||||
-1.0, # 21: Right_Ankle_Pitch (rle5)
|
||||
-1.0, # 22: Right_Ankle_Roll (rle6)
|
||||
]
|
||||
)
|
||||
|
||||
self.scaling_factor = 0.3
|
||||
# self.scaling_factor = 1
|
||||
|
||||
# Encourage a minimum lateral stance so the policy avoids feet overlap.
|
||||
self.min_stance_rad = 0.10
|
||||
|
||||
# Small reset perturbations for robustness training.
|
||||
self.enable_reset_perturb = False
|
||||
self.reset_beam_yaw_range_deg = 180 # randomize target direction fully to encourage learning a real walk instead of a fixed gait
|
||||
self.reset_joint_noise_rad = 0.015
|
||||
self.reset_perturb_steps = 3
|
||||
self.reset_recover_steps = 8
|
||||
|
||||
self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
|
||||
self.previous_pos = np.array([0.0, 0.0]) # Track previous position
|
||||
self.Player.server.connect()
|
||||
# sleep(2.0) # Longer wait for connection to establish completely
|
||||
self.Player.server.send_immediate(
|
||||
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
|
||||
)
|
||||
self.start_time = time.time()
|
||||
|
||||
def _reconnect_server(self):
|
||||
try:
|
||||
self.Player.server.shutdown()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self.Player.server.connect()
|
||||
self.Player.server.send_immediate(
|
||||
f"(init {self.Player.robot.name} {self.Player.world.team_name} {self.Player.world.number})"
|
||||
)
|
||||
|
||||
def _safe_receive_world_update(self, retries=1):
|
||||
last_exc = None
|
||||
for attempt in range(retries + 1):
|
||||
try:
|
||||
self.Player.server.receive()
|
||||
self.Player.world.update()
|
||||
return
|
||||
except (ConnectionResetError, OSError) as exc:
|
||||
last_exc = exc
|
||||
if attempt >= retries:
|
||||
raise
|
||||
self._reconnect_server()
|
||||
if last_exc is not None:
|
||||
raise last_exc
|
||||
|
||||
def debug_log(self, message):
|
||||
print(message)
|
||||
try:
|
||||
log_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
|
||||
with open(log_path, "a", encoding="utf-8") as f:
|
||||
f.write(message + "\n")
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def observe(self, init=False):
|
||||
|
||||
"""获取当前观测值"""
|
||||
robot = self.Player.robot
|
||||
world = self.Player.world
|
||||
|
||||
# Safety check: ensure data is available
|
||||
|
||||
# 计算目标速度
|
||||
raw_target = self.target_position - world.global_position[:2]
|
||||
velocity = MathOps.rotate_2d_vec(
|
||||
raw_target,
|
||||
-robot.global_orientation_euler[2],
|
||||
is_rad=False
|
||||
)
|
||||
|
||||
# 计算相对方向
|
||||
rel_orientation = MathOps.vector_angle(velocity) * 0.3
|
||||
rel_orientation = np.clip(rel_orientation, -0.25, 0.25)
|
||||
|
||||
velocity = np.concatenate([velocity, np.array([rel_orientation])])
|
||||
velocity[0] = np.clip(velocity[0], -0.5, 0.5)
|
||||
velocity[1] = np.clip(velocity[1], -0.25, 0.25)
|
||||
|
||||
# 关节状态
|
||||
radian_joint_positions = np.deg2rad(
|
||||
[robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
radian_joint_speeds = np.deg2rad(
|
||||
[robot.motor_speeds[motor] for motor in robot.ROBOT_MOTORS]
|
||||
)
|
||||
|
||||
qpos_qvel_previous_action = np.concatenate([
|
||||
(radian_joint_positions * self.train_sim_flip - self.joint_nominal_position) / 4.6,
|
||||
radian_joint_speeds / 110.0 * self.train_sim_flip,
|
||||
self.previous_action / 10.0,
|
||||
])
|
||||
|
||||
# 角速度
|
||||
ang_vel = np.clip(np.deg2rad(robot.gyroscope) / 50.0, -1.0, 1.0)
|
||||
|
||||
# 投影的重力方向
|
||||
orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
|
||||
projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
|
||||
|
||||
# 组合观测
|
||||
observation = np.concatenate([
|
||||
qpos_qvel_previous_action,
|
||||
ang_vel,
|
||||
velocity,
|
||||
projected_gravity,
|
||||
])
|
||||
|
||||
observation = np.clip(observation, -10.0, 10.0)
|
||||
return observation.astype(np.float32)
|
||||
|
||||
def sync(self):
|
||||
''' Run a single simulation step '''
|
||||
self._safe_receive_world_update(retries=1)
|
||||
self.Player.robot.commit_motor_targets_pd()
|
||||
self.Player.server.send()
|
||||
if self._target_dt > 0.0:
|
||||
now = time.time()
|
||||
if self._last_sync_time is None:
|
||||
self._last_sync_time = now
|
||||
return
|
||||
elapsed = now - self._last_sync_time
|
||||
remaining = self._target_dt - elapsed
|
||||
if remaining > 0.0:
|
||||
time.sleep(remaining)
|
||||
now = time.time()
|
||||
self._last_sync_time = now
|
||||
|
    def debug_joint_status(self):
        robot = self.Player.robot
        actual_joint_positions = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        )
        target_joint_positions = getattr(
            self,
            'target_joint_positions',
            np.zeros(len(robot.ROBOT_MOTORS), dtype=np.float32)
        )
        joint_error = actual_joint_positions - target_joint_positions
        # Indices assume the ROBOT_MOTORS ordering: 10 = waist, 11+ = legs.
        leg_slice = slice(11, None)

        self.debug_log(
            "[WalkDebug] "
            f"step={self.step_counter} "
            f"pos={np.round(self.Player.world.global_position, 3).tolist()} "
            f"target_xy={np.round(self.target_position, 3).tolist()} "
            f"target_leg={np.round(target_joint_positions[leg_slice], 3).tolist()} "
            f"actual_leg={np.round(actual_joint_positions[leg_slice], 3).tolist()} "
            f"err_norm={float(np.linalg.norm(joint_error)):.4f} "
            f"fallen={self.Player.world.global_position[2] < 0.3}"
        )
        print(f"waist target={target_joint_positions[10]:.3f}, actual={actual_joint_positions[10]:.3f}")

    def reset(self, seed=None, options=None):
        '''
        Reset and stabilize the robot
        Note: for some behaviors it would be better to reduce stabilization or add noise
        '''
        r = self.Player.robot
        super().reset(seed=seed)
        if seed is not None:
            np.random.seed(seed)

        length1 = 2                          # first waypoint distance (fixed)
        length2 = np.random.uniform(0.6, 1)  # randomized second segment length
        length3 = np.random.uniform(0.6, 1)  # randomized third segment length
        angle2 = np.random.uniform(-30, 30)  # randomized second segment direction
        angle3 = np.random.uniform(-30, 30)  # randomized third segment direction

        self.step_counter = 0
        self.waypoint_index = 0
        self.route_completed = False
        self.previous_action = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.last_action_for_reward = np.zeros(len(self.Player.robot.ROBOT_MOTORS))
        self.previous_pos = np.array([0.0, 0.0])  # Initialize for first step
        self.walk_cycle_step = 0

        # Randomize the beam position and yaw to diversify training episodes
        beam_x = (random() - 0.5) * 10
        beam_y = (random() - 0.5) * 10
        beam_yaw = uniform(-self.reset_beam_yaw_range_deg, self.reset_beam_yaw_range_deg)

        for _ in range(5):
            self._safe_receive_world_update(retries=2)
            self.Player.robot.commit_motor_targets_pd()
            self.Player.server.commit_beam(pos2d=(beam_x, beam_y), rotation=beam_yaw)
            self.Player.server.send()

        # Run the Neutral skill until it reports completion, giving the robot
        # time to settle into a stable stance at the beam position
        finished_count = 0
        for _ in range(50):
            finished = self.Player.skills_manager.execute("Neutral")
            self.sync()
            if finished:
                finished_count += 1
                if finished_count >= 20:  # treat 20 completed reports as a stable stance
                    break

        if self.enable_reset_perturb and self.reset_joint_noise_rad > 0.0:
            perturb_action = np.zeros(self.no_of_actions, dtype=np.float32)
            # Perturb waist + lower body only (10:), keep head/arms stable.
            perturb_action[10:] = np.random.uniform(
                -self.reset_joint_noise_rad,
                self.reset_joint_noise_rad,
                size=(self.no_of_actions - 10,)
            )

            for _ in range(self.reset_perturb_steps):
                target_joint_positions = (self.joint_nominal_position + perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

            for i in range(self.reset_recover_steps):
                # Linearly fade perturbation to help policy start from near-neutral.
                alpha = 1.0 - float(i + 1) / float(self.reset_recover_steps)
                target_joint_positions = (self.joint_nominal_position + alpha * perturb_action) * self.train_sim_flip
                for idx, target in enumerate(target_joint_positions):
                    r.set_motor_target_position(
                        r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=25, kd=0.6
                    )
                self.sync()

        # memory variables
        self.sync()
        self.initial_position = np.array(self.Player.world.global_position[:2])
        self.previous_pos = self.initial_position.copy()  # Critical: set to actual position
        self.act = np.zeros(self.no_of_actions, np.float32)
        # Build target in the robot's current forward direction instead of fixed global +x.
        heading_deg = float(r.global_orientation_euler[2])
        forward_offset = MathOps.rotate_2d_vec(np.array([length1, 0.0]), heading_deg, is_rad=False)
        point1 = self.initial_position + forward_offset
        # point2/point3 extend the route but are not yet added to point_list.
        point2 = point1 + MathOps.rotate_2d_vec(np.array([length2, 0]), angle2, is_rad=False)
        point3 = point2 + MathOps.rotate_2d_vec(np.array([length3, 0]), angle3, is_rad=False)
        self.point_list = [point1]
        self.target_position = self.point_list[self.waypoint_index]
        self.initial_height = self.Player.world.global_position[2]

        return self.observe(True), {}

    def render(self, mode='human', close=False):
        return

    def compute_reward(self, previous_pos, current_pos, action):
        height = float(self.Player.world.global_position[2])
        robot = self.Player.robot

        orientation_quat_inv = R.from_quat(robot._global_cheat_orientation).inv()
        projected_gravity = orientation_quat_inv.apply(np.array([0.0, 0.0, -1.0]))
        tilt_mag = float(np.linalg.norm(projected_gravity[:2]))
        ang_vel = np.deg2rad(robot.gyroscope)
        ang_vel_mag = float(np.linalg.norm(ang_vel))

        is_fallen = height < 0.55
        if is_fallen:
            # remain = max(0, 800 - self.step_counter)
            # return -8.0 - 0.01 * remain
            return -1.0

        # Target-direction progress terms (disabled experiment):
        # to_target = self.target_position - current_pos
        # dist_to_target = float(np.linalg.norm(to_target))
        # if dist_to_target < 0.5:
        #     return 15.0
        # forward_dir = to_target / dist_to_target if dist_to_target > 0.1 else np.array([1.0, 0.0])
        # delta_pos = current_pos - previous_pos
        # forward_step = float(np.dot(delta_pos, forward_dir))
        # lateral_step = float(np.linalg.norm(delta_pos - forward_dir * forward_step))

        # Reward terms
        # progress_reward = 2 * forward_step
        # lateral_penalty = -0.1 * lateral_step
        alive_bonus = 2.0

        # action_penalty = -0.01 * float(np.linalg.norm(action))
        smoothness_penalty = -0.01 * float(np.linalg.norm(action - self.last_action_for_reward))

        posture_penalty = -0.3 * tilt_mag
        ang_vel_penalty = -0.02 * ang_vel_mag

        # Use simulator joint readings in training frame to shape lateral stance.
        joint_pos = np.deg2rad(
            [robot.motor_positions[motor] for motor in robot.ROBOT_MOTORS]
        ) * self.train_sim_flip
        left_hip_roll = float(joint_pos[12])
        right_hip_roll = float(joint_pos[18])
        left_ankle_roll = float(joint_pos[16])
        right_ankle_roll = float(joint_pos[22])

        hip_spread = left_hip_roll - right_hip_roll
        ankle_spread = left_ankle_roll - right_ankle_roll
        stance_metric = 0.6 * abs(hip_spread) + 0.4 * abs(ankle_spread)

        # Penalize narrow stance (feet too close) and scissoring (cross-leg pattern).
        stance_collapse_penalty = -4.0 * max(0.0, self.min_stance_rad - stance_metric)
        cross_leg_penalty = -1.2 * max(0.0, -(hip_spread * ankle_spread))

        target_height = self.initial_height
        height_error = height - target_height
        height_penalty = -0.5 * abs(height_error)  # penalize height deviation; coefficient is tunable

        # Height-rate shaping (disabled experiment), computed near the top of compute_reward:
        # if not hasattr(self, 'last_height'):
        #     self.last_height = height
        #     self.last_height_time = self.step_counter  # optional, for time intervals
        # height_rate = height - self.last_height  # positive = rising, negative = dropping
        # self.last_height = height
        # Penalize height drops (negative rate); coefficient tunable:
        # height_down_penalty = -5.0 * max(0, -height_rate)

        # Exploration-novelty bonus (disabled experiment):
        # if self.step_counter > 50:
        #     avg_prev_action = np.mean(self.prev_action_history, axis=0)
        #     novelty = float(np.linalg.norm(action - avg_prev_action))
        #     exploration_bonus = 0.05 * novelty
        # else:
        #     exploration_bonus = 0
        # self.prev_action_history[self.history_idx] = action
        # self.history_idx = (self.history_idx + 1) % 50

        total = (
            # progress_reward +
            # lateral_penalty +
            # action_penalty +
            alive_bonus
            + smoothness_penalty
            + posture_penalty
            + ang_vel_penalty
            + height_penalty
            + stance_collapse_penalty
            + cross_leg_penalty
            # + exploration_bonus
            # + height_down_penalty
        )
        if time.time() - self.start_time >= 600:
            self.start_time = time.time()
            print(
                # f"progress_reward:{progress_reward:.4f}",
                # f"lateral_penalty:{lateral_penalty:.4f}",
                # f"action_penalty:{action_penalty:.4f}",
                f"height_penalty:{height_penalty:.4f}",
                f"smoothness_penalty:{smoothness_penalty:.4f}",
                f"posture_penalty:{posture_penalty:.4f}",
                f"stance_collapse_penalty:{stance_collapse_penalty:.4f}",
                f"cross_leg_penalty:{cross_leg_penalty:.4f}",
                # f"ang_vel_penalty:{ang_vel_penalty:.4f}",
                # f"height_down_penalty:{height_down_penalty:.4f}",
                # f"exploration_bonus:{exploration_bonus:.4f}"
            )

        return total

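    # Reward scale check (added note, illustrative numbers): for a nominal
    # standing state with tilt_mag = 0.05, ang_vel_mag = 0.2, a 2 cm height
    # error, zero action delta, and stance_metric above min_stance_rad,
    #     total = 2.0 - 0.3*0.05 - 0.02*0.2 - 0.5*0.02 = 1.971
    # so the alive bonus dominates and the shaping terms act as small
    # corrections rather than the main driver of the gradient.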
    def step(self, action):
        r = self.Player.robot
        self.previous_action = action

        self.target_joint_positions = (
            # self.joint_nominal_position +
            self.scaling_factor * action
        )
        self.target_joint_positions *= self.train_sim_flip

        for idx, target in enumerate(self.target_joint_positions):
            r.set_motor_target_position(
                r.ROBOT_MOTORS[idx], target * 180 / math.pi, kp=40, kd=1.0
            )

        self.sync()  # run simulation step
        self.step_counter += 1

        if self.enable_debug_joint_status and self.step_counter % self.debug_every_n_steps == 0:
            self.debug_joint_status()

        current_pos = np.array(self.Player.world.global_position[:2], dtype=np.float32)

        # Compute reward based on movement from previous step
        reward = self.compute_reward(self.previous_pos, current_pos, action)

        # Update position and action memory
        self.previous_pos = current_pos.copy()
        self.last_action_for_reward = action.copy()

        # Fall detection
        is_fallen = self.Player.world.global_position[2] < 0.55

        # Terminal state: the robot has fallen, timed out, or finished the route
        terminated = is_fallen or self.step_counter > 800 or self.route_completed
        truncated = False

        return self.observe(), reward, terminated, truncated, {}

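# Usage sketch (added, commented out): WalkEnv follows the Gymnasium 5-tuple
# step API, so a rollout against an already-running simulator on port 3100
# would look like this (the zero action is a placeholder policy):
#
#     env = WalkEnv("127.0.0.1", 3100)
#     obs, info = env.reset()
#     terminated = truncated = False
#     while not (terminated or truncated):
#         action = np.zeros(env.no_of_actions, dtype=np.float32)
#         obs, reward, terminated, truncated, info = env.step(action)
#     env.close()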
class Train(Train_Base):
    def __init__(self, script) -> None:
        super().__init__(script)

    def train(self, args):

        # --------------------------------------- Learning parameters
        n_envs = int(os.environ.get("GYM_CPU_N_ENVS", "20"))
        if n_envs < 1:
            raise ValueError("GYM_CPU_N_ENVS must be >= 1")
        server_warmup_sec = float(os.environ.get("GYM_CPU_SERVER_WARMUP_SEC", "3.0"))
        n_steps_per_env = int(os.environ.get("GYM_CPU_TRAIN_STEPS_PER_ENV", "256"))  # RolloutBuffer holds n_steps_per_env * n_envs transitions
        minibatch_size = int(os.environ.get("GYM_CPU_TRAIN_BATCH_SIZE", "512"))  # should be a factor of (n_steps_per_env * n_envs)
        total_steps = 30_000_000
        learning_rate = float(os.environ.get("GYM_CPU_TRAIN_LR", "3e-4"))
        folder_name = f'Walk_R{self.robot_type}'
        model_path = f'./scripts/gyms/logs/{folder_name}/'

        print(f"Model path: {model_path}")
        print(f"Using {n_envs} parallel environments")
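        # Sanity check (added sketch): SB3's PPO slices the rollout buffer of
        # n_steps_per_env * n_envs transitions into minibatches, so training is
        # cleanest when minibatch_size divides that product exactly.
        if (n_steps_per_env * n_envs) % minibatch_size != 0:
            print(f"Warning: batch_size={minibatch_size} does not evenly divide "
                  f"the rollout buffer ({n_steps_per_env * n_envs})")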

        # --------------------------------------- Run algorithm
        def init_env(i_env, monitor=False):
            def thunk():
                env = WalkEnv(self.ip, self.server_p + i_env)
                if monitor:
                    env = Monitor(env)
                return env

            return thunk

        server_log_dir = os.path.join(model_path, "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        servers = Train_Server(self.server_p, self.monitor_p_1000, n_envs + 1, no_render=True, no_realtime=True)  # one extra server for the eval env

        # Wait for servers to start
        print(f"Starting {n_envs + 1} rcssservermj servers...")
        if server_warmup_sec > 0:
            print(f"Waiting {server_warmup_sec:.1f}s for server warmup...")
            sleep(server_warmup_sec)
        print("Servers started, creating environments...")

        env = SubprocVecEnv([init_env(i, monitor=True) for i in range(n_envs)])
        # Use single-process eval env to avoid extra subprocess fragility during callback evaluation.
        eval_env = DummyVecEnv([init_env(n_envs, monitor=True)])
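        # Port layout (added note): Train_Server launches n_envs + 1 simulator
        # instances starting at self.server_p; training env i connects to
        # self.server_p + i and the eval env takes the last port (+ n_envs),
        # so training and evaluation never share a simulator instance.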

        try:
            # Custom policy network architecture
            from torch.nn import ELU  # local import; torch is only needed here
            policy_kwargs = dict(
                net_arch=dict(
                    pi=[512, 256, 128],  # Policy network: 3 layers
                    vf=[512, 256, 128]   # Value network: 3 layers
                ),
                activation_fn=ELU,
            )

            if "model_file" in args:  # retrain; extra kwargs override the checkpoint's saved hyperparameters
                model = PPO.load(args["model_file"], env=env, device="cpu", n_envs=n_envs, n_steps=n_steps_per_env,
                                 batch_size=minibatch_size, learning_rate=learning_rate)
            else:  # train new model
                model = PPO(
                    "MlpPolicy",
                    env=env,
                    verbose=1,
                    n_steps=n_steps_per_env,
                    batch_size=minibatch_size,
                    learning_rate=learning_rate,
                    device="cpu",
                    policy_kwargs=policy_kwargs,
                    ent_coef=float(os.environ.get("GYM_CPU_TRAIN_ENT_COEF", "0.05")),  # Entropy coefficient for exploration
                    clip_range=float(os.environ.get("GYM_CPU_TRAIN_CLIP_RANGE", "0.2")),  # PPO clipping parameter
                    gae_lambda=0.95,  # GAE lambda
                    gamma=float(os.environ.get("GYM_CPU_TRAIN_GAMMA", "0.95")),  # Discount factor
                    # target_kl=0.03,
                    n_epochs=int(os.environ.get("GYM_CPU_TRAIN_EPOCHS", "5")),
                    tensorboard_log=f"./scripts/gyms/logs/{folder_name}/tensorboard/"
                )

            model_path = self.learn_model(model, total_steps, model_path, eval_env=eval_env,
                                          eval_freq=n_steps_per_env * 20, save_freq=n_steps_per_env * 20, eval_eps=100,
                                          backup_env_file=__file__)
        except KeyboardInterrupt:
            sleep(1)  # wait for child processes
            print("\nctrl+c pressed, aborting...\n")
            servers.kill()
            return

        env.close()
        eval_env.close()
        servers.kill()

    def test(self, args):

        # Uses different server and monitor ports
        server_log_dir = os.path.join(args["folder_dir"], "server_logs")
        os.makedirs(server_log_dir, exist_ok=True)
        test_no_render = os.environ.get("GYM_CPU_TEST_NO_RENDER", "0") == "1"
        test_no_realtime = os.environ.get("GYM_CPU_TEST_NO_REALTIME", "0") == "1"

        server = Train_Server(
            self.server_p - 1,
            self.monitor_p,
            1,
            no_render=test_no_render,
            no_realtime=test_no_realtime,
        )
        env = WalkEnv(self.ip, self.server_p - 1)
        model = PPO.load(args["model_file"], env=env)

        try:
            self.export_model(args["model_file"], args["model_file"] + ".pkl",
                              False)  # Export to pkl to create custom behavior
            self.test_model(model, env, log_path=args["folder_dir"], model_path=args["folder_dir"])
        except KeyboardInterrupt:
            print()

        env.close()
        server.kill()


if __name__ == "__main__":
    from types import SimpleNamespace

    # Default launch arguments
    script_args = SimpleNamespace(
        args=SimpleNamespace(
            i='127.0.0.1',  # Server IP
            p=3100,         # Server port
            m=3200,         # Monitor port
            r=0,            # Robot type
            t='Gym',        # Team name
            u=1             # Uniform number
        )
    )

    trainer = Train(script_args)

    run_mode = os.environ.get("GYM_CPU_MODE", "train").strip().lower()

    if run_mode == "test":
        test_model_file = os.environ.get("GYM_CPU_TEST_MODEL", "scripts/gyms/logs/Walk_R0_004/best_model.zip")
        test_folder = os.environ.get("GYM_CPU_TEST_FOLDER", "scripts/gyms/logs/Walk_R0_004/")
        trainer.test({"model_file": test_model_file, "folder_dir": test_folder})
    else:
        retrain_model = os.environ.get("GYM_CPU_TRAIN_MODEL", "").strip()
        if retrain_model:
            trainer.train({"model_file": retrain_model})
        else:
            trainer.train({})
136
train.sh
136
train.sh
@@ -1,136 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# ------------------------------
# Resource limits (cgroup v2 + systemd-run)
# ------------------------------
# Notes:
#  1) This script places the training process in a transient systemd scope
#     and applies CPU/memory caps.
#  2) Only this training run is limited; no permanent system settings change.
#  3) Every variable below can be overridden from the environment, i.e. you
#     can prefix the command with temporary values.
#
# CPU core budget (default 20):
#   If your machine is budgeted at 20 cores, keep the default.
CORES="${CORES:-20}"
# CPU utilization percentage (default 100):
#   Multiplied by CORES to form CPUQuota.
#   Example: CORES=20, UTIL_PERCENT=100 -> CPUQuota=2000% (~20 cores).
UTIL_PERCENT="${UTIL_PERCENT:-100}"
CPU_QUOTA="$((CORES * UTIL_PERCENT))%"

# Memory cap (disabled by default):
#   Set a concrete value (e.g. 24G/28G) to cap the training run's memory;
#   set 0/none/off/infinity to skip the cgroup memory limit entirely.
MEMORY_MAX="${MEMORY_MAX:-0}"

# ------------------------------
# Training runtime parameters (read by scripts/gyms/Walk.py)
# ------------------------------
# Run mode: train or test
GYM_CPU_MODE="${GYM_CPU_MODE:-train}"

# Number of parallel environments: larger values usually raise throughput but
# also the risk of OOM or unstable connections. The default here is 20; if
# runs are flaky, drop to 12 and work back up to 16/20 once stable.
GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS:-20}"
# Server warmup time (seconds):
#   Waiting a while after batch-launching the rcssserver instances before
#   creating the SubprocVecEnv lowers the chance of ConnectionReset/EOFError.
GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC:-10}"

# Training-only parameters
GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV:-256}"
GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE:-512}"
GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR:-1e-4}"
GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF:-0.03}"
GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE:-0.13}"
GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA:-0.95}"
GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS:-5}"
GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL:-}"

# Test-only parameters
GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL:-scripts/gyms/logs/Walk_R0_004/best_model.zip}"
GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER:-scripts/gyms/logs/Walk_R0_004/}"
# Testing defaults to real-time with rendering: both flags default to 0.
# Set a flag to 1 to disable the corresponding capability.
GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER:-0}"
GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME:-0}"

# Python interpreter selection policy:
#  1) Prefer an explicitly provided PYTHON_BIN
#  2) Then the currently active conda env (CONDA_PREFIX/bin/python)
#  3) Then the default mujoco env path
#  4) Finally fall back to system python / python3
DEFAULT_PYTHON="/home/solren/Downloads/Anaconda/envs/mujoco/bin/python"
CONDA_PYTHON="${CONDA_PREFIX:-}/bin/python"

# Safety guard: do not run with sudo.
# sudo can desynchronize the conda env from the user session, which loses the
# python path and hides the systemd --user session.
if [[ "${EUID}" -eq 0 ]]; then
  echo "Do not run this script with sudo; run as your normal user in conda env 'mujoco'."
  exit 1
fi

# Resolve the Python executable to use.
if [[ -n "${PYTHON_BIN:-}" ]]; then
  PYTHON_EXEC="${PYTHON_BIN}"
elif [[ -n "${CONDA_PREFIX:-}" && -x "${CONDA_PYTHON}" ]]; then
  PYTHON_EXEC="${CONDA_PYTHON}"
elif [[ -x "${DEFAULT_PYTHON}" ]]; then
  PYTHON_EXEC="${DEFAULT_PYTHON}"
elif command -v python >/dev/null 2>&1; then
  PYTHON_EXEC="$(command -v python)"
elif command -v python3 >/dev/null 2>&1; then
  PYTHON_EXEC="$(command -v python3)"
else
  echo "No Python executable found. Set PYTHON_BIN=/abs/path/to/python and retry."
  exit 1
fi

# Absolute directory of this script, for locating modules/relative paths.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# Print the effective configuration for debugging and reproducibility.
echo "Starting training with limits: CPU=${CPU_QUOTA}, Memory=${MEMORY_MAX}"
echo "Mode: ${GYM_CPU_MODE}"
echo "Runtime knobs: GYM_CPU_N_ENVS=${GYM_CPU_N_ENVS}, GYM_CPU_SERVER_WARMUP_SEC=${GYM_CPU_SERVER_WARMUP_SEC}"
echo "Using Python: ${PYTHON_EXEC}"
if [[ -n "${CONDA_DEFAULT_ENV:-}" ]]; then
  echo "Detected conda env: ${CONDA_DEFAULT_ENV}"
fi

SYSTEMD_PROPS=("-p" "CPUQuota=${CPU_QUOTA}")
case "${MEMORY_MAX,,}" in
  0|none|off|infinity)
    echo "MemoryMax is disabled for this run (no cgroup memory cap)."
    ;;
  *)
    SYSTEMD_PROPS+=("-p" "MemoryMax=${MEMORY_MAX}")
    ;;
esac

# Launch the resource-limited training process via systemd-run --user --scope:
#  - CPUQuota:  total CPU quota
#  - MemoryMax: memory ceiling
#  - env ...:   pass training parameters explicitly to the Python process
#  - python -m scripts.gyms.Walk: start the training entry point as a module
systemd-run --user --scope \
  "${SYSTEMD_PROPS[@]}" \
  env \
  GYM_CPU_MODE="${GYM_CPU_MODE}" \
  GYM_CPU_N_ENVS="${GYM_CPU_N_ENVS}" \
  GYM_CPU_SERVER_WARMUP_SEC="${GYM_CPU_SERVER_WARMUP_SEC}" \
  GYM_CPU_TRAIN_STEPS_PER_ENV="${GYM_CPU_TRAIN_STEPS_PER_ENV}" \
  GYM_CPU_TRAIN_BATCH_SIZE="${GYM_CPU_TRAIN_BATCH_SIZE}" \
  GYM_CPU_TRAIN_LR="${GYM_CPU_TRAIN_LR}" \
  GYM_CPU_TRAIN_ENT_COEF="${GYM_CPU_TRAIN_ENT_COEF}" \
  GYM_CPU_TRAIN_CLIP_RANGE="${GYM_CPU_TRAIN_CLIP_RANGE}" \
  GYM_CPU_TRAIN_GAMMA="${GYM_CPU_TRAIN_GAMMA}" \
  GYM_CPU_TRAIN_EPOCHS="${GYM_CPU_TRAIN_EPOCHS}" \
  GYM_CPU_TRAIN_MODEL="${GYM_CPU_TRAIN_MODEL}" \
  GYM_CPU_TEST_MODEL="${GYM_CPU_TEST_MODEL}" \
  GYM_CPU_TEST_FOLDER="${GYM_CPU_TEST_FOLDER}" \
  GYM_CPU_TEST_NO_RENDER="${GYM_CPU_TEST_NO_RENDER}" \
  GYM_CPU_TEST_NO_REALTIME="${GYM_CPU_TEST_NO_REALTIME}" \
  "${PYTHON_EXEC}" "-m" "scripts.gyms.Walk"
@@ -47,7 +47,6 @@ class World:
        self.their_team_players: list[OtherRobot] = [OtherRobot(is_teammate=False) for _ in
                                                     range(self.MAX_PLAYERS_PER_TEAM)]
        self.field: Field = self.__initialize_field(field_name=field_name)
        self.WORLD_STEPTIME: float = 0.005  # Time step of the world in seconds

    def update(self) -> None:
        """