Compare commits
5 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| fa6b3d644a | |||
| 5832cb7ba9 | |||
| 7e9b71e4fb | |||
|
|
978a064012 | ||
|
|
02afa3c1fc |
18
.gitignore
vendored
Normal file
18
.gitignore
vendored
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
.venv/
|
||||||
|
.vscode/
|
||||||
|
**/__pycache__/
|
||||||
|
poetry.lock
|
||||||
|
poetry.toml
|
||||||
|
**/*.log
|
||||||
|
**/*.txt
|
||||||
|
**/build/
|
||||||
|
**/install/
|
||||||
|
**/log/
|
||||||
|
*.spec
|
||||||
|
dist/
|
||||||
|
*.zip
|
||||||
|
*.csv
|
||||||
|
*.json
|
||||||
|
*.xml
|
||||||
|
*.npz
|
||||||
|
*.pkl
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
import socket
|
import socket
|
||||||
|
import time
|
||||||
from select import select
|
from select import select
|
||||||
from communication.world_parser import WorldParser
|
from communication.world_parser import WorldParser
|
||||||
|
|
||||||
@@ -10,15 +12,32 @@ class Server:
|
|||||||
def __init__(self, host: str, port: int, world_parser: WorldParser):
|
def __init__(self, host: str, port: int, world_parser: WorldParser):
|
||||||
self.world_parser: WorldParser = world_parser
|
self.world_parser: WorldParser = world_parser
|
||||||
self.__host: str = host
|
self.__host: str = host
|
||||||
self.__port: str = port
|
self.__port: int = port
|
||||||
self.__socket: socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
self.__socket: socket.socket = self._create_socket()
|
||||||
self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
|
||||||
self.__send_buff = []
|
self.__send_buff = []
|
||||||
self.__rcv_buffer_size = 1024
|
self.__rcv_buffer_size = 1024
|
||||||
|
self.__rcv_buffer_default_size = 1024
|
||||||
|
self.__max_msg_size = 1048576
|
||||||
|
self.__shrink_threshold = 8192
|
||||||
|
self.__shrink_after_msgs = 200
|
||||||
|
self.__small_msg_streak = 0
|
||||||
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
|
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
|
||||||
|
|
||||||
|
def _create_socket(self) -> socket.socket:
|
||||||
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
||||||
|
return sock
|
||||||
|
|
||||||
def connect(self) -> None:
|
def connect(self) -> None:
|
||||||
logger.info("Connecting to server at %s:%d...", self.__host, self.__port)
|
logger.info("Connecting to server at %s:%d...", self.__host, self.__port)
|
||||||
|
|
||||||
|
# Always reconnect with a fresh socket object.
|
||||||
|
try:
|
||||||
|
self.__socket.close()
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
self.__socket = self._create_socket()
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
self.__socket.connect((self.__host, self.__port))
|
self.__socket.connect((self.__host, self.__port))
|
||||||
@@ -27,12 +46,19 @@ class Server:
|
|||||||
logger.error(
|
logger.error(
|
||||||
"Connection refused. Make sure the server is running and listening on {self.__host}:{self.__port}."
|
"Connection refused. Make sure the server is running and listening on {self.__host}:{self.__port}."
|
||||||
)
|
)
|
||||||
|
time.sleep(0.05)
|
||||||
|
|
||||||
logger.info(f"Server connection established to {self.__host}:{self.__port}.")
|
logger.info(f"Server connection established to {self.__host}:{self.__port}.")
|
||||||
|
|
||||||
def shutdown(self) -> None:
|
def shutdown(self) -> None:
|
||||||
self.__socket.close()
|
try:
|
||||||
self.__socket.shutdown(socket.SHUT_RDWR)
|
self.__socket.shutdown(socket.SHUT_RDWR)
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
self.__socket.close()
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
def send_immediate(self, msg: str) -> None:
|
def send_immediate(self, msg: str) -> None:
|
||||||
"""
|
"""
|
||||||
@@ -50,10 +76,13 @@ class Server:
|
|||||||
"""
|
"""
|
||||||
Send all committed messages
|
Send all committed messages
|
||||||
"""
|
"""
|
||||||
if len(select([self.__socket], [], [], 0.0)[0]) == 0:
|
if not self.__send_buff:
|
||||||
self.send_immediate(("".join(self.__send_buff)))
|
return
|
||||||
else:
|
|
||||||
logger.info("Server_Comm.py: Received a new packet while thinking!")
|
if len(select([self.__socket], [], [], 0.0)[0]) != 0:
|
||||||
|
logger.debug("Socket is readable while sending; keeping full-duplex command send.")
|
||||||
|
|
||||||
|
self.send_immediate(("".join(self.__send_buff)))
|
||||||
self.__send_buff = []
|
self.__send_buff = []
|
||||||
|
|
||||||
def commit(self, msg: str) -> None:
|
def commit(self, msg: str) -> None:
|
||||||
@@ -69,37 +98,50 @@ class Server:
|
|||||||
self.commit(msg)
|
self.commit(msg)
|
||||||
self.send()
|
self.send()
|
||||||
|
|
||||||
def receive(self) -> None:
|
def receive(self):
|
||||||
"""
|
|
||||||
Receive the next message from the TCP/IP socket and updates world
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Receive message length information
|
while True:
|
||||||
if (
|
|
||||||
self.__socket.recv_into(
|
if (
|
||||||
self.__rcv_buffer, nbytes=4, flags=socket.MSG_WAITALL
|
self.__socket.recv_into(
|
||||||
|
self.__rcv_buffer, nbytes=4, flags=socket.MSG_WAITALL
|
||||||
|
) != 4
|
||||||
|
):
|
||||||
|
raise ConnectionResetError
|
||||||
|
|
||||||
|
msg_size = int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)
|
||||||
|
|
||||||
|
# Guard against corrupted frame lengths that would trigger huge allocations.
|
||||||
|
if msg_size <= 0 or msg_size > self.__max_msg_size:
|
||||||
|
raise ConnectionResetError
|
||||||
|
|
||||||
|
if msg_size > self.__rcv_buffer_size:
|
||||||
|
self.__rcv_buffer_size = msg_size
|
||||||
|
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
|
||||||
|
|
||||||
|
if (
|
||||||
|
self.__socket.recv_into(
|
||||||
|
self.__rcv_buffer, nbytes=msg_size, flags=socket.MSG_WAITALL
|
||||||
|
) != msg_size
|
||||||
|
):
|
||||||
|
raise ConnectionResetError
|
||||||
|
|
||||||
|
self.world_parser.parse(
|
||||||
|
message=self.__rcv_buffer[:msg_size].decode()
|
||||||
)
|
)
|
||||||
!= 4
|
|
||||||
):
|
|
||||||
raise ConnectionResetError
|
|
||||||
|
|
||||||
msg_size = int.from_bytes(self.__rcv_buffer[:4], byteorder="big", signed=False)
|
if msg_size <= self.__shrink_threshold and self.__rcv_buffer_size > self.__rcv_buffer_default_size:
|
||||||
|
self.__small_msg_streak += 1
|
||||||
|
if self.__small_msg_streak >= self.__shrink_after_msgs:
|
||||||
|
self.__rcv_buffer_size = self.__rcv_buffer_default_size
|
||||||
|
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
|
||||||
|
self.__small_msg_streak = 0
|
||||||
|
else:
|
||||||
|
self.__small_msg_streak = 0
|
||||||
|
|
||||||
# Ensure receive buffer is large enough to hold the message
|
# 如果socket没有更多数据就退出
|
||||||
if msg_size > self.__rcv_buffer_size:
|
if len(select([self.__socket], [], [], 0.0)[0]) == 0:
|
||||||
self.__rcv_buffer_size = msg_size
|
break
|
||||||
self.__rcv_buffer = bytearray(self.__rcv_buffer_size)
|
|
||||||
|
|
||||||
# Receive message with the specified length
|
|
||||||
if (
|
|
||||||
self.__socket.recv_into(
|
|
||||||
self.__rcv_buffer, nbytes=msg_size, flags=socket.MSG_WAITALL
|
|
||||||
)
|
|
||||||
!= msg_size
|
|
||||||
):
|
|
||||||
raise ConnectionResetError
|
|
||||||
|
|
||||||
self.world_parser.parse(message=self.__rcv_buffer[:msg_size].decode())
|
|
||||||
|
|
||||||
def commit_beam(self, pos2d: list, rotation: float) -> None:
|
def commit_beam(self, pos2d: list, rotation: float) -> None:
|
||||||
assert len(pos2d) == 2
|
assert len(pos2d) == 2
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
import re
|
import re
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from scipy.spatial.transform import Rotation as R
|
from scipy.spatial.transform import Rotation as R
|
||||||
@@ -7,6 +8,16 @@ from utils.math_ops import MathOps
|
|||||||
from world.commons.play_mode import PlayModeEnum
|
from world.commons.play_mode import PlayModeEnum
|
||||||
|
|
||||||
logger = logging.getLogger()
|
logger = logging.getLogger()
|
||||||
|
DEBUG_LOG_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), "comm_debug.log")
|
||||||
|
|
||||||
|
|
||||||
|
def _debug_log(message: str) -> None:
|
||||||
|
print(message)
|
||||||
|
try:
|
||||||
|
with open(DEBUG_LOG_FILE, "a", encoding="utf-8") as f:
|
||||||
|
f.write(message + "\n")
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class WorldParser:
|
class WorldParser:
|
||||||
@@ -14,6 +25,36 @@ class WorldParser:
|
|||||||
from agent.base_agent import Base_Agent # type hinting
|
from agent.base_agent import Base_Agent # type hinting
|
||||||
|
|
||||||
self.agent: Base_Agent = agent
|
self.agent: Base_Agent = agent
|
||||||
|
self._hj_debug_prints = 0
|
||||||
|
|
||||||
|
def _normalize_motor_name(self, motor_name: str) -> str:
|
||||||
|
alias_map = {
|
||||||
|
"q_hj1": "he1",
|
||||||
|
"q_hj2": "he2",
|
||||||
|
"q_laj1": "lae1",
|
||||||
|
"q_laj2": "lae2",
|
||||||
|
"q_laj3": "lae3",
|
||||||
|
"q_laj4": "lae4",
|
||||||
|
"q_raj1": "rae1",
|
||||||
|
"q_raj2": "rae2",
|
||||||
|
"q_raj3": "rae3",
|
||||||
|
"q_raj4": "rae4",
|
||||||
|
"q_wj1": "te1",
|
||||||
|
"q_tj1": "te1",
|
||||||
|
"q_llj1": "lle1",
|
||||||
|
"q_llj2": "lle2",
|
||||||
|
"q_llj3": "lle3",
|
||||||
|
"q_llj4": "lle4",
|
||||||
|
"q_llj5": "lle5",
|
||||||
|
"q_llj6": "lle6",
|
||||||
|
"q_rlj1": "rle1",
|
||||||
|
"q_rlj2": "rle2",
|
||||||
|
"q_rlj3": "rle3",
|
||||||
|
"q_rlj4": "rle4",
|
||||||
|
"q_rlj5": "rle5",
|
||||||
|
"q_rlj6": "rle6",
|
||||||
|
}
|
||||||
|
return alias_map.get(motor_name, motor_name)
|
||||||
|
|
||||||
def parse(self, message: str) -> None:
|
def parse(self, message: str) -> None:
|
||||||
perception_dict: dict = self.__sexpression_to_dict(message)
|
perception_dict: dict = self.__sexpression_to_dict(message)
|
||||||
@@ -51,9 +92,29 @@ class WorldParser:
|
|||||||
|
|
||||||
robot = self.agent.robot
|
robot = self.agent.robot
|
||||||
|
|
||||||
robot.motor_positions = {h["n"]: h["ax"] for h in perception_dict["HJ"]}
|
hj_states = perception_dict["HJ"] if isinstance(perception_dict["HJ"], list) else [perception_dict["HJ"]]
|
||||||
|
|
||||||
robot.motor_speeds = {h["n"]: h["vx"] for h in perception_dict["HJ"]}
|
if self._hj_debug_prints < 5:
|
||||||
|
names = [joint_state.get("n", "<missing>") for joint_state in hj_states]
|
||||||
|
normalized_names = [self._normalize_motor_name(name) for name in names]
|
||||||
|
matched_names = [name for name in names if name in robot.motor_positions]
|
||||||
|
matched_normalized_names = [name for name in normalized_names if name in robot.motor_positions]
|
||||||
|
# _debug_log(
|
||||||
|
# "[ParserDebug] "
|
||||||
|
# f"hj_count={len(hj_states)} "
|
||||||
|
# f"sample_names={names[:8]} "
|
||||||
|
# f"normalized_sample={normalized_names[:8]} "
|
||||||
|
# f"matched={len(matched_names)}/{len(names)} "
|
||||||
|
# f"matched_normalized={len(matched_normalized_names)}/{len(normalized_names)}"
|
||||||
|
# )
|
||||||
|
self._hj_debug_prints += 1
|
||||||
|
|
||||||
|
for joint_state in hj_states:
|
||||||
|
motor_name = self._normalize_motor_name(joint_state["n"])
|
||||||
|
if motor_name in robot.motor_positions:
|
||||||
|
robot.motor_positions[motor_name] = joint_state["ax"]
|
||||||
|
if motor_name in robot.motor_speeds:
|
||||||
|
robot.motor_speeds[motor_name] = joint_state["vx"]
|
||||||
|
|
||||||
world._global_cheat_position = np.array(perception_dict["pos"]["p"])
|
world._global_cheat_position = np.array(perception_dict["pos"]["p"])
|
||||||
|
|
||||||
|
|||||||
15
readme.md
15
readme.md
@@ -74,6 +74,21 @@ poetry run ./build_binary.sh <team-name>
|
|||||||
|
|
||||||
Once binary generation is finished, the result will be inside the build folder, as ```<team-name>.tar.gz```
|
Once binary generation is finished, the result will be inside the build folder, as ```<team-name>.tar.gz```
|
||||||
|
|
||||||
|
### GYM
|
||||||
|
|
||||||
|
To use the gym, you need to install the following dependencies:
|
||||||
|
```bash
|
||||||
|
pip install gymnasium
|
||||||
|
pip install psutil
|
||||||
|
pip install stable-baselines3
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, you can run gym examples under the ```GYM_CPU``` folder:
|
||||||
|
```bash
|
||||||
|
python3 -m scripts.gyms.Walk # Run the Walk gym example
|
||||||
|
# of course, you can run other gym examples
|
||||||
|
```
|
||||||
|
|
||||||
### Authors and acknowledgment
|
### Authors and acknowledgment
|
||||||
This project was developed and contributed by:
|
This project was developed and contributed by:
|
||||||
- **Chenxi Liu**
|
- **Chenxi Liu**
|
||||||
|
|||||||
148
scripts/commons/Server.py
Normal file
148
scripts/commons/Server.py
Normal file
@@ -0,0 +1,148 @@
|
|||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import threading
|
||||||
|
|
||||||
|
|
||||||
|
class Server():
|
||||||
|
WATCHDOG_ENABLED = True
|
||||||
|
WATCHDOG_INTERVAL_SEC = 30.0
|
||||||
|
WATCHDOG_RSS_MB_LIMIT = 2000.0
|
||||||
|
|
||||||
|
def __init__(self, first_server_p, first_monitor_p, n_servers, no_render=True, no_realtime=True) -> None:
|
||||||
|
try:
|
||||||
|
import psutil
|
||||||
|
self.check_running_servers(psutil, first_server_p, first_monitor_p, n_servers)
|
||||||
|
except ModuleNotFoundError:
|
||||||
|
print("Info: Cannot check if the server is already running, because the psutil module was not found")
|
||||||
|
|
||||||
|
self.first_server_p = first_server_p
|
||||||
|
self.n_servers = n_servers
|
||||||
|
self.rcss_processes = []
|
||||||
|
self._server_specs = []
|
||||||
|
self._watchdog_stop = threading.Event()
|
||||||
|
self._watchdog_lock = threading.Lock()
|
||||||
|
self._watchdog_thread = None
|
||||||
|
first_monitor_p = first_monitor_p + 100
|
||||||
|
|
||||||
|
# makes it easier to kill test servers without affecting train servers
|
||||||
|
cmd = "rcssservermj"
|
||||||
|
render_arg = "--no-render" if no_render else ""
|
||||||
|
realtime_arg = "--no-realtime" if no_realtime else ""
|
||||||
|
for i in range(n_servers):
|
||||||
|
port = first_server_p + i
|
||||||
|
mport = first_monitor_p + i
|
||||||
|
self._server_specs.append((port, mport, cmd, render_arg, realtime_arg))
|
||||||
|
proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
|
||||||
|
self.rcss_processes.append(proc)
|
||||||
|
|
||||||
|
if self.WATCHDOG_ENABLED:
|
||||||
|
self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=True)
|
||||||
|
self._watchdog_thread.start()
|
||||||
|
|
||||||
|
def _spawn_server(self, port, mport, cmd, render_arg, realtime_arg):
|
||||||
|
server_cmd = f"{cmd} -c {port} -m {mport} {render_arg} {realtime_arg}".strip()
|
||||||
|
|
||||||
|
proc = subprocess.Popen(
|
||||||
|
server_cmd.split(),
|
||||||
|
stdout=subprocess.DEVNULL,
|
||||||
|
stderr=subprocess.STDOUT,
|
||||||
|
start_new_session=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Avoid startup storm when launching many servers at once.
|
||||||
|
time.sleep(0.03)
|
||||||
|
|
||||||
|
rc = proc.poll()
|
||||||
|
if rc is not None:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"rcssservermj exited early (code={rc}) on server port {port}, monitor port {mport}"
|
||||||
|
)
|
||||||
|
|
||||||
|
return proc
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _pid_rss_mb(pid):
|
||||||
|
try:
|
||||||
|
with open(f"/proc/{pid}/status", "r", encoding="utf-8") as f:
|
||||||
|
for line in f:
|
||||||
|
if line.startswith("VmRSS:"):
|
||||||
|
parts = line.split()
|
||||||
|
if len(parts) >= 2:
|
||||||
|
# VmRSS is kB
|
||||||
|
return float(parts[1]) / 1024.0
|
||||||
|
except (FileNotFoundError, ProcessLookupError, PermissionError, OSError):
|
||||||
|
return 0.0
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
def _restart_server_at_index(self, idx, reason):
|
||||||
|
port, mport, cmd, render_arg, realtime_arg = self._server_specs[idx]
|
||||||
|
old_proc = self.rcss_processes[idx]
|
||||||
|
try:
|
||||||
|
old_proc.terminate()
|
||||||
|
old_proc.wait(timeout=1.0)
|
||||||
|
except Exception:
|
||||||
|
try:
|
||||||
|
old_proc.kill()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
new_proc = self._spawn_server(port, mport, cmd, render_arg, realtime_arg)
|
||||||
|
self.rcss_processes[idx] = new_proc
|
||||||
|
print(
|
||||||
|
f"[ServerWatchdog] Restarted server idx={idx} port={port} monitor={mport} reason={reason}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _watchdog_loop(self):
|
||||||
|
while not self._watchdog_stop.wait(self.WATCHDOG_INTERVAL_SEC):
|
||||||
|
with self._watchdog_lock:
|
||||||
|
for i, proc in enumerate(self.rcss_processes):
|
||||||
|
rc = proc.poll()
|
||||||
|
if rc is not None:
|
||||||
|
self._restart_server_at_index(i, f"exited:{rc}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
rss_mb = self._pid_rss_mb(proc.pid)
|
||||||
|
if rss_mb > self.WATCHDOG_RSS_MB_LIMIT:
|
||||||
|
self._restart_server_at_index(i, f"rss_mb:{rss_mb:.1f}")
|
||||||
|
|
||||||
|
def check_running_servers(self, psutil, first_server_p, first_monitor_p, n_servers):
|
||||||
|
''' Check if any server is running on chosen ports '''
|
||||||
|
found = False
|
||||||
|
p_list = [p for p in psutil.process_iter() if p.cmdline() and "rcssservermj" in " ".join(p.cmdline())]
|
||||||
|
range1 = (first_server_p, first_server_p + n_servers)
|
||||||
|
range2 = (first_monitor_p, first_monitor_p + n_servers)
|
||||||
|
bad_processes = []
|
||||||
|
|
||||||
|
for p in p_list:
|
||||||
|
# currently ignoring remaining default port when only one of the ports is specified (uncommon scenario)
|
||||||
|
ports = [int(arg) for arg in p.cmdline()[1:] if arg.isdigit()]
|
||||||
|
if len(ports) == 0:
|
||||||
|
ports = [60000, 60100] # default server ports (changing this is unlikely)
|
||||||
|
|
||||||
|
conflicts = [str(port) for port in ports if (
|
||||||
|
(range1[0] <= port < range1[1]) or (range2[0] <= port < range2[1]))]
|
||||||
|
|
||||||
|
if len(conflicts) > 0:
|
||||||
|
if not found:
|
||||||
|
print("\nThere are already servers running on the same port(s)!")
|
||||||
|
found = True
|
||||||
|
bad_processes.append(p)
|
||||||
|
print(f"Port(s) {','.join(conflicts)} already in use by \"{' '.join(p.cmdline())}\" (PID:{p.pid})")
|
||||||
|
|
||||||
|
if found:
|
||||||
|
print()
|
||||||
|
while True:
|
||||||
|
inp = input("Enter 'kill' to kill these processes or ctrl+c to abort. ")
|
||||||
|
if inp == "kill":
|
||||||
|
for p in bad_processes:
|
||||||
|
p.kill()
|
||||||
|
return
|
||||||
|
|
||||||
|
def kill(self):
|
||||||
|
self._watchdog_stop.set()
|
||||||
|
if self._watchdog_thread is not None:
|
||||||
|
self._watchdog_thread.join(timeout=1.0)
|
||||||
|
for p in self.rcss_processes:
|
||||||
|
p.kill()
|
||||||
|
print(f"Killed {self.n_servers} rcssservermj processes starting at {self.first_server_p}")
|
||||||
601
scripts/commons/Train_Base.py
Normal file
601
scripts/commons/Train_Base.py
Normal file
@@ -0,0 +1,601 @@
|
|||||||
|
from datetime import datetime, timedelta
|
||||||
|
from itertools import count
|
||||||
|
from os import listdir
|
||||||
|
from os.path import isdir, join, isfile
|
||||||
|
from scripts.commons.UI import UI
|
||||||
|
from shutil import copy
|
||||||
|
from stable_baselines3 import PPO
|
||||||
|
from stable_baselines3.common.base_class import BaseAlgorithm
|
||||||
|
from stable_baselines3.common.callbacks import EvalCallback, CheckpointCallback, CallbackList, BaseCallback
|
||||||
|
from typing import Callable
|
||||||
|
# from world.world import World
|
||||||
|
from xml.dom import minidom
|
||||||
|
import numpy as np
|
||||||
|
import os, time, math, csv, select, sys
|
||||||
|
import pickle
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
|
||||||
|
class Train_Base():
|
||||||
|
def __init__(self, script) -> None:
|
||||||
|
'''
|
||||||
|
When training with multiple environments (multiprocessing):
|
||||||
|
The server port is incremented as follows:
|
||||||
|
self.server_p, self.server_p+1, self.server_p+2, ...
|
||||||
|
We add +1000 to the initial monitor port, so than we can have more than 100 environments:
|
||||||
|
self.monitor_p+1000, self.monitor_p+1001, self.monitor_p+1002, ...
|
||||||
|
When testing we use self.server_p and self.monitor_p
|
||||||
|
'''
|
||||||
|
|
||||||
|
args = script.args
|
||||||
|
self.script = script
|
||||||
|
self.ip = args.i
|
||||||
|
self.server_p = args.p # (initial) server port
|
||||||
|
self.monitor_p = args.m + 100 # monitor port when testing
|
||||||
|
self.monitor_p_1000 = args.m + 1100 # initial monitor port when training
|
||||||
|
self.robot_type = args.r
|
||||||
|
self.team = args.t
|
||||||
|
self.uniform = args.u
|
||||||
|
self.cf_last_time = 0
|
||||||
|
self.cf_delay = 0
|
||||||
|
# self.cf_target_period = World.STEPTIME # target simulation speed while testing (default: real-time)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def prompt_user_for_model(self):
|
||||||
|
|
||||||
|
gyms_logs_path = "./scripts/gyms/logs/"
|
||||||
|
folders = [f for f in listdir(gyms_logs_path) if isdir(join(gyms_logs_path, f))]
|
||||||
|
folders.sort(key=lambda f: os.path.getmtime(join(gyms_logs_path, f)), reverse=True) # sort by modification date
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
folder_name = UI.print_list(folders, prompt="Choose folder (ctrl+c to return): ")[1]
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print()
|
||||||
|
return None # ctrl+c
|
||||||
|
|
||||||
|
folder_dir = os.path.join(gyms_logs_path, folder_name)
|
||||||
|
models = [m[:-4] for m in listdir(folder_dir) if isfile(join(folder_dir, m)) and m.endswith(".zip")]
|
||||||
|
|
||||||
|
if not models:
|
||||||
|
print("The chosen folder does not contain any .zip file!")
|
||||||
|
continue
|
||||||
|
|
||||||
|
models.sort(key=lambda m: os.path.getmtime(join(folder_dir, m + ".zip")),
|
||||||
|
reverse=True) # sort by modification date
|
||||||
|
|
||||||
|
try:
|
||||||
|
model_name = UI.print_list(models, prompt="Choose model (ctrl+c to return): ")[1]
|
||||||
|
break
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print()
|
||||||
|
|
||||||
|
return {"folder_dir": folder_dir, "folder_name": folder_name,
|
||||||
|
"model_file": os.path.join(folder_dir, model_name + ".zip")}
|
||||||
|
|
||||||
|
# def control_fps(self, read_input = False):
|
||||||
|
# ''' Add delay to control simulation speed '''
|
||||||
|
|
||||||
|
# if read_input:
|
||||||
|
# speed = input()
|
||||||
|
# if speed == '':
|
||||||
|
# self.cf_target_period = 0
|
||||||
|
# print(f"Changed simulation speed to MAX")
|
||||||
|
# else:
|
||||||
|
# if speed == '0':
|
||||||
|
# inp = input("Paused. Set new speed or '' to use previous speed:")
|
||||||
|
# if inp != '':
|
||||||
|
# speed = inp
|
||||||
|
|
||||||
|
# try:
|
||||||
|
# speed = int(speed)
|
||||||
|
# assert speed >= 0
|
||||||
|
# self.cf_target_period = World.STEPTIME * 100 / speed
|
||||||
|
# print(f"Changed simulation speed to {speed}%")
|
||||||
|
# except:
|
||||||
|
# print("""Train_Base.py:
|
||||||
|
# Error: To control the simulation speed, enter a non-negative integer.
|
||||||
|
# To disable this control module, use test_model(..., enable_FPS_control=False) in your gyms environment.""")
|
||||||
|
|
||||||
|
# now = time.time()
|
||||||
|
# period = now - self.cf_last_time
|
||||||
|
# self.cf_last_time = now
|
||||||
|
# self.cf_delay += (self.cf_target_period - period)*0.9
|
||||||
|
# if self.cf_delay > 0:
|
||||||
|
# time.sleep(self.cf_delay)
|
||||||
|
# else:
|
||||||
|
# self.cf_delay = 0
|
||||||
|
|
||||||
|
def test_model(self, model: BaseAlgorithm, env, log_path: str = None, model_path: str = None, max_episodes=0,
|
||||||
|
enable_FPS_control=True, verbose=1):
|
||||||
|
'''
|
||||||
|
Test model and log results
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
model : BaseAlgorithm
|
||||||
|
Trained model
|
||||||
|
env : Env
|
||||||
|
Gym-like environment
|
||||||
|
log_path : str
|
||||||
|
Folder where statistics file is saved, default is `None` (no file is saved)
|
||||||
|
model_path : str
|
||||||
|
Folder where it reads evaluations.npz to plot it and create evaluations.csv, default is `None` (no plot, no csv)
|
||||||
|
max_episodes : int
|
||||||
|
Run tests for this number of episodes
|
||||||
|
Default is 0 (run until user aborts)
|
||||||
|
verbose : int
|
||||||
|
0 - no output (except if enable_FPS_control=True)
|
||||||
|
1 - print episode statistics
|
||||||
|
'''
|
||||||
|
|
||||||
|
if model_path is not None:
|
||||||
|
assert os.path.isdir(model_path), f"{model_path} is not a valid path"
|
||||||
|
self.display_evaluations(model_path)
|
||||||
|
|
||||||
|
if log_path is not None:
|
||||||
|
assert os.path.isdir(log_path), f"{log_path} is not a valid path"
|
||||||
|
|
||||||
|
# If file already exists, don't overwrite
|
||||||
|
if os.path.isfile(log_path + "/test.csv"):
|
||||||
|
for i in range(1000):
|
||||||
|
p = f"{log_path}/test_{i:03}.csv"
|
||||||
|
if not os.path.isfile(p):
|
||||||
|
log_path = p
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
log_path += "/test.csv"
|
||||||
|
|
||||||
|
with open(log_path, 'w') as f:
|
||||||
|
f.write("reward,ep. length,rew. cumulative avg., ep. len. cumulative avg.\n")
|
||||||
|
print("Train statistics are saved to:", log_path)
|
||||||
|
|
||||||
|
if enable_FPS_control: # control simulation speed (using non blocking user input)
|
||||||
|
print("\nThe simulation speed can be changed by sending a non-negative integer\n"
|
||||||
|
"(e.g. '50' sets speed to 50%, '0' pauses the simulation, '' sets speed to MAX)\n")
|
||||||
|
|
||||||
|
ep_reward = 0
|
||||||
|
ep_length = 0
|
||||||
|
rewards_sum = 0
|
||||||
|
reward_min = math.inf
|
||||||
|
reward_max = -math.inf
|
||||||
|
ep_lengths_sum = 0
|
||||||
|
ep_no = 0
|
||||||
|
|
||||||
|
obs, _ = env.reset()
|
||||||
|
while True:
|
||||||
|
action, _states = model.predict(obs, deterministic=True)
|
||||||
|
obs, reward, terminated, truncated, info = env.step(action)
|
||||||
|
done = terminated or truncated
|
||||||
|
ep_reward += reward
|
||||||
|
ep_length += 1
|
||||||
|
|
||||||
|
# if enable_FPS_control: # control simulation speed (using non blocking user input)
|
||||||
|
# self.control_fps(select.select([sys.stdin], [], [], 0)[0])
|
||||||
|
|
||||||
|
if done:
|
||||||
|
obs, _ = env.reset()
|
||||||
|
rewards_sum += ep_reward
|
||||||
|
ep_lengths_sum += ep_length
|
||||||
|
reward_max = max(ep_reward, reward_max)
|
||||||
|
reward_min = min(ep_reward, reward_min)
|
||||||
|
ep_no += 1
|
||||||
|
avg_ep_lengths = ep_lengths_sum / ep_no
|
||||||
|
avg_rewards = rewards_sum / ep_no
|
||||||
|
|
||||||
|
if verbose > 0:
|
||||||
|
print(
|
||||||
|
f"\rEpisode: {ep_no:<3} Ep.Length: {ep_length:<4.0f} Reward: {ep_reward:<6.2f} \n",
|
||||||
|
end=f"--AVERAGE-- Ep.Length: {avg_ep_lengths:<4.0f} Reward: {avg_rewards:<6.2f} (Min: {reward_min:<6.2f} Max: {reward_max:<6.2f})",
|
||||||
|
flush=True)
|
||||||
|
|
||||||
|
if log_path is not None:
|
||||||
|
with open(log_path, 'a') as f:
|
||||||
|
writer = csv.writer(f)
|
||||||
|
writer.writerow([ep_reward, ep_length, avg_rewards, avg_ep_lengths])
|
||||||
|
|
||||||
|
if ep_no == max_episodes:
|
||||||
|
return
|
||||||
|
|
||||||
|
ep_reward = 0
|
||||||
|
ep_length = 0
|
||||||
|
|
||||||
|
def learn_model(self, model: BaseAlgorithm, total_steps: int, path: str, eval_env=None, eval_freq=None, eval_eps=5,
|
||||||
|
save_freq=None, backup_env_file=None, export_name=None):
|
||||||
|
'''
|
||||||
|
Learn Model for a specific number of time steps
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
model : BaseAlgorithm
|
||||||
|
Model to train
|
||||||
|
total_steps : int
|
||||||
|
The total number of samples (env steps) to train on
|
||||||
|
path : str
|
||||||
|
Path where the trained model is saved
|
||||||
|
If the path already exists, an incrementing number suffix is added
|
||||||
|
eval_env : Env
|
||||||
|
Environment to periodically test the model
|
||||||
|
Default is None (no periodical evaluation)
|
||||||
|
eval_freq : int
|
||||||
|
Evaluate the agent every X steps
|
||||||
|
Default is None (no periodical evaluation)
|
||||||
|
eval_eps : int
|
||||||
|
Evaluate the agent for X episodes (both eval_env and eval_freq must be defined)
|
||||||
|
Default is 5
|
||||||
|
save_freq : int
|
||||||
|
Saves model at every X steps
|
||||||
|
Default is None (no periodical checkpoint)
|
||||||
|
backup_gym_file : str
|
||||||
|
Generates backup of environment file in model's folder
|
||||||
|
Default is None (no backup)
|
||||||
|
export_name : str
|
||||||
|
If export_name and save_freq are defined, a model is exported every X steps
|
||||||
|
Default is None (no export)
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
model_path : str
|
||||||
|
Directory where model was actually saved (considering incremental suffix)
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
If `eval_env` and `eval_freq` were specified:
|
||||||
|
- The policy will be evaluated in `eval_env` every `eval_freq` steps
|
||||||
|
- Evaluation results will be saved in `path` and shown at the end of training
|
||||||
|
- Every time the results improve, the model is saved
|
||||||
|
'''
|
||||||
|
|
||||||
|
start = time.time()
|
||||||
|
start_date = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
|
||||||
|
|
||||||
|
# If path already exists, add suffix to avoid overwriting
|
||||||
|
if os.path.isdir(path):
|
||||||
|
for i in count():
|
||||||
|
p = path.rstrip("/") + f'_{i:03}/'
|
||||||
|
if not os.path.isdir(p):
|
||||||
|
path = p
|
||||||
|
break
|
||||||
|
os.makedirs(path)
|
||||||
|
|
||||||
|
# Backup environment file
|
||||||
|
if backup_env_file is not None:
|
||||||
|
backup_file = os.path.join(path, os.path.basename(backup_env_file))
|
||||||
|
copy(backup_env_file, backup_file)
|
||||||
|
|
||||||
|
evaluate = bool(eval_env is not None and eval_freq is not None)
|
||||||
|
|
||||||
|
# Create evaluation callback
|
||||||
|
eval_callback = None if not evaluate else EvalCallback(eval_env, n_eval_episodes=eval_eps, eval_freq=eval_freq,
|
||||||
|
log_path=path,
|
||||||
|
best_model_save_path=path, deterministic=True,
|
||||||
|
render=False)
|
||||||
|
|
||||||
|
# Create custom callback to display evaluations
|
||||||
|
custom_callback = None if not evaluate else Cyclic_Callback(eval_freq,
|
||||||
|
lambda: self.display_evaluations(path, True))
|
||||||
|
|
||||||
|
# Create checkpoint callback
|
||||||
|
checkpoint_callback = None if save_freq is None else CheckpointCallback(save_freq=save_freq, save_path=path,
|
||||||
|
name_prefix="model", verbose=1)
|
||||||
|
|
||||||
|
# Create custom callback to export checkpoint models
|
||||||
|
export_callback = None if save_freq is None or export_name is None else Export_Callback(save_freq, path,
|
||||||
|
export_name)
|
||||||
|
|
||||||
|
callbacks = CallbackList(
|
||||||
|
[c for c in [eval_callback, custom_callback, checkpoint_callback, export_callback] if c is not None])
|
||||||
|
|
||||||
|
model.learn(total_timesteps=total_steps, callback=callbacks)
|
||||||
|
model.save(os.path.join(path, "last_model"))
|
||||||
|
|
||||||
|
# Display evaluations if they exist
|
||||||
|
if evaluate:
|
||||||
|
self.display_evaluations(path)
|
||||||
|
|
||||||
|
# Display timestamps + Model path
|
||||||
|
end_date = datetime.now().strftime('%d/%m/%Y %H:%M:%S')
|
||||||
|
duration = timedelta(seconds=int(time.time() - start))
|
||||||
|
print(f"Train start: {start_date}")
|
||||||
|
print(f"Train end: {end_date}")
|
||||||
|
print(f"Train duration: {duration}")
|
||||||
|
print(f"Model path: {path}")
|
||||||
|
|
||||||
|
# Append timestamps to backup environment file
|
||||||
|
if backup_env_file is not None:
|
||||||
|
with open(backup_file, 'a') as f:
|
||||||
|
f.write(f"\n# Train start: {start_date}\n")
|
||||||
|
f.write(f"# Train end: {end_date}\n")
|
||||||
|
f.write(f"# Train duration: {duration}")
|
||||||
|
|
||||||
|
return path
|
||||||
|
|
||||||
|
def display_evaluations(self, path, save_csv=False):
    '''
    Render the SB3 evaluation log (<path>/evaluations.npz) as an ASCII chart.

    Two curves share one 80x18 character canvas: mean reward per evaluation
    (bullet) and mean episode length (vertical bar); cells crossed by both
    curves use a combined glyph. Optionally appends the newest sample to
    <path>/evaluations.csv.

    Parameters
    ----------
    path : str
        directory containing evaluations.npz (produced by EvalCallback)
    save_csv : bool
        if True, append the newest evaluation row to evaluations.csv
        (the header is written only when the log holds a single sample)
    '''

    eval_npz = os.path.join(path, "evaluations.npz")

    if not os.path.isfile(eval_npz):
        return

    console_width = 80
    console_height = 18
    symb_x = "\u2022"   # bullet: reward curve
    symb_o = "\u007c"   # vertical bar: episode-length curve
    symb_xo = "\u237f"  # both curves share this cell

    with np.load(eval_npz) as data:
        time_steps = data["timesteps"]
        results_raw = np.mean(data["results"], axis=1)        # mean reward per evaluation
        ep_lengths_raw = np.mean(data["ep_lengths"], axis=1)  # mean ep. length per evaluation
    sample_no = len(results_raw)

    # Resample both series to one value per console column
    # (was a hard-coded 80, now tied to console_width)
    xvals = np.linspace(0, sample_no - 1, console_width)
    results = np.interp(xvals, range(sample_no), results_raw)
    ep_lengths = np.interp(xvals, range(sample_no), ep_lengths_raw)

    results_limits = np.min(results), np.max(results)
    ep_lengths_limits = np.min(ep_lengths), np.max(ep_lengths)

    # Map each value to a row index in [0, console_height-1]
    # (rewards span their own min..max; episode lengths always start at 0)
    results_discrete = np.digitize(results, np.linspace(results_limits[0] - 1e-5, results_limits[1] + 1e-5,
                                                        console_height + 1)) - 1
    ep_lengths_discrete = np.digitize(ep_lengths,
                                      np.linspace(0, ep_lengths_limits[1] + 1e-5, console_height + 1)) - 1

    # matrix[row][col][k] == 1 -> curve k passes through that cell (k=0 reward, k=1 ep. length)
    matrix = np.zeros((console_height, console_width, 2), int)
    matrix[results_discrete[0]][0][0] = 1  # draw 1st column
    matrix[ep_lengths_discrete[0]][0][1] = 1  # draw 1st column
    rng = [[results_discrete[0], results_discrete[0]], [ep_lengths_discrete[0], ep_lengths_discrete[0]]]

    # Create continuous line for both plots: fill the vertical gap between
    # consecutive columns so the curves have no holes
    for k in range(2):
        for i in range(1, console_width):
            x = [results_discrete, ep_lengths_discrete][k][i]
            if x > rng[k][1]:
                rng[k] = [rng[k][1] + 1, x]
            elif x < rng[k][0]:
                rng[k] = [x, rng[k][0] - 1]
            else:
                rng[k] = [x, x]
            for j in range(rng[k][0], rng[k][1] + 1):
                matrix[j][i][k] = 1

    print(f'{"-" * console_width}')
    for l in reversed(range(console_height)):  # highest row printed first
        for c in range(console_width):
            if np.all(matrix[l][c] == 0):
                print(end=" ")
            elif np.all(matrix[l][c] == 1):
                print(end=symb_xo)
            elif matrix[l][c][0] == 1:
                print(end=symb_x)
            else:
                print(end=symb_o)
        print()
    print(f'{"-" * console_width}')
    print(f"({symb_x})-reward min:{results_limits[0]:11.2f} max:{results_limits[1]:11.2f}")
    print(
        f"({symb_o})-ep. length min:{ep_lengths_limits[0]:11.0f} max:{ep_lengths_limits[1]:11.0f} {time_steps[-1] / 1000:15.0f}k steps")
    print(f'{"-" * console_width}')

    # save CSV (newline='' avoids blank rows on Windows with the csv module;
    # header column names fixed to match the row layout: time_steps, reward, ep. length)
    if save_csv:
        eval_csv = os.path.join(path, "evaluations.csv")
        with open(eval_csv, 'a+', newline='') as f:
            writer = csv.writer(f)
            if sample_no == 1:  # fresh log -> write header once
                writer.writerow(["time_steps", "reward", "ep. length"])
            writer.writerow([time_steps[-1], results_raw[-1], ep_lengths_raw[-1]])
|
||||||
|
|
||||||
|
# def generate_slot_behavior(self, path, slots, auto_head:bool, XML_name):
|
||||||
|
# '''
|
||||||
|
# Function that generates the XML file for the optimized slot behavior, overwriting previous files
|
||||||
|
# '''
|
||||||
|
|
||||||
|
# file = os.path.join( path, XML_name )
|
||||||
|
|
||||||
|
# # create the file structure
|
||||||
|
# auto_head = '1' if auto_head else '0'
|
||||||
|
# EL_behavior = ET.Element('behavior',{'description':'Add description to XML file', "auto_head":auto_head})
|
||||||
|
|
||||||
|
# for i,s in enumerate(slots):
|
||||||
|
# EL_slot = ET.SubElement(EL_behavior, 'slot', {'name':str(i), 'delta':str(s[0]/1000)})
|
||||||
|
# for j in s[1]: # go through all joint indices
|
||||||
|
# ET.SubElement(EL_slot, 'move', {'id':str(j), 'angle':str(s[2][j])})
|
||||||
|
|
||||||
|
# # create XML file
|
||||||
|
# xml_rough = ET.tostring( EL_behavior, 'utf-8' )
|
||||||
|
# xml_pretty = minidom.parseString(xml_rough).toprettyxml(indent=" ")
|
||||||
|
# with open(file, "w") as x:
|
||||||
|
# x.write(xml_pretty)
|
||||||
|
|
||||||
|
# print(file, "was created!")
|
||||||
|
|
||||||
|
# @staticmethod
|
||||||
|
# def linear_schedule(initial_value: float) -> Callable[[float], float]:
|
||||||
|
# '''
|
||||||
|
# Linear learning rate schedule
|
||||||
|
|
||||||
|
# Parameters
|
||||||
|
# ----------
|
||||||
|
# initial_value : float
|
||||||
|
# Initial learning rate
|
||||||
|
|
||||||
|
# Returns
|
||||||
|
# -------
|
||||||
|
# schedule : Callable[[float], float]
|
||||||
|
# schedule that computes current learning rate depending on remaining progress
|
||||||
|
# '''
|
||||||
|
# def func(progress_remaining: float) -> float:
|
||||||
|
# '''
|
||||||
|
# Compute learning rate according to current progress
|
||||||
|
|
||||||
|
# Parameters
|
||||||
|
# ----------
|
||||||
|
# progress_remaining : float
|
||||||
|
# Progress will decrease from 1 (beginning) to 0
|
||||||
|
|
||||||
|
# Returns
|
||||||
|
# -------
|
||||||
|
# learning_rate : float
|
||||||
|
# Learning rate according to current progress
|
||||||
|
# '''
|
||||||
|
# return progress_remaining * initial_value
|
||||||
|
|
||||||
|
# return func
|
||||||
|
|
||||||
|
@staticmethod
def export_model(input_file, output_file, add_sufix=True):
    '''
    Export model weights to binary file

    Parameters
    ----------
    input_file : str
        Input file, compatible with algorithm
    output_file : str
        Output file, including directory
    add_sufix : bool
        If true, a suffix is appended to the file name: output_file + "_{index}.pkl"
    '''

    # Choose the first non-existing "output_file_XXX.pkl" so nothing is overwritten
    if add_sufix:
        idx = 0
        while True:
            candidate = f"{output_file}_{idx:03}.pkl"
            if not os.path.isfile(candidate):
                output_file = candidate
                break
            idx += 1

    model = PPO.load(input_file)
    weights = model.policy.state_dict()  # dictionary containing network layers

    def to_numpy(layer_name):
        # detach from the autograd graph, move to CPU, convert to ndarray
        return weights[layer_name].detach().cpu().numpy()

    var_list = []
    layer = 0
    # SB3 places hidden layers at even indices of mlp_extractor.policy_net
    while f"mlp_extractor.policy_net.{layer}.bias" in weights:
        var_list.append([to_numpy(f"mlp_extractor.policy_net.{layer}.bias"),
                         to_numpy(f"mlp_extractor.policy_net.{layer}.weight"),
                         "tanh"])
        layer += 2

    # final action layer: no activation
    var_list.append([to_numpy("action_net.bias"), to_numpy("action_net.weight"), "none"])

    with open(output_file, "wb") as out:
        pickle.dump(var_list, out, protocol=4)  # protocol 4 is backward compatible with Python 3.4
|
||||||
|
|
||||||
|
def print_list(data, numbering=True, prompt=None, divider=" | ", alignment="<", min_per_col=6):
    '''
    Print list - prints list, using as many columns as possible

    Parameters
    ----------
    data : `list`
        list of items
    numbering : `bool`
        assigns number to each option
        (note: only an explicit `numbering=None` suppresses the prompt result)
    prompt : `str`
        the prompt string, if given, is printed after the table before reading input
    divider : `str`
        string that divides columns
    alignment : `str`
        f-string style alignment ( '<', '>', '^' )
    min_per_col : int
        avoid splitting columns with fewer items

    Returns
    -------
    item : `int`, item
        returns tuple with global index of selected item and the item object,
        or `None` (if `numbering` or `prompt` are `None`, or `data` is empty)

    '''

    # Guard: an empty list would crash min(items_len) below; print nothing instead
    if not data:
        return None

    WIDTH = shutil.get_terminal_size()[0]

    data_size = len(data)
    items = []
    items_len = []

    # --------------------------------------------- Add numbers, margins and divider
    for i in range(data_size):
        number = f"{i}-" if numbering else ""
        items.append(f"{divider}{number}{data[i]}")
        items_len.append(len(items[-1]))

    # WIDTH + len(divider) because the divider is not needed in the last column
    max_cols = np.clip((WIDTH + len(divider)) // min(items_len), 1, math.ceil(
        data_size / max(min_per_col, 1)))

    # --------------------------------------------- Check maximum number of columns, considering content width (min:1)
    # n_cols survives the loop and is used below (was the leaked loop variable `i`)
    for n_cols in range(max_cols, 0, -1):
        cols_width = []
        cols_items = []
        table_width = 0
        a, b = divmod(data_size, n_cols)
        for col in range(n_cols):
            # spread the remainder `b` over the first columns
            start = a * col + min(b, col)
            end = start + a + (1 if col < b else 0)
            cols_items.append(items[start:end])
            col_width = max(items_len[start:end])
            cols_width.append(col_width)
            table_width += col_width
        if table_width <= WIDTH + len(divider):
            break
    table_width -= len(divider)

    # --------------------------------------------- Print columns
    print("=" * table_width)
    for row in range(math.ceil(data_size / n_cols)):
        for col in range(n_cols):
            content = cols_items[col][row] if len(
                cols_items[col]) > row else divider  # print divider when there are no items
            if col == 0:
                l = len(divider)
                print(end=f"{content[l:]:{alignment}{cols_width[col] - l}}")  # remove divider from 1st col
            else:
                print(end=f"{content :{alignment}{cols_width[col]}}")
        print()
    print("=" * table_width)

    # --------------------------------------------- Prompt
    if prompt is None:
        return None

    if numbering is None:
        return None
    else:
        idx = UI.read_int(prompt, 0, data_size)
        return idx, data[idx]
|
||||||
|
|
||||||
|
|
||||||
|
class Cyclic_Callback(BaseCallback):
    ''' Stable baselines custom callback: runs a user function every `freq` steps '''

    def __init__(self, freq, function):
        super().__init__(1)  # verbose=1, as in BaseCallback
        self.freq = freq
        self.function = function

    def _on_step(self) -> bool:
        # fire the stored callable on every freq-th call
        if self.n_calls % self.freq == 0:
            self.function()
        return True  # If the callback returns False, training is aborted early
|
||||||
|
|
||||||
|
|
||||||
|
class Export_Callback(BaseCallback):
    ''' Stable baselines custom callback: periodically exports checkpoint models '''

    def __init__(self, freq, load_path, export_name):
        super().__init__(1)  # verbose=1, as in BaseCallback
        self.freq = freq
        self.load_path = load_path
        self.export_name = export_name

    def _on_step(self) -> bool:
        # every freq-th call, convert the matching checkpoint into the export format
        if self.n_calls % self.freq == 0:
            path = os.path.join(self.load_path, f"model_{self.num_timesteps}_steps.zip")
            Train_Base.export_model(path, f"./scripts/gyms/export/{self.export_name}")
        return True  # If the callback returns False, training is aborted early
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Reference in New Issue
Block a user