Init
This commit is contained in:
BIN
agent/__pycache__/agent.cpython-311.pyc
Normal file
BIN
agent/__pycache__/agent.cpython-311.pyc
Normal file
Binary file not shown.
BIN
agent/__pycache__/base_agent.cpython-311.pyc
Normal file
BIN
agent/__pycache__/base_agent.cpython-311.pyc
Normal file
Binary file not shown.
135
agent/agent.py
Normal file
135
agent/agent.py
Normal file
@@ -0,0 +1,135 @@
|
||||
from dataclasses import Field
|
||||
import logging
|
||||
from typing import Mapping
|
||||
|
||||
import numpy as np
|
||||
from utils.math_ops import MathOps
|
||||
from world.commons.field import FIFAField, HLAdultField
|
||||
from world.commons.play_mode import PlayModeEnum, PlayModeGroupEnum
|
||||
|
||||
|
||||
logger = logging.getLogger()
|
||||
|
||||
|
||||
class Agent:
    """
    Responsible for deciding what the agent should do at each moment.

    This class is called every simulation step to update the agent's behavior
    based on the current state of the world and game conditions.
    """

    # Beam (teleport) poses per field type, keyed by player number.
    # Each entry is (x, y, rotation); the first two components are the 2D
    # beam position and the third is the initial rotation passed verbatim
    # to ``commit_beam`` (units as expected by the server — presumably
    # degrees; confirm against ``Server.commit_beam``).
    # NOTE(review): the original annotated the outer key as
    # ``type[dataclasses.Field]``, which is almost certainly a mis-import —
    # the keys are field-layout classes, so the annotation is corrected to
    # not depend on ``dataclasses.Field``.
    BEAM_POSES: Mapping[type, Mapping[int, tuple[float, float, float]]] = {
        FIFAField: {
            1: (2.1, 0, 0),
            2: (22.0, 12.0, 0),
            3: (22.0, 4.0, 0),
            4: (22.0, -4.0, 0),
            5: (22.0, -12.0, 0),
            6: (15.0, 0.0, 0),
            7: (4.0, 16.0, 0),
            8: (11.0, 6.0, 0),
            9: (11.0, -6.0, 0),
            10: (4.0, -16.0, 0),
            11: (7.0, 0.0, 0),
        },
        HLAdultField: {
            1: (7.0, 0.0, 0),
            2: (2.0, -1.5, 0),
            3: (2.0, 1.5, 0),
        },
    }

    def __init__(self, agent):
        """
        Creates a new decision maker linked to the given agent.

        Args:
            agent: The main agent (a ``Base_Agent``) that owns this
                decision maker.
        """
        from agent.base_agent import Base_Agent  # local import: type hinting only, avoids circular import

        self.agent: Base_Agent = agent
        # True while a previously started "GetUp" skill has not yet finished,
        # so it keeps being executed on subsequent steps until completion.
        self.is_getting_up: bool = False

    def update_current_behavior(self) -> None:
        """
        Chooses what the agent should do in the current step.

        Checks the game state and decides which behavior or skill should be
        executed next, then commits the resulting motor targets.
        """
        world = self.agent.world

        if world.playmode is PlayModeEnum.GAME_OVER:
            # Nothing to do once the game has ended; do not commit targets.
            return

        if world.playmode_group in (
            PlayModeGroupEnum.ACTIVE_BEAM,
            PlayModeGroupEnum.PASSIVE_BEAM,
        ):
            # Hoisted: the original performed this double dict lookup twice.
            beam_pose = self.BEAM_POSES[type(world.field)][world.number]
            self.agent.server.commit_beam(
                pos2d=beam_pose[:2],
                rotation=beam_pose[2],
            )
            # NOTE(review): intentionally no return here in the original —
            # a behavior is still selected below after beaming; confirm this
            # fall-through is desired.

        if self.is_getting_up or self.agent.skills_manager.is_ready(skill_name="GetUp"):
            # Keep executing "GetUp" until it reports completion (returns True).
            self.is_getting_up = not self.agent.skills_manager.execute(skill_name="GetUp")
        elif world.playmode is PlayModeEnum.PLAY_ON:
            self.carry_ball()
        elif world.playmode in (PlayModeEnum.BEFORE_KICK_OFF, PlayModeEnum.THEIR_GOAL, PlayModeEnum.OUR_GOAL):
            self.agent.skills_manager.execute("Neutral")
        else:
            self.carry_ball()

        self.agent.robot.commit_motor_targets_pd()

    def carry_ball(self):
        """
        Basic example of a behavior: moves the robot toward the goal while
        handling the ball.

        Walks to a point slightly behind the ball (relative to the goal)
        until aligned with the ball-to-goal direction, then walks through
        the ball toward the opponent goal.
        """
        their_goal_pos = self.agent.world.field.get_their_goal_position()[:2]
        ball_pos = self.agent.world.ball_pos[:2]
        my_pos = self.agent.world.global_position[:2]

        ball_to_goal = their_goal_pos - ball_pos
        bg_norm = np.linalg.norm(ball_to_goal)
        if bg_norm == 0:
            # Ball exactly on the goal position: direction undefined, skip.
            return
        ball_to_goal_dir = ball_to_goal / bg_norm

        # Target point behind the ball along the ball->goal line, so the
        # robot approaches the ball from the correct side.
        dist_from_ball_to_start_carrying = 0.30
        carry_ball_pos = ball_pos - ball_to_goal_dir * dist_from_ball_to_start_carrying

        my_to_ball = ball_pos - my_pos
        my_to_ball_norm = np.linalg.norm(my_to_ball)
        if my_to_ball_norm == 0:
            my_to_ball_dir = np.zeros(2)
        else:
            my_to_ball_dir = my_to_ball / my_to_ball_norm

        # Angle between my approach direction and the ball->goal direction;
        # clip guards arccos against floating-point drift outside [-1, 1].
        cosang = np.dot(my_to_ball_dir, ball_to_goal_dir)
        cosang = np.clip(cosang, -1.0, 1.0)
        angle_diff = np.arccos(cosang)

        ANGLE_TOL = np.deg2rad(7.5)
        aligned = (my_to_ball_norm > 1e-6) and (angle_diff <= ANGLE_TOL)

        # Negative projection onto the ball->goal axis means the robot is on
        # the far side of the ball from the goal (i.e. behind the ball).
        behind_ball = np.dot(my_pos - ball_pos, ball_to_goal_dir) < 0
        desired_orientation = MathOps.vector_angle(ball_to_goal)

        if not aligned or not behind_ball:
            # Reposition behind the ball; only force the final orientation
            # once close (<= 2 m) to the approach point.
            self.agent.skills_manager.execute(
                "Walk",
                target_2d=carry_ball_pos,
                is_target_absolute=True,
                orientation=None if np.linalg.norm(my_pos - carry_ball_pos) > 2 else desired_orientation
            )
        else:
            # Aligned and behind the ball: push through it toward the goal.
            self.agent.skills_manager.execute(
                "Walk",
                target_2d=their_goal_pos,
                is_target_absolute=True,
                orientation=desired_orientation
            )
|
||||
|
||||
81
agent/base_agent.py
Normal file
81
agent/base_agent.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import logging
|
||||
|
||||
from agent.agent import Agent
|
||||
from world.robot import T1, Robot
|
||||
from behaviors.behavior_manager import BehaviorManager
|
||||
from world.world import World
|
||||
from communication.server import Server
|
||||
from communication.world_parser import WorldParser
|
||||
|
||||
logger = logging.getLogger(__file__)
|
||||
|
||||
|
||||
class Base_Agent:
    def __init__(
        self,
        team_name: str = "Default",
        number: int = 1,
        host: str = "localhost",
        port: int = 60000,
        field: str = 'fifa'
    ):
        """
        Builds the agent and wires up all of its core components.

        Args:
            team_name (str): Name of the team this agent plays for.
            number (int): Player number assigned to this agent.
            host (str): Simulator server host address.
            port (int): Simulator server port.
            field (str): Name of the field configuration to use.
        """
        # Construction order matters: the world must exist before the
        # parser, and the parser before the server that feeds it.
        self.world: World = World(agent=self, team_name=team_name, number=number, field_name=field)
        self.world_parser: WorldParser = WorldParser(agent=self)
        self.server: Server = Server(
            host=host, port=port, world_parser=self.world_parser
        )
        self.robot: Robot = T1(agent=self)
        self.skills_manager: BehaviorManager = BehaviorManager(agent=self)
        self.decision_maker: Agent = Agent(agent=self)

    def run(self):
        """
        Runs the agent's main control loop.

        Connects to the simulator, announces itself with an init message,
        and then repeatedly: receives and parses server data, refreshes the
        world model, runs the decision maker, and sends the resulting
        commands back. On any error the agent shuts down cleanly and the
        exception is re-raised to the caller.
        """
        self.server.connect()

        init_message = f"(init {self.robot.name} {self.world.team_name} {self.world.number})"
        self.server.send_immediate(init_message)

        try:
            while True:
                self.server.receive()
                self.world.update()
                self.decision_maker.update_current_behavior()
                self.server.send()
        except Exception:
            # Release the connection before propagating the failure.
            self.shutdown()
            raise

    def shutdown(self):
        """
        Safely shuts down the agent: logs the event and closes the
        server connection.
        """
        logger.info("Shutting down.")
        self.server.shutdown()
|
||||
Reference in New Issue
Block a user