Module agents
[hide private]
[frames] | [no frames]

Source Code for Module agents

   1  ################################################################################## 
   2  # Copyright (c) 2012, Daniel Urieli, Peter Stone 
   3  # University of Texas at Austin 
   4  # All right reserved 
   5  #  
   6  # Based On: 
   7  #  
   8  # Copyright (c) 2000-2003, Jelle Kok, University of Amsterdam 
   9  # All rights reserved. 
  10  #  
  11  # Redistribution and use in source and binary forms, with or without 
  12  # modification, are permitted provided that the following conditions are met: 
  13  #  
  14  # 1. Redistributions of source code must retain the above copyright notice, this 
  15  # list of conditions and the following disclaimer. 
  16  #  
  17  # 2. Redistributions in binary form must reproduce the above copyright notice, 
  18  # this list of conditions and the following disclaimer in the documentation 
  19  # and/or other materials provided with the distribution. 
  20  #  
  21  # 3. Neither the name of the University of Amsterdam nor the names of its 
  22  # contributors may be used to endorse or promote products derived from this 
  23  # software without specific prior written permission. 
  24  #  
  25  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
  26  # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  27  # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
  28  # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
  29  # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
  30  # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
  31  # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
  32  # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
  33  # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
  34  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  35  ################################################################################## 
  36   
  37   
  38   
  39  import copy 
  40  import random 
  41  import os 
  42  import sys 
  43  import operator 
  44  from math import * 
  45   
  46  from vec2d import * 
  47  from lineline import * 
  48   
  49  import geometry 
  50  import rrtTree 
  51  import perceptModels 
  52  from actions import * 
  53  import state 
  54  from messages import PrimitiveMessage, CompositeMessage 
  55  import obstacleAvoidance 
  56  from navigationTactics import * 
  57  from scripts import heuristicDivide 
  58   
  59  ######################################################################## 
  60  # 
  61  # This file defines different types of agents. 
  62  # An agent is defined by: 
  63  # - The way it models the world 
   64  # - The way it processes percepts for this given world model 
  65  # - The set of legal actions available to it 
   66  # - Its strategy for choosing actions, based on the set of legal actions, 
  67  #   and based on its world model. 
  68  # 
  69  # The AgentWorldModel decouples the perception part from the action  
  70  # part, thus allowing us to mix any combination of percepts,  
  71  # percept-processing-algorithm, world-model, legal-action-set, 
  72  # and decision-making-strategy. 
  73  # 
  74  # Implementation-wise, an agent has two components: 
  75  # - AgentWorldModel: implements a specific worldmodel, and percept processing 
  76  # - AgentStrategy: for a given AgentWorldModel and legal actions, 
  77  #                  defines a strategy for choosing actions 
  78  # 
  79  ######################################################################## 
  80   
  81   
  82   
  83   
  84  ######################################################################## 
  85  # AgentWorldModel class. Implements different models an agent  
  86  # can have about the world. 
  87  ######################################################################## 
class AgentWorldModel:
    """
    Base class for all the models an agent can have about the world.

    A subclass implements a specific model of the world and a specific
    way to process percepts for that model.  There can be multiple ways
    to process percepts for any given model, so several subclasses may
    share the same underlying model while overriding processPercepts()
    differently.
    """
    def __init__(self):
        """
        Every world model contains a queue for incoming messages.
        Note that outgoing msgs are inside strategy.
        TODO: does this design need change?
        """
        self.incomingMsgs = CompositeMessage()

    # Abstract methods.
    # These raise NotImplementedError instead of the previous bare-name
    # 'abstract' statement, which raised a confusing NameError at call time.

    def processPercepts(self, perceptsList):
        """
        For a given concrete model, this method implements a specific
        method of updating this model from percepts.
        This is just an extra layer of flexibility, in case there are
        multiple methods to update percepts for the same model.

        @type perceptsList: PerceptList
        @param perceptsList: A list of percepts to be processed
        """
        raise NotImplementedError

    def getEstimatedStatesOfOtherShips(self, selfIndex):
        """
        Get a list of estimated ShipExternalState of all other ships
        in the system.

        @type selfIndex: int
        @param selfIndex: The index of the ship controlled by the agent - we don't want to include it in the returned states.

        @rtype: A list of ShipExternalState
        @return: A list of estimated states of all other ships in the system
        """
        raise NotImplementedError

    def getEstimatedExtShipState(self):
        """
        Get the estimated ShipExternalState as estimated
        by the world model.

        @rtype: ShipExternalState
        @return: An estimation of the ship's real state according to our AgentWorldModel.
        """
        raise NotImplementedError

    def getEstimatedWindVector(self):
        """
        Get the estimated wind vector in the location of the ship.

        @rtype: numpy array (vector)
        @return: an estimation of the wind vector (speed and direction) in the agent's location according to our AgentWorldModel.
        """
        raise NotImplementedError

    def getEstimatedWaterVector(self):
        """
        Get the estimated water vector in the location of the ship.

        @rtype: numpy array (vector)
        @return: an estimation of the water vector (speed and direction) in the agent's location according to our AgentWorldModel.
        """
        raise NotImplementedError

    def getEstimatedObstaclesList(self):
        """
        Get a list of estimated obstacle shapes.

        @rtype: Obstacles
        @return: an estimation of the obstacles shapes (and positions) according to our AgentWorldModel.
        """
        raise NotImplementedError

    # non-abstract methods

    def cleanMsgQueue(self):
        # Drop all queued incoming messages by replacing the queue object.
        self.incomingMsgs = CompositeMessage()

    def receiveMsg(self, msg):
        """
        Append a message to the queue
        """
        self.incomingMsgs.appendMsg(msg)
        # just a debug check
        if len(self.incomingMsgs.primitiveMsgs) > 100:
            raise Exception("Did you forget to read and clean messages")

    # TODO: more interface functions needed?
class AgentWorldModelCompleteWorld(AgentWorldModel):
    """
    Just a skeleton for testing purposes.
    The world model is exactly the world state.
    """

    def __init__(self, shipIndex):
        AgentWorldModel.__init__(self)
        self.state = None          # the last complete state received
        self.shipIndex = shipIndex # index of the ship this agent controls

    # override
    def processPercepts(self, perceptsList):
        "Just search for the CompleteStatePercept and save the state from it"
        for candidate in perceptsList.getPercepts():
            if not isinstance(candidate, perceptModels.CompleteStatePercept):
                continue
            # TODO: removed deepcopy because took time - but shallow copy should be fine
            self.state = copy.copy(candidate.state)
            return
        raise Exception("Couldn't find a CompleteStatePercept")

    def getEstimatedStatesOfOtherShips(self, selfIndex):
        """
        Implements the abstract method: all ship states except our own.
        """
        allStates = self.state.shipExternalStates
        return allStates[:selfIndex] + allStates[selfIndex + 1:]

    def getEstimatedExtShipState(self):
        """
        Implements the abstract method
        """
        return self.state.shipExternalStates[self.shipIndex]

    def getEstimatedWindVector(self):
        """
        Implements the abstract method
        """
        own = self.state.shipExternalStates[self.shipIndex]
        return self.state.sea.wind.getSpeedVectorInLocation(own.x, own.y)

    def getEstimatedWaterVector(self):
        """
        Implements the abstract method
        """
        own = self.state.shipExternalStates[self.shipIndex]
        return self.state.sea.waterCurrents.getSpeedVectorInLocation(own.x, own.y)

    def getEstimatedObstaclesList(self):
        """
        Implements the abstract method
        """
        return self.state.sea.obstacles.getObstaclesList()


########################################################################
# Strategy class. Connecting between AgentWorldModel and the actions.
# A strategy is created for each pair of specific AgentWorldModel
# and a set of available actions (ship actuators).
# Note that for such a pair we can define more then one strategy,
# implementing different decision algorithms.
########################################################################
class AgentStrategy:
    """
    An interface for specific decision making strategy.
    It should be defined for a given AgentWorldModel,
    a given set of legal actions, and a given decision making algorithm.

    For each step, the flow is:
    AgentWorldModel --> Specific_AgentStrategy --> Actions-to-execute
    """
    def __init__(self):
        # outgoingMsg is a message an agent (strategy) can send to
        # other agents and to the simulation environment
        self.outgoingMsg = CompositeMessage()

    def composeAction(self, agentWorldModel):
        """
        Given the (inferred) state of the world, decide on a set of actions.

        Abstract: subclasses must override.  Raises NotImplementedError
        instead of the previous bare-name 'abstract' statement, which
        raised a confusing NameError at call time.

        @type agentWorldModel: AgentWorldModel
        @param agentWorldModel: the agent's current model of the world
        """
        raise NotImplementedError

    def getOutgoingMessage(self):
        """
        Return the pending outgoing message; if it contains anything,
        start a fresh (empty) queue for the next step.
        """
        msg = self.outgoingMsg
        # reset if needed
        if len(msg.primitiveMsgs) > 0:
            self.outgoingMsg = CompositeMessage()
        return msg

    def transmit(self, msg):
        # Queue a message for delivery via getOutgoingMessage().
        self.outgoingMsg.appendMsg(msg)
class AgentStaticPatrolStrategy(AgentStrategy):
    """
    Implements a basic patrol strategy.
    Given points of interest, just travel
    along the cycle they define
    """
    def __init__(self, agentIndex, taskPath, rulesOfTheSea):
        """
        Initializes the strategy.

        @type agentIndex: int
        @param agentIndex: sort of an agent-id - an agent's index in the array of all agents

        @type taskPath: a list of 2-tuples
        @param taskPath: a list of (x,y) points that define a cycle for the agent to travel along

        @type rulesOfTheSea: boolean
        @param rulesOfTheSea: Tells whether to respect the rules of the sea
        """
        AgentStrategy.__init__(self)

        self.agentIndex = agentIndex
        self.taskPath = taskPath
        self.rulesOfTheSea = rulesOfTheSea
        self.nextTaskPointIndex = 0

        # the tactic currently used to reach the next task point;
        # chosen lazily on the first composeAction() call
        self.navigationTactic = None

        # step counter + bookkeeping for per-edge travel-time statistics
        self.time = 0
        self.prevPointTime = 0
        self.firstVisit = True

    def composeAction(self, agentWorldModel):
        """
        Implements the abstract method, see documentation
        of the abstract method.
        """
        self.time += 1

        if self.navigationTactic is None:
            # i.e. first time we enter this function
            goalPos = self.taskPath[self.nextTaskPointIndex]
            self.setNavigationTactic(agentWorldModel, goalPos)

        elif self.navigationTactic.done(agentWorldModel):
            # reached the current task point: record stats and head
            # for the next point on the cycle (wrapping around)
            self.recordStatistics()
            self.nextTaskPointIndex = (self.nextTaskPointIndex + 1) % len(self.taskPath)
            goalPos = self.taskPath[self.nextTaskPointIndex]
            self.setNavigationTactic(agentWorldModel, goalPos)

        return self.navigationTactic.composeAction(agentWorldModel)

    def setNavigationTactic(self, agentWorldModel, nextGoal):
        """
        Choose a navigation tactic towards nextGoal: RRT planning when
        the straight path is blocked by an obstacle, PID otherwise.
        """
        estimatedState = agentWorldModel.getEstimatedExtShipState()
        # why round? because this value goes deep into
        # rrtTree.findClosestNode and floating point arithmetic
        # significantly slows it down!
        estimatedPos = (int(round(estimatedState.x)), int(round(estimatedState.y)))

        if obstacleAvoidance.pathBlocked(estimatedPos, nextGoal, agentWorldModel.getEstimatedObstaclesList()):
            self.navigationTactic = AgentRRTTactic(self.agentIndex,
                                                   estimatedPos,
                                                   nextGoal,
                                                   agentWorldModel.getEstimatedObstaclesList(),
                                                   self.rulesOfTheSea)
        else:
            self.navigationTactic = AgentPIDTactic(self.agentIndex,
                                                   nextGoal,
                                                   self.rulesOfTheSea)

    def getPath(self):
        return self.taskPath

    def recordStatistics(self):
        """
        Transmit to the simulator the visit of the current task point and
        the travel time of the edge just traversed (skipped on the very
        first visit, when there is no previous point).
        """
        if not self.firstVisit:
            prev = self.taskPath[self.nextTaskPointIndex - 1]
            p = self.taskPath[self.nextTaskPointIndex]

            # create an outgoing message
            self.transmit(
                PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR,
                                 PrimitiveMessage.TYPE_VISIT,
                                 p)
            )

            currentEdge = (prev, p)
            edgeLength = self.time - self.prevPointTime
            self.transmit(
                PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR,
                                 PrimitiveMessage.TYPE_EDGE_LENGTH,
                                 (currentEdge, edgeLength))
            )

        self.firstVisit = False
        self.prevPointTime = self.time
class AgentCoordinatedPatrolStrategy(AgentStrategy):
    """
    Implements a coordinated patrol strategy.
    Given points of interest, each agent
    runs heuristicDivide(), assign ships
    to locations, and transmits it. Agents
    vote and decide on suggestion, move to
    their initial locations, and start
    patroling.
    """

    # "Enum" for mission state (the per-step state machine in composeAction)
    TRANSMIT_INITIAL_LOCATION = 1
    COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT = 2
    VOTE = 3
    MOVING_TO_START = 4
    PATROLING = 5

    def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
        """
        Initializes the strategy.

        @type agentIndex: int
        @param agentIndex: sort of an agent-id - an agent's index in the array of all agents

        @param patrolPoints: the points of interest to patrol, passed to heuristicDivide.run()
        @param edgeLengths: edge-length data between patrol points, passed to heuristicDivide.run()

        @type rulesOfTheSea: boolean
        @param rulesOfTheSea: Tells whether to respect the rules of the sea
        """
        AgentStrategy.__init__(self)

        self.agentIndex = agentIndex
        self.patrolPoints = patrolPoints
        self.edgeLengths = edgeLengths
        self.rulesOfTheSea = rulesOfTheSea
        self.reset()

        # initializing the callbacks for processing messages;
        # maps message type -> name of the handler method (resolved via
        # getattr in processIncomingMsgs)
        self.msgCallbacks = {}
        self.msgCallbacks[PrimitiveMessage.TYPE_START_PATROL] = self.processMsgStart.__name__
        self.msgCallbacks[PrimitiveMessage.TYPE_POSITION] = self.processMsgPosition.__name__
        self.msgCallbacks[PrimitiveMessage.TYPE_PATROL_DIVISION] = self.processMsgPatrolDivision.__name__
        self.msgCallbacks[PrimitiveMessage.TYPE_REACHED_START] = self.processMsgReachedStart.__name__
        # .
        # .
        # .
        # continue adding callbacks

    def reset(self):
        """
        Resets all coordinated-patrol related data members
        """
        self.missionState = self.TRANSMIT_INITIAL_LOCATION
        self.shipPositions = {}         # maps ships to their transmitted positions
        self.ship2patrolData = {}       # would hold agent's suggestion
        self.ship2patrolDataSuggestions = []  # suggestions received from other agents
        self.reachedStartPoint = set()  # agent indices that reported reaching start

        self.myStartingPatrolPosition = None
        self.myPatrolPath = []

        self.navigationTactic = None
        self.patrolStrategy = None

    def composeAction(self, agentWorldModel):
        # Per-step state machine: process messages, then act according to
        # the current mission state.

        self.processIncomingMsgs(agentWorldModel)

        # by default, return a stopping action
        returnedAction = CompositeAction()
        returnedAction.appendAction(PrimitiveAction("stop", []))

        if self.missionState == self.TRANSMIT_INITIAL_LOCATION:
            # broadcast our own position so everyone can compute assignments
            myState = agentWorldModel.getEstimatedExtShipState()
            self.transmit(
                PrimitiveMessage(PrimitiveMessage.TO_ALL,
                                 PrimitiveMessage.TYPE_POSITION,
                                 (agentWorldModel.shipIndex, myState.x, myState.y))
            )
            # move to the next state (TODO: assuming no msgs are lost)
            self.missionState = self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT

        elif self.missionState == self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT:
            numShips = len(self.shipPositions)
            patrolPoints, paths, patrolStartingStates = heuristicDivide.run(numShips,
                self.patrolPoints, self.edgeLengths, debug=False)
            state2ship = self.assignShipsToStates(self.shipPositions, patrolStartingStates)
            # build message: per-ship (starting state, patrol path)
            self.ship2patrolData = {}
            for i, start_and_path in enumerate(zip(patrolStartingStates, paths)):
                start, path = start_and_path
                ship = state2ship[i]
                self.ship2patrolData[ship] = (start, path)
            msgContent = self.ship2patrolData
            self.transmit(
                PrimitiveMessage(PrimitiveMessage.TO_ALL,
                                 PrimitiveMessage.TYPE_PATROL_DIVISION,
                                 msgContent)
            )
            # move to the next state
            self.missionState = self.VOTE

        elif self.missionState == self.VOTE:
            # all agents are expected to compute the same division;
            # disagreement is currently fatal (real voting unimplemented)
            for suggestion in self.ship2patrolDataSuggestions:
                if not suggestion == self.ship2patrolData:
                    print 'suggestion', suggestion
                    print 'mysuggestion', self.ship2patrolData
                    raise Exception("vote failed - need to be implemented")
            # if we are here, everyone agrees
            self.myStartingPatrolPosition, self.myPatrolPath = self.ship2patrolData[self.agentIndex]
            self.missionState = self.MOVING_TO_START

        elif self.missionState == self.MOVING_TO_START:
            if self.navigationTactic == None:
                pos = self.myStartingPatrolPosition  # just shorthand
                self.navigationTactic = AgentPIDTactic(self.agentIndex,
                                                       (pos.x, pos.y),
                                                       self.rulesOfTheSea)
            # if reached target - transmit it
            # NOTE(review): this transmits REACHED_START on every step while
            # done() holds, not just once - confirm receivers tolerate duplicates
            # (reachedStartPoint is a set, so at least bookkeeping is idempotent).
            if self.navigationTactic.done(agentWorldModel):
                msgContent = self.agentIndex
                self.transmit(
                    PrimitiveMessage(PrimitiveMessage.TO_ALL,
                                     PrimitiveMessage.TYPE_REACHED_START,
                                     msgContent)
                )
            returnedAction = self.navigationTactic.composeAction(agentWorldModel)
            if self.allReachedStartingPoints():
                print 'allReachedStartingPoints'
                self.missionState = self.PATROLING

        elif self.missionState == self.PATROLING:
            # delegate to a plain static patrol over our assigned path
            if self.patrolStrategy == None:
                self.patrolStrategy = AgentStaticPatrolStrategy(self.agentIndex,
                                                                self.myPatrolPath,
                                                                self.rulesOfTheSea)
            # get action + msgs(!) from wrapped strategy, since
            # these are the two outputs of composeAction()
            returnedAction = self.patrolStrategy.composeAction(agentWorldModel)
            for m in self.patrolStrategy.getOutgoingMessage().primitiveMsgs: self.transmit(m)

        return returnedAction

    def processIncomingMsgs(self, agentWorldModel):
        """
        Reads incoming msgs from the agentWorldModel
        and clean its queue

        NOTE(review): despite the docstring, no queue-cleaning call is
        visible here - confirm who invokes cleanMsgQueue().

        @type agentWorldModel: AgentWorldModel
        @param agentWorldModel: the world model, which contain msgs.
        """
        for msg in agentWorldModel.incomingMsgs.primitiveMsgs:
            # just a debug check
            if msg.to != PrimitiveMessage.TO_ALL:
                raise Exception("Message doesn't seem to be directed at me")
            # go to callback name, and convert to function using getattr
            getattr(self, self.msgCallbacks[msg.type])(msg.content)

    def assignShipsToStates(self, shipPositions, patrolStartingStates):
        """
        Implements a matching algorithm

        @type shipPositions: map[shipIndex] = (x,y) position
        @param shipPositions: map of the transmitted positions of ships

        @type patrolStartingStates: array of [shipExternalState, ... ] for all ships
        @param patrolStartingStates: the starting states computed by heuristicDivide

        @rtype: map[shipIndex] = shipStartingState
        @return: mapping of each ship to its patrol starting state
        """
        numShips = len(shipPositions)
        if not numShips == len(patrolStartingStates):
            raise Exception("number of ships must be equal to num patrol start points")

        # create a list of (ship, position, distance) tuples, sorted by distance
        # (descending - heaviest edges first, see the heuristic below)
        edges = []
        for shipIndex, position in shipPositions.items():
            # NOTE: the comprehension variable 'state' shadows the module-level
            # 'import state' within this expression only.
            distances = [geometry.distance2D(position, (state.x, state.y)) for state in patrolStartingStates]
            edges += [(shipIndex, i, dist) for i, dist in enumerate(distances)]
        edges = sorted(edges, key=operator.itemgetter(2), reverse=True)

        # start removing distances until a node has only one edge, then create a pair
        # and remove all the other edges with the pair-mate
        shipNumEdges = {}
        posNumEdges = {}
        numOutgoingEdges = numShips
        for i in range(numOutgoingEdges):
            shipNumEdges[i] = numOutgoingEdges
            posNumEdges[i] = numOutgoingEdges

        match = []
        matchedShips = set()      # for bookkeeping
        matchedPositions = set()  # for bookkeeping
        # Heuristic match:
        # scan edges, throwing heaviest until a ship/node has
        # 1 outgoing edge, create a match, keep scanning.
        # This process might not find a match for all, so
        # repeat scans with left nodes/ships w/o a match
        while len(match) < numShips:
            deletedShips = set()
            deletedPositions = set()
            for edge in edges:
                shipIndex, posIndex, dist = edge
                # if we removed all the edges of a node - skip
                if shipIndex in deletedShips or posIndex in deletedPositions:
                    continue

                if shipNumEdges[shipIndex] == 1 or posNumEdges[posIndex] == 1:
                    match.append(edge)
                    deletedShips.add(shipIndex)
                    deletedPositions.add(posIndex)
                    matchedShips.add(shipIndex)
                    matchedPositions.add(posIndex)

                # every scanned edge is "thrown": decrement both endpoints
                shipNumEdges[shipIndex] -= 1
                posNumEdges[posIndex] -= 1

            # remove edges for matched pairs before next round
            newEdges = []
            for edge in edges:
                shipIndex, posIndex, dist = edge
                if shipIndex in matchedShips or posIndex in matchedPositions:
                    continue
                else:
                    newEdges.append(edge)
            edges = newEdges

        # invert the matching: position index -> ship index
        state2Ship = {}
        for edge in match:
            shipIndex, posIndex, dist = edge
            state2Ship[posIndex] = shipIndex

        return state2Ship

    def allReachedStartingPoints(self):
        # True once every ship that transmitted a position has also
        # reported reaching its patrol starting point.
        numShips = len(self.shipPositions)
        if len(self.reachedStartPoint) == numShips:
            return True
        else:
            return False

    def getPath(self):
        return self.myPatrolPath

    # msg processing callbacks

    def processMsgStart(self, msgContent):
        """
        Processes a msg that notifies ships to start patrol

        @type msgContent: None
        @param msgContent: no additional information currently needed
        """
        self.reset()

    def processMsgPosition(self, msgContent):
        """
        Processes a msg that notifies other ship's position

        @type msgContent: a 3-tuple (shipIndex, x, y)
        @param msgContent: the position of the sending ship
        """
        shipIndex, x, y = msgContent
        self.shipPositions[shipIndex] = (x, y)

    def processMsgPatrolDivision(self, msgContent):
        """
        Processes a msg that notifies other ship's patrol suggestion

        @type msgContent: map[int] = (pos, list-of-positions)
        @param msgContent: for each ship, a starting position and a path
        """
        self.ship2patrolDataSuggestions.append(msgContent)

    def processMsgReachedStart(self, msgContent):
        """
        Processes a msg that notifies some ship reached its start pos

        @type msgContent: int
        @param msgContent: an index of an agent that reached start
        """
        self.reachedStartPoint.add(msgContent)
class AgentJoiningPatrolStrategy(AgentCoordinatedPatrolStrategy):
    """
    Implements an agent that waits for some time,
    then joins a coordinated patrol - which is why it subclasses
    AgentCoordinatedPatrolStrategy.
    """

    def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
        """
        Initializes the strategy.

        @type agentIndex: int
        @param agentIndex: sort of an agent-id - an agent's index in the array of all agents

        @param patrolPoints: the points of interest to patrol (forwarded to the base class)
        @param edgeLengths: edge-length data between patrol points (forwarded to the base class)

        @type rulesOfTheSea: boolean
        @param rulesOfTheSea: Tells whether to respect the rules of the sea
        """
        AgentCoordinatedPatrolStrategy.__init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea)

        # number of time steps to wait before joining the patrol
        # (previously the literal 1000 was duplicated in composeAction;
        # it now consistently uses this attribute)
        self.waitingTime = 1000
        self.currentTime = 0

    def composeAction(self, agentWorldModel):
        self.currentTime += 1

        if self.currentTime < self.waitingTime:
            # still waiting - just stop in place
            returnedAction = CompositeAction()
            returnedAction.appendAction(PrimitiveAction("stop", []))

        elif self.currentTime == self.waitingTime:
            # waiting period over: restart the coordinated-patrol state
            # machine and tell everyone to (re)start the patrol
            self.reset()
            self.transmit(
                PrimitiveMessage(PrimitiveMessage.TO_ALL,
                                 PrimitiveMessage.TYPE_START_PATROL,
                                 None)
            )
            returnedAction = CompositeAction()
            returnedAction.appendAction(PrimitiveAction("stop", []))

        else:
            # call base class function - behave as a regular
            # coordinated-patrol agent from now on
            returnedAction = AgentCoordinatedPatrolStrategy.composeAction(self, agentWorldModel)

        return returnedAction
754 755 756 ''' 757 class AgentCirclingStrategy(AgentStrategy): 758 """ 759 NOTE: this class is not in use. 760 Implements a regular circling agent strategy 761 """ 762 763 def __init__(self, speed): 764 AgentStrategy.__init__(self) 765 766 self.speed = speed 767 768 def composeAction(self, agentWorldModel): 769 action = CompositeAction() 770 action.appendAction(PrimitiveAction("setEngineSpeed", [self.speed])) 771 action.appendAction(PrimitiveAction("setSteering", [-1])) 772 return action 773 774 775 class CompleteStateHolonomicRRTSteeringEngineStrategy(AgentStrategy): 776 """ 777 NOTE: this class is out-of-date and should be updated for the 778 current environment and motion model (or actually to convert it into 779 non-holonomic, meaning to incorporate the sea physics constraints 780 into the RRT planner) 781 782 A strategy for an RRT agent, where the world model is 783 a complete state and the set of legal actions are steering and 784 engine speed. RRT is holonomic (i.e. assumes the agent has direct 785 control on the ship position), with no colision detection here. 
786 The logical flow is: 787 complete-world-state model in the agent --> RRT --> Steering&Engine commands 788 """ 789 def __init__(self, agentIndex, initialState, goalPos): 790 AgentStrategy.__init__(self) 791 792 self.agentIndex = agentIndex 793 self.goalPos = goalPos 794 self.actionIndex = 0 795 796 797 798 799 # TODO: Some hard-coded constants that should probably be sent as params 800 801 # Threshold to determine whether at goal 802 self.angleThreshold = 10 803 self.distanceThreshold = 10 804 805 # borders for sampling 806 self.minX = 0 807 self.maxX = 1200 808 self.minY = 0 809 self.maxY = 800 810 self.minAng = 0 811 self.maxAng = 360 812 813 # step-fraction towards a new sampled state 814 self.epsilon = 0.1 815 816 817 818 819 # Compute the RRT path 820 self.path = self.rrtPath(initialState, goalPos) 821 822 823 824 def composeAction(self, agentWorldModel): 825 "Implemeting the AgentStrategy interface." 826 # At the end, do the same action (which should be stop) 827 if self.actionIndex >= len(self.path): 828 return self.path[-1] 829 830 action = self.path[self.actionIndex] 831 self.actionIndex += 1 832 return action 833 834 835 def rrtPath(self, initialState, goalPos): 836 # create initial node 837 initialExternalState = copy.copy(initialState.shipExternalStates[self.agentIndex]) 838 839 # init a tree (TODO: change to realistic distanceUnit and angleUnit?) 840 self.tree = rrtTree.RRTTreeXYOrientationEuclidDist(initialExternalState, 841 distanceUnit=10, 842 angleUnit=5) 843 844 # search 845 currentState = initialExternalState 846 while not self.isGoalState(currentState): 847 currentState = self.extend(self.tree, self.chooseRandomState()) 848 849 return self.tree.getPathTo(currentState) 850 851 852 def extend(self, tree, randomState): 853 """ 854 The EXTEND function from the paper. 855 Here, in this simple version of the algorithms, we just 856 take an epsilon step towards the randomState, without checking 857 the all intermediate states are collision-free. 
858 We also assume holonomic problem in which the computation 859 of the edge (the action that takes us to the new state) 860 is just a simple vector calculation. 861 862 863 Keyword arguments: 864 randomState -- A state that was randomly sampled in space, 865 towards which we take an epsilon step. 866 867 Returns: 868 The new state that was added to the tree. 869 870 """ 871 closestNode = tree.findClosestNode(randomState) 872 closestState = closestNode.state 873 874 # Take an epsilon step towards randomState 875 # TODO: need to normalize attributes with variance 876 closestStateX = closestState.x 877 closestStateY = closestState.y 878 closestStateOrient = closestState.orientation 879 newX = (randomState.x - closestStateX) * self.epsilon + closestStateX 880 newY = (randomState.y - closestStateY) * self.epsilon + closestStateY 881 newOrient = (randomState.orientation - closestStateOrient) * self.epsilon + closestStateOrient 882 883 # Compute the edge - the action to take in order to get to this state 884 # TODO: check that the path to this state is legal 885 xDiff = newX - closestStateX 886 yDiff = newY - closestStateY 887 888 if closestNode.parent != None: 889 oldOrientation = degrees( atan2(closestStateY - closestNode.parent.state.y, 890 closestStateX - closestNode.parent.state.x) ) 891 else: 892 oldOrientation = closestStateOrient 893 # Turn from the old direction to the new direction 894 steeringToApply = degrees( atan2(yDiff, xDiff) ) - oldOrientation 895 896 speedToApply = sqrt( xDiff * xDiff + yDiff * yDiff ) 897 action = CompositeAction() 898 action.appendAction(PrimitiveAction("setEngineSpeed", [speedToApply])) 899 action.appendAction(PrimitiveAction("setSteering", [steeringToApply])) 900 901 # Add the node to the tree 902 newState = state.ShipExternalState(newX, newY, newOrient, speedToApply, 0) 903 parent = closestNode 904 edge = action 905 newNode = rrtTree.RRTNode(newState, parent, edge) 906 tree.addNode(newNode) 907 908 return newState 909 910 def 
isGoalState(self,shipState): 911 "Is the ship within some threshold from the goal." 912 913 point1 = (shipState.x, shipState.y) 914 point2 = (self.goalPos.x, self.goalPos.y) 915 distance = geometry.distance2D(point1, point2) 916 if distance > self.distanceThreshold: 917 return False 918 919 # TODO: fix to deal with angles in a circular way, 920 # e.g. 359 and 1 are at distance 2 and not 358 921 angle1 = shipState.orientation 922 angle2 = self.goalPos.orientation 923 if abs(angle2-angle1) > self.angleThreshold: 924 return False 925 926 return True 927 928 929 def chooseRandomState(self): 930 "Samples a random state with a bias towards the goal." 931 if random.random() < 0.05: 932 return self.goalPos 933 else: 934 return state.ShipExternalState(random.randint(self.minX, self.maxX), 935 random.randint(self.minY, self.maxY), 936 random.randint(self.minAng, self.maxAng), 937 speed = -1, angularSpeed = -1) # speed should be ignored 938 939 940 class MeasureEdgeLengths(AgentStrategy): 941 """ 942 TODO: THIS CLASS IS FOR THE REGRESSION LEARNING 943 CURRENTLY NOT USEFUL YET. 944 NEED TO BE MADE MORE GENERIC WHEN IT BECOMES RELEVANT. 945 A class that wraps some AgentStrategy, and adds 946 some measurements, to get experiment data. 947 The measurements it adds are the number of time steps it 948 took to reach the goal. 949 """ 950 # TODO: THIS CLASS IS FOR THE REGRESSION LEARNING - NEED TO BE 951 # MADE MORE GENERIC WHEN IT BECOMES RELEVANT. 
952 def __init__(self, agentIndex, goalPos, agentStrategy): 953 AgentStrategy.__init__(self) 954 955 self.shipIndex = agentIndex 956 self.goalPos = goalPos 957 self.agentStrategy = agentStrategy 958 # Time counter 959 self.time = 0 960 # filename to append results 961 self.filename = "regression" + str(agentIndex) 962 # boolean - to write env data only in the first time 963 self.writtenEnvData = False 964 965 def composeAction(self, agentWorldModel): 966 # Increase time counter 967 self.time += 1 968 969 estimatedState = agentWorldModel.getEstimatedExtShipState(self.shipIndex) 970 estimatedPos = (estimatedState.x, estimatedState.y) 971 972 973 # For the first time write the conditions to params files 974 # TODO: hack, we only measure the environment at the beginning 975 if not self.writtenEnvData: 976 shipOrient = geometry.orientation2D(estimatedPos, self.goalPos) 977 978 windVector = agentWorldModel.getEstimatedWindVector(self.shipIndex) 979 windSpeed = geometry.distance2D((0,0), windVector) 980 windOrient = geometry.orientation2D((0,0), windVector) 981 print 'windOrient', windOrient 982 windRelOrient = geometry.computeShortestTurn(shipOrient, windOrient) 983 984 currentsVector = agentWorldModel.getEstimatedWaterVector(self.shipIndex) 985 waterSpeed = geometry.distance2D((0,0), currentsVector) 986 currentOrient = geometry.orientation2D((0,0), currentsVector) 987 print 'currentOrient', currentOrient 988 waterRelOrient = geometry.computeShortestTurn(shipOrient, currentOrient) 989 990 xFilename = self.filename + ".x" 991 if not os.path.exists(xFilename): 992 f = open(xFilename, 'w') 993 f.write('[') 994 else: 995 f = open(xFilename, 'r+') 996 f.seek(-1, 2) 997 f.write(', ') 998 f.write('(%.2f, %.2f, %.2f, %.2f)]' % ( windSpeed, 999 windRelOrient, 1000 waterSpeed, 1001 waterRelOrient)) 1002 f.close() 1003 self.writtenEnvData = True 1004 1005 1006 # If at goal - stop 1007 distanceToGoal = geometry.distance2D(estimatedPos, self.goalPos) 1008 if distanceToGoal <= 20: 
1009 yFilename = self.filename + ".y" 1010 if not os.path.exists(yFilename): 1011 f = open(yFilename, 'w') 1012 f.write('[') 1013 else: 1014 f = open(yFilename, 'r+') 1015 f.seek(-1, 2) 1016 f.write(', ') 1017 f.write(str(self.time) + ']') 1018 f.close() 1019 sys.exit() 1020 else: 1021 return self.agentStrategy.composeAction(agentWorldModel) 1022 ''' 1023 1024 1025 1026 1027 1028 1029 1030 ######################################################################## 1031 # The Agent class, encapsulates all the agent's functionality 1032 ########################################################################
class Agent:
    """
    An agent that controls a single ship.

    An agent is built from two collaborating parts: a world model
    (the agent's beliefs about the environment, updated from percepts)
    and a decision-making strategy (which chooses actions given the
    current world model).
    """

    def __init__(self, worldModel, strategy):
        self.worldModel = worldModel
        self.strategy = strategy

    def getAction(self, percepts):
        """
        A template method: fold the new percepts into the world model,
        then delegate action selection to the strategy.

        @type percepts: PerceptList
        @param percepts: A list of percepts sent to the agent for this decision cycle

        @rtype: CompositeAction
        @return: A list of actions to be executed on the ship
        """
        self.worldModel.processPercepts(percepts)
        chosenAction = self.strategy.composeAction(self.worldModel)
        # Drop any queued messages even when the strategy never consumed them.
        self.worldModel.cleanMsgQueue()
        return chosenAction

    def receiveMsg(self, msg):
        """
        Deliver a message to the agent.

        @type msg: PrimitiveMessage
        @param msg: a primitive message sent to the agent.
        """
        # TODO: receiving messages should be part of the
        # world model and not of the strategy - it seems like?
        return self.worldModel.receiveMsg(msg)

    def getOutgoingMessage(self):
        """
        Return whatever message the strategy produced this cycle.

        @rtype: CompositeMessage
        @return: The agents' outgoing message for this decision cycle
        """
        return self.strategy.getOutgoingMessage()