Module agents

Source Code for Module agents

   1  ################################################################################## 
   2  # Copyright (c) 2010, 2011, 2012, 2013, Daniel Urieli, Peter Stone 
   3  # University of Texas at Austin 
   4  # All rights reserved 
   5  #  
   6  # Based On: 
   7  #  
   8  # Copyright (c) 2000-2003, Jelle Kok, University of Amsterdam 
   9  # All rights reserved. 
  10  #  
  11  # Redistribution and use in source and binary forms, with or without 
  12  # modification, are permitted provided that the following conditions are met: 
  13  #  
  14  # 1. Redistributions of source code must retain the above copyright notice, this 
  15  # list of conditions and the following disclaimer. 
  16  #  
  17  # 2. Redistributions in binary form must reproduce the above copyright notice, 
  18  # this list of conditions and the following disclaimer in the documentation 
  19  # and/or other materials provided with the distribution. 
  20  #  
  21  # 3. Neither the name of the University of Amsterdam nor the names of its 
  22  # contributors may be used to endorse or promote products derived from this 
  23  # software without specific prior written permission. 
  24  #  
  25  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
  26  # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  27  # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
  28  # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
  29  # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
  30  # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
  31  # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
  32  # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
  33  # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
  34  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  35  ################################################################################## 
  36   
  37   
  38   
  39  import copy 
  40  import random 
  41  import os 
  42  import sys 
  43  import operator 
  44  from math import * 
  45  import numpy 
  46   
  47  from vec2d import * 
  48  from lineline import * 
  49   
  50  import geometry 
  51  import rrtTree 
  52  import perceptModels 
  53  from actions import * 
  54  import state 
  55  from messages import PrimitiveMessage, CompositeMessage 
  56  import obstacleAvoidance 
  57  from navigationTactics import * 
  58  from scripts import heuristicDivide 
  59   
  60  ######################################################################## 
  61  # 
  62  # This file defines different types of agents. 
  63  # An agent is defined by: 
  64  # - The way it models the world 
   65  # - The way it processes percepts for this given world model 
  66  # - The set of legal actions available to it 
   67  # - Its strategy for choosing actions, based on the set of legal actions, 
  68  #   and based on its world model. 
  69  # 
  70  # The AgentWorldModel decouples the perception part from the action  
  71  # part, thus allowing us to mix any combination of percepts,  
  72  # percept-processing-algorithm, world-model, legal-action-set, 
  73  # and decision-making-strategy. 
  74  # 
  75  # Implementation-wise, an agent has two components: 
  76  # - AgentWorldModel: implements a specific worldmodel, and percept processing 
  77  # - AgentStrategy: for a given AgentWorldModel and legal actions, 
  78  #                  defines a strategy for choosing actions 
  79  # 
  80  ######################################################################## 
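# Illustrative sketch (not part of the original module): wiring a world model
# and a strategy into an Agent. The patrol-path coordinates below are made-up
# example values; percepts are assumed to arrive each cycle as a PerceptList
# containing a CompleteStatePercept.
#
#   worldModel = AgentWorldModelCompleteWorld(shipIndex=0)
#   strategy = AgentStaticPatrolStrategy(agentIndex=0,
#                                        myPatrolPath=[(100, 100), (400, 100), (400, 300)],
#                                        rulesOfTheSea=True)
#   agent = Agent(worldModel, strategy)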
  81   
  82   
  83   
  84   
  85  ######################################################################## 
  86  # AgentWorldModel class. Implements different models an agent  
  87  # can have about the world. 
  88  ######################################################################## 
89 -class AgentWorldModel:
90 """ 91 This class is a base class for all the models an agent 92 can have about the world. 93 A subclass should implement a specific model of the world, 94 and a specific way to process percepts for this model. 95 There can be multiple ways to process percepts for any 96 given model, so an example class hierarchy could be: 97 98 ------------------- 99 | AgentWorldModel | 100 ------------------- 101 / \ 102 ---------- ---------- 103 | Model1 | | Model2 | 104 ---------- ---------- 105 / \ \ 106 ------------------- ------------------ . 107 |with method 1 to | |with method 2 to| . 108 |processPercepts | |processPercepts | . 109 |for Model1 | |for Model1 | 110 ------------------- ------------------ 111 """
112 - def __init__(self):
113 """ 114 Every world model contains a queue for incoming messages. 115 Note that outgoing msgs are inside strategy. 116 TODO: does this design need change? 117 """ 118 self.incomingMsgs = CompositeMessage()
119 120 # Abstract methods
121 - def processPercepts(self, perceptsList):
122 """ 123 For a given concrete model, this method 124 implements a specific method of updating this 125 model from percepts. 126 This is just an extra layer of flexibility, in case 127 there are multiple methods to update percepts for the 128 same model. 129 130 @type perceptsList: PerceptList 131 @param perceptsList: A list of percepts to be processed 132 """ 133 abstract
134
135 - def getEstimatedStatesOfOtherShips(self, selfIndex):
136 """ 137 Get a list of estimated ShipExternalState of all other ships 138 in the system. 139 140 @type selfIndex: int 141 @param selfIndex: The index of the ship controlled by the agent - we don't want to include it in the returned states. 142 143 @rtype: A list of ShipExternalState 144 @return: A list of estimated states of all other ships in the system 145 """ 146 abstract
147
148 - def getEstimatedStatesOfShips(self, shipIndices):
149 """ 150 Get a list of estimated ShipExternalState of all other ships 151 indexed by shipIndices. 152 153 @type selfIndex: list[int] 154 @param selfIndex: The indices of the ships for which location is estimated 155 156 @rtype: A list of ShipExternalState 157 @return: A list of estimated states of all other ships in the system 158 """ 159 abstract
160
161 - def getEstimatedExtShipState(self):
162 """ 163 Get the estimated ShipExternalState as estimated 164 by the world model. 165 166 @rtype: ShipExternalState 167 @return: An estimation of the ship's real state according to our AgentWorldModel. 168 """ 169 abstract
170
171 - def getEstimatedWindVector(self):
172 """ 173 Get the estimated wind vector in the location of 174 the ship. 175 176 @rtype: numpy array (vector) 177 @return: an estimation of the wind vector (speed and direction) in the agent's location according to our AgentWorldModel. 178 """ 179 abstract
180
181 - def getEstimatedWaterVector(self):
182 """ 183 Get the estimated water vector in the location of 184 the ship. 185 186 @rtype: numpy array (vector) 187 @return: an estimation of the water vector (speed and direction) in the agent's location according to our AgentWorldModel. 188 """ 189 abstract
190
191 - def getEstimatedObstaclesList(self):
192 """ 193 Get a list of estimated obstacle shapes. 194 195 @rtype: Obstacles 196 @return: an estimation of the obstacles shapes (and positions) according to our AgentWorldModel. 197 """ 198 abstract
199 200 201 # non-abstract methods
202 - def cleanMsgQueue(self):
203 self.incomingMsgs = CompositeMessage()
204
205 - def receiveMsg(self, msg):
206 """ 207 Append a message to the queue 208 """ 209 self.incomingMsgs.appendMsg(msg) 210 # just a debug check 211 if len(self.incomingMsgs.primitiveMsgs) > 100: 212 raise Exception("Did you forget to read and clean messages")
213 214 # TODO: more interface functions needed? 215 216
217 -class AgentWorldModelCompleteWorld(AgentWorldModel):
218 """ 219 Just a skeleton for testing purposes. 220 The world model is exactly the world state. 221 """
222 - def __init__(self, shipIndex):
223 AgentWorldModel.__init__(self) 224 self.state = None 225 self.shipIndex = shipIndex
226 227 # override
228 - def processPercepts(self, perceptsList):
229 "Just search for the CompleteStatePercept and save the state from it" 230 for percept in perceptsList.getPercepts(): 231 if isinstance(percept, perceptModels.CompleteStatePercept): 232 # TODO: removed deepcopy because took time - but shallow copy should be fine 233 #self.state = copy.deepcopy(percept.state) 234 self.state = copy.copy(percept.state) 235 return 236 raise Exception("Couldn't find a CompleteStatePercept")
237
238 - def getEstimatedStatesOfOtherShips(self, selfIndex):
239 """ 240 Implements the abstract method 241 """ 242 states = self.state.shipExternalStates 243 return states[0:selfIndex] + states[selfIndex + 1:]
244
245 - def getEstimatedStatesOfShips(self, shipIndices):
246 """ 247 Implements the abstract method 248 """ 249 states = self.state.shipExternalStates 250 return [states[i] for i in shipIndices]
251
252 - def getEstimatedExtShipState(self):
253 """ 254 Implements the abstract method 255 """ 256 return self.state.shipExternalStates[self.shipIndex]
257
258 - def getEstimatedWindVector(self):
259 """ 260 Implements the abstract method 261 """ 262 shipState = self.state.shipExternalStates[self.shipIndex] 263 posX, posY = shipState.x, shipState.y 264 return self.state.sea.wind.getSpeedVectorInLocation(posX, posY)
265
266 - def getEstimatedWaterVector(self):
267 """ 268 Implements the abstract method 269 """ 270 shipState = self.state.shipExternalStates[self.shipIndex] 271 posX, posY = shipState.x, shipState.y 272 return self.state.sea.waterCurrents.getSpeedVectorInLocation(posX, posY)
273
274 - def getEstimatedObstaclesList(self):
275 """ 276 Implements the abstract method 277 """ 278 return self.state.sea.obstacles.getObstaclesList()
279 280 281 ######################################################################## 282 # Strategy class. Connects an AgentWorldModel to the actions. 283 # A strategy is created for each pair of specific AgentWorldModel 284 # and a set of available actions (ship actuators). 285 # Note that for such a pair we can define more than one strategy, 286 # implementing different decision algorithms. 287 ########################################################################
288 -class AgentStrategy:
289 """ 290 An interface for specific decision making strategy. 291 It should be defined for a given AgentWorldModel, 292 a given set of legal actions, and a given decision making algorithm. 293 294 For each step, the flow is: 295 AgentWorldModel --> Specific_AgentStrategy --> Actions-to-execute 296 """
297 - def __init__(self):
298 # outgoingMsg is a message an agent (strategy) can send to 299 # other agents and to the simulation environment 300 self.outgoingMsg = CompositeMessage()
301
302 - def composeAction(self, agentWorldModel):
303 "Given the (inferred) state of the world, decide on a set of actions." 304 abstract
305
306 - def getOutgoingMessage(self):
307 msg = self.outgoingMsg 308 # reset if needed 309 if len(msg.primitiveMsgs) > 0: 310 self.outgoingMsg = CompositeMessage() 311 return msg
312
313 - def transmit(self, msg):
314 self.outgoingMsg.appendMsg(msg)
315 316
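# Minimal AgentStrategy subclass, shown as an illustrative sketch (not part of
# the original module). It demonstrates the composeAction() contract: build a
# CompositeAction out of PrimitiveActions and return it each decision cycle.
#
#   class AgentStopStrategy(AgentStrategy):
#       "Always issues a 'stop' action - a do-nothing placeholder strategy."
#       def composeAction(self, agentWorldModel):
#           action = CompositeAction()
#           action.appendAction(PrimitiveAction("stop", []))
#           return action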
317 -class AgentStaticPatrolStrategy(AgentStrategy):
318 """ 319 Implements a basic patrol strategy. 320 Given points of interest, just travel 321 along the cycle they define 322 """
323 - def __init__(self, agentIndex, myPatrolPath, rulesOfTheSea):
324 """ 325 Initializes the strategy. 326 327 @type agentIndex: int 328 @param agentIndex: sort of an agent-id - an agent's index in the array of all agents 329 330 @type path: a list of 2-tuples 331 @param path: a list of (x,y) points that define a cycle for the agent to travel along 332 333 @type rulesOfTheSea: boolean 334 @param rulesOfTheSea: Tells whether to respect the rules of the sea 335 """ 336 AgentStrategy.__init__(self) 337 338 self.agentIndex = agentIndex 339 self.myPatrolPath = myPatrolPath 340 self.rulesOfTheSea = rulesOfTheSea 341 self.nextTaskPointIndex = 0 342 343 self.navigationTactic = None 344 345 self.time = 0 346 self.prevPointTime = 0 347 self.firstVisit = True
348
349 - def composeAction(self, agentWorldModel):
350 """ 351 Implements the abstract method, see documentation 352 of the abstract method. 353 """ 354 # if done, just stop for now 355 # if self.nextTaskPointIndex == len(self.myPatrolPath): 356 # action = CompositeAction() 357 # action.appendAction(PrimitiveAction("stop", [])) 358 # return action 359 self.time += 1 360 361 if self.navigationTactic and self.navigationTactic.done(agentWorldModel): 362 self.recordStatistics() 363 364 self.setNextPointAndNavigationTacticIfNeeded(agentWorldModel) 365 366 return self.navigationTactic.composeAction(agentWorldModel)
367
368 - def setNextPointAndNavigationTacticIfNeeded(self, agentWorldModel):
369 if self.navigationTactic == None: 370 # i.e. first time we enter this function 371 goalPos = self.myPatrolPath[self.nextTaskPointIndex] 372 self.setNavigationTactic(agentWorldModel, goalPos) 373 374 elif self.navigationTactic.done(agentWorldModel): 375 self.nextTaskPointIndex = (self.nextTaskPointIndex + 1) % len(self.myPatrolPath) 376 goalPos = self.myPatrolPath[self.nextTaskPointIndex] 377 self.setNavigationTactic(agentWorldModel, goalPos)
378
379 - def setNavigationTactic(self, agentWorldModel, nextGoal):
380 estimatedState = agentWorldModel.getEstimatedExtShipState() 381 # why round? because this value goes deep into 382 # rrtTree.findClosestNode and floating point arithmetic 383 # significantly slows it down! 384 estimatedPos = (int(round(estimatedState.x)), int(round(estimatedState.y))) 385 386 if obstacleAvoidance.pathBlocked(estimatedPos, nextGoal, agentWorldModel.getEstimatedObstaclesList()): 387 self.navigationTactic = AgentRRTTactic( self.agentIndex, 388 estimatedPos, 389 nextGoal, 390 agentWorldModel.getEstimatedObstaclesList(), 391 self.rulesOfTheSea) 392 else: 393 self.navigationTactic = AgentPIDTactic(self.agentIndex, 394 nextGoal, 395 self.rulesOfTheSea)
396 397
398 - def getPath(self):
399 return self.myPatrolPath
400 401
402 - def recordStatistics(self):
403 # record results 404 if not self.firstVisit: 405 prev = self.myPatrolPath[self.nextTaskPointIndex - 1] 406 p = self.myPatrolPath[self.nextTaskPointIndex] 407 408 # create an outgoing message 409 self.transmit( 410 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 411 PrimitiveMessage.TYPE_VISIT, 412 p) 413 ) 414 415 currentEdge = (prev, p) 416 edgeLength = self.time - self.prevPointTime 417 self.transmit( 418 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 419 PrimitiveMessage.TYPE_EDGE_LENGTH, 420 (currentEdge, edgeLength)) 421 ) 422 423 self.firstVisit = False 424 self.prevPointTime = self.time
425 426
427 -class AgentCoordinatedPatrolStrategy(AgentStrategy):
428 """ 429 Implements a coordinated patrol strategy. 430 Given points of interest, each agent 431 runs heuristicDivide(), assign ships 432 to locations, and transmits it. Agents 433 vote and decide on suggestion, move to 434 their initial locations, and start 435 patroling. 436 """ 437 438 # "Enum" for mission state 439 TRANSMIT_INITIAL_LOCATION = 1 440 COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT = 2 441 VOTE = 3 442 MOVING_TO_START = 4 443 PATROLING = 5 444 445
446 - def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
447 """ 448 Initializes the strategy. 449 450 @type agentIndex: int 451 @param agentIndex: sort of an agent-id - an agent's index in the array of all agents 452 453 @type agentIndex: string 454 @param agentIndex: the name of the file containing edges and nodes from measurements data 455 456 @type rulesOfTheSea: boolean 457 @param rulesOfTheSea: Tells whether to respect the rules of the sea 458 """ 459 AgentStrategy.__init__(self) 460 461 self.agentIndex = agentIndex 462 self.patrolPoints = patrolPoints 463 self.edgeLengths = edgeLengths 464 self.rulesOfTheSea = rulesOfTheSea 465 # stats related vars 466 self.time = 0 467 self.prevPointTime = 0 468 self.firstVisit = True 469 # initialize coordinated-patrol related data members 470 self.reset() 471 472 # initializing the callbacks for processing messages 473 self.msgCallbacks = {} 474 self.msgCallbacks[PrimitiveMessage.TYPE_START_PATROL] = self.processMsgStart.__name__ 475 self.msgCallbacks[PrimitiveMessage.TYPE_POSITION] = self.processMsgPosition.__name__ 476 self.msgCallbacks[PrimitiveMessage.TYPE_PATROL_DIVISION] = self.processMsgPatrolDivision.__name__ 477 self.msgCallbacks[PrimitiveMessage.TYPE_REACHED_START] = self.processMsgReachedStart.__name__
478 # . 479 # . 480 # . 481 # continue adding callbacks 482 483
484 - def reset(self):
485 """ 486 Resets all coordinated-patrol related data members 487 """ 488 self.missionState = self.TRANSMIT_INITIAL_LOCATION 489 self.shipPositions = {} # maps ships to their transmitted positions 490 self.ship2patrolData = {} # would hold agent's suggestion 491 self.ship2patrolDataSuggestions = [] 492 self.reachedStartPoint = set() 493 494 self.myStartingPatrolPosition = None 495 self.myPatrolPath = [] 496 self.nextTaskPointIndex = None 497 self.navigationTactic = None
498 #self.patrolStrategy = None 499 500
501 - def composeAction(self, agentWorldModel):
502 # update time 503 self.time += 1 504 505 self.processIncomingMsgs(agentWorldModel) 506 507 # by default, return a stopping action 508 returnedAction = CompositeAction() 509 returnedAction.appendAction(PrimitiveAction("stop", [])) 510 511 if self.missionState == self.TRANSMIT_INITIAL_LOCATION: 512 myState = agentWorldModel.getEstimatedExtShipState() 513 self.transmit( 514 PrimitiveMessage(PrimitiveMessage.TO_ALL, 515 PrimitiveMessage.TYPE_POSITION, 516 (agentWorldModel.shipIndex, myState.x, myState.y)) 517 ) 518 # move to the next state (TODO: assuming no msgs are lost) 519 self.missionState = self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT 520 521 522 elif self.missionState == self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT: 523 numShips = len(self.shipPositions) 524 patrolPoints, paths, patrolStartingStates = heuristicDivide.run(numShips, 525 self.patrolPoints, self.edgeLengths, debug=False) 526 state2ship = self.assignShipsToStates(self.shipPositions, patrolStartingStates) 527 # build message 528 self.ship2patrolData = {} 529 for i, start_and_path in enumerate(zip(patrolStartingStates, paths)): 530 start, path = start_and_path 531 ship = state2ship[i] 532 self.ship2patrolData[ship] = (start, path) 533 msgContent = self.ship2patrolData 534 self.transmit( 535 PrimitiveMessage(PrimitiveMessage.TO_ALL, 536 PrimitiveMessage.TYPE_PATROL_DIVISION, 537 msgContent) 538 ) 539 # move to the next state 540 self.missionState = self.VOTE 541 542 543 elif self.missionState == self.VOTE: 544 for suggestion in self.ship2patrolDataSuggestions: 545 if not suggestion == self.ship2patrolData: 546 print 'suggestion', suggestion 547 print 'mysuggestion', self.ship2patrolData 548 raise Exception("vote failed - need to be implemented") 549 # if we are here, everyone agrees 550 self.myStartingPatrolPosition, self.myPatrolPath = self.ship2patrolData[self.agentIndex] 551 self.missionState = self.MOVING_TO_START 552 553 554 elif self.missionState == self.MOVING_TO_START: 555 if self.navigationTactic == None: 556 pos = self.myStartingPatrolPosition #just shorthand 557 self.setNavigationTactic(agentWorldModel, 558 (pos.x, pos.y)) 559 #self.navigationTactic = AgentPIDTactic(self.agentIndex, 560 # (pos.x, pos.y), 561 # self.rulesOfTheSea) 562 # if reached target - transmit it 563 if self.navigationTactic.done(agentWorldModel): 564 msgContent = self.agentIndex 565 self.transmit( 566 PrimitiveMessage(PrimitiveMessage.TO_ALL, 567 PrimitiveMessage.TYPE_REACHED_START, 568 msgContent) 569 ) 570 # this will restore to PID tactic (since already at goal), to wait 571 # until all are ready 572 pos = self.myStartingPatrolPosition #just shorthand 573 self.setNavigationTactic(agentWorldModel, 574 (pos.x, pos.y)) 575 returnedAction = self.navigationTactic.composeAction(agentWorldModel) 576 if self.allReachedStartingPoints(): 577 print 'allReachedStartingPoints' 578 self.missionState = self.PATROLING 579 #initialize patrol points 580 self.nextTaskPointIndex = 0 581 self.setNavigationTactic(agentWorldModel, 582 self.myPatrolPath[self.nextTaskPointIndex]) 583 584 elif self.missionState == self.PATROLING: 585 if self.navigationTactic.done(agentWorldModel): 586 self.nextTaskPointIndex = (self.nextTaskPointIndex + 1) % len(self.myPatrolPath) 587 self.setNavigationTactic(agentWorldModel, 588 self.myPatrolPath[self.nextTaskPointIndex]) 589 # also, record stats if segment is done 590 self.recordStatistics() 591 592 #if self.patrolStrategy == None: 593 # self.patrolStrategy = AgentStaticPatrolStrategy(self.agentIndex, 594 # self.myPatrolPath, 
595 # self.rulesOfTheSea) 596 ## get action + msgs(!) from wrapped strategy, since 597 ## these are the two outputs of composeAction() 598 returnedAction = self.navigationTactic.composeAction(agentWorldModel) 599 #for m in self.patrolStrategy.getOutgoingMessage().primitiveMsgs: self.transmit(m) 600 601 return returnedAction
602
603 - def setNavigationTactic(self, agentWorldModel, nextGoal):
604 estimatedState = agentWorldModel.getEstimatedExtShipState() 605 # why round? because this value goes deep into 606 # rrtTree.findClosestNode and floating point arithmetic 607 # significantly slows it down 608 estimatedPos = (int(round(estimatedState.x)), int(round(estimatedState.y))) 609 610 if obstacleAvoidance.pathBlocked(estimatedPos, nextGoal, agentWorldModel.getEstimatedObstaclesList()): 611 self.navigationTactic = AgentRRTTactic( self.agentIndex, 612 estimatedPos, 613 nextGoal, 614 agentWorldModel.getEstimatedObstaclesList(), 615 self.rulesOfTheSea) 616 else: 617 self.navigationTactic = AgentPIDTactic(self.agentIndex, 618 nextGoal, 619 self.rulesOfTheSea)
620
621 - def recordStatistics(self):
622 # record results 623 if not self.firstVisit: 624 prev = self.myPatrolPath[self.nextTaskPointIndex - 1] 625 p = self.myPatrolPath[self.nextTaskPointIndex] 626 627 # create an outgoing message 628 self.transmit( 629 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 630 PrimitiveMessage.TYPE_VISIT, 631 p) 632 ) 633 634 currentEdge = (prev, p) 635 edgeLength = self.time - self.prevPointTime 636 self.transmit( 637 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 638 PrimitiveMessage.TYPE_EDGE_LENGTH, 639 (currentEdge, edgeLength)) 640 ) 641 642 self.firstVisit = False 643 self.prevPointTime = self.time
644
645 - def processIncomingMsgs(self, agentWorldModel):
646 """ 647 Reads incoming msgs from the agentWorldModel 648 and clean its queue 649 650 @type agentWorldModel: AgentWorldModel 651 @param agentWorldModel: the world model, which contain msgs. 652 """ 653 for msg in agentWorldModel.incomingMsgs.primitiveMsgs: 654 # just a debug check 655 if msg.to != PrimitiveMessage.TO_ALL: 656 raise Exception("Message doesn't seem to be directed at me") 657 # go to callback name, and convert to function using getattr 658 getattr(self, self.msgCallbacks[msg.type])(msg.content)
659
660 - def assignShipsToStates(self, shipPositions, patrolStartingStates):
661 """ 662 Implements a matching algorithm 663 664 @type shipPositions: map[shipIndex] = (x,y) position 665 @param shipPositions: map of the transmitted positions of ships 666 667 @type patrolStartingStates: array of [shipExternalState, ... ] for all ships 668 @param patrolStartingStates: the starting states computed by heuristicDivide 669 670 @rtype: map[shipIndex] = shipStartingState 671 @return: mapping of each ship to its patrol starting state 672 """ 673 numShips = len(shipPositions) 674 if not numShips == len(patrolStartingStates): 675 raise Exception("number of ships must be equal to num patrol start points") 676 677 # create a list of (ship, position, distance) tuples, sorted by distance 678 edges = [] 679 for shipIndex, position in shipPositions.items(): 680 distances = [geometry.distance2D(position, (state.x, state.y)) for state in patrolStartingStates] 681 edges += [(shipIndex, i, dist) for i, dist in enumerate(distances)] 682 edges = sorted(edges, key=operator.itemgetter(2), reverse=True) 683 684 # start removing distances until a node has only one edge, then create a pair 685 # and remove all the other edges with the pair-mate 686 shipNumEdges = {} 687 posNumEdges = {} 688 numOutgoingEdges = numShips 689 for i in range(numOutgoingEdges): 690 shipNumEdges[i] = numOutgoingEdges 691 posNumEdges[i] = numOutgoingEdges 692 693 match = [] 694 matchedShips = set() # for bookkeeping 695 matchedPositions = set() # for bookkeeping 696 # Heuristic match: 697 # scan edges, throwing heaviest until a ship/node has 698 # 1 outgoing edge, create a match, keep scanning. 699 # This process might not find a match for all, so 700 # repeat scans with left nodes/ships w/o a match 701 while len(match) < numShips: 702 deletedShips = set() 703 deletedPositions = set() 704 for edge in edges: 705 shipIndex, posIndex, dist = edge 706 # if we removed all the edges of a node - skip 707 if shipIndex in deletedShips or posIndex in deletedPositions: 708 continue 709 710 if shipNumEdges[shipIndex] == 1 or posNumEdges[posIndex] == 1: 711 match.append(edge) 712 deletedShips.add(shipIndex) 713 deletedPositions.add(posIndex) 714 matchedShips.add(shipIndex) 715 matchedPositions.add(posIndex) 716 717 shipNumEdges[shipIndex] -= 1 718 posNumEdges[posIndex] -= 1 719 720 # remove edges for matched pairs before next round 721 newEdges = [] 722 for edge in edges: 723 shipIndex, posIndex, dist = edge 724 if shipIndex in matchedShips or posIndex in matchedPositions: 725 continue 726 else: 727 newEdges.append(edge) 728 edges = newEdges 729 730 731 732 state2Ship = {} 733 for edge in match: 734 shipIndex, posIndex, dist = edge 735 state2Ship[posIndex] = shipIndex 736 737 return state2Ship
738
739 - def allReachedStartingPoints(self):
740 numShips = len(self.shipPositions) 741 if len(self.reachedStartPoint) == numShips: 742 return True 743 else: 744 return False
745
746 - def getPath(self):
747 return self.myPatrolPath
748 749 # msg processing callbacks
750 - def processMsgStart(self, msgContent):
751 """ 752 Processes a msg that notifies ships to start patrol 753 754 @type msgContent: None 755 @param msgContent: no additional information currently needed 756 """ 757 self.reset()
758
759 - def processMsgPosition(self, msgContent):
760 """ 761 Processes a msg that notifies other ship's position 762 763 @type msgContent: a 3-tuple (shipIndex, x, y) 764 @param msgContent: the position of the sending ship 765 """ 766 shipIndex, x, y = msgContent 767 self.shipPositions[shipIndex] = (x,y)
768
769 - def processMsgPatrolDivision(self, msgContent):
770 """ 771 Processes a msg that notifies other ship's patrol suggestion 772 773 @type msgContent: map[int] = (pos, list-of-positions) 774 @param msgContent: for each ship, a starting position and a path 775 """ 776 self.ship2patrolDataSuggestions.append(msgContent) 777
778 - def processMsgReachedStart(self, msgContent):
779 """ 780 Processes a msg that notifies some ship reached its start pos 781 782 @type msgContent: int 783 @param msgContent: an index of an agent that reached start 784 """ 785 self.reachedStartPoint.add(msgContent)
786 787
788 -class AgentJoiningPatrolStrategy(AgentCoordinatedPatrolStrategy):
789 """ 790 Implements an agent that waits for some time, 791 then joins a coordinated patrol - this is why it is a coordinated 792 patrol 793 """ 794
795 - def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
796 """ 797 Initializes the strategy. 798 799 @type agentIndex: int 800 @param agentIndex: sort of an agent-id - an agent's index in the array of all agents 801 802 @type agentIndex: string 803 @param agentIndex: the name of the file containing edges and nodes from measurements data 804 805 @type rulesOfTheSea: boolean 806 @param rulesOfTheSea: Tells whether to respect the rules of the sea 807 """ 808 AgentCoordinatedPatrolStrategy.__init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea) 809 810 self.waitingTime = 1000 811 self.currentTime = 0
812
813 - def composeAction(self, agentWorldModel):
814 self.currentTime += 1 815 if self.currentTime < self.waitingTime: 816 returnedAction = CompositeAction() 817 returnedAction.appendAction(PrimitiveAction("stop", [])) 818 819 elif self.currentTime == self.waitingTime: 820 self.reset() 821 self.transmit( 822 PrimitiveMessage(PrimitiveMessage.TO_ALL, 823 PrimitiveMessage.TYPE_START_PATROL, 824 None) 825 ) 826 returnedAction = CompositeAction() 827 returnedAction.appendAction(PrimitiveAction("stop", [])) 828 829 else: 830 # call base class function 831 returnedAction = AgentCoordinatedPatrolStrategy.composeAction(self, agentWorldModel) 832 833 return returnedAction
834 835
836 -class AgentTrackingPatrolStrategy(AgentStrategy):
837 """ 838 Implements a basic patrol strategy. 839 Given points of interest, just travel 840 along the cycle they define 841 """ 842 843 # "Enum" for mission state 844 TRANSMIT_LOCATION = 1 845 COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT = 2 846 VOTE_AND_SET_NAV_TARGET = 3 847 #SET_NEW_TARGET = 4 848 849
850 - def __init__(self, agentIndex, trackedShipIndices, 851 trackingShipIndices, trackingDistance, positionOffsets, 852 rulesOfTheSea):
853 """ 854 Initializes the strategy. 855 856 @type agentIndex: int 857 @param agentIndex: sort of an agent-id - an agent's index in the array of all agents 858 859 @type trackedShipIndices: a list of integer indices 860 @param trackedShipIndices: a list of indices of tracked ships 861 862 @type trackingShipIndices: a list of integer indices 863 @param trackingShipIndices: a list of indices of tracking ships 864 865 @type trackingDistance: a floating point number 866 @param trackingDistance: a (close to) maximal distance for tracking/sensing 867 868 @type positionOffsets: a list of numpy 2d vectors 869 @param positionOffsets: offsets for all tracking ships 870 871 @type rulesOfTheSea: boolean 872 @param rulesOfTheSea: Tells whether to respect the rules of the sea 873 """ 874 AgentStrategy.__init__(self) 875 876 self.agentIndex = agentIndex 877 self.trackedShipIndices = trackedShipIndices 878 self.trackingShipIndices = trackingShipIndices 879 self.trackingDistance = trackingDistance 880 self.positionOffsets = positionOffsets 881 self.rulesOfTheSea = rulesOfTheSea 882 883 self.navigationTactic = None 884 885 self.reset() 886 887 self.missionState = self.TRANSMIT_LOCATION 888 889 # initializing the callbacks for processing messages 890 self.msgCallbacks = {} 891 self.msgCallbacks[PrimitiveMessage.TYPE_TRACKING_POINT] = self.processMsgTrackingPoint.__name__ 892 self.msgCallbacks[PrimitiveMessage.TYPE_NAVIGATION_CENTRAL_POINT] = self.processMsgNavigationCentralPoint.__name__ 893 self.msgCallbacks[PrimitiveMessage.TYPE_POSITIONS_ASSIGNMENT] = self.processMsgPositionAssignments.__name__ 894 895 self.time = 0
896
897 - def reset(self):
898 self.trackingPoints = {} 899 self.navCentralPoint = {} 900 self.ship2positionSuggestions = []
901
902 - def composeAction(self, agentWorldModel):
903 """ 904 Implements the abstract method, see documentation 905 of the abstract method. 906 """ 907 self.time += 1 908 909 self.processIncomingMsgs(agentWorldModel) 910 911 #if self.navigationTactic and self.navigationTactic.done(agentWorldModel): 912 # self.recordStatistics() 913 914 self.setNextPointAndNavigationTacticIfNeeded(agentWorldModel) 915 916 return self.navigationTactic.composeAction(agentWorldModel)
917
918 - def setNextPointAndNavigationTacticIfNeeded(self, agentWorldModel):
919 if self.navigationTactic == None: 920 # initially create some tactic - to stand in place 921 shipState = agentWorldModel.getEstimatedExtShipState() 922 self.setNavigationTactic(agentWorldModel, (shipState.x, shipState.y)) 923 924 if self.missionState == self.TRANSMIT_LOCATION: 925 self.trackingPoint = self.computedAvgPosOfTrackedShips(agentWorldModel) 926 self.transmit( 927 PrimitiveMessage(PrimitiveMessage.TO_ALL, 928 PrimitiveMessage.TYPE_TRACKING_POINT, 929 self.trackingPoint) 930 ) 931 self.navigationCentralPoint = self.computedAvgPosOfTrackingShips(agentWorldModel) 932 self.transmit( 933 PrimitiveMessage(PrimitiveMessage.TO_ALL, 934 PrimitiveMessage.TYPE_NAVIGATION_CENTRAL_POINT, 935 self.navigationCentralPoint) 936 ) 937 self.missionState = self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT 938 939 elif self.missionState == self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT: 940 goal = self.computeNextGoal(agentWorldModel) #numpy 941 targets = self.generateNewTargets(goal, self.trackingPoint) 942 trackingShipPositions = self.getTrackingShipLocations(agentWorldModel) 943 self.ship2state = self.assignShipsToStates(trackingShipPositions, targets) 944 msgContent = self.ship2state 945 self.transmit( 946 PrimitiveMessage(PrimitiveMessage.TO_ALL, 947 PrimitiveMessage.TYPE_POSITIONS_ASSIGNMENT, 948 msgContent) 949 ) 950 # move to the next state 951 self.missionState = self.VOTE_AND_SET_NAV_TARGET 952 953 elif self.missionState == self.VOTE_AND_SET_NAV_TARGET: 954 for suggestion in self.ship2positionSuggestions: 955 if not suggestion == self.ship2state: 956 print 'suggestion', suggestion 957 print 'mysuggestion', self.ship2state 958 raise Exception("vote failed - need to be implemented") 959 # if we are here, everyone agrees 960 self.myStartingPatrolPosition = self.ship2state[self.agentIndex] 961 goalPos = self.myStartingPatrolPosition 962 self.setNavigationTactic(agentWorldModel, goalPos) 963 # back to start 964 self.missionState = self.TRANSMIT_LOCATION 965 self.reset()
966 967
968 - def computeNextGoal(self, agentWorldModel):
969 """ 970 Computes the next goal for the tracking strategy. 971 """ 972 trackingPoint = self.computedAvgPosOfTrackedShips(agentWorldModel) 973 navigationCentralPoint = self.computedAvgPosOfTrackingShips(agentWorldModel) 974 distToTarget = geometry.distance2D(navigationCentralPoint, trackingPoint) 975 newTrgt = navigationCentralPoint + (trackingPoint - navigationCentralPoint) * (1 - self.trackingDistance / distToTarget) 976 return newTrgt
977
978 - def computedAvgPosOfTrackedShips(self, agentWorldModel):
979 trackedShipPositions = self.getTrackedShipLocations(agentWorldModel).values() 980 trackingPoint = geometry.averagePoint2D(trackedShipPositions) 981 self.trackingPoint = trackingPoint # record for usage by GUI 982 return trackingPoint
983
984 - def computedAvgPosOfTrackingShips(self, agentWorldModel):
985 trackingShipPositions = self.getTrackingShipLocations(agentWorldModel).values() 986 navigationCentralPoint = geometry.averagePoint2D(trackingShipPositions) 987 return navigationCentralPoint
988
989 - def setNavigationTactic(self, agentWorldModel, nextGoal):
990 estimatedState = agentWorldModel.getEstimatedExtShipState() 991 # why round? because this value goes deep into 992 # rrtTree.findClosestNode and floating point arithmetic 993 # significantly slows it down 994 estimatedPos = (int(round(estimatedState.x)), int(round(estimatedState.y))) 995 996 if obstacleAvoidance.pathBlocked(estimatedPos, nextGoal, agentWorldModel.getEstimatedObstaclesList()): 997 self.navigationTactic = AgentRRTTactic( self.agentIndex, 998 estimatedPos, 999 nextGoal, 1000 agentWorldModel.getEstimatedObstaclesList(), 1001 self.rulesOfTheSea) 1002 else: 1003 self.navigationTactic = AgentPIDTactic(self.agentIndex, 1004 nextGoal, 1005 self.rulesOfTheSea)
1006
1007 - def getPath(self):
1008 return [self.navigationTactic.goalPoint]
1009
1010 - def recordStatistics(self):
1011 # record results 1012 if not self.firstVisit: 1013 prev = self.myPatrolPath[self.nextTaskPointIndex - 1] 1014 p = self.myPatrolPath[self.nextTaskPointIndex] 1015 1016 # create an outgoing message 1017 self.transmit( 1018 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 1019 PrimitiveMessage.TYPE_VISIT, 1020 p) 1021 ) 1022 1023 currentEdge = (prev, p) 1024 edgeLength = self.time - self.prevPointTime 1025 self.transmit( 1026 PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 1027 PrimitiveMessage.TYPE_EDGE_LENGTH, 1028 (currentEdge, edgeLength)) 1029 ) 1030 1031 self.firstVisit = False 1032 self.prevPointTime = self.time
1033
1034 - def processIncomingMsgs(self, agentWorldModel):
1035 """ 1036 Reads incoming msgs from the agentWorldModel 1037 and clean its queue 1038 1039 @type agentWorldModel: AgentWorldModel 1040 @param agentWorldModel: the world model, which contain msgs. 1041 """ 1042 for msg in agentWorldModel.incomingMsgs.primitiveMsgs: 1043 # just a debug check 1044 if msg.to != PrimitiveMessage.TO_ALL: 1045 raise Exception("Message doesn't seem to be directed at me") 1046 # go to callback name, and convert to function using getattr 1047 getattr(self, self.msgCallbacks[msg.type])(msg.content)
1048
1049 - def processMsgTrackingPoint(self, msgContent):
1050 """ 1051 Processes a msg that notifies the tracking point - avg location of tracked 1052 ships 1053 1054 @type msgContent: a tuple (int, numpy.array[(x,y)]) 1055 @param msgContent: (suggestingShipIndex, suggestedTrackingPoint) 1056 """ 1057 suggestingShipIndex, trackingPoint = msgContent 1058 self.trackingPoints[suggestingShipIndex] = trackingPoint
1059
1060 - def processMsgNavigationCentralPoint(self, msgContent):
1061 """ 1062 Processes a msg that notifies the central navigation point - 1063 the point from which ship offsets are computed 1064 1065 @type msgContent: a tuple (int, numpy.array[(x,y)]) 1066 @param msgContent: (suggestingShipIndex, suggestedNavigationPoint) 1067 """ 1068 suggestingShipIndex, navPoint = msgContent 1069 self.navCentralPoint[suggestingShipIndex] = navPoint
1070
1071 - def processMsgPositionAssignments(self, msgContent):
1072 """ 1073 Processes a msg that notifies other ship's patrol suggestion 1074 1075 @type msgContent: map[int] = (pos) 1076 @param msgContent: for each ship, a target position 1077 """ 1078 #self.ship2patrolDataSuggestions.append(msgContent) 1079 self.ship2positionSuggestions.append(msgContent) 1080
1081 - def assignShipsToStates(self, shipPositions, targets):
1082 """ 1083 Implements a matching algorithm 1084 1085 @type shipPositions: map[shipIndex] = (x,y) position 1086 @param shipPositions: map of the transmitted positions of ships 1087 1088 @type targets: array of [(x,y), ... ] for all ships 1089 @param targets: the target locations for tracking 1090 1091 @rtype: map[shipIndex] = shipStartingState 1092 @return: mapping of each ship to its patrol starting state 1093 """ 1094 numShips = len(shipPositions) 1095 if not numShips == len(targets): 1096 raise Exception("number of ships must be equal to num patrol start points") 1097 1098 # create a list of (ship, position, distance) tuples, sorted by distance 1099 edges = [] 1100 for shipIndex, position in shipPositions.items(): 1101 distances = [geometry.distance2D(position, target) for target in targets] 1102 edges += [(shipIndex, i, dist) for i, dist in enumerate(distances)] 1103 edges = sorted(edges, key=operator.itemgetter(2), reverse=True) 1104 1105 # start removing distances until a node has only one edge, then create a pair 1106 # and remove all the other edges with the pair-mate 1107 shipNumEdges = {} 1108 posNumEdges = {} 1109 numOutgoingEdges = numShips 1110 for i in range(numOutgoingEdges): 1111 posNumEdges[i] = numOutgoingEdges 1112 for shipIndex in shipPositions.keys(): 1113 shipNumEdges[shipIndex] = numOutgoingEdges 1114 1115 match = [] 1116 matchedShips = set() # for bookkeeping 1117 matchedPositions = set() # for bookkeeping 1118 # Heuristic match: 1119 # scan edges, throwing heaviest until a ship/node has 1120 # 1 outgoing edge, create a match, keep scanning. 1121 # This process might not find a match for all, so 1122 # repeat scans with left nodes/ships w/o a match 1123 while len(match) < numShips: 1124 deletedShips = set() 1125 deletedPositions = set() 1126 for edge in edges: 1127 shipIndex, posIndex, dist = edge 1128 # if we removed all the edges of a node - skip 1129 if shipIndex in deletedShips or posIndex in deletedPositions: 1130 continue 1131 1132 if shipNumEdges[shipIndex] == 1 or posNumEdges[posIndex] == 1: 1133 match.append(edge) 1134 deletedShips.add(shipIndex) 1135 deletedPositions.add(posIndex) 1136 matchedShips.add(shipIndex) 1137 matchedPositions.add(posIndex) 1138 1139 shipNumEdges[shipIndex] -= 1 1140 posNumEdges[posIndex] -= 1 1141 1142 # remove edges for matched pairs before next round 1143 newEdges = [] 1144 for edge in edges: 1145 shipIndex, posIndex, dist = edge 1146 if shipIndex in matchedShips or posIndex in matchedPositions: 1147 continue 1148 else: 1149 newEdges.append(edge) 1150 edges = newEdges 1151 1152 1153 ship2state = {} 1154 for edge in match: 1155 shipIndex, posIndex, dist = edge 1156 ship2state[shipIndex] = tuple(targets[posIndex]) 1157 1158 return ship2state
1159
1160 - def getTrackedShipLocations(self, agentWorldModel):
1161 """ 1162 returns a list of x,y coordinates of tracked ships 1163 """ 1164 states = dict( (i, (s.x, s.y)) for i,s in 1165 zip(self.trackedShipIndices, 1166 agentWorldModel.getEstimatedStatesOfShips(self.trackedShipIndices) ) ) 1167 return states
1168
1169 - def getTrackingShipLocations(self, agentWorldModel):
1170 """ 1171 returns a list of x,y coordinates of tracking ships 1172 """ 1173 states = dict( (i, (s.x, s.y)) for i,s in 1174 zip(self.trackingShipIndices, 1175 agentWorldModel.getEstimatedStatesOfShips(self.trackingShipIndices) ) ) 1176 return states
1177
1178 - def generateNewTargets(self, goal, trackingPoint):
1179 """ 1180 Given the new goal and vector offsets - generate new targets. 1181 """ 1182 # offsets are computed w.r.t. line to target 1183 direction = trackingPoint - goal 1184 direction = direction / numpy.linalg.norm(direction) 1185 perpendicular = numpy.dot(numpy.array([[0,1], [-1,0]]), direction) 1186 goal = numpy.array(goal) 1187 return [ goal + offset[0] * direction + offset[1] * perpendicular for offset in self.positionOffsets]
1188 1189 1190 1191 # class AgentRandomPatrolStrategy(AgentStrategy): 1192 # """ 1193 # Implements a basic patrol strategy. 1194 # Given points of interest, just travel 1195 # along the cycle they define 1196 # """ 1197 # def __init__(self, agentIndex, patrolArea, rulesOfTheSea): 1198 # """ 1199 # Initializes the strategy. 1200 # 1201 # @type agentIndex: int 1202 # @param agentIndex: sort of an agent-id - an agent's index in the array of all agents 1203 # 1204 # @type rulesOfTheSea: boolean 1205 # @param rulesOfTheSea: Tells whether to respect the rules of the sea 1206 # """ 1207 # AgentStrategy.__init__(self) 1208 # 1209 # self.agentIndex = agentIndex 1210 # self.patrolArea = patrolArea 1211 # self.rulesOfTheSea = rulesOfTheSea 1212 # 1213 # def composeAction(self, agentWorldModel): 1214 # """ 1215 # Implements the abstract method, see documentation 1216 # of the abstract method. 1217 # """ 1218 # self.time += 1 1219 # 1220 # if self.navigationTactic and self.navigationTactic.done(agentWorldModel): 1221 # self.recordStatistics() 1222 # 1223 # self.setNextPointAndNavigationTacticIfNeeded(agentWorldModel) 1224 # 1225 # return self.navigationTactic.composeAction(agentWorldModel) 1226 # 1227 # def setNextPointAndNavigationTacticIfNeeded(self, agentWorldModel): 1228 # if self.navigationTactic == None or self.navigationTactic.done(agentWorldModel): 1229 # # i.e. first time we enter this function 1230 # goalPos = self.getRandomGoal(self.patrolArea) 1231 # self.setNavigationTactic(agentWorldModel, goalPos) 1232 # 1233 # def setNavigationTactic(self, agentWorldModel, nextGoal): 1234 # estimatedState = agentWorldModel.getEstimatedExtShipState() 1235 # # why round? because this value goes deep into 1236 # # rrtTree.findClosestNode and floating point arithmetic 1237 # # significantly slows it down! 1238 # estimatedPos = (int(round(estimatedState.x)), int(round(estimatedState.y))) 1239 # 1240 # if obstacleAvoidance.pathBlocked(estimatedPos, nextGoal, agentWorldModel.getEstimatedObstaclesList()): 1241 # self.navigationTactic = AgentRRTTactic( self.agentIndex, 1242 # estimatedPos, 1243 # nextGoal, 1244 # agentWorldModel.getEstimatedObstaclesList(), 1245 # self.rulesOfTheSea) 1246 # else: 1247 # self.navigationTactic = AgentPIDTactic(self.agentIndex, 1248 # nextGoal, 1249 # self.rulesOfTheSea) 1250 # 1251 # def recordStatistics(self): 1252 # # record results 1253 # if not self.firstVisit: 1254 # prev = self.myPatrolPath[self.nextTaskPointIndex - 1] 1255 # p = self.myPatrolPath[self.nextTaskPointIndex] 1256 # 1257 # # create an outgoing message 1258 # self.transmit( 1259 # PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 1260 # PrimitiveMessage.TYPE_VISIT, 1261 # p) 1262 # ) 1263 # 1264 # currentEdge = (prev, p) 1265 # edgeLength = self.time - self.prevPointTime 1266 # self.transmit( 1267 # PrimitiveMessage(PrimitiveMessage.TO_SIMULATOR, 1268 # PrimitiveMessage.TYPE_EDGE_LENGTH, 1269 # (currentEdge, edgeLength)) 1270 # ) 1271 # 1272 # self.firstVisit = False 1273 # self.prevPointTime = self.time 1274 # 1275 # def getRandomGoal(self, patrolArea): 1276 # p1, p2 = patrolArea 1277 # x1, y1 = p1 1278 # x2, y2 = p2 1279 # goalX = random.randint(x1, x2) 1280 # goalY = random.randint(y1, y2) 1281 # return (goalX, goalY) 1282 # 1283 1284 1285 #class AgentCirclingStrategy(AgentStrategy): 1286 # """ 1287 # NOTE: this class is not in use. 
1288 # Implements a regular circling agent strategy 1289 # """ 1290 # 1291 # def __init__(self, speed): 1292 # AgentStrategy.__init__(self) 1293 # 1294 # self.speed = speed 1295 # 1296 # def composeAction(self, agentWorldModel): 1297 # action = CompositeAction() 1298 # action.appendAction(PrimitiveAction("setEngineSpeed", [self.speed])) 1299 # action.appendAction(PrimitiveAction("setSteering", [-1])) 1300 # return action 1301 # 1302 # 1303 #class CompleteStateHolonomicRRTSteeringEngineStrategy(AgentStrategy): 1304 # """ 1305 # NOTE: this class is out-of-date and should be updated for the 1306 # current environment and motion model (or actually to convert it into 1307 # non-holonomic, meaning to incorporate the sea physics constraints 1308 # into the RRT planner) 1309 # 1310 # A strategy for an RRT agent, where the world model is 1311 # a complete state and the set of legal actions are steering and 1312 # engine speed. RRT is holonomic (i.e. assumes the agent has direct 1313 # control on the ship position), with no colision detection here. 1314 # The logical flow is: 1315 # complete-world-state model in the agent --> RRT --> Steering&Engine commands 1316 # """ 1317 # def __init__(self, agentIndex, initialState, goalPos): 1318 # AgentStrategy.__init__(self) 1319 # 1320 # self.agentIndex = agentIndex 1321 # self.goalPos = goalPos 1322 # self.actionIndex = 0 1323 # 1324 # 1325 # 1326 # 1327 # # TODO: Some hard-coded constants that should probably be sent as params 1328 # 1329 # # Threshold to determine whether at goal 1330 # self.angleThreshold = 10 1331 # self.distanceThreshold = 10 1332 # 1333 # # borders for sampling 1334 # self.minX = 0 1335 # self.maxX = 1200 1336 # self.minY = 0 1337 # self.maxY = 800 1338 # self.minAng = 0 1339 # self.maxAng = 360 1340 # 1341 # # step-fraction towards a new sampled state 1342 # self.epsilon = 0.1 1343 # 1344 # 1345 # 1346 # 1347 # # Compute the RRT path 1348 # self.path = self.rrtPath(initialState, goalPos) 1349 # 1350 # 1351 # 1352 # def composeAction(self, agentWorldModel): 1353 # "Implemeting the AgentStrategy interface." 1354 # # At the end, do the same action (which should be stop) 1355 # if self.actionIndex >= len(self.path): 1356 # return self.path[-1] 1357 # 1358 # action = self.path[self.actionIndex] 1359 # self.actionIndex += 1 1360 # return action 1361 # 1362 # 1363 # def rrtPath(self, initialState, goalPos): 1364 # # create initial node 1365 # initialExternalState = copy.copy(initialState.shipExternalStates[self.agentIndex]) 1366 # 1367 # # init a tree (TODO: change to realistic distanceUnit and angleUnit?) 1368 # self.tree = rrtTree.RRTTreeXYOrientationEuclidDist(initialExternalState, 1369 # distanceUnit=10, 1370 # angleUnit=5) 1371 # 1372 # # search 1373 # currentState = initialExternalState 1374 # while not self.isGoalState(currentState): 1375 # currentState = self.extend(self.tree, self.chooseRandomState()) 1376 # 1377 # return self.tree.getPathTo(currentState) 1378 # 1379 # 1380 # def extend(self, tree, randomState): 1381 # """ 1382 # The EXTEND function from the paper. 1383 # Here, in this simple version of the algorithms, we just 1384 # take an epsilon step towards the randomState, without checking 1385 # the all intermediate states are collision-free. 1386 # We also assume holonomic problem in which the computation 1387 # of the edge (the action that takes us to the new state) 1388 # is just a simple vector calculation. 
1389 # 1390 # 1391 # Keyword arguments: 1392 # randomState -- A state that was randomly sampled in space, 1393 # towards which we take an epsilon step. 1394 # 1395 # Returns: 1396 # The new state that was added to the tree. 1397 # 1398 # """ 1399 # closestNode = tree.findClosestNode(randomState) 1400 # closestState = closestNode.state 1401 # 1402 # # Take an epsilon step towards randomState 1403 # # TODO: need to normalize attributes with variance 1404 # closestStateX = closestState.x 1405 # closestStateY = closestState.y 1406 # closestStateOrient = closestState.orientation 1407 # newX = (randomState.x - closestStateX) * self.epsilon + closestStateX 1408 # newY = (randomState.y - closestStateY) * self.epsilon + closestStateY 1409 # newOrient = (randomState.orientation - closestStateOrient) * self.epsilon + closestStateOrient 1410 # 1411 # # Compute the edge - the action to take in order to get to this state 1412 # # TODO: check that the path to this state is legal 1413 # xDiff = newX - closestStateX 1414 # yDiff = newY - closestStateY 1415 # 1416 # if closestNode.parent != None: 1417 # oldOrientation = degrees( atan2(closestStateY - closestNode.parent.state.y, 1418 # closestStateX - closestNode.parent.state.x) ) 1419 # else: 1420 # oldOrientation = closestStateOrient 1421 # # Turn from the old direction to the new direction 1422 # steeringToApply = degrees( atan2(yDiff, xDiff) ) - oldOrientation 1423 # 1424 # speedToApply = sqrt( xDiff * xDiff + yDiff * yDiff ) 1425 # action = CompositeAction() 1426 # action.appendAction(PrimitiveAction("setEngineSpeed", [speedToApply])) 1427 # action.appendAction(PrimitiveAction("setSteering", [steeringToApply])) 1428 # 1429 # # Add the node to the tree 1430 # newState = state.ShipExternalState(newX, newY, newOrient, speedToApply, 0) 1431 # parent = closestNode 1432 # edge = action 1433 # newNode = rrtTree.RRTNode(newState, parent, edge) 1434 # tree.addNode(newNode) 1435 # 1436 # return newState 1437 # 1438 # def isGoalState(self,shipState): 1439 # "Is the ship within some threshold from the goal." 1440 # 1441 # point1 = (shipState.x, shipState.y) 1442 # point2 = (self.goalPos.x, self.goalPos.y) 1443 # distance = geometry.distance2D(point1, point2) 1444 # if distance > self.distanceThreshold: 1445 # return False 1446 # 1447 # # TODO: fix to deal with angles in a circular way, 1448 # # e.g. 359 and 1 are at distance 2 and not 358 1449 # angle1 = shipState.orientation 1450 # angle2 = self.goalPos.orientation 1451 # if abs(angle2-angle1) > self.angleThreshold: 1452 # return False 1453 # 1454 # return True 1455 # 1456 # 1457 # def chooseRandomState(self): 1458 # "Samples a random state with a bias towards the goal." 1459 # if random.random() < 0.05: 1460 # return self.goalPos 1461 # else: 1462 # return state.ShipExternalState(random.randint(self.minX, self.maxX), 1463 # random.randint(self.minY, self.maxY), 1464 # random.randint(self.minAng, self.maxAng), 1465 # speed = -1, angularSpeed = -1) # speed should be ignored 1466 # 1467 # 1468 #class MeasureEdgeLengths(AgentStrategy): 1469 # """ 1470 # TODO: this class is for the regression learning 1471 # currently not useful yet. 1472 # need to be made more generic when it becomes relevant. 1473 # A class that wraps some AgentStrategy, and adds 1474 # some measurements, to get experiment data. 1475 # The measurements it adds are the number of time steps it 1476 # took to reach the goal. 
1477 # """ 1478 # # TODO: this class is for the regression learning - need to be 1479 # # made more generic when it becomes relevant. 1480 # def __init__(self, agentIndex, goalPos, agentStrategy): 1481 # AgentStrategy.__init__(self) 1482 # 1483 # self.shipIndex = agentIndex 1484 # self.goalPos = goalPos 1485 # self.agentStrategy = agentStrategy 1486 # # Time counter 1487 # self.time = 0 1488 # # filename to append results 1489 # self.filename = "regression" + str(agentIndex) 1490 # # boolean - to write env data only in the first time 1491 # self.writtenEnvData = False 1492 # 1493 # def composeAction(self, agentWorldModel): 1494 # # Increase time counter 1495 # self.time += 1 1496 # 1497 # estimatedState = agentWorldModel.getEstimatedExtShipState(self.shipIndex) 1498 # estimatedPos = (estimatedState.x, estimatedState.y) 1499 # 1500 # 1501 # # For the first time write the conditions to params files 1502 # # TODO: hack, we only measure the environment at the beginning 1503 # if not self.writtenEnvData: 1504 # shipOrient = geometry.orientation2D(estimatedPos, self.goalPos) 1505 # 1506 # windVector = agentWorldModel.getEstimatedWindVector(self.shipIndex) 1507 # windSpeed = geometry.distance2D((0,0), windVector) 1508 # windOrient = geometry.orientation2D((0,0), windVector) 1509 # print 'windOrient', windOrient 1510 # windRelOrient = geometry.computeShortestTurn(shipOrient, windOrient) 1511 # 1512 # currentsVector = agentWorldModel.getEstimatedWaterVector(self.shipIndex) 1513 # waterSpeed = geometry.distance2D((0,0), currentsVector) 1514 # currentOrient = geometry.orientation2D((0,0), currentsVector) 1515 # print 'currentOrient', currentOrient 1516 # waterRelOrient = geometry.computeShortestTurn(shipOrient, currentOrient) 1517 # 1518 # xFilename = self.filename + ".x" 1519 # if not os.path.exists(xFilename): 1520 # f = open(xFilename, 'w') 1521 # f.write('[') 1522 # else: 1523 # f = open(xFilename, 'r+') 1524 # f.seek(-1, 2) 1525 # f.write(', ') 1526 # f.write('(%.2f, %.2f, %.2f, %.2f)]' % ( windSpeed, 1527 # windRelOrient, 1528 # waterSpeed, 1529 # waterRelOrient)) 1530 # f.close() 1531 # self.writtenEnvData = True 1532 # 1533 # 1534 # # If at goal - stop 1535 # distanceToGoal = geometry.distance2D(estimatedPos, self.goalPos) 1536 # if distanceToGoal <= 20: 1537 # yFilename = self.filename + ".y" 1538 # if not os.path.exists(yFilename): 1539 # f = open(yFilename, 'w') 1540 # f.write('[') 1541 # else: 1542 # f = open(yFilename, 'r+') 1543 # f.seek(-1, 2) 1544 # f.write(', ') 1545 # f.write(str(self.time) + ']') 1546 # f.close() 1547 # sys.exit() 1548 # else: 1549 # return self.agentStrategy.composeAction(agentWorldModel) 1550 1551 1552 1553 1554 1555 1556 1557 1558 ######################################################################## 1559 # The Agent class, encapsulates all the agent's functionality 1560 ########################################################################
1561 -class Agent:
1562 """ 1563 This is a class for an agent that controls a ship. 1564 An agent has two main components: it's world model, 1565 and its decision-making strategy. 1566 """
1567 - def __init__(self, worldModel, strategy):
1568 self.worldModel = worldModel 1569 self.strategy = strategy
1570
1571 - def getAction(self, percepts):
1572 """ 1573 A template method. 1574 Choose an action based on current percepts 1575 1576 @type percepts: PerceptList 1577 @param percepts: A list of percepts sent to the agent for this decision cycle 1578 1579 @rtype: CompositeAction 1580 @return: A list of actions to be executed on the ship 1581 """ 1582 self.worldModel.processPercepts(percepts) 1583 action = self.strategy.composeAction(self.worldModel) 1584 self.worldModel.cleanMsgQueue() # clean msgs, even if not processed 1585 return action
1586
1587 - def receiveMsg(self, msg):
1588 """ 1589 msg is received by the agent using this function 1590 1591 @type msg: PrimitiveMessage 1592 @param msg: a primitive message sent to the agent. 1593 """ 1594 # TODO: receiving messages should be part of the 1595 # world model and not of the strategy - it seems like? 1596 return self.worldModel.receiveMsg(msg)
1597
1598 - def getOutgoingMessage(self):
1599 """ 1600 Returns any message that strategy created 1601 1602 @rtype: CompositeMessage 1603 @return: The agents' outgoing message for this decision cycle 1604 """ 1605 return self.strategy.getOutgoingMessage()
1606
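# Illustrative per-cycle usage of the Agent interface (a sketch, not part of
# the original module). The names 'percepts' and 'msgs' are hypothetical
# stand-ins for whatever drives the simulation loop:
#
#   for msg in msgs:                          # messages addressed to this agent
#       agent.receiveMsg(msg)
#   action = agent.getAction(percepts)        # percepts: a PerceptList for this cycle
#   outgoing = agent.getOutgoingMessage()     # CompositeMessage to be delivered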