1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39 import copy
40 import random
41 import os
42 import sys
43 import operator
44 from math import *
45
46 from vec2d import *
47 from lineline import *
48
49 import geometry
50 import rrtTree
51 import perceptModels
52 from actions import *
53 import state
54 from messages import PrimitiveMessage, CompositeMessage
55 import obstacleAvoidance
56 from navigationTactics import *
57 from scripts import heuristicDivide
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
89 """
90 This class is a base class for all the models an agent
91 can have about the world.
92 A subclass should implement a specific model of the world,
93 and a specific way to process percepts for this model.
94 There can be multiple ways to process percepts for any
95 given model, so an example class hierarchy could be:
96
97 -------------------
98 | AgentWorldModel |
99 -------------------
100 / \
101 ---------- ----------
102 | Model1 | | Model2 |
103 ---------- ----------
104 / \ \
105 ------------------- ------------------ .
106 |with method 1 to | |with method 2 to| .
107 |processPercepts | |processPercepts | .
108 |for Model1 | |for Model1 |
109 ------------------- ------------------
110 """
112 """
113 Every world model contains a queue for incoming messages.
114 Note that outgoing msgs are inside strategy.
115 TODO: does this design need change?
116 """
117 self.incomingMsgs = CompositeMessage()
118
119
121 """
122 For a given concrete model, this method
123 implements a specific method of updating this
124 model from percepts.
125 This is just an extra layer of flexibility, in case
126 there are multiple methods to update percepts for the
127 same model.
128
129 @type perceptsList: PerceptList
130 @param perceptsList: A list of percepts to be processed
131 """
132 abstract
133
135 """
136 Get a list of estimated ShipExternalState of all other ships
137 in the system.
138
139 @type selfIndex: int
140 @param selfIndex: The index of the ship controlled by the agent - we don't want to include it in the returned states.
141
142 @rtype: A list of ShipExternalState
143 @return: A list of estimated states of all other ships in the system
144 """
145 abstract
146
148 """
149 Get the estimated ShipExternalState as estimated
150 by the world model.
151
152 @rtype: ShipExternalState
153 @return: An estimation of the ship's real state according to our AgentWorldModel.
154 """
155 abstract
156
158 """
159 Get the estimated wind vector in the location of
160 the ship.
161
162 @rtype: numpy array (vector)
163 @return: an estimation of the wind vector (speed and direction) in the agent's location according to our AgentWorldModel.
164 """
165 abstract
166
168 """
169 Get the estimated water vector in the location of
170 the ship.
171
172 @rtype: numpy array (vector)
173 @return: an estimation of the water vector (speed and direction) in the agent's location according to our AgentWorldModel.
174 """
175 abstract
176
178 """
179 Get a list of estimated obstacle shapes.
180
181 @rtype: Obstacles
182 @return: an estimation of the obstacles shapes (and positions) according to our AgentWorldModel.
183 """
184 abstract
185
186
187
190
def receiveMsg(self, msg):
    """
    Append a message to the incoming-messages queue.

    @type msg: PrimitiveMessage
    @param msg: a primitive message delivered to this agent's world model
    """
    self.incomingMsgs.appendMsg(msg)

    # A queue this large means some strategy never consumed its messages.
    if len(self.incomingMsgs.primitiveMsgs) > 100:
        raise Exception("Did you forget to read and clean messages")
199
200
201
202
204 """
205 Just a skeleton for testing purposes.
206 The world model is exactly the world state.
207 """
212
213
223
225 """
226 Implements the abstract method
227 """
228 states = self.state.shipExternalStates
229 return states[0:selfIndex] + states[selfIndex + 1:]
230
232 """
233 Implements the abstract method
234 """
235 return self.state.shipExternalStates[self.shipIndex]
236
238 """
239 Implements the abstract method
240 """
241 shipState = self.state.shipExternalStates[self.shipIndex]
242 posX, posY = shipState.x, shipState.y
243 return self.state.sea.wind.getSpeedVectorInLocation(posX, posY)
244
246 """
247 Implements the abstract method
248 """
249 shipState = self.state.shipExternalStates[self.shipIndex]
250 posX, posY = shipState.x, shipState.y
251 return self.state.sea.waterCurrents.getSpeedVectorInLocation(posX, posY)
252
254 """
255 Implements the abstract method
256 """
257 return self.state.sea.obstacles.getObstaclesList()
258
259
260
261
262
263
264
265
266
268 """
269 An interface for specific decision making strategy.
270 It should be defined for a given AgentWorldModel,
271 a given set of legal actions, and a given decision making algorithm.
272
273 For each step, the flow is:
274 AgentWorldModel --> Specific_AgentStrategy --> Actions-to-execute
275 """
280
def composeAction(self, agentWorldModel):
    "Given the (inferred) state of the world, decide on a set of actions."
    # Deliberate NameError if a subclass forgets to override this method
    # (the file's idiom for abstract methods).
    abstract
284
def getOutgoingMessage(self):
    """
    Return the strategy's pending outgoing CompositeMessage.

    If any primitive messages accumulated, the internal buffer is swapped
    for a fresh CompositeMessage so each message is delivered exactly once.

    @rtype: CompositeMessage
    @return: the messages produced since the previous call
    """
    msg = self.outgoingMsg

    # Start a new buffer only when the current one actually holds data.
    if len(msg.primitiveMsgs) > 0:
        self.outgoingMsg = CompositeMessage()
    return msg
291
294
295
296
298 """
299 Implements a basic patrol strategy.
300 Given points of interest, just travel
301 along the cycle they define
302 """
def __init__(self, agentIndex, taskPath, rulesOfTheSea):
    """
    Initializes the strategy.

    @type agentIndex: int
    @param agentIndex: sort of an agent-id - an agent's index in the array of all agents

    @type taskPath: a list of 2-tuples
    @param taskPath: a list of (x,y) points that define a cycle for the agent to travel along

    @type rulesOfTheSea: boolean
    @param rulesOfTheSea: Tells whether to respect the rules of the sea
    """
    AgentStrategy.__init__(self)

    self.agentIndex = agentIndex
    self.taskPath = taskPath
    self.rulesOfTheSea = rulesOfTheSea
    # Index into taskPath of the next waypoint to visit.
    self.nextTaskPointIndex = 0

    # Low-level navigation tactic toward the current waypoint (set lazily).
    self.navigationTactic = None

    # Step counters used for per-leg statistics.
    self.time = 0
    self.prevPointTime = 0
    self.firstVisit = True
328
330 """
331 Implements the abstract method, see documentation
332 of the abstract method.
333 """
334
335
336
337
338
339 self.time += 1
340
341 if self.navigationTactic == None:
342
343 goalPos = self.taskPath[self.nextTaskPointIndex]
344 self.setNavigationTactic(agentWorldModel, goalPos)
345
346 elif self.navigationTactic.done(agentWorldModel):
347 self.recordStatistics()
348 self.nextTaskPointIndex = (self.nextTaskPointIndex + 1) % len(self.taskPath)
349 goalPos = self.taskPath[self.nextTaskPointIndex]
350 self.setNavigationTactic(agentWorldModel, goalPos)
351
352 return self.navigationTactic.composeAction(agentWorldModel)
353
354
372
373
376
377
401
402
404 """
405 Implements a coordinated patrol strategy.
406 Given points of interest, each agent
407 runs heuristicDivide(), assign ships
408 to locations, and transmits it. Agents
409 vote and decide on suggestion, move to
410 their initial locations, and start
411 patroling.
412 """
413
414
415 TRANSMIT_INITIAL_LOCATION = 1
416 COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT = 2
417 VOTE = 3
418 MOVING_TO_START = 4
419 PATROLING = 5
420
421
422 - def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
449
450
451
452
453
454
def reset(self):
    """
    Resets all coordinated-patrol related data members
    """
    # Restart the mission state machine from the beginning.
    self.missionState = self.TRANSMIT_INITIAL_LOCATION
    self.shipPositions = {}
    self.ship2patrolData = {}
    self.ship2patrolDataSuggestions = []
    self.reachedStartPoint = set()

    self.myStartingPatrolPosition = None
    self.myPatrolPath = []

    self.navigationTactic = None
    self.patrolStrategy = None
470
471
473
474 self.processIncomingMsgs(agentWorldModel)
475
476
477 returnedAction = CompositeAction()
478 returnedAction.appendAction(PrimitiveAction("stop", []))
479
480 if self.missionState == self.TRANSMIT_INITIAL_LOCATION:
481 myState = agentWorldModel.getEstimatedExtShipState()
482 self.transmit(
483 PrimitiveMessage(PrimitiveMessage.TO_ALL,
484 PrimitiveMessage.TYPE_POSITION,
485 (agentWorldModel.shipIndex, myState.x, myState.y))
486 )
487
488 self.missionState = self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT
489
490
491 elif self.missionState == self.COMPUTE_PATROL_ASSIGNMENTS_AND_TRANSMIT:
492 numShips = len(self.shipPositions)
493 patrolPoints, paths, patrolStartingStates = heuristicDivide.run(numShips,
494 self.patrolPoints, self.edgeLengths, debug=False)
495 state2ship = self.assignShipsToStates(self.shipPositions, patrolStartingStates)
496
497 self.ship2patrolData = {}
498 for i, start_and_path in enumerate(zip(patrolStartingStates, paths)):
499 start, path = start_and_path
500 ship = state2ship[i]
501 self.ship2patrolData[ship] = (start, path)
502 msgContent = self.ship2patrolData
503 self.transmit(
504 PrimitiveMessage(PrimitiveMessage.TO_ALL,
505 PrimitiveMessage.TYPE_PATROL_DIVISION,
506 msgContent)
507 )
508
509 self.missionState = self.VOTE
510
511
512 elif self.missionState == self.VOTE:
513 for suggestion in self.ship2patrolDataSuggestions:
514 if not suggestion == self.ship2patrolData:
515 print 'suggestion', suggestion
516 print 'mysuggestion', self.ship2patrolData
517 raise Exception("vote failed - need to be implemented")
518
519 self.myStartingPatrolPosition, self.myPatrolPath = self.ship2patrolData[self.agentIndex]
520 self.missionState = self.MOVING_TO_START
521
522
523 elif self.missionState == self.MOVING_TO_START:
524 if self.navigationTactic == None:
525 pos = self.myStartingPatrolPosition
526 self.navigationTactic = AgentPIDTactic(self.agentIndex,
527 (pos.x, pos.y),
528 self.rulesOfTheSea)
529
530 if self.navigationTactic.done(agentWorldModel):
531 msgContent = self.agentIndex
532 self.transmit(
533 PrimitiveMessage(PrimitiveMessage.TO_ALL,
534 PrimitiveMessage.TYPE_REACHED_START,
535 msgContent)
536 )
537 returnedAction = self.navigationTactic.composeAction(agentWorldModel)
538 if self.allReachedStartingPoints():
539 print 'allReachedStartingPoints'
540 self.missionState = self.PATROLING
541
542 elif self.missionState == self.PATROLING:
543 if self.patrolStrategy == None:
544 self.patrolStrategy = AgentStaticPatrolStrategy(self.agentIndex,
545 self.myPatrolPath,
546 self.rulesOfTheSea)
547
548
549 returnedAction = self.patrolStrategy.composeAction(agentWorldModel)
550 for m in self.patrolStrategy.getOutgoingMessage().primitiveMsgs: self.transmit(m)
551
552 return returnedAction
553
def processIncomingMsgs(self, agentWorldModel):
    """
    Reads incoming msgs from the agentWorldModel
    and clean its queue

    @type agentWorldModel: AgentWorldModel
    @param agentWorldModel: the world model, which contain msgs.
    """
    for msg in agentWorldModel.incomingMsgs.primitiveMsgs:
        # Broadcast-only protocol: anything addressed elsewhere is an error.
        if msg.to != PrimitiveMessage.TO_ALL:
            raise Exception("Message doesn't seem to be directed at me")

        # Dispatch by message type to the registered handler method.
        getattr(self, self.msgCallbacks[msg.type])(msg.content)
568
def assignShipsToStates(self, shipPositions, patrolStartingStates):
    """
    Implements a matching algorithm

    @type shipPositions: map[shipIndex] = (x,y) position
    @param shipPositions: map of the transmitted positions of ships

    @type patrolStartingStates: array of [shipExternalState, ... ] for all ships
    @param patrolStartingStates: the starting states computed by heuristicDivide

    @rtype: map[shipIndex] = shipStartingState
    @return: mapping of each ship to its patrol starting state
    """
    numShips = len(shipPositions)
    if not numShips == len(patrolStartingStates):
        raise Exception("number of ships must be equal to num patrol start points")

    # Build the complete bipartite edge list (ship, positionIndex, distance),
    # ordered farthest-first.
    edges = []
    for shipIndex, position in shipPositions.items():
        distances = [geometry.distance2D(position, (state.x, state.y)) for state in patrolStartingStates]
        edges += [(shipIndex, i, dist) for i, dist in enumerate(distances)]
    edges = sorted(edges, key=operator.itemgetter(2), reverse=True)

    # Remaining-edge counters per ship and per position (complete bipartite
    # graph, so every node starts with numShips edges).
    shipNumEdges = {}
    posNumEdges = {}
    numOutgoingEdges = numShips
    for i in range(numOutgoingEdges):
        shipNumEdges[i] = numOutgoingEdges
        posNumEdges[i] = numOutgoingEdges

    match = []
    matchedShips = set()
    matchedPositions = set()

    # Greedy heuristic: scan edges farthest-first, decrementing the
    # endpoints' counters; commit an edge when either endpoint is down to
    # its last edge, then prune edges of matched endpoints and repeat.
    while len(match) < numShips:
        deletedShips = set()
        deletedPositions = set()
        for edge in edges:
            shipIndex, posIndex, dist = edge

            # Skip edges touching endpoints matched during this pass.
            if shipIndex in deletedShips or posIndex in deletedPositions:
                continue

            if shipNumEdges[shipIndex] == 1 or posNumEdges[posIndex] == 1:
                match.append(edge)
                deletedShips.add(shipIndex)
                deletedPositions.add(posIndex)
                matchedShips.add(shipIndex)
                matchedPositions.add(posIndex)

            shipNumEdges[shipIndex] -= 1
            posNumEdges[posIndex] -= 1

        # Keep only edges between still-unmatched ships and positions.
        newEdges = []
        for edge in edges:
            shipIndex, posIndex, dist = edge
            if shipIndex in matchedShips or posIndex in matchedPositions:
                continue
            else:
                newEdges.append(edge)
        edges = newEdges

    # Invert the matching: position index -> ship index.
    state2Ship = {}
    for edge in match:
        shipIndex, posIndex, dist = edge
        state2Ship[posIndex] = shipIndex

    return state2Ship
647
def allReachedStartingPoints(self):
    """
    @rtype: boolean
    @return: True iff every known ship has announced reaching its
        patrol starting point.
    """
    numShips = len(self.shipPositions)
    return len(self.reachedStartPoint) == numShips
654
656 return self.myPatrolPath
657
658
660 """
661 Processes a msg that notifies ships to start patrol
662
663 @type msgContent: None
664 @param msgContent: no additional information currently needed
665 """
666 self.reset()
667
669 """
670 Processes a msg that notifies other ship's position
671
672 @type msgContent: a 3-tuple (shipIndex, x, y)
673 @param msgContent: the position of the sending ship
674 """
675 shipIndex, x, y = msgContent
676 self.shipPositions[shipIndex] = (x,y)
677
679 """
680 Processes a msg that notifies other ship's patrol suggestion
681
682 @type msgContent: map[int] = (pos, list-of-positions)
683 @param msgContent: for each ship, a starting position and a path
684 """
685 self.ship2patrolDataSuggestions.append(msgContent)
686
688 """
689 Processes a msg that notifies some ship reached its start pos
690
691 @type msgContent: int
692 @param msgContent: an index of an agent that reached start
693 """
694 self.reachedStartPoint.add(msgContent)
695
696
697
699 """
700 Implements an agent that waits for some time,
701 then joins a coordinated patrol - this is why it is a coordinated
702 patrol
703 """
704
def __init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea):
    """
    Initializes the strategy.

    @type agentIndex: int
    @param agentIndex: sort of an agent-id - an agent's index in the array of all agents

    @param patrolPoints: the points of interest to patrol between
        (presumably as read from measurements data - TODO confirm exact type)

    @param edgeLengths: measured lengths/costs of the edges between
        patrol points - TODO confirm exact type

    @type rulesOfTheSea: boolean
    @param rulesOfTheSea: Tells whether to respect the rules of the sea
    """
    AgentCoordinatedPatrolStrategy.__init__(self, agentIndex, patrolPoints, edgeLengths, rulesOfTheSea)

    # Number of ticks to wait before joining the coordinated patrol.
    self.waitingTime = 1000
    self.currentTime = 0
722
754
755
756 '''
757 class AgentCirclingStrategy(AgentStrategy):
758 """
759 NOTE: this class is not in use.
760 Implements a regular circling agent strategy
761 """
762
763 def __init__(self, speed):
764 AgentStrategy.__init__(self)
765
766 self.speed = speed
767
768 def composeAction(self, agentWorldModel):
769 action = CompositeAction()
770 action.appendAction(PrimitiveAction("setEngineSpeed", [self.speed]))
771 action.appendAction(PrimitiveAction("setSteering", [-1]))
772 return action
773
774
775 class CompleteStateHolonomicRRTSteeringEngineStrategy(AgentStrategy):
776 """
777 NOTE: this class is out-of-date and should be updated for the
778 current environment and motion model (or actually to convert it into
779 non-holonomic, meaning to incorporate the sea physics constraints
780 into the RRT planner)
781
782 A strategy for an RRT agent, where the world model is
783 a complete state and the set of legal actions are steering and
784 engine speed. RRT is holonomic (i.e. assumes the agent has direct
785 control on the ship position), with no colision detection here.
786 The logical flow is:
787 complete-world-state model in the agent --> RRT --> Steering&Engine commands
788 """
789 def __init__(self, agentIndex, initialState, goalPos):
790 AgentStrategy.__init__(self)
791
792 self.agentIndex = agentIndex
793 self.goalPos = goalPos
794 self.actionIndex = 0
795
796
797
798
799 # TODO: Some hard-coded constants that should probably be sent as params
800
801 # Threshold to determine whether at goal
802 self.angleThreshold = 10
803 self.distanceThreshold = 10
804
805 # borders for sampling
806 self.minX = 0
807 self.maxX = 1200
808 self.minY = 0
809 self.maxY = 800
810 self.minAng = 0
811 self.maxAng = 360
812
813 # step-fraction towards a new sampled state
814 self.epsilon = 0.1
815
816
817
818
819 # Compute the RRT path
820 self.path = self.rrtPath(initialState, goalPos)
821
822
823
824 def composeAction(self, agentWorldModel):
825 "Implemeting the AgentStrategy interface."
826 # At the end, do the same action (which should be stop)
827 if self.actionIndex >= len(self.path):
828 return self.path[-1]
829
830 action = self.path[self.actionIndex]
831 self.actionIndex += 1
832 return action
833
834
835 def rrtPath(self, initialState, goalPos):
836 # create initial node
837 initialExternalState = copy.copy(initialState.shipExternalStates[self.agentIndex])
838
839 # init a tree (TODO: change to realistic distanceUnit and angleUnit?)
840 self.tree = rrtTree.RRTTreeXYOrientationEuclidDist(initialExternalState,
841 distanceUnit=10,
842 angleUnit=5)
843
844 # search
845 currentState = initialExternalState
846 while not self.isGoalState(currentState):
847 currentState = self.extend(self.tree, self.chooseRandomState())
848
849 return self.tree.getPathTo(currentState)
850
851
852 def extend(self, tree, randomState):
853 """
854 The EXTEND function from the paper.
855 Here, in this simple version of the algorithms, we just
856 take an epsilon step towards the randomState, without checking
857 the all intermediate states are collision-free.
858 We also assume holonomic problem in which the computation
859 of the edge (the action that takes us to the new state)
860 is just a simple vector calculation.
861
862
863 Keyword arguments:
864 randomState -- A state that was randomly sampled in space,
865 towards which we take an epsilon step.
866
867 Returns:
868 The new state that was added to the tree.
869
870 """
871 closestNode = tree.findClosestNode(randomState)
872 closestState = closestNode.state
873
874 # Take an epsilon step towards randomState
875 # TODO: need to normalize attributes with variance
876 closestStateX = closestState.x
877 closestStateY = closestState.y
878 closestStateOrient = closestState.orientation
879 newX = (randomState.x - closestStateX) * self.epsilon + closestStateX
880 newY = (randomState.y - closestStateY) * self.epsilon + closestStateY
881 newOrient = (randomState.orientation - closestStateOrient) * self.epsilon + closestStateOrient
882
883 # Compute the edge - the action to take in order to get to this state
884 # TODO: check that the path to this state is legal
885 xDiff = newX - closestStateX
886 yDiff = newY - closestStateY
887
888 if closestNode.parent != None:
889 oldOrientation = degrees( atan2(closestStateY - closestNode.parent.state.y,
890 closestStateX - closestNode.parent.state.x) )
891 else:
892 oldOrientation = closestStateOrient
893 # Turn from the old direction to the new direction
894 steeringToApply = degrees( atan2(yDiff, xDiff) ) - oldOrientation
895
896 speedToApply = sqrt( xDiff * xDiff + yDiff * yDiff )
897 action = CompositeAction()
898 action.appendAction(PrimitiveAction("setEngineSpeed", [speedToApply]))
899 action.appendAction(PrimitiveAction("setSteering", [steeringToApply]))
900
901 # Add the node to the tree
902 newState = state.ShipExternalState(newX, newY, newOrient, speedToApply, 0)
903 parent = closestNode
904 edge = action
905 newNode = rrtTree.RRTNode(newState, parent, edge)
906 tree.addNode(newNode)
907
908 return newState
909
910 def isGoalState(self,shipState):
911 "Is the ship within some threshold from the goal."
912
913 point1 = (shipState.x, shipState.y)
914 point2 = (self.goalPos.x, self.goalPos.y)
915 distance = geometry.distance2D(point1, point2)
916 if distance > self.distanceThreshold:
917 return False
918
919 # TODO: fix to deal with angles in a circular way,
920 # e.g. 359 and 1 are at distance 2 and not 358
921 angle1 = shipState.orientation
922 angle2 = self.goalPos.orientation
923 if abs(angle2-angle1) > self.angleThreshold:
924 return False
925
926 return True
927
928
929 def chooseRandomState(self):
930 "Samples a random state with a bias towards the goal."
931 if random.random() < 0.05:
932 return self.goalPos
933 else:
934 return state.ShipExternalState(random.randint(self.minX, self.maxX),
935 random.randint(self.minY, self.maxY),
936 random.randint(self.minAng, self.maxAng),
937 speed = -1, angularSpeed = -1) # speed should be ignored
938
939
940 class MeasureEdgeLengths(AgentStrategy):
941 """
942 TODO: THIS CLASS IS FOR THE REGRESSION LEARNING
943 CURRENTLY NOT USEFUL YET.
944 NEED TO BE MADE MORE GENERIC WHEN IT BECOMES RELEVANT.
945 A class that wraps some AgentStrategy, and adds
946 some measurements, to get experiment data.
947 The measurements it adds are the number of time steps it
948 took to reach the goal.
949 """
950 # TODO: THIS CLASS IS FOR THE REGRESSION LEARNING - NEED TO BE
951 # MADE MORE GENERIC WHEN IT BECOMES RELEVANT.
952 def __init__(self, agentIndex, goalPos, agentStrategy):
953 AgentStrategy.__init__(self)
954
955 self.shipIndex = agentIndex
956 self.goalPos = goalPos
957 self.agentStrategy = agentStrategy
958 # Time counter
959 self.time = 0
960 # filename to append results
961 self.filename = "regression" + str(agentIndex)
962 # boolean - to write env data only in the first time
963 self.writtenEnvData = False
964
965 def composeAction(self, agentWorldModel):
966 # Increase time counter
967 self.time += 1
968
969 estimatedState = agentWorldModel.getEstimatedExtShipState(self.shipIndex)
970 estimatedPos = (estimatedState.x, estimatedState.y)
971
972
973 # For the first time write the conditions to params files
974 # TODO: hack, we only measure the environment at the beginning
975 if not self.writtenEnvData:
976 shipOrient = geometry.orientation2D(estimatedPos, self.goalPos)
977
978 windVector = agentWorldModel.getEstimatedWindVector(self.shipIndex)
979 windSpeed = geometry.distance2D((0,0), windVector)
980 windOrient = geometry.orientation2D((0,0), windVector)
981 print 'windOrient', windOrient
982 windRelOrient = geometry.computeShortestTurn(shipOrient, windOrient)
983
984 currentsVector = agentWorldModel.getEstimatedWaterVector(self.shipIndex)
985 waterSpeed = geometry.distance2D((0,0), currentsVector)
986 currentOrient = geometry.orientation2D((0,0), currentsVector)
987 print 'currentOrient', currentOrient
988 waterRelOrient = geometry.computeShortestTurn(shipOrient, currentOrient)
989
990 xFilename = self.filename + ".x"
991 if not os.path.exists(xFilename):
992 f = open(xFilename, 'w')
993 f.write('[')
994 else:
995 f = open(xFilename, 'r+')
996 f.seek(-1, 2)
997 f.write(', ')
998 f.write('(%.2f, %.2f, %.2f, %.2f)]' % ( windSpeed,
999 windRelOrient,
1000 waterSpeed,
1001 waterRelOrient))
1002 f.close()
1003 self.writtenEnvData = True
1004
1005
1006 # If at goal - stop
1007 distanceToGoal = geometry.distance2D(estimatedPos, self.goalPos)
1008 if distanceToGoal <= 20:
1009 yFilename = self.filename + ".y"
1010 if not os.path.exists(yFilename):
1011 f = open(yFilename, 'w')
1012 f.write('[')
1013 else:
1014 f = open(yFilename, 'r+')
1015 f.seek(-1, 2)
1016 f.write(', ')
1017 f.write(str(self.time) + ']')
1018 f.close()
1019 sys.exit()
1020 else:
1021 return self.agentStrategy.composeAction(agentWorldModel)
1022 '''
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1034 """
1035 This is a class for an agent that controls a ship.
1036 An agent has two main components: it's world model,
1037 and its decision-making strategy.
1038 """
def __init__(self, worldModel, strategy):
    """
    Initializes the agent with its two main components.

    @type worldModel: AgentWorldModel
    @param worldModel: the agent's model of the world

    @type strategy: AgentStrategy
    @param strategy: the agent's decision-making strategy
    """
    self.worldModel = worldModel
    self.strategy = strategy
1042
1044 """
1045 A template method.
1046 Choose an action based on current percepts
1047
1048 @type percepts: PerceptList
1049 @param percepts: A list of percepts sent to the agent for this decision cycle
1050
1051 @rtype: CompositeAction
1052 @return: A list of actions to be executed on the ship
1053 """
1054 self.worldModel.processPercepts(percepts)
1055 action = self.strategy.composeAction(self.worldModel)
1056 self.worldModel.cleanMsgQueue()
1057 return action
1058
1060 """
1061 msg is received by the agent using this function
1062
1063 @type msg: PrimitiveMessage
1064 @param msg: a primitive message sent to the agent.
1065 """
1066
1067
1068 return self.worldModel.receiveMsg(msg)
1069
1071 """
1072 Returns any message that strategy created
1073
1074 @rtype: CompositeMessage
1075 @return: The agents' outgoing message for this decision cycle
1076 """
1077 return self.strategy.getOutgoingMessage()
1078