import copy
import random
import os
import sys
from math import *

from vec2d import *
from lineline import *

import geometry
import rrtTree
import perceptModels
from actions import *
import state
import messages
import obstacleAvoidance
from navigationTactics import *


class AgentWorldModel:
87 """
88 This class is a base class for all the models an agent
89 can have about the world.
90 A subclass should implement a specific model of the world,
91 and a specific way to process percepts for this model.
92 There can be multiple ways to process percepts for any
93 given model, so an example class hierarchy could be:
94
95 -------------------
96 | AgentWorldModel |
97 -------------------
98 / \
99 ---------- ----------
100 | Model1 | | Model2 |
101 ---------- ----------
102 / \ \
103 ------------------- ------------------ .
104 |with method 1 to | |with method 2 to| .
105 |processPercepts | |processPercepts | .
106 |for Model1 | |for Model1 |
107 ------------------- ------------------
108 """

    def processPercepts(self, perceptsList):
        """
        For a given concrete model, this method
        implements a specific way of updating the
        model from percepts.
        This is just an extra layer of flexibility, in case
        there are multiple ways to update the same model
        from percepts.

        @type perceptsList: PerceptList
        @param perceptsList: A list of percepts to be processed
        """
        raise NotImplementedError

125 """
126 Get a list of estimated ShipExternalState of all other ships
127 in the system.
128
129 @type selfIndex: int
130 @param selfIndex: The index of the ship controlled by the agent - we don't want to include it in the returned states.
131
132 @rtype: A list of ShipExternalState
133 @return: A list of estimated states of all other ships in the system
134 """
135 abstract
136
138 """
139 Get the estimated ShipExternalState as estimated
140 by the world model.
141
142 @rtype: ShipExternalState
143 @return: An estimation of the ship's real state according to our AgentWorldModel.
144 """
145 abstract
146
148 """
149 Get the estimated wind vector in the location of
150 the ship.
151
152 @rtype: numpy array (vector)
153 @return: an estimation of the wind vector (speed and direction) in the agent's location according to our AgentWorldModel.
154 """
155 abstract
156
158 """
159 Get the estimated water vector in the location of
160 the ship.
161
162 @rtype: numpy array (vector)
163 @return: an estimation of the water vector (speed and direction) in the agent's location according to our AgentWorldModel.
164 """
165 abstract
166
168 """
169 Get a list of estimated obstacle shapes.
170
171 @rtype: Obstacles
172 @return: an estimation of the obstacles shapes (and positions) according to our AgentWorldModel.
173 """
174 abstract
175
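
# A sketch of the hierarchy pictured in the AgentWorldModel docstring
# (all names here are hypothetical, for illustration only): two subclasses
# share Model1's representation but override processPercepts differently.
#
#   class Model1(AgentWorldModel): ...
#
#   class Model1FromRadarPercepts(Model1):
#       def processPercepts(self, perceptsList): ...
#
#   class Model1FromDeadReckoning(Model1):
#       def processPercepts(self, perceptsList): ...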


class CompleteStateAgentWorldModel(AgentWorldModel):
    """
    Just a skeleton for testing purposes.
    The world model is exactly the world state.
    """
    def __init__(self, shipIndex):
        self.state = None
        self.shipIndex = shipIndex

    def processPercepts(self, perceptsList):
        """
        Implements the abstract method.
        The model is the complete world state, so percept processing
        just stores the state the percepts carry.
        """
        raise NotImplementedError  # original implementation omitted here

    def getEstimatedExtShipStatesOfOthers(self, selfIndex):
        """
        Implements the abstract method.
        """
        states = self.state.shipExternalStates
        return states[0:selfIndex] + states[selfIndex + 1:]

    def getEstimatedExtShipState(self):
        """
        Implements the abstract method.
        """
        return self.state.shipExternalStates[self.shipIndex]

    def getEstimatedWindVector(self):
        """
        Implements the abstract method.
        """
        shipState = self.state.shipExternalStates[self.shipIndex]
        posX, posY = shipState.x, shipState.y
        return self.state.sea.wind.getSpeedVectorInLocation(posX, posY)

    def getEstimatedWaterVector(self):
        """
        Implements the abstract method.
        """
        shipState = self.state.shipExternalStates[self.shipIndex]
        posX, posY = shipState.x, shipState.y
        return self.state.sea.waterCurrents.getSpeedVectorInLocation(posX, posY)

    def getEstimatedObstacles(self):
        """
        Implements the abstract method.
        """
        return self.state.sea.obstacles.getObstaclesList()


class AgentStrategy:
243 """
244 An interface for specific decision making strategy.
245 It should be defined for a given AgentWorldModel,
246 a given set of legal actions, and a given decision making algorithm.
247
248 For each step, the flow is:
249 AgentWorldModel --> Specific_AgentStrategy --> Actions-to-execute
250 """
255
257 "Given the (inferred) state of the world, decide on a set of actions."
258 abstract

    def getOutgoingMessage(self):
        msg = self.outgoingMsg
        if len(msg.primitiveMsgs) > 0:
            # Reset the buffer so messages are delivered only once
            self.outgoingMsg = messages.CompositeMessage()
        return msg
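
    # Usage sketch (hypothetical caller code; deliverToSimulator is a
    # stand-in name): the simulator is expected to drain this buffer once
    # per decision cycle.
    #
    #   msg = strategy.getOutgoingMessage()
    #   for m in msg.primitiveMsgs:
    #       deliverToSimulator(m)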


class AgentPatrolStrategy(AgentStrategy):
    """
    Implements a basic patrol strategy.
    Given points of interest, the agent just travels
    along the cycle they define.
    """
    def __init__(self, agentIndex, taskPath, rulesOfTheSea):
        """
        Initializes the strategy.

        @type agentIndex: int
        @param agentIndex: sort of an agent id - the agent's index in the array of all agents

        @type taskPath: a list of 2-tuples
        @param taskPath: a list of (x, y) points that define a cycle for the agent to travel along

        @type rulesOfTheSea: boolean
        @param rulesOfTheSea: tells whether to respect the rules of the sea
        """
        AgentStrategy.__init__(self)

        self.agentIndex = agentIndex
        self.taskPath = taskPath
        self.rulesOfTheSea = rulesOfTheSea
        self.nextTaskPointIndex = 0

        self.navigationTactic = None

        self.time = 0
        self.prevPointTime = 0
        self.firstVisit = True

    def composeAction(self, agentWorldModel):
        """
        Implements the abstract method; see the documentation
        of the abstract method.
        """
        self.time += 1

        if self.navigationTactic is None:
            # First call: head for the first task point
            goalPos = self.taskPath[self.nextTaskPointIndex]
            self.setNavigationTactic(agentWorldModel, goalPos)

        elif self.navigationTactic.done(agentWorldModel):
            # Reached the current task point: record it and move on
            # to the next point along the cycle
            self.recordStatistics()
            self.nextTaskPointIndex = (self.nextTaskPointIndex + 1) % len(self.taskPath)
            goalPos = self.taskPath[self.nextTaskPointIndex]
            self.setNavigationTactic(agentWorldModel, goalPos)

        return self.navigationTactic.composeAction(agentWorldModel)
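
    # Example: with taskPath = [(0, 0), (100, 0), (100, 100)] the ship is
    # steered to point 0, then 1, then 2, after which the modulo above
    # wraps nextTaskPointIndex back to 0 and the cycle repeats.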

    def setNavigationTactic(self, agentWorldModel, goalPos):
        """
        Choose and initialize the navigation tactic that will drive
        the ship to goalPos.
        """
        # The original tactic-selection logic (built from the
        # navigationTactics module) is omitted here.
        raise NotImplementedError

    def recordStatistics(self):
        if not self.firstVisit:
            prev = self.taskPath[self.nextTaskPointIndex - 1]
            p = self.taskPath[self.nextTaskPointIndex]

            # Report the visit, and the measured travel time for this edge
            self.outgoingMsg.appendMsg(messages.PrimitiveMessage('sim', 'visit', p))

            currentEdge = (prev, p)
            edgeLength = self.time - self.prevPointTime
            self.outgoingMsg.appendMsg(messages.PrimitiveMessage('sim', 'edgeLength', (currentEdge, edgeLength)))

        self.firstVisit = False
        self.prevPointTime = self.time

'''
class AgentCirclingStrategy(AgentStrategy):
    """
    NOTE: this class is not in use.
    Implements a regular circling agent strategy.
    """

    def __init__(self, speed):
        AgentStrategy.__init__(self)

        self.speed = speed

    def composeAction(self, agentWorldModel):
        action = CompositeAction()
        action.appendAction(PrimitiveAction("setEngineSpeed", [self.speed]))
        action.appendAction(PrimitiveAction("setSteering", [-1]))
        return action


class CompleteStateHolonomicRRTSteeringEngineStrategy(AgentStrategy):
    """
    NOTE: this class is out of date and should be updated for the
    current environment and motion model (or, better, converted into
    a non-holonomic planner, meaning the sea physics constraints get
    incorporated into the RRT planner).

    A strategy for an RRT agent, where the world model is
    a complete state and the set of legal actions are steering and
    engine-speed commands. The RRT is holonomic (i.e. it assumes the
    agent has direct control over the ship position), with no collision
    detection here.
    The logical flow is:
    complete-world-state model in the agent --> RRT --> Steering&Engine commands
    """
    def __init__(self, agentIndex, initialState, goalPos):
        AgentStrategy.__init__(self)

        self.agentIndex = agentIndex
        self.goalPos = goalPos
        self.actionIndex = 0

        # TODO: Some hard-coded constants that should probably be sent as params

        # Thresholds to determine whether the ship is at the goal
        self.angleThreshold = 10
        self.distanceThreshold = 10

        # Borders for sampling
        self.minX = 0
        self.maxX = 1200
        self.minY = 0
        self.maxY = 800
        self.minAng = 0
        self.maxAng = 360

        # Step-fraction towards a newly sampled state
        self.epsilon = 0.1

        # Compute the RRT path
        self.path = self.rrtPath(initialState, goalPos)

    def composeAction(self, agentWorldModel):
        "Implementing the AgentStrategy interface."
        # Past the end of the path, keep issuing the last action
        # (which should be a stop)
        if self.actionIndex >= len(self.path):
            return self.path[-1]

        action = self.path[self.actionIndex]
        self.actionIndex += 1
        return action


    def rrtPath(self, initialState, goalPos):
        # Create the initial node
        initialExternalState = copy.copy(initialState.shipExternalStates[self.agentIndex])

        # Init a tree (TODO: change to realistic distanceUnit and angleUnit?)
        self.tree = rrtTree.RRTTreeXYOrientationEuclidDist(initialExternalState,
                                                           distanceUnit=10,
                                                           angleUnit=5)

        # Search: keep extending the tree until a goal state is reached
        currentState = initialExternalState
        while not self.isGoalState(currentState):
            currentState = self.extend(self.tree, self.chooseRandomState())

        return self.tree.getPathTo(currentState)


    def extend(self, tree, randomState):
        """
        The EXTEND function from the paper.
        In this simple version of the algorithm, we just
        take an epsilon step towards randomState, without checking
        that all intermediate states are collision-free.
        We also assume a holonomic problem, in which the computation
        of the edge (the action that takes us to the new state)
        is just a simple vector calculation.

        Keyword arguments:
        randomState -- A state that was randomly sampled in space,
                       towards which we take an epsilon step.

        Returns:
        The new state that was added to the tree.
        """
        closestNode = tree.findClosestNode(randomState)
        closestState = closestNode.state

        # Take an epsilon step towards randomState
        # TODO: need to normalize attributes with variance
        closestStateX = closestState.x
        closestStateY = closestState.y
        closestStateOrient = closestState.orientation
        newX = (randomState.x - closestStateX) * self.epsilon + closestStateX
        newY = (randomState.y - closestStateY) * self.epsilon + closestStateY
        newOrient = (randomState.orientation - closestStateOrient) * self.epsilon + closestStateOrient

        # Compute the edge - the action to take in order to get to this state
        # TODO: check that the path to this state is legal
        xDiff = newX - closestStateX
        yDiff = newY - closestStateY

        if closestNode.parent is not None:
            oldOrientation = degrees( atan2(closestStateY - closestNode.parent.state.y,
                                            closestStateX - closestNode.parent.state.x) )
        else:
            oldOrientation = closestStateOrient
        # Turn from the old direction to the new direction
        steeringToApply = degrees( atan2(yDiff, xDiff) ) - oldOrientation

        speedToApply = sqrt( xDiff * xDiff + yDiff * yDiff )
        action = CompositeAction()
        action.appendAction(PrimitiveAction("setEngineSpeed", [speedToApply]))
        action.appendAction(PrimitiveAction("setSteering", [steeringToApply]))

        # Add the node to the tree
        newState = state.ShipExternalState(newX, newY, newOrient, speedToApply, 0)
        parent = closestNode
        edge = action
        newNode = rrtTree.RRTNode(newState, parent, edge)
        tree.addNode(newNode)

        return newState

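    # Worked example of the epsilon step above (illustrative numbers only):
    # with closestState = (x=0, y=0, orientation=0), randomState =
    # (100, 50, 90) and epsilon = 0.1, we get newX = 10, newY = 5,
    # newOrient = 9. With no parent node, oldOrientation = 0, so
    # steeringToApply = degrees(atan2(5, 10)) ~= 26.57 degrees, and
    # speedToApply = sqrt(10**2 + 5**2) ~= 11.18.
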
    def isGoalState(self, shipState):
        "Is the ship within some threshold of the goal?"

        point1 = (shipState.x, shipState.y)
        point2 = (self.goalPos.x, self.goalPos.y)
        distance = geometry.distance2D(point1, point2)
        if distance > self.distanceThreshold:
            return False

        # TODO: fix to deal with angles in a circular way,
        # e.g. 359 and 1 are at distance 2 and not 358
        angle1 = shipState.orientation
        angle2 = self.goalPos.orientation
        if abs(angle2 - angle1) > self.angleThreshold:
            return False

        return True

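    # A sketch of the circular comparison the TODO above asks for
    # (hypothetical helper, not part of the original code; angles in degrees):
    #
    #   def circularAngleDiff(angle1, angle2):
    #       d = abs(angle2 - angle1) % 360
    #       return min(d, 360 - d)   # e.g. 359 vs 1 -> 2, not 358
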
    def chooseRandomState(self):
        "Samples a random state, with a bias towards the goal."
        if random.random() < 0.05:
            return self.goalPos
        else:
            return state.ShipExternalState(random.randint(self.minX, self.maxX),
                                           random.randint(self.minY, self.maxY),
                                           random.randint(self.minAng, self.maxAng),
                                           speed = -1, angularSpeed = -1)  # speed should be ignored


class MeasureEdgeLengths(AgentStrategy):
    """
    TODO: THIS CLASS IS FOR THE REGRESSION LEARNING -
    CURRENTLY NOT USEFUL YET.
    NEEDS TO BE MADE MORE GENERIC WHEN IT BECOMES RELEVANT.
    A class that wraps some AgentStrategy and adds
    some measurements, to collect experiment data.
    The measurement it adds is the number of time steps it
    took to reach the goal.
    """
    def __init__(self, agentIndex, goalPos, agentStrategy):
        AgentStrategy.__init__(self)

        self.shipIndex = agentIndex
        self.goalPos = goalPos
        self.agentStrategy = agentStrategy
        # Time counter
        self.time = 0
        # Filename to append results to
        self.filename = "regression" + str(agentIndex)
        # Boolean - write env data only the first time
        self.writtenEnvData = False

    def composeAction(self, agentWorldModel):
        # Increase time counter
        self.time += 1

        estimatedState = agentWorldModel.getEstimatedExtShipState(self.shipIndex)
        estimatedPos = (estimatedState.x, estimatedState.y)

        # The first time through, write the conditions to the params file
        # TODO: hack, we only measure the environment at the beginning
        if not self.writtenEnvData:
            shipOrient = geometry.orientation2D(estimatedPos, self.goalPos)

            windVector = agentWorldModel.getEstimatedWindVector(self.shipIndex)
            windSpeed = geometry.distance2D((0,0), windVector)
            windOrient = geometry.orientation2D((0,0), windVector)
            print 'windOrient', windOrient
            windRelOrient = geometry.computeShortestTurn(shipOrient, windOrient)

            currentsVector = agentWorldModel.getEstimatedWaterVector(self.shipIndex)
            waterSpeed = geometry.distance2D((0,0), currentsVector)
            currentOrient = geometry.orientation2D((0,0), currentsVector)
            print 'currentOrient', currentOrient
            waterRelOrient = geometry.computeShortestTurn(shipOrient, currentOrient)

            xFilename = self.filename + ".x"
            if not os.path.exists(xFilename):
                f = open(xFilename, 'w')
                f.write('[')
            else:
                f = open(xFilename, 'r+')
                f.seek(-1, 2)
                f.write(', ')
            f.write('(%.2f, %.2f, %.2f, %.2f)]' % ( windSpeed,
                                                    windRelOrient,
                                                    waterSpeed,
                                                    waterRelOrient))
            f.close()
            self.writtenEnvData = True

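        # Illustrative file contents (values invented): after two runs,
        # regression0.x might read
        #   [(3.00, 45.00, 1.00, -30.00), (2.50, 12.00, 0.80, 95.00)]
        # The seek(-1, 2) above moves to just before the closing ']',
        # so each new run overwrites it with ', ' and re-closes the list.
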
        # If at goal - stop
        distanceToGoal = geometry.distance2D(estimatedPos, self.goalPos)
        if distanceToGoal <= 20:
            yFilename = self.filename + ".y"
            if not os.path.exists(yFilename):
                f = open(yFilename, 'w')
                f.write('[')
            else:
                f = open(yFilename, 'r+')
                f.seek(-1, 2)
                f.write(', ')
            f.write(str(self.time) + ']')
            f.close()
            sys.exit()
        else:
            return self.agentStrategy.composeAction(agentWorldModel)
'''


class Agent:
    """
    This is a class for an agent that controls a ship.
    An agent has two main components: its world model
    and its decision-making strategy.
    """
    def __init__(self, worldModel, strategy):
        self.worldModel = worldModel
        self.strategy = strategy

    def composeAction(self, percepts):
        """
        A template method.
        Choose an action based on the current percepts.

        @type percepts: PerceptList
        @param percepts: A list of percepts sent to the agent for this decision cycle

        @rtype: CompositeAction
        @return: A list of actions to be executed on the ship
        """
        self.worldModel.processPercepts(percepts)
        action = self.strategy.composeAction(self.worldModel)
        return action

    def getOutgoingMessage(self):
        """
        Returns any message that the strategy created.

        @rtype: CompositeMessage
        @return: The agent's outgoing message for this decision cycle
        """
        return self.strategy.getOutgoingMessage()
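

# A minimal wiring sketch (not from the original module; all names and
# values are illustrative, and `percepts` would come from the simulator
# each decision cycle):
#
#   model = CompleteStateAgentWorldModel(shipIndex=0)
#   strategy = AgentPatrolStrategy(agentIndex=0,
#                                  taskPath=[(100, 100), (400, 100), (400, 300)],
#                                  rulesOfTheSea=False)
#   agent = Agent(model, strategy)
#   action = agent.composeAction(percepts)
#   msg = agent.getOutgoingMessage()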