planning_problem.py
from functools import lru_cache

from aimacode.logic import PropKB
from aimacode.search import Node, Problem
from _utils import encode_state, decode_state
from my_planning_graph import PlanningGraph


##############################################################################
#                YOU DO NOT NEED TO MODIFY CODE IN THIS FILE                 #
##############################################################################


class BasePlanningProblem(Problem):
    def __init__(self, initial, goal):
        self.state_map = sorted(initial.pos + initial.neg, key=str)
        self.initial_state_TF = encode_state(initial, self.state_map)
        super().__init__(self.initial_state_TF, goal=goal)
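    # Note: state_map fixes a canonical ordering over every fluent appearing in
    # the initial problem description, and encode_state maps a state into a
    # sequence of truth values aligned with that ordering (as used by result()
    # and goal_test() below). Illustrative example with hypothetical fluents:
    # if state_map = [At(C1, JFK), At(C1, SFO)], the encoded state
    # (False, True) means At(C1, SFO) holds and At(C1, JFK) does not.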

    @lru_cache()
    def h_unmet_goals(self, node):
        """ This heuristic estimates the minimum number of actions that must be
        carried out from the current state in order to satisfy all of the goal
        conditions by ignoring the preconditions required for an action to be
        executed.
        """
        return sum(1 for i, f in enumerate(self.state_map)
                   if not node.state[i] and f in self.goal)
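    # Illustrative example (hypothetical literals): with goal = [At(C1, JFK)],
    # At(C1, JFK) at index i of state_map, and node.state[i] == False, the
    # estimate is 1; the heuristic simply counts goal fluents still false.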

    @lru_cache()
    def h_pg_levelsum(self, node):
        """ This heuristic uses a planning graph representation of the problem
        state space to estimate the sum of the number of actions that must be
        carried out from the current state in order to satisfy each individual
        goal condition.

        See Also
        --------
        Russell-Norvig 10.3.1 (3rd Edition)
        """
        pg = PlanningGraph(self, node.state, serialize=True, ignore_mutexes=True)
        score = pg.h_levelsum()
        return score
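    # Illustrative example: if one goal literal first appears at level 1 of the
    # planning graph and another at level 2, the level-sum estimate is
    # 1 + 2 = 3. Level-sum is generally not admissible, but it is often an
    # informative estimate in practice.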

    @lru_cache()
    def h_pg_maxlevel(self, node):
        """ This heuristic uses a planning graph representation of the problem
        to estimate the maximum level cost out of all the individual goal
        literals. The level cost is the first level where a goal literal
        appears in the planning graph.

        See Also
        --------
        Russell-Norvig 10.3.1 (3rd Edition)
        """
        pg = PlanningGraph(self, node.state, serialize=True, ignore_mutexes=True)
        score = pg.h_maxlevel()
        return score
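    # Illustrative example: with level costs 1 and 2 for two goal literals, the
    # max-level estimate is max(1, 2) = 2. Max-level is admissible, though it
    # is usually a weaker bound than level-sum.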

    @lru_cache()
    def h_pg_setlevel(self, node):
        """ This heuristic uses a planning graph representation of the problem
        to estimate the level cost in the planning graph to achieve all of the
        goal literals such that none of them are mutually exclusive.

        See Also
        --------
        Russell-Norvig 10.3.1 (3rd Edition)
        """
        pg = PlanningGraph(self, node.state, serialize=True)
        score = pg.h_setlevel()
        return score
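    # Note: set-level is the first level at which all goal literals appear
    # together with no mutex between any pair of them, which is why this
    # heuristic builds the graph with mutex tracking enabled (no
    # ignore_mutexes flag). It is admissible and dominates max-level.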

    def actions(self, state):
        """ Return the actions that can be executed in the given state. """
        possible_actions = []
        fluent = decode_state(state, self.state_map)
        for action in self.actions_list:
            is_possible = True
            for clause in action.precond_pos:
                if clause not in fluent.pos:
                    is_possible = False
                    break
            if not is_possible:
                continue
            for clause in action.precond_neg:
                if clause not in fluent.neg:
                    is_possible = False
                    break
            if is_possible:
                possible_actions.append(action)
        return possible_actions
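    # Note: self.actions_list is expected to be populated with ground actions
    # by a concrete subclass. Illustrative example (hypothetical action): a
    # Fly(P1, SFO, JFK) action with precond_pos = {At(P1, SFO)} is returned
    # only when At(P1, SFO) is true in the decoded state.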

    def result(self, state, action):
        """ Return the state that results from executing the given action in
        the given state. The action must be one of self.actions(state).
        """
        return tuple([
            (f and s not in action.effect_rem) or (s in action.effect_add)
            for f, s in zip(state, self.state_map)
        ])
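    # Illustrative example (hypothetical fluents): if
    # state_map = [At(P1, JFK), At(P1, SFO)] and the applied action has
    # effect_add = {At(P1, JFK)} and effect_rem = {At(P1, SFO)}, then
    # result((False, True), action) == (True, False): removed effects clear a
    # fluent, added effects set it, and all other fluents carry over unchanged.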

    def goal_test(self, state) -> bool:
        """ Test the state to see if the goal is reached. """
        return all(f for f, c in zip(state, self.state_map) if c in self.goal)
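

# Illustrative usage sketch (hypothetical names, assuming the wider project's
# API): a concrete problem subclasses BasePlanningProblem, builds its ground
# actions into self.actions_list, and is then solved with a search from
# aimacode.search, optionally passing one of the heuristics above:
#
#     from aimacode.search import astar_search
#
#     problem = AirCargoProblem(cargos, planes, airports, initial, goal)
#     node = astar_search(problem, problem.h_unmet_goals)
#     plan = node.solution()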