Add interpretation phase to shallow (1 level) nested structures.
parent 5f6b067e17
commit 42cb4cb8f1
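To illustrate the idea, here is a minimal sketch (hypothetical names, not this repository's API: `match`, `Element` and the toy `knowledge` list are invented for the example). A trained pattern element can now be a `(return_type, remix)` tuple; matching it remixes the input tokens and re-enters the matcher one level deep, which is how a question pattern can wrap a statement pattern.

# Simplified sketch of the matching idea in this commit (hypothetical names,
# not the repository's API): a pattern element is either a literal token or a
# (return_type, remix) tuple that triggers one nested level of matching.
from typing import List, Tuple, Union

Element = Union[str, Tuple[str, List[int]]]


def match(tokens: List[str], pattern: List[Element], knowledge, depth: int = 2):
    """Return a parse of `tokens` against `pattern`, or None if it does not fit."""
    if depth < 1:
        return None
    if not pattern:
        return [] if not tokens else None
    head, rest = pattern[0], pattern[1:]
    if isinstance(head, str):
        # Literal element: the next token must match it exactly.
        if tokens and tokens[0] == head:
            tail = match(tokens[1:], rest, knowledge, depth)
            return None if tail is None else [tokens[0]] + tail
        return None
    # (return_type, remix) element: reorder the tokens and hand them to a
    # nested trained pattern, one recursion level deeper. For brevity this
    # sketch assumes the nested element is the last one and consumes all tokens.
    return_type, remix = head
    inner_tokens = [tokens[i] for i in remix]
    for inner_pattern, inner_ast in knowledge:   # knowledge: [(pattern, ast), ...]
        inner = match(inner_tokens, inner_pattern, knowledge, depth - 1)
        if inner is not None:
            return [(return_type, inner_ast, inner)]
    return None


# Toy usage: a question pattern that nests a statement pattern one level deep.
statement = (["earth", "is", "a", "planet"],
             ("pertenence-to-group", "earth", "planet"))
question = [("question", [1, 0, 2, 3])]   # "is earth a planet" -> "earth is a planet"
print(match(["is", "earth", "a", "planet"], question, [statement]))
# -> [('question', ('pertenence-to-group', 'earth', 'planet'), ['earth', 'is', 'a', 'planet'])]

In the commit itself, `get_fit`, `match_token` and `fitting_return_type` play this role over `knowledge.trained`, with the token reordering done by `reverse_remix`.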
@@ -48,6 +48,7 @@ class KnowledgeBase(object):
 
     def process(self, row):
         knowledge_before = copy.deepcopy(self.knowledge)
+        print("\x1b[7;32m> {} \x1b[0m".format(row))
         tokens, decomposition, inferred_tree = parsing.get_fit(self, row)
         result = knowledge_evaluation.integrate_information(self.knowledge,
                                                             {
@@ -5,7 +5,9 @@ import knowledge_evaluation
 import re
 import copy
 from functools import reduce
+from typing import List
 
+MAX_RECURSIONS = 10
 
 # TODO: more flexible tokenization
 def to_tokens(text):
@@ -105,7 +107,7 @@ def integrate_language(knowledge_base, example):
         new_tokens.pop(offset)
 
         # TODO: Get a specific types for... types
-        new_tokens.insert(offset, "<type: {}>".format(subquery_type))
+        new_tokens.insert(offset, (subquery_type, remix))
         tokens = new_tokens
 
         resolved_parsed = replace_position(resolved_parsed, position, subquery_type)
@@ -243,6 +245,8 @@ def get_similar_tree(knowledge_base, atom):
 
     return sorted_possibilities[0]
 
+
+# TODO: unroll this mess
 def get_matching(sample, other):
     l = len(sample[0])
     other = list(filter(lambda x: len(x[0]) == l, other))
@@ -250,12 +254,19 @@ def get_matching(sample, other):
         if len(other) == 0:
             return []
 
-        if not isinstance(sample[0][i], str):
-            other = list(filter(lambda x: not isinstance(x[0][i], str) and
+        if isinstance(sample[0][i], dict):  # Dictionaries are compared by groups
+            other = list(filter(lambda x: isinstance(x[0][i], dict) and
                                 len(x[0][i]['groups'] & sample[0][i]['groups']) > 0,
                                 other))
 
+        elif isinstance(sample[0][i], tuple):  # Tuples are compared by types [0]
+            other = list(filter(lambda x: isinstance(x[0][i], tuple) and
+                                x[0][i][0] == sample[0][i][0],
+                                other))
+
     return [sample[0][x] if isinstance(sample[0][x], str)
+            else
+            sample[0][x] if isinstance(sample[0][x], tuple)
             else {'groups': sample[0][x]['groups'] & reduce(lambda a, b: a & b,
                                                             map(lambda y: y[0][x]['groups'],
                                                                 other))}
@@ -282,15 +293,124 @@ def reprocess_language_knowledge(knowledge_base, examples):
     return pattern_examples
 
 
-def get_fit(knowledge, row):
-    row = row.lower().split()
-    for sample, ast in knowledge.trained:
-        if len(sample) != len(row):
-            continue
-
-        if all(map(lambda x: (not isinstance(sample[x], str)
-                              or sample[x] == row[x]),
-                   range(len(sample)))):
-            return row, sample, ast
+def fitting_return_type(knowledge,
+                        return_type, remixer,
+                        input_stream,
+                        tail_of_ouput_stream,
+                        remaining_recursions: int):
+    indent = " " + " " * (MAX_RECURSIONS - remaining_recursions)
+
+    for sample, ast in knowledge.trained:
+        try:
+            parsed_input = []
+            parsed_output = []
+
+            remaining_input = reverse_remix(input_stream, remixer)
+            print(indent + "RMXin:", remaining_input)
+            remaining_output = copy.deepcopy(sample)
+
+            print(indent + "S:", sample)
+            print(indent + "A:", ast)
+            print()
+
+            while len(remaining_output) > 0:
+                ((input, output),
+                 (remaining_input, remaining_output)) = match_token(knowledge,
+                                                                    remaining_input,
+                                                                    remaining_output,
+                                                                    remaining_recursions - 1)
+                parsed_input += input
+                parsed_output += output
+                print(indent + "INP:", input)
+                print(indent + "OUT:", output)
+
+            print(indent + "Pi:", parsed_input)
+            print(indent + "Po:", parsed_output)
+            print("\x1b[7m", end='')
+            print(indent + "Ri:", remaining_input)
+            print(indent + "Ro:", remaining_output)
+            print("\x1b[0m")
+            return ((parsed_input, parsed_output),
+                    (remaining_input, remaining_output + tail_of_ouput_stream))
+        except TypeError as e:
+            print(indent + "X " + str(e))
+            pass
+        except IndexError as e:
+            print(indent + "X " + str(e))
+            pass
+    raise TypeError("No matching type found")
+
+
+def reverse_remix(tree_section, remix):
+    result_section = []
+    for origin in remix:
+        result_section.append(copy.deepcopy(tree_section[origin]))
+    return result_section + tree_section[len(remix):]
+
+
+def match_token(knowledge,
+                input: List[str],
+                trained: List[str],
+                remaining_recursions: int):
+    if remaining_recursions < 1:
+        return None
+
+    # print("#" * (MAX_RECURSIONS - remaining_recursions))
+    # print("Input:", input)
+    # print("Output:", trained)
+    indent = " " + " " * (MAX_RECURSIONS - remaining_recursions)
+    first_input = input[0]
+    expected_first = trained[0]
+    print(indent + "Ex?", expected_first)
+    print(indent + "Fo!", first_input)
+
+    if isinstance(expected_first, dict):
+        # TODO: check if the dictionary matches the values
+        return (([first_input], [expected_first]), (input[1:], trained[1:]))
+
+    elif isinstance(expected_first, tuple):
+        return_type, remixer = expected_first
+        return fitting_return_type(knowledge,
+                                   return_type, remixer,
+                                   input, trained[1:],
+                                   remaining_recursions)
+
+    elif expected_first == first_input:
+        return (([first_input], [expected_first]), (input[1:], trained[1:]))
+
+    return None
+
+
+def get_fit(knowledge, row, remaining_recursions=MAX_RECURSIONS):
+    tokens = to_tokens(row)
+    indent = " " * (MAX_RECURSIONS - remaining_recursions)
+    for sample, ast in knowledge.trained:
+        print("-----")
+        print("TOK:", tokens)
+        try:
+            remaining_input = copy.deepcopy(tokens)
+            remaining_output = copy.deepcopy(sample)
+            print(indent + "AST:", ast)
+            print(indent + "S:", sample)
+
+            # TODO: merge with get_return type, as uses the same mechanism
+            while len(remaining_output) > 0:
+                ((_, _), (remaining_input, remaining_output)) = match_token(knowledge,
+                                                                            remaining_input,
+                                                                            remaining_output,
+                                                                            remaining_recursions)
+                print(indent + "Ri:", remaining_input)
+                print(indent + "Ro:", remaining_output)
+
+            if len(remaining_input) == 0 and len(remaining_input) == 0:
+                print("!!!", tokens, sample, ast)
+                return tokens, sample, ast
+        except TypeError as e:
+            print(indent + "X " + str(e))
+            pass
+        except IndexError as e:
+            print(indent + "X " + str(e))
+            pass
+        print()
     else:
         return None
@@ -1,6 +1,7 @@
 import json
 
 from knowledge_base import KnowledgeBase
+from modifiable_property import ModifiableProperty
 
 examples = [
     {
@@ -19,10 +20,10 @@ examples = [
     # "text": "is lava dangerous?",
     # "parsed": ("question", ("exists-property-with-value", 'lava', 'dangerous')),
     # },
-    # {
-    # "text": "earth is a planet",
-    # "parsed": ("pertenence-to-group", 'earth', 'planet'),
-    # },
+    {
+        "text": "earth is a planet",
+        "parsed": ("pertenence-to-group", 'earth', 'planet'),
+    },
     # {
     # "text": "is earth a moon?",
     # "parsed": ("question", ("pertenence-to-group", 'earth', 'moon')),
@@ -91,7 +92,10 @@ def test_assumption(expectedResponse, knowledge, query):
     print("Expected: {}".format(expectedResponse))
 
     result, abstract_tree, diff = knowledge.process(query['text'])
-    print("\x1b[0;3{}mResult: {}\x1b[0m".format("1" if result != expectedResponse else "2", result))
+    end_result = result.getter() if isinstance(result, ModifiableProperty) else result
+
+    print("\x1b[0;3{}mResult: {}\x1b[0m".format("1" if end_result != expectedResponse else "2", end_result))
+    assert(end_result == expectedResponse)
 
 
 def main():
@@ -105,6 +109,7 @@ def main():
     print(differences())
     print("----")
 
+    test_assumption(True, knowledge, {'text': 'earth is a planet'})
    test_assumption(True, knowledge, {'text': 'is lava dangerous?'})
     # for test in [{'text': 'a bus can run'}, {'text': 'io is a moon'}]:
     #     row = test['text']
@@ -119,7 +124,6 @@ def main():
     # queryTrue = { "text": "is io a moon?", "parsed": ("question", ("pertenence-to-group", "io", "moon")) }
     # queryFalse = { "text": "is io a planet?", "parsed": ("question", ("pertenence-to-group", "io", "planet")) }
 
-    # test_assumption(True, knowledge, queryTrue)
     # test_assumption(False, knowledge, queryFalse)
 
 if __name__ == '__main__':