Add session context, increase logging.

parent 359f858c39
commit bb7d438e0d

.gitignore (vendored)
@@ -3,4 +3,4 @@
 *.ba?k
 *.pyc
 __pycache__
-treeNLU-*session-*.org
+treeNLU-*session*.org
@@ -34,7 +34,6 @@ def main():
     while True:
         try:
             data = input("> ").strip()
-            session().log_step(data, 0)
         except EOFError:
             print("bye")
             break
@@ -50,16 +49,17 @@ def main():
             show_samples(knowledge)
             continue

-        ret = knowledge.process(data)
-        if ret:
-            result, _, _ = ret
-            if not is_modifiable_property(result):
-                print("<", result)
-            else:
-                result.setter()
-                print("OK")
-        elif ret is None:
-            print("- Couldn't understand that, oops... -")
-        else:
-            print("Unhandled response:", ret)
+        with session().log(data):
+            ret = knowledge.process(data)
+            if ret:
+                result, _, _ = ret
+                if not is_modifiable_property(result):
+                    print("<", result)
+                else:
+                    result.setter()
+                    print("OK")
+            elif ret is None:
+                print("- Couldn't understand that, oops... -")
+            else:
+                print("Unhandled response:", ret)
     print("< Bye!")
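(Note, not part of the diff: the REPL hunk above swaps the flat session().log_step(data, 0) call for a with session().log(data): block, so everything emitted while one input line is processed nests under a single heading. A rough desugaring of that pattern, assuming the LevelContext helper added later in this commit and the session()/knowledge names from the hunk; the try/finally framing is a paraphrase of what the with statement does.)

# Sketch only, not from the commit: roughly what `with session().log(data):` does.
ctx = session().log(data)           # writes the org-mode heading for this input line
ctx.__enter__()                     # inc_level(): later annotate() calls indent deeper
try:
    ret = knowledge.process(data)   # body of the with block from the hunk above
finally:
    ctx.__exit__(None, None, None)  # dec_level(): restore the previous heading depth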
@@ -97,45 +97,46 @@ def integrate_language(knowledge_base, example):
                 break

     for position, atom in lower_levels:
-        session().annotate("\x1b[1mSelecting\x1b[0m: {}".format(atom))
-        similar = get_similar_tree(knowledge_base, atom, tokens)
-        remix, (start_bounds, end_bounds) = build_remix_matrix(knowledge_base, tokens, atom, similar)
+        with session().log(atom):
+            session().annotate("\x1b[1mSelecting\x1b[0m: {}".format(atom))
+            similar = get_similar_tree(knowledge_base, atom, tokens)
+            remix, (start_bounds, end_bounds) = build_remix_matrix(knowledge_base, tokens, atom, similar)

-        after_remix = apply_remix(tokens[len(start_bounds):-len(end_bounds)], remix)
-        session().annotate("--FIND MIX--")
-        session().annotate("-MIX- | {}".format(remix))
-        session().annotate("-FRM- | {}".format(tokens))
-        session().annotate("-AFT- | {}".format(after_remix))
+            after_remix = apply_remix(tokens[len(start_bounds):-len(end_bounds)], remix)
+            session().annotate("--FIND MIX--")
+            session().annotate("-MIX- | {}".format(remix))
+            session().annotate("-FRM- | {}".format(tokens))
+            session().annotate("-AFT- | {}".format(after_remix))

-        session().annotate("--- TEMPLATE ---")
+            session().annotate("--- TEMPLATE ---")

-        _, matcher, result = make_template(knowledge_base, after_remix, atom)
-        session().annotate("Tx: {}".format(after_remix))
-        session().annotate("Mx: {}".format(matcher))
-        session().annotate("Rx: {}".format(result))
-        session().annotate("Sx: {}".format(start_bounds))
-        session().annotate("Ex: {}".format(end_bounds))
+            _, matcher, result = make_template(knowledge_base, after_remix, atom)
+            session().annotate("Tx: {}".format(after_remix))
+            session().annotate("Mx: {}".format(matcher))
+            session().annotate("Rx: {}".format(result))
+            session().annotate("Sx: {}".format(start_bounds))
+            session().annotate("Ex: {}".format(end_bounds))


-        assert(len(after_remix) + len(start_bounds) + len(end_bounds) == len(tokens))
-        session().annotate( " +-> {}".format(after_remix))
-        subquery_type = knowledge_evaluation.get_subquery_type(knowledge_base.knowledge, atom)
-        session().annotate(r" \-> <{}>".format(subquery_type))
+            assert(len(after_remix) + len(start_bounds) + len(end_bounds) == len(tokens))
+            session().annotate( " +-> {}".format(after_remix))
+            subquery_type = knowledge_evaluation.get_subquery_type(knowledge_base.knowledge, atom)
+            session().annotate(r" \-> <{}>".format(subquery_type))

-        # Clean remaining tokens
-        new_tokens = list(tokens)
-        offset = len(start_bounds)
-        for _ in range(len(remix)):
-            new_tokens.pop(offset)
+            # Clean remaining tokens
+            new_tokens = list(tokens)
+            offset = len(start_bounds)
+            for _ in range(len(remix)):
+                new_tokens.pop(offset)

-        # TODO: Get a specific types for... types
-        new_tokens.insert(offset, (subquery_type, remix))
-        tokens = new_tokens
+            # TODO: Get a specific types for... types
+            new_tokens.insert(offset, (subquery_type, remix))
+            tokens = new_tokens

-        resolved_parsed = replace_position(resolved_parsed, position, offset)
-        session().annotate("RP: {}".format(resolved_parsed))
-        session().annotate("AT: {}".format(atom))
-        session().annotate("#########")
+            resolved_parsed = replace_position(resolved_parsed, position, offset)
+            session().annotate("RP: {}".format(resolved_parsed))
+            session().annotate("AT: {}".format(atom))
+            session().annotate("#########")


     tokens, matcher, result = make_template(knowledge_base, tokens, resolved_parsed)
@@ -298,12 +299,12 @@ def get_similar_tree(knowledge_base, atom, tokens):
         return None

     for i, possibility in enumerate(sorted_possibilities):
-        session().annotate('---- POSSIBILITY #{} ----'.format(i))
-        similar_matcher, similar_result, similar_result_resolved, _, _ = possibility
-        session().annotate('AST: {}'.format(similar_result))
-        session().annotate('Based on: {}'.format(similar_matcher))
-        session().annotate('Results on: {}'.format(similar_result_resolved))
-        session().annotate('---------------------')
+        with session().log(possibility):
+            similar_matcher, similar_result, similar_result_resolved, _, _ = possibility
+            session().annotate('AST: {}'.format(similar_result))
+            session().annotate('Based on: {}'.format(similar_matcher))
+            session().annotate('Results on: {}'.format(similar_result_resolved))
+            session().annotate('---------------------')

     return sorted_possibilities[0]

@@ -25,21 +25,40 @@ def global_session():
     return SESSION


+class LevelContext:
+    def __init__(self, increaser, decreaser):
+        self.increaser = increaser
+        self.decreaser = decreaser
+
+    def __enter__(self):
+        self.increaser()
+
+    def __exit__(self, _type, _value, _traceback):
+        self.decreaser()
+
+
 class OrgModeSession:
     def __init__(self, fname):
         self.f = open(fname, 'wt')
-        self.last_level = 0
+        self.level = 0

     def annotate(self, annotation):
         self.f.write("{indentation} {data}\n".format(
-            indentation=' ' * (self.last_level + 2 + 1),
+            indentation=' ' * (self.level + 2 + 1),
             data=annotation))

-    def log_step(self, string, level):
+    def log(self, string):
         self.f.write("{indentation} {data}\n".format(
-            indentation='*' * (level + 1),
+            indentation='*' * (self.level + 1),
             data=string))
-        self.last_level = level
+
+        return LevelContext(self.inc_level, self.dec_level)
+
+    def inc_level(self):
+        self.level += 1
+
+    def dec_level(self):
+        self.level -= 1

     def close(self):
         self.f.close()
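(Usage sketch, not part of the diff: how the reworked session API above is meant to nest, assuming the OrgModeSession class from the hunk is in scope. The file name and strings are invented; the comments describe the output annotate() and log() produce at each level.)

s = OrgModeSession('/tmp/example-session.org')   # hypothetical output file

with s.log("first input"):          # writes "* first input", then inc_level()
    s.annotate("tokenizing")        # plain line, indented under the level-1 heading
    with s.log("selected atom"):    # writes "** selected atom", one level deeper
        s.annotate("remix: [1, 0]")
    # leaving a block calls dec_level(), so the next heading is back at "*"
s.close()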
@@ -16,13 +16,15 @@ tests = (


 def gen_session_name():
-    now = datetime.datetime.utcnow()
-    return "treeNLU-test-session-{}.org".format(
-        now.strftime("%y_%m_%d %H:%M:%S_%f"))
+    return "treeNLU-test-session.org"


 def main():
     org_mode.create_global_session(gen_session_name())
+
+    now = datetime.datetime.utcnow()
+    org_mode.global_session().annotate("Ran on {}".format(
+        now.strftime("%y_%m_%d %H:%M:%S_%f")))
     failed = False
     for test_name, test_module in tests:
         try:
@@ -39,6 +41,7 @@ def main():
             failed = True
             traceback.print_exc()
             raise
+    org_mode.global_session().close()

     if failed:
         exit(1)
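(Side note, not from the diff: because LevelContext.__exit__ runs even when the body raises, a failing assertion inside a with session().log(...): block still restores the heading depth before the exception reaches the runner's except/raise above. A small sketch, with an invented file name and heading, assuming OrgModeSession from the previous hunk is in scope.)

s = OrgModeSession('/tmp/failing-run.org')    # hypothetical file name
try:
    with s.log("some failing test"):          # level-1 heading, then inc_level()
        s.annotate("starting")
        raise AssertionError("result mismatch")
except AssertionError:
    pass                                      # __exit__ already ran dec_level()
s.annotate("back at the top level")           # indented as a level-0 note again
s.close()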
@@ -125,7 +125,9 @@ def main():
         knowledge=base_knowledge,
     )

-    differences = knowledge.train(examples)
+    for example in examples:
+        with session().log(example['text']):
+            differences = knowledge.train([example])

     session().annotate("----")
     session().annotate(differences())
@@ -696,19 +696,20 @@ def main():

     for i, (example_type, data) in enumerate(examples):
         if example_type == 'full_example':
-            session().log_step(data['affirmation'], 0)
             affirmation = {
                 'text': data['affirmation'],
                 'parsed': data['parsed'][1],
             }
             question = data

-            show_progbar(i, total, data['affirmation'])
-            differences = knowledge.train([affirmation])
+            with session().log(data['affirmation']):
+                show_progbar(i, total, data['affirmation'])
+                differences = knowledge.train([affirmation])

-            show_progbar(i, total, data['text'])
-            differences = knowledge.train([question])
-            session().annotate(differences())
+            with session().log(data['text']):
+                show_progbar(i, total, data['text'])
+                differences = knowledge.train([question])
+                session().annotate(differences())

             result, _, _ = knowledge.process(data['text'])

@@ -720,20 +721,20 @@ def main():
                 raise AssertionError('{} is not {}'.format(result, data['answer']))

         elif example_type == 'text_example':
-            session().log_step(data['affirmation'], 0)
-
-            show_progbar(i, total, data['affirmation'])
-            affirmation = data['affirmation']
-            session().annotate("Processing affirmation: {}".format(affirmation))
-            _, _, _ = knowledge.process(affirmation)
-
-            show_progbar(i, total, data['question'])
-            question = data['question']
-            session().annotate("Processing question : {}".format(question))
-            result, _, _ = knowledge.process(question)
-
-            if result != data['answer']:
-                raise AssertionError('{} is not {}'.format(result, data['answer']))
+            with session().log(data['affirmation']):
+                show_progbar(i, total, data['affirmation'])
+                affirmation = data['affirmation']
+                session().annotate("Processing affirmation: {}".format(affirmation))
+                _, _, _ = knowledge.process(affirmation)
+
+            with session().log(data['question']):
+                show_progbar(i, total, data['question'])
+                question = data['question']
+                session().annotate("Processing question : {}".format(question))
+                result, _, _ = knowledge.process(question)
+
+                if result != data['answer']:
+                    raise AssertionError('{} is not {}'.format(result, data['answer']))

         else:
             raise NotImplementedError('Example type: {}'.format(example_type))