diff --git a/naive-nlu/depth_meter.py b/naive-nlu/depth_meter.py
index 7d8bb61..db1dab9 100644
--- a/naive-nlu/depth_meter.py
+++ b/naive-nlu/depth_meter.py
@@ -1,11 +1,10 @@
 import sys
+import parameters
 
-def show_depth(depth: int):
-    multiplier = 3
-    max_depth = 5
-    offset = int((max_depth - depth) / (2 / multiplier))
+def show_depth(depth: int, zoom: int=2):
+    offset = int((parameters.MAX_RECURSIONS - depth) / (2 / zoom))
 
-    depth = depth * multiplier
+    depth = depth * zoom
     offset -= int(depth % 2)
 
     sys.stdout.write("\r|\x1b[K" + (u'█' * int(depth / 2)) + (u'▌' * int(depth % 2)) + ' ' * offset + "|\x1b[7m \x1b[0m\b")
diff --git a/naive-nlu/parameters.py b/naive-nlu/parameters.py
new file mode 100644
index 0000000..0a845fc
--- /dev/null
+++ b/naive-nlu/parameters.py
@@ -0,0 +1 @@
+MAX_RECURSIONS = 5
diff --git a/naive-nlu/parsing.py b/naive-nlu/parsing.py
index 353cabb..4044c9a 100644
--- a/naive-nlu/parsing.py
+++ b/naive-nlu/parsing.py
@@ -10,8 +10,7 @@ import copy
 from functools import reduce
 from typing import List
 from modifiable_property import ModifiableProperty
-
-MAX_RECURSIONS = 5
+import parameters
 
 # TODO: more flexible tokenization
 def to_tokens(text):
@@ -326,7 +325,7 @@ def fitting_return_type(knowledge,
                         input_stream,
                         tail_of_ouput_stream,
                         remaining_recursions: int):
-    indent = " " + " " * (MAX_RECURSIONS - remaining_recursions)
+    indent = " " + " " * (parameters.MAX_RECURSIONS - remaining_recursions)
 
     for sample, ast in knowledge.trained:
         try:
@@ -396,11 +395,11 @@ def match_token(knowledge,
     if remaining_recursions < 1:
         yield None
 
-    # logging.debug("#" * (MAX_RECURSIONS - remaining_recursions))
+    # logging.debug("#" * (parameters.MAX_RECURSIONS - remaining_recursions))
     # logging.debug("Input:", input)
     # logging.debug("Output:", trained)
-    depth_meter.show_depth(MAX_RECURSIONS - remaining_recursions)
-    indent = " " + " " * (MAX_RECURSIONS - remaining_recursions)
+    depth_meter.show_depth(parameters.MAX_RECURSIONS - remaining_recursions)
+    indent = " " + " " * (parameters.MAX_RECURSIONS - remaining_recursions)
     first_input = input[0]
     expected_first = trained[0]
     logging.debug(indent + "Ex?", expected_first)
@@ -426,7 +425,7 @@ def match_token(knowledge,
 
 
 def get_fit_onwards(knowledge, ast, remaining_input, remaining_output, remaining_recursions):
-    indent = "." + " " * (MAX_RECURSIONS - remaining_recursions)
+    indent = "." + " " * (parameters.MAX_RECURSIONS - remaining_recursions)
     try:
         # TODO: merge with get_return type, as uses the same mechanism
         if len(remaining_output) > 0:
@@ -465,9 +464,9 @@ def get_fit_onwards(knowledge, ast, remaining_input, remaining_output, remaining
         yield None
     return None
 
-def get_fit(knowledge, row, remaining_recursions=MAX_RECURSIONS):
+def get_fit(knowledge, row, remaining_recursions=parameters.MAX_RECURSIONS):
     tokens = to_tokens(row)
-    indent = " " * (MAX_RECURSIONS - remaining_recursions)
+    indent = " " * (parameters.MAX_RECURSIONS - remaining_recursions)
     for sample, ast in knowledge.trained:
         logging.debug("-----")
         logging.debug("TOK:", tokens)