diff --git a/.gitea/workflows/tests.yaml b/.gitea/workflows/tests.yaml
index a3adf0a..f56a490 100644
--- a/.gitea/workflows/tests.yaml
+++ b/.gitea/workflows/tests.yaml
@@ -9,8 +9,8 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package pytest
+      - run: pip install -e .
+      - run: pip install pytest
       - run: pytest
 
   mypy:
@@ -19,8 +19,8 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package mypy
+      - run: pip install -e .
+      - run: pip install mypy
       - run: mypy org_rw --check-untyped-defs
 
   style-formatting:
@@ -29,8 +29,8 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package black
+      - run: pip install -e .
+      - run: pip install black
       - run: black --check .
 
   style-sorted-imports:
@@ -39,8 +39,8 @@ jobs:
      - name: Check out repository code
        uses: actions/checkout@v3
      - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package isort
+      - run: pip install -e .
+      - run: pip install isort
       - run: isort --profile black --check .
 
   stability-extra-test:
@@ -49,5 +49,5 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y git-core python3-pip
-      - run: pip install --break-system-package -e .
+      - run: pip install -e .
       - run: bash extra-tests/check_all.sh
diff --git a/README.org b/README.org
index 6f03720..95ec98a 100644
--- a/README.org
+++ b/README.org
@@ -7,12 +7,6 @@ A python library to parse, modify and save Org-mode files.
 - Modify these data and write it back to disk.
 - Keep the original structure intact (indentation, spaces, format, ...).
 
-** Principles
-- Avoid any dependency outside of Python's standard library.
-- Don't do anything outside of the scope of parsing/re-serializing Org-mode files.
-- *Modification of the original text if there's no change is considered a bug (see [[id:7363ba38-1662-4d3c-9e83-0999824975b7][Known issues]]).*
-- Data structures should be exposed as it's read on Emacs's org-mode or when in doubt as raw as possible.
-- Data in the objects should be modificable as a way to update the document itself. *Consider this a Object-oriented design.*
 ** Safety mechanism
 As this library is still in early development. Running it over files might produce
 unexpected changes on them. For this reason it's heavily recommended to
@@ -27,9 +21,6 @@ Also, see [[id:76e77f7f-c9e0-4c83-ad2f-39a5a8894a83][Known issues:Structure modi
 not properly stored and can trigger this safety mechanism on a false-positive.
 
 * Known issues
-:PROPERTIES:
-:ID: 7363ba38-1662-4d3c-9e83-0999824975b7
-:END:
 ** Structure modifications
 :PROPERTIES:
 :ID: 76e77f7f-c9e0-4c83-ad2f-39a5a8894a83
diff --git a/org_rw/dom.py b/org_rw/dom.py
index baf0092..f9ed40f 100644
--- a/org_rw/dom.py
+++ b/org_rw/dom.py
@@ -24,14 +24,6 @@ class ResultsDrawerNode(DrawerNode):
         return "".format(len(self.children))
 
 
-class GenericDrawerNode(DrawerNode):
-    def __init__(self, drawer_name):
-        self.drawer_name = drawer_name
-
-    def __repr__(self):
-        return "".format(self.drawer_name, len(self.children))
-
-
 class PropertyNode:
     def __init__(self, key, value):
         self.key = key
@@ -70,18 +62,12 @@ class TableSeparatorRow:
     def __init__(self, orig=None):
         self.orig = orig
 
-    def get_raw(self):
-        return get_raw_contents(self.orig)
-
 
 class TableRow:
     def __init__(self, cells, orig=None):
         self.cells = cells
         self.orig = orig
 
-    def get_raw(self):
-        return get_raw_contents(self.orig)
-
 
 class Text:
     def __init__(self, content):
diff --git a/org_rw/org_rw.py b/org_rw/org_rw.py
index 6baadd1..558e7a2 100644
--- a/org_rw/org_rw.py
+++ b/org_rw/org_rw.py
@@ -122,7 +122,6 @@ NON_FINISHED_GROUPS = (
     dom.ListGroupNode,
     dom.ResultsDrawerNode,
     dom.PropertyDrawerNode,
-    dom.GenericDrawerNode,
 )
 
 FREE_GROUPS = (dom.CodeBlock,)
@@ -330,7 +329,7 @@ class Headline:
         closed: Optional[Time] = None,
     ):
         self.start_line = start_line
-        self.depth = depth
+        self._depth = depth
         self.orig = orig
         self.properties = properties
         self.keywords = keywords
@@ -415,7 +414,6 @@ class Headline:
             if (
                 isinstance(line, DelimiterLine)
                 and line.delimiter_type == DelimiterLineType.END_BLOCK
-                and line.type_data.subtype == current_node.header.type_data.subtype
             ):
 
                 start = current_node.header.linenum
@@ -638,13 +636,6 @@ class Headline:
                 assert current_node is None
                 current_node = dom.ResultsDrawerNode()
 
-                # TODO: Allow indentation of these blocks inside others
-                indentation_tree = [current_node]
-                tree.append(current_node)
-            elif content.strip().startswith(":") and content.strip().endswith(":"):
-                assert current_node is None
-                current_node = dom.GenericDrawerNode(content.strip().strip(":"))
-
                 # TODO: Allow indentation of these blocks inside others
                 indentation_tree = [current_node]
                 tree.append(current_node)
@@ -771,6 +762,16 @@ class Headline:
                 pass
         self._state = new_state
 
+    @property
+    def depth(self):
+        return self._depth
+
+    @depth.setter
+    def depth(self, value):
+        self._depth = value
+        for child in self.children:
+            child.depth = value + 1
+
     @property
     def clock(self):
         times = []
@@ -873,24 +874,9 @@ class Headline:
             yield from get_links_from_content(item.content)
 
     def get_lines_between(self, start, end):
-        # @TODO: Generalize for other line types too.
-        everything = (
-            []
-            # + self.keywords
-            + self.contents
-            # + self.list_items
-            # + self.table_rows
-            # + self.properties
-            # + self.structural
-            + self.delimiters
-        )
-
-        for line in everything:
+        for line in self.contents:
             if start <= line.linenum < end:
-                if "get_raw" in dir(line):
-                    yield "".join(line.get_raw())
-                else:
-                    yield line.line
+                yield "".join(line.get_raw())
 
     def get_contents(self, format):
         if format == "raw":
@@ -1090,6 +1076,38 @@ class Headline:
         self.children.append(headline)
         return headline
 
+    def refile(
+        self, destination: Union["Headline", OrgDoc], top: bool = False
+    ) -> Union["Headline", OrgDoc]:
+        """Refile this headline to a new destination.
+
+        Args:
+            destination: The headline to which this headline will be moved
+            top: Whether to append to bottom or insert at top of destination's children
+
+        Returns:
+            The destination headline
+        """
+        # Remove from the parent
+        if self.parent:
+            if isinstance(self.parent, Headline):
+                self.parent.children.remove(self)
+            else:
+                self.parent.headlines.remove(self)
+
+        # Add ourselves to the destination
+        if top:
+            destination.children.insert(0, self)
+        else:
+            destination.children.append(self)
+
+        # Adjust the depth
+        self.depth = destination.depth + 1
+
+        # Adjust our parent
+        self.parent = destination
+        return destination
+
 
 RawLine = collections.namedtuple("RawLine", ("linenum", "line"))
 Keyword = collections.namedtuple(
@@ -2331,7 +2349,6 @@ class OrgDoc:
         list_items,
         structural,
         properties,
-        delimiters,
         environment=BASE_ENVIRONMENT,
     ):
         self.todo_keywords = [HeadlineState(name=kw) for kw in DEFAULT_TODO_KEYWORDS]
@@ -2361,7 +2378,6 @@ class OrgDoc:
         self.list_items: List[ListItem] = list_items
         self.structural: List = structural
         self.properties: List = properties
-        self.delimiters: List = delimiters
         self._path = None
         self.headlines: List[Headline] = list(
             map(lambda hl: parse_headline(hl, self, self), headlines)
@@ -2392,6 +2408,24 @@ class OrgDoc:
     def shallow_tags(self) -> list[str]:
         return self.tags
 
+    @property
+    def depth(self):
+        """
+        Attribute to be compatible with the signature of the Headlines.
+
+        Useful when doing operations across the headline hierarchy
+        """
+        return 0
+
+    @property
+    def children(self):
+        """
+        Attribute to be compatible with the signature of the Headlines.
+
+        Useful when doing operations across the headline hierarchy
+        """
+        return self.headlines
+
     ## Querying
     def get_links(self):
         for headline in self.headlines:
@@ -2526,9 +2560,6 @@ class OrgDoc:
         for struct in self.structural:
             lines.append(dump_structural(struct))
 
-        for content in self.delimiters:
-            lines.append(dump_delimiters(content))
-
         for kw in self.keywords:
             lines.append(dump_kw(kw))
 
@@ -2566,7 +2597,6 @@ class OrgDocReader:
             self.list_items,
             self.structural,
             self.properties,
-            self.delimiters,
             self.environment,
         )
 
diff --git a/org_rw/utils.py b/org_rw/utils.py
index 87f6712..5b8b4e5 100644
--- a/org_rw/utils.py
+++ b/org_rw/utils.py
@@ -9,7 +9,6 @@ from .org_rw import (
     ListItem,
     RawLine,
     Strike,
-    TableRow,
     Text,
     Underlined,
     Verbatim,
@@ -51,8 +50,6 @@ def get_raw_contents(doc) -> str:
         return doc.get_raw()
     if isinstance(doc, ListItem):
         return dump_contents(doc)[1]
-    if isinstance(doc, TableRow):
-        return dump_contents(doc)[1]
     print("Unhandled type: " + str(doc))
     raise NotImplementedError("Unhandled type: " + str(doc))
 
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..1c51c66
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+# No external requirements at this point
diff --git a/tests/test_org.py b/tests/test_org.py
index a1fdff1..59441d5 100644
--- a/tests/test_org.py
+++ b/tests/test_org.py
@@ -1024,6 +1024,171 @@ class TestSerde(unittest.TestCase):
 
         self.assertEqual(dumps(doc), "* TODO First entry")
 
+    def test_refile_headline_down_to_bottom(self) -> None:
+        orig = """* Source Headline
+** Child of Source
+* Destination Headline
+** Existing Child"""
+        doc = loads(orig)
+
+        source_headline = doc.headlines[0]
+        destination_headline = doc.headlines[1]
+
+        result = source_headline.refile(destination_headline)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[-1] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Headline
+** Existing Child
+** Source Headline
+*** Child of Source"""
+        )
+
+    def test_refile_headline_down_to_top(self) -> None:
+        orig = """* Source Headline
+** Child of Source
+* Destination Headline
+** Existing Child"""
+        doc = loads(orig)
+
+        source_headline = doc.headlines[0]
+        destination_headline = doc.headlines[1]
+
+        result = source_headline.refile(destination_headline, top=True)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[0] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Headline
+** Source Headline
+*** Child of Source
+** Existing Child"""
+        )
+
+    def test_refile_headline_down_to_existing_child(self) -> None:
+        orig = """* Source Headline
+** Child of Source
+* Destination Parent
+** Destination Headline"""
+        doc = loads(orig)
+
+        source_headline = doc.headlines[0]
+        destination_headline = doc.headlines[1]
+        destination_child = destination_headline.children[0]
+
+        result = source_headline.refile(destination_child)
+
+        assert result == destination_child
+        assert source_headline.parent == destination_child
+        assert source_headline in destination_child.children
+        assert destination_child.children[-1] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Parent
+** Destination Headline
+*** Source Headline
+**** Child of Source"""
+        )
+
+    def test_refile_headline_from_child_to_parent_bottom(self) -> None:
+        orig = """* Destination Headline
+** Existing Child
+*** Source Headline
+**** Source Child"""
+        doc = loads(orig)
+        source_headline = doc.headlines[0].children[0].children[0]
+        destination_headline = doc.headlines[0]
+
+        result = source_headline.refile(destination_headline)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[-1] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Headline
+** Existing Child
+** Source Headline
+*** Source Child"""
+        )
+
+    def test_refile_headline_from_child_to_parent_top(self) -> None:
+        orig = """* Destination Headline
+** Existing Child
+*** Source Headline
+**** Source Child"""
+        doc = loads(orig)
+        source_headline = doc.headlines[0].children[0].children[0]
+        destination_headline = doc.headlines[0]
+
+        result = source_headline.refile(destination_headline, top=True)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[0] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Headline
+** Source Headline
+*** Source Child
+** Existing Child"""
+        )
+
+    def test_refile_headline_from_child_to_first_level_at_bottom(self) -> None:
+        orig = """* Destination Headline
+** Existing Child
+*** Source Headline
+**** Source Child"""
+        doc = loads(orig)
+        source_headline = doc.headlines[0].children[0].children[0]
+        destination_headline = doc.headlines[0].parent
+
+        result = source_headline.refile(destination_headline)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[-1] == source_headline
+        assert (
+            dumps(doc)
+            == """* Destination Headline
+** Existing Child
+* Source Headline
+** Source Child"""
+        )
+
+    def test_refile_headline_from_child_to_first_level_at_top(self) -> None:
+        orig = """* Destination Headline
+** Existing Child
+*** Source Headline
+**** Source Child"""
+        doc = loads(orig)
+        source_headline = doc.headlines[0].children[0].children[0]
+        destination_headline = doc.headlines[0].parent
+
+        result = source_headline.refile(destination_headline, top=True)
+
+        assert result == destination_headline
+        assert source_headline.parent == destination_headline
+        assert source_headline in destination_headline.children
+        assert destination_headline.children[0] == source_headline
+        assert (
+            dumps(doc)
+            == """* Source Headline
+** Source Child
+* Destination Headline
+** Existing Child"""
+        )
+
 
 def print_tree(tree, indentation=0, headline=None):
     for element in tree:
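
For reviewers, a minimal usage sketch of the new Headline.refile API, mirroring the added tests; it assumes the loads/dumps entry points that the test suite already imports from org_rw, and the org text is only illustrative:

from org_rw import dumps, loads

doc = loads("""* Source Headline
** Child of Source
* Destination Headline
** Existing Child""")

source = doc.headlines[0]
destination = doc.headlines[1]

# refile() detaches `source` (with its subtree) from its current parent and
# appends it as the last child of `destination`; top=True inserts it first.
# Depths of the moved headline and its children are adjusted automatically.
source.refile(destination)

assert dumps(doc) == """* Destination Headline
** Existing Child
** Source Headline
*** Child of Source"""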