diff --git a/.gitea/workflows/tests.yaml b/.gitea/workflows/tests.yaml
index a3adf0a..ee49a29 100644
--- a/.gitea/workflows/tests.yaml
+++ b/.gitea/workflows/tests.yaml
@@ -9,8 +9,8 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package pytest
+      - run: pip install -e .
+      - run: pip install pytest
       - run: pytest
 
   mypy:
@@ -19,35 +19,15 @@ jobs:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package mypy
+      - run: pip install -e .
+      - run: pip install mypy
       - run: mypy org_rw --check-untyped-defs
 
-  style-formatting:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Check out repository code
-        uses: actions/checkout@v3
-      - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package black
-      - run: black --check .
-
-  style-sorted-imports:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Check out repository code
-        uses: actions/checkout@v3
-      - run: apt-get update && apt-get install -y python3-pip
-      - run: pip install --break-system-package -e .
-      - run: pip install --break-system-package isort
-      - run: isort --profile black --check .
-
   stability-extra-test:
     runs-on: ubuntu-latest
     steps:
       - name: Check out repository code
         uses: actions/checkout@v3
       - run: apt-get update && apt-get install -y git-core python3-pip
-      - run: pip install --break-system-package -e .
+      - run: pip install -e .
       - run: bash extra-tests/check_all.sh
diff --git a/README.org b/README.org
index 6f03720..95ec98a 100644
--- a/README.org
+++ b/README.org
@@ -7,12 +7,6 @@ A python library to parse, modify and save Org-mode files.
 - Modify these data and write it back to disk.
 - Keep the original structure intact (indentation, spaces, format, ...).
 
-** Principles
-- Avoid any dependency outside of Python's standard library.
-- Don't do anything outside of the scope of parsing/re-serializing Org-mode files.
-- *Modification of the original text if there's no change is considered a bug (see [[id:7363ba38-1662-4d3c-9e83-0999824975b7][Known issues]]).*
-- Data structures should be exposed as it's read on Emacs's org-mode or when in doubt as raw as possible.
-- Data in the objects should be modificable as a way to update the document itself. *Consider this a Object-oriented design.*
 ** Safety mechanism
 As this library is still in early development. Running it over files might
 produce unexpected changes on them. For this reason it's heavily recommended to
@@ -27,9 +21,6 @@ Also, see [[id:76e77f7f-c9e0-4c83-ad2f-39a5a8894a83][Known issues:Structure modi
 not properly stored and can trigger this safety mechanism on a false-positive.
 
 * Known issues
-:PROPERTIES:
-:ID:       7363ba38-1662-4d3c-9e83-0999824975b7
-:END:
 ** Structure modifications
 :PROPERTIES:
 :ID:       76e77f7f-c9e0-4c83-ad2f-39a5a8894a83
diff --git a/org_rw/dom.py b/org_rw/dom.py
index baf0092..cd8d63b 100644
--- a/org_rw/dom.py
+++ b/org_rw/dom.py
@@ -24,14 +24,6 @@ class ResultsDrawerNode(DrawerNode):
         return "".format(len(self.children))
 
 
-class GenericDrawerNode(DrawerNode):
-    def __init__(self, drawer_name):
-        self.drawer_name = drawer_name
-
-    def __repr__(self):
-        return "".format(self.drawer_name, len(self.children))
-
-
 class PropertyNode:
     def __init__(self, key, value):
         self.key = key
@@ -49,12 +41,11 @@ class ListGroupNode:
         self.children.append(child)
 
     def get_raw(self):
-        return "\n".join([c.get_raw() for c in self.children])
+        return '\n'.join([c.get_raw() for c in self.children])
 
     def __repr__(self):
         return "".format(len(self.children))
 
-
 class TableNode:
     def __init__(self):
         self.children = []
@@ -65,30 +56,21 @@ class TableNode:
     def __repr__(self):
         return "".format(len(self.children))
 
-
 class TableSeparatorRow:
     def __init__(self, orig=None):
         self.orig = orig
 
-    def get_raw(self):
-        return get_raw_contents(self.orig)
-
-
 class TableRow:
     def __init__(self, cells, orig=None):
         self.cells = cells
         self.orig = orig
 
-    def get_raw(self):
-        return get_raw_contents(self.orig)
-
-
 class Text:
     def __init__(self, content):
         self.content = content
 
     def get_raw(self):
-        return "".join(self.content.get_raw())
+        return ''.join(self.content.get_raw())
 
 
 class ListItem:
@@ -123,24 +105,21 @@ class CodeBlock(BlockNode):
     def __repr__(self):
         return "".format(len(self.lines or []))
 
+DomNode = Union[DrawerNode,
+                PropertyNode,
+                ListGroupNode,
+                TableNode,
+                TableSeparatorRow,
+                TableRow,
+                Text,
+                ListItem,
+                BlockNode,
+                ]
 
-DomNode = Union[
-    DrawerNode,
-    PropertyNode,
-    ListGroupNode,
-    TableNode,
-    TableSeparatorRow,
-    TableRow,
-    Text,
-    ListItem,
-    BlockNode,
-]
-
-ContainerDomNode = Union[
-    DrawerNode,
-    ListGroupNode,
-    TableNode,
-    BlockNode,
-]
+ContainerDomNode = Union[DrawerNode,
+                         ListGroupNode,
+                         TableNode,
+                         BlockNode,
+                         ]
 
 from .utils import get_raw_contents
diff --git a/org_rw/org_rw.py b/org_rw/org_rw.py
index 6baadd1..9b25ed9 100644
--- a/org_rw/org_rw.py
+++ b/org_rw/org_rw.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
-
+from typing import Optional
+from datetime import timedelta
 import collections
 import difflib
 import logging
@@ -8,21 +9,12 @@ import re
 import sys
 from datetime import date, datetime, timedelta
 from enum import Enum
-from typing import (
-    Dict,
-    Iterator,
-    List,
-    Literal,
-    Optional,
-    TextIO,
-    Tuple,
-    TypedDict,
-    Union,
-    cast,
-)
+from typing import cast, Iterator, List, Literal, Optional, Tuple, TypedDict, Union
+
+from .types import HeadlineDict
 
 from . import dom
-from .types import HeadlineDict
+
 
 
 DEBUG_DIFF_CONTEXT = 10
@@ -31,9 +23,7 @@ DEFAULT_DONE_KEYWORDS = ["DONE"]
 
 BASE_ENVIRONMENT = {
     "org-footnote-section": "Footnotes",
-    "org-todo-keywords": " ".join(DEFAULT_TODO_KEYWORDS)
-    + " | "
-    + " ".join(DEFAULT_DONE_KEYWORDS),
+    "org-todo-keywords": ' '.join(DEFAULT_TODO_KEYWORDS) + ' | ' + ' '.join(DEFAULT_DONE_KEYWORDS),
     "org-options-keywords": (
         "ARCHIVE:",
         "AUTHOR:",
@@ -103,7 +93,7 @@ PLANNING_RE = re.compile(
     r")+\s*"
 )
 LIST_ITEM_RE = re.compile(
-    r"(?P\s*)((?P[*\-+])|((?P\d|[a-zA-Z])(?P[.)]))) ((?P\s*)\[(?P[ Xx])\])?((?P\s*)((?P.*?)\s::))?(?P.*)"
+    r"(?P\s*)((?P[*\-+])|((?P\d|[a-zA-Z])(?P[.)]))) ((?P\s*)\[(?P[ Xx])\])?((?P\s*)(?P.*?)::)?(?P.*)"
 )
 
 IMPLICIT_LINK_RE = re.compile(r"(https?:[^<> ]*[a-zA-Z0-9])")
@@ -113,7 +103,7 @@ BEGIN_BLOCK_RE = re.compile(r"^\s*#\+BEGIN_(?P[^ ]+)(?P.*)$"
 END_BLOCK_RE = re.compile(r"^\s*#\+END_(?P[^ ]+)\s*$", re.I)
 RESULTS_DRAWER_RE = re.compile(r"^\s*:results:\s*$", re.I)
 CodeSnippet = collections.namedtuple(
-    "CodeSnippet", ("name", "content", "result", "language", "arguments")
+    "CodeSnippet", ("name", "content", "result", "arguments")
 )
 
 # Groupings
@@ -122,17 +112,14 @@ NON_FINISHED_GROUPS = (
     dom.ListGroupNode,
     dom.ResultsDrawerNode,
     dom.PropertyDrawerNode,
-    dom.GenericDrawerNode,
 )
 FREE_GROUPS = (dom.CodeBlock,)
 
-
 # States
 class HeadlineState(TypedDict):
     # To be extended to handle keyboard shortcuts
     name: str
 
-
 class OrgDocDeclaredStates(TypedDict):
     not_completed: List[HeadlineState]
     completed: List[HeadlineState]
@@ -338,7 +325,7 @@ class Headline:
         self.priority = priority
         self.title_start = title_start
         self.title = parse_content_block([RawLine(linenum=start_line, line=title)])
-        self._state = state
+        self.state = state
         self.tags_start = tags_start
         self.shallow_tags = tags
         self.contents = contents
@@ -415,7 +402,6 @@ class Headline:
                 if (
                     isinstance(line, DelimiterLine)
                     and line.delimiter_type == DelimiterLineType.END_BLOCK
-                    and line.type_data.subtype == current_node.header.type_data.subtype
                 ):
 
                     start = current_node.header.linenum
@@ -638,13 +624,6 @@ class Headline:
                 assert current_node is None
                 current_node = dom.ResultsDrawerNode()
 
-                # TODO: Allow indentation of these blocks inside others
-                indentation_tree = [current_node]
-                tree.append(current_node)
-            elif content.strip().startswith(":") and content.strip().endswith(":"):
-                assert current_node is None
-                current_node = dom.GenericDrawerNode(content.strip().strip(":"))
-
                 # TODO: Allow indentation of these blocks inside others
                 indentation_tree = [current_node]
                 tree.append(current_node)
@@ -735,42 +714,6 @@ class Headline:
     def id(self, value):
         self.set_property("ID", value)
 
-    @property
-    def state(self) -> HeadlineState:
-        return self._state
-
-    @state.setter
-    def state(self, new_state: Union[None, str, HeadlineState]) -> None:
-        """
-        Update the state of a Headline. If the state is a known one it will update it's TODO/DONE properties.
-
-        Args:
-            new_state (str|HeadlineState): New state, either it's literal value or it's structure.
-        """
-        if new_state is None:
-            self.is_todo = False
-            self.is_done = False
-            # TODO: Check & log if appropriate?
-            self._state = None
-            return
-
-        if isinstance(new_state, str):
-            new_state = HeadlineState(name=new_state)
-
-        state_name = new_state["name"]
-        if state_name in [kw["name"] for kw in self.doc.todo_keywords]:
-            self.is_todo = True
-            self.is_done = False
-            # TODO: Check & log if appropriate?
-        elif state_name in [kw["name"] for kw in self.doc.done_keywords]:
-            self.is_todo = False
-            self.is_done = True
-            # TODO: Check, log & if appropriate?
-        else:
-            # TODO: Should we raise a warning, raise an exception, update the is_todo/is_done?
-            pass
-        self._state = new_state
-
     @property
     def clock(self):
         times = []
@@ -797,20 +740,11 @@ class Headline:
         return times
 
     @property
-    def tags(self) -> list[str]:
-        parent_tags = self.parent.tags
-        if self.doc.environment.get("org-use-tag-inheritance"):
-            accepted_tags = []
-            for tag in self.doc.environment.get("org-use-tag-inheritance"):
-                if tag in parent_tags:
-                    accepted_tags.append(tag)
-            parent_tags = accepted_tags
-
-        elif self.doc.environment.get("org-tags-exclude-from-inheritance"):
-            for tag in self.doc.environment.get("org-tags-exclude-from-inheritance"):
-                if tag in parent_tags:
-                    parent_tags.remove(tag)
-        return list(self.shallow_tags) + parent_tags
+    def tags(self):
+        if isinstance(self.parent, OrgDoc):
+            return list(self.shallow_tags)
+        else:
+            return list(self.shallow_tags) + self.parent.tags
 
     def add_tag(self, tag: str):
         self.shallow_tags.append(tag)
@@ -873,24 +807,9 @@ class Headline:
             yield from get_links_from_content(item.content)
 
     def get_lines_between(self, start, end):
-        # @TODO: Generalize for other line types too.
-        everything = (
-            []
-            # + self.keywords
-            + self.contents
-            # + self.list_items
-            # + self.table_rows
-            # + self.properties
-            # + self.structural
-            + self.delimiters
-        )
-
-        for line in everything:
+        for line in self.contents:
             if start <= line.linenum < end:
-                if "get_raw" in dir(line):
-                    yield "".join(line.get_raw())
-                else:
-                    yield line.line
+                yield "".join(line.get_raw())
 
     def get_contents(self, format):
         if format == "raw":
@@ -942,12 +861,6 @@ class Headline:
         sections = []
         arguments = None
 
-        names_by_line = {}
-        for kw in self.keywords:
-            if kw.key == "NAME":
-                names_by_line[kw.linenum] = kw.value
-
-        name = None
         for delimiter in self.delimiters:
             if (
                 delimiter.delimiter_type == DelimiterLineType.BEGIN_BLOCK
@@ -956,12 +869,6 @@ class Headline:
                 line_start = delimiter.linenum
                 inside_code = True
                 arguments = delimiter.arguments
-
-                name_line = line_start - 1
-                if name_line in names_by_line:
-                    name = names_by_line[name_line]
-                else:
-                    name = None
             elif (
                 delimiter.delimiter_type == DelimiterLineType.END_BLOCK
                 and delimiter.type_data.subtype.lower() == "src"
@@ -976,26 +883,14 @@ class Headline:
                     # the content parsing must be re-thinked
                     contents = contents[:-1]
 
-                    language = None
-                    if arguments is not None:
-                        arguments = arguments.strip()
-                        if " " in arguments:
-                            language = arguments[: arguments.index(" ")]
-                            arguments = arguments[arguments.index(" ") + 1 :]
-                        else:
-                            language = arguments
-                            arguments = None
                     sections.append(
                         {
                             "line_first": start + 1,
                             "line_last": end - 1,
                             "content": contents,
                             "arguments": arguments,
-                            "language": language,
-                            "name": name,
                         }
                     )
-                    name = None
                     arguments = None
                     line_start = None
 
@@ -1044,18 +939,13 @@ class Headline:
 
         results = []
         for section in sections:
+            name = None
             content = section["content"]
            code_result = section.get("result", None)
             arguments = section.get("arguments", None)
-            language = section.get("language", None)
-            name = section.get("name", None)
             results.append(
                 CodeSnippet(
-                    content=content,
-                    result=code_result,
-                    arguments=arguments,
-                    language=language,
-                    name=name,
+                    name=name, content=content, result=code_result, arguments=arguments
                 )
             )
 
@@ -1204,9 +1094,7 @@ class Timestamp:
             datetime: The corresponding datetime object.
""" if self.hour is not None: - return datetime( - self.year, self.month, self.day, self.hour, self.minute or 0 - ) + return datetime(self.year, self.month, self.day, self.hour, self.minute or 0) else: return datetime(self.year, self.month, self.day, 0, 0) @@ -1605,6 +1493,7 @@ class OrgTime: """ return self.time.active + @active.setter def active(self, value: bool) -> None: """ @@ -1779,7 +1668,7 @@ class Text: def __repr__(self): return "{{Text line: {}; content: {} }}".format(self.linenum, self.contents) - def get_text(self) -> str: + def get_text(self): return token_list_to_plaintext(self.contents) def get_raw(self): @@ -2009,12 +1898,7 @@ def tokenize_contents(contents: str) -> List[TokenItems]: continue # Possible link close or open of description - if ( - char == "]" - and len(contents) > i + 1 - and in_link - and contents[i + 1] in "][" - ): + if char == "]" and len(contents) > i + 1 and in_link: if contents[i + 1] == "]": cut_string() @@ -2065,7 +1949,6 @@ def tokenize_contents(contents: str) -> List[TokenItems]: cut_string() tokens.append((TOKEN_TYPE_CLOSE_MARKER, char)) has_changed = True - closes.remove(i) if not has_changed: text.append(char) @@ -2108,7 +1991,7 @@ def parse_contents(raw_contents: List[RawLine]): return [parse_content_block(block) for block in blocks] -def parse_content_block(raw_contents: Union[List[RawLine], str]) -> Text: +def parse_content_block(raw_contents: Union[List[RawLine], str]): contents_buff = [] if isinstance(raw_contents, str): contents_buff.append(raw_contents) @@ -2156,7 +2039,7 @@ def dump_contents(raw): content = "\n".join(content_lines) checkbox = f"[{raw.checkbox_value}]" if raw.checkbox_value else "" tag = ( - f"{raw.tag_indentation}{token_list_to_raw(raw.tag or '')} ::" + f"{raw.tag_indentation}{token_list_to_raw(raw.tag or '')}::" if raw.tag or raw.tag_indentation else "" ) @@ -2194,16 +2077,16 @@ def parse_headline(hl, doc, parent) -> Headline: title = line is_done = is_todo = False for state in doc.todo_keywords or []: - if title.startswith(state["name"] + " "): + if title.startswith(state['name'] + " "): hl_state = state - title = title[len(state["name"] + " ") :] + title = title[len(state['name'] + " ") :] is_todo = True break else: for state in doc.done_keywords or []: - if title.startswith(state["name"] + " "): + if title.startswith(state['name'] + " "): hl_state = state - title = title[len(state["name"] + " ") :] + title = title[len(state['name'] + " ") :] is_done = True break @@ -2302,7 +2185,7 @@ def dump_delimiters(line: DelimiterLine): def parse_todo_done_keywords(line: str) -> OrgDocDeclaredStates: clean_line = re.sub(r"\([^)]+\)", "", line) - if "|" in clean_line: + if '|' in clean_line: todo_kws, done_kws = clean_line.split("|", 1) has_split = True else: @@ -2317,51 +2200,42 @@ def parse_todo_done_keywords(line: str) -> OrgDocDeclaredStates: todo_keywords = todo_keywords[:-1] return { - "not_completed": [HeadlineState(name=keyword) for keyword in todo_keywords], - "completed": [HeadlineState(name=keyword) for keyword in done_keywords], + "not_completed": [ + HeadlineState(name=keyword) + for keyword in todo_keywords + ], + "completed": [ + HeadlineState(name=keyword) + for keyword in done_keywords + ], } class OrgDoc: def __init__( - self, - headlines, - keywords, - contents, - list_items, - structural, - properties, - delimiters, + self, headlines, keywords, contents, list_items, structural, properties, environment=BASE_ENVIRONMENT, ): self.todo_keywords = [HeadlineState(name=kw) for kw in DEFAULT_TODO_KEYWORDS] 
         self.done_keywords = [HeadlineState(name=kw) for kw in DEFAULT_DONE_KEYWORDS]
-        self.environment = environment
 
         keywords_set_in_file = False
         for keyword in keywords:
             if keyword.key in ("TODO", "SEQ_TODO"):
                 states = parse_todo_done_keywords(keyword.value)
-                self.todo_keywords, self.done_keywords = (
-                    states["not_completed"],
-                    states["completed"],
-                )
+                self.todo_keywords, self.done_keywords = states['not_completed'], states['completed']
                 keywords_set_in_file = True
 
-        if not keywords_set_in_file and "org-todo-keywords" in environment:
+        if not keywords_set_in_file and 'org-todo-keywords' in environment:
             # Read keywords from environment
-            states = parse_todo_done_keywords(environment["org-todo-keywords"])
-            self.todo_keywords, self.done_keywords = (
-                states["not_completed"],
-                states["completed"],
-            )
+            states = parse_todo_done_keywords(environment['org-todo-keywords'])
+            self.todo_keywords, self.done_keywords = states['not_completed'], states['completed']
 
         self.keywords: List[Property] = keywords
         self.contents: List[RawLine] = contents
         self.list_items: List[ListItem] = list_items
         self.structural: List = structural
         self.properties: List = properties
-        self.delimiters: List = delimiters
         self._path = None
         self.headlines: List[Headline] = list(
             map(lambda hl: parse_headline(hl, self, self), headlines)
@@ -2381,17 +2255,6 @@ class OrgDoc:
     def path(self):
         return self._path
 
-    @property
-    def tags(self) -> list[str]:
-        for kw in self.keywords:
-            if kw.key == "FILETAGS":
-                return kw.value.strip(":").split(":")
-        return []
-
-    @property
-    def shallow_tags(self) -> list[str]:
-        return self.tags
-
     ## Querying
     def get_links(self):
         for headline in self.headlines:
@@ -2429,7 +2292,7 @@ class OrgDoc:
             yield hl
 
     def get_code_snippets(self):
-        for headline in self.getAllHeadlines():
+        for headline in self.headlines:
             yield from headline.get_code_snippets()
 
     # Writing
@@ -2440,8 +2303,8 @@ class OrgDoc:
             tags = ":" + ":".join(headline.shallow_tags) + ":"
 
         state = ""
-        if headline._state:
-            state = headline._state["name"] + " "
+        if headline.state:
+            state = headline.state['name'] + " "
 
         raw_title = token_list_to_raw(headline.title.contents)
         tags_padding = ""
@@ -2526,9 +2389,6 @@ class OrgDoc:
         for struct in self.structural:
             lines.append(dump_structural(struct))
 
-        for content in self.delimiters:
-            lines.append(dump_delimiters(content))
-
         for kw in self.keywords:
             lines.append(dump_kw(kw))
 
@@ -2558,7 +2418,7 @@ class OrgDocReader:
         self.current_drawer: Optional[List] = None
         self.environment = environment
 
-    def finalize(self) -> OrgDoc:
+    def finalize(self):
         return OrgDoc(
             self.headlines,
             self.keywords,
@@ -2566,7 +2426,6 @@ class OrgDocReader:
             self.list_items,
             self.structural,
             self.properties,
-            self.delimiters,
             self.environment,
         )
 
@@ -2865,26 +2724,7 @@ class OrgDocReader:
                 raise
 
 
-def loads(
-    s: str, environment: Optional[Dict] = BASE_ENVIRONMENT, extra_cautious: bool = True
-) -> OrgDoc:
-    """
-    Load an Org-mode document from a string.
-
-    Args:
-        s (str): The string representation of the Org-mode document.
-        environment (Optional[dict]): The environment for parsing. Defaults to
-            `BASE_ENVIRONMENT`.
-        extra_cautious (bool): If True, perform an extra check to ensure that
-            the document can be re-serialized to the original string. Defaults to True.
-
-    Returns:
-        OrgDoc: The loaded Org-mode document.
-
-    Raises:
-        NonReproducibleDocument: If `extra_cautious` is True and there is a
-            difference between the original string and the re-serialized document.
- """ +def loads(s, environment=BASE_ENVIRONMENT, extra_cautious=True): reader = OrgDocReader(environment) reader.read(s) doc = reader.finalize() @@ -2924,55 +2764,20 @@ def loads( return doc -def load( - f: TextIO, - environment: Optional[dict] = BASE_ENVIRONMENT, - extra_cautious: bool = False, -) -> OrgDoc: - """ - Load an Org-mode document from a file object. - - Args: - f (TextIO): The file object containing the Org-mode document. - environment (Optional[dict]): The environment for parsing. Defaults to - `BASE_ENVIRONMENT`. - extra_cautious (bool): If True, perform an extra check to ensure that - the document can be re-serialized to the original string. Defaults to False. - - Returns: - OrgDoc: The loaded Org-mode document. - """ +def load(f, environment=BASE_ENVIRONMENT, extra_cautious=False): doc = loads(f.read(), environment, extra_cautious) doc._path = os.path.abspath(f.name) return doc -def dumps(doc: OrgDoc) -> str: - """ - Serialize an OrgDoc object to a string. - - Args: - doc (OrgDoc): The OrgDoc object to serialize. - - Returns: - str: The serialized string representation of the OrgDoc object. - """ +def dumps(doc): dump = list(doc.dump()) result = "\n".join(dump) + # print(result) return result -def dump(doc: OrgDoc, fp: TextIO) -> None: - """ - Serialize an OrgDoc object to a file. - - Args: - doc (OrgDoc): The OrgDoc object to serialize. - fp (TextIO): The file-like object to write the serialized data to. - - Returns: - None - """ +def dump(doc, fp): it = doc.dump() # Write first line separately diff --git a/org_rw/py.typed b/org_rw/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/org_rw/utils.py b/org_rw/utils.py index 87f6712..0e6f559 100644 --- a/org_rw/utils.py +++ b/org_rw/utils.py @@ -1,20 +1,9 @@ import uuid -from .org_rw import ( - Bold, - Code, - Headline, - Italic, - Line, - ListItem, - RawLine, - Strike, - TableRow, - Text, - Underlined, - Verbatim, - dump_contents, -) +from .org_rw import (Bold, Code, Headline, Italic, Line, RawLine, ListItem, Strike, Text, + Underlined, Verbatim) + +from .org_rw import dump_contents def get_hl_raw_contents(doc: Headline) -> str: @@ -51,8 +40,6 @@ def get_raw_contents(doc) -> str: return doc.get_raw() if isinstance(doc, ListItem): return dump_contents(doc)[1] - if isinstance(doc, TableRow): - return dump_contents(doc)[1] print("Unhandled type: " + str(doc)) raise NotImplementedError("Unhandled type: " + str(doc)) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1c51c66 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +# No external requirements at this point diff --git a/scripts/apply-formatting.sh b/scripts/apply-formatting.sh deleted file mode 100755 index 2f7486b..0000000 --- a/scripts/apply-formatting.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -set -eu - -cd "`dirname $0`" -cd .. - -set -x - -isort --profile black . -black . 
diff --git a/tests/04-code.org b/tests/04-code.org
index 7af3aed..956d961 100644
--- a/tests/04-code.org
+++ b/tests/04-code.org
@@ -9,7 +9,6 @@
   :CREATED: [2020-01-01 Wed 01:01]
   :END:
 
-#+NAME: first-code-name
 #+BEGIN_SRC shell :results verbatim
 echo "This is a test"
 echo "with two lines"
diff --git a/tests/13-tags.org b/tests/13-tags.org
deleted file mode 100644
index c61ccdf..0000000
--- a/tests/13-tags.org
+++ /dev/null
@@ -1,13 +0,0 @@
-#+TITLE: 13-Tags
-#+DESCRIPTION: Simple org file to test tags
-#+FILETAGS: :filetag:
-
-* Level 1 :h1tag:
-  :PROPERTIES:
-  :ID: 13-tags
-  :CREATED: [2020-01-01 Wed 01:01]
-  :END:
-
-** Level2 :h2tag:
-* Level 1-1 :otherh1tag:
-** Level2 :otherh2tag:
diff --git a/tests/test_org.py b/tests/test_org.py
index a1fdff1..e49c6cf 100644
--- a/tests/test_org.py
+++ b/tests/test_org.py
@@ -2,6 +2,9 @@
 import os
 import unittest
 from datetime import datetime as DT
+from org_rw import MarkerToken, MarkerType, Timestamp, dumps, load, loads, dom
+import org_rw
+
 from utils.assertions import (
     BOLD,
     CODE,
@@ -16,9 +19,6 @@
     Tokens,
 )
 
-import org_rw
-from org_rw import MarkerToken, MarkerType, Timestamp, dom, dumps, load, loads
-
 DIR = os.path.dirname(os.path.abspath(__file__))
 
 
@@ -480,22 +480,20 @@ class TestSerde(unittest.TestCase):
         snippets = list(doc.get_code_snippets())
         self.assertEqual(len(snippets), 3)
 
-        self.assertEqual(snippets[0].name, "first-code-name")
-        self.assertEqual(snippets[0].language, "shell")
         self.assertEqual(
             snippets[0].content,
             'echo "This is a test"\n'
             + 'echo "with two lines"\n'
             + "exit 0 # Exit successfully",
         )
-        self.assertEqual(snippets[0].arguments.split(), [":results", "verbatim"])
+        self.assertEqual(
+            snippets[0].arguments.split(), ["shell", ":results", "verbatim"]
+        )
         self.assertEqual(
             snippets[0].result,
             "This is a test\n" + "with two lines",
         )
 
-        self.assertEqual(snippets[1].name, None)
-        self.assertEqual(snippets[1].language, "shell")
         self.assertEqual(
             snippets[1].content,
             'echo "This is another test"\n'
@@ -506,8 +504,6 @@ class TestSerde(unittest.TestCase):
             snippets[1].result, "This is another test\n" + "with two lines too"
         )
-        self.assertEqual(snippets[2].name, None)
-        self.assertEqual(snippets[2].language, "c")
         self.assertEqual(
             snippets[2].content,
             "/* This code has to be escaped to\n"
@@ -838,12 +834,12 @@ class TestSerde(unittest.TestCase):
         self.assertEqual(dumps(doc), orig)
 
     def test_add_todo_keywords_programatically(self):
-        orig = """* NEW_TODO_STATE First entry
+        orig = '''* NEW_TODO_STATE First entry
 
-* NEW_DONE_STATE Second entry"""
-        doc = loads(
-            orig, environment={"org-todo-keywords": "NEW_TODO_STATE | NEW_DONE_STATE"}
-        )
+* NEW_DONE_STATE Second entry'''
+        doc = loads(orig, environment={
+            'org-todo-keywords': "NEW_TODO_STATE | NEW_DONE_STATE"
+        })
 
         self.assertEqual(doc.headlines[0].is_todo, True)
         self.assertEqual(doc.headlines[0].is_done, False)
@@ -853,14 +849,14 @@ class TestSerde(unittest.TestCase):
         self.assertEqual(dumps(doc), orig)
 
     def test_add_todo_keywords_in_file(self):
-        orig = """#+TODO: NEW_TODO_STATE | NEW_DONE_STATE
+        orig = '''#+TODO: NEW_TODO_STATE | NEW_DONE_STATE
 
 * NEW_TODO_STATE First entry
 
-* NEW_DONE_STATE Second entry"""
-        doc = loads(
-            orig, environment={"org-todo-keywords": "NEW_TODO_STATE | NEW_DONE_STATE"}
-        )
+* NEW_DONE_STATE Second entry'''
+        doc = loads(orig, environment={
+            'org-todo-keywords': "NEW_TODO_STATE | NEW_DONE_STATE"
+        })
 
         self.assertEqual(doc.headlines[0].is_todo, True)
         self.assertEqual(doc.headlines[0].is_done, False)
@@ -869,161 +865,6 @@ class TestSerde(unittest.TestCase):
 
         self.assertEqual(dumps(doc), orig)
 
-    def test_mimic_write_file_13(self):
-        with open(os.path.join(DIR, "13-tags.org")) as f:
-            orig = f.read()
-            doc = loads(orig)
-
-        self.assertEqual(dumps(doc), orig)
-
-    def test_tag_property_read_13(self):
-        with open(os.path.join(DIR, "13-tags.org")) as f:
-            orig = f.read()
-            doc = loads(orig)
-
-        self.assertEqual(doc.tags, ["filetag"])
-
-        h1_1, h1_2 = doc.getTopHeadlines()
-        self.assertEqual(sorted(h1_1.tags), ["filetag", "h1tag"])
-        self.assertEqual(sorted(h1_2.tags), ["filetag", "otherh1tag"])
-
-        h1_1_h2 = h1_1.children[0]
-        self.assertEqual(sorted(h1_1_h2.tags), ["filetag", "h1tag", "h2tag"])
-
-        h1_2_h2 = h1_2.children[0]
-        self.assertEqual(sorted(h1_2_h2.tags), ["filetag", "otherh1tag", "otherh2tag"])
-
-    def test_shallow_tag_property_read_13(self):
-        with open(os.path.join(DIR, "13-tags.org")) as f:
-            orig = f.read()
-            doc = loads(orig)
-
-        self.assertEqual(doc.shallow_tags, ["filetag"])
-
-        h1_1, h1_2 = doc.getTopHeadlines()
-        self.assertEqual(sorted(h1_1.shallow_tags), ["h1tag"])
-        self.assertEqual(sorted(h1_2.shallow_tags), ["otherh1tag"])
-
-        h1_1_h2 = h1_1.children[0]
-        self.assertEqual(sorted(h1_1_h2.shallow_tags), ["h2tag"])
-
-        h1_2_h2 = h1_2.children[0]
-        self.assertEqual(sorted(h1_2_h2.shallow_tags), ["otherh2tag"])
-
-    def test_exclude_tags_from_inheritance_property_read_13(self):
-        with open(os.path.join(DIR, "13-tags.org")) as f:
-            orig = f.read()
-            doc = loads(
-                orig,
-                {
-                    "org-tags-exclude-from-inheritance": ("h1tag", "otherh2tag"),
-                },
-            )
-
-        self.assertEqual(doc.tags, ["filetag"])
-
-        h1_1, h1_2 = doc.getTopHeadlines()
-        self.assertEqual(sorted(h1_1.tags), ["filetag", "h1tag"])
-        self.assertEqual(sorted(h1_2.tags), ["filetag", "otherh1tag"])
-
-        h1_1_h2 = h1_1.children[0]
-        self.assertEqual(sorted(h1_1_h2.tags), ["filetag", "h2tag"])
-
-        h1_2_h2 = h1_2.children[0]
-        self.assertEqual(sorted(h1_2_h2.tags), ["filetag", "otherh1tag", "otherh2tag"])
-
-    def test_select_tags_to_inheritance_property_read_13(self):
-        with open(os.path.join(DIR, "13-tags.org")) as f:
-            orig = f.read()
-            doc = loads(
-                orig,
-                {
-                    "org-tags-exclude-from-inheritance": ("h1tag", "otherh2tag"),
-                    "org-use-tag-inheritance": ("h1tag",),
-                },
-            )
-
-        self.assertEqual(doc.tags, ["filetag"])
-
-        h1_1, h1_2 = doc.getTopHeadlines()
-        self.assertEqual(sorted(h1_1.tags), ["h1tag"])
-        self.assertEqual(sorted(h1_2.tags), ["otherh1tag"])
-
-        h1_1_h2 = h1_1.children[0]
-        self.assertEqual(sorted(h1_1_h2.tags), ["h1tag", "h2tag"])
-
-        h1_2_h2 = h1_2.children[0]
-        self.assertEqual(sorted(h1_2_h2.tags), ["otherh2tag"])
-
-    def test_update_headline_from_none_to_todo(self):
-        orig = "* First entry"
-        doc = loads(orig)
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state, None)
-
-        doc.headlines[0].state = "TODO"
-        self.assertEqual(doc.headlines[0].is_todo, True)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-        self.assertEqual(dumps(doc), "* TODO First entry")
-
-    def test_update_headline_from_none_to_done(self):
-        orig = "* First entry"
-        doc = loads(orig)
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state, None)
-
-        doc.headlines[0].state = org_rw.HeadlineState(name="DONE")
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, True)
-        self.assertEqual(doc.headlines[0].state["name"], "DONE")
-
-        self.assertEqual(dumps(doc), "* DONE First entry")
-
-    def test_update_headline_from_todo_to_none(self):
-        orig = "* TODO First entry"
-        doc = loads(orig)
-        self.assertEqual(doc.headlines[0].is_todo, True)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-        doc.headlines[0].state = None
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state, None)
-
-        self.assertEqual(dumps(doc), "* First entry")
-
-    def test_update_headline_from_todo_to_done(self):
-        orig = "* TODO First entry"
-        doc = loads(orig)
-        self.assertEqual(doc.headlines[0].is_todo, True)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-        doc.headlines[0].state = "DONE"
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, True)
-        self.assertEqual(doc.headlines[0].state["name"], "DONE")
-        self.assertEqual(dumps(doc), "* DONE First entry")
-
-    def test_update_headline_from_done_to_todo(self):
-        orig = "* DONE First entry"
-        doc = loads(orig)
-        self.assertEqual(doc.headlines[0].is_todo, False)
-        self.assertEqual(doc.headlines[0].is_done, True)
-        self.assertEqual(doc.headlines[0].state["name"], "DONE")
-
-        doc.headlines[0].state = org_rw.HeadlineState(name="TODO")
-        self.assertEqual(doc.headlines[0].is_todo, True)
-        self.assertEqual(doc.headlines[0].is_done, False)
-        self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-        self.assertEqual(dumps(doc), "* TODO First entry")
-
 
 def print_tree(tree, indentation=0, headline=None):
     for element in tree:
diff --git a/tests/test_timestamp.py b/tests/test_timestamp.py
index f7e0eca..7d69d13 100644
--- a/tests/test_timestamp.py
+++ b/tests/test_timestamp.py
@@ -1,9 +1,7 @@
 """Test the Timestamp object."""
 
-from datetime import date, datetime
-
 import pytest
-
+from datetime import date, datetime
 from org_rw import Timestamp
 
 
diff --git a/tests/utils/assertions.py b/tests/utils/assertions.py
index 9012d49..59dc658 100644
--- a/tests/utils/assertions.py
+++ b/tests/utils/assertions.py
@@ -2,17 +2,8 @@ import collections
 import unittest
 from datetime import datetime
 
-from org_rw import (
-    Bold,
-    Code,
-    Italic,
-    Line,
-    Strike,
-    Text,
-    Underlined,
-    Verbatim,
-    get_raw_contents,
-)
+from org_rw import (Bold, Code, Italic, Line, Strike, Text, Underlined,
+                    Verbatim, get_raw_contents)
 
 
 def timestamp_to_datetime(ts):