Compare commits
No commits in common. "develop" and "feat/improvements" have entirely different histories.
develop...feat/improvements
@@ -23,26 +23,6 @@ jobs:
 - run: pip install mypy
 - run: mypy org_rw --check-untyped-defs
 
-style-formatting:
-runs-on: ubuntu-latest
-steps:
-- name: Check out repository code
-uses: actions/checkout@v3
-- run: apt-get update && apt-get install -y python3-pip
-- run: pip install -e .
-- run: pip install black
-- run: black --check .
-
-style-sorted-imports:
-runs-on: ubuntu-latest
-steps:
-- name: Check out repository code
-uses: actions/checkout@v3
-- run: apt-get update && apt-get install -y python3-pip
-- run: pip install -e .
-- run: pip install isort
-- run: isort --profile black --check .
-
 stability-extra-test:
 runs-on: ubuntu-latest
 steps:
@@ -41,12 +41,11 @@ class ListGroupNode:
 self.children.append(child)
 
 def get_raw(self):
-return "\n".join([c.get_raw() for c in self.children])
+return '\n'.join([c.get_raw() for c in self.children])
 
 def __repr__(self):
 return "<List: {}>".format(len(self.children))
 
-
 class TableNode:
 def __init__(self):
 self.children = []
@@ -57,24 +56,21 @@ class TableNode:
 def __repr__(self):
 return "<Table: {}>".format(len(self.children))
 
 
 class TableSeparatorRow:
 def __init__(self, orig=None):
 self.orig = orig
 
 
 class TableRow:
 def __init__(self, cells, orig=None):
 self.cells = cells
 self.orig = orig
 
 
 class Text:
 def __init__(self, content):
 self.content = content
 
 def get_raw(self):
-return "".join(self.content.get_raw())
+return ''.join(self.content.get_raw())
 
 
 class ListItem:
@@ -109,24 +105,21 @@ class CodeBlock(BlockNode):
 def __repr__(self):
 return "<Code: {}>".format(len(self.lines or []))
 
 
-DomNode = Union[
-DrawerNode,
-PropertyNode,
-ListGroupNode,
-TableNode,
-TableSeparatorRow,
-TableRow,
-Text,
-ListItem,
-BlockNode,
-]
+DomNode = Union[DrawerNode,
+PropertyNode,
+ListGroupNode,
+TableNode,
+TableSeparatorRow,
+TableRow,
+Text,
+ListItem,
+BlockNode,
+]
 
-ContainerDomNode = Union[
-DrawerNode,
-ListGroupNode,
-TableNode,
-BlockNode,
-]
+ContainerDomNode = Union[DrawerNode,
+ListGroupNode,
+TableNode,
+BlockNode,
+]
 
 from .utils import get_raw_contents
org_rw/org_rw.py (338 changes)
@@ -1,5 +1,6 @@
 from __future__ import annotations
+from typing import Optional
+from datetime import timedelta
 import collections
 import difflib
 import logging
@@ -8,32 +9,17 @@ import re
 import sys
 from datetime import date, datetime, timedelta
 from enum import Enum
-from typing import (
-Dict,
-Iterator,
-List,
-Literal,
-Optional,
-TextIO,
-Tuple,
-TypedDict,
-Union,
-cast,
-)
+from typing import cast, Iterator, List, Optional, Tuple, Union
+from .types import HeadlineDict
 
 from . import dom
-from .types import HeadlineDict
 
 DEBUG_DIFF_CONTEXT = 10
 
-DEFAULT_TODO_KEYWORDS = ["TODO"]
-DEFAULT_DONE_KEYWORDS = ["DONE"]
-
 BASE_ENVIRONMENT = {
 "org-footnote-section": "Footnotes",
-"org-todo-keywords": " ".join(DEFAULT_TODO_KEYWORDS)
-+ " | "
-+ " ".join(DEFAULT_DONE_KEYWORDS),
 "org-options-keywords": (
 "ARCHIVE:",
 "AUTHOR:",
@@ -67,6 +53,9 @@ BASE_ENVIRONMENT = {
 ),
 }
 
+DEFAULT_TODO_KEYWORDS = ["TODO"]
+DEFAULT_DONE_KEYWORDS = ["DONE"]
+
 HEADLINE_TAGS_RE = re.compile(r"((:(\w|[0-9_@#%])+)+:)\s*$")
 HEADLINE_RE = re.compile(r"^(?P<stars>\*+)(?P<spacing>\s+)(?P<line>.*?)$")
 KEYWORDS_RE = re.compile(
@@ -103,7 +92,7 @@ PLANNING_RE = re.compile(
 r")+\s*"
 )
 LIST_ITEM_RE = re.compile(
-r"(?P<indentation>\s*)((?P<bullet>[*\-+])|((?P<counter>\d|[a-zA-Z])(?P<counter_sep>[.)]))) ((?P<checkbox_indentation>\s*)\[(?P<checkbox_value>[ Xx])\])?((?P<tag_indentation>\s*)((?P<tag>.*?)\s::))?(?P<content>.*)"
+r"(?P<indentation>\s*)((?P<bullet>[*\-+])|((?P<counter>\d|[a-zA-Z])(?P<counter_sep>[.)]))) ((?P<checkbox_indentation>\s*)\[(?P<checkbox_value>[ Xx])\])?((?P<tag_indentation>\s*)(?P<tag>.*?)::)?(?P<content>.*)"
 )
 
 IMPLICIT_LINK_RE = re.compile(r"(https?:[^<> ]*[a-zA-Z0-9])")
@@ -113,7 +102,7 @@ BEGIN_BLOCK_RE = re.compile(r"^\s*#\+BEGIN_(?P<subtype>[^ ]+)(?P<arguments>.*)$"
 END_BLOCK_RE = re.compile(r"^\s*#\+END_(?P<subtype>[^ ]+)\s*$", re.I)
 RESULTS_DRAWER_RE = re.compile(r"^\s*:results:\s*$", re.I)
 CodeSnippet = collections.namedtuple(
-"CodeSnippet", ("name", "content", "result", "language", "arguments")
+"CodeSnippet", ("name", "content", "result", "arguments")
 )
 
 # Groupings
@@ -126,17 +115,6 @@ NON_FINISHED_GROUPS = (
 FREE_GROUPS = (dom.CodeBlock,)
 
 
-# States
-class HeadlineState(TypedDict):
-# To be extended to handle keyboard shortcuts
-name: str
-
-
-class OrgDocDeclaredStates(TypedDict):
-not_completed: List[HeadlineState]
-completed: List[HeadlineState]
-
-
 class NonReproducibleDocument(Exception):
 """
 Exception thrown when a document would be saved as different contents
@@ -337,7 +315,7 @@ class Headline:
 self.priority = priority
 self.title_start = title_start
 self.title = parse_content_block([RawLine(linenum=start_line, line=title)])
-self._state = state
+self.state = state
 self.tags_start = tags_start
 self.shallow_tags = tags
 self.contents = contents
@@ -376,12 +354,12 @@ class Headline:
 )
 ]
 
-if scheduled_m := m.group("scheduled"):
-self.scheduled = parse_time(scheduled_m)
-if closed_m := m.group("closed"):
-self.closed = parse_time(closed_m)
-if deadline_m := m.group("deadline"):
-self.deadline = parse_time(deadline_m)
+if scheduled := m.group("scheduled"):
+self.scheduled = parse_time(scheduled)
+if closed := m.group("closed"):
+self.closed = parse_time(closed)
+if deadline := m.group("deadline"):
+self.deadline = parse_time(deadline)
 
 # Remove from contents
 self._remove_element_in_line(start_line + 1)
@@ -726,42 +704,6 @@ class Headline:
 def id(self, value):
 self.set_property("ID", value)
 
-@property
-def state(self) -> HeadlineState:
-return self._state
-
-@state.setter
-def state(self, new_state: Union[None, str, HeadlineState]) -> None:
-"""
-Update the state of a Headline. If the state is a known one it will update it's TODO/DONE properties.
-
-Args:
-new_state (str|HeadlineState): New state, either it's literal value or it's structure.
-"""
-if new_state is None:
-self.is_todo = False
-self.is_done = False
-# TODO: Check & log if appropriate?
-self._state = None
-return
-
-if isinstance(new_state, str):
-new_state = HeadlineState(name=new_state)
-
-state_name = new_state["name"]
-if state_name in [kw["name"] for kw in self.doc.todo_keywords]:
-self.is_todo = True
-self.is_done = False
-# TODO: Check & log if appropriate?
-elif state_name in [kw["name"] for kw in self.doc.done_keywords]:
-self.is_todo = False
-self.is_done = True
-# TODO: Check, log & if appropriate?
-else:
-# TODO: Should we raise a warning, raise an exception, update the is_todo/is_done?
-pass
-self._state = new_state
-
 @property
 def clock(self):
 times = []
@@ -788,20 +730,11 @@ class Headline:
 return times
 
 @property
-def tags(self) -> list[str]:
-parent_tags = self.parent.tags
-if self.doc.environment.get("org-use-tag-inheritance"):
-accepted_tags = []
-for tag in self.doc.environment.get("org-use-tag-inheritance"):
-if tag in parent_tags:
-accepted_tags.append(tag)
-parent_tags = accepted_tags
-
-elif self.doc.environment.get("org-tags-exclude-from-inheritance"):
-for tag in self.doc.environment.get("org-tags-exclude-from-inheritance"):
-if tag in parent_tags:
-parent_tags.remove(tag)
-return list(self.shallow_tags) + parent_tags
+def tags(self):
+if isinstance(self.parent, OrgDoc):
+return list(self.shallow_tags)
+else:
+return list(self.shallow_tags) + self.parent.tags
 
 def add_tag(self, tag: str):
 self.shallow_tags.append(tag)
@@ -918,12 +851,6 @@ class Headline:
 sections = []
 arguments = None
 
-names_by_line = {}
-for kw in self.keywords:
-if kw.key == "NAME":
-names_by_line[kw.linenum] = kw.value
-
-name = None
 for delimiter in self.delimiters:
 if (
 delimiter.delimiter_type == DelimiterLineType.BEGIN_BLOCK
@@ -932,12 +859,6 @@ class Headline:
 line_start = delimiter.linenum
 inside_code = True
 arguments = delimiter.arguments
-
-name_line = line_start - 1
-if name_line in names_by_line:
-name = names_by_line[name_line]
-else:
-name = None
 elif (
 delimiter.delimiter_type == DelimiterLineType.END_BLOCK
 and delimiter.type_data.subtype.lower() == "src"
@@ -952,26 +873,14 @@ class Headline:
 # the content parsing must be re-thinked
 contents = contents[:-1]
 
-language = None
-if arguments is not None:
-arguments = arguments.strip()
-if " " in arguments:
-language = arguments[: arguments.index(" ")]
-arguments = arguments[arguments.index(" ") + 1 :]
-else:
-language = arguments
-arguments = None
 sections.append(
 {
 "line_first": start + 1,
 "line_last": end - 1,
 "content": contents,
 "arguments": arguments,
-"language": language,
-"name": name,
 }
 )
-name = None
 arguments = None
 line_start = None
 
@@ -1020,18 +929,13 @@ class Headline:
 
 results = []
 for section in sections:
+name = None
 content = section["content"]
 code_result = section.get("result", None)
 arguments = section.get("arguments", None)
-language = section.get("language", None)
-name = section.get("name", None)
 results.append(
 CodeSnippet(
-content=content,
-result=code_result,
-arguments=arguments,
-language=language,
-name=name,
+name=name, content=content, result=code_result, arguments=arguments
 )
 )
 
@@ -1180,9 +1084,7 @@ class Timestamp:
 datetime: The corresponding datetime object.
 """
 if self.hour is not None:
-return datetime(
-self.year, self.month, self.day, self.hour, self.minute or 0
-)
+return datetime(self.year, self.month, self.day, self.hour, self.minute)
 else:
 return datetime(self.year, self.month, self.day, 0, 0)
 
@@ -1574,31 +1476,17 @@ class OrgTime:
 )
 )
 
-@property
-def active(self) -> bool:
-"""
-Checks if the time is set as active.
-"""
-return self.time.active
-
-@active.setter
-def active(self, value: bool) -> None:
-"""
-Sets the active state for the timestamp.
-"""
-self.time.active = value
-
 def activate(self) -> None:
 """
 Sets the active state for the timestamp.
 """
-self.active = True
+self.time.active = True
 
 def deactivate(self) -> None:
 """
 Sets the inactive state for the timestamp.
 """
-self.active = False
+self.time.active = False
 
 def from_datetime(self, dt: datetime) -> None:
 """
@@ -1629,7 +1517,7 @@ def timestamp_to_string(ts: Timestamp, end_time: Optional[Timestamp] = None) ->
 
 if ts.hour is not None:
 base = "{date} {hour:02}:{minute:02d}".format(
-date=date, hour=ts.hour, minute=ts.minute or 0
+date=date, hour=ts.hour, minute=ts.minute
 )
 else:
 base = date
@@ -1755,7 +1643,7 @@ class Text:
 def __repr__(self):
 return "{{Text line: {}; content: {} }}".format(self.linenum, self.contents)
 
-def get_text(self) -> str:
+def get_text(self):
 return token_list_to_plaintext(self.contents)
 
 def get_raw(self):
@@ -1985,12 +1873,7 @@ def tokenize_contents(contents: str) -> List[TokenItems]:
 continue
 
 # Possible link close or open of description
-if (
-char == "]"
-and len(contents) > i + 1
-and in_link
-and contents[i + 1] in "]["
-):
+if char == "]" and len(contents) > i + 1 and in_link:
 if contents[i + 1] == "]":
 cut_string()
 
@@ -2041,7 +1924,6 @@ def tokenize_contents(contents: str) -> List[TokenItems]:
 cut_string()
 tokens.append((TOKEN_TYPE_CLOSE_MARKER, char))
 has_changed = True
-closes.remove(i)
 
 if not has_changed:
 text.append(char)
@@ -2084,7 +1966,7 @@ def parse_contents(raw_contents: List[RawLine]):
 return [parse_content_block(block) for block in blocks]
 
 
-def parse_content_block(raw_contents: Union[List[RawLine], str]) -> Text:
+def parse_content_block(raw_contents: Union[List[RawLine], str]):
 contents_buff = []
 if isinstance(raw_contents, str):
 contents_buff.append(raw_contents)
@@ -2132,7 +2014,7 @@ def dump_contents(raw):
 content = "\n".join(content_lines)
 checkbox = f"[{raw.checkbox_value}]" if raw.checkbox_value else ""
 tag = (
-f"{raw.tag_indentation}{token_list_to_raw(raw.tag or '')} ::"
+f"{raw.tag_indentation}{token_list_to_raw(raw.tag or '')}::"
 if raw.tag or raw.tag_indentation
 else ""
 )
@@ -2170,16 +2052,16 @@ def parse_headline(hl, doc, parent) -> Headline:
 title = line
 is_done = is_todo = False
 for state in doc.todo_keywords or []:
-if title.startswith(state["name"] + " "):
+if title.startswith(state + " "):
 hl_state = state
-title = title[len(state["name"] + " ") :]
+title = title[len(state + " ") :]
 is_todo = True
 break
 else:
 for state in doc.done_keywords or []:
-if title.startswith(state["name"] + " "):
+if title.startswith(state + " "):
 hl_state = state
-title = title[len(state["name"] + " ") :]
+title = title[len(state + " ") :]
 is_done = True
 break
 
@@ -2276,60 +2158,21 @@ def dump_delimiters(line: DelimiterLine):
 return (line.linenum, line.line)
 
 
-def parse_todo_done_keywords(line: str) -> OrgDocDeclaredStates:
-clean_line = re.sub(r"\([^)]+\)", "", line)
-if "|" in clean_line:
-todo_kws, done_kws = clean_line.split("|", 1)
-has_split = True
-else:
-# Standard behavior in this case is: the last state is the one considered as DONE
-todo_kws = clean_line
-
-todo_keywords = re.sub(r"\s{2,}", " ", todo_kws.strip()).split()
-if has_split:
-done_keywords = re.sub(r"\s{2,}", " ", done_kws.strip()).split()
-else:
-done_keywods = [todo_keywords[-1]]
-todo_keywords = todo_keywords[:-1]
-
-return {
-"not_completed": [HeadlineState(name=keyword) for keyword in todo_keywords],
-"completed": [HeadlineState(name=keyword) for keyword in done_keywords],
-}
-
-
 class OrgDoc:
 def __init__(
-self,
-headlines,
-keywords,
-contents,
-list_items,
-structural,
-properties,
-environment=BASE_ENVIRONMENT,
+self, headlines, keywords, contents, list_items, structural, properties
 ):
-self.todo_keywords = [HeadlineState(name=kw) for kw in DEFAULT_TODO_KEYWORDS]
-self.done_keywords = [HeadlineState(name=kw) for kw in DEFAULT_DONE_KEYWORDS]
-self.environment = environment
-
-keywords_set_in_file = False
+self.todo_keywords = DEFAULT_TODO_KEYWORDS
+self.done_keywords = DEFAULT_DONE_KEYWORDS
 for keyword in keywords:
 if keyword.key in ("TODO", "SEQ_TODO"):
-states = parse_todo_done_keywords(keyword.value)
-self.todo_keywords, self.done_keywords = (
-states["not_completed"],
-states["completed"],
+todo_kws, done_kws = re.sub(r"\([^)]+\)", "", keyword.value).split(
+"|", 1
 )
-keywords_set_in_file = True
-
-if not keywords_set_in_file and "org-todo-keywords" in environment:
-# Read keywords from environment
-states = parse_todo_done_keywords(environment["org-todo-keywords"])
-self.todo_keywords, self.done_keywords = (
-states["not_completed"],
-states["completed"],
-)
+
+self.todo_keywords = re.sub(r"\s{2,}", " ", todo_kws.strip()).split()
+self.done_keywords = re.sub(r"\s{2,}", " ", done_kws.strip()).split()
 
 self.keywords: List[Property] = keywords
 self.contents: List[RawLine] = contents
@@ -2355,17 +2198,6 @@ class OrgDoc:
 def path(self):
 return self._path
 
-@property
-def tags(self) -> list[str]:
-for kw in self.keywords:
-if kw.key == "FILETAGS":
-return kw.value.strip(":").split(":")
-return []
-
-@property
-def shallow_tags(self) -> list[str]:
-return self.tags
-
 ## Querying
 def get_links(self):
 for headline in self.headlines:
@@ -2403,7 +2235,7 @@ class OrgDoc:
 yield hl
 
 def get_code_snippets(self):
-for headline in self.getAllHeadlines():
+for headline in self.headlines:
 yield from headline.get_code_snippets()
 
 # Writing
@@ -2414,8 +2246,8 @@ class OrgDoc:
 tags = ":" + ":".join(headline.shallow_tags) + ":"
 
 state = ""
-if headline._state:
-state = headline._state["name"] + " "
+if headline.state:
+state = headline.state + " "
 
 raw_title = token_list_to_raw(headline.title.contents)
 tags_padding = ""
@@ -2516,7 +2348,7 @@ class OrgDoc:
 
 
 class OrgDocReader:
-def __init__(self, environment=BASE_ENVIRONMENT):
+def __init__(self):
 self.headlines: List[HeadlineDict] = []
 self.keywords: List[Keyword] = []
 self.headline_hierarchy: List[Optional[HeadlineDict]] = []
@@ -2527,9 +2359,8 @@ class OrgDocReader:
 self.structural: List = []
 self.properties: List = []
 self.current_drawer: Optional[List] = None
-self.environment = environment
 
-def finalize(self) -> OrgDoc:
+def finalize(self):
 return OrgDoc(
 self.headlines,
 self.keywords,
@@ -2537,7 +2368,6 @@ class OrgDocReader:
 self.list_items,
 self.structural,
 self.properties,
-self.environment,
 )
 
 ## Construction
@@ -2745,7 +2575,7 @@ class OrgDocReader:
 
 self.current_drawer.append(Property(linenum, match, key, value, None))
 
-def read(self, s):
+def read(self, s, environment):
 lines = s.split("\n")
 line_count = len(lines)
 reader = enumerate(lines)
@@ -2835,28 +2665,9 @@ class OrgDocReader:
 raise
 
 
-def loads(
-s: str, environment: Optional[Dict] = BASE_ENVIRONMENT, extra_cautious: bool = True
-) -> OrgDoc:
-"""
-Load an Org-mode document from a string.
-
-Args:
-s (str): The string representation of the Org-mode document.
-environment (Optional[dict]): The environment for parsing. Defaults to
-`BASE_ENVIRONMENT`.
-extra_cautious (bool): If True, perform an extra check to ensure that
-the document can be re-serialized to the original string. Defaults to True.
-
-Returns:
-OrgDoc: The loaded Org-mode document.
-
-Raises:
-NonReproducibleDocument: If `extra_cautious` is True and there is a
-difference between the original string and the re-serialized document.
-"""
-reader = OrgDocReader(environment)
-reader.read(s)
+def loads(s, environment=BASE_ENVIRONMENT, extra_cautious=True):
+reader = OrgDocReader()
+reader.read(s, environment)
 doc = reader.finalize()
 if extra_cautious: # Check that all options can be properly re-serialized
 after_dump = dumps(doc)
@@ -2894,55 +2705,20 @@ def loads(
 return doc
 
 
-def load(
-f: TextIO,
-environment: Optional[dict] = BASE_ENVIRONMENT,
-extra_cautious: bool = False,
-) -> OrgDoc:
-"""
-Load an Org-mode document from a file object.
-
-Args:
-f (TextIO): The file object containing the Org-mode document.
-environment (Optional[dict]): The environment for parsing. Defaults to
-`BASE_ENVIRONMENT`.
-extra_cautious (bool): If True, perform an extra check to ensure that
-the document can be re-serialized to the original string. Defaults to False.
-
-Returns:
-OrgDoc: The loaded Org-mode document.
-"""
+def load(f, environment=BASE_ENVIRONMENT, extra_cautious=False):
 doc = loads(f.read(), environment, extra_cautious)
 doc._path = os.path.abspath(f.name)
 return doc
 
 
-def dumps(doc: OrgDoc) -> str:
-"""
-Serialize an OrgDoc object to a string.
-
-Args:
-doc (OrgDoc): The OrgDoc object to serialize.
-
-Returns:
-str: The serialized string representation of the OrgDoc object.
-"""
+def dumps(doc):
 dump = list(doc.dump())
 result = "\n".join(dump)
+# print(result)
 return result
 
 
-def dump(doc: OrgDoc, fp: TextIO) -> None:
-"""
-Serialize an OrgDoc object to a file.
-
-Args:
-doc (OrgDoc): The OrgDoc object to serialize.
-fp (TextIO): The file-like object to write the serialized data to.
-
-Returns:
-None
-"""
+def dump(doc, fp):
 it = doc.dump()
 
 # Write first line separately
@@ -1,19 +1,9 @@
 import uuid
 
-from .org_rw import (
-Bold,
-Code,
-Headline,
-Italic,
-Line,
-ListItem,
-RawLine,
-Strike,
-Text,
-Underlined,
-Verbatim,
-dump_contents,
-)
+from .org_rw import (Bold, Code, Headline, Italic, Line, RawLine, ListItem, Strike, Text,
+Underlined, Verbatim)
+
+from .org_rw import dump_contents
 
 
 def get_hl_raw_contents(doc: Headline) -> str:
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-set -eu
-
-cd "`dirname $0`"
-cd ..
-
-set -x
-
-isort --profile black .
-black .
@@ -9,7 +9,6 @@
 :CREATED: [2020-01-01 Wed 01:01]
 :END:
 
-#+NAME: first-code-name
 #+BEGIN_SRC shell :results verbatim
 echo "This is a test"
 echo "with two lines"
@@ -1,13 +0,0 @@
-#+TITLE: 13-Tags
-#+DESCRIPTION: Simple org file to test tags
-#+FILETAGS: :filetag:
-
-* Level 1 :h1tag:
-:PROPERTIES:
-:ID: 13-tags
-:CREATED: [2020-01-01 Wed 01:01]
-:END:
-
-** Level2 :h2tag:
-* Level 1-1 :otherh1tag:
-** Level2 :otherh2tag:
@@ -2,6 +2,9 @@ import os
 import unittest
 from datetime import datetime as DT
 
+from org_rw import MarkerToken, MarkerType, Timestamp, dumps, load, loads, dom
+import org_rw
+
 from utils.assertions import (
 BOLD,
 CODE,
@@ -16,9 +19,6 @@ from utils.assertions import (
 Tokens,
 )
 
-import org_rw
-from org_rw import MarkerToken, MarkerType, Timestamp, dom, dumps, load, loads
-
 DIR = os.path.dirname(os.path.abspath(__file__))
 
 
@@ -480,22 +480,20 @@ class TestSerde(unittest.TestCase):
 
 snippets = list(doc.get_code_snippets())
 self.assertEqual(len(snippets), 3)
-self.assertEqual(snippets[0].name, "first-code-name")
-self.assertEqual(snippets[0].language, "shell")
 self.assertEqual(
 snippets[0].content,
 'echo "This is a test"\n'
 + 'echo "with two lines"\n'
 + "exit 0 # Exit successfully",
 )
-self.assertEqual(snippets[0].arguments.split(), [":results", "verbatim"])
+self.assertEqual(
+snippets[0].arguments.split(), ["shell", ":results", "verbatim"]
+)
 self.assertEqual(
 snippets[0].result,
 "This is a test\n" + "with two lines",
 )
 
-self.assertEqual(snippets[1].name, None)
-self.assertEqual(snippets[1].language, "shell")
 self.assertEqual(
 snippets[1].content,
 'echo "This is another test"\n'
@@ -506,8 +504,6 @@ class TestSerde(unittest.TestCase):
 snippets[1].result, "This is another test\n" + "with two lines too"
 )
 
-self.assertEqual(snippets[2].name, None)
-self.assertEqual(snippets[2].language, "c")
 self.assertEqual(
 snippets[2].content,
 "/* This code has to be escaped to\n"
@@ -837,193 +833,6 @@ class TestSerde(unittest.TestCase):
 
 self.assertEqual(dumps(doc), orig)
 
-def test_add_todo_keywords_programatically(self):
-orig = """* NEW_TODO_STATE First entry
-
-* NEW_DONE_STATE Second entry"""
-doc = loads(
-orig, environment={"org-todo-keywords": "NEW_TODO_STATE | NEW_DONE_STATE"}
-)
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-
-self.assertEqual(doc.headlines[1].is_todo, False)
-self.assertEqual(doc.headlines[1].is_done, True)
-
-self.assertEqual(dumps(doc), orig)
-
-def test_add_todo_keywords_in_file(self):
-orig = """#+TODO: NEW_TODO_STATE | NEW_DONE_STATE
-
-* NEW_TODO_STATE First entry
-
-* NEW_DONE_STATE Second entry"""
-doc = loads(
-orig, environment={"org-todo-keywords": "NEW_TODO_STATE | NEW_DONE_STATE"}
-)
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-
-self.assertEqual(doc.headlines[1].is_todo, False)
-self.assertEqual(doc.headlines[1].is_done, True)
-
-self.assertEqual(dumps(doc), orig)
-
-def test_mimic_write_file_13(self):
-with open(os.path.join(DIR, "13-tags.org")) as f:
-orig = f.read()
-doc = loads(orig)
-
-self.assertEqual(dumps(doc), orig)
-
-def test_tag_property_read_13(self):
-with open(os.path.join(DIR, "13-tags.org")) as f:
-orig = f.read()
-doc = loads(orig)
-
-self.assertEqual(doc.tags, ["filetag"])
-
-h1_1, h1_2 = doc.getTopHeadlines()
-self.assertEqual(sorted(h1_1.tags), ["filetag", "h1tag"])
-self.assertEqual(sorted(h1_2.tags), ["filetag", "otherh1tag"])
-
-h1_1_h2 = h1_1.children[0]
-self.assertEqual(sorted(h1_1_h2.tags), ["filetag", "h1tag", "h2tag"])
-
-h1_2_h2 = h1_2.children[0]
-self.assertEqual(sorted(h1_2_h2.tags), ["filetag", "otherh1tag", "otherh2tag"])
-
-def test_shallow_tag_property_read_13(self):
-with open(os.path.join(DIR, "13-tags.org")) as f:
-orig = f.read()
-doc = loads(orig)
-
-self.assertEqual(doc.shallow_tags, ["filetag"])
-
-h1_1, h1_2 = doc.getTopHeadlines()
-self.assertEqual(sorted(h1_1.shallow_tags), ["h1tag"])
-self.assertEqual(sorted(h1_2.shallow_tags), ["otherh1tag"])
-
-h1_1_h2 = h1_1.children[0]
-self.assertEqual(sorted(h1_1_h2.shallow_tags), ["h2tag"])
-
-h1_2_h2 = h1_2.children[0]
-self.assertEqual(sorted(h1_2_h2.shallow_tags), ["otherh2tag"])
-
-def test_exclude_tags_from_inheritance_property_read_13(self):
-with open(os.path.join(DIR, "13-tags.org")) as f:
-orig = f.read()
-doc = loads(
-orig,
-{
-"org-tags-exclude-from-inheritance": ("h1tag", "otherh2tag"),
-},
-)
-
-self.assertEqual(doc.tags, ["filetag"])
-
-h1_1, h1_2 = doc.getTopHeadlines()
-self.assertEqual(sorted(h1_1.tags), ["filetag", "h1tag"])
-self.assertEqual(sorted(h1_2.tags), ["filetag", "otherh1tag"])
-
-h1_1_h2 = h1_1.children[0]
-self.assertEqual(sorted(h1_1_h2.tags), ["filetag", "h2tag"])
-
-h1_2_h2 = h1_2.children[0]
-self.assertEqual(sorted(h1_2_h2.tags), ["filetag", "otherh1tag", "otherh2tag"])
-
-def test_select_tags_to_inheritance_property_read_13(self):
-with open(os.path.join(DIR, "13-tags.org")) as f:
-orig = f.read()
-doc = loads(
-orig,
-{
-"org-tags-exclude-from-inheritance": ("h1tag", "otherh2tag"),
-"org-use-tag-inheritance": ("h1tag",),
-},
-)
-
-self.assertEqual(doc.tags, ["filetag"])
-
-h1_1, h1_2 = doc.getTopHeadlines()
-self.assertEqual(sorted(h1_1.tags), ["h1tag"])
-self.assertEqual(sorted(h1_2.tags), ["otherh1tag"])
-
-h1_1_h2 = h1_1.children[0]
-self.assertEqual(sorted(h1_1_h2.tags), ["h1tag", "h2tag"])
-
-h1_2_h2 = h1_2.children[0]
-self.assertEqual(sorted(h1_2_h2.tags), ["otherh2tag"])
-
-def test_update_headline_from_none_to_todo(self):
-orig = "* First entry"
-doc = loads(orig)
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state, None)
-
-doc.headlines[0].state = "TODO"
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-self.assertEqual(dumps(doc), "* TODO First entry")
-
-def test_update_headline_from_none_to_done(self):
-orig = "* First entry"
-doc = loads(orig)
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state, None)
-
-doc.headlines[0].state = org_rw.HeadlineState(name="DONE")
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, True)
-self.assertEqual(doc.headlines[0].state["name"], "DONE")
-
-self.assertEqual(dumps(doc), "* DONE First entry")
-
-def test_update_headline_from_todo_to_none(self):
-orig = "* TODO First entry"
-doc = loads(orig)
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-doc.headlines[0].state = None
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state, None)
-
-self.assertEqual(dumps(doc), "* First entry")
-
-def test_update_headline_from_todo_to_done(self):
-orig = "* TODO First entry"
-doc = loads(orig)
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-doc.headlines[0].state = "DONE"
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, True)
-self.assertEqual(doc.headlines[0].state["name"], "DONE")
-self.assertEqual(dumps(doc), "* DONE First entry")
-
-def test_update_headline_from_done_to_todo(self):
-orig = "* DONE First entry"
-doc = loads(orig)
-self.assertEqual(doc.headlines[0].is_todo, False)
-self.assertEqual(doc.headlines[0].is_done, True)
-self.assertEqual(doc.headlines[0].state["name"], "DONE")
-
-doc.headlines[0].state = org_rw.HeadlineState(name="TODO")
-self.assertEqual(doc.headlines[0].is_todo, True)
-self.assertEqual(doc.headlines[0].is_done, False)
-self.assertEqual(doc.headlines[0].state["name"], "TODO")
-
-self.assertEqual(dumps(doc), "* TODO First entry")
 
 
 def print_tree(tree, indentation=0, headline=None):
 for element in tree:
@@ -1,9 +1,7 @@
 """Test the Timestamp object."""
 
-from datetime import date, datetime
-
 import pytest
+from datetime import date, datetime
 from org_rw import Timestamp
 
 
@@ -2,17 +2,8 @@ import collections
 import unittest
 from datetime import datetime
 
-from org_rw import (
-Bold,
-Code,
-Italic,
-Line,
-Strike,
-Text,
-Underlined,
-Verbatim,
-get_raw_contents,
-)
+from org_rw import (Bold, Code, Italic, Line, Strike, Text, Underlined,
+Verbatim, get_raw_contents)
 
 
 def timestamp_to_datetime(ts):