Compare commits


No commits in common. "ef615feac5e7fc178d3a6888d4e817a1891a62f3" and "071004ea7a7f816733000b5a7de27b6faf02835a" have entirely different histories.

3 changed files with 9 additions and 86 deletions

View File

@@ -1,4 +1,5 @@
 from __future__ import annotations
+from typing import Optional
 from datetime import timedelta
 import collections
 import difflib
@@ -8,7 +9,7 @@ import re
 import sys
 from datetime import date, datetime, timedelta
 from enum import Enum
-from typing import Any, cast, Iterator, List, Literal, Optional, Tuple, TypedDict, TypeVar, Union
+from typing import cast, Iterator, List, Literal, Optional, Tuple, TypedDict, Union
 from .types import HeadlineDict
@@ -820,7 +821,7 @@ class Headline:
         raise NotImplementedError()

     def update_raw_contents(self, new_contents):
-        # Clear elements
+        # @TODO: Properly re-parse elements
         self.keywords = []
         self.contents = []
         self.list_items = []
@@ -832,31 +833,10 @@
         self.deadline = None
         self.closed = None

-        reader = OrgDocReader(environment=self.doc.environment)
-        reader.read(new_contents)
-
-        # No need to finalize as we can take the data from the reader instead of from a doc
-        if len(reader.headlines) > 0:
-            # Probably can be done by just adding the headlines to this one's children
-            raise NotImplementedError('new headlines on raw contents not supported yet. This probably should be simple, see comment on code.')
-
-        for kw in reader.keywords:
-            self.keywords.append(offset_linenum(self.start_line + 1, kw))
-
-        for content in reader.contents:
-            self.contents.append(offset_linenum(self.start_line + 1, content))
-
-        for list_item in reader.list_items:
-            self.list_items.append(offset_linenum(self.start_line + 1, list_item))
-
-        for struct_item in reader.structural:
-            self.structural.append(offset_linenum(self.start_line + 1, struct_item))
-
-        for prop in reader.properties:
-            self.properties.append(offset_linenum(self.start_line + 1, prop))
-
-        # Environment is not used, as it's known
+        for line in new_contents.split('\n'):
+            self.contents.append(
+                RawLine(linenum=0, line=line)
+            )

     def get_element_in_line(self, linenum):
         for line in self.contents:
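After this hunk, update_raw_contents stores each incoming line verbatim as a RawLine with linenum=0; keywords, properties and nested headlines in the new text are no longer re-parsed through OrgDocReader. A rough sketch of the resulting call pattern, assuming org_rw also exposes a loads helper alongside the load/dumps used in the tests, and with a made-up document:

    from org_rw import loads

    # NOTE: `loads` is assumed to exist next to the `load`/`dumps` helpers used
    # in the test suite; the document text here is purely illustrative.
    doc = loads("* Title\n  Old body text\n")
    hl = doc.getTopHeadlines()[0]

    # With the simplified method, the new text ends up in hl.contents as
    # RawLine entries with linenum=0; it is not re-parsed into keywords,
    # properties or child headlines.
    hl.update_raw_contents("  New body line one\n  New body line two")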
@@ -1074,19 +1054,6 @@ TableRow = collections.namedtuple(
     ),
 )

-ItemWithLineNum = Union[Keyword, RawLine, Property, ListItem, tuple[int, Any]]
-
-def offset_linenum(offset: int, item: ItemWithLineNum) -> ItemWithLineNum:
-    if isinstance(item, tuple) and len(item) == 2 and isinstance(item[0], int):
-        return item
-    if isinstance(item, ListItem):
-        item.linenum += offset
-        return item
-    assert isinstance(item, (Keyword, RawLine, Property)), \
-        "Expected (Keyword|RawLine|Property), found {}".format(item)
-    return item._replace(linenum=item.linenum + offset)
-
 # @TODO How are [YYYY-MM-DD HH:mm--HH:mm] and ([... HH:mm]--[... HH:mm]) differentiated ?
 # @TODO Consider recurrence annotations
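For reference, the deleted offset_linenum helper relied on Keyword, RawLine and Property being namedtuples with a linenum field (copied via _replace), while ListItem was mutated in place. A self-contained sketch of that namedtuple pattern, using an illustrative stand-in type rather than the library's own:

    import collections

    # Stand-in namedtuple with the same linenum/line fields seen in the diff above.
    RawLine = collections.namedtuple("RawLine", ["linenum", "line"])

    def offset_linenum(offset: int, item: RawLine) -> RawLine:
        # _replace returns a copy of the namedtuple with the given field updated.
        return item._replace(linenum=item.linenum + offset)

    print(offset_linenum(10, RawLine(linenum=2, line="some content")))
    # -> RawLine(linenum=12, line='some content')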
@@ -2291,7 +2258,6 @@ class OrgDoc:
         self.headlines: List[Headline] = list(
             map(lambda hl: parse_headline(hl, self, self), headlines)
         )
-        self.environment = environment

     @property
     def id(self):

View File

@@ -865,46 +865,6 @@ class TestSerde(unittest.TestCase):
         self.assertEqual(dumps(doc), orig)

-    def test_update_reparse(self):
-        with open(os.path.join(DIR, "01-simple.org")) as f:
-            doc = load(f)
-
-        hl = doc.getTopHeadlines()[0]
-        ex = HL(
-            "First level",
-            props=[
-                ("ID", "01-simple-first-level-id"),
-                ("CREATED", DT(2020, 1, 1, 1, 1)),
-            ],
-            content=" First level content\n",
-            children=[
-                HL(
-                    "Second level",
-                    props=[("ID", "01-simple-second-level-id")],
-                    content="\n Second level content\n",
-                    children=[
-                        HL(
-                            "Third level",
-                            props=[("ID", "01-simple-third-level-id")],
-                            content="\n Third level content\n",
-                        )
-                    ],
-                )
-            ],
-        )
-
-        # Ground check
-        ex.assert_matches(self, hl)
-
-        # Update
-        lines = list(doc.dump_headline(hl, recursive=False))
-        assert lines[0].startswith('* ')  # Title, skip it
-        content = '\n'.join(lines[1:])
-        hl.update_raw_contents(content)
-
-        # Check after update
-        ex.assert_matches(self, hl, accept_trailing_whitespace_changes=True)
-

 def print_tree(tree, indentation=0, headline=None):
     for element in tree:

View File

@@ -58,7 +58,7 @@ class HL:
         self.content = content
         self.children = children

-    def assert_matches(self, test_case: unittest.TestCase, doc, accept_trailing_whitespace_changes=False):
+    def assert_matches(self, test_case: unittest.TestCase, doc):
         test_case.assertEqual(self.title, get_raw(doc.title))

         # Check properties
@@ -75,10 +75,7 @@
                     timestamp_to_datetime(doc_props[i].value), prop[1]
                 )

-        if accept_trailing_whitespace_changes:
-            test_case.assertEqual(get_raw_contents(doc).rstrip(), self.get_raw().rstrip())
-        else:
-            test_case.assertEqual(get_raw_contents(doc), self.get_raw())
+        test_case.assertEqual(get_raw_contents(doc), self.get_raw())

         # Check children
         if self.children is None: