2021-08-14 16:04:22 +00:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
|
2022-09-29 22:13:22 +00:00
|
|
|
import sqlite3
|
2022-08-20 12:27:45 +00:00
|
|
|
import time
|
2022-05-15 22:06:37 +00:00
|
|
|
import json
|
2021-08-26 22:22:48 +00:00
|
|
|
import html
|
2021-08-14 16:04:22 +00:00
|
|
|
import logging
|
|
|
|
import os
|
|
|
|
import sys
|
|
|
|
import uuid
|
|
|
|
from datetime import datetime
|
2022-08-20 12:27:45 +00:00
|
|
|
import traceback
|
2022-10-23 19:16:22 +00:00
|
|
|
import re
|
2022-11-03 23:33:05 +00:00
|
|
|
from itertools import chain
|
2022-12-14 22:46:41 +00:00
|
|
|
import shutil
|
2022-08-20 12:27:45 +00:00
|
|
|
|
|
|
|
import inotify.adapters
|
2021-08-14 16:04:22 +00:00
|
|
|
|
2022-05-07 21:03:26 +00:00
|
|
|
import org_rw
|
2022-05-06 13:58:28 +00:00
|
|
|
from org_rw import OrgTime, dom, Link
|
2021-08-14 16:04:22 +00:00
|
|
|
from org_rw import dump as dump_org
|
|
|
|
from org_rw import load as load_org
|
2021-08-26 22:22:48 +00:00
|
|
|
from org_rw import token_list_to_raw
|
2021-08-14 16:04:22 +00:00
|
|
|
|
2022-11-15 20:11:36 +00:00
|
|
|
import pygments
|
|
|
|
import pygments.lexers
|
|
|
|
import pygments.formatters
|
|
|
|
|
2022-10-23 19:30:44 +00:00
|
|
|
# Set custom states
# Register the project's extra TODO-style keywords so org_rw recognizes
# them as states when parsing headlines.
for state in ("NEXT", "MEETING", "Q", "PAUSED", "SOMETIME", "TRACK", "WAITING"):
    org_rw.DEFAULT_TODO_KEYWORDS.append(state)

# Extra keywords that mark a headline as completed.
for state in ("DISCARDED", "VALIDATING"):
    org_rw.DEFAULT_DONE_KEYWORDS.append(state)
|
|
|
|
|
2021-08-14 16:04:22 +00:00
|
|
|
# File extensions treated as org documents.
EXTENSIONS = [
    ".org",
    ".org.txt",
]

# Lowercase link-target extensions that are rendered as inline images.
IMG_EXTENSIONS = set([
    "svg",
    "png",
    "jpg",
    "jpeg",
    "gif",
])

# Org tags that are not rendered.
SKIPPED_TAGS = set(['attach'])

# Watch-and-rebuild mode; disable by setting WATCH_AND_REBUILD=0.
WATCH = True
if os.getenv('WATCH_AND_REBUILD', '1') == '0':
    WATCH = False

MIN_HIDDEN_HEADLINE_LEVEL = 2

# Headline id whose page is also written as the site's index.html.
INDEX_ID = "ea48ec1d-f9d4-4fb7-b39a-faa7b6e2ba95"

SITE_NAME = "Código para llevar"

# inotify event types that trigger a rebuild in watch mode.
MONITORED_EVENT_TYPES = (
    'IN_CREATE',
    # 'IN_MODIFY',
    'IN_CLOSE_WRITE',
    'IN_DELETE',
    'IN_MOVED_FROM',
    'IN_MOVED_TO',
    'IN_DELETE_SELF',
    'IN_MOVE_SELF',
)

# Splits a line into non-whitespace/non-bracket runs, or single characters.
TEXT_OR_LINK_RE = re.compile(r'([^\s\[\]]+|.)')

# Repository root (parent of this script's directory) and static assets dir.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

STATIC_PATH = os.path.join(ROOT_DIR, 'static')
|
|
|
|
|
2022-10-25 21:45:30 +00:00
|
|
|
class NonExistingLocalNoteError(AssertionError):
    """Raised when a link points at a note id that is not in the site graph."""

    def __init__(self, note_id, src_headline):
        super().__init__()
        # The id the broken link pointed to.
        self.note_id = note_id
        # The headline that contains the broken link.
        self.src_headline = src_headline

    def get_message(self):
        """Return a human-readable description of the broken link."""
        src_title = self.src_headline.title.get_text().strip()
        return "Cannot follow link to '{}' on headline '{}' ({})".format(
            self.note_id,
            self.src_headline.id,
            src_title,
        )
|
|
|
|
|
2022-08-20 12:27:45 +00:00
|
|
|
def is_git_path(path):
    """Return True when any path component is the literal '.git' directory."""
    return ".git" in path.split(os.sep)
|
2022-08-20 12:27:45 +00:00
|
|
|
|
2022-09-29 22:13:22 +00:00
|
|
|
def create_db(path):
    """(Re)create the full-text-search database at *path* and return it.

    Any pre-existing file is deleted first, so the search index always
    starts from scratch.
    """
    if os.path.exists(path):
        os.unlink(path)

    connection = sqlite3.connect(path)
    # FTS5 virtual table backing the site's search; the trigram tokenizer
    # allows substring matches.
    schema = ('CREATE VIRTUAL TABLE note_search USING fts5('
              'note_id, title, body, top_level_title, is_done, is_todo, '
              'tokenize="trigram");')
    connection.execute(schema)
    return connection
|
|
|
|
|
2021-08-14 16:04:22 +00:00
|
|
|
def load_all(top_dir_relative):
    """Recursively load every org file found under *top_dir_relative*.

    Files whose name contains ".org" (so both ".org" and ".org.txt" match)
    are parsed with org_rw. On the first parse failure the traceback and
    the offending path are printed and the process exits, since a partial
    build would be misleading.

    Returns the list of parsed documents.
    """
    top = os.path.abspath(top_dir_relative)

    docs = []

    for root, dirs, files in os.walk(top):
        for name in files:
            # Substring check on purpose: matches ".org" and ".org.txt".
            if ".org" not in name:
                continue

            path = os.path.join(root, name)

            try:
                # `with` guarantees the handle is closed even when parsing
                # raises (previously the handle was leaked).
                with open(path) as f:
                    doc = load_org(f, extra_cautious=True)
                docs.append(doc)
            except Exception:
                # traceback is already imported at module level.
                traceback.print_exc()
                print(f"== On {path}")
                sys.exit(1)

    logging.info("Collected {} files".format(len(docs)))
    return docs
|
|
|
|
|
2022-09-29 22:13:22 +00:00
|
|
|
def regen_all(src_top, dest_top, *, docs=None, db=None):
    """Rebuild the whole site from *src_top* into *dest_top*.

    Steps: reset the search index, reload all org documents, collect every
    headline (assigning ids where missing), build the link/backlink graph,
    write graph.json and graph.html, render one HTML page per headline,
    commit the search index, and copy attachments.

    The *docs* parameter is currently ignored — documents are always
    reloaded from disk. Returns the freshly loaded document list so
    callers (see ``main``) can keep a reference to it.
    """
    files_generated = 0
    cur = db.cursor()
    cleaned_db = False

    try:
        cur.execute('DELETE FROM note_search;')
        cleaned_db = True
    except sqlite3.OperationalError:
        if WATCH:
            # In watch mode keep going with a stale index instead of dying.
            logging.warning("Error pre-cleaning DB, search won't be updated")
        else:
            raise

    docs = load_all(src_top)
    base_dirs = set()
    # Maps 'id:<doc id>' -> 'id:<main headline id>' for single-headline docs.
    doc_to_headline_remapping = {}

    os.makedirs(dest_top, exist_ok=True)

    ## Build headline list
    # This includes a virtual headline for ID-referenced documents.
    all_headlines = []
    main_headlines_by_path = {}
    main_headline_to_docid = {}
    for doc in docs:
        relpath = os.path.relpath(doc.path, src_top)
        changed = False
        headlines = list(doc.getAllHeadlines())
        related = None
        # Only documents under public/ are published.
        if not relpath.startswith("public/"):
            # print("Skip:", relpath)
            continue

        base_dirs.add(os.path.dirname(relpath))

        # Pull out the top-level "Related" headline; iterate backwards so
        # pop(i) does not disturb unvisited indices.
        i = len(headlines)
        while i > 0:
            i -= 1
            headline = headlines[i]
            if headline.title.get_text().strip().lower() == "related" and headline.depth == 1:
                if related is not None:
                    print(
                        "Found duplicated related: {} vs {}".format(
                            related.id, headline.id
                        )
                    )
                    # Deliberate hard failure on duplicated "Related" sections.
                    assert related is None
                related = headline
                headlines.pop(i)

        # Ensure every published headline has a stable id; write the file
        # back when new ids were generated.
        for headline in headlines:
            if headline.id is None:
                headline.id = str(uuid.uuid4())
                changed = True

        if changed:
            print("Updated", relpath)
            save_changes(doc)

        all_headlines.extend(headlines)
        main_headline = None
        topHeadlines = doc.getTopHeadlines()

        # A document is renderable "as a note" when it has exactly one top
        # headline besides the optional Related section.
        if ((len(topHeadlines) == 1 and related is None)
            or (len(topHeadlines) == 2 and related is not None)):

            main_headline = [h for h in topHeadlines if h != related][0]
            main_headlines_by_path[doc.path] = main_headline
            if doc.id is not None:
                doc_to_headline_remapping['id:' + doc.id] = 'id:' + main_headline.id
                main_headline_to_docid[main_headline.id] = doc.id
            files_generated += 1
        elif doc.id is not None:
            logging.error("Cannot render document from id: {}. {} headlines {} related".format(
                relpath,
                len(topHeadlines),
                'with' if related is not None else 'without'
            ))

    # Build graph
    graph = {}
    backlink_graph = {}
    for headline in all_headlines:
        links = []
        headline_links = list(headline.get_links())
        # NOTE(review): `main_headline`, `related` and (below) `doc` still
        # hold the values from the LAST iteration of the document loop
        # above — this looks like a latent bug; confirm intent before
        # relying on it.
        if headline == main_headline and related is not None:
            headline_links.extend(list(related.get_links()))

        for l in headline_links:
            if l.value.startswith('http://') or l.value.startswith('https://'):
                pass  # Ignore for now, external URL
            elif l.value.startswith('id:'):
                links.append({'target': l.value})
            elif l.value.startswith('attachment:'):
                pass  # Ignore, attachment
            elif l.value.startswith('file:'):
                pass  # Ignore, attachment
            elif l.value.startswith('notmuch:'):
                pass  # Ignore, mail
            elif l.value.startswith('orgit-rev:'):
                pass  # Ignore, mail
            elif l.value.startswith('*'):
                pass  # Ignore, internal
            elif not ':' in l.value.split()[0]:
                pass  # Ignore, internal
            elif l.value.startswith('./'):
                pass  # TODO: Properly handle
            else:
                logging.warning('On document {}, unknown link to {}'.format(doc.path, l.value))

        # Structural "in" link from a child headline to its parent.
        if headline.parent:
            if isinstance(headline.parent, org_rw.Headline):
                links.append({
                    "target": headline.parent.id,
                    "relation": "in"
                })

        # Record reverse edges (skip structural ones).
        for backlink in links:
            if 'relation' in backlink and backlink['relation'] == 'in':
                continue

            target = backlink['target']
            if target.startswith('id:'):
                target = target[len('id:'):]

            if target not in backlink_graph:
                backlink_graph[target] = set()

            backlink_graph[target].add(headline.id)

        graph[headline.id] = {
            "title": org_rw.org_rw.token_list_to_plaintext(headline.title.contents).strip(),
            "links": links,
            "depth": headline.depth,
        }
        # Make the document id an alias of its main headline's node.
        if headline.id in main_headline_to_docid:
            graph[main_headline_to_docid[headline.id]] = graph[headline.id]

        topLevelHeadline = headline
        while isinstance(topLevelHeadline.parent, org_rw.Headline):
            topLevelHeadline = topLevelHeadline.parent

        # Save for full-text-search
        cur.execute('''INSERT INTO note_search(note_id, title, body, top_level_title, is_done, is_todo) VALUES (?, ?, ?, ?, ?, ?);''',
                    (
                        headline.id,
                        headline.title.get_text(),
                        '\n'.join(headline.doc.dump_headline(headline, recursive=False)),
                        topLevelHeadline.title.get_text(),
                        headline.is_done,
                        headline.is_todo,
                    ))

    # Update graph, replace document ids with headline ids
    for headline_data in graph.values():
        for link in headline_data['links']:
            if link['target'] in doc_to_headline_remapping:
                link['target'] = doc_to_headline_remapping[link['target']]

    # Remap document ids backlinks to main headlines
    for doc_id, main_headline_id in doc_to_headline_remapping.items():
        if doc_id.startswith('id:'):
            doc_id = doc_id[len('id:'):]
        if main_headline_id.startswith('id:'):
            main_headline_id = main_headline_id[len('id:'):]
        for backlink in backlink_graph.get(doc_id, []):
            if main_headline_id not in backlink_graph:
                backlink_graph[main_headline_id] = set()
            backlink_graph[main_headline_id].add(backlink)

    # Output graph files
    graphpath = os.path.join(dest_top, "graph.json")
    graph_explorer_path = os.path.join(dest_top, "graph.html")
    with open(graphpath, "wt") as f:
        json.dump(obj=graph, fp=f, indent=2)
    with open(graph_explorer_path, 'wt') as f:
        # Inject the graph JSON into the static explorer template.
        with open(os.path.join(os.path.dirname(os.path.abspath(dest_top)), '..', 'static', 'graph_explorer.html'), 'rt') as template:
            source = template.read()
        f.write(source.replace('<!-- REPLACE_THIS_WITH_GRAPH -->',
                               json.dumps(graph)))
    logging.info("Generated {} files".format(files_generated))

    # Render docs after we've built the graph
    # Render main headlines
    full_graph_info = { "nodes": graph, "backlinks": backlink_graph, "main_headlines": main_headlines_by_path }
    for _docpath, main_headline in main_headlines_by_path.items():
        if main_headline.doc.id:
            endpath = os.path.join(dest_top, main_headline.doc.id + ".node.html")
            with open(endpath, "wt") as f:
                f.write(render_as_document(main_headline, main_headline.doc, headlineLevel=0, graph=full_graph_info,
                                           title=org_rw.token_list_to_plaintext(main_headline.title.contents)))

    # Render all headlines
    for headline in all_headlines:
        endpath = os.path.join(dest_top, headline.id + ".node.html")

        # Render HTML
        with open(endpath, "wt") as f:
            f.write(render_as_document(headline, headline.doc, headlineLevel=0, graph=full_graph_info,
                                       title=org_rw.token_list_to_plaintext(headline.title.contents)))
        files_generated += 1

        # The designated index note is additionally written as index.html.
        if headline.id == INDEX_ID:
            index_endpath = os.path.join(dest_top, "index.html")
            with open(index_endpath, "wt") as f:
                f.write(render_as_document(headline, headline.doc, headlineLevel=0, graph=full_graph_info,
                                           title=org_rw.token_list_to_plaintext(headline.title.contents)))
            files_generated += 1

    cur.close()
    db.commit()

    logging.info("Copying attachments")
    attachments_dir = os.path.join(dest_top, 'attachments')
    os.makedirs(attachments_dir, exist_ok=True)
    for base in base_dirs:
        data_dir = os.path.join(src_top, base, 'data')
        logging.info("Copying attachments from: {}".format(data_dir))
        if not os.path.exists(data_dir):
            continue
        for subdir in os.listdir(data_dir):
            shutil.copytree(os.path.join(data_dir, subdir),
                            os.path.join(attachments_dir, subdir),
                            dirs_exist_ok=True)

    # Fix: previously this returned None, although `main` assigns
    # `docs = regen_all(...)` and passes it back on the next rebuild.
    return docs
|
|
|
|
|
|
|
|
|
2022-08-20 12:27:45 +00:00
|
|
|
def main(src_top, dest_top):
    """Build the site once, then (in watch mode) rebuild on file changes.

    Watches *src_top* and the static assets directory via inotify.
    Returns 0 after a one-shot build when WATCH is disabled; otherwise
    loops forever handling filesystem events.
    """
    notifier = inotify.adapters.InotifyTrees([src_top, STATIC_PATH])

    ## Initial load
    t0 = time.time()

    os.makedirs(dest_top, exist_ok=True)
    db = create_db(os.path.join(dest_top, 'db.sqlite3'))
    docs = regen_all(src_top, dest_top, db=db)

    if not WATCH:
        logging.info("Build completed in {:.2f}s".format(time.time() - t0))
        return 0

    logging.info("Initial load completed in {:.2f}s".format(time.time() - t0))

    ## Updating
    for event in notifier.event_gen(yield_nones=False):
        (ev, types, directory, file) = event
        if not any([type in MONITORED_EVENT_TYPES for type in types]):
            continue
        if is_git_path(directory):
            continue
        filepath = os.path.join(directory, file)
        print("CHANGED: {}".format(filepath))
        t0 = time.time()
        try:
            docs = regen_all(src_top, dest_top, docs=docs, db=db)
        except Exception:
            # Fix: a bare `except:` here also swallowed KeyboardInterrupt /
            # SystemExit; only recover from ordinary errors so Ctrl-C works.
            logging.error(traceback.format_exc())
            logging.error("Loading new templates failed 😿")
            continue
        logging.info("Updated all in {:.2f}s".format(time.time() - t0))
|
|
|
|
|
2022-10-23 16:28:08 +00:00
|
|
|
def get_headline_with_name(target_name, doc):
    """Return the first headline of *doc* whose stripped title equals
    *target_name* (also stripped), or None when no headline matches."""
    wanted = target_name.strip()
    matches = (h for h in doc.getAllHeadlines()
               if h.title.get_text().strip() == wanted)
    return next(matches, None)
|
2022-08-20 12:27:45 +00:00
|
|
|
|
2022-10-25 21:35:46 +00:00
|
|
|
def assert_id_exists(id, src_headline, graph):
    """Raise NonExistingLocalNoteError when *id* is not a node of *graph*."""
    known_nodes = graph["nodes"]
    if id in known_nodes:
        return
    raise NonExistingLocalNoteError(id, src_headline)
|
2022-10-25 21:35:46 +00:00
|
|
|
|
2022-08-20 15:50:40 +00:00
|
|
|
def print_tree(tree, indentation=0, headline=None):
    """Debug helper: dump a DOM tree to stdout.

    Currently disabled via the unconditional `return` below; the code after
    it is intentionally kept for when debugging is re-enabled.
    """
    # if headline and headline.id != INDEX_ID:
    #     return
    return
    for element in tree:
        if "children" in dir(element):
            if len(element.children) > 0:
                print_element(element.children, indentation + 1, headline)
                print()

        elif "content" in dir(element):
            for content in element.content:
                print_element(content, indentation + 1, headline)
|
|
|
|
|
|
|
|
def print_element(element, indentation, headline):
    """Debug helper: print one DOM element, recursing into nested trees."""
    pad = " " * indentation
    if isinstance(element, org_rw.Link):
        print(pad, "Link:", element.get_raw())
    elif isinstance(element, str):
        print(pad, "{" + element + "}", type(element))
    else:
        print_tree(element, indentation, headline)
|
|
|
|
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_property_drawer(element, acc, headline, graph):
    # Property drawers are org metadata; nothing is rendered for them.
    pass
|
|
|
|
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_logbook_drawer(element, acc, headline, graph):
    # Logbook drawers (clocking/state history) are not rendered.
    pass
|
|
|
|
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_property_node(element, acc, headline, graph):
    # Individual property nodes are metadata; nothing is rendered.
    pass
|
|
|
|
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_list_group(element, acc, headline, graph):
    # Render a list group as an HTML unordered list, recursing into children.
    acc.append("<ul>")
    render_tree(element.children, acc, headline, graph)
    acc.append("</ul>")
|
|
|
|
|
2023-01-11 23:19:04 +00:00
|
|
|
def render_table(element, acc, headline, graph):
    # Render a table node as <table>, recursing into its rows.
    acc.append("<table>")
    render_tree(element.children, acc, headline, graph)
    acc.append("</table>")
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_table_row(element, acc, headline, graph):
    """Render one table row; every cell is HTML-escaped inside a <td>."""
    acc.append("<tr>")
    for cell in element.cells:
        acc.extend(("<td>", html.escape(cell), "</td>"))
    acc.append("</tr>")
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_table_separator_row(element, acc, headline, graph):
    # Separator rows carry no cells; emit an empty, styleable <tr>.
    acc.append("<tr class='__table-separator'></tr>")
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_list_item(element, acc, headline, graph):
    """Render a single list item; a description-list tag, when present,
    is emitted in its own span before the item body."""
    acc.append("<li>")

    tag_tokens = element.tag
    if tag_tokens is not None:
        acc.append("<span class='tag'>")
        render_text_tokens(tag_tokens, acc, headline, graph)
        acc.append("</span>")

    acc.append("<span class='item'>")
    render_text_tokens(element.content, acc, headline, graph)
    acc.append("</span></li>")
|
|
|
|
|
2022-11-03 23:33:05 +00:00
|
|
|
def render_block(content, acc, _class, is_code):
    """Append a <pre> block (optionally wrapping <code>) holding *content*
    with the indentation common to all lines removed."""
    opening = ['<pre class="{}">'.format(_class)]
    closing = ['</pre>']
    if is_code:
        opening.append('<code>')
        closing.insert(0, '</code>')

    acc.extend(opening)
    # Remove indentation common to all lines
    acc.append(unindent(content))
    acc.extend(closing)
|
|
|
|
|
|
|
|
def unindent(content):
    """Return *content* with the space-indentation common to all non-blank
    lines removed.

    Blank (whitespace-only) lines are ignored when computing the common
    indentation. Fix: empty or all-blank input previously raised
    ValueError (min() of an empty sequence); it is now returned unchanged.
    """
    lines = content.split('\n')
    base_indentation = min(
        (len(l) - len(l.lstrip(' ')) for l in lines if len(l.strip()) > 0),
        default=0,  # no non-blank lines -> strip nothing
    )
    return '\n'.join(l[base_indentation:] for l in lines)
|
2022-11-03 23:33:05 +00:00
|
|
|
|
|
|
|
def render_code_block(element, acc, headline, graph):
    """Render a source block, syntax-highlighted with pygments when a lexer
    matching the block's language argument exists; otherwise fall back to
    an escaped plain <pre><code> block."""
    # NOTE(review): element.lines is passed to unindent()/html.escape()
    # below, so it is assumed to be a single string — confirm against org_rw.
    code = element.lines

    if element.arguments is not None and len(element.arguments) > 0:
        try:
            # The first word of the block arguments names the language.
            lexer = pygments.lexers.get_lexer_by_name(element.arguments.split()[0], stripall=True)
            content = pygments.highlight(unindent(code),
                                         lexer,
                                         pygments.formatters.HtmlFormatter()
                                         )
            acc.append(content)
            return

        except pygments.util.ClassNotFound:
            pass
    # Reached when no language was given, or pygments had no matching lexer.
    logging.error("Cannot find lexer for {}".format(element.subtype.lower()))
    content = html.escape(code)
    render_block(content, acc, _class='code ' + element.subtype.lower(), is_code=True)
|
|
|
|
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_results_block(element, acc, headline, graph):
    """Render a RESULTS drawer as a plain-text <pre> block; empty output
    is skipped entirely."""
    content = '\n'.join(child.get_raw() for child in element.children)
    if content.strip():
        render_block(content, acc, _class='results lang-text', is_code=False)
|
2022-05-16 21:28:59 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_org_text(element, acc, headline, graph):
    # Convert the raw org_rw.Text token list to DOM, then render it.
    as_dom = org_rw.text_to_dom(element.contents, element)
    render_text_tokens(as_dom, acc, headline, graph)
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_text(element, acc, headline, graph):
    """Render a DOM text node wrapped in a div.text container."""
    open_tag, close_tag = '<div class="text">', '</div>'
    acc.append(open_tag)
    render_text_tokens(element.content, acc, headline, graph)
    acc.append(close_tag)
|
2022-05-06 18:19:11 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_text_tokens(tokens, acc, headline, graph):
    """Render a sequence of text tokens (plain strings, Links, and marker
    tokens) into HTML appended to *acc*, wrapped in <p> tags.

    Double newlines in plain text start a new paragraph. Bare URLs inside
    plain text are auto-linked. Link tokens are resolved against *graph*;
    links to notes that don't exist locally degrade to escaped plain text.
    """
    acc.append('<p>')
    if isinstance(tokens, org_rw.Text):
        tokens = tokens.contents
    for chunk in tokens:
        if isinstance(chunk, str):
            # Blank-line-separated paragraphs.
            lines = chunk.split('\n\n')
            contents = []
            for line in lines:
                line_chunks = []
                for word in TEXT_OR_LINK_RE.findall(line):
                    # Auto-link bare URLs (org-protocol is deliberately not linked).
                    if '://' in word and not (word.startswith('org-protocol://')):
                        if not (word.startswith('http://')
                                or word.startswith('https://')
                                or word.startswith('ftp://')
                                or word.startswith('ftps://')
                                ):
                            # Unknown scheme: warn and keep it as plain text.
                            logging.warning('Is this a link? {} (on {})\nLine: {}\nChunks: {}'.format(word, headline.doc.path, line, line_chunks))
                            line_chunks.append(html.escape(word))
                        else:
                            line_chunks.append('<a href="{url}" class="external">{description}</a>'
                                               .format(url=word,
                                                       description=html.escape(word)))
                    else:
                        line_chunks.append(html.escape(word))
                contents.append(' '.join(line_chunks))

            # Paragraph breaks close and reopen the surrounding <p>.
            acc.append('<span class="line">{}</span>'.format('</p><p>'.join(contents)))

        elif isinstance(chunk, Link):
            link_target = chunk.value
            is_internal_link = True
            description = chunk.description
            if description is None:
                description = chunk.value

            try:
                if link_target.startswith('id:'):
                    # Note-to-note link; must exist in the graph.
                    assert_id_exists(link_target[3:], headline, graph)
                    link_target = './' + link_target[3:] + '.node.html'
                elif link_target.startswith('./') or link_target.startswith('../'):
                    if '::' in link_target:
                        logging.warning('Not implemented headline links to other files. Used on {}'.format(link_target))

                    else:
                        # Relative file link: map the target document to its
                        # main headline's page when it is part of the site.
                        target_path = os.path.abspath(os.path.join(os.path.dirname(headline.doc.path), link_target))
                        if target_path not in graph['main_headlines']:
                            logging.warning('Link to doc not in graph: {}'.format(target_path))
                        else:
                            assert_id_exists(graph['main_headlines'][target_path].id, headline, graph)
                            link_target = './' + graph['main_headlines'][target_path].id + '.node.html'
                elif link_target.startswith('attachment:'):
                    # Attachment paths are derived from the headline id
                    # (first two chars / rest), mirroring org-attach layout.
                    inner_target = link_target.split(':', 1)[1]
                    link_target = 'attachments/{}/{}/{}'.format(headline.id[:2], headline.id[2:], inner_target)
                    logging.warning('Not implemented `attachment:` links. Used on {}'.format(link_target))
                elif link_target.startswith('* '):
                    # In-document headline link by title.
                    target_headline = get_headline_with_name(link_target.lstrip('* '), headline.doc)
                    if target_headline is None:
                        logging.warning('No headline found corresponding to {}. On file {}'.format(link_target, headline.doc.path))
                    else:
                        assert_id_exists(target_headline.id, headline, graph)
                        link_target = './' + target_headline.id + '.node.html'
                else:
                    is_internal_link = False
                    # orgit-rev/file links cannot be resolved on the site;
                    # degrade to plain text via the handler below.
                    if link_target.startswith('orgit-rev'):
                        raise NonExistingLocalNoteError(link_target, headline)
                    elif link_target.startswith('file:'):
                        raise NonExistingLocalNoteError(link_target, headline)
                    elif not (
                        link_target.startswith('https://')
                        or link_target.startswith('http://')
                        or link_target.startswith('/')
                    ):
                        raise NotImplementedError('Unknown link type: {}'
                                                  .format(link_target))

                # Image targets become inline <img>, others a plain anchor.
                if link_target.rsplit('.', 1)[-1].lower() in IMG_EXTENSIONS:
                    acc.append('<a href="{}" class="img img-{}" ><img src="{}" /></a>'.format(
                        html.escape(link_target),
                        'internal' if is_internal_link else 'external',
                        html.escape(link_target),
                    ))
                else:
                    acc.append('<a href="{}" class="{}" >{}</a>'.format(
                        html.escape(link_target),
                        'internal' if is_internal_link else 'external',
                        html.escape(description),
                    ))
            except NonExistingLocalNoteError as err:
                # Broken local link: warn and keep only the description text.
                logging.warning(err.get_message())
                acc.append(html.escape(description))
        elif isinstance(chunk, org_rw.MarkerToken):
            # Formatting markers map 1:1 to HTML open/close tags.
            tag = '<'
            if chunk.closing:
                tag += '/'
            tag += {
                org_rw.MarkerType.BOLD_MODE: 'strong',
                org_rw.MarkerType.CODE_MODE: 'code',
                org_rw.MarkerType.ITALIC_MODE: 'em',
                org_rw.MarkerType.STRIKE_MODE: 's',
                org_rw.MarkerType.UNDERLINED_MODE: 'span class="underlined"' if not chunk.closing else 'span',
                org_rw.MarkerType.VERBATIM_MODE: 'span class="verbatim"' if not chunk.closing else 'span',
            }[chunk.tok_type]
            tag += '>'
            acc.append(tag)
        else:
            raise NotImplementedError('TextToken: {}'.format(chunk))
    acc.append('</p>')
|
2021-08-26 22:22:48 +00:00
|
|
|
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_tag(element, acc, headline, graph):
    """Dispatch *element* to the renderer registered for its DOM node type.

    Raises KeyError when the element's type has no registered renderer.
    """
    return {
        dom.PropertyDrawerNode: render_property_drawer,
        dom.LogbookDrawerNode: render_logbook_drawer,
        dom.PropertyNode: render_property_node,
        dom.ListGroupNode: render_list_group,
        dom.ListItem: render_list_item,
        dom.TableNode: render_table,
        dom.TableSeparatorRow: render_table_separator_row,
        dom.TableRow: render_table_row,
        dom.CodeBlock: render_code_block,
        dom.Text: render_text,
        dom.ResultsDrawerNode: render_results_block,
        org_rw.Text: render_org_text,
    }[type(element)](element, acc, headline, graph)
|
2021-08-26 22:22:48 +00:00
|
|
|
|
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_tree(tree, acc, headline, graph):
    """Render every element of *tree* in document order."""
    for node in tree:
        render_tag(node, acc, headline, graph)
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_inline(tree, f, headline, graph):
    """Run renderer *f* over *tree* and return the accumulated HTML string."""
    chunks = []
    f(tree, chunks, headline, graph)
    return ''.join(chunks)
|
|
|
|
|
2021-08-26 22:22:48 +00:00
|
|
|
|
2022-10-23 16:22:05 +00:00
|
|
|
def render_as_document(headline, doc, headlineLevel, graph, title):
    """Return a full HTML page for *headline*.

    Non-top-level headlines get a small redirect page pointing at the
    anchor inside their top-level headline's page; top-level headlines get
    the full rendered document.
    """
    if isinstance(headline.parent, org_rw.Headline):
        # Walk up to the top-level headline that owns the rendered page.
        topLevelHeadline = headline.parent
        while isinstance(topLevelHeadline.parent, org_rw.Headline):
            topLevelHeadline = topLevelHeadline.parent
        return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{title} @ {SITE_NAME}</title>
<meta http-equiv="refresh" content="0;./{topLevelHeadline.id}.node.html#{headline.id}" />
<link href="../static/style.css" rel="stylesheet"/>
<link href="../static/syntax.css" rel="stylesheet"/>
</head>
<body>
<nav>
<h1><a href="./index.html">Código para llevar [Notes]</a></h1>
</nav>
<a href='./{topLevelHeadline.id}.node.html#{headline.id}'>Sending you to the main note... [{org_rw.token_list_to_plaintext(topLevelHeadline.title.contents)}]</a>
</body>
</html>
"""
    else:
        return as_document(render(headline, doc, graph=graph, headlineLevel=headlineLevel), title, render_toc(doc))
|
|
|
|
|
|
|
|
def render_toc(doc):
    """Build a nested <ul class="toc"> for *doc*'s top-level headlines.

    Returns None when fewer than two entries would be listed, so callers
    can omit the table of contents entirely.
    """
    acc = ['<ul class="toc">']
    for top in doc.getTopHeadlines():
        render_toc_headline(top, acc)

    acc.append('</ul>')

    # If < 2 headlines, ignore it
    if acc.count('<li>') < 2:
        return None

    return ''.join(acc)
|
|
|
|
|
|
|
|
def render_toc_headline(headline, acc):
    """Append the TOC entry (an <li>, recursively including children) for
    *headline* to the chunk list *acc*."""
    label = html.escape(headline.title.get_text())
    acc.extend(('<li>', f'<a href="#{headline.id}">{label}</a>'))
    kids = list(headline.children)
    if kids:
        acc.append('<ul>')
        for kid in kids:
            render_toc_headline(kid, acc)
        acc.append('</ul>')
    acc.append('</li>')
|
2022-10-18 20:47:04 +00:00
|
|
|
|
def render_connections(headline_id, content, graph):
    """Append an SVG "centered graph" of the notes linked to *headline_id*
    to the chunk list *content*.

    Does nothing when the headline has no backlinks. The SVG is produced by
    spawning ``gen-centered-graph.py`` and reading its output file back.
    """
    if headline_id not in graph['backlinks']:
        return

    # TODO: Cache results
    # TODO: Avoid querying graph API on script
    # TODO: Properly render outgoing links
    logging.info("Generating centered graph for {}".format(headline_id))
    import subprocess
    this_dir = os.path.dirname(os.path.abspath(__file__))
    os.makedirs('cache', exist_ok=True)
    # Single definition of the output path, shared by the generator call and
    # the read below (previously duplicated inline).
    svg_path = 'cache/' + headline_id + '.svg'
    subprocess.check_call(['python3', os.path.join(this_dir, 'gen-centered-graph.py'), headline_id, svg_path])
    try:
        with open(svg_path) as f:
            content.append("<div class='connections'>{}</div>".format(f.read()))
    except FileNotFoundError:
        # Best-effort: a missing graph is logged but does not abort the build.
        logging.exception('Graph file not produced on headline: "{}"'.format(headline_id))
def render(headline, doc, graph, headlineLevel):
    """Render *headline* (and, recursively, its children) to an HTML
    ``<div class="node">`` fragment.

    :param headline: org_rw headline to render.
    :param doc: document the headline belongs to (used for error reporting).
    :param graph: site-wide link graph, forwarded to the sub-renderers.
    :param headlineLevel: 0 for top-level headlines; children get level + 1.
    """
    try:
        # Named `node_dom` so the local does not shadow the `dom` name
        # imported from org_rw at the top of the file.
        node_dom = headline.as_dom()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not intercepted here.
        logging.error("Error generating DOM for {}".format(doc.path))
        raise
    print_tree(node_dom, indentation=2, headline=headline)

    content = []
    render_tree(node_dom, content, headline, graph)
    if headline.id:
        render_connections(headline.id, content, graph)

    for child in headline.children:
        content.append(render(child, doc, headlineLevel=headlineLevel + 1, graph=graph))

    if headline.state is None:
        state = ""
    else:
        state = f'<span class="state todo-{headline.is_todo} state-{headline.state}">{headline.state}</span>'

    todo_state = "todo" if headline.is_todo else "done"

    # Tags shown after the title; purely structural tags are hidden.
    tag_list = [
        f'<span class="tag">{html.escape(tag)}</span>'
        for tag in headline.shallow_tags
        if tag.lower() not in SKIPPED_TAGS
    ]
    tags = f'<span class="tags">{"".join(tag_list)}</span>'

    display_state = 'expanded'
    # # Update display based on document STARTUP config
    # visual_level = doc.get_keywords('STARTUP', 'showall')
    # if visual_level.startswith('show') and visual_level.endswith('levels'):
    #     visual_level_num = int(visual_level[len('show'):-len('levels')]) - 1
    #     # Note that level is 0 indexed inside this loop
    #     if headlineLevel >= visual_level_num:
    #         display_state = 'collapsed'

    title = render_inline(headline.title, render_tag, headline, graph)

    # Non-top headlines link to their own anchor so they can be referenced.
    if headlineLevel > 0:
        title = f"<a href=\"#{html.escape(headline.id)}\">{title}</a>"

    return f"""
<div id="{html.escape(headline.id)}" class="node {todo_state} {display_state}">
<h1 class="title">
{state}
{title}
{tags}
</h1>
<div class='contents'>
{''.join(content)}
</div>
</div>
"""
|
def as_document(html, title, global_toc):
    """Wrap already-rendered headline HTML into a complete standalone page.

    :param html: rendered body fragment. NOTE(review): this parameter shadows
        the stdlib ``html`` module inside this function; the module is not
        used here, and the name is kept to avoid breaking keyword callers.
    :param title: page title (combined with the site name).
    :param global_toc: table-of-contents HTML, or ``None`` to omit the TOC.
    """
    body_classes = []
    if global_toc is None:
        toc_section = ""
        body_classes.append('no-toc')
    else:
        toc_section = f"""
<div class="global-table-of-contents">
<h2>Table of contents</h2>
{global_toc}
</div>
"""
    return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{title} @ {SITE_NAME}</title>
<link href="../static/style.css" rel="stylesheet"/>
<link href="../static/syntax.css" rel="stylesheet"/>
<!-- v Fixes mobile viewports. -->
<meta name="HandheldFriendly" content="True">
<meta name="MobileOptimized" content="320">
<meta name="viewport" content="width=device-width, initial-scale=1">
</head>
<body class="{' '.join(body_classes)}">
<nav>
<h1><a href="./index.html">Código para llevar [Notes]</a></h1>
<input type="text" id="searchbox" disabled="true" placeholder="Search (requires JS)" />
</nav>
{toc_section}
{html}
<script src="../static/search-box.js"></script>
<script type="text/javascript">_codigoparallevar_enable_search_box('#searchbox', {{placeholder: 'Search...'}})</script>
</body>
</html>
"""
def save_changes(doc):
    """Write *doc* back to the org file it was loaded from."""
    assert doc.path is not None
    # Explicit UTF-8: org sources are UTF-8, and the platform default text
    # encoding may differ (e.g. on Windows), corrupting non-ASCII content.
    with open(doc.path, "wt", encoding="utf-8") as f:
        dump_org(doc, f)
|
|
if __name__ == "__main__":
    if len(sys.argv) != 3:
        # Usage errors go to stderr and exit non-zero (previously this
        # printed to stdout and exited 0, signalling success to the shell).
        print("Usage: {} SOURCE_TOP DEST_TOP".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
    sys.exit(main(sys.argv[1], sys.argv[2]))