Mirror of https://github.com/kevinveenbirkenbach/computer-playbook.git
(synced 2025-08-29 23:08:06 +02:00).
Commit: "Moved sphinx files."
This commit contains:
0
docs/extensions/__init__.py
Normal file
0
docs/extensions/__init__.py
Normal file
61
docs/extensions/local_file_headings.py
Normal file
61
docs/extensions/local_file_headings.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import os
import sys
import logging as std_logging  # Use the standard logging module
from sphinx.util import logging  # Sphinx logging is used elsewhere if needed
from docutils.parsers.rst import Directive
from .nav_utils import natural_sort_key, extract_headings_from_file, group_headings, sort_tree, MAX_HEADING_LEVEL, DEFAULT_MAX_NAV_DEPTH

# Module-level logger whose verbosity follows the build's own command line:
# debug output is emitted only when Sphinx was invoked with -v/--verbose.
logger = std_logging.getLogger(__name__)
if any(arg in sys.argv for arg in ["-v", "--verbose"]):
    logger.setLevel(std_logging.DEBUG)
else:
    logger.setLevel(std_logging.INFO)

# NOTE(review): this assignment shadows DEFAULT_MAX_NAV_DEPTH imported from
# .nav_utils above (both are currently 4) — confirm the local override is
# intentional.
DEFAULT_MAX_NAV_DEPTH = 4
|
||||
|
||||
def add_local_file_headings(app, pagename, templatename, context, doctree):
    """Populate ``context['local_md_headings']`` for the page being rendered.

    Collects the headings of every ``.md``/``.rst`` file that lives in the
    same source directory as *pagename*, groups them into a tree and sorts
    it, so templates can render a local navigation block.
    """
    logger.debug("add_local_file_headings called with pagename: %s", pagename)

    directory = os.path.dirname(pagename)
    abs_dir = os.path.join(app.srcdir, directory)
    if not os.path.isdir(abs_dir):
        logger.warning("Directory %s not found for page %s.", abs_dir, pagename)
        context['local_md_headings'] = []
        return

    # Only Markdown and reStructuredText sources are considered.
    candidates = [name for name in os.listdir(abs_dir)
                  if name.endswith('.md') or name.endswith('.rst')]

    # An index.rst makes a sibling README.md redundant, so the latter is
    # dropped (case-insensitively).
    lowered = [name.lower() for name in candidates]
    if 'index.rst' in lowered:
        candidates = [name for name in candidates if name.lower() not in ['readme.md']]

    file_items = []
    for name in candidates:
        headings = extract_headings_from_file(os.path.join(abs_dir, name),
                                              max_level=MAX_HEADING_LEVEL)
        stem, _ = os.path.splitext(name)
        # Index pages sort before everything else: priority 0 vs. 1.
        rank = 0 if stem.lower() == 'index' else 1
        link = os.path.join(directory, stem)
        for heading in headings:
            file_items.append({
                'level': heading['level'],
                'text': heading['text'],
                'link': link,
                'anchor': heading['anchor'],
                'priority': rank,
                'filename': stem,
            })

    tree = group_headings(file_items)
    sort_tree(tree)
    logger.debug("Generated tree: %s", tree)
    context['local_md_headings'] = tree
|
||||
|
||||
def setup(app):
    """Sphinx entry point: register the depth option and the page hook."""
    # 'env' rebuild scope: changing the value invalidates the environment.
    app.add_config_value('local_nav_max_depth', DEFAULT_MAX_NAV_DEPTH, 'env')
    app.connect('html-page-context', add_local_file_headings)
    extension_meta = {'version': '0.1', 'parallel_read_safe': True}
    return extension_meta
|
130
docs/extensions/local_subfolders.py
Normal file
130
docs/extensions/local_subfolders.py
Normal file
@@ -0,0 +1,130 @@
|
||||
import os
from sphinx.util import logging
from .nav_utils import extract_headings_from_file, MAX_HEADING_LEVEL

logger = logging.getLogger(__name__)

# Representative files, checked in this order (case-insensitively): the first
# one found in a folder supplies the folder's navigation title.
CANDIDATES = ['index.rst', 'readme.md', 'main.rst']
|
||||
|
||||
def collect_folder_tree(dir_path, base_url):
    """
    Build a nested navigation dict for *dir_path* and its sub-folders.

    A folder contributes a node only when it contains one of the
    representative files listed in ``CANDIDATES``; the first heading of
    that file becomes the folder title and the file itself is not listed
    again among the folder's children.  Every other ``.md``/``.rst`` file
    is listed flat (no sub-headings) under its first heading.  Hidden
    folders (leading dot) are skipped; ``None`` is returned for folders
    that are skipped or have no representative file.
    """
    if os.path.basename(dir_path).startswith('.'):
        return None

    # All Markdown / reStructuredText files directly inside this folder.
    doc_files = [
        entry for entry in os.listdir(dir_path)
        if os.path.isfile(os.path.join(dir_path, entry))
        and (entry.endswith('.md') or entry.endswith('.rst'))
    ]

    # Pick the representative file: first CANDIDATES entry present,
    # compared case-insensitively.
    rep_file = None
    for candidate in CANDIDATES:
        rep_file = next((entry for entry in doc_files
                         if entry.lower() == candidate), None)
        if rep_file:
            break
    if not rep_file:
        return None

    rep_path = os.path.join(dir_path, rep_file)
    rep_headings = extract_headings_from_file(rep_path, max_level=MAX_HEADING_LEVEL)
    folder_title = rep_headings[0]['text'] if rep_headings else os.path.basename(dir_path)
    folder_link = os.path.join(base_url, os.path.splitext(rep_file)[0])

    # Drop the representative file plus any other candidate-named files so
    # they are not duplicated as children of this folder.
    doc_files.remove(rep_file)
    doc_files = [entry for entry in doc_files if entry.lower() not in CANDIDATES]

    # Remaining files become flat child entries titled by their first heading.
    file_items = []
    for entry in sorted(doc_files, key=str.lower):
        entry_headings = extract_headings_from_file(
            os.path.join(dir_path, entry), max_level=MAX_HEADING_LEVEL)
        file_items.append({
            'level': 1,
            'text': entry_headings[0]['text'] if entry_headings else entry,
            'link': os.path.join(base_url, os.path.splitext(entry)[0]),
            'anchor': '',
            'priority': 1,
            'filename': entry,
        })

    # Recurse into visible sub-directories; folders that yield None
    # (hidden or without a representative file) are omitted.
    dir_items = []
    for entry in sorted(os.listdir(dir_path), key=str.lower):
        child_path = os.path.join(dir_path, entry)
        if os.path.isdir(child_path) and not entry.startswith('.'):
            subtree = collect_folder_tree(child_path, os.path.join(base_url, entry))
            if subtree:
                dir_items.append(subtree)

    return {
        'text': folder_title,
        'link': folder_link,
        'children': file_items + dir_items,
        'filename': os.path.basename(dir_path),
    }
|
||||
|
||||
def mark_current(node, active):
    """
    Recursively flag *node* and its subtree relative to the active page.

    A node is current when its link equals *active*, when *active* lies
    below the link (link plus '/' is a prefix of *active*), or when any of
    its children is current.  ``node['current']`` is set on every visited
    node; the flag for this node is returned.
    """
    active = active.rstrip('/')
    link = node.get('link', '').rstrip('/')
    on_path = bool(link) and (active == link or active.startswith(link + '/'))

    # Visit every child unconditionally so the whole subtree gets flagged;
    # a current descendant also makes this node current.
    child_hit = False
    for child in node.get('children', []):
        child_hit = mark_current(child, active) or child_hit

    node['current'] = on_path or child_hit
    return node['current']
|
||||
|
||||
def add_local_subfolders(app, pagename, templatename, context, doctree):
    """
    Expose the full folder tree (rooted at ``app.srcdir``) through the
    'local_subfolders' template variable, with the path down to the active
    page marked via the 'current' flag.
    """
    folder_tree = collect_folder_tree(app.srcdir, '')
    if folder_tree:
        mark_current(folder_tree, pagename)
        context['local_subfolders'] = [folder_tree]
    else:
        context['local_subfolders'] = []
|
||||
|
||||
def setup(app):
    """Sphinx entry point: inject the folder tree into every HTML page."""
    app.connect('html-page-context', add_local_subfolders)
    extension_meta = {'version': '0.1', 'parallel_read_safe': True}
    return extension_meta
|
80
docs/extensions/markdown_include.py
Normal file
80
docs/extensions/markdown_include.py
Normal file
@@ -0,0 +1,80 @@
|
||||
import os
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.util import logging

logger = logging.getLogger(__name__)

# MystParser turns the included Markdown source into docutils nodes.
from myst_parser.parsers.sphinx_ import MystParser
|
||||
|
||||
class MarkdownIncludeDirective(Directive):
    """
    ``markdown-include`` directive: read a Markdown file, parse it with
    MystParser and splice the resulting nodes into the current document,
    stripping the file's leading title so it does not create a TOC entry.
    """
    required_arguments = 1  # Path to the Markdown file
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = False

    def run(self):
        """Return the docutils nodes parsed from the referenced Markdown file."""
        logger.info("Executing markdown-include directive")
        env = self.state.document.settings.env
        # Resolve the argument relative to the current source file.
        rel_filename, filename = env.relfn2path(self.arguments[0])
        logger.info("Markdown file: %s", filename)
        if not os.path.exists(filename):
            # FIX: include the resolved path in the message (it was a
            # placeholder-free f-string before), so authors can locate the
            # broken reference.
            error = self.state_machine.reporter.error(
                f'File not found: {filename}',
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]

        try:
            with open(filename, 'r', encoding='utf-8') as f:
                markdown_content = f.read()
        except Exception as e:
            # FIX: report which file failed to read (message previously
            # lacked the path).
            error = self.state_machine.reporter.error(
                f'Error reading file {filename}: {e}',
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]

        # Parse the Markdown content with MystParser.
        parser = MystParser()
        from docutils.frontend import OptionParser
        from docutils.utils import new_document
        settings = OptionParser(components=(MystParser,)).get_default_values()
        # Attach the Sphinx environment to the settings so that myst_parser works.
        settings.env = self.state.document.settings.env
        doc = new_document(filename, settings=settings)
        parser.parse(markdown_content, doc)
        logger.info("Markdown parsing completed successfully")

        # Remove the first header (title) if it exists.
        if doc.children:
            first_section = doc.children[0]
            if isinstance(first_section, nodes.section) and first_section.children:
                first_child = first_section.children[0]
                if isinstance(first_child, nodes.title):
                    if len(first_section.children) > 1:
                        # More content follows: drop the title node entirely.
                        first_section.pop(0)
                        logger.info("Removed first header from Markdown content")
                    else:
                        # The title is the only child: clear its text instead
                        # of leaving the section empty.
                        first_child.clear()
                        logger.info("Cleared text of first header from Markdown content")

            # Unwrap the first section if it no longer has a non-empty title,
            # so its content does not create a TOC entry.
            if isinstance(first_section, nodes.section):
                has_title = any(isinstance(child, nodes.title) and child.astext().strip()
                                for child in first_section.children)
                if not has_title:
                    unwrapped = list(first_section.children)
                    # Replace the first section with its children.
                    doc.children = unwrapped + doc.children[1:]
                    logger.info("Unwrapped first section to avoid a TOC entry")

        return doc.children
|
||||
|
||||
def setup(app):
    """Sphinx entry point: register the markdown-include directive."""
    app.add_directive("markdown-include", MarkdownIncludeDirective)
    extension_meta = {'version': '0.1', 'parallel_read_safe': True}
    return extension_meta
|
78
docs/extensions/nav_utils.py
Normal file
78
docs/extensions/nav_utils.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
|
||||
# Default depth of the generated navigation tree.
DEFAULT_MAX_NAV_DEPTH = 4
# Maximum heading level to extract; 0 means "no limit" (see
# extract_headings_from_file).  This can be overridden in your configuration.
MAX_HEADING_LEVEL = 0


def natural_sort_key(text):
    """Sort key ordering embedded numbers numerically (file2 < file10)."""
    return [int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split(r'(\d+)', text)]


def extract_headings_from_file(filepath, max_level=MAX_HEADING_LEVEL):
    """
    Return the headings of a Markdown or reStructuredText file.

    Each heading is a dict ``{'level': int, 'text': str, 'anchor': str}``.
    Markdown headings are '#'-prefixed lines outside fenced code blocks,
    with GitHub-style anchors (lowercased, spaces to dashes, punctuation
    stripped).  For reST, a non-empty line followed by a punctuation
    underline of length >= 3 counts as a level-1 heading (anchor empty).
    ``max_level == 0`` means no depth limit.  An ``index.rst`` that yields
    no headings falls back to the sibling ``README.md``.
    """
    if max_level == 0:
        # 0 is the "unlimited" sentinel; use an effectively infinite depth.
        max_level = 9999

    headings = []
    ext = os.path.splitext(filepath)[1].lower()
    try:
        with open(filepath, 'r', encoding='utf-8') as f:
            if ext == '.md':
                in_code_block = False
                for line in f:
                    if line.strip().startswith("```"):
                        in_code_block = not in_code_block
                        continue
                    if in_code_block:
                        continue
                    # Markdown headings are defined with '#' characters.
                    match = re.match(r'^(#{1,})(.*?)$', line)
                    if match:
                        level = len(match.group(1))
                        if level <= max_level:
                            heading_text = match.group(2).strip()
                            anchor = re.sub(r'\s+', '-', heading_text.lower())
                            anchor = re.sub(r'[^a-z0-9\-]', '', anchor)
                            headings.append({'level': level, 'text': heading_text, 'anchor': anchor})
            elif ext == '.rst':
                lines = f.readlines()
                for i in range(len(lines) - 1):
                    text_line = lines[i].rstrip("\n")
                    underline = lines[i + 1].rstrip("\n")
                    # BUGFIX: require a non-empty heading line; previously a
                    # reST transition ('----' after a blank line) was recorded
                    # as a heading with empty text.
                    if (text_line.strip()
                            and len(underline) >= 3
                            and re.fullmatch(r'[-=~\^\+"\'`]+', underline)):
                        headings.append({'level': 1, 'text': text_line.strip(), 'anchor': ''})
    except Exception as e:
        print(f"Warning: Error reading {filepath}: {e}")

    # Fallback: an index.rst without headings borrows them from README.md.
    if not headings:
        base = os.path.basename(filepath).lower()
        if base == 'index.rst':
            folder = os.path.dirname(filepath)
            readme_path = os.path.join(folder, 'README.md')
            if os.path.isfile(readme_path):
                try:
                    headings = extract_headings_from_file(readme_path, max_level)
                except Exception as e:
                    print(f"Warning: Error reading fallback README.md in {folder}: {e}")
    return headings
|
||||
|
||||
def group_headings(headings):
    """
    Turn a flat heading list into a forest.

    Each heading gains a 'children' list; a heading becomes a child of the
    nearest preceding heading with a strictly smaller level, otherwise a
    root of the returned tree.
    """
    tree = []
    open_path = []  # chain of ancestors of the heading being placed
    for heading in headings:
        heading['children'] = []
        # Pop anything that cannot be this heading's parent.
        while open_path and open_path[-1]['level'] >= heading['level']:
            open_path.pop()
        parent = open_path[-1] if open_path else None
        (parent['children'] if parent else tree).append(heading)
        open_path.append(heading)
    return tree
|
||||
|
||||
def sort_tree(tree):
    """
    Order siblings in place, recursively.

    Primary key is 'priority' (index pages first), secondary key is the
    natural sort of 'filename', falling back to the heading text.
    """
    tree.sort(key=lambda node: (node.get('priority', 1),
                                natural_sort_key(node.get('filename', node['text']))))
    for node in tree:
        children = node.get('children')
        if children:
            sort_tree(children)
|
116
docs/extensions/roles_overview.py
Normal file
116
docs/extensions/roles_overview.py
Normal file
@@ -0,0 +1,116 @@
|
||||
import os
import glob
import re
import yaml
from docutils import nodes
from sphinx.util import logging
from docutils.parsers.rst import Directive

# Module-level logger following Sphinx's logging conventions.
logger = logging.getLogger(__name__)
|
||||
|
||||
class RolesOverviewDirective(Directive):
    """
    A directive to embed a roles overview as reStructuredText.

    It scans the roles directory (every folder under "roles") for a
    "meta/main.yml" file, reads the role's galaxy tags and description, and
    outputs an overview grouped by tag.  Each role's title is the first
    level-1 heading of its README.md (falling back to the folder name) and
    is rendered as a clickable link to that README.
    """
    has_content = False

    def run(self):
        """Build and return the grouped roles overview as docutils nodes."""
        env = self.state.document.settings.env
        roles_dir = os.path.join(env.srcdir, 'roles')
        if not os.path.isdir(roles_dir):
            logger.warning(f"Roles directory not found: {roles_dir}")
            error_node = self.state.document.reporter.error(
                "Roles directory not found.", line=self.lineno)
            return [error_node]

        # Gather role entries grouped by tag.
        categories = {}
        for role_path in glob.glob(os.path.join(roles_dir, '*')):
            if not os.path.isdir(role_path):
                continue
            meta_path = os.path.join(role_path, 'meta', 'main.yml')
            if not os.path.exists(meta_path):
                logger.warning(f"meta/main.yml not found for role {role_path}")
                continue
            try:
                with open(meta_path, 'r', encoding='utf-8') as f:
                    # BUGFIX: safe_load returns None for an empty file, which
                    # previously crashed on data.get(...) below.
                    data = yaml.safe_load(f) or {}
            except Exception as e:
                logger.warning(f"Error reading YAML file {meta_path}: {e}")
                continue

            role_name = os.path.basename(role_path)
            # Prefer the first level-1 Markdown heading of README.md as title.
            readme_path = os.path.join(role_path, 'README.md')
            title = role_name
            if os.path.exists(readme_path):
                try:
                    with open(readme_path, 'r', encoding='utf-8') as f:
                        for line in f:
                            match = re.match(r'^#\s+(.*)$', line)
                            if match:
                                title = match.group(1).strip()
                                break
                except Exception as e:
                    logger.warning(f"Error reading README.md for {role_name}: {e}")

            # BUGFIX: a 'galaxy_info:' key with a null value is coerced to {}
            # instead of crashing; empty/missing tags become 'uncategorized'.
            galaxy_info = data.get('galaxy_info') or {}
            tags = galaxy_info.get('galaxy_tags', []) or ['uncategorized']
            role_entry = {
                'name': role_name,
                'title': title,
                'description': galaxy_info.get('description', ''),
                'link': f'roles/{role_name}/README.md',
                'tags': tags,
            }
            for tag in tags:
                categories.setdefault(tag, []).append(role_entry)

        # Sort categories, and the roles inside each, alphabetically.
        sorted_categories = sorted(categories.items(), key=lambda x: x[0].lower())
        for tag, roles in sorted_categories:
            roles.sort(key=lambda r: r['name'].lower())

        # Build the document structure: one section per tag, one subsection
        # per role, each role title a clickable link to its README.
        container = nodes.container()
        for tag, roles in sorted_categories:
            category_section = nodes.section(ids=[nodes.make_id(tag)])
            category_section += nodes.title(text=tag)

            for role in roles:
                role_section = nodes.section(ids=[nodes.make_id(role['title'])])
                role_title = nodes.title()
                role_title += nodes.reference(text=role['title'], refuri=role['link'])
                role_section += role_title
                if role['description']:
                    role_section += nodes.paragraph(text=role['description'])
                category_section += role_section

            container += category_section

        return [container]
|
||||
|
||||
def setup(app):
    """Sphinx entry point: register the roles-overview directive."""
    app.add_directive("roles-overview", RolesOverviewDirective)
    extension_meta = {'version': '0.1', 'parallel_read_safe': True}
    return extension_meta
|
Reference in New Issue
Block a user