Manage Atom feeds in a persistent git repository

Add generate command and HTML templates for static site generation

- Add generate command to create static HTML pages from stored feeds
- Add HTML templates for timeline, users, links, and index pages
- Update the CLI command structure and reference parser
- Remove outdated test files

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
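
For reference, the new `generate` command registers on the shared Typer app, so the flags visible in the diff below (`--output/-o`, `--force/-f`, `--config`) can be exercised without a shell. A minimal sketch using Typer's test runner, assuming a valid `thicket.yaml` with a configured `git_store` path:

```python
from typer.testing import CliRunner

from thicket.cli.main import app  # importing main registers all commands, including generate

runner = CliRunner()
# Equivalent to running: thicket generate --output ./thicket-site --force
result = runner.invoke(app, ["generate", "--output", "./thicket-site", "--force"])
print(result.output)
```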

+2 -1
pyproject.toml
···
"bleach>=6.0.0",
"platformdirs>=4.0.0",
"pyyaml>=6.0.0",
-
"email_validator"
]
[project.optional-dependencies]
···
"bleach>=6.0.0",
"platformdirs>=4.0.0",
"pyyaml>=6.0.0",
+
"email_validator",
+
"jinja2>=3.1.6",
]
[project.optional-dependencies]
+2 -2
src/thicket/cli/commands/__init__.py
···
"""CLI commands for thicket."""
# Import all commands to register them with the main app
-
from . import add, duplicates, index_cmd, info_cmd, init, links_cmd, list_cmd, sync
-
__all__ = ["add", "duplicates", "index_cmd", "info_cmd", "init", "links_cmd", "list_cmd", "sync"]
···
"""CLI commands for thicket."""
# Import all commands to register them with the main app
+
from . import add, duplicates, generate, index_cmd, info_cmd, init, links_cmd, list_cmd, sync
+
__all__ = ["add", "duplicates", "generate", "index_cmd", "info_cmd", "init", "links_cmd", "list_cmd", "sync"]
+703
src/thicket/cli/commands/generate.py
···
···
+
"""Generate static HTML website from thicket data."""
+
+
import base64
+
import json
+
import re
+
import shutil
+
from datetime import datetime
+
from pathlib import Path
+
from typing import Any, Optional, TypedDict, Union
+
+
import typer
+
from jinja2 import Environment, FileSystemLoader, select_autoescape
+
from rich.progress import Progress, SpinnerColumn, TextColumn
+
+
from ...core.git_store import GitStore
+
from ...models.feed import AtomEntry
+
from ...models.user import GitStoreIndex, UserMetadata
+
from ..main import app
+
from ..utils import console, load_config
+
+
+
class UserData(TypedDict):
+
"""Type definition for user data structure."""
+
+
metadata: UserMetadata
+
recent_entries: list[tuple[str, AtomEntry]]
+
+
+
def safe_anchor_id(atom_id: str) -> str:
+
"""Convert an Atom ID to a safe HTML anchor ID."""
+
# Use base64 URL-safe encoding without padding
+
encoded = base64.urlsafe_b64encode(atom_id.encode('utf-8')).decode('ascii').rstrip('=')
+
# Prefix with 'id' to ensure it starts with a letter (HTML requirement)
+
return f"id{encoded}"
+
+
+
class WebsiteGenerator:
+
"""Generate static HTML website from thicket data."""
+
+
def __init__(self, git_store: GitStore, output_dir: Path):
+
self.git_store = git_store
+
self.output_dir = output_dir
+
self.template_dir = Path(__file__).parent.parent.parent / "templates"
+
+
# Initialize Jinja2 environment
+
self.env = Environment(
+
loader=FileSystemLoader(self.template_dir),
+
autoescape=select_autoescape(["html", "xml"]),
+
)
+
+
# Data containers
+
self.index: Optional[GitStoreIndex] = None
+
self.entries: list[tuple[str, AtomEntry]] = [] # (username, entry)
+
self.links_data: Optional[dict[str, Any]] = None
+
self.threads: list[list[dict[str, Any]]] = [] # List of threads with metadata
+
+
def get_display_name(self, username: str) -> str:
+
"""Get display name for a user, falling back to username."""
+
if self.index and username in self.index.users:
+
user = self.index.users[username]
+
return user.display_name or username
+
return username
+
+
def get_user_homepage(self, username: str) -> Optional[str]:
+
"""Get homepage URL for a user."""
+
if self.index and username in self.index.users:
+
user = self.index.users[username]
+
return str(user.homepage) if user.homepage else None
+
return None
+
+
def clean_html_summary(self, content: Optional[str], max_length: int = 200) -> str:
+
"""Clean HTML content and truncate for display in timeline."""
+
if not content:
+
return ""
+
+
# Remove HTML tags
+
clean_text = re.sub(r"<[^>]+>", " ", content)
+
# Replace multiple whitespace with single space
+
clean_text = re.sub(r"\s+", " ", clean_text)
+
# Strip leading/trailing whitespace
+
clean_text = clean_text.strip()
+
+
# Truncate with ellipsis if needed
+
if len(clean_text) > max_length:
+
# Try to break at word boundary
+
truncated = clean_text[:max_length]
+
last_space = truncated.rfind(" ")
+
if (
+
last_space > max_length * 0.8
+
): # If we can break reasonably close to the limit
+
clean_text = truncated[:last_space] + "..."
+
else:
+
clean_text = truncated + "..."
+
+
return clean_text
+
+
def load_data(self) -> None:
+
"""Load all data from the git repository."""
+
with Progress(
+
SpinnerColumn(),
+
TextColumn("[progress.description]{task.description}"),
+
console=console,
+
) as progress:
+
# Load index
+
task = progress.add_task("Loading repository index...", total=None)
+
self.index = self.git_store._load_index()
+
if not self.index:
+
raise ValueError("No index found in repository")
+
progress.update(task, completed=True)
+
+
# Load all entries
+
task = progress.add_task("Loading entries...", total=None)
+
for username, user_metadata in self.index.users.items():
+
user_dir = self.git_store.repo_path / user_metadata.directory
+
if user_dir.exists():
+
for entry_file in user_dir.glob("*.json"):
+
if entry_file.name not in ["index.json", "duplicates.json"]:
+
try:
+
with open(entry_file) as f:
+
entry_data = json.load(f)
+
entry = AtomEntry(**entry_data)
+
self.entries.append((username, entry))
+
except Exception as e:
+
console.print(
+
f"[yellow]Warning: Failed to load {entry_file}: {e}[/yellow]"
+
)
+
progress.update(task, completed=True)
+
+
# Sort entries by date (newest first) - prioritize updated over published
+
self.entries.sort(
+
key=lambda x: x[1].updated or x[1].published or datetime.min, reverse=True
+
)
+
+
# Load links data
+
task = progress.add_task("Loading links and references...", total=None)
+
links_file = self.git_store.repo_path / "links.json"
+
if links_file.exists():
+
with open(links_file) as f:
+
self.links_data = json.load(f)
+
progress.update(task, completed=True)
+
+
def build_threads(self) -> None:
+
"""Build threaded conversations from references."""
+
if not self.links_data or "references" not in self.links_data:
+
return
+
+
# Map entry IDs to (username, entry) tuples
+
entry_map: dict[str, tuple[str, AtomEntry]] = {}
+
for username, entry in self.entries:
+
entry_map[entry.id] = (username, entry)
+
+
# Build adjacency lists for references
+
self.outbound_refs: dict[str, set[str]] = {}
+
self.inbound_refs: dict[str, set[str]] = {}
+
self.reference_details: dict[
+
str, list[dict[str, Any]]
+
] = {} # Store full reference info
+
+
for ref in self.links_data["references"]:
+
source_id = ref["source_entry_id"]
+
target_id = ref.get("target_entry_id")
+
+
if target_id and source_id in entry_map and target_id in entry_map:
+
self.outbound_refs.setdefault(source_id, set()).add(target_id)
+
self.inbound_refs.setdefault(target_id, set()).add(source_id)
+
+
# Store reference details for UI
+
self.reference_details.setdefault(source_id, []).append(
+
{
+
"target_id": target_id,
+
"target_username": ref.get("target_username"),
+
"type": "outbound",
+
}
+
)
+
self.reference_details.setdefault(target_id, []).append(
+
{
+
"source_id": source_id,
+
"source_username": ref.get("source_username"),
+
"type": "inbound",
+
}
+
)
+
+
# Find conversation threads (multi-post discussions)
+
processed = set()
+
+
for entry_id, (_username, _entry) in entry_map.items():
+
if entry_id in processed:
+
continue
+
+
# Build thread starting from this entry
+
thread = []
+
to_visit = [entry_id]
+
thread_ids = set()
+
level_map: dict[str, int] = {} # Track levels for this thread
+
+
# First, traverse up to find the root
+
current = entry_id
+
while current in self.inbound_refs:
+
parents = self.inbound_refs[current] - {
+
current
+
} # Exclude self-references
+
if not parents:
+
break
+
# Take the first parent
+
parent = next(iter(parents))
+
if parent in thread_ids: # Avoid cycles
+
break
+
current = parent
+
to_visit.insert(0, current)
+
+
# Now traverse down from the root
+
while to_visit:
+
current = to_visit.pop(0)
+
if current in thread_ids or current not in entry_map:
+
continue
+
+
thread_ids.add(current)
+
username, entry = entry_map[current]
+
+
# Calculate thread level
+
thread_level = self._calculate_thread_level(current, level_map)
+
+
# Add threading metadata
+
thread_entry = {
+
"username": username,
+
"display_name": self.get_display_name(username),
+
"entry": entry,
+
"entry_id": current,
+
"references_to": list(self.outbound_refs.get(current, [])),
+
"referenced_by": list(self.inbound_refs.get(current, [])),
+
"thread_level": thread_level,
+
}
+
thread.append(thread_entry)
+
processed.add(current)
+
+
# Add children
+
if current in self.outbound_refs:
+
children = self.outbound_refs[current] - thread_ids # Avoid cycles
+
to_visit.extend(sorted(children))
+
+
if len(thread) > 1: # Only keep actual threads
+
# Sort thread by date (newest first) - prioritize updated over published
+
thread.sort(key=lambda x: x["entry"].updated or x["entry"].published or datetime.min, reverse=True) # type: ignore
+
self.threads.append(thread)
+
+
# Sort threads by the date of their most recent entry - prioritize updated over published
+
self.threads.sort(
+
key=lambda t: max(
+
item["entry"].updated or item["entry"].published or datetime.min for item in t
+
),
+
reverse=True,
+
)
+
+
def _calculate_thread_level(
+
self, entry_id: str, processed_entries: dict[str, int]
+
) -> int:
+
"""Calculate indentation level for threaded display."""
+
if entry_id in processed_entries:
+
return processed_entries[entry_id]
+
+
if entry_id not in self.inbound_refs:
+
processed_entries[entry_id] = 0
+
return 0
+
+
parents_in_thread = self.inbound_refs[entry_id] & set(processed_entries.keys())
+
if not parents_in_thread:
+
processed_entries[entry_id] = 0
+
return 0
+
+
# Find the deepest parent level + 1
+
max_parent_level = 0
+
for parent_id in parents_in_thread:
+
parent_level = self._calculate_thread_level(parent_id, processed_entries)
+
max_parent_level = max(max_parent_level, parent_level)
+
+
level = min(max_parent_level + 1, 4) # Cap at level 4
+
processed_entries[entry_id] = level
+
return level
+
+
def get_standalone_references(self) -> list[dict[str, Any]]:
+
"""Get posts that have references but aren't part of multi-post threads."""
+
if not hasattr(self, "reference_details"):
+
return []
+
+
threaded_entry_ids = set()
+
for thread in self.threads:
+
for item in thread:
+
threaded_entry_ids.add(item["entry_id"])
+
+
standalone_refs = []
+
for username, entry in self.entries:
+
if (
+
entry.id in self.reference_details
+
and entry.id not in threaded_entry_ids
+
):
+
refs = self.reference_details[entry.id]
+
# Only include if it has meaningful references (not just self-references)
+
meaningful_refs = [
+
r
+
for r in refs
+
if r.get("target_id") != entry.id and r.get("source_id") != entry.id
+
]
+
if meaningful_refs:
+
standalone_refs.append(
+
{
+
"username": username,
+
"display_name": self.get_display_name(username),
+
"entry": entry,
+
"references": meaningful_refs,
+
}
+
)
+
+
return standalone_refs
+
+
def _add_cross_thread_links(self, timeline_items: list[dict[str, Any]]) -> None:
+
"""Add cross-thread linking for entries that appear in multiple threads."""
+
# Map entry IDs to their positions in the timeline
+
entry_positions: dict[str, list[int]] = {}
+
# Map URLs referenced by entries to the entries that reference them
+
url_references: dict[str, list[tuple[str, int]]] = {} # url -> [(entry_id, position)]
+
+
# First pass: collect all entry IDs, their positions, and referenced URLs
+
for i, item in enumerate(timeline_items):
+
if item["type"] == "post":
+
entry_id = item["content"]["entry"].id
+
entry_positions.setdefault(entry_id, []).append(i)
+
# Track URLs this entry references
+
if entry_id in self.reference_details:
+
for ref in self.reference_details[entry_id]:
+
if ref["type"] == "outbound" and "target_id" in ref:
+
# Find the target entry's URL if available
+
target_entry = self._find_entry_by_id(ref["target_id"])
+
if target_entry and target_entry.link:
+
url = str(target_entry.link)
+
url_references.setdefault(url, []).append((entry_id, i))
+
elif item["type"] == "thread":
+
for thread_item in item["content"]:
+
entry_id = thread_item["entry"].id
+
entry_positions.setdefault(entry_id, []).append(i)
+
# Track URLs this entry references
+
if entry_id in self.reference_details:
+
for ref in self.reference_details[entry_id]:
+
if ref["type"] == "outbound" and "target_id" in ref:
+
target_entry = self._find_entry_by_id(ref["target_id"])
+
if target_entry and target_entry.link:
+
url = str(target_entry.link)
+
url_references.setdefault(url, []).append((entry_id, i))
+
+
# Build cross-thread connections - only for entries that actually appear multiple times
+
cross_thread_connections: dict[str, set[int]] = {} # entry_id -> set of timeline positions
+
+
# Add connections ONLY for entries that appear multiple times in the timeline
+
for entry_id, positions in entry_positions.items():
+
if len(positions) > 1:
+
cross_thread_connections[entry_id] = set(positions)
+
# Debug: uncomment to see which entries have multiple appearances
+
# print(f"Entry {entry_id[:50]}... appears at positions: {positions}")
+
+
# Apply cross-thread links to timeline items
+
for entry_id, positions_set in cross_thread_connections.items():
+
positions_list = list(positions_set)
+
for pos in positions_list:
+
item = timeline_items[pos]
+
other_positions = sorted([p for p in positions_list if p != pos])
+
+
if item["type"] == "post":
+
# Add cross-thread info to individual posts
+
item["content"]["cross_thread_links"] = self._build_cross_thread_link_data(entry_id, other_positions, timeline_items)
+
# Add info about shared references
+
item["content"]["shared_references"] = self._get_shared_references(entry_id, positions_set, timeline_items)
+
elif item["type"] == "thread":
+
# Add cross-thread info to thread items
+
for thread_item in item["content"]:
+
if thread_item["entry"].id == entry_id:
+
thread_item["cross_thread_links"] = self._build_cross_thread_link_data(entry_id, other_positions, timeline_items)
+
thread_item["shared_references"] = self._get_shared_references(entry_id, positions_set, timeline_items)
+
break
+
+
def _build_cross_thread_link_data(self, entry_id: str, other_positions: list[int], timeline_items: list[dict[str, Any]]) -> list[dict[str, Any]]:
+
"""Build detailed cross-thread link data with anchor information."""
+
cross_thread_links = []
+
+
for pos in other_positions:
+
item = timeline_items[pos]
+
if item["type"] == "post":
+
# For individual posts
+
safe_id = safe_anchor_id(entry_id)
+
cross_thread_links.append({
+
"position": pos,
+
"anchor_id": f"post-{pos}-{safe_id}",
+
"context": "individual post",
+
"title": item["content"]["entry"].title
+
})
+
elif item["type"] == "thread":
+
# For thread items, find the specific thread item
+
for thread_idx, thread_item in enumerate(item["content"]):
+
if thread_item["entry"].id == entry_id:
+
safe_id = safe_anchor_id(entry_id)
+
cross_thread_links.append({
+
"position": pos,
+
"anchor_id": f"post-{pos}-{thread_idx}-{safe_id}",
+
"context": f"thread (level {thread_item.get('thread_level', 0)})",
+
"title": thread_item["entry"].title
+
})
+
break
+
+
return cross_thread_links
+
+
def _find_entry_by_id(self, entry_id: str) -> Optional[AtomEntry]:
+
"""Find an entry by its ID."""
+
for _username, entry in self.entries:
+
if entry.id == entry_id:
+
return entry
+
return None
+
+
def _get_shared_references(self, entry_id: str, positions: Union[set[int], list[int]], timeline_items: list[dict[str, Any]]) -> list[dict[str, Any]]:
+
"""Get information about shared references between cross-thread entries."""
+
shared_refs = []
+
+
# Collect all referenced URLs from entries at these positions
+
url_counts: dict[str, int] = {}
+
referencing_entries: dict[str, list[str]] = {} # url -> [entry_ids]
+
+
for pos in positions:
+
item = timeline_items[pos]
+
entries_to_check = []
+
+
if item["type"] == "post":
+
entries_to_check.append(item["content"]["entry"])
+
elif item["type"] == "thread":
+
entries_to_check.extend([ti["entry"] for ti in item["content"]])
+
+
for entry in entries_to_check:
+
if entry.id in self.reference_details:
+
for ref in self.reference_details[entry.id]:
+
if ref["type"] == "outbound" and "target_id" in ref:
+
target_entry = self._find_entry_by_id(ref["target_id"])
+
if target_entry and target_entry.link:
+
url = str(target_entry.link)
+
url_counts[url] = url_counts.get(url, 0) + 1
+
if url not in referencing_entries:
+
referencing_entries[url] = []
+
if entry.id not in referencing_entries[url]:
+
referencing_entries[url].append(entry.id)
+
+
# Find URLs referenced by multiple entries
+
for url, count in url_counts.items():
+
if count > 1 and len(referencing_entries[url]) > 1:
+
# Get the target entry info
+
target_entry = None
+
target_username = None
+
for ref in (self.links_data or {}).get("references", []):
+
if ref.get("target_url") == url:
+
target_username = ref.get("target_username")
+
if ref.get("target_entry_id"):
+
target_entry = self._find_entry_by_id(ref["target_entry_id"])
+
break
+
+
shared_refs.append({
+
"url": url,
+
"count": count,
+
"referencing_entries": referencing_entries[url],
+
"target_username": target_username,
+
"target_title": target_entry.title if target_entry else None
+
})
+
+
return sorted(shared_refs, key=lambda x: x["count"], reverse=True)
+
+
def generate_site(self) -> None:
+
"""Generate the static website."""
+
# Create output directory
+
self.output_dir.mkdir(parents=True, exist_ok=True)
+
+
# Create static directories
+
(self.output_dir / "css").mkdir(exist_ok=True)
+
(self.output_dir / "js").mkdir(exist_ok=True)
+
+
# Generate CSS
+
css_template = self.env.get_template("style.css")
+
css_content = css_template.render()
+
with open(self.output_dir / "css" / "style.css", "w") as f:
+
f.write(css_content)
+
+
# Generate JavaScript
+
js_template = self.env.get_template("script.js")
+
js_content = js_template.render()
+
with open(self.output_dir / "js" / "script.js", "w") as f:
+
f.write(js_content)
+
+
# Prepare common template data
+
base_data = {
+
"title": "Energy & Environment Group",
+
"generated_at": datetime.now().isoformat(),
+
"get_display_name": self.get_display_name,
+
"get_user_homepage": self.get_user_homepage,
+
"clean_html_summary": self.clean_html_summary,
+
"safe_anchor_id": safe_anchor_id,
+
}
+
+
# Build unified timeline
+
timeline_items = []
+
+
# Only consider the threads that will actually be displayed
+
displayed_threads = self.threads[:20] # Limit to 20 threads
+
+
# Track which entries are part of displayed threads
+
threaded_entry_ids = set()
+
for thread in displayed_threads:
+
for item in thread:
+
threaded_entry_ids.add(item["entry_id"])
+
+
# Add threads to timeline (using the date of the most recent post)
+
for thread in displayed_threads:
+
most_recent_date = max(
+
item["entry"].updated or item["entry"].published or datetime.min
+
for item in thread
+
)
+
timeline_items.append({
+
"type": "thread",
+
"date": most_recent_date,
+
"content": thread
+
})
+
+
# Add individual posts (not in threads)
+
for username, entry in self.entries[:50]:
+
if entry.id not in threaded_entry_ids:
+
# Check if this entry has references
+
has_refs = (
+
entry.id in self.reference_details
+
if hasattr(self, "reference_details")
+
else False
+
)
+
+
refs = []
+
if has_refs:
+
refs = self.reference_details.get(entry.id, [])
+
refs = [
+
r for r in refs
+
if r.get("target_id") != entry.id
+
and r.get("source_id") != entry.id
+
]
+
+
timeline_items.append({
+
"type": "post",
+
"date": entry.updated or entry.published or datetime.min,
+
"content": {
+
"username": username,
+
"display_name": self.get_display_name(username),
+
"entry": entry,
+
"references": refs if refs else None
+
}
+
})
+
+
# Sort unified timeline by date (newest first)
+
timeline_items.sort(key=lambda x: x["date"], reverse=True)
+
+
# Limit timeline to what will actually be rendered
+
timeline_items = timeline_items[:50] # Limit to 50 items total
+
+
# Add cross-thread linking for repeat blog references
+
self._add_cross_thread_links(timeline_items)
+
+
# Prepare outgoing links data
+
outgoing_links = []
+
if self.links_data and "links" in self.links_data:
+
for url, link_info in self.links_data["links"].items():
+
referencing_entries = []
+
for entry_id in link_info.get("referencing_entries", []):
+
for username, entry in self.entries:
+
if entry.id == entry_id:
+
referencing_entries.append(
+
(self.get_display_name(username), entry)
+
)
+
break
+
+
if referencing_entries:
+
# Sort by date - prioritize updated over published
+
referencing_entries.sort(
+
key=lambda x: x[1].updated or x[1].published or datetime.min, reverse=True
+
)
+
outgoing_links.append(
+
{
+
"url": url,
+
"target_username": link_info.get("target_username"),
+
"entries": referencing_entries,
+
}
+
)
+
+
# Sort links by most recent reference - prioritize updated over published
+
outgoing_links.sort(
+
key=lambda x: x["entries"][0][1].updated
+
or x["entries"][0][1].published or datetime.min,
+
reverse=True,
+
)
+
+
# Prepare users data
+
users: list[UserData] = []
+
if self.index:
+
for username, user_metadata in self.index.users.items():
+
# Get recent entries for this user with display names
+
user_entries = [
+
(self.get_display_name(u), e)
+
for u, e in self.entries
+
if u == username
+
][:5]
+
users.append(
+
{"metadata": user_metadata, "recent_entries": user_entries}
+
)
+
# Sort by entry count
+
users.sort(key=lambda x: x["metadata"].entry_count, reverse=True)
+
+
# Generate timeline page
+
timeline_template = self.env.get_template("timeline.html")
+
timeline_content = timeline_template.render(
+
**base_data,
+
page="timeline",
+
timeline_items=timeline_items, # Already limited above
+
)
+
with open(self.output_dir / "timeline.html", "w") as f:
+
f.write(timeline_content)
+
+
# Generate links page
+
links_template = self.env.get_template("links.html")
+
links_content = links_template.render(
+
**base_data,
+
page="links",
+
outgoing_links=outgoing_links[:100],
+
)
+
with open(self.output_dir / "links.html", "w") as f:
+
f.write(links_content)
+
+
# Generate users page
+
users_template = self.env.get_template("users.html")
+
users_content = users_template.render(
+
**base_data,
+
page="users",
+
users=users,
+
)
+
with open(self.output_dir / "users.html", "w") as f:
+
f.write(users_content)
+
+
# Generate main index page (redirect to timeline)
+
index_template = self.env.get_template("index.html")
+
index_content = index_template.render(**base_data)
+
with open(self.output_dir / "index.html", "w") as f:
+
f.write(index_content)
+
+
console.print(f"[green]✓[/green] Generated website at {self.output_dir}")
+
console.print(f" - {len(self.entries)} entries")
+
console.print(f" - {len(self.threads)} conversation threads")
+
console.print(f" - {len(outgoing_links)} outgoing links")
+
console.print(f" - {len(users)} users")
+
console.print(
+
" - Generated pages: index.html, timeline.html, links.html, users.html"
+
)
+
+
+
@app.command()
+
def generate(
+
output: Path = typer.Option(
+
Path("./thicket-site"),
+
"--output",
+
"-o",
+
help="Output directory for the generated website",
+
),
+
force: bool = typer.Option(
+
False, "--force", "-f", help="Overwrite existing output directory"
+
),
+
config_file: Path = typer.Option(
+
Path("thicket.yaml"), "--config", help="Configuration file path"
+
),
+
) -> None:
+
"""Generate a static HTML website from thicket data."""
+
config = load_config(config_file)
+
+
if not config.git_store:
+
console.print("[red]No git store path configured[/red]")
+
raise typer.Exit(1)
+
+
git_store = GitStore(config.git_store)
+
+
# Check if output directory exists
+
if output.exists() and not force:
+
console.print(
+
f"[red]Output directory {output} already exists. Use --force to overwrite.[/red]"
+
)
+
raise typer.Exit(1)
+
+
# Clean output directory if forcing
+
if output.exists() and force:
+
shutil.rmtree(output)
+
+
try:
+
generator = WebsiteGenerator(git_store, output)
+
+
console.print("[bold]Generating static website...[/bold]")
+
generator.load_data()
+
generator.build_threads()
+
generator.generate_site()
+
+
except Exception as e:
+
console.print(f"[red]Error generating website: {e}[/red]")
+
raise typer.Exit(1) from e
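
As a quick illustration of the anchor scheme used above, here is `safe_anchor_id` as a standalone sketch with a worked example (the Atom ID is hypothetical):

```python
import base64

def safe_anchor_id(atom_id: str) -> str:
    # URL-safe base64 with padding stripped; the "id" prefix guarantees
    # the anchor starts with a letter.
    encoded = base64.urlsafe_b64encode(atom_id.encode("utf-8")).decode("ascii").rstrip("=")
    return f"id{encoded}"

# A made-up Atom ID; its 27 bytes encode to 36 base64 characters with no padding.
print(safe_anchor_id("tag:example.org,2024:post-1"))
# -> iddGFnOmV4YW1wbGUub3JnLDIwMjQ6cG9zdC0x
```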
+1 -1
src/thicket/cli/main.py
···
# Import commands to register them
-
from .commands import add, duplicates, index_cmd, info_cmd, init, links_cmd, list_cmd, sync
if __name__ == "__main__":
app()
···
# Import commands to register them
+
from .commands import add, duplicates, generate, index_cmd, info_cmd, init, links_cmd, list_cmd, sync
if __name__ == "__main__":
app()
+150 -13
src/thicket/core/reference_parser.py
···
return True
return False
def resolve_target_user(
self, url: str, user_domains: dict[str, set[str]]
) -> Optional[str]:
···
links = self.extract_links_from_html(content)
for url, _link_text in links:
-
# Skip internal links (same domain as the entry)
entry_domain = (
urlparse(str(entry.link)).netloc.lower() if entry.link else ""
)
link_domain = urlparse(url).netloc.lower()
-
if link_domain == entry_domain:
-
continue
-
# Check if this looks like a blog URL
if not self.is_blog_url(url):
continue
# Try to resolve to a known user
-
target_username = self.resolve_target_user(url, user_domains)
ref = BlogReference(
source_entry_id=entry.id,
···
return user_domains
def resolve_target_entry_ids(
self, references: list[BlogReference], git_store: "GitStore"
) -> list[BlogReference]:
-
"""Resolve target_entry_id for references that have target_username but no target_entry_id."""
resolved_refs = []
for ref in references:
# If we already have a target_entry_id, keep the reference as-is
···
resolved_refs.append(ref)
continue
-
# Try to find the entry by matching the URL
-
entries = git_store.list_entries(ref.target_username)
resolved_entry_id = None
-
for entry in entries:
-
# Check if the entry's link matches the target URL
-
if entry.link and str(entry.link) == ref.target_url:
-
resolved_entry_id = entry.id
-
break
# Create a new reference with the resolved target_entry_id
resolved_ref = BlogReference(
···
return True
return False
+
def _is_likely_blog_post_url(self, url: str) -> bool:
+
"""Check if a same-domain URL likely points to a blog post (not CSS, images, etc.)."""
+
parsed_url = urlparse(url)
+
path = parsed_url.path.lower()
+
+
# Skip obvious non-blog content
+
if any(path.endswith(ext) for ext in ['.css', '.js', '.png', '.jpg', '.jpeg', '.gif', '.svg', '.ico', '.pdf', '.xml', '.json']):
+
return False
+
+
# Skip common non-blog paths
+
if any(segment in path for segment in ['/static/', '/assets/', '/css/', '/js/', '/images/', '/img/', '/media/', '/uploads/']):
+
return False
+
+
# Skip fragment-only links (same page anchors)
+
if not path or path == '/':
+
return False
+
+
# Look for positive indicators of blog posts
+
# Common blog post patterns: dates, slugs, post indicators
+
blog_indicators = [
+
r'/\d{4}/', # Year in path
+
r'/\d{4}/\d{2}/', # Year/month in path
+
r'/blog/',
+
r'/post/',
+
r'/posts/',
+
r'/articles?/',
+
r'/notes?/',
+
r'/entries/',
+
r'/writing/',
+
]
+
+
for pattern in blog_indicators:
+
if re.search(pattern, path):
+
return True
+
+
# If it has a reasonable path depth and doesn't match exclusions, likely a blog post
+
path_segments = [seg for seg in path.split('/') if seg]
+
return len(path_segments) >= 1 # At least one meaningful path segment
+
def resolve_target_user(
self, url: str, user_domains: dict[str, set[str]]
) -> Optional[str]:
···
links = self.extract_links_from_html(content)
for url, _link_text in links:
entry_domain = (
urlparse(str(entry.link)).netloc.lower() if entry.link else ""
)
link_domain = urlparse(url).netloc.lower()
# Check if this looks like a blog URL
if not self.is_blog_url(url):
continue
+
+
# For same-domain links, apply additional filtering to avoid non-blog content
+
if link_domain == entry_domain:
+
# Only include same-domain links that look like blog posts
+
if not self._is_likely_blog_post_url(url):
+
continue
# Try to resolve to a known user
+
if link_domain == entry_domain:
+
# Same domain - target user is the same as source user
+
target_username: Optional[str] = username
+
else:
+
# Different domain - try to resolve
+
target_username = self.resolve_target_user(url, user_domains)
ref = BlogReference(
source_entry_id=entry.id,
···
return user_domains
+
def _build_url_to_entry_mapping(self, git_store: "GitStore") -> dict[str, str]:
+
"""Build a comprehensive mapping from URLs to entry IDs using git store data.
+
+
This creates a URL-to-entry mapping that handles:
+
- Entry link URLs -> Entry IDs
+
- URL variations (with/without www, http/https)
+
- Multiple URLs pointing to the same entry
+
"""
+
url_to_entry: dict[str, str] = {}
+
+
# Load index to get all users
+
index = git_store._load_index()
+
+
for username in index.users.keys():
+
entries = git_store.list_entries(username)
+
+
for entry in entries:
+
if entry.link:
+
link_url = str(entry.link)
+
entry_id = entry.id
+
+
# Map the canonical link URL
+
url_to_entry[link_url] = entry_id
+
+
# Handle common URL variations
+
parsed = urlparse(link_url)
+
if parsed.netloc and parsed.path:
+
# Add version without www
+
if parsed.netloc.startswith('www.'):
+
no_www_url = f"{parsed.scheme}://{parsed.netloc[4:]}{parsed.path}"
+
if parsed.query:
+
no_www_url += f"?{parsed.query}"
+
if parsed.fragment:
+
no_www_url += f"#{parsed.fragment}"
+
url_to_entry[no_www_url] = entry_id
+
+
# Add version with www if not present
+
elif not parsed.netloc.startswith('www.'):
+
www_url = f"{parsed.scheme}://www.{parsed.netloc}{parsed.path}"
+
if parsed.query:
+
www_url += f"?{parsed.query}"
+
if parsed.fragment:
+
www_url += f"#{parsed.fragment}"
+
url_to_entry[www_url] = entry_id
+
+
# Add http/https variations
+
if parsed.scheme == 'https':
+
http_url = link_url.replace('https://', 'http://', 1)
+
url_to_entry[http_url] = entry_id
+
elif parsed.scheme == 'http':
+
https_url = link_url.replace('http://', 'https://', 1)
+
url_to_entry[https_url] = entry_id
+
+
return url_to_entry
+
+
def _normalize_url(self, url: str) -> str:
+
"""Normalize URL for consistent matching.
+
+
Handles common variations like trailing slashes, fragments, etc.
+
"""
+
parsed = urlparse(url)
+
+
# Remove trailing slash from path
+
path = parsed.path.rstrip('/') if parsed.path != '/' else parsed.path
+
+
# Reconstruct without fragment for consistent matching
+
normalized = f"{parsed.scheme}://{parsed.netloc}{path}"
+
if parsed.query:
+
normalized += f"?{parsed.query}"
+
+
return normalized
+
def resolve_target_entry_ids(
self, references: list[BlogReference], git_store: "GitStore"
) -> list[BlogReference]:
+
"""Resolve target_entry_id for references using comprehensive URL mapping."""
resolved_refs = []
+
+
# Build comprehensive URL to entry ID mapping
+
url_to_entry = self._build_url_to_entry_mapping(git_store)
for ref in references:
# If we already have a target_entry_id, keep the reference as-is
···
resolved_refs.append(ref)
continue
+
# Try to resolve using URL mapping
resolved_entry_id = None
+
# First, try exact match
+
if ref.target_url in url_to_entry:
+
resolved_entry_id = url_to_entry[ref.target_url]
+
else:
+
# Try normalized URL matching
+
normalized_target = self._normalize_url(ref.target_url)
+
if normalized_target in url_to_entry:
+
resolved_entry_id = url_to_entry[normalized_target]
+
else:
+
# Try URL variations
+
for mapped_url, entry_id in url_to_entry.items():
+
if self._normalize_url(mapped_url) == normalized_target:
+
resolved_entry_id = entry_id
+
break
+
+
# Verify the resolved entry belongs to the target username
+
if resolved_entry_id:
+
# Double-check by loading the actual entry
+
entries = git_store.list_entries(ref.target_username)
+
entry_found = any(entry.id == resolved_entry_id for entry in entries)
+
if not entry_found:
+
resolved_entry_id = None
# Create a new reference with the resolved target_entry_id
resolved_ref = BlogReference(
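
To make the URL-matching behaviour above concrete, a standalone sketch of the normalization step (the helper mirrors `_normalize_url`; the URLs are hypothetical):

```python
from urllib.parse import urlparse

def normalize_url(url: str) -> str:
    # Mirror of ReferenceParser._normalize_url: drop the trailing slash
    # and the fragment, keep the query string.
    parsed = urlparse(url)
    path = parsed.path.rstrip("/") if parsed.path != "/" else parsed.path
    normalized = f"{parsed.scheme}://{parsed.netloc}{path}"
    if parsed.query:
        normalized += f"?{parsed.query}"
    return normalized

# Trailing-slash and fragment variants collapse to the same lookup key:
assert normalize_url("https://example.org/blog/post/") == "https://example.org/blog/post"
assert normalize_url("https://example.org/blog/post#comments") == "https://example.org/blog/post"
# www/scheme variants are handled separately by _build_url_to_entry_mapping,
# which inserts extra keys (http/https, with/without www) for each entry link.
```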
+31
src/thicket/templates/base.html
···
···
+
<!DOCTYPE html>
+
<html lang="en">
+
<head>
+
<meta charset="UTF-8">
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
+
<title>{% block page_title %}{{ title }}{% endblock %}</title>
+
<link rel="stylesheet" href="css/style.css">
+
</head>
+
<body>
+
<header class="site-header">
+
<div class="header-content">
+
<h1 class="site-title">{{ title }}</h1>
+
<nav class="site-nav">
+
<a href="timeline.html" class="nav-link {% if page == 'timeline' %}active{% endif %}">Timeline</a>
+
<a href="links.html" class="nav-link {% if page == 'links' %}active{% endif %}">Links</a>
+
<a href="users.html" class="nav-link {% if page == 'users' %}active{% endif %}">Users</a>
+
</nav>
+
</div>
+
</header>
+
+
<main class="main-content">
+
{% block content %}{% endblock %}
+
</main>
+
+
<footer class="site-footer">
+
<p>Generated on {{ generated_at }} by <a href="https://github.com/avsm/thicket">Thicket</a></p>
+
</footer>
+
+
<script src="js/script.js"></script>
+
</body>
+
</html>
+13
src/thicket/templates/index.html
···
···
+
<!DOCTYPE html>
+
<html lang="en">
+
<head>
+
<meta charset="UTF-8">
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
+
<title>{{ title }}</title>
+
<meta http-equiv="refresh" content="0; url=timeline.html">
+
<link rel="canonical" href="timeline.html">
+
</head>
+
<body>
+
<p>Redirecting to <a href="timeline.html">Timeline</a>...</p>
+
</body>
+
</html>
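
The templates are plain Jinja2, so they can also be rendered outside the CLI; a minimal sketch, assuming it runs from the repository root:

```python
from jinja2 import Environment, FileSystemLoader, select_autoescape

# Same environment the generator builds in WebsiteGenerator.__init__.
env = Environment(
    loader=FileSystemLoader("src/thicket/templates"),
    autoescape=select_autoescape(["html", "xml"]),
)

# index.html only needs a title; it emits a meta-refresh redirect to timeline.html.
html = env.get_template("index.html").render(title="Energy & Environment Group")
print(html)
```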
+38
src/thicket/templates/links.html
···
···
+
{% extends "base.html" %}
+
+
{% block page_title %}Outgoing Links - {{ title }}{% endblock %}
+
+
{% block content %}
+
<div class="page-content">
+
<h2>Outgoing Links</h2>
+
<p class="page-description">External links referenced in blog posts, ordered by most recent reference.</p>
+
+
{% for link in outgoing_links %}
+
<article class="link-group">
+
<h3 class="link-url">
+
<a href="{{ link.url }}" target="_blank">{{ link.url|truncate(80) }}</a>
+
{% if link.target_username %}
+
<span class="target-user">({{ link.target_username }})</span>
+
{% endif %}
+
</h3>
+
<div class="referencing-entries">
+
<span class="ref-count">Referenced in {{ link.entries|length }} post(s):</span>
+
<ul>
+
{% for display_name, entry in link.entries[:5] %}
+
<li>
+
<span class="author">{{ display_name }}</span> -
+
<a href="{{ entry.link }}" target="_blank">{{ entry.title }}</a>
+
<time datetime="{{ entry.updated or entry.published }}">
+
({{ (entry.updated or entry.published).strftime('%Y-%m-%d') }})
+
</time>
+
</li>
+
{% endfor %}
+
{% if link.entries|length > 5 %}
+
<li class="more">... and {{ link.entries|length - 5 }} more</li>
+
{% endif %}
+
</ul>
+
</div>
+
</article>
+
{% endfor %}
+
</div>
+
{% endblock %}
+88
src/thicket/templates/script.js
···
···
+
// Enhanced functionality for thicket website
+
document.addEventListener('DOMContentLoaded', function() {
+
+
// Enhance thread collapsing (optional feature)
+
const threadHeaders = document.querySelectorAll('.thread-header');
+
threadHeaders.forEach(header => {
+
header.style.cursor = 'pointer';
+
header.addEventListener('click', function() {
+
const thread = this.parentElement;
+
const entries = thread.querySelectorAll('.thread-entry');
+
+
// Toggle visibility of all but the first entry
+
for (let i = 1; i < entries.length; i++) {
+
entries[i].style.display = entries[i].style.display === 'none' ? 'block' : 'none';
+
}
+
+
// Update thread count text
+
const count = this.querySelector('.thread-count');
+
if (entries[1] && entries[1].style.display === 'none') {
+
count.textContent = count.textContent.replace('posts', 'posts (collapsed)');
+
} else {
+
count.textContent = count.textContent.replace(' (collapsed)', '');
+
}
+
});
+
});
+
+
// Add relative time display
+
const timeElements = document.querySelectorAll('time');
+
timeElements.forEach(timeEl => {
+
const datetime = new Date(timeEl.getAttribute('datetime'));
+
const now = new Date();
+
const diffMs = now - datetime;
+
const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24));
+
+
let relativeTime;
+
if (diffDays === 0) {
+
const diffHours = Math.floor(diffMs / (1000 * 60 * 60));
+
if (diffHours === 0) {
+
const diffMinutes = Math.floor(diffMs / (1000 * 60));
+
relativeTime = diffMinutes === 0 ? 'just now' : `${diffMinutes}m ago`;
+
} else {
+
relativeTime = `${diffHours}h ago`;
+
}
+
} else if (diffDays === 1) {
+
relativeTime = 'yesterday';
+
} else if (diffDays < 7) {
+
relativeTime = `${diffDays}d ago`;
+
} else if (diffDays < 30) {
+
const weeks = Math.floor(diffDays / 7);
+
relativeTime = weeks === 1 ? '1w ago' : `${weeks}w ago`;
+
} else if (diffDays < 365) {
+
const months = Math.floor(diffDays / 30);
+
relativeTime = months === 1 ? '1mo ago' : `${months}mo ago`;
+
} else {
+
const years = Math.floor(diffDays / 365);
+
relativeTime = years === 1 ? '1y ago' : `${years}y ago`;
+
}
+
+
// Add relative time as title attribute
+
timeEl.setAttribute('title', timeEl.textContent);
+
timeEl.textContent = relativeTime;
+
});
+
+
// Enhanced anchor link scrolling for shared references
+
document.querySelectorAll('a[href^="#"]').forEach(anchor => {
+
anchor.addEventListener('click', function (e) {
+
e.preventDefault();
+
const target = document.querySelector(this.getAttribute('href'));
+
if (target) {
+
target.scrollIntoView({
+
behavior: 'smooth',
+
block: 'center'
+
});
+
+
// Highlight the target briefly
+
const timelineEntry = target.closest('.timeline-entry');
+
if (timelineEntry) {
+
timelineEntry.style.outline = '2px solid var(--primary-color)';
+
timelineEntry.style.borderRadius = '8px';
+
setTimeout(() => {
+
timelineEntry.style.outline = '';
+
timelineEntry.style.borderRadius = '';
+
}, 2000);
+
}
+
}
+
});
+
});
+
});
+754
src/thicket/templates/style.css
···
···
+
/* Modern, clean design with dense, readable text */
+
+
:root {
+
--primary-color: #2c3e50;
+
--secondary-color: #3498db;
+
--accent-color: #e74c3c;
+
--background: #ffffff;
+
--surface: #f8f9fa;
+
--text-primary: #2c3e50;
+
--text-secondary: #7f8c8d;
+
--border-color: #e0e0e0;
+
--thread-indent: 20px;
+
--max-width: 1200px;
+
}
+
+
* {
+
margin: 0;
+
padding: 0;
+
box-sizing: border-box;
+
}
+
+
body {
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Helvetica Neue', Arial, sans-serif;
+
font-size: 14px;
+
line-height: 1.6;
+
color: var(--text-primary);
+
background-color: var(--background);
+
}
+
+
/* Header */
+
.site-header {
+
background-color: var(--surface);
+
border-bottom: 1px solid var(--border-color);
+
padding: 0.75rem 0;
+
position: sticky;
+
top: 0;
+
z-index: 100;
+
}
+
+
.header-content {
+
max-width: var(--max-width);
+
margin: 0 auto;
+
padding: 0 2rem;
+
display: flex;
+
justify-content: space-between;
+
align-items: center;
+
}
+
+
.site-title {
+
font-size: 1.5rem;
+
font-weight: 600;
+
color: var(--primary-color);
+
margin: 0;
+
}
+
+
/* Navigation */
+
.site-nav {
+
display: flex;
+
gap: 1.5rem;
+
}
+
+
.nav-link {
+
text-decoration: none;
+
color: var(--text-secondary);
+
font-weight: 500;
+
font-size: 0.95rem;
+
padding: 0.5rem 0.75rem;
+
border-radius: 4px;
+
transition: all 0.2s ease;
+
}
+
+
.nav-link:hover {
+
color: var(--primary-color);
+
background-color: var(--background);
+
}
+
+
.nav-link.active {
+
color: var(--secondary-color);
+
background-color: var(--background);
+
font-weight: 600;
+
}
+
+
/* Main Content */
+
.main-content {
+
max-width: var(--max-width);
+
margin: 2rem auto;
+
padding: 0 2rem;
+
}
+
+
.page-content {
+
margin: 0;
+
}
+
+
.page-description {
+
color: var(--text-secondary);
+
margin-bottom: 1.5rem;
+
font-style: italic;
+
}
+
+
/* Sections */
+
section {
+
margin-bottom: 2rem;
+
}
+
+
h2 {
+
font-size: 1.3rem;
+
font-weight: 600;
+
margin-bottom: 0.75rem;
+
color: var(--primary-color);
+
}
+
+
h3 {
+
font-size: 1.1rem;
+
font-weight: 600;
+
margin-bottom: 0.75rem;
+
color: var(--primary-color);
+
}
+
+
/* Entries and Threads */
+
article {
+
margin-bottom: 1.5rem;
+
padding: 1rem;
+
background-color: var(--surface);
+
border-radius: 4px;
+
border: 1px solid var(--border-color);
+
}
+
+
/* Timeline-style entries */
+
.timeline-entry {
+
margin-bottom: 0.5rem;
+
padding: 0.5rem 0.75rem;
+
border: none;
+
background: transparent;
+
transition: background-color 0.2s ease;
+
}
+
+
.timeline-entry:hover {
+
background-color: var(--surface);
+
}
+
+
.timeline-meta {
+
display: inline-flex;
+
gap: 0.5rem;
+
align-items: center;
+
font-size: 0.75rem;
+
color: var(--text-secondary);
+
margin-bottom: 0.25rem;
+
}
+
+
.timeline-time {
+
font-family: 'SF Mono', Monaco, Consolas, 'Courier New', monospace;
+
font-size: 0.75rem;
+
color: var(--text-secondary);
+
}
+
+
.timeline-author {
+
font-weight: 600;
+
color: var(--primary-color);
+
font-size: 0.8rem;
+
text-decoration: none;
+
}
+
+
.timeline-author:hover {
+
color: var(--secondary-color);
+
text-decoration: underline;
+
}
+
+
.timeline-content {
+
line-height: 1.4;
+
}
+
+
.timeline-title {
+
font-size: 0.95rem;
+
font-weight: 600;
+
}
+
+
.timeline-title a {
+
color: var(--primary-color);
+
text-decoration: none;
+
}
+
+
.timeline-title a:hover {
+
color: var(--secondary-color);
+
text-decoration: underline;
+
}
+
+
.timeline-summary {
+
color: var(--text-secondary);
+
font-size: 0.9rem;
+
line-height: 1.4;
+
}
+
+
/* Legacy styles for other sections */
+
.entry-meta, .thread-header {
+
display: flex;
+
gap: 1rem;
+
align-items: center;
+
margin-bottom: 0.5rem;
+
font-size: 0.85rem;
+
color: var(--text-secondary);
+
}
+
+
.author {
+
font-weight: 600;
+
color: var(--primary-color);
+
}
+
+
time {
+
font-size: 0.85rem;
+
}
+
+
h4 {
+
font-size: 1.1rem;
+
font-weight: 600;
+
margin-bottom: 0.5rem;
+
}
+
+
h4 a {
+
color: var(--primary-color);
+
text-decoration: none;
+
}
+
+
h4 a:hover {
+
color: var(--secondary-color);
+
text-decoration: underline;
+
}
+
+
.entry-summary {
+
color: var(--text-primary);
+
line-height: 1.5;
+
margin-top: 0.5rem;
+
}
+
+
/* Enhanced Threading Styles */
+
+
/* Conversation Clusters */
+
.conversation-cluster {
+
background-color: var(--background);
+
border: 2px solid var(--border-color);
+
border-radius: 8px;
+
margin-bottom: 2rem;
+
overflow: hidden;
+
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+
}
+
+
.conversation-header {
+
background: linear-gradient(135deg, var(--surface) 0%, #f1f3f4 100%);
+
padding: 0.75rem 1rem;
+
border-bottom: 1px solid var(--border-color);
+
}
+
+
.conversation-meta {
+
display: flex;
+
justify-content: space-between;
+
align-items: center;
+
flex-wrap: wrap;
+
gap: 0.5rem;
+
}
+
+
.conversation-count {
+
font-weight: 600;
+
color: var(--secondary-color);
+
font-size: 0.9rem;
+
}
+
+
.conversation-participants {
+
font-size: 0.8rem;
+
color: var(--text-secondary);
+
flex: 1;
+
text-align: right;
+
}
+
+
.conversation-flow {
+
padding: 0.5rem;
+
}
+
+
/* Threaded Conversation Entries */
+
.conversation-entry {
+
position: relative;
+
margin-bottom: 0.75rem;
+
display: flex;
+
align-items: flex-start;
+
}
+
+
.conversation-entry.level-0 {
+
margin-left: 0;
+
}
+
+
.conversation-entry.level-1 {
+
margin-left: 1.5rem;
+
}
+
+
.conversation-entry.level-2 {
+
margin-left: 3rem;
+
}
+
+
.conversation-entry.level-3 {
+
margin-left: 4.5rem;
+
}
+
+
.conversation-entry.level-4 {
+
margin-left: 6rem;
+
}
+
+
.entry-connector {
+
width: 3px;
+
background-color: var(--secondary-color);
+
margin-right: 0.75rem;
+
margin-top: 0.25rem;
+
min-height: 2rem;
+
border-radius: 2px;
+
opacity: 0.6;
+
}
+
+
.conversation-entry.level-0 .entry-connector {
+
background-color: var(--accent-color);
+
opacity: 0.8;
+
}
+
+
.entry-content {
+
flex: 1;
+
background-color: var(--surface);
+
padding: 0.75rem;
+
border-radius: 6px;
+
border: 1px solid var(--border-color);
+
transition: all 0.2s ease;
+
}
+
+
.entry-content:hover {
+
border-color: var(--secondary-color);
+
box-shadow: 0 2px 8px rgba(52, 152, 219, 0.1);
+
}
+
+
/* Reference Indicators */
+
.reference-indicators {
+
display: inline-flex;
+
gap: 0.25rem;
+
margin-left: 0.5rem;
+
}
+
+
.ref-out, .ref-in {
+
display: inline-block;
+
width: 1rem;
+
height: 1rem;
+
border-radius: 50%;
+
text-align: center;
+
line-height: 1rem;
+
font-size: 0.7rem;
+
font-weight: bold;
+
}
+
+
.ref-out {
+
background-color: #e8f5e8;
+
color: #2d8f2d;
+
}
+
+
.ref-in {
+
background-color: #e8f0ff;
+
color: #1f5fbf;
+
}
+
+
/* Reference Badges for Individual Posts */
+
.timeline-entry.with-references {
+
background-color: var(--surface);
+
}
+
+
/* Conversation posts in unified timeline */
+
.timeline-entry.conversation-post {
+
background: transparent;
+
border: none;
+
margin-bottom: 0.5rem;
+
padding: 0.5rem 0.75rem;
+
}
+
+
.timeline-entry.conversation-post.level-0 {
+
margin-left: 0;
+
border-left: 2px solid var(--accent-color);
+
padding-left: 0.75rem;
+
}
+
+
.timeline-entry.conversation-post.level-1 {
+
margin-left: 1.5rem;
+
border-left: 2px solid var(--secondary-color);
+
padding-left: 0.75rem;
+
}
+
+
.timeline-entry.conversation-post.level-2 {
+
margin-left: 3rem;
+
border-left: 2px solid var(--text-secondary);
+
padding-left: 0.75rem;
+
}
+
+
.timeline-entry.conversation-post.level-3 {
+
margin-left: 4.5rem;
+
border-left: 2px solid var(--text-secondary);
+
padding-left: 0.75rem;
+
}
+
+
.timeline-entry.conversation-post.level-4 {
+
margin-left: 6rem;
+
border-left: 2px solid var(--text-secondary);
+
padding-left: 0.75rem;
+
}
+
+
/* Cross-thread linking */
+
.cross-thread-links {
+
margin-top: 0.5rem;
+
padding-top: 0.5rem;
+
border-top: 1px solid var(--border-color);
+
}
+
+
.cross-thread-indicator {
+
font-size: 0.75rem;
+
color: var(--text-secondary);
+
background-color: var(--surface);
+
padding: 0.25rem 0.5rem;
+
border-radius: 12px;
+
border: 1px solid var(--border-color);
+
display: inline-block;
+
}
+
+
/* Inline shared references styling */
+
.inline-shared-refs {
+
margin-left: 0.5rem;
+
font-size: 0.85rem;
+
color: var(--text-secondary);
+
}
+
+
.shared-ref-link {
+
color: var(--primary-color);
+
text-decoration: none;
+
font-weight: 500;
+
transition: color 0.2s ease;
+
}
+
+
.shared-ref-link:hover {
+
color: var(--secondary-color);
+
text-decoration: underline;
+
}
+
+
.shared-ref-more {
+
font-style: italic;
+
color: var(--text-secondary);
+
font-size: 0.8rem;
+
margin-left: 0.25rem;
+
}
+
+
.user-anchor, .post-anchor {
+
position: absolute;
+
margin-top: -60px; /* Offset for the sticky header */
+
pointer-events: none;
+
}
+
+
.cross-thread-link {
+
color: var(--primary-color);
+
text-decoration: none;
+
font-weight: 500;
+
transition: color 0.2s ease;
+
}
+
+
.cross-thread-link:hover {
+
color: var(--secondary-color);
+
text-decoration: underline;
+
}
+
+
.reference-badges {
+
display: flex;
+
gap: 0.25rem;
+
margin-left: 0.5rem;
+
flex-wrap: wrap;
+
}
+
+
.ref-badge {
+
display: inline-block;
+
padding: 0.1rem 0.4rem;
+
border-radius: 12px;
+
font-size: 0.7rem;
+
font-weight: 600;
+
text-transform: uppercase;
+
letter-spacing: 0.05em;
+
}
+
+
.ref-badge.ref-outbound {
+
background-color: #e8f5e8;
+
color: #2d8f2d;
+
border: 1px solid #c3e6c3;
+
}
+
+
.ref-badge.ref-inbound {
+
background-color: #e8f0ff;
+
color: #1f5fbf;
+
border: 1px solid #b3d9ff;
+
}
+
+
/* Author Color Coding */
+
.timeline-author {
+
position: relative;
+
}
+
+
.timeline-author::before {
+
content: '';
+
display: inline-block;
+
width: 8px;
+
height: 8px;
+
border-radius: 50%;
+
margin-right: 0.5rem;
+
background-color: var(--secondary-color);
+
}
+
+
/* Generate consistent colors for authors */
+
.author-avsm::before { background-color: #e74c3c; }
+
.author-mort::before { background-color: #3498db; }
+
.author-mte::before { background-color: #2ecc71; }
+
.author-ryan::before { background-color: #f39c12; }
+
.author-mwd::before { background-color: #9b59b6; }
+
.author-dra::before { background-color: #1abc9c; }
+
.author-pf341::before { background-color: #34495e; }
+
.author-sadiqj::before { background-color: #e67e22; }
+
.author-martinkl::before { background-color: #8e44ad; }
+
.author-jonsterling::before { background-color: #27ae60; }
+
.author-jon::before { background-color: #f1c40f; }
+
.author-onkar::before { background-color: #e91e63; }
+
.author-gabriel::before { background-color: #00bcd4; }
+
.author-jess::before { background-color: #ff5722; }
+
.author-ibrahim::before { background-color: #607d8b; }
+
.author-andres::before { background-color: #795548; }
+
.author-eeg::before { background-color: #ff9800; }
+
+
/* Section Headers */
+
.conversations-section h3,
+
.referenced-posts-section h3,
+
.individual-posts-section h3 {
+
border-bottom: 2px solid var(--border-color);
+
padding-bottom: 0.5rem;
+
margin-bottom: 1.5rem;
+
position: relative;
+
}
+
+
.conversations-section h3::before {
+
content: "💬";
+
margin-right: 0.5rem;
+
}
+
+
.referenced-posts-section h3::before {
+
content: "🔗";
+
margin-right: 0.5rem;
+
}
+
+
.individual-posts-section h3::before {
+
content: "📝";
+
margin-right: 0.5rem;
+
}
+
+
/* Legacy thread styles (for backward compatibility) */
+
.thread {
+
background-color: var(--background);
+
border: 1px solid var(--border-color);
+
padding: 0;
+
overflow: hidden;
+
margin-bottom: 1rem;
+
}
+
+
.thread-header {
+
background-color: var(--surface);
+
padding: 0.5rem 0.75rem;
+
border-bottom: 1px solid var(--border-color);
+
}
+
+
.thread-count {
+
font-weight: 600;
+
color: var(--secondary-color);
+
}
+
+
.thread-entry {
+
padding: 0.5rem 0.75rem;
+
border-bottom: 1px solid var(--border-color);
+
}
+
+
.thread-entry:last-child {
+
border-bottom: none;
+
}
+
+
.thread-entry.reply {
+
margin-left: var(--thread-indent);
+
border-left: 3px solid var(--secondary-color);
+
background-color: var(--surface);
+
}
+
+
/* Links Section */
+
.link-group {
+
background-color: var(--background);
+
}
+
+
.link-url {
+
font-size: 1rem;
+
word-break: break-word;
+
}
+
+
.link-url a {
+
color: var(--secondary-color);
+
text-decoration: none;
+
}
+
+
.link-url a:hover {
+
text-decoration: underline;
+
}
+
+
.target-user {
+
font-size: 0.9rem;
+
color: var(--text-secondary);
+
font-weight: normal;
+
}
+
+
.referencing-entries {
+
margin-top: 0.75rem;
+
}
+
+
.ref-count {
+
font-weight: 600;
+
color: var(--text-secondary);
+
font-size: 0.9rem;
+
}
+
+
.referencing-entries ul {
+
list-style: none;
+
margin-top: 0.5rem;
+
padding-left: 1rem;
+
}
+
+
.referencing-entries li {
+
margin-bottom: 0.25rem;
+
font-size: 0.9rem;
+
}
+
+
.referencing-entries .more {
+
font-style: italic;
+
color: var(--text-secondary);
+
}
+
+
/* Users Section */
+
.user-card {
+
background-color: var(--background);
+
}
+
+
.user-header {
+
display: flex;
+
gap: 1rem;
+
align-items: start;
+
margin-bottom: 1rem;
+
}
+
+
.user-icon {
+
width: 48px;
+
height: 48px;
+
border-radius: 50%;
+
object-fit: cover;
+
}
+
+
.user-info h3 {
+
margin-bottom: 0.25rem;
+
}
+
+
.username {
+
font-size: 0.9rem;
+
color: var(--text-secondary);
+
font-weight: normal;
+
}
+
+
.user-meta {
+
font-size: 0.9rem;
+
color: var(--text-secondary);
+
}
+
+
.user-meta a {
+
color: var(--secondary-color);
+
text-decoration: none;
+
}
+
+
.user-meta a:hover {
+
text-decoration: underline;
+
}
+
+
.separator {
+
margin: 0 0.5rem;
+
}
+
+
.post-count {
+
font-weight: 600;
+
}
+
+
.user-recent h4 {
+
font-size: 0.95rem;
+
margin-bottom: 0.5rem;
+
color: var(--text-secondary);
+
}
+
+
.user-recent ul {
+
list-style: none;
+
padding-left: 0;
+
}
+
+
.user-recent li {
+
margin-bottom: 0.25rem;
+
font-size: 0.9rem;
+
}
+
+
/* Footer */
+
.site-footer {
+
max-width: var(--max-width);
+
margin: 3rem auto 2rem;
+
padding: 1rem 2rem;
+
text-align: center;
+
color: var(--text-secondary);
+
font-size: 0.85rem;
+
border-top: 1px solid var(--border-color);
+
}
+
+
.site-footer a {
+
color: var(--secondary-color);
+
text-decoration: none;
+
}
+
+
.site-footer a:hover {
+
text-decoration: underline;
+
}
+
+
/* Responsive */
+
@media (max-width: 768px) {
+
.site-title {
+
font-size: 1.3rem;
+
}
+
+
.header-content {
+
flex-direction: column;
+
gap: 0.75rem;
+
align-items: flex-start;
+
}
+
+
.site-nav {
+
gap: 1rem;
+
}
+
+
.main-content {
+
padding: 0 1rem;
+
}
+
+
.thread-entry.reply {
+
margin-left: calc(var(--thread-indent) / 2);
+
}
+
+
.user-header {
+
flex-direction: column;
+
}
+
}
+141
src/thicket/templates/timeline.html
···
···
+
{% extends "base.html" %}
+
+
{% block page_title %}Timeline - {{ title }}{% endblock %}
+
+
{% block content %}
+
{% set seen_users = [] %}
+
<div class="page-content">
+
<h2>Recent Posts & Conversations</h2>
+
+
<section class="unified-timeline">
+
{% for item in timeline_items %}
+
{% if item.type == "post" %}
+
<!-- Individual Post -->
+
<article class="timeline-entry {% if item.content.references %}with-references{% endif %}">
+
<div class="timeline-meta">
+
<time datetime="{{ item.content.entry.updated or item.content.entry.published }}" class="timeline-time">
+
{{ (item.content.entry.updated or item.content.entry.published).strftime('%Y-%m-%d %H:%M') }}
+
</time>
+
{% set homepage = get_user_homepage(item.content.username) %}
+
{% if item.content.username not in seen_users %}
+
<a id="{{ item.content.username }}" class="user-anchor"></a>
+
{% set _ = seen_users.append(item.content.username) %}
+
{% endif %}
+
<a id="post-{{ loop.index0 }}-{{ safe_anchor_id(item.content.entry.id) }}" class="post-anchor"></a>
+
{% if homepage %}
+
<a href="{{ homepage }}" target="_blank" class="timeline-author">{{ item.content.display_name }}</a>
+
{% else %}
+
<span class="timeline-author">{{ item.content.display_name }}</span>
+
{% endif %}
+
{% if item.content.references %}
+
<div class="reference-badges">
+
{% for ref in item.content.references %}
+
{% if ref.type == 'outbound' %}
+
<span class="ref-badge ref-outbound" title="References {{ ref.target_username or 'external post' }}">
+
→ {{ ref.target_username or 'ext' }}
+
</span>
+
{% elif ref.type == 'inbound' %}
+
<span class="ref-badge ref-inbound" title="Referenced by {{ ref.source_username or 'external post' }}">
+
← {{ ref.source_username or 'ext' }}
+
</span>
+
{% endif %}
+
{% endfor %}
+
</div>
+
{% endif %}
+
</div>
+
<div class="timeline-content">
+
<strong class="timeline-title">
+
<a href="{{ item.content.entry.link }}" target="_blank">{{ item.content.entry.title }}</a>
+
</strong>
+
{% if item.content.entry.summary %}
+
<span class="timeline-summary">— {{ clean_html_summary(item.content.entry.summary, 250) }}</span>
+
{% endif %}
+
{% if item.content.shared_references %}
+
<span class="inline-shared-refs">
+
{% for ref in item.content.shared_references[:3] %}
+
{% if ref.target_username %}
+
<a href="#{{ ref.target_username }}" class="shared-ref-link" title="Referenced by {{ ref.count }} entries">@{{ ref.target_username }}</a>{% if not loop.last %}, {% endif %}
+
{% endif %}
+
{% endfor %}
+
{% if item.content.shared_references|length > 3 %}
+
<span class="shared-ref-more">+{{ item.content.shared_references|length - 3 }} more</span>
+
{% endif %}
+
</span>
+
{% endif %}
+
{% if item.content.cross_thread_links %}
+
<div class="cross-thread-links">
+
<span class="cross-thread-indicator">🔗 Also appears: </span>
+
{% for link in item.content.cross_thread_links %}
+
<a href="#{{ link.anchor_id }}" class="cross-thread-link" title="{{ link.title }}">{{ link.context }}</a>{% if not loop.last %}, {% endif %}
+
{% endfor %}
+
</div>
+
{% endif %}
+
</div>
+
</article>
+
+
{% elif item.type == "thread" %}
+
<!-- Conversation Thread -->
+
{% set outer_loop_index = loop.index0 %}
+
{% for thread_item in item.content %}
+
<article class="timeline-entry conversation-post level-{{ thread_item.thread_level }}">
+
<div class="timeline-meta">
+
<time datetime="{{ thread_item.entry.updated or thread_item.entry.published }}" class="timeline-time">
+
{{ (thread_item.entry.updated or thread_item.entry.published).strftime('%Y-%m-%d %H:%M') }}
+
</time>
+
{% set homepage = get_user_homepage(thread_item.username) %}
+
{% if thread_item.username not in seen_users %}
+
<a id="{{ thread_item.username }}" class="user-anchor"></a>
+
{% set _ = seen_users.append(thread_item.username) %}
+
{% endif %}
+
<a id="post-{{ outer_loop_index }}-{{ loop.index0 }}-{{ safe_anchor_id(thread_item.entry.id) }}" class="post-anchor"></a>
+
{% if homepage %}
+
<a href="{{ homepage }}" target="_blank" class="timeline-author author-{{ thread_item.username }}">{{ thread_item.display_name }}</a>
+
{% else %}
+
<span class="timeline-author author-{{ thread_item.username }}">{{ thread_item.display_name }}</span>
+
{% endif %}
+
{% if thread_item.references_to or thread_item.referenced_by %}
+
<span class="reference-indicators">
+
{% if thread_item.references_to %}
+
<span class="ref-out" title="References other posts">→</span>
+
{% endif %}
+
{% if thread_item.referenced_by %}
+
<span class="ref-in" title="Referenced by other posts">←</span>
+
{% endif %}
+
</span>
+
{% endif %}
+
</div>
+
<div class="timeline-content">
+
<strong class="timeline-title">
+
<a href="{{ thread_item.entry.link }}" target="_blank">{{ thread_item.entry.title }}</a>
+
</strong>
+
{% if thread_item.entry.summary %}
+
<span class="timeline-summary">— {{ clean_html_summary(thread_item.entry.summary, 300) }}</span>
+
{% endif %}
+
{% if thread_item.shared_references %}
+
<span class="inline-shared-refs">
+
{% for ref in thread_item.shared_references[:3] %}
+
{% if ref.target_username %}
+
<a href="#{{ ref.target_username }}" class="shared-ref-link" title="Referenced by {{ ref.count }} entries">@{{ ref.target_username }}</a>{% if not loop.last %}, {% endif %}
+
{% endif %}
+
{% endfor %}
+
{% if thread_item.shared_references|length > 3 %}
+
<span class="shared-ref-more">+{{ thread_item.shared_references|length - 3 }} more</span>
+
{% endif %}
+
</span>
+
{% endif %}
+
{% if thread_item.cross_thread_links %}
+
<div class="cross-thread-links">
+
<span class="cross-thread-indicator">🔗 Also appears: </span>
+
{% for link in thread_item.cross_thread_links %}
+
<a href="#{{ link.anchor_id }}" class="cross-thread-link" title="{{ link.title }}">{{ link.context }}</a>{% if not loop.last %}, {% endif %}
+
{% endfor %}
+
</div>
+
{% endif %}
+
</div>
+
</article>
+
{% endfor %}
+
{% endif %}
+
{% endfor %}
+
</section>
+
</div>
+
{% endblock %}
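
Note on wiring: the timeline template calls `get_user_homepage`, `safe_anchor_id`, and `clean_html_summary` as bare functions, which only resolves if the generator registers them on the Jinja environment; that wiring is not visible in this diff. A minimal sketch of how it could look, assuming `clean_html_summary` is a sanitize-and-truncate helper defined elsewhere in generate.py:

```python
from jinja2 import Environment, FileSystemLoader, select_autoescape

def build_env(template_dir, generator) -> Environment:
    """Hedged sketch: expose the helpers the templates call as Jinja globals.

    `generator` stands in for the WebsiteGenerator instance; safe_anchor_id
    is the module-level function shown earlier in this diff, while
    clean_html_summary is assumed to exist elsewhere in generate.py.
    """
    env = Environment(
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(["html", "xml"]),
    )
    env.globals.update(
        get_user_homepage=generator.get_user_homepage,
        safe_anchor_id=safe_anchor_id,
        clean_html_summary=clean_html_summary,  # assumed: sanitize + truncate
    )
    return env
```

The `seen_users` list that the thread branch mutates via `{% set _ = seen_users.append(...) %}` would likewise need to reach the template as a real Python list, presumably passed through the render context.
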
+57
src/thicket/templates/users.html
···
···
+
{% extends "base.html" %}
+
+
{% block page_title %}Users - {{ title }}{% endblock %}
+
+
{% block content %}
+
<div class="page-content">
+
<h2>Users</h2>
+
<p class="page-description">All users contributing to this thicket, ordered by post count.</p>
+
+
{% for user_info in users %}
+
<article class="user-card">
+
<div class="user-header">
+
{% if user_info.metadata.icon and user_info.metadata.icon != "None" %}
+
<img src="{{ user_info.metadata.icon }}" alt="{{ user_info.metadata.username }}" class="user-icon">
+
{% endif %}
+
<div class="user-info">
+
<h3>
+
{% if user_info.metadata.display_name %}
+
{{ user_info.metadata.display_name }}
+
<span class="username">({{ user_info.metadata.username }})</span>
+
{% else %}
+
{{ user_info.metadata.username }}
+
{% endif %}
+
</h3>
+
<div class="user-meta">
+
{% if user_info.metadata.homepage %}
+
<a href="{{ user_info.metadata.homepage }}" target="_blank">{{ user_info.metadata.homepage }}</a>
+
{% endif %}
+
{% if user_info.metadata.email %}
+
{% if user_info.metadata.homepage %}<span class="separator">•</span>{% endif %}
+
<a href="mailto:{{ user_info.metadata.email }}">{{ user_info.metadata.email }}</a>
+
{% endif %}
+
{% if user_info.metadata.homepage or user_info.metadata.email %}<span class="separator">•</span>{% endif %}
+
<span class="post-count">{{ user_info.metadata.entry_count }} posts</span>
+
</div>
+
</div>
+
</div>
+
+
{% if user_info.recent_entries %}
+
<div class="user-recent">
+
<h4>Recent posts:</h4>
+
<ul>
+
{% for username, entry in user_info.recent_entries %}
+
<li>
+
<a href="{{ entry.link }}" target="_blank">{{ entry.title }}</a>
+
<time datetime="{{ (entry.updated or entry.published).isoformat() }}">
+
({{ (entry.updated or entry.published).strftime('%Y-%m-%d') }})
+
</time>
+
</li>
+
{% endfor %}
+
</ul>
+
</div>
+
{% endif %}
+
</article>
+
{% endfor %}
+
</div>
+
{% endblock %}
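
Note on context: users.html iterates a `users` list whose items match the UserData TypedDict declared at the top of generate.py. The diff does not show where that context is assembled; a plausible sketch using the GitStore API exercised by the removed tests (`list_entries(username, limit=...)` and the `entry_count` field), ordered by post count as the page description promises:

```python
def build_users_context(git_store, index, recent=5):
    """Hedged sketch: assemble the `users` list that users.html iterates.

    `index` is a GitStoreIndex; list_entries() and entry_count both appear
    in this diff (in the removed tests), so only the assembly is assumed.
    """
    users = []
    for username, metadata in index.users.items():
        entries = git_store.list_entries(username, limit=recent)
        users.append({
            "metadata": metadata,
            # recent_entries is a list of (username, AtomEntry) tuples,
            # matching the UserData TypedDict from generate.py
            "recent_entries": [(username, e) for e in entries],
        })
    # "ordered by post count", per the page description in the template
    users.sort(key=lambda u: u["metadata"].entry_count, reverse=True)
    return users
```
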
tests/__init__.py

This is a binary file and will not be displayed.

-84
tests/conftest.py
···
-
"""Test configuration and fixtures for thicket."""
-
-
import tempfile
-
from pathlib import Path
-
-
import pytest
-
-
from thicket.models import ThicketConfig, UserConfig
-
-
-
@pytest.fixture
-
def temp_dir():
-
"""Create a temporary directory for tests."""
-
with tempfile.TemporaryDirectory() as tmp_dir:
-
yield Path(tmp_dir)
-
-
-
@pytest.fixture
-
def sample_config(temp_dir):
-
"""Create a sample configuration for testing."""
-
git_store = temp_dir / "git_store"
-
cache_dir = temp_dir / "cache"
-
-
return ThicketConfig(
-
git_store=git_store,
-
cache_dir=cache_dir,
-
users=[
-
UserConfig(
-
username="testuser",
-
feeds=["https://example.com/feed.xml"],
-
email="test@example.com",
-
display_name="Test User",
-
)
-
],
-
)
-
-
-
@pytest.fixture
-
def sample_atom_feed():
-
"""Sample Atom feed XML for testing."""
-
return """<?xml version="1.0" encoding="utf-8"?>
-
<feed xmlns="http://www.w3.org/2005/Atom">
-
<title>Test Feed</title>
-
<link href="https://example.com/"/>
-
<updated>2025-01-01T00:00:00Z</updated>
-
<author>
-
<name>Test Author</name>
-
<email>author@example.com</email>
-
</author>
-
<id>https://example.com/</id>
-
-
<entry>
-
<title>Test Entry</title>
-
<link href="https://example.com/entry/1"/>
-
<id>https://example.com/entry/1</id>
-
<updated>2025-01-01T00:00:00Z</updated>
-
<summary>This is a test entry.</summary>
-
<content type="html">
-
<![CDATA[<p>This is the content of the test entry.</p>]]>
-
</content>
-
</entry>
-
</feed>"""
-
-
-
@pytest.fixture
-
def sample_rss_feed():
-
"""Sample RSS feed XML for testing."""
-
return """<?xml version="1.0" encoding="UTF-8"?>
-
<rss version="2.0">
-
<channel>
-
<title>Test RSS Feed</title>
-
<link>https://example.com/</link>
-
<description>Test RSS feed for testing</description>
-
<managingEditor>editor@example.com</managingEditor>
-
-
<item>
-
<title>Test RSS Entry</title>
-
<link>https://example.com/rss/entry/1</link>
-
<description>This is a test RSS entry.</description>
-
<pubDate>Mon, 01 Jan 2025 00:00:00 GMT</pubDate>
-
<guid>https://example.com/rss/entry/1</guid>
-
</item>
-
</channel>
-
</rss>"""
···
-131
tests/test_feed_parser.py
···
-
"""Tests for feed parser functionality."""
-
-
from pydantic import HttpUrl
-
-
from thicket.core.feed_parser import FeedParser
-
from thicket.models import AtomEntry, FeedMetadata
-
-
-
class TestFeedParser:
-
"""Test the FeedParser class."""
-
-
def test_init(self):
-
"""Test parser initialization."""
-
parser = FeedParser()
-
assert parser.user_agent == "thicket/0.1.0"
-
assert "a" in parser.allowed_tags
-
assert "href" in parser.allowed_attributes["a"]
-
-
def test_parse_atom_feed(self, sample_atom_feed):
-
"""Test parsing an Atom feed."""
-
parser = FeedParser()
-
metadata, entries = parser.parse_feed(sample_atom_feed)
-
-
# Check metadata
-
assert isinstance(metadata, FeedMetadata)
-
assert metadata.title == "Test Feed"
-
assert metadata.author_name == "Test Author"
-
assert metadata.author_email == "author@example.com"
-
assert metadata.link == HttpUrl("https://example.com/")
-
-
# Check entries
-
assert len(entries) == 1
-
entry = entries[0]
-
assert isinstance(entry, AtomEntry)
-
assert entry.title == "Test Entry"
-
assert entry.id == "https://example.com/entry/1"
-
assert entry.link == HttpUrl("https://example.com/entry/1")
-
assert entry.summary == "This is a test entry."
-
assert "<p>This is the content of the test entry.</p>" in entry.content
-
-
def test_parse_rss_feed(self, sample_rss_feed):
-
"""Test parsing an RSS feed."""
-
parser = FeedParser()
-
metadata, entries = parser.parse_feed(sample_rss_feed)
-
-
# Check metadata
-
assert isinstance(metadata, FeedMetadata)
-
assert metadata.title == "Test RSS Feed"
-
assert metadata.link == HttpUrl("https://example.com/")
-
assert metadata.author_email == "editor@example.com"
-
-
# Check entries
-
assert len(entries) == 1
-
entry = entries[0]
-
assert isinstance(entry, AtomEntry)
-
assert entry.title == "Test RSS Entry"
-
assert entry.id == "https://example.com/rss/entry/1"
-
assert entry.summary == "This is a test RSS entry."
-
-
def test_sanitize_entry_id(self):
-
"""Test entry ID sanitization."""
-
parser = FeedParser()
-
-
# Test URL ID
-
url_id = "https://example.com/posts/2025/01/test-post"
-
sanitized = parser.sanitize_entry_id(url_id)
-
assert sanitized == "posts_2025_01_test-post"
-
-
# Test problematic characters
-
bad_id = "test/with\\bad:chars|and<more>"
-
sanitized = parser.sanitize_entry_id(bad_id)
-
assert sanitized == "test_with_bad_chars_and_more_"
-
-
# Test empty ID
-
empty_id = ""
-
sanitized = parser.sanitize_entry_id(empty_id)
-
assert sanitized == "entry"
-
-
# Test very long ID
-
long_id = "a" * 300
-
sanitized = parser.sanitize_entry_id(long_id)
-
assert len(sanitized) == 200
-
-
def test_sanitize_html(self):
-
"""Test HTML sanitization."""
-
parser = FeedParser()
-
-
# Test allowed tags
-
safe_html = "<p>This is <strong>safe</strong> HTML</p>"
-
sanitized = parser._sanitize_html(safe_html)
-
assert sanitized == safe_html
-
-
# Test dangerous tags
-
dangerous_html = "<script>alert('xss')</script><p>Safe content</p>"
-
sanitized = parser._sanitize_html(dangerous_html)
-
assert "<script>" not in sanitized
-
assert "<p>Safe content</p>" in sanitized
-
-
# Test attributes
-
html_with_attrs = '<a href="https://example.com" onclick="alert()">Link</a>'
-
sanitized = parser._sanitize_html(html_with_attrs)
-
assert 'href="https://example.com"' in sanitized
-
assert 'onclick' not in sanitized
-
-
def test_extract_feed_metadata(self):
-
"""Test feed metadata extraction."""
-
parser = FeedParser()
-
-
# Test with feedparser parsed data
-
import feedparser
-
parsed = feedparser.parse("""<?xml version="1.0" encoding="utf-8"?>
-
<feed xmlns="http://www.w3.org/2005/Atom">
-
<title>Test Feed</title>
-
<link href="https://example.com/"/>
-
<author>
-
<name>Test Author</name>
-
<email>author@example.com</email>
-
<uri>https://example.com/about</uri>
-
</author>
-
<logo>https://example.com/logo.png</logo>
-
<icon>https://example.com/icon.png</icon>
-
</feed>""")
-
-
metadata = parser._extract_feed_metadata(parsed.feed)
-
assert metadata.title == "Test Feed"
-
assert metadata.author_name == "Test Author"
-
assert metadata.author_email == "author@example.com"
-
assert metadata.author_uri == HttpUrl("https://example.com/about")
-
assert metadata.link == HttpUrl("https://example.com/")
-
assert metadata.logo == HttpUrl("https://example.com/logo.png")
-
assert metadata.icon == HttpUrl("https://example.com/icon.png")
···
-275
tests/test_git_store.py
···
-
"""Tests for Git store functionality."""
-
-
import json
-
from datetime import datetime
-
-
from pydantic import HttpUrl
-
-
from thicket.core.git_store import GitStore
-
from thicket.models import AtomEntry, DuplicateMap, UserMetadata
-
-
-
class TestGitStore:
-
"""Test the GitStore class."""
-
-
def test_init_new_repo(self, temp_dir):
-
"""Test initializing a new Git repository."""
-
repo_path = temp_dir / "test_repo"
-
store = GitStore(repo_path)
-
-
assert store.repo_path == repo_path
-
assert store.repo is not None
-
assert repo_path.exists()
-
assert (repo_path / ".git").exists()
-
assert (repo_path / "index.json").exists()
-
assert (repo_path / "duplicates.json").exists()
-
-
def test_init_existing_repo(self, temp_dir):
-
"""Test initializing with existing repository."""
-
repo_path = temp_dir / "test_repo"
-
-
# Create first store
-
store1 = GitStore(repo_path)
-
store1.add_user("testuser", display_name="Test User")
-
-
# Create second store pointing to same repo
-
store2 = GitStore(repo_path)
-
user = store2.get_user("testuser")
-
-
assert user is not None
-
assert user.username == "testuser"
-
assert user.display_name == "Test User"
-
-
def test_add_user(self, temp_dir):
-
"""Test adding a user to the Git store."""
-
store = GitStore(temp_dir / "test_repo")
-
-
user = store.add_user(
-
username="testuser",
-
display_name="Test User",
-
email="test@example.com",
-
homepage="https://example.com",
-
icon="https://example.com/icon.png",
-
feeds=["https://example.com/feed.xml"],
-
)
-
-
assert isinstance(user, UserMetadata)
-
assert user.username == "testuser"
-
assert user.display_name == "Test User"
-
assert user.email == "test@example.com"
-
assert user.homepage == "https://example.com"
-
assert user.icon == "https://example.com/icon.png"
-
assert user.feeds == ["https://example.com/feed.xml"]
-
assert user.directory == "testuser"
-
-
# Check that user directory was created
-
user_dir = store.repo_path / "testuser"
-
assert user_dir.exists()
-
-
# Check user exists in index
-
stored_user = store.get_user("testuser")
-
assert stored_user is not None
-
assert stored_user.username == "testuser"
-
assert stored_user.display_name == "Test User"
-
-
def test_get_user(self, temp_dir):
-
"""Test getting user metadata."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Add user
-
store.add_user("testuser", display_name="Test User")
-
-
# Get user
-
user = store.get_user("testuser")
-
assert user is not None
-
assert user.username == "testuser"
-
assert user.display_name == "Test User"
-
-
# Try to get non-existent user
-
non_user = store.get_user("nonexistent")
-
assert non_user is None
-
-
def test_store_entry(self, temp_dir):
-
"""Test storing an entry."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Add user first
-
store.add_user("testuser")
-
-
# Create test entry
-
entry = AtomEntry(
-
id="https://example.com/entry/1",
-
title="Test Entry",
-
link=HttpUrl("https://example.com/entry/1"),
-
updated=datetime.now(),
-
summary="Test entry summary",
-
content="<p>Test content</p>",
-
)
-
-
# Store entry
-
result = store.store_entry("testuser", entry)
-
assert result is True
-
-
# Check that entry file was created
-
user_dir = store.repo_path / "testuser"
-
entry_files = list(user_dir.glob("*.json"))
-
entry_files = [f for f in entry_files if f.name != "metadata.json"]
-
assert len(entry_files) == 1
-
-
# Check entry content
-
with open(entry_files[0]) as f:
-
stored_entry = json.load(f)
-
assert stored_entry["title"] == "Test Entry"
-
assert stored_entry["id"] == "https://example.com/entry/1"
-
-
def test_get_entry(self, temp_dir):
-
"""Test retrieving an entry."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Add user and entry
-
store.add_user("testuser")
-
entry = AtomEntry(
-
id="https://example.com/entry/1",
-
title="Test Entry",
-
link=HttpUrl("https://example.com/entry/1"),
-
updated=datetime.now(),
-
)
-
store.store_entry("testuser", entry)
-
-
# Get entry
-
retrieved = store.get_entry("testuser", "https://example.com/entry/1")
-
assert retrieved is not None
-
assert retrieved.title == "Test Entry"
-
assert retrieved.id == "https://example.com/entry/1"
-
-
# Try to get non-existent entry
-
non_entry = store.get_entry("testuser", "https://example.com/nonexistent")
-
assert non_entry is None
-
-
def test_list_entries(self, temp_dir):
-
"""Test listing entries for a user."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Add user
-
store.add_user("testuser")
-
-
# Add multiple entries
-
for i in range(3):
-
entry = AtomEntry(
-
id=f"https://example.com/entry/{i}",
-
title=f"Test Entry {i}",
-
link=HttpUrl(f"https://example.com/entry/{i}"),
-
updated=datetime.now(),
-
)
-
store.store_entry("testuser", entry)
-
-
# List all entries
-
entries = store.list_entries("testuser")
-
assert len(entries) == 3
-
-
# List with limit
-
limited = store.list_entries("testuser", limit=2)
-
assert len(limited) == 2
-
-
# List for non-existent user
-
none_entries = store.list_entries("nonexistent")
-
assert len(none_entries) == 0
-
-
def test_duplicates(self, temp_dir):
-
"""Test duplicate management."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Get initial duplicates (should be empty)
-
duplicates = store.get_duplicates()
-
assert isinstance(duplicates, DuplicateMap)
-
assert len(duplicates.duplicates) == 0
-
-
# Add duplicate
-
store.add_duplicate("https://example.com/dup", "https://example.com/canonical")
-
-
# Check duplicate was added
-
duplicates = store.get_duplicates()
-
assert len(duplicates.duplicates) == 1
-
assert duplicates.is_duplicate("https://example.com/dup")
-
assert duplicates.get_canonical("https://example.com/dup") == "https://example.com/canonical"
-
-
# Remove duplicate
-
result = store.remove_duplicate("https://example.com/dup")
-
assert result is True
-
-
# Check duplicate was removed
-
duplicates = store.get_duplicates()
-
assert len(duplicates.duplicates) == 0
-
assert not duplicates.is_duplicate("https://example.com/dup")
-
-
def test_search_entries(self, temp_dir):
-
"""Test searching entries."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Add user
-
store.add_user("testuser")
-
-
# Add entries with different content
-
entries_data = [
-
("Test Python Programming", "Learning Python basics"),
-
("JavaScript Tutorial", "Advanced JavaScript concepts"),
-
("Python Web Development", "Building web apps with Python"),
-
]
-
-
for title, summary in entries_data:
-
entry = AtomEntry(
-
id=f"https://example.com/entry/{title.lower().replace(' ', '-')}",
-
title=title,
-
link=HttpUrl(f"https://example.com/entry/{title.lower().replace(' ', '-')}"),
-
updated=datetime.now(),
-
summary=summary,
-
)
-
store.store_entry("testuser", entry)
-
-
# Search for Python entries
-
results = store.search_entries("Python")
-
assert len(results) == 2
-
-
# Search for specific user
-
results = store.search_entries("Python", username="testuser")
-
assert len(results) == 2
-
-
# Search with limit
-
results = store.search_entries("Python", limit=1)
-
assert len(results) == 1
-
-
# Search for non-existent term
-
results = store.search_entries("NonExistent")
-
assert len(results) == 0
-
-
def test_get_stats(self, temp_dir):
-
"""Test getting repository statistics."""
-
store = GitStore(temp_dir / "test_repo")
-
-
# Get initial stats
-
stats = store.get_stats()
-
assert stats["total_users"] == 0
-
assert stats["total_entries"] == 0
-
assert stats["total_duplicates"] == 0
-
-
# Add user and entries
-
store.add_user("testuser")
-
for i in range(3):
-
entry = AtomEntry(
-
id=f"https://example.com/entry/{i}",
-
title=f"Test Entry {i}",
-
link=HttpUrl(f"https://example.com/entry/{i}"),
-
updated=datetime.now(),
-
)
-
store.store_entry("testuser", entry)
-
-
# Add duplicate
-
store.add_duplicate("https://example.com/dup", "https://example.com/canonical")
-
-
# Get updated stats
-
stats = store.get_stats()
-
assert stats["total_users"] == 1
-
assert stats["total_entries"] == 3
-
assert stats["total_duplicates"] == 1
-
assert "last_updated" in stats
-
assert "repository_size" in stats
···
-352
tests/test_models.py
···
-
"""Tests for pydantic models."""
-
-
from datetime import datetime
-
-
import pytest
-
from pydantic import HttpUrl, ValidationError
-
-
from thicket.models import (
-
AtomEntry,
-
DuplicateMap,
-
FeedMetadata,
-
ThicketConfig,
-
UserConfig,
-
UserMetadata,
-
)
-
-
-
class TestUserConfig:
-
"""Test UserConfig model."""
-
-
def test_valid_user_config(self):
-
"""Test creating valid user config."""
-
config = UserConfig(
-
username="testuser",
-
feeds=["https://example.com/feed.xml"],
-
email="test@example.com",
-
homepage="https://example.com",
-
display_name="Test User",
-
)
-
-
assert config.username == "testuser"
-
assert len(config.feeds) == 1
-
assert config.feeds[0] == HttpUrl("https://example.com/feed.xml")
-
assert config.email == "test@example.com"
-
assert config.display_name == "Test User"
-
-
def test_invalid_email(self):
-
"""Test validation of invalid email."""
-
with pytest.raises(ValidationError):
-
UserConfig(
-
username="testuser",
-
feeds=["https://example.com/feed.xml"],
-
email="invalid-email",
-
)
-
-
def test_invalid_feed_url(self):
-
"""Test validation of invalid feed URL."""
-
with pytest.raises(ValidationError):
-
UserConfig(
-
username="testuser",
-
feeds=["not-a-url"],
-
)
-
-
def test_optional_fields(self):
-
"""Test optional fields with None values."""
-
config = UserConfig(
-
username="testuser",
-
feeds=["https://example.com/feed.xml"],
-
)
-
-
assert config.email is None
-
assert config.homepage is None
-
assert config.icon is None
-
assert config.display_name is None
-
-
-
class TestThicketConfig:
-
"""Test ThicketConfig model."""
-
-
def test_valid_config(self, temp_dir):
-
"""Test creating valid configuration."""
-
config = ThicketConfig(
-
git_store=temp_dir / "git_store",
-
cache_dir=temp_dir / "cache",
-
users=[
-
UserConfig(
-
username="testuser",
-
feeds=["https://example.com/feed.xml"],
-
)
-
],
-
)
-
-
assert config.git_store == temp_dir / "git_store"
-
assert config.cache_dir == temp_dir / "cache"
-
assert len(config.users) == 1
-
assert config.users[0].username == "testuser"
-
-
def test_find_user(self, temp_dir):
-
"""Test finding user by username."""
-
config = ThicketConfig(
-
git_store=temp_dir / "git_store",
-
cache_dir=temp_dir / "cache",
-
users=[
-
UserConfig(username="user1", feeds=["https://example.com/feed1.xml"]),
-
UserConfig(username="user2", feeds=["https://example.com/feed2.xml"]),
-
],
-
)
-
-
user = config.find_user("user1")
-
assert user is not None
-
assert user.username == "user1"
-
-
non_user = config.find_user("nonexistent")
-
assert non_user is None
-
-
def test_add_user(self, temp_dir):
-
"""Test adding a new user."""
-
config = ThicketConfig(
-
git_store=temp_dir / "git_store",
-
cache_dir=temp_dir / "cache",
-
users=[],
-
)
-
-
new_user = UserConfig(
-
username="newuser",
-
feeds=["https://example.com/feed.xml"],
-
)
-
-
config.add_user(new_user)
-
assert len(config.users) == 1
-
assert config.users[0].username == "newuser"
-
-
def test_add_feed_to_user(self, temp_dir):
-
"""Test adding feed to existing user."""
-
config = ThicketConfig(
-
git_store=temp_dir / "git_store",
-
cache_dir=temp_dir / "cache",
-
users=[
-
UserConfig(username="testuser", feeds=["https://example.com/feed1.xml"]),
-
],
-
)
-
-
result = config.add_feed_to_user("testuser", HttpUrl("https://example.com/feed2.xml"))
-
assert result is True
-
-
user = config.find_user("testuser")
-
assert len(user.feeds) == 2
-
assert HttpUrl("https://example.com/feed2.xml") in user.feeds
-
-
# Test adding to non-existent user
-
result = config.add_feed_to_user("nonexistent", HttpUrl("https://example.com/feed.xml"))
-
assert result is False
-
-
-
class TestAtomEntry:
-
"""Test AtomEntry model."""
-
-
def test_valid_entry(self):
-
"""Test creating valid Atom entry."""
-
entry = AtomEntry(
-
id="https://example.com/entry/1",
-
title="Test Entry",
-
link=HttpUrl("https://example.com/entry/1"),
-
updated=datetime.now(),
-
published=datetime.now(),
-
summary="Test summary",
-
content="<p>Test content</p>",
-
content_type="html",
-
author={"name": "Test Author"},
-
categories=["test", "example"],
-
)
-
-
assert entry.id == "https://example.com/entry/1"
-
assert entry.title == "Test Entry"
-
assert entry.summary == "Test summary"
-
assert entry.content == "<p>Test content</p>"
-
assert entry.content_type == "html"
-
assert entry.author["name"] == "Test Author"
-
assert "test" in entry.categories
-
-
def test_minimal_entry(self):
-
"""Test creating minimal Atom entry."""
-
entry = AtomEntry(
-
id="https://example.com/entry/1",
-
title="Test Entry",
-
link=HttpUrl("https://example.com/entry/1"),
-
updated=datetime.now(),
-
)
-
-
assert entry.id == "https://example.com/entry/1"
-
assert entry.title == "Test Entry"
-
assert entry.published is None
-
assert entry.summary is None
-
assert entry.content is None
-
assert entry.content_type == "html" # default
-
assert entry.author is None
-
assert entry.categories == []
-
-
-
class TestDuplicateMap:
-
"""Test DuplicateMap model."""
-
-
def test_empty_duplicates(self):
-
"""Test empty duplicate map."""
-
dup_map = DuplicateMap()
-
assert len(dup_map.duplicates) == 0
-
assert not dup_map.is_duplicate("test")
-
assert dup_map.get_canonical("test") == "test"
-
-
def test_add_duplicate(self):
-
"""Test adding duplicate mapping."""
-
dup_map = DuplicateMap()
-
dup_map.add_duplicate("dup1", "canonical1")
-
-
assert len(dup_map.duplicates) == 1
-
assert dup_map.is_duplicate("dup1")
-
assert dup_map.get_canonical("dup1") == "canonical1"
-
assert dup_map.get_canonical("canonical1") == "canonical1"
-
-
def test_remove_duplicate(self):
-
"""Test removing duplicate mapping."""
-
dup_map = DuplicateMap()
-
dup_map.add_duplicate("dup1", "canonical1")
-
-
result = dup_map.remove_duplicate("dup1")
-
assert result is True
-
assert len(dup_map.duplicates) == 0
-
assert not dup_map.is_duplicate("dup1")
-
-
# Test removing non-existent duplicate
-
result = dup_map.remove_duplicate("nonexistent")
-
assert result is False
-
-
def test_get_duplicates_for_canonical(self):
-
"""Test getting all duplicates for a canonical ID."""
-
dup_map = DuplicateMap()
-
dup_map.add_duplicate("dup1", "canonical1")
-
dup_map.add_duplicate("dup2", "canonical1")
-
dup_map.add_duplicate("dup3", "canonical2")
-
-
dups = dup_map.get_duplicates_for_canonical("canonical1")
-
assert len(dups) == 2
-
assert "dup1" in dups
-
assert "dup2" in dups
-
-
dups = dup_map.get_duplicates_for_canonical("canonical2")
-
assert len(dups) == 1
-
assert "dup3" in dups
-
-
dups = dup_map.get_duplicates_for_canonical("nonexistent")
-
assert len(dups) == 0
-
-
-
class TestFeedMetadata:
-
"""Test FeedMetadata model."""
-
-
def test_valid_metadata(self):
-
"""Test creating valid feed metadata."""
-
metadata = FeedMetadata(
-
title="Test Feed",
-
author_name="Test Author",
-
author_email="author@example.com",
-
author_uri=HttpUrl("https://example.com/author"),
-
link=HttpUrl("https://example.com"),
-
description="Test description",
-
)
-
-
assert metadata.title == "Test Feed"
-
assert metadata.author_name == "Test Author"
-
assert metadata.author_email == "author@example.com"
-
assert metadata.link == HttpUrl("https://example.com")
-
-
def test_to_user_config(self):
-
"""Test converting metadata to user config."""
-
metadata = FeedMetadata(
-
title="Test Feed",
-
author_name="Test Author",
-
author_email="author@example.com",
-
author_uri=HttpUrl("https://example.com/author"),
-
link=HttpUrl("https://example.com"),
-
logo=HttpUrl("https://example.com/logo.png"),
-
)
-
-
feed_url = HttpUrl("https://example.com/feed.xml")
-
user_config = metadata.to_user_config("testuser", feed_url)
-
-
assert user_config.username == "testuser"
-
assert user_config.feeds == [feed_url]
-
assert user_config.display_name == "Test Author"
-
assert user_config.email == "author@example.com"
-
assert user_config.homepage == HttpUrl("https://example.com/author")
-
assert user_config.icon == HttpUrl("https://example.com/logo.png")
-
-
def test_to_user_config_fallbacks(self):
-
"""Test fallback logic in to_user_config."""
-
metadata = FeedMetadata(
-
title="Test Feed",
-
link=HttpUrl("https://example.com"),
-
icon=HttpUrl("https://example.com/icon.png"),
-
)
-
-
feed_url = HttpUrl("https://example.com/feed.xml")
-
user_config = metadata.to_user_config("testuser", feed_url)
-
-
assert user_config.display_name == "Test Feed" # Falls back to title
-
assert user_config.homepage == HttpUrl("https://example.com") # Falls back to link
-
assert user_config.icon == HttpUrl("https://example.com/icon.png")
-
assert user_config.email is None
-
-
-
class TestUserMetadata:
-
"""Test UserMetadata model."""
-
-
def test_valid_metadata(self):
-
"""Test creating valid user metadata."""
-
now = datetime.now()
-
metadata = UserMetadata(
-
username="testuser",
-
directory="testuser",
-
created=now,
-
last_updated=now,
-
feeds=["https://example.com/feed.xml"],
-
entry_count=5,
-
)
-
-
assert metadata.username == "testuser"
-
assert metadata.directory == "testuser"
-
assert metadata.entry_count == 5
-
assert len(metadata.feeds) == 1
-
-
def test_update_timestamp(self):
-
"""Test updating timestamp."""
-
now = datetime.now()
-
metadata = UserMetadata(
-
username="testuser",
-
directory="testuser",
-
created=now,
-
last_updated=now,
-
)
-
-
original_time = metadata.last_updated
-
metadata.update_timestamp()
-
-
assert metadata.last_updated > original_time
-
-
def test_increment_entry_count(self):
-
"""Test incrementing entry count."""
-
metadata = UserMetadata(
-
username="testuser",
-
directory="testuser",
-
created=datetime.now(),
-
last_updated=datetime.now(),
-
entry_count=5,
-
)
-
-
original_count = metadata.entry_count
-
original_time = metadata.last_updated
-
-
metadata.increment_entry_count(3)
-
-
assert metadata.entry_count == original_count + 3
-
assert metadata.last_updated > original_time
···
+82
uv.lock
···
]
[[package]]
name = "markdown-it-py"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
···
sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
]
[[package]]
···
{ name = "feedparser" },
{ name = "gitpython" },
{ name = "httpx" },
{ name = "pendulum" },
{ name = "platformdirs" },
{ name = "pydantic" },
···
{ name = "feedparser", specifier = ">=6.0.11" },
{ name = "gitpython", specifier = ">=3.1.40" },
{ name = "httpx", specifier = ">=0.28.0" },
{ name = "mypy", marker = "extra == 'dev'", specifier = ">=1.13.0" },
{ name = "pendulum", specifier = ">=3.0.0" },
{ name = "platformdirs", specifier = ">=4.0.0" },
···
]
[[package]]
+
name = "jinja2"
+
version = "3.1.6"
+
source = { registry = "https://pypi.org/simple" }
+
dependencies = [
+
{ name = "markupsafe" },
+
]
+
sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
+
wheels = [
+
{ url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
+
]
+
+
[[package]]
name = "markdown-it-py"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
···
sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
+
]
+
+
[[package]]
+
name = "markupsafe"
+
version = "3.0.2"
+
source = { registry = "https://pypi.org/simple" }
+
sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" }
+
wheels = [
+
{ url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" },
+
{ url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" },
+
{ url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" },
+
{ url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" },
+
{ url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" },
+
{ url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" },
+
{ url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" },
+
{ url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" },
+
{ url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" },
+
{ url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" },
+
{ url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" },
+
{ url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" },
+
{ url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" },
+
{ url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" },
+
{ url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" },
+
{ url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" },
+
{ url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" },
+
{ url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" },
+
{ url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" },
+
{ url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" },
+
{ url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" },
+
{ url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" },
+
{ url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" },
+
{ url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" },
+
{ url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" },
+
{ url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" },
+
{ url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" },
+
{ url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" },
+
{ url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" },
+
{ url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" },
+
{ url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" },
+
{ url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" },
+
{ url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" },
+
{ url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" },
+
{ url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" },
+
{ url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" },
+
{ url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" },
+
{ url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" },
+
{ url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" },
+
{ url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" },
+
{ url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" },
+
{ url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" },
+
{ url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" },
+
{ url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" },
+
{ url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" },
+
{ url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" },
+
{ url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" },
+
{ url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" },
+
{ url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" },
+
{ url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
+
{ url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344, upload-time = "2024-10-18T15:21:43.721Z" },
+
{ url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389, upload-time = "2024-10-18T15:21:44.666Z" },
+
{ url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607, upload-time = "2024-10-18T15:21:45.452Z" },
+
{ url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728, upload-time = "2024-10-18T15:21:46.295Z" },
+
{ url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826, upload-time = "2024-10-18T15:21:47.134Z" },
+
{ url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843, upload-time = "2024-10-18T15:21:48.334Z" },
+
{ url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219, upload-time = "2024-10-18T15:21:49.587Z" },
+
{ url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946, upload-time = "2024-10-18T15:21:50.441Z" },
+
{ url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063, upload-time = "2024-10-18T15:21:51.385Z" },
+
{ url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506, upload-time = "2024-10-18T15:21:52.974Z" },
]
[[package]]
···
{ name = "feedparser" },
{ name = "gitpython" },
{ name = "httpx" },
+
{ name = "jinja2" },
{ name = "pendulum" },
{ name = "platformdirs" },
{ name = "pydantic" },
···
{ name = "feedparser", specifier = ">=6.0.11" },
{ name = "gitpython", specifier = ">=3.1.40" },
{ name = "httpx", specifier = ">=0.28.0" },
+
{ name = "jinja2", specifier = ">=3.1.6" },
{ name = "mypy", marker = "extra == 'dev'", specifier = ">=1.13.0" },
{ name = "pendulum", specifier = ">=3.0.0" },
{ name = "platformdirs", specifier = ">=4.0.0" },
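
Note on the lockfile: the large `markupsafe` block lands in uv.lock only because `jinja2` depends on it, and the two `thicket` dependency tables gain matching `jinja2` lines. Since uv.lock is plain TOML, the resolved pins can be sanity-checked with the standard library; a minimal sketch, assuming the lockfile layout shown above:

```python
import tomllib  # Python 3.11+ standard library

with open("uv.lock", "rb") as f:
    lock = tomllib.load(f)

# Each [[package]] block carries name, version, and its own dependencies,
# so jinja2 should list markupsafe here after this change.
for pkg in lock["package"]:
    deps = ", ".join(d["name"] for d in pkg.get("dependencies", []))
    print(f"{pkg['name']} {pkg['version']}" + (f" -> {deps}" if deps else ""))
```
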