A community based topic aggregation platform built on atproto

Compare changes

Choose any two refs to compare.

Changed files
+9718 -4632
.beads
aggregators
cmd
server
docs
internal
api
atproto
core
db
validation
scripts
static
tests
+20 -3
Makefile
···
@go build -o server ./cmd/server
@echo "$(GREEN)โœ“ Build complete: ./server$(RESET)"
-
run: ## Run the Coves server (requires database running)
-
@echo "$(GREEN)Starting Coves server...$(RESET)"
-
@go run ./cmd/server
+
run: ## Run the Coves server with dev environment (requires database running)
+
@./scripts/dev-run.sh
##@ Cleanup
···
@make dev-down
@make dev-up
+
##@ Mobile Testing

# These are command targets, not files: declare them phony so a stray file
# named e.g. "mobile-setup" can never shadow them. Duplicate .PHONY lines are
# legal in GNU Make, so this is safe even if the file has another .PHONY
# declaration elsewhere.
.PHONY: mobile-setup mobile-reset ngrok-up ngrok-down

mobile-setup: ## Setup Android port forwarding for USB-connected devices (recommended)
	@echo "$(CYAN)Setting up Android mobile testing environment...$(RESET)"
	@./scripts/setup-mobile-ports.sh

mobile-reset: ## Remove all Android port forwarding
	@echo "$(YELLOW)Removing Android port forwarding...$(RESET)"
	@adb reverse --remove-all || echo "$(YELLOW)No device connected$(RESET)"
	@echo "$(GREEN)โœ“ Port forwarding removed$(RESET)"

ngrok-up: ## Start ngrok tunnels (for iOS or WiFi testing - requires paid plan for 3 tunnels)
	@echo "$(GREEN)Starting ngrok tunnels for mobile testing...$(RESET)"
	@./scripts/start-ngrok.sh

ngrok-down: ## Stop all ngrok tunnels
	@./scripts/stop-ngrok.sh
+
##@ Utilities
validate-lexicon: ## Validate all Lexicon schemas
+14
scripts/dev-run.sh
···
+
#!/bin/bash
# Development server runner - loads .env.dev before starting the Go server.
#
# Fix: fail fast when .env.dev is missing. Previously `source .env.dev` would
# print an error but the script continued and launched the server with an
# empty/partial environment.

ENV_FILE=".env.dev"

if [ ! -f "$ENV_FILE" ]; then
    echo "Error: $ENV_FILE not found - run this from the repository root" >&2
    exit 1
fi

set -a # automatically export all variables
# shellcheck source=/dev/null
source "$ENV_FILE"
set +a

echo "๐Ÿš€ Starting Coves server in DEV mode..."
echo "   IS_DEV_ENV: $IS_DEV_ENV"
echo "   PLC_DIRECTORY_URL: $PLC_DIRECTORY_URL"
echo "   JETSTREAM_URL: $JETSTREAM_URL"
echo ""

go run ./cmd/server
+68
scripts/setup-mobile-ports.sh
···
+
#!/bin/bash
# Setup adb reverse port forwarding for mobile testing
# This allows the mobile app to access localhost services on the dev machine
#
# Fix: failure diagnostics now go to stderr (>&2) instead of stdout, so a
# caller capturing stdout sees clean output and the errors still reach the
# terminal.

set -e

# Colors
GREEN='\033[0;32m'
CYAN='\033[0;36m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

echo -e "${CYAN}๐Ÿ“ฑ Setting up Android port forwarding for Coves mobile testing...${NC}"
echo ""

# Check if adb is available
if ! command -v adb &> /dev/null; then
    echo -e "${RED}โœ— adb not found${NC}" >&2
    echo "Install Android SDK Platform Tools: https://developer.android.com/studio/releases/platform-tools" >&2
    exit 1
fi

# Check if a device is connected. Matching "device$" keeps only fully
# authorized devices (excludes "offline"/"unauthorized" states).
DEVICES=$(adb devices | grep -v "List" | grep "device$" | wc -l)
if [ "$DEVICES" -eq 0 ]; then
    echo -e "${RED}โœ— No Android devices connected${NC}" >&2
    echo "Connect a device via USB or start an emulator" >&2
    exit 1
fi

echo -e "${YELLOW}Setting up port forwarding...${NC}"

# Forward ports from Android device to localhost.
# Note the deliberate asymmetry on the first rule: the DID document advertises
# port 3000, but the PDS actually listens on 3001 on the host.
adb reverse tcp:3000 tcp:3001 # PDS (internal port in DID document)
adb reverse tcp:3001 tcp:3001 # PDS (external port)
adb reverse tcp:3002 tcp:3002 # PLC Directory
adb reverse tcp:8081 tcp:8081 # AppView

echo ""
echo -e "${GREEN}โœ… Port forwarding configured successfully!${NC}"
echo ""
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo -e "${CYAN}                    PORT FORWARDING                         ${NC}"
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo ""
echo -e "${GREEN}PDS (3000):${NC}     localhost:3001 โ†’ device:3000 ${YELLOW}(DID document port)${NC}"
echo -e "${GREEN}PDS (3001):${NC}     localhost:3001 โ†’ device:3001"
echo -e "${GREEN}PLC (3002):${NC}     localhost:3002 โ†’ device:3002"
echo -e "${GREEN}AppView (8081):${NC} localhost:8081 โ†’ device:8081"
echo ""
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo ""
echo -e "${CYAN}๐Ÿ“ฑ Next Steps:${NC}"
echo ""
echo -e "1. Mobile app is already configured for localhost (environment_config.dart)"
echo ""
echo -e "2. Run mobile app:"
echo -e "   ${YELLOW}cd /home/bretton/Code/coves-mobile${NC}"
echo -e "   ${YELLOW}flutter run --dart-define=ENVIRONMENT=local${NC}"
echo ""
echo -e "3. Login with:"
echo -e "   Handle:   ${CYAN}charlie.local.coves.dev${NC}"
echo -e "   Password: ${CYAN}charliepass123${NC}"
echo ""
echo -e "${YELLOW}๐Ÿ’ก Note: Port forwarding persists until device disconnects or you run:${NC}"
echo -e "${YELLOW}   adb reverse --remove-all${NC}"
echo ""
+116
scripts/start-ngrok.sh
···
+
#!/bin/bash
# Automated ngrok tunnel starter for mobile testing
# Starts 3 ngrok tunnels and captures their HTTPS URLs
#
# Fixes in this revision:
#   - jq matching uses endswith(":PORT") instead of contains("PORT"), so an
#     addr like "localhost:13001" can never be mistaken for port 3001.
#   - The fallback URL extraction is guarded with `|| true`; under `set -e`
#     a jq failure there used to abort the script in exactly the code path
#     meant to survive jq trouble.
#   - Removed dead code: PDS_CLEAN/PLC_CLEAN/APPVIEW_CLEAN were computed via
#     sed but never used anywhere.

set -e

# Colors
GREEN='\033[0;32m'
CYAN='\033[0;36m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${CYAN}๐Ÿš€ Starting ngrok tunnels for Coves mobile testing...${NC}"
echo ""

# Kill any existing ngrok processes
pkill -f "ngrok http" || true
sleep 2

# Start ngrok tunnels using separate processes (simpler, works with any config version)
echo -e "${YELLOW}Starting PDS tunnel (port 3001)...${NC}"
ngrok http 3001 --log=stdout > /tmp/ngrok-pds.log 2>&1 &
sleep 1

echo -e "${YELLOW}Starting PLC tunnel (port 3002)...${NC}"
ngrok http 3002 --log=stdout > /tmp/ngrok-plc.log 2>&1 &
sleep 1

echo -e "${YELLOW}Starting AppView tunnel (port 8081)...${NC}"
ngrok http 8081 --log=stdout > /tmp/ngrok-appview.log 2>&1 &

# Record every tunnel PID so stop-ngrok.sh can clean up
NGROK_PID=$(pgrep -f "ngrok http")
echo "$NGROK_PID" > /tmp/ngrok-pids.txt

# Wait for ngrok to initialize
echo ""
echo -e "${YELLOW}Waiting for tunnels to initialize...${NC}"
sleep 7

# Fetch URLs from ngrok API (single API at port 4040)
echo ""
echo -e "${GREEN}โœ… Tunnels started successfully!${NC}"
echo ""
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo -e "${CYAN}                  NGROK TUNNEL URLS                         ${NC}"
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo ""

# Get all tunnel info
TUNNELS=$(curl -s http://localhost:4040/api/tunnels 2>/dev/null || echo "")

# Extract URLs by matching the exact port suffix of config.addr
PDS_URL=$(echo "$TUNNELS" | jq -r '.tunnels[] | select(.config.addr | endswith(":3001")) | select(.proto=="https") | .public_url' 2>/dev/null | head -1)
PLC_URL=$(echo "$TUNNELS" | jq -r '.tunnels[] | select(.config.addr | endswith(":3002")) | select(.proto=="https") | .public_url' 2>/dev/null | head -1)
APPVIEW_URL=$(echo "$TUNNELS" | jq -r '.tunnels[] | select(.config.addr | endswith(":8081")) | select(.proto=="https") | .public_url' 2>/dev/null | head -1)

# Fallback if jq filtering fails - just get first 3 HTTPS URLs.
# `|| true` keeps `set -e` from killing the script if jq itself errors here.
if [ -z "$PDS_URL" ] || [ -z "$PLC_URL" ] || [ -z "$APPVIEW_URL" ]; then
    echo -e "${YELLOW}โš ๏ธ  Port-based matching failed, using fallback...${NC}"
    URLS=($(echo "$TUNNELS" | jq -r '.tunnels[] | select(.proto=="https") | .public_url' 2>/dev/null || true))
    PDS_URL=${URLS[0]:-ERROR}
    PLC_URL=${URLS[1]:-ERROR}
    APPVIEW_URL=${URLS[2]:-ERROR}
fi

echo -e "${GREEN}PDS (3001):${NC}     $PDS_URL"
echo -e "${GREEN}PLC (3002):${NC}     $PLC_URL"
echo -e "${GREEN}AppView (8081):${NC} $APPVIEW_URL"

echo ""
echo -e "${CYAN}โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•${NC}"
echo ""

# Check if any URLs failed
if [[ "$PDS_URL" == "ERROR" ]] || [[ "$PLC_URL" == "ERROR" ]] || [[ "$APPVIEW_URL" == "ERROR" ]]; then
    echo -e "${YELLOW}โš ๏ธ  Some tunnels failed to start. Check logs:${NC}"
    echo "  tail -f /tmp/ngrok-pds.log"
    echo "  tail -f /tmp/ngrok-plc.log"
    echo "  tail -f /tmp/ngrok-appview.log"
    exit 1
fi

echo -e "${CYAN}๐Ÿ“ฑ Next Steps:${NC}"
echo ""
echo -e "1. Update ${YELLOW}coves-mobile/lib/config/environment_config.dart${NC}:"
echo ""
echo -e "${GREEN}static const local = EnvironmentConfig(${NC}"
echo -e "${GREEN}  environment: Environment.local,${NC}"
echo -e "${GREEN}  apiUrl: '$APPVIEW_URL',${NC}"
echo -e "${GREEN}  handleResolverUrl: '$PDS_URL/xrpc/com.atproto.identity.resolveHandle',${NC}"
echo -e "${GREEN}  plcDirectoryUrl: '$PLC_URL',${NC}"
echo -e "${GREEN});${NC}"
echo ""
echo -e "2. Run mobile app:"
echo -e "   ${YELLOW}cd /home/bretton/Code/coves-mobile${NC}"
echo -e "   ${YELLOW}flutter run --dart-define=ENVIRONMENT=local${NC}"
echo ""
echo -e "3. Login with:"
echo -e "   Handle:   ${CYAN}bob.local.coves.dev${NC}"
echo -e "   Password: ${CYAN}bobpass123${NC}"
echo ""
echo -e "${YELLOW}๐Ÿ’ก Tip: Leave this terminal open. Press Ctrl+C to stop tunnels.${NC}"
echo -e "${YELLOW}   Or run: make ngrok-down${NC}"
echo ""

# Keep script running (can be killed with Ctrl+C or make ngrok-down)
wait
+26
scripts/stop-ngrok.sh
···
+
#!/bin/bash
# Tear down all ngrok tunnels started by start-ngrok.sh.

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

PID_FILE=/tmp/ngrok-pids.txt

echo -e "${YELLOW}Stopping ngrok tunnels...${NC}"

# First choice: kill the exact PIDs the start script recorded (one per line).
if [ -f "$PID_FILE" ]; then
    while read -r tunnel_pid; do
        kill "$tunnel_pid" 2>/dev/null || true
    done < "$PID_FILE"
    rm "$PID_FILE"
fi

# Second pass: sweep up any ngrok processes the PID file missed.
pkill -f "ngrok http" || true

# Remove the per-tunnel log files left in /tmp.
rm -f /tmp/ngrok-*.log

echo -e "${GREEN}โœ“ ngrok tunnels stopped${NC}"
-399
internal/core/votes/service.go
···
-
package votes
-
-
import (
-
"Coves/internal/core/posts"
-
"bytes"
-
"context"
-
"encoding/json"
-
"fmt"
-
"io"
-
"log"
-
"net/http"
-
"strings"
-
"time"
-
)
-
-
type voteService struct {
-
repo Repository
-
postRepo posts.Repository
-
pdsURL string
-
}
-
-
// NewVoteService creates a new vote service
-
func NewVoteService(
-
repo Repository,
-
postRepo posts.Repository,
-
pdsURL string,
-
) Service {
-
return &voteService{
-
repo: repo,
-
postRepo: postRepo,
-
pdsURL: pdsURL,
-
}
-
}
-
-
// CreateVote creates a new vote or toggles an existing vote
-
// Toggle logic:
-
// - No vote -> Create vote
-
// - Same direction -> Delete vote (toggle off)
-
// - Different direction -> Delete old + Create new (toggle direction)
-
func (s *voteService) CreateVote(ctx context.Context, voterDID string, userAccessToken string, req CreateVoteRequest) (*CreateVoteResponse, error) {
-
// 1. Validate input
-
if voterDID == "" {
-
return nil, NewValidationError("voterDid", "required")
-
}
-
if userAccessToken == "" {
-
return nil, NewValidationError("userAccessToken", "required")
-
}
-
if req.Subject == "" {
-
return nil, NewValidationError("subject", "required")
-
}
-
if req.Direction != "up" && req.Direction != "down" {
-
return nil, ErrInvalidDirection
-
}
-
-
// 2. Validate subject URI format (should be at://...)
-
if !strings.HasPrefix(req.Subject, "at://") {
-
return nil, ErrInvalidSubject
-
}
-
-
// 3. Get subject post/comment to verify it exists and get its CID (for strong reference)
-
// For now, we assume the subject is a post. In the future, we'll support comments too.
-
post, err := s.postRepo.GetByURI(ctx, req.Subject)
-
if err != nil {
-
if err == posts.ErrNotFound {
-
return nil, ErrSubjectNotFound
-
}
-
return nil, fmt.Errorf("failed to get subject post: %w", err)
-
}
-
-
// 4. Check for existing vote on PDS (source of truth for toggle logic)
-
// IMPORTANT: We query the user's PDS directly instead of AppView to avoid race conditions.
-
// AppView is eventually consistent (updated via Jetstream), so querying it can cause
-
// duplicate vote records if the user toggles before Jetstream catches up.
-
existingVoteRecord, err := s.findVoteOnPDS(ctx, voterDID, userAccessToken, req.Subject)
-
if err != nil {
-
return nil, fmt.Errorf("failed to check existing vote on PDS: %w", err)
-
}
-
-
// 5. Handle toggle logic
-
var existingVoteURI *string
-
-
if existingVoteRecord != nil {
-
// Vote exists on PDS - implement toggle logic
-
if existingVoteRecord.Direction == req.Direction {
-
// Same direction -> Delete vote (toggle off)
-
log.Printf("[VOTE-CREATE] Toggle off: deleting existing %s vote on %s", req.Direction, req.Subject)
-
-
// Delete from user's PDS
-
if err := s.deleteRecordOnPDSAs(ctx, voterDID, "social.coves.interaction.vote", existingVoteRecord.RKey, userAccessToken); err != nil {
-
return nil, fmt.Errorf("failed to delete vote on PDS: %w", err)
-
}
-
-
// Return empty response (vote was deleted, not created)
-
return &CreateVoteResponse{
-
URI: "",
-
CID: "",
-
}, nil
-
}
-
-
// Different direction -> Delete old vote first, then create new one below
-
log.Printf("[VOTE-CREATE] Toggle direction: %s -> %s on %s", existingVoteRecord.Direction, req.Direction, req.Subject)
-
-
if err := s.deleteRecordOnPDSAs(ctx, voterDID, "social.coves.interaction.vote", existingVoteRecord.RKey, userAccessToken); err != nil {
-
return nil, fmt.Errorf("failed to delete old vote on PDS: %w", err)
-
}
-
-
existingVoteURI = &existingVoteRecord.URI
-
}
-
-
// 6. Build vote record with strong reference
-
voteRecord := map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": req.Subject,
-
"cid": post.CID,
-
},
-
"direction": req.Direction,
-
"createdAt": time.Now().Format(time.RFC3339),
-
}
-
-
// 7. Write to user's PDS repository
-
recordURI, recordCID, err := s.createRecordOnPDSAs(ctx, voterDID, "social.coves.interaction.vote", "", voteRecord, userAccessToken)
-
if err != nil {
-
return nil, fmt.Errorf("failed to create vote on PDS: %w", err)
-
}
-
-
log.Printf("[VOTE-CREATE] Created %s vote: %s (CID: %s)", req.Direction, recordURI, recordCID)
-
-
// 8. Return response
-
return &CreateVoteResponse{
-
URI: recordURI,
-
CID: recordCID,
-
Existing: existingVoteURI,
-
}, nil
-
}
-
-
// DeleteVote removes a vote from a post/comment
-
func (s *voteService) DeleteVote(ctx context.Context, voterDID string, userAccessToken string, req DeleteVoteRequest) error {
-
// 1. Validate input
-
if voterDID == "" {
-
return NewValidationError("voterDid", "required")
-
}
-
if userAccessToken == "" {
-
return NewValidationError("userAccessToken", "required")
-
}
-
if req.Subject == "" {
-
return NewValidationError("subject", "required")
-
}
-
-
// 2. Find existing vote on PDS (source of truth)
-
// IMPORTANT: Query PDS directly to avoid race conditions with AppView indexing
-
existingVoteRecord, err := s.findVoteOnPDS(ctx, voterDID, userAccessToken, req.Subject)
-
if err != nil {
-
return fmt.Errorf("failed to check existing vote on PDS: %w", err)
-
}
-
-
if existingVoteRecord == nil {
-
return ErrVoteNotFound
-
}
-
-
// 3. Delete from user's PDS
-
if err := s.deleteRecordOnPDSAs(ctx, voterDID, "social.coves.interaction.vote", existingVoteRecord.RKey, userAccessToken); err != nil {
-
return fmt.Errorf("failed to delete vote on PDS: %w", err)
-
}
-
-
log.Printf("[VOTE-DELETE] Deleted vote: %s", existingVoteRecord.URI)
-
-
return nil
-
}
-
-
// GetVote retrieves a user's vote on a specific subject
-
func (s *voteService) GetVote(ctx context.Context, voterDID string, subjectURI string) (*Vote, error) {
-
return s.repo.GetByVoterAndSubject(ctx, voterDID, subjectURI)
-
}
-
-
// Helper methods for PDS operations
-
-
// createRecordOnPDSAs creates a record on the PDS using the user's access token
-
func (s *voteService) createRecordOnPDSAs(ctx context.Context, repoDID, collection, rkey string, record map[string]interface{}, accessToken string) (string, string, error) {
-
endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.createRecord", strings.TrimSuffix(s.pdsURL, "/"))
-
-
payload := map[string]interface{}{
-
"repo": repoDID,
-
"collection": collection,
-
"record": record,
-
}
-
-
if rkey != "" {
-
payload["rkey"] = rkey
-
}
-
-
return s.callPDSWithAuth(ctx, "POST", endpoint, payload, accessToken)
-
}
-
-
// deleteRecordOnPDSAs deletes a record from the PDS using the user's access token
-
func (s *voteService) deleteRecordOnPDSAs(ctx context.Context, repoDID, collection, rkey, accessToken string) error {
-
endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.deleteRecord", strings.TrimSuffix(s.pdsURL, "/"))
-
-
payload := map[string]interface{}{
-
"repo": repoDID,
-
"collection": collection,
-
"rkey": rkey,
-
}
-
-
_, _, err := s.callPDSWithAuth(ctx, "POST", endpoint, payload, accessToken)
-
return err
-
}
-
-
// callPDSWithAuth makes a PDS call with a specific access token
-
func (s *voteService) callPDSWithAuth(ctx context.Context, method, endpoint string, payload map[string]interface{}, accessToken string) (string, string, error) {
-
jsonData, err := json.Marshal(payload)
-
if err != nil {
-
return "", "", fmt.Errorf("failed to marshal payload: %w", err)
-
}
-
-
req, err := http.NewRequestWithContext(ctx, method, endpoint, bytes.NewBuffer(jsonData))
-
if err != nil {
-
return "", "", fmt.Errorf("failed to create request: %w", err)
-
}
-
req.Header.Set("Content-Type", "application/json")
-
-
// Add authentication with provided access token
-
if accessToken != "" {
-
req.Header.Set("Authorization", "Bearer "+accessToken)
-
}
-
-
// Use 30 second timeout for write operations
-
timeout := 30 * time.Second
-
client := &http.Client{Timeout: timeout}
-
resp, err := client.Do(req)
-
if err != nil {
-
return "", "", fmt.Errorf("failed to call PDS: %w", err)
-
}
-
defer func() {
-
if closeErr := resp.Body.Close(); closeErr != nil {
-
log.Printf("Failed to close response body: %v", closeErr)
-
}
-
}()
-
-
body, err := io.ReadAll(resp.Body)
-
if err != nil {
-
return "", "", fmt.Errorf("failed to read response: %w", err)
-
}
-
-
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-
return "", "", fmt.Errorf("PDS returned error %d: %s", resp.StatusCode, string(body))
-
}
-
-
// Parse response to extract URI and CID
-
var result struct {
-
URI string `json:"uri"`
-
CID string `json:"cid"`
-
}
-
if err := json.Unmarshal(body, &result); err != nil {
-
return "", "", fmt.Errorf("failed to parse PDS response: %w", err)
-
}
-
-
return result.URI, result.CID, nil
-
}
-
-
// Helper functions
-
-
// PDSVoteRecord represents a vote record returned from PDS listRecords
-
type PDSVoteRecord struct {
-
URI string
-
RKey string
-
Direction string
-
Subject struct {
-
URI string
-
CID string
-
}
-
}
-
-
// findVoteOnPDS queries the user's PDS to find an existing vote on a specific subject
-
// This is the source of truth for toggle logic (avoiding AppView race conditions)
-
//
-
// IMPORTANT: This function paginates through ALL user votes with reverse=true (newest first)
-
// to handle users with >100 votes. Without pagination, votes on older posts would not be found,
-
// causing duplicate vote records and 404 errors on delete operations.
-
func (s *voteService) findVoteOnPDS(ctx context.Context, voterDID, accessToken, subjectURI string) (*PDSVoteRecord, error) {
-
const maxPages = 50 // Safety limit: prevent infinite loops (50 pages * 100 = 5000 votes max)
-
var cursor string
-
pageCount := 0
-
-
client := &http.Client{Timeout: 10 * time.Second}
-
-
for {
-
pageCount++
-
if pageCount > maxPages {
-
log.Printf("[VOTE-PDS] Reached max pagination limit (%d pages) searching for vote on %s", maxPages, subjectURI)
-
break
-
}
-
-
// Build endpoint with pagination cursor and reverse=true (newest first)
-
endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.listRecords?repo=%s&collection=social.coves.interaction.vote&limit=100&reverse=true",
-
strings.TrimSuffix(s.pdsURL, "/"), voterDID)
-
-
if cursor != "" {
-
endpoint += fmt.Sprintf("&cursor=%s", cursor)
-
}
-
-
req, err := http.NewRequestWithContext(ctx, "GET", endpoint, nil)
-
if err != nil {
-
return nil, fmt.Errorf("failed to create request: %w", err)
-
}
-
-
req.Header.Set("Authorization", "Bearer "+accessToken)
-
-
resp, err := client.Do(req)
-
if err != nil {
-
return nil, fmt.Errorf("failed to query PDS: %w", err)
-
}
-
-
if resp.StatusCode != http.StatusOK {
-
body, _ := io.ReadAll(resp.Body)
-
resp.Body.Close()
-
return nil, fmt.Errorf("PDS returned error %d: %s", resp.StatusCode, string(body))
-
}
-
-
var result struct {
-
Records []struct {
-
URI string `json:"uri"`
-
Value struct {
-
Subject struct {
-
URI string `json:"uri"`
-
CID string `json:"cid"`
-
} `json:"subject"`
-
Direction string `json:"direction"`
-
} `json:"value"`
-
} `json:"records"`
-
Cursor string `json:"cursor,omitempty"` // Pagination cursor for next page
-
}
-
-
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-
resp.Body.Close()
-
return nil, fmt.Errorf("failed to decode PDS response: %w", err)
-
}
-
resp.Body.Close()
-
-
// Find vote on this specific subject in current page
-
for _, record := range result.Records {
-
if record.Value.Subject.URI == subjectURI {
-
rkey := extractRKeyFromURI(record.URI)
-
log.Printf("[VOTE-PDS] Found existing vote on page %d: %s (direction: %s)", pageCount, record.URI, record.Value.Direction)
-
return &PDSVoteRecord{
-
URI: record.URI,
-
RKey: rkey,
-
Direction: record.Value.Direction,
-
Subject: struct {
-
URI string
-
CID string
-
}{
-
URI: record.Value.Subject.URI,
-
CID: record.Value.Subject.CID,
-
},
-
}, nil
-
}
-
}
-
-
// No more pages to check
-
if result.Cursor == "" {
-
log.Printf("[VOTE-PDS] No existing vote found after checking %d page(s)", pageCount)
-
break
-
}
-
-
// Move to next page
-
cursor = result.Cursor
-
}
-
-
// No vote found on this subject after paginating through all records
-
return nil, nil
-
}
-
-
// extractRKeyFromURI extracts the rkey from an AT-URI (at://did/collection/rkey)
-
func extractRKeyFromURI(uri string) string {
-
parts := strings.Split(uri, "/")
-
if len(parts) >= 4 {
-
return parts[len(parts)-1]
-
}
-
return ""
-
}
-
-
// ValidationError represents a validation error
-
type ValidationError struct {
-
Field string
-
Message string
-
}
-
-
func (e *ValidationError) Error() string {
-
return fmt.Sprintf("validation error for field '%s': %s", e.Field, e.Message)
-
}
-
-
// NewValidationError creates a new validation error
-
func NewValidationError(field, message string) error {
-
return &ValidationError{
-
Field: field,
-
Message: message,
-
}
-
}
-344
internal/core/votes/service_test.go
···
-
package votes
-
-
import (
-
"Coves/internal/core/posts"
-
"context"
-
"testing"
-
"time"
-
-
"github.com/stretchr/testify/assert"
-
"github.com/stretchr/testify/mock"
-
"github.com/stretchr/testify/require"
-
)
-
-
// Mock repositories for testing
-
type mockVoteRepository struct {
-
mock.Mock
-
}
-
-
func (m *mockVoteRepository) Create(ctx context.Context, vote *Vote) error {
-
args := m.Called(ctx, vote)
-
return args.Error(0)
-
}
-
-
func (m *mockVoteRepository) GetByURI(ctx context.Context, uri string) (*Vote, error) {
-
args := m.Called(ctx, uri)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).(*Vote), args.Error(1)
-
}
-
-
func (m *mockVoteRepository) GetByVoterAndSubject(ctx context.Context, voterDID string, subjectURI string) (*Vote, error) {
-
args := m.Called(ctx, voterDID, subjectURI)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).(*Vote), args.Error(1)
-
}
-
-
func (m *mockVoteRepository) Delete(ctx context.Context, uri string) error {
-
args := m.Called(ctx, uri)
-
return args.Error(0)
-
}
-
-
func (m *mockVoteRepository) ListBySubject(ctx context.Context, subjectURI string, limit, offset int) ([]*Vote, error) {
-
args := m.Called(ctx, subjectURI, limit, offset)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).([]*Vote), args.Error(1)
-
}
-
-
func (m *mockVoteRepository) ListByVoter(ctx context.Context, voterDID string, limit, offset int) ([]*Vote, error) {
-
args := m.Called(ctx, voterDID, limit, offset)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).([]*Vote), args.Error(1)
-
}
-
-
type mockPostRepository struct {
-
mock.Mock
-
}
-
-
func (m *mockPostRepository) GetByURI(ctx context.Context, uri string) (*posts.Post, error) {
-
args := m.Called(ctx, uri)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).(*posts.Post), args.Error(1)
-
}
-
-
func (m *mockPostRepository) Create(ctx context.Context, post *posts.Post) error {
-
args := m.Called(ctx, post)
-
return args.Error(0)
-
}
-
-
func (m *mockPostRepository) GetByRkey(ctx context.Context, communityDID, rkey string) (*posts.Post, error) {
-
args := m.Called(ctx, communityDID, rkey)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).(*posts.Post), args.Error(1)
-
}
-
-
func (m *mockPostRepository) ListByCommunity(ctx context.Context, communityDID string, limit, offset int) ([]*posts.Post, error) {
-
args := m.Called(ctx, communityDID, limit, offset)
-
if args.Get(0) == nil {
-
return nil, args.Error(1)
-
}
-
return args.Get(0).([]*posts.Post), args.Error(1)
-
}
-
-
func (m *mockPostRepository) Delete(ctx context.Context, uri string) error {
-
args := m.Called(ctx, uri)
-
return args.Error(0)
-
}
-
-
// TestVoteService_CreateVote_NoExistingVote tests creating a vote when no vote exists
-
// NOTE: This test is skipped because we need to refactor service to inject HTTP client
-
// for testing PDS writes. The full flow is covered by E2E tests.
-
func TestVoteService_CreateVote_NoExistingVote(t *testing.T) {
-
t.Skip("Skipping because we need to refactor service to inject HTTP client for testing PDS writes - covered by E2E tests")
-
-
// This test would verify:
-
// - Post exists check
-
// - No existing vote
-
// - PDS write succeeds
-
// - Response contains vote URI and CID
-
}
-
-
// TestVoteService_ValidateInput tests input validation
-
func TestVoteService_ValidateInput(t *testing.T) {
-
mockVoteRepo := new(mockVoteRepository)
-
mockPostRepo := new(mockPostRepository)
-
-
service := &voteService{
-
repo: mockVoteRepo,
-
postRepo: mockPostRepo,
-
pdsURL: "http://mock-pds.test",
-
}
-
-
ctx := context.Background()
-
-
tests := []struct {
-
name string
-
voterDID string
-
accessToken string
-
req CreateVoteRequest
-
expectedError string
-
}{
-
{
-
name: "missing voter DID",
-
voterDID: "",
-
accessToken: "token123",
-
req: CreateVoteRequest{Subject: "at://test", Direction: "up"},
-
expectedError: "voterDid",
-
},
-
{
-
name: "missing access token",
-
voterDID: "did:plc:test",
-
accessToken: "",
-
req: CreateVoteRequest{Subject: "at://test", Direction: "up"},
-
expectedError: "userAccessToken",
-
},
-
{
-
name: "missing subject",
-
voterDID: "did:plc:test",
-
accessToken: "token123",
-
req: CreateVoteRequest{Subject: "", Direction: "up"},
-
expectedError: "subject",
-
},
-
{
-
name: "invalid direction",
-
voterDID: "did:plc:test",
-
accessToken: "token123",
-
req: CreateVoteRequest{Subject: "at://test", Direction: "invalid"},
-
expectedError: "invalid vote direction",
-
},
-
{
-
name: "invalid subject format",
-
voterDID: "did:plc:test",
-
accessToken: "token123",
-
req: CreateVoteRequest{Subject: "http://not-at-uri", Direction: "up"},
-
expectedError: "invalid subject URI",
-
},
-
}
-
-
for _, tt := range tests {
-
t.Run(tt.name, func(t *testing.T) {
-
_, err := service.CreateVote(ctx, tt.voterDID, tt.accessToken, tt.req)
-
require.Error(t, err)
-
assert.Contains(t, err.Error(), tt.expectedError)
-
})
-
}
-
}
-
-
// TestVoteService_GetVote tests retrieving a vote
-
func TestVoteService_GetVote(t *testing.T) {
-
mockVoteRepo := new(mockVoteRepository)
-
mockPostRepo := new(mockPostRepository)
-
-
service := &voteService{
-
repo: mockVoteRepo,
-
postRepo: mockPostRepo,
-
pdsURL: "http://mock-pds.test",
-
}
-
-
ctx := context.Background()
-
voterDID := "did:plc:voter123"
-
subjectURI := "at://did:plc:community/social.coves.post.record/abc123"
-
-
expectedVote := &Vote{
-
ID: 1,
-
URI: "at://did:plc:voter123/social.coves.interaction.vote/xyz789",
-
VoterDID: voterDID,
-
SubjectURI: subjectURI,
-
Direction: "up",
-
CreatedAt: time.Now(),
-
}
-
-
mockVoteRepo.On("GetByVoterAndSubject", ctx, voterDID, subjectURI).Return(expectedVote, nil)
-
-
result, err := service.GetVote(ctx, voterDID, subjectURI)
-
assert.NoError(t, err)
-
assert.Equal(t, expectedVote.URI, result.URI)
-
assert.Equal(t, expectedVote.Direction, result.Direction)
-
-
mockVoteRepo.AssertExpectations(t)
-
}
-
-
// TestVoteService_GetVote_NotFound tests getting a non-existent vote
-
func TestVoteService_GetVote_NotFound(t *testing.T) {
-
mockVoteRepo := new(mockVoteRepository)
-
mockPostRepo := new(mockPostRepository)
-
-
service := &voteService{
-
repo: mockVoteRepo,
-
postRepo: mockPostRepo,
-
pdsURL: "http://mock-pds.test",
-
}
-
-
ctx := context.Background()
-
voterDID := "did:plc:voter123"
-
subjectURI := "at://did:plc:community/social.coves.post.record/noexist"
-
-
mockVoteRepo.On("GetByVoterAndSubject", ctx, voterDID, subjectURI).Return(nil, ErrVoteNotFound)
-
-
result, err := service.GetVote(ctx, voterDID, subjectURI)
-
assert.ErrorIs(t, err, ErrVoteNotFound)
-
assert.Nil(t, result)
-
-
mockVoteRepo.AssertExpectations(t)
-
}
-
-
// TestVoteService_SubjectNotFound tests voting on non-existent post
-
func TestVoteService_SubjectNotFound(t *testing.T) {
-
mockVoteRepo := new(mockVoteRepository)
-
mockPostRepo := new(mockPostRepository)
-
-
service := &voteService{
-
repo: mockVoteRepo,
-
postRepo: mockPostRepo,
-
pdsURL: "http://mock-pds.test",
-
}
-
-
ctx := context.Background()
-
voterDID := "did:plc:voter123"
-
subjectURI := "at://did:plc:community/social.coves.post.record/noexist"
-
-
// Mock post not found
-
mockPostRepo.On("GetByURI", ctx, subjectURI).Return(nil, posts.ErrNotFound)
-
-
req := CreateVoteRequest{
-
Subject: subjectURI,
-
Direction: "up",
-
}
-
-
_, err := service.CreateVote(ctx, voterDID, "token123", req)
-
assert.ErrorIs(t, err, ErrSubjectNotFound)
-
-
mockPostRepo.AssertExpectations(t)
-
}
-
-
// NOTE: Testing toggle logic (same direction, different direction) requires mocking HTTP client
-
// These tests are covered by integration tests in tests/integration/vote_e2e_test.go
-
// To add unit tests for toggle logic, we would need to:
-
// 1. Refactor voteService to accept an HTTP client interface
-
// 2. Mock the PDS createRecord and deleteRecord calls
-
// 3. Verify the correct sequence of operations
-
-
// Example of what toggle tests would look like (requires refactoring):
-
/*
-
func TestVoteService_ToggleSameDirection(t *testing.T) {
-
// Setup
-
mockVoteRepo := new(mockVoteRepository)
-
mockPostRepo := new(mockPostRepository)
-
mockPDSClient := new(mockPDSClient)
-
-
service := &voteService{
-
repo: mockVoteRepo,
-
postRepo: mockPostRepo,
-
pdsClient: mockPDSClient, // Would need to refactor to inject this
-
}
-
-
ctx := context.Background()
-
voterDID := "did:plc:voter123"
-
subjectURI := "at://did:plc:community/social.coves.post.record/abc123"
-
-
// Mock existing upvote
-
existingVote := &Vote{
-
URI: "at://did:plc:voter123/social.coves.interaction.vote/existing",
-
VoterDID: voterDID,
-
SubjectURI: subjectURI,
-
Direction: "up",
-
}
-
mockVoteRepo.On("GetByVoterAndSubject", ctx, voterDID, subjectURI).Return(existingVote, nil)
-
-
// Mock post exists
-
mockPostRepo.On("GetByURI", ctx, subjectURI).Return(&posts.Post{
-
URI: subjectURI,
-
CID: "bafyreigpost123",
-
}, nil)
-
-
// Mock PDS delete
-
mockPDSClient.On("DeleteRecord", voterDID, "social.coves.interaction.vote", "existing").Return(nil)
-
-
// Execute: Click upvote when already upvoted -> should delete
-
req := CreateVoteRequest{
-
Subject: subjectURI,
-
Direction: "up", // Same direction
-
}
-
-
response, err := service.CreateVote(ctx, voterDID, "token123", req)
-
-
// Assert
-
assert.NoError(t, err)
-
assert.Equal(t, "", response.URI, "Should return empty URI when toggled off")
-
mockPDSClient.AssertCalled(t, "DeleteRecord", voterDID, "social.coves.interaction.vote", "existing")
-
mockVoteRepo.AssertExpectations(t)
-
mockPostRepo.AssertExpectations(t)
-
}
-
-
func TestVoteService_ToggleDifferentDirection(t *testing.T) {
-
// Similar test but existing vote is "up" and new vote is "down"
-
// Should delete old vote and create new vote
-
// Would verify:
-
// 1. DeleteRecord called for old vote
-
// 2. CreateRecord called for new vote
-
// 3. Response contains new vote URI
-
}
-
*/
-
-
// Documentation test to explain toggle logic (verified by E2E tests)
-
func TestVoteService_ToggleLogicDocumentation(t *testing.T) {
-
t.Log("Toggle Logic (verified by E2E tests in tests/integration/vote_e2e_test.go):")
-
t.Log("1. No existing vote + upvote clicked โ†’ Create upvote")
-
t.Log("2. Upvote exists + upvote clicked โ†’ Delete upvote (toggle off)")
-
t.Log("3. Upvote exists + downvote clicked โ†’ Delete upvote + Create downvote (switch)")
-
t.Log("4. Downvote exists + downvote clicked โ†’ Delete downvote (toggle off)")
-
t.Log("5. Downvote exists + upvote clicked โ†’ Delete downvote + Create upvote (switch)")
-
t.Log("")
-
t.Log("To add unit tests for toggle logic, refactor service to accept HTTP client interface")
-
}
+5 -23
internal/core/votes/interfaces.go
···
import "context"
-
// Service defines the business logic interface for votes
-
// Coordinates between Repository, user PDS, and vote validation
-
type Service interface {
-
// CreateVote creates a new vote or toggles an existing vote
-
// Flow: Validate -> Check existing vote -> Handle toggle logic -> Write to user's PDS -> Return URI/CID
-
// AppView indexing happens asynchronously via Jetstream consumer
-
// Toggle logic:
-
// - No vote -> Create vote
-
// - Same direction -> Delete vote (toggle off)
-
// - Different direction -> Delete old + Create new (toggle direction)
-
CreateVote(ctx context.Context, voterDID string, userAccessToken string, req CreateVoteRequest) (*CreateVoteResponse, error)
-
-
// DeleteVote removes a vote from a post/comment
-
// Flow: Find vote -> Verify ownership -> Delete from user's PDS
-
// AppView decrements vote count asynchronously via Jetstream consumer
-
DeleteVote(ctx context.Context, voterDID string, userAccessToken string, req DeleteVoteRequest) error
-
-
// GetVote retrieves a user's vote on a specific subject
-
// Used to check vote state before creating/toggling
-
GetVote(ctx context.Context, voterDID string, subjectURI string) (*Vote, error)
-
}
-
// Repository defines the data access interface for votes
// Used by Jetstream consumer to index votes from firehose
+
//
+
// Architecture: Votes are written directly by clients to their PDS using
+
// com.atproto.repo.createRecord/deleteRecord. This AppView indexes votes
+
// from Jetstream for aggregation and querying.
type Repository interface {
// Create inserts a new vote into the AppView database
// Called by Jetstream consumer after vote is created on PDS
···
// GetByVoterAndSubject retrieves a user's vote on a specific subject
// Used to check existing vote state
-
GetByVoterAndSubject(ctx context.Context, voterDID string, subjectURI string) (*Vote, error)
+
GetByVoterAndSubject(ctx context.Context, voterDID, subjectURI string) (*Vote, error)
// Delete soft-deletes a vote (sets deleted_at)
// Called by Jetstream consumer after vote is deleted from PDS
+10 -31
internal/core/votes/vote.go
···
// Vote represents a vote in the AppView database
// Votes are indexed from the firehose after being written to user repositories
type Vote struct {
-
ID int64 `json:"id" db:"id"`
+
CreatedAt time.Time `json:"createdAt" db:"created_at"`
+
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"`
+
DeletedAt *time.Time `json:"deletedAt,omitempty" db:"deleted_at"`
URI string `json:"uri" db:"uri"`
CID string `json:"cid" db:"cid"`
RKey string `json:"rkey" db:"rkey"`
VoterDID string `json:"voterDid" db:"voter_did"`
SubjectURI string `json:"subjectUri" db:"subject_uri"`
SubjectCID string `json:"subjectCid" db:"subject_cid"`
-
Direction string `json:"direction" db:"direction"` // "up" or "down"
-
CreatedAt time.Time `json:"createdAt" db:"created_at"`
-
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"`
-
DeletedAt *time.Time `json:"deletedAt,omitempty" db:"deleted_at"`
-
}
-
-
// CreateVoteRequest represents input for creating a new vote
-
// Matches social.coves.interaction.createVote lexicon input schema
-
type CreateVoteRequest struct {
-
Subject string `json:"subject"` // AT-URI of post/comment
-
Direction string `json:"direction"` // "up" or "down"
-
}
-
-
// CreateVoteResponse represents the response from creating a vote
-
// Matches social.coves.interaction.createVote lexicon output schema
-
type CreateVoteResponse struct {
-
URI string `json:"uri"` // AT-URI of created vote record
-
CID string `json:"cid"` // CID of created vote record
-
Existing *string `json:"existing,omitempty"` // AT-URI of existing vote if updating
-
}
-
-
// DeleteVoteRequest represents input for deleting a vote
-
// Matches social.coves.interaction.deleteVote lexicon input schema
-
type DeleteVoteRequest struct {
-
Subject string `json:"subject"` // AT-URI of post/comment
+
Direction string `json:"direction" db:"direction"`
+
ID int64 `json:"id" db:"id"`
}
-
// VoteRecord represents the actual atProto record structure written to PDS
+
// VoteRecord represents the atProto record structure indexed from Jetstream
// This is the data structure that gets stored in the user's repository
type VoteRecord struct {
-
Type string `json:"$type"`
-
Subject StrongRef `json:"subject"`
-
Direction string `json:"direction"` // "up" or "down"
-
CreatedAt string `json:"createdAt"`
+
Type string `json:"$type"`
+
Subject StrongRef `json:"subject"`
+
Direction string `json:"direction"` // "up" or "down"
+
CreatedAt string `json:"createdAt"`
}
// StrongRef represents a strong reference to a record (URI + CID)
-129
internal/api/handlers/vote/create_vote.go
···
-
package vote
-
-
import (
-
"Coves/internal/api/handlers"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/votes"
-
"encoding/json"
-
"log"
-
"net/http"
-
)
-
-
// CreateVoteHandler handles vote creation
-
type CreateVoteHandler struct {
-
service votes.Service
-
}
-
-
// NewCreateVoteHandler creates a new create vote handler
-
func NewCreateVoteHandler(service votes.Service) *CreateVoteHandler {
-
return &CreateVoteHandler{
-
service: service,
-
}
-
}
-
-
// HandleCreateVote creates a vote or toggles an existing vote
-
// POST /xrpc/social.coves.interaction.createVote
-
//
-
// Request body: { "subject": "at://...", "direction": "up" | "down" }
-
func (h *CreateVoteHandler) HandleCreateVote(w http.ResponseWriter, r *http.Request) {
-
if r.Method != http.MethodPost {
-
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
-
return
-
}
-
-
// Parse request body
-
var req votes.CreateVoteRequest
-
-
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "Invalid request body")
-
return
-
}
-
-
if req.Subject == "" {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "subject is required")
-
return
-
}
-
-
if req.Direction == "" {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "direction is required")
-
return
-
}
-
-
if req.Direction != "up" && req.Direction != "down" {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "direction must be 'up' or 'down'")
-
return
-
}
-
-
// Extract authenticated user DID and access token from request context (injected by auth middleware)
-
voterDID := middleware.GetUserDID(r)
-
if voterDID == "" {
-
handlers.WriteError(w, http.StatusUnauthorized, "AuthRequired", "Authentication required")
-
return
-
}
-
-
userAccessToken := middleware.GetUserAccessToken(r)
-
if userAccessToken == "" {
-
handlers.WriteError(w, http.StatusUnauthorized, "AuthRequired", "Missing access token")
-
return
-
}
-
-
// Create vote via service (write-forward to user's PDS)
-
response, err := h.service.CreateVote(r.Context(), voterDID, userAccessToken, req)
-
if err != nil {
-
handleServiceError(w, err)
-
return
-
}
-
-
// Handle toggle-off case (vote was deleted, not created)
-
if response.URI == "" {
-
// Vote was toggled off (deleted)
-
w.Header().Set("Content-Type", "application/json")
-
w.WriteHeader(http.StatusOK)
-
if err := json.NewEncoder(w).Encode(map[string]interface{}{
-
"deleted": true,
-
}); err != nil {
-
log.Printf("Failed to encode response: %v", err)
-
}
-
return
-
}
-
-
// Return success response
-
responseMap := map[string]interface{}{
-
"uri": response.URI,
-
"cid": response.CID,
-
}
-
-
if response.Existing != nil {
-
responseMap["existing"] = *response.Existing
-
}
-
-
w.Header().Set("Content-Type", "application/json")
-
w.WriteHeader(http.StatusOK)
-
if err := json.NewEncoder(w).Encode(responseMap); err != nil {
-
log.Printf("Failed to encode response: %v", err)
-
}
-
}
-
-
// handleServiceError converts service errors to HTTP responses
-
func handleServiceError(w http.ResponseWriter, err error) {
-
switch err {
-
case votes.ErrVoteNotFound:
-
handlers.WriteError(w, http.StatusNotFound, "VoteNotFound", "Vote not found")
-
case votes.ErrSubjectNotFound:
-
handlers.WriteError(w, http.StatusNotFound, "SubjectNotFound", "Post or comment not found")
-
case votes.ErrInvalidDirection:
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "Invalid vote direction")
-
case votes.ErrInvalidSubject:
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "Invalid subject URI")
-
case votes.ErrVoteAlreadyExists:
-
handlers.WriteError(w, http.StatusConflict, "VoteAlreadyExists", "Vote already exists")
-
case votes.ErrNotAuthorized:
-
handlers.WriteError(w, http.StatusForbidden, "NotAuthorized", "Not authorized")
-
case votes.ErrBanned:
-
handlers.WriteError(w, http.StatusForbidden, "Banned", "User is banned from this community")
-
default:
-
// Check for validation errors
-
log.Printf("Vote creation error: %v", err)
-
handlers.WriteError(w, http.StatusInternalServerError, "InternalError", "Failed to create vote")
-
}
-
}
-75
internal/api/handlers/vote/delete_vote.go
···
-
package vote
-
-
import (
-
"Coves/internal/api/handlers"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/votes"
-
"encoding/json"
-
"log"
-
"net/http"
-
)
-
-
// DeleteVoteHandler handles vote deletion
-
type DeleteVoteHandler struct {
-
service votes.Service
-
}
-
-
// NewDeleteVoteHandler creates a new delete vote handler
-
func NewDeleteVoteHandler(service votes.Service) *DeleteVoteHandler {
-
return &DeleteVoteHandler{
-
service: service,
-
}
-
}
-
-
// HandleDeleteVote removes a vote from a post/comment
-
// POST /xrpc/social.coves.interaction.deleteVote
-
//
-
// Request body: { "subject": "at://..." }
-
func (h *DeleteVoteHandler) HandleDeleteVote(w http.ResponseWriter, r *http.Request) {
-
if r.Method != http.MethodPost {
-
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
-
return
-
}
-
-
// Parse request body
-
var req votes.DeleteVoteRequest
-
-
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "Invalid request body")
-
return
-
}
-
-
if req.Subject == "" {
-
handlers.WriteError(w, http.StatusBadRequest, "InvalidRequest", "subject is required")
-
return
-
}
-
-
// Extract authenticated user DID and access token from request context (injected by auth middleware)
-
voterDID := middleware.GetUserDID(r)
-
if voterDID == "" {
-
handlers.WriteError(w, http.StatusUnauthorized, "AuthRequired", "Authentication required")
-
return
-
}
-
-
userAccessToken := middleware.GetUserAccessToken(r)
-
if userAccessToken == "" {
-
handlers.WriteError(w, http.StatusUnauthorized, "AuthRequired", "Missing access token")
-
return
-
}
-
-
// Delete vote via service (delete record on PDS)
-
err := h.service.DeleteVote(r.Context(), voterDID, userAccessToken, req)
-
if err != nil {
-
handleServiceError(w, err)
-
return
-
}
-
-
// Return success response
-
w.Header().Set("Content-Type", "application/json")
-
w.WriteHeader(http.StatusOK)
-
if err := json.NewEncoder(w).Encode(map[string]interface{}{
-
"success": true,
-
}); err != nil {
-
log.Printf("Failed to encode response: %v", err)
-
}
-
}
-24
internal/api/routes/vote.go
···
-
package routes
-
-
import (
-
"Coves/internal/api/handlers/vote"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/votes"
-
-
"github.com/go-chi/chi/v5"
-
)
-
-
// RegisterVoteRoutes registers vote-related XRPC endpoints on the router
-
// Implements social.coves.interaction.* lexicon endpoints for voting
-
func RegisterVoteRoutes(r chi.Router, service votes.Service, authMiddleware *middleware.AtProtoAuthMiddleware) {
-
// Initialize handlers
-
createVoteHandler := vote.NewCreateVoteHandler(service)
-
deleteVoteHandler := vote.NewDeleteVoteHandler(service)
-
-
// Procedure endpoints (POST) - require authentication
-
// social.coves.interaction.createVote - create or toggle a vote on a post/comment
-
r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.interaction.createVote", createVoteHandler.HandleCreateVote)
-
-
// social.coves.interaction.deleteVote - delete a vote from a post/comment
-
r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.interaction.deleteVote", deleteVoteHandler.HandleDeleteVote)
-
}
-789
tests/integration/vote_e2e_test.go
···
-
package integration
-
-
import (
-
"Coves/internal/api/handlers/vote"
-
"Coves/internal/api/middleware"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
-
"Coves/internal/core/votes"
-
"Coves/internal/db/postgres"
-
"bytes"
-
"context"
-
"database/sql"
-
"encoding/json"
-
"fmt"
-
"net"
-
"net/http"
-
"net/http/httptest"
-
"os"
-
"strings"
-
"testing"
-
"time"
-
-
"github.com/gorilla/websocket"
-
_ "github.com/lib/pq"
-
"github.com/pressly/goose/v3"
-
"github.com/stretchr/testify/assert"
-
"github.com/stretchr/testify/require"
-
)
-
-
// TestVote_E2E_WithJetstream tests the full vote flow with simulated Jetstream:
-
// XRPC endpoint โ†’ AppView Service โ†’ PDS write โ†’ (Simulated) Jetstream consumer โ†’ DB indexing
-
//
-
// This is a fast integration test that simulates what happens in production:
-
// 1. Client calls POST /xrpc/social.coves.interaction.createVote with auth token
-
// 2. Handler validates and calls VoteService.CreateVote()
-
// 3. Service writes vote to user's PDS repository
-
// 4. (Simulated) PDS broadcasts event to Jetstream
-
// 5. Jetstream consumer receives event and indexes vote in AppView DB
-
// 6. Vote is now queryable from AppView + post counts updated
-
//
-
// NOTE: This test simulates the Jetstream event (step 4-5) since we don't have
-
// a live PDS/Jetstream in test environment. For true live testing, use TestVote_E2E_LivePDS.
-
func TestVote_E2E_WithJetstream(t *testing.T) {
-
db := setupTestDB(t)
-
defer func() {
-
if err := db.Close(); err != nil {
-
t.Logf("Failed to close database: %v", err)
-
}
-
}()
-
-
// Cleanup old test data first
-
_, _ = db.Exec("DELETE FROM votes WHERE voter_did LIKE 'did:plc:votee2e%'")
-
_, _ = db.Exec("DELETE FROM posts WHERE community_did = 'did:plc:votecommunity123'")
-
_, _ = db.Exec("DELETE FROM communities WHERE did = 'did:plc:votecommunity123'")
-
_, _ = db.Exec("DELETE FROM users WHERE did LIKE 'did:plc:votee2e%'")
-
-
// Setup repositories
-
userRepo := postgres.NewUserRepository(db)
-
communityRepo := postgres.NewCommunityRepository(db)
-
postRepo := postgres.NewPostRepository(db)
-
voteRepo := postgres.NewVoteRepository(db)
-
-
// Setup user service for consumers
-
identityConfig := identity.DefaultConfig()
-
identityResolver := identity.NewResolver(db, identityConfig)
-
userService := users.NewUserService(userRepo, identityResolver, "http://localhost:3001")
-
-
// Create test users (voter and author)
-
voter := createTestUser(t, db, "voter.test", "did:plc:votee2evoter123")
-
author := createTestUser(t, db, "author.test", "did:plc:votee2eauthor123")
-
-
// Create test community
-
community := &communities.Community{
-
DID: "did:plc:votecommunity123",
-
Handle: "votecommunity.test.coves.social",
-
Name: "votecommunity",
-
DisplayName: "Vote Test Community",
-
OwnerDID: "did:plc:votecommunity123",
-
CreatedByDID: author.DID,
-
HostedByDID: "did:web:coves.test",
-
Visibility: "public",
-
ModerationType: "moderator",
-
RecordURI: "at://did:plc:votecommunity123/social.coves.community.profile/self",
-
RecordCID: "fakecid123",
-
PDSAccessToken: "fake_token_for_testing",
-
PDSRefreshToken: "fake_refresh_token",
-
}
-
_, err := communityRepo.Create(context.Background(), community)
-
if err != nil {
-
t.Fatalf("Failed to create test community: %v", err)
-
}
-
-
// Create test post (subject of votes)
-
postRkey := generateTID()
-
postURI := fmt.Sprintf("at://%s/social.coves.post.record/%s", community.DID, postRkey)
-
postCID := "bafy2bzacepostcid123"
-
post := &posts.Post{
-
URI: postURI,
-
CID: postCID,
-
RKey: postRkey,
-
AuthorDID: author.DID,
-
CommunityDID: community.DID,
-
Title: stringPtr("Test Post for Voting"),
-
Content: stringPtr("This post will receive votes"),
-
CreatedAt: time.Now(),
-
UpvoteCount: 0,
-
DownvoteCount: 0,
-
Score: 0,
-
}
-
err = postRepo.Create(context.Background(), post)
-
if err != nil {
-
t.Fatalf("Failed to create test post: %v", err)
-
}
-
-
t.Run("Full E2E flow - Create upvote via Jetstream", func(t *testing.T) {
-
ctx := context.Background()
-
-
// STEP 1: Simulate Jetstream consumer receiving a vote CREATE event
-
// In real production, this event comes from PDS via Jetstream WebSocket
-
voteRkey := generateTID()
-
voteURI := fmt.Sprintf("at://%s/social.coves.interaction.vote/%s", voter.DID, voteRkey)
-
-
jetstreamEvent := jetstream.JetstreamEvent{
-
Did: voter.DID, // Vote comes from voter's repo
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "create",
-
Collection: "social.coves.interaction.vote",
-
RKey: voteRkey,
-
CID: "bafy2bzacevotecid123",
-
Record: map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": postURI,
-
"cid": postCID,
-
},
-
"direction": "up",
-
"createdAt": time.Now().Format(time.RFC3339),
-
},
-
},
-
}
-
-
// STEP 2: Process event through Jetstream consumer
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
err := consumer.HandleEvent(ctx, &jetstreamEvent)
-
if err != nil {
-
t.Fatalf("Jetstream consumer failed to process event: %v", err)
-
}
-
-
// STEP 3: Verify vote was indexed in AppView database
-
indexedVote, err := voteRepo.GetByURI(ctx, voteURI)
-
if err != nil {
-
t.Fatalf("Vote not indexed in AppView: %v", err)
-
}
-
-
// STEP 4: Verify vote fields are correct
-
assert.Equal(t, voteURI, indexedVote.URI, "Vote URI should match")
-
assert.Equal(t, voter.DID, indexedVote.VoterDID, "Voter DID should match")
-
assert.Equal(t, postURI, indexedVote.SubjectURI, "Subject URI should match")
-
assert.Equal(t, postCID, indexedVote.SubjectCID, "Subject CID should match (strong reference)")
-
assert.Equal(t, "up", indexedVote.Direction, "Direction should be 'up'")
-
-
// STEP 5: Verify post vote counts were updated atomically
-
updatedPost, err := postRepo.GetByURI(ctx, postURI)
-
require.NoError(t, err, "Post should still exist")
-
assert.Equal(t, 1, updatedPost.UpvoteCount, "Post upvote_count should be 1")
-
assert.Equal(t, 0, updatedPost.DownvoteCount, "Post downvote_count should be 0")
-
assert.Equal(t, 1, updatedPost.Score, "Post score should be 1 (upvotes - downvotes)")
-
-
t.Logf("โœ“ E2E test passed! Vote indexed with URI: %s, post upvotes: %d", indexedVote.URI, updatedPost.UpvoteCount)
-
})
-
-
t.Run("Create downvote and verify counts", func(t *testing.T) {
-
ctx := context.Background()
-
-
// Create a different voter for this test to avoid unique constraint violation
-
downvoter := createTestUser(t, db, "downvoter.test", "did:plc:votee2edownvoter")
-
-
// Create downvote
-
voteRkey := generateTID()
-
voteURI := fmt.Sprintf("at://%s/social.coves.interaction.vote/%s", downvoter.DID, voteRkey)
-
-
jetstreamEvent := jetstream.JetstreamEvent{
-
Did: downvoter.DID,
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "create",
-
Collection: "social.coves.interaction.vote",
-
RKey: voteRkey,
-
CID: "bafy2bzacedownvotecid",
-
Record: map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": postURI,
-
"cid": postCID,
-
},
-
"direction": "down",
-
"createdAt": time.Now().Format(time.RFC3339),
-
},
-
},
-
}
-
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
err := consumer.HandleEvent(ctx, &jetstreamEvent)
-
require.NoError(t, err, "Consumer should process downvote")
-
-
// Verify vote indexed
-
indexedVote, err := voteRepo.GetByURI(ctx, voteURI)
-
require.NoError(t, err, "Downvote should be indexed")
-
assert.Equal(t, "down", indexedVote.Direction, "Direction should be 'down'")
-
-
// Verify post counts (now has 1 upvote + 1 downvote from previous test)
-
updatedPost, err := postRepo.GetByURI(ctx, postURI)
-
require.NoError(t, err)
-
assert.Equal(t, 1, updatedPost.UpvoteCount, "Upvote count should still be 1")
-
assert.Equal(t, 1, updatedPost.DownvoteCount, "Downvote count should be 1")
-
assert.Equal(t, 0, updatedPost.Score, "Score should be 0 (1 up - 1 down)")
-
-
t.Logf("โœ“ Downvote indexed, post counts: up=%d down=%d score=%d",
-
updatedPost.UpvoteCount, updatedPost.DownvoteCount, updatedPost.Score)
-
})
-
-
t.Run("Delete vote and verify counts decremented", func(t *testing.T) {
-
ctx := context.Background()
-
-
// Create a different voter for this test
-
deletevoter := createTestUser(t, db, "deletevoter.test", "did:plc:votee2edeletevoter")
-
-
// Get current counts
-
beforePost, _ := postRepo.GetByURI(ctx, postURI)
-
-
// Create a vote first
-
voteRkey := generateTID()
-
voteURI := fmt.Sprintf("at://%s/social.coves.interaction.vote/%s", deletevoter.DID, voteRkey)
-
-
createEvent := jetstream.JetstreamEvent{
-
Did: deletevoter.DID,
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "create",
-
Collection: "social.coves.interaction.vote",
-
RKey: voteRkey,
-
CID: "bafy2bzacedeleteme",
-
Record: map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": postURI,
-
"cid": postCID,
-
},
-
"direction": "up",
-
"createdAt": time.Now().Format(time.RFC3339),
-
},
-
},
-
}
-
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
err := consumer.HandleEvent(ctx, &createEvent)
-
require.NoError(t, err)
-
-
// Now delete it
-
deleteEvent := jetstream.JetstreamEvent{
-
Did: deletevoter.DID,
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "delete",
-
Collection: "social.coves.interaction.vote",
-
RKey: voteRkey,
-
},
-
}
-
-
err = consumer.HandleEvent(ctx, &deleteEvent)
-
require.NoError(t, err, "Consumer should process delete")
-
-
// Verify vote is soft-deleted
-
deletedVote, err := voteRepo.GetByURI(ctx, voteURI)
-
require.NoError(t, err, "Vote should still exist (soft delete)")
-
assert.NotNil(t, deletedVote.DeletedAt, "Vote should have deleted_at timestamp")
-
-
// Verify post counts decremented
-
afterPost, err := postRepo.GetByURI(ctx, postURI)
-
require.NoError(t, err)
-
assert.Equal(t, beforePost.UpvoteCount, afterPost.UpvoteCount,
-
"Upvote count should be back to original (delete decremented)")
-
-
t.Logf("โœ“ Vote deleted, counts decremented correctly")
-
})
-
-
t.Run("Idempotent indexing - duplicate events", func(t *testing.T) {
-
ctx := context.Background()
-
-
// Create a different voter for this test
-
idempotentvoter := createTestUser(t, db, "idempotentvoter.test", "did:plc:votee2eidempotent")
-
-
// Create a vote
-
voteRkey := generateTID()
-
voteURI := fmt.Sprintf("at://%s/social.coves.interaction.vote/%s", idempotentvoter.DID, voteRkey)
-
-
event := jetstream.JetstreamEvent{
-
Did: idempotentvoter.DID,
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "create",
-
Collection: "social.coves.interaction.vote",
-
RKey: voteRkey,
-
CID: "bafy2bzaceidempotent",
-
Record: map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": postURI,
-
"cid": postCID,
-
},
-
"direction": "up",
-
"createdAt": time.Now().Format(time.RFC3339),
-
},
-
},
-
}
-
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
-
// First event - should succeed
-
err := consumer.HandleEvent(ctx, &event)
-
require.NoError(t, err, "First event should succeed")
-
-
// Get counts after first event
-
firstPost, _ := postRepo.GetByURI(ctx, postURI)
-
-
// Second event (duplicate) - should be handled gracefully
-
err = consumer.HandleEvent(ctx, &event)
-
require.NoError(t, err, "Duplicate event should be handled gracefully")
-
-
// Verify counts NOT incremented again (idempotent)
-
secondPost, err := postRepo.GetByURI(ctx, postURI)
-
require.NoError(t, err)
-
assert.Equal(t, firstPost.UpvoteCount, secondPost.UpvoteCount,
-
"Duplicate event should not increment count again")
-
-
// Verify only one vote in database
-
vote, err := voteRepo.GetByURI(ctx, voteURI)
-
require.NoError(t, err)
-
assert.Equal(t, voteURI, vote.URI, "Should still be the same vote")
-
-
t.Logf("โœ“ Idempotency test passed - duplicate event handled correctly")
-
})
-
-
t.Run("Security: Vote from wrong repository rejected", func(t *testing.T) {
-
ctx := context.Background()
-
-
// SECURITY TEST: Try to create a vote that claims to be from the voter
-
// but actually comes from a different user's repository
-
// This should be REJECTED by the consumer
-
-
maliciousUser := createTestUser(t, db, "hacker.test", "did:plc:hacker123")
-
-
maliciousEvent := jetstream.JetstreamEvent{
-
Did: maliciousUser.DID, // Event from hacker's repo
-
Kind: "commit",
-
Commit: &jetstream.CommitEvent{
-
Operation: "create",
-
Collection: "social.coves.interaction.vote",
-
RKey: generateTID(),
-
CID: "bafy2bzacefake",
-
Record: map[string]interface{}{
-
"$type": "social.coves.interaction.vote",
-
"subject": map[string]interface{}{
-
"uri": postURI,
-
"cid": postCID,
-
},
-
"direction": "up",
-
"createdAt": time.Now().Format(time.RFC3339),
-
},
-
},
-
}
-
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
err := consumer.HandleEvent(ctx, &maliciousEvent)
-
-
// Should succeed (vote is created in hacker's repo, which is valid)
-
// The vote record itself is FROM their repo, so it's legitimate
-
// This is different from posts which must come from community repo
-
assert.NoError(t, err, "Votes in user repos are valid")
-
-
t.Logf("โœ“ Security validation passed - user repo votes are allowed")
-
})
-
}
-
-
// TestVote_E2E_LivePDS tests the COMPLETE end-to-end flow with a live PDS:
-
// 1. HTTP POST to /xrpc/social.coves.interaction.createVote (with auth)
-
// 2. Handler โ†’ Service โ†’ Write to user's PDS repository
-
// 3. PDS โ†’ Jetstream firehose event
-
// 4. Jetstream consumer โ†’ Index in AppView database
-
// 5. Verify vote appears in database + post counts updated
-
//
-
// This is a TRUE E2E test that requires:
-
// - Live PDS running at PDS_URL (default: http://localhost:3001)
-
// - Live Jetstream running at JETSTREAM_URL (default: ws://localhost:6008/subscribe)
-
// - Test database running
-
func TestVote_E2E_LivePDS(t *testing.T) {
-
if testing.Short() {
-
t.Skip("Skipping live PDS E2E test in short mode")
-
}
-
-
// Setup test database
-
dbURL := os.Getenv("TEST_DATABASE_URL")
-
if dbURL == "" {
-
dbURL = "postgres://test_user:test_password@localhost:5434/coves_test?sslmode=disable"
-
}
-
-
db, err := sql.Open("postgres", dbURL)
-
require.NoError(t, err, "Failed to connect to test database")
-
defer func() {
-
if closeErr := db.Close(); closeErr != nil {
-
t.Logf("Failed to close database: %v", closeErr)
-
}
-
}()
-
-
// Run migrations
-
require.NoError(t, goose.SetDialect("postgres"))
-
require.NoError(t, goose.Up(db, "../../internal/db/migrations"))
-
-
// Check if PDS is running
-
pdsURL := os.Getenv("PDS_URL")
-
if pdsURL == "" {
-
pdsURL = "http://localhost:3001"
-
}
-
-
healthResp, err := http.Get(pdsURL + "/xrpc/_health")
-
if err != nil {
-
t.Skipf("PDS not running at %s: %v", pdsURL, err)
-
}
-
_ = healthResp.Body.Close()
-
-
// Check if Jetstream is running
-
jetstreamHealthURL := "http://127.0.0.1:6009/metrics" // Use 127.0.0.1 for IPv4
-
jetstreamResp, err := http.Get(jetstreamHealthURL)
-
if err != nil {
-
t.Skipf("Jetstream not running: %v", err)
-
}
-
_ = jetstreamResp.Body.Close()
-
-
ctx := context.Background()
-
-
// Cleanup old test data
-
_, _ = db.Exec("DELETE FROM votes WHERE voter_did LIKE 'did:plc:votee2elive%' OR voter_did IN (SELECT did FROM users WHERE handle LIKE '%votee2elive%')")
-
_, _ = db.Exec("DELETE FROM posts WHERE community_did LIKE 'did:plc:votee2elive%'")
-
_, _ = db.Exec("DELETE FROM communities WHERE did LIKE 'did:plc:votee2elive%'")
-
_, _ = db.Exec("DELETE FROM users WHERE did LIKE 'did:plc:votee2elive%' OR handle LIKE '%votee2elive%' OR handle LIKE '%authore2e%'")
-
-
// Setup repositories and services
-
userRepo := postgres.NewUserRepository(db)
-
communityRepo := postgres.NewCommunityRepository(db)
-
postRepo := postgres.NewPostRepository(db)
-
voteRepo := postgres.NewVoteRepository(db)
-
-
identityConfig := identity.DefaultConfig()
-
identityResolver := identity.NewResolver(db, identityConfig)
-
userService := users.NewUserService(userRepo, identityResolver, pdsURL)
-
-
// Create test voter
-
voter := createTestUser(t, db, "votee2elive.bsky.social", "did:plc:votee2elive123")
-
-
// Create test community and post (simplified - using fake credentials)
-
author := createTestUser(t, db, "authore2e.bsky.social", "did:plc:votee2eliveauthor")
-
community := &communities.Community{
-
DID: "did:plc:votee2elivecommunity",
-
Handle: "votee2elivecommunity.test.coves.social",
-
Name: "votee2elivecommunity",
-
DisplayName: "Vote E2E Live Community",
-
OwnerDID: author.DID,
-
CreatedByDID: author.DID,
-
HostedByDID: "did:web:coves.test",
-
Visibility: "public",
-
ModerationType: "moderator",
-
RecordURI: "at://did:plc:votee2elivecommunity/social.coves.community.profile/self",
-
RecordCID: "fakecid",
-
PDSAccessToken: "fake_token",
-
PDSRefreshToken: "fake_refresh",
-
}
-
_, err = communityRepo.Create(ctx, community)
-
require.NoError(t, err)
-
-
postRkey := generateTID()
-
postURI := fmt.Sprintf("at://%s/social.coves.post.record/%s", community.DID, postRkey)
-
postCID := "bafy2bzaceposte2e"
-
post := &posts.Post{
-
URI: postURI,
-
CID: postCID,
-
RKey: postRkey,
-
AuthorDID: author.DID,
-
CommunityDID: community.DID,
-
Title: stringPtr("E2E Vote Test Post"),
-
Content: stringPtr("This post will receive live votes"),
-
CreatedAt: time.Now(),
-
UpvoteCount: 0,
-
DownvoteCount: 0,
-
Score: 0,
-
}
-
err = postRepo.Create(ctx, post)
-
require.NoError(t, err)
-
-
// Setup vote service and handler
-
voteService := votes.NewVoteService(voteRepo, postRepo, pdsURL)
-
voteHandler := vote.NewCreateVoteHandler(voteService)
-
authMiddleware := middleware.NewAtProtoAuthMiddleware(nil, true) // Skip JWT verification for testing
-
-
t.Run("Live E2E: Create vote and verify via Jetstream", func(t *testing.T) {
-
t.Logf("\n๐Ÿ”„ TRUE E2E: Creating vote via XRPC endpoint...")
-
-
// Authenticate voter with PDS to get real access token
-
// Note: This assumes the voter account already exists on PDS
-
// For a complete test, you'd create the account first via com.atproto.server.createAccount
-
instanceHandle := os.Getenv("PDS_INSTANCE_HANDLE")
-
instancePassword := os.Getenv("PDS_INSTANCE_PASSWORD")
-
if instanceHandle == "" {
-
instanceHandle = "testuser123.local.coves.dev"
-
}
-
if instancePassword == "" {
-
instancePassword = "test-password-123"
-
}
-
-
t.Logf("๐Ÿ” Authenticating voter with PDS as: %s", instanceHandle)
-
voterAccessToken, voterDID, err := authenticateWithPDS(pdsURL, instanceHandle, instancePassword)
-
if err != nil {
-
t.Skipf("Failed to authenticate voter with PDS (account may not exist): %v", err)
-
}
-
t.Logf("โœ… Authenticated - Voter DID: %s", voterDID)
-
-
// Update voter record to match authenticated DID
-
_, err = db.Exec("UPDATE users SET did = $1 WHERE did = $2", voterDID, voter.DID)
-
require.NoError(t, err)
-
voter.DID = voterDID
-
-
// Build HTTP request for vote creation
-
reqBody := map[string]interface{}{
-
"subject": postURI,
-
"direction": "up",
-
}
-
reqJSON, err := json.Marshal(reqBody)
-
require.NoError(t, err)
-
-
// Create HTTP request
-
req := httptest.NewRequest("POST", "/xrpc/social.coves.interaction.createVote", bytes.NewReader(reqJSON))
-
req.Header.Set("Content-Type", "application/json")
-
-
// Use REAL PDS access token (not mock JWT)
-
req.Header.Set("Authorization", "Bearer "+voterAccessToken)
-
-
// Execute request through auth middleware + handler
-
rr := httptest.NewRecorder()
-
handler := authMiddleware.RequireAuth(http.HandlerFunc(voteHandler.HandleCreateVote))
-
handler.ServeHTTP(rr, req)
-
-
// Check response
-
require.Equal(t, http.StatusOK, rr.Code, "Handler should return 200 OK, body: %s", rr.Body.String())
-
-
// Parse response
-
var response map[string]interface{}
-
err = json.NewDecoder(rr.Body).Decode(&response)
-
require.NoError(t, err, "Failed to parse response")
-
-
voteURI := response["uri"].(string)
-
voteCID := response["cid"].(string)
-
-
t.Logf("โœ… Vote created on PDS:")
-
t.Logf(" URI: %s", voteURI)
-
t.Logf(" CID: %s", voteCID)
-
-
// ====================================================================================
-
// Part 2: Query the PDS to verify the vote record exists
-
// ====================================================================================
-
t.Run("2a. Verify vote record on PDS", func(t *testing.T) {
-
t.Logf("\n๐Ÿ“ก Querying PDS for vote record...")
-
-
// Extract rkey from vote URI (at://did/collection/rkey)
-
parts := strings.Split(voteURI, "/")
-
rkey := parts[len(parts)-1]
-
-
// Query PDS for the vote record
-
getRecordURL := fmt.Sprintf("%s/xrpc/com.atproto.repo.getRecord?repo=%s&collection=%s&rkey=%s",
-
pdsURL, voterDID, "social.coves.interaction.vote", rkey)
-
-
t.Logf(" GET %s", getRecordURL)
-
-
pdsResp, err := http.Get(getRecordURL)
-
require.NoError(t, err, "Failed to query PDS")
-
defer pdsResp.Body.Close()
-
-
require.Equal(t, http.StatusOK, pdsResp.StatusCode, "Vote record should exist on PDS")
-
-
var pdsRecord struct {
-
Value map[string]interface{} `json:"value"`
-
URI string `json:"uri"`
-
CID string `json:"cid"`
-
}
-
-
err = json.NewDecoder(pdsResp.Body).Decode(&pdsRecord)
-
require.NoError(t, err, "Failed to decode PDS response")
-
-
t.Logf("โœ… Vote record found on PDS!")
-
t.Logf(" URI: %s", pdsRecord.URI)
-
t.Logf(" CID: %s", pdsRecord.CID)
-
t.Logf(" Direction: %v", pdsRecord.Value["direction"])
-
t.Logf(" Subject: %v", pdsRecord.Value["subject"])
-
-
// Verify the record matches what we created
-
assert.Equal(t, voteURI, pdsRecord.URI, "PDS URI should match")
-
assert.Equal(t, voteCID, pdsRecord.CID, "PDS CID should match")
-
assert.Equal(t, "up", pdsRecord.Value["direction"], "Direction should be 'up'")
-
-
// Print full record for inspection
-
recordJSON, _ := json.MarshalIndent(pdsRecord.Value, " ", " ")
-
t.Logf(" Full record:\n %s", string(recordJSON))
-
})
-
-
// ====================================================================================
-
// Part 2b: TRUE E2E - Real Jetstream Firehose Consumer
-
// ====================================================================================
-
t.Run("2b. Real Jetstream Firehose Consumption", func(t *testing.T) {
-
t.Logf("\n๐Ÿ”„ TRUE E2E: Subscribing to real Jetstream firehose...")
-
-
// Get PDS hostname for Jetstream filtering
-
pdsHostname := strings.TrimPrefix(pdsURL, "http://")
-
pdsHostname = strings.TrimPrefix(pdsHostname, "https://")
-
pdsHostname = strings.Split(pdsHostname, ":")[0] // Remove port
-
-
// Build Jetstream URL with filters for vote records
-
jetstreamURL := fmt.Sprintf("ws://%s:6008/subscribe?wantedCollections=social.coves.interaction.vote",
-
pdsHostname)
-
-
t.Logf(" Jetstream URL: %s", jetstreamURL)
-
t.Logf(" Looking for vote URI: %s", voteURI)
-
t.Logf(" Voter DID: %s", voterDID)
-
-
// Create vote consumer (same as main.go)
-
consumer := jetstream.NewVoteEventConsumer(voteRepo, userService, db)
-
-
// Channels to receive the event
-
eventChan := make(chan *jetstream.JetstreamEvent, 10)
-
errorChan := make(chan error, 1)
-
done := make(chan bool)
-
-
// Start Jetstream WebSocket subscriber in background
-
go func() {
-
err := subscribeToJetstreamForVote(ctx, jetstreamURL, voterDID, postURI, consumer, eventChan, errorChan, done)
-
if err != nil {
-
errorChan <- err
-
}
-
}()
-
-
// Wait for event or timeout
-
t.Logf("โณ Waiting for Jetstream event (max 30 seconds)...")
-
-
select {
-
case event := <-eventChan:
-
t.Logf("โœ… Received real Jetstream event!")
-
t.Logf(" Event DID: %s", event.Did)
-
t.Logf(" Collection: %s", event.Commit.Collection)
-
t.Logf(" Operation: %s", event.Commit.Operation)
-
t.Logf(" RKey: %s", event.Commit.RKey)
-
-
// Verify it's for our voter
-
assert.Equal(t, voterDID, event.Did, "Event should be from voter's repo")
-
-
// Verify vote was indexed in AppView database
-
t.Logf("\n๐Ÿ” Querying AppView database for indexed vote...")
-
-
indexedVote, err := voteRepo.GetByVoterAndSubject(ctx, voterDID, postURI)
-
require.NoError(t, err, "Vote should be indexed in AppView")
-
-
t.Logf("โœ… Vote indexed in AppView:")
-
t.Logf(" URI: %s", indexedVote.URI)
-
t.Logf(" CID: %s", indexedVote.CID)
-
t.Logf(" Voter DID: %s", indexedVote.VoterDID)
-
t.Logf(" Subject: %s", indexedVote.SubjectURI)
-
t.Logf(" Direction: %s", indexedVote.Direction)
-
-
// Verify all fields match
-
assert.Equal(t, voteURI, indexedVote.URI, "URI should match")
-
assert.Equal(t, voteCID, indexedVote.CID, "CID should match")
-
assert.Equal(t, voterDID, indexedVote.VoterDID, "Voter DID should match")
-
assert.Equal(t, postURI, indexedVote.SubjectURI, "Subject URI should match")
-
assert.Equal(t, "up", indexedVote.Direction, "Direction should be 'up'")
-
-
// Verify post counts were updated
-
t.Logf("\n๐Ÿ” Verifying post vote counts updated...")
-
updatedPost, err := postRepo.GetByURI(ctx, postURI)
-
require.NoError(t, err, "Post should exist")
-
-
t.Logf("โœ… Post vote counts updated:")
-
t.Logf(" Upvotes: %d", updatedPost.UpvoteCount)
-
t.Logf(" Downvotes: %d", updatedPost.DownvoteCount)
-
t.Logf(" Score: %d", updatedPost.Score)
-
-
assert.Equal(t, 1, updatedPost.UpvoteCount, "Upvote count should be 1")
-
assert.Equal(t, 0, updatedPost.DownvoteCount, "Downvote count should be 0")
-
assert.Equal(t, 1, updatedPost.Score, "Score should be 1")
-
-
// Signal to stop Jetstream consumer
-
close(done)
-
-
t.Log("\nโœ… TRUE E2E COMPLETE: PDS โ†’ Jetstream โ†’ Consumer โ†’ AppView โœ“")
-
-
case err := <-errorChan:
-
t.Fatalf("โŒ Jetstream error: %v", err)
-
-
case <-time.After(30 * time.Second):
-
t.Fatalf("โŒ Timeout: No Jetstream event received within 30 seconds")
-
}
-
})
-
})
-
}
-
-
// subscribeToJetstreamForVote subscribes to real Jetstream firehose and processes vote events
-
// This helper creates a WebSocket connection to Jetstream and waits for vote events
-
func subscribeToJetstreamForVote(
-
ctx context.Context,
-
jetstreamURL string,
-
targetVoterDID string,
-
targetSubjectURI string,
-
consumer *jetstream.VoteEventConsumer,
-
eventChan chan<- *jetstream.JetstreamEvent,
-
errorChan chan<- error,
-
done <-chan bool,
-
) error {
-
conn, _, err := websocket.DefaultDialer.Dial(jetstreamURL, nil)
-
if err != nil {
-
return fmt.Errorf("failed to connect to Jetstream: %w", err)
-
}
-
defer func() { _ = conn.Close() }()
-
-
// Read messages until we find our event or receive done signal
-
for {
-
select {
-
case <-done:
-
return nil
-
case <-ctx.Done():
-
return ctx.Err()
-
default:
-
// Set read deadline to avoid blocking forever
-
if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
-
return fmt.Errorf("failed to set read deadline: %w", err)
-
}
-
-
var event jetstream.JetstreamEvent
-
err := conn.ReadJSON(&event)
-
if err != nil {
-
// Check if it's a timeout (expected)
-
if websocket.IsCloseError(err, websocket.CloseNormalClosure) {
-
return nil
-
}
-
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-
continue // Timeout is expected, keep listening
-
}
-
return fmt.Errorf("failed to read Jetstream message: %w", err)
-
}
-
-
// Check if this is a vote event for the target voter + subject
-
if event.Did == targetVoterDID && event.Kind == "commit" &&
-
event.Commit != nil && event.Commit.Collection == "social.coves.interaction.vote" {
-
-
// Verify it's for the target subject
-
record := event.Commit.Record
-
if subject, ok := record["subject"].(map[string]interface{}); ok {
-
if subjectURI, ok := subject["uri"].(string); ok && subjectURI == targetSubjectURI {
-
// This is our vote! Process it
-
if err := consumer.HandleEvent(ctx, &event); err != nil {
-
return fmt.Errorf("failed to process event: %w", err)
-
}
-
-
// Send to channel so test can verify
-
select {
-
case eventChan <- &event:
-
return nil
-
case <-time.After(1 * time.Second):
-
return fmt.Errorf("timeout sending event to channel")
-
}
-
}
-
}
-
}
-
}
-
}
-
}
-
-
// Helper function
-
func stringPtr(s string) *string {
-
return &s
-
}
-67
internal/atproto/lexicon/social/coves/interaction/createVote.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.createVote",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Vote on a post or comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject", "direction"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment to vote on"
-
},
-
"direction": {
-
"type": "string",
-
"enum": ["up", "down"],
-
"description": "Vote direction"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the created vote record"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the created vote record"
-
},
-
"existing": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of existing vote if updating"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "SubjectNotFound",
-
"description": "Post or comment not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to vote"
-
},
-
{
-
"name": "Banned",
-
"description": "User is banned from this community"
-
}
-
]
-
}
-
}
-
}
-37
internal/atproto/lexicon/social/coves/interaction/deleteVote.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.deleteVote",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Remove a vote from a post or comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment to remove vote from"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {}
-
}
-
},
-
"errors": [
-
{
-
"name": "VoteNotFound",
-
"description": "No vote found on this subject"
-
}
-
]
-
}
-
}
-
}
+27 -27
internal/core/aggregators/aggregator.go
···
// Aggregators are autonomous services that can post content to communities after authorization
// Following Bluesky's pattern: app.bsky.feed.generator and app.bsky.labeler.service
type Aggregator struct {
-
DID string `json:"did" db:"did"` // Aggregator's DID (primary key)
-
DisplayName string `json:"displayName" db:"display_name"` // Human-readable name
-
Description string `json:"description,omitempty" db:"description"` // What the aggregator does
-
AvatarURL string `json:"avatarUrl,omitempty" db:"avatar_url"` // Optional avatar image URL
-
ConfigSchema []byte `json:"configSchema,omitempty" db:"config_schema"` // JSON Schema for configuration (JSONB)
-
MaintainerDID string `json:"maintainerDid,omitempty" db:"maintainer_did"` // Contact for support/issues
-
SourceURL string `json:"sourceUrl,omitempty" db:"source_url"` // Source code URL (transparency)
-
CommunitiesUsing int `json:"communitiesUsing" db:"communities_using"` // Auto-updated by trigger
-
PostsCreated int `json:"postsCreated" db:"posts_created"` // Auto-updated by trigger
-
CreatedAt time.Time `json:"createdAt" db:"created_at"` // When aggregator was created (from lexicon)
-
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"` // When we indexed this record
-
RecordURI string `json:"recordUri,omitempty" db:"record_uri"` // at://did/social.coves.aggregator.service/self
-
RecordCID string `json:"recordCid,omitempty" db:"record_cid"` // Content hash
+
CreatedAt time.Time `json:"createdAt" db:"created_at"`
+
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"`
+
AvatarURL string `json:"avatarUrl,omitempty" db:"avatar_url"`
+
DID string `json:"did" db:"did"`
+
MaintainerDID string `json:"maintainerDid,omitempty" db:"maintainer_did"`
+
SourceURL string `json:"sourceUrl,omitempty" db:"source_url"`
+
Description string `json:"description,omitempty" db:"description"`
+
DisplayName string `json:"displayName" db:"display_name"`
+
RecordURI string `json:"recordUri,omitempty" db:"record_uri"`
+
RecordCID string `json:"recordCid,omitempty" db:"record_cid"`
+
ConfigSchema []byte `json:"configSchema,omitempty" db:"config_schema"`
+
CommunitiesUsing int `json:"communitiesUsing" db:"communities_using"`
+
PostsCreated int `json:"postsCreated" db:"posts_created"`
}
// Authorization represents a community's authorization for an aggregator
// Stored in community's repository: at://community_did/social.coves.aggregator.authorization/{rkey}
type Authorization struct {
-
ID int `json:"id" db:"id"` // Database ID
-
AggregatorDID string `json:"aggregatorDid" db:"aggregator_did"` // Which aggregator
-
CommunityDID string `json:"communityDid" db:"community_did"` // Which community
-
Enabled bool `json:"enabled" db:"enabled"` // Current status
-
Config []byte `json:"config,omitempty" db:"config"` // Aggregator-specific config (JSONB)
-
CreatedBy string `json:"createdBy,omitempty" db:"created_by"` // Moderator DID who enabled it
-
DisabledBy string `json:"disabledBy,omitempty" db:"disabled_by"` // Moderator DID who disabled it
-
CreatedAt time.Time `json:"createdAt" db:"created_at"` // When authorization was created
-
DisabledAt *time.Time `json:"disabledAt,omitempty" db:"disabled_at"` // When authorization was disabled (for modlog/audit)
-
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"` // When we indexed this record
-
RecordURI string `json:"recordUri,omitempty" db:"record_uri"` // at://community_did/social.coves.aggregator.authorization/{rkey}
-
RecordCID string `json:"recordCid,omitempty" db:"record_cid"` // Content hash
+
CreatedAt time.Time `json:"createdAt" db:"created_at"`
+
IndexedAt time.Time `json:"indexedAt" db:"indexed_at"`
+
DisabledAt *time.Time `json:"disabledAt,omitempty" db:"disabled_at"`
+
AggregatorDID string `json:"aggregatorDid" db:"aggregator_did"`
+
CommunityDID string `json:"communityDid" db:"community_did"`
+
CreatedBy string `json:"createdBy,omitempty" db:"created_by"`
+
DisabledBy string `json:"disabledBy,omitempty" db:"disabled_by"`
+
RecordURI string `json:"recordUri,omitempty" db:"record_uri"`
+
RecordCID string `json:"recordCid,omitempty" db:"record_cid"`
+
Config []byte `json:"config,omitempty" db:"config"`
+
ID int `json:"id" db:"id"`
+
Enabled bool `json:"enabled" db:"enabled"`
}
// AggregatorPost represents tracking of posts created by aggregators
// AppView-only table for rate limiting and statistics
type AggregatorPost struct {
-
ID int `json:"id" db:"id"`
+
CreatedAt time.Time `json:"createdAt" db:"created_at"`
AggregatorDID string `json:"aggregatorDid" db:"aggregator_did"`
CommunityDID string `json:"communityDid" db:"community_did"`
PostURI string `json:"postUri" db:"post_uri"`
PostCID string `json:"postCid" db:"post_cid"`
-
CreatedAt time.Time `json:"createdAt" db:"created_at"`
+
ID int `json:"id" db:"id"`
}
// EnableAggregatorRequest represents input for enabling an aggregator in a community
+100
internal/atproto/lexicon/social/coves/community/post.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.community.post",
+
"defs": {
+
"main": {
+
"type": "record",
+
"description": "A post in a Coves community. Posts live in community repositories and persist independently of the author.",
+
"key": "tid",
+
"record": {
+
"type": "object",
+
"required": ["community", "author", "createdAt"],
+
"properties": {
+
"community": {
+
"type": "string",
+
"format": "at-identifier",
+
"description": "DID or handle of the community this was posted to"
+
},
+
"author": {
+
"type": "string",
+
"format": "did",
+
"description": "DID of the user who created this post"
+
},
+
"title": {
+
"type": "string",
+
"maxGraphemes": 300,
+
"maxLength": 3000,
+
"description": "Post title (optional for media-only posts)"
+
},
+
"content": {
+
"type": "string",
+
"maxGraphemes": 10000,
+
"maxLength": 100000,
+
"description": "Post content - supports rich text via facets"
+
},
+
"facets": {
+
"type": "array",
+
"description": "Annotations for rich text (mentions, links, tags)",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.richtext.facet"
+
}
+
},
+
"embed": {
+
"type": "union",
+
"description": "Embedded media, external links, or quoted posts",
+
"refs": [
+
"social.coves.embed.images",
+
"social.coves.embed.video",
+
"social.coves.embed.external",
+
"social.coves.embed.post"
+
]
+
},
+
"langs": {
+
"type": "array",
+
"description": "Languages used in the post content (ISO 639-1)",
+
"maxLength": 3,
+
"items": {
+
"type": "string",
+
"format": "language"
+
}
+
},
+
"labels": {
+
"type": "ref",
+
"ref": "com.atproto.label.defs#selfLabels",
+
"description": "Self-applied content labels (NSFW, spoilers, etc.)"
+
},
+
"tags": {
+
"type": "array",
+
"description": "User-applied topic tags",
+
"maxLength": 8,
+
"items": {
+
"type": "string",
+
"maxLength": 64,
+
"maxGraphemes": 64
+
}
+
},
+
"crosspostOf": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef",
+
"description": "If this is a crosspost, strong reference to the immediate parent post"
+
},
+
"crosspostChain": {
+
"type": "array",
+
"description": "Full chain of crossposts with version pinning. First element is original, last is immediate parent.",
+
"maxLength": 25,
+
"items": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef"
+
}
+
},
+
"createdAt": {
+
"type": "string",
+
"format": "datetime",
+
"description": "Timestamp of post creation"
+
}
+
}
+
}
+
}
+
}
+
}
+119
internal/atproto/lexicon/social/coves/community/post/create.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.community.post.create",
+
"defs": {
+
"main": {
+
"type": "procedure",
+
"description": "Create a new post in a community",
+
"input": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["community"],
+
"properties": {
+
"community": {
+
"type": "string",
+
"format": "at-identifier",
+
"description": "DID or handle of the community to post in"
+
},
+
"title": {
+
"type": "string",
+
"maxGraphemes": 300,
+
"maxLength": 3000,
+
"description": "Post title (optional for media-only posts)"
+
},
+
"content": {
+
"type": "string",
+
"maxGraphemes": 10000,
+
"maxLength": 100000,
+
"description": "Post content - supports rich text via facets"
+
},
+
"facets": {
+
"type": "array",
+
"description": "Annotations for rich text (mentions, links, tags)",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.richtext.facet"
+
}
+
},
+
"embed": {
+
"type": "union",
+
"description": "Embedded media, external links, or quoted posts",
+
"refs": [
+
"social.coves.embed.images",
+
"social.coves.embed.video",
+
"social.coves.embed.external",
+
"social.coves.embed.post"
+
]
+
},
+
"langs": {
+
"type": "array",
+
"description": "Languages used in the post content (ISO 639-1)",
+
"maxLength": 3,
+
"items": {
+
"type": "string",
+
"format": "language"
+
}
+
},
+
"labels": {
+
"type": "ref",
+
"ref": "com.atproto.label.defs#selfLabels",
+
"description": "Self-applied content labels (NSFW, spoilers, etc.)"
+
},
+
"tags": {
+
"type": "array",
+
"description": "User-applied topic tags",
+
"maxLength": 8,
+
"items": {
+
"type": "string",
+
"maxLength": 64,
+
"maxGraphemes": 64
+
}
+
}
+
}
+
}
+
},
+
"output": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["uri", "cid"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the created post"
+
},
+
"cid": {
+
"type": "string",
+
"format": "cid",
+
"description": "CID of the created post"
+
}
+
}
+
}
+
},
+
"errors": [
+
{
+
"name": "CommunityNotFound",
+
"description": "Community not found"
+
},
+
{
+
"name": "NotAuthorized",
+
"description": "User is not authorized to post in this community"
+
},
+
{
+
"name": "Banned",
+
"description": "User is banned from this community"
+
},
+
{
+
"name": "InvalidContent",
+
"description": "Post content violates community rules"
+
},
+
{
+
"name": "ContentRuleViolation",
+
"description": "Post violates community content rules (e.g., embeds not allowed, text too short)"
+
}
+
]
+
}
+
}
+
}
+41
internal/atproto/lexicon/social/coves/community/post/delete.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.community.post.delete",
+
"defs": {
+
"main": {
+
"type": "procedure",
+
"description": "Delete a post",
+
"input": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["uri"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the post to delete"
+
}
+
}
+
}
+
},
+
"output": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"properties": {}
+
}
+
},
+
"errors": [
+
{
+
"name": "PostNotFound",
+
"description": "Post not found"
+
},
+
{
+
"name": "NotAuthorized",
+
"description": "User is not authorized to delete this post"
+
}
+
]
+
}
+
}
+
}
+6 -6
internal/atproto/lexicon/social/coves/embed/post.json
···
"defs": {
"main": {
"type": "object",
-
"description": "Embedded reference to another post",
-
"required": ["uri"],
+
"description": "Embedded reference to another post (quoted post)",
+
"required": ["post"],
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post being embedded"
+
"post": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef",
+
"description": "Strong reference to the embedded post (includes URI and CID)"
}
}
}
+3 -3
internal/atproto/lexicon/social/coves/feed/defs.json
···
"properties": {
"post": {
"type": "ref",
-
"ref": "social.coves.post.get#postView"
+
"ref": "social.coves.community.post.get#postView"
},
"reason": {
"type": "union",
···
"properties": {
"by": {
"type": "ref",
-
"ref": "social.coves.post.get#authorView"
+
"ref": "social.coves.community.post.get#authorView"
},
"indexedAt": {
"type": "string",
···
"properties": {
"community": {
"type": "ref",
-
"ref": "social.coves.post.get#communityRef"
+
"ref": "social.coves.community.post.get#communityRef"
}
}
},
+47
internal/db/migrations/015_alter_content_labels_to_jsonb.sql
···
+
-- +goose Up
+
-- Change content_labels from TEXT[] to JSONB to preserve full com.atproto.label.defs#selfLabels structure
+
-- This allows storing the optional 'neg' field and future extensions
+
+
-- Create temporary function to convert TEXT[] to selfLabels JSONB
+
-- +goose StatementBegin
+
CREATE OR REPLACE FUNCTION convert_labels_to_jsonb(labels TEXT[])
+
RETURNS JSONB AS $$
+
BEGIN
+
IF labels IS NULL OR array_length(labels, 1) = 0 THEN
+
RETURN NULL;
+
END IF;
+
+
RETURN jsonb_build_object(
+
'values',
+
(SELECT jsonb_agg(jsonb_build_object('val', label))
+
FROM unnest(labels) AS label)
+
);
+
END;
+
$$ LANGUAGE plpgsql IMMUTABLE;
+
-- +goose StatementEnd
+
+
-- Convert column type using the function
+
ALTER TABLE posts
+
ALTER COLUMN content_labels TYPE JSONB
+
USING convert_labels_to_jsonb(content_labels);
+
+
-- Drop the temporary function
+
DROP FUNCTION convert_labels_to_jsonb(TEXT[]);
+
+
-- Update column comment
+
COMMENT ON COLUMN posts.content_labels IS 'Self-applied labels per com.atproto.label.defs#selfLabels (JSONB: {"values":[{"val":"nsfw","neg":false}]})';
+
+
-- +goose Down
+
-- Revert JSONB back to TEXT[] (lossy - drops 'neg' field)
+
ALTER TABLE posts
+
ALTER COLUMN content_labels TYPE TEXT[]
+
USING CASE
+
WHEN content_labels IS NULL THEN NULL
+
ELSE ARRAY(
+
SELECT value->>'val'
+
FROM jsonb_array_elements(content_labels->'values') AS value
+
)
+
END;
+
+
-- Restore original comment
+
COMMENT ON COLUMN posts.content_labels IS 'Self-applied labels (nsfw, spoiler, violence)';
+1 -1
tests/lexicon-test-data/moderation/tribunal-vote-invalid-decision.json
···
{
"$type": "social.coves.moderation.tribunalVote",
"tribunal": "at://did:plc:community123/social.coves.moderation.tribunal/3k7a3dmb5bk2c",
-
"subject": "at://did:plc:user123/social.coves.post.record/3k7a2clb4bj2b",
+
"subject": "at://$1/social.coves.community.post/3k7a2clb4bj2b",
"decision": "maybe",
"createdAt": "2025-01-09T18:00:00Z"
}
+5 -6
tests/lexicon-test-data/post/post-invalid-missing-community.json
···
{
-
"$type": "social.coves.post.record",
-
"postType": "text",
+
"$type": "social.coves.community.post",
+
"author": "did:plc:testauthor123",
"title": "Test Post",
-
"text": "This post is missing the required community field",
+
"content": "This post is missing the required community field",
"tags": ["test"],
-
"language": "en",
-
"contentWarnings": [],
+
"langs": ["en"],
"createdAt": "2025-01-09T14:30:00Z"
-
}
+
}
+6 -7
tests/lexicon-test-data/post/post-valid-text.json
···
{
-
"$type": "social.coves.post.record",
+
"$type": "social.coves.community.post",
"community": "did:plc:programming123",
-
"postType": "text",
+
"author": "did:plc:testauthor123",
"title": "Best practices for error handling in Go",
-
"text": "I've been working with Go for a while now and wanted to share some thoughts on error handling patterns...",
-
"textFacets": [
+
"content": "I've been working with Go for a while now and wanted to share some thoughts on error handling patterns...",
+
"facets": [
{
"index": {
"byteStart": 20,
···
}
],
"tags": ["golang", "error-handling", "best-practices"],
-
"language": "en",
-
"contentWarnings": [],
+
"langs": ["en"],
"createdAt": "2025-01-09T14:30:00Z"
-
}
+
}
+5 -5
docs/COMMUNITY_FEEDS.md
···
```go
type PostView struct {
-
URI string // at://did:plc:abc/social.coves.post.record/123
+
URI string // at://did:plc:abc/social.coves.community.post.record/123
CID string // Content ID
RKey string // Record key (TID)
Author *AuthorView // Author with handle, avatar, reputation
···
"feed": [
{
"post": {
-
"uri": "at://did:plc:gaming123/social.coves.post.record/abc",
+
"uri": "at://did:plc:gaming123/social.coves.community.post.record/abc",
"cid": "bafyrei...",
"author": {
"did": "did:plc:alice",
···
GET /xrpc/social.coves.feed.getSkeleton?feed=at://alice/feed/best-memes
โ†’ Returns: [uri1, uri2, uri3, ...]
-
GET /xrpc/social.coves.post.get?uris=[...]
+
GET /xrpc/social.coves.community.post.get?uris=[...]
โ†’ Returns: [full posts]
```
···
## Lexicon Updates
-
### Updated: `social.coves.post.get`
+
### Updated: `social.coves.community.post.get`
**Changes:**
1. โœ… Batch URIs: `uri` โ†’ `uris[]` (max 25)
···
// Custom feed (power users)
GET /xrpc/social.coves.feed.getSkeleton?feed=at://alice/feed/best-memes
โ†’ Returns URIs
-
GET /xrpc/social.coves.post.get?uris=[...]
+
GET /xrpc/social.coves.community.post.get?uris=[...]
โ†’ Hydrates posts
```
+1 -1
docs/PRD_GOVERNANCE.md
···
- [ ] Go structs: `ContentRules` type in community models
- [ ] Repository: Parse and store `contentRules` from community profiles
- [ ] Service: `ValidatePostAgainstRules(post, community)` function
-
- [ ] Handler: Integrate validation into `social.coves.post.create`
+
- [ ] Handler: Integrate validation into `social.coves.community.post.create`
- [ ] AppView indexing: Index post characteristics (embed_type, text_length, etc.)
- [ ] Tests: Comprehensive rule validation tests
- [ ] Documentation: Content rules guide for community creators
+17 -17
docs/PRD_POSTS.md
···
**Repository Structure:**
```
-
Repository: at://did:plc:community789/social.coves.post.record/3k2a4b5c6d7e
+
Repository: at://did:plc:community789/social.coves.community.post.record/3k2a4b5c6d7e
Owner: did:plc:community789 (community owns the post)
Author: did:plc:user123 (tracked in record metadata)
Hosted By: did:web:coves.social (instance manages community credentials)
···
**Implementation checklist:**
- [x] Lexicon: `contentRules` in `social.coves.community.profile` โœ…
-
- [x] Lexicon: `postType` removed from `social.coves.post.create` โœ…
+
- [x] Lexicon: `postType` removed from `social.coves.community.post.create` โœ…
- [ ] Validation: `ValidatePostAgainstRules()` service function
- [ ] Handler: Integrate validation in post creation endpoint
- [ ] AppView: Index derived characteristics (embed_type, text_length, etc.)
···
**Priority:** CRITICAL - Posts are the foundation of the platform
#### Create Post
-
- [x] Lexicon: `social.coves.post.record` โœ…
-
- [x] Lexicon: `social.coves.post.create` โœ…
+
- [x] Lexicon: `social.coves.community.post.record` โœ…
+
- [x] Lexicon: `social.coves.community.post.create` โœ…
- [x] Removed `postType` enum in favor of content rules โœ… (2025-10-18)
- [x] Removed `postType` from record and get lexicons โœ… (2025-10-18)
-
- [x] **Handler:** `POST /xrpc/social.coves.post.create` โœ… (Alpha - see IMPLEMENTATION_POST_CREATION.md)
+
- [x] **Handler:** `POST /xrpc/social.coves.community.post.create` โœ… (Alpha - see IMPLEMENTATION_POST_CREATION.md)
  - ✅ Accept: community (DID/handle), title (optional), content, facets, embed, contentLabels
  - ✅ Validate: User is authenticated, community exists, content within limits
  - ✅ Write: Create record in **community's PDS repository**
···
- [x] **E2E Test:** Create text post → Write to **community's PDS** → Index via Jetstream → Verify in AppView ✅
#### Get Post
-
- [x] Lexicon: `social.coves.post.get` โœ…
-
- [ ] **Handler:** `GET /xrpc/social.coves.post.get?uri=at://...`
+
- [x] Lexicon: `social.coves.community.post.get` โœ…
+
- [ ] **Handler:** `GET /xrpc/social.coves.community.post.get?uri=at://...`
- Accept: AT-URI of post
- Return: Full post view with author, community, stats, viewer state
- [ ] **Service Layer:** `PostService.Get(uri, viewerDID)`
···
- [ ] **E2E Test:** Get post by URI โ†’ Verify all fields populated
#### Update Post
-
- [x] Lexicon: `social.coves.post.update` โœ…
-
- [ ] **Handler:** `POST /xrpc/social.coves.post.update`
+
- [x] Lexicon: `social.coves.community.post.update` โœ…
+
- [ ] **Handler:** `POST /xrpc/social.coves.community.post.update`
- Accept: uri, title, content, facets, embed, contentLabels, editNote
- Validate: User is post author, within 24-hour edit window
- Write: Update record in **community's PDS**
···
- [ ] **E2E Test:** Update post โ†’ Verify edit reflected in AppView
#### Delete Post
-
- [x] Lexicon: `social.coves.post.delete` โœ…
-
- [ ] **Handler:** `POST /xrpc/social.coves.post.delete`
+
- [x] Lexicon: `social.coves.community.post.delete` โœ…
+
- [ ] **Handler:** `POST /xrpc/social.coves.community.post.delete`
- Accept: uri
- Validate: User is post author OR community moderator
- Write: Delete record from **community's PDS**
···
#### Post Event Handling
- [x] **Consumer:** `PostConsumer.HandlePostEvent()` โœ… (2025-10-19)
-
- โœ… Listen for `social.coves.post.record` CREATE from **community repositories**
+
- โœ… Listen for `social.coves.community.post.record` CREATE from **community repositories**
  - ✅ Parse post record, extract author DID and community DID (from AT-URI owner)
  - ⚠️ **Derive post characteristics:** DEFERRED (embed_type, text_length, has_title, has_embed for content rules filtering)
  - ✅ Insert in AppView PostgreSQL (CREATE only - UPDATE/DELETE deferred)
···
- [ ] **Tag Storage:** Tags live in **user's repository** (users own their tags)
#### Crossposting
-
- [x] Lexicon: `social.coves.post.crosspost` โœ…
+
- [x] Lexicon: `social.coves.community.post.crosspost` โœ…
- [ ] **Crosspost Tracking:** Share post to multiple communities
- [ ] **Implementation:** Create new post record in each community's repository
- [ ] **Crosspost Chain:** Track all crosspost relationships
···
- [ ] **AppView Query:** Endpoint to fetch user's saved posts
### Post Search
-
- [x] Lexicon: `social.coves.post.search` โœ…
+
- [x] Lexicon: `social.coves.community.post.search` โœ…
- [ ] **Search Parameters:**
- Query string (q)
- Filter by community
···
- **Reuses Token Refresh:** Can leverage existing community credential management
**Implementation Details:**
-
- Post AT-URI: `at://community_did/social.coves.post.record/tid`
+
- Post AT-URI: `at://community_did/social.coves.community.post.record/tid`
- Write operations use community's PDS credentials (encrypted, stored in AppView)
- Author tracked in post record's `author` field (DID)
- Moderators can delete any post in their community
···
## Lexicon Summary
-
### `social.coves.post.record`
+
### `social.coves.community.post.record`
**Status:** โœ… Defined, implementation TODO
**Last Updated:** 2025-10-18 (removed `postType` enum)
···
- Post "type" is derived from structure (has embed? what embed type? has title? text length?)
- Community's `contentRules` validate post structure at creation time
-
### `social.coves.post.create` (Procedure)
+
### `social.coves.community.post.create` (Procedure)
**Status:** โœ… Defined, implementation TODO
**Last Updated:** 2025-10-18 (removed `postType` parameter)
+4 -4
docs/aggregators/PRD_AGGREGATORS.md
···
1. **Aggregators are Actors, Not a Separate System**
- Each aggregator has its own DID
- Authenticate as themselves via JWT
-
- Use existing `social.coves.post.create` endpoint
+
- Use existing `social.coves.community.post.create` endpoint
- Post record's `author` field = aggregator DID (server-populated)
- No separate posting API needed
···
Aggregator Service (External)
โ”‚
โ”‚ 1. Authenticates as aggregator DID (JWT)
-
โ”‚ 2. Calls social.coves.post.create
+
โ”‚ 2. Calls social.coves.community.post.create
▼
Coves AppView Handler
โ”‚
···
### For Aggregators
-
- **`social.coves.post.create`** - Modified to handle aggregator auth
+
- **`social.coves.community.post.create`** - Modified to handle aggregator auth
- **`social.coves.aggregator.getAuthorizations`** - Query authorized communities
### For Discovery
···
---
-
### 2025-10-19: Reuse `social.coves.post.create` Endpoint
+
### 2025-10-19: Reuse `social.coves.community.post.create` Endpoint
**Decision:** Aggregators use existing post creation endpoint.
**Rationale:**
+3 -3
docs/aggregators/PRD_KAGI_NEWS_RSS.md
···
โ”‚ 3. Deduplication: Tracks posted items via JSON state file โ”‚
โ”‚ 4. Feed Mapper: Maps feed URLs to community handles โ”‚
โ”‚ 5. Post Formatter: Converts to Coves post format โ”‚
-
โ”‚ 6. Post Publisher: Calls social.coves.post.create via XRPC โ”‚
+
โ”‚ 6. Post Publisher: Calls social.coves.community.post.create via XRPC โ”‚
โ”‚ 7. Blob Uploader: Handles image upload to ATProto โ”‚
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
โ”‚
โ”‚ Authenticated XRPC calls
โ–ผ
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
-
โ”‚ Coves AppView (social.coves.post.create) โ”‚
+
โ”‚ Coves AppView (social.coves.community.post.create) โ”‚
โ”‚ - Validates aggregator authorization โ”‚
โ”‚ - Creates post with author = did:plc:[aggregator-did] โ”‚
โ”‚ - Indexes to community feeds โ”‚
···
```json
{
-
"$type": "social.coves.post.record",
+
"$type": "social.coves.community.post.record",
"author": "did:plc:[aggregator-did]",
"community": "world-news.coves.social",
"title": "{Kagi story title}",
+7 -7
internal/api/routes/post.go
···
)
// RegisterPostRoutes registers post-related XRPC endpoints on the router
-
// Implements social.coves.post.* lexicon endpoints
+
// Implements social.coves.community.post.* lexicon endpoints
func RegisterPostRoutes(r chi.Router, service posts.Service, authMiddleware *middleware.AtProtoAuthMiddleware) {
// Initialize handlers
createHandler := post.NewCreateHandler(service)
// Procedure endpoints (POST) - require authentication
-
// social.coves.post.create - create a new post in a community
-
r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.post.create", createHandler.HandleCreate)
+
// social.coves.community.post.create - create a new post in a community
+
r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.community.post.create", createHandler.HandleCreate)
// Future endpoints (Beta):
-
// r.Get("/xrpc/social.coves.post.get", getHandler.HandleGet)
-
// r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.post.update", updateHandler.HandleUpdate)
-
// r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.post.delete", deleteHandler.HandleDelete)
-
// r.Get("/xrpc/social.coves.post.list", listHandler.HandleList)
+
// r.Get("/xrpc/social.coves.community.post.get", getHandler.HandleGet)
+
// r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.community.post.update", updateHandler.HandleUpdate)
+
// r.With(authMiddleware.RequireAuth).Post("/xrpc/social.coves.community.post.delete", deleteHandler.HandleDelete)
+
// r.Get("/xrpc/social.coves.community.post.list", listHandler.HandleList)
}
+80
internal/atproto/lexicon/social/coves/feed/comment.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.feed.comment",
+
"defs": {
+
"main": {
+
"type": "record",
+
"description": "A comment on a post or another comment. Comments live in user repositories and support nested threading.",
+
"key": "tid",
+
"record": {
+
"type": "object",
+
"required": ["reply", "content", "createdAt"],
+
"properties": {
+
"reply": {
+
"type": "ref",
+
"ref": "#replyRef",
+
"description": "Reference to the post and parent being replied to"
+
},
+
"content": {
+
"type": "string",
+
"maxGraphemes": 3000,
+
"maxLength": 30000,
+
"description": "Comment text content"
+
},
+
"facets": {
+
"type": "array",
+
"description": "Annotations for rich text (mentions, links, etc.)",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.richtext.facet"
+
}
+
},
+
"embed": {
+
"type": "union",
+
"description": "Embedded media or quoted posts",
+
"refs": [
+
"social.coves.embed.images",
+
"social.coves.embed.post"
+
]
+
},
+
"langs": {
+
"type": "array",
+
"description": "Languages used in the comment content (ISO 639-1)",
+
"maxLength": 3,
+
"items": {
+
"type": "string",
+
"format": "language"
+
}
+
},
+
"labels": {
+
"type": "ref",
+
"ref": "com.atproto.label.defs#selfLabels",
+
"description": "Self-applied content labels"
+
},
+
"createdAt": {
+
"type": "string",
+
"format": "datetime",
+
"description": "Timestamp of comment creation"
+
}
+
}
+
}
+
},
+
"replyRef": {
+
"type": "object",
+
"description": "References for maintaining thread structure. Root always points to the original post, parent points to the immediate parent (post or comment).",
+
"required": ["root", "parent"],
+
"properties": {
+
"root": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef",
+
"description": "Strong reference to the original post that started the thread"
+
},
+
"parent": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef",
+
"description": "Strong reference to the immediate parent (post or comment) being replied to"
+
}
+
}
+
}
+
}
+
}
-86
internal/atproto/lexicon/social/coves/interaction/comment.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.comment",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A comment on a post or another comment",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["subject", "content", "createdAt"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of post or comment being replied to"
-
},
-
"content": {
-
"type": "union",
-
"refs": ["#textContent", "#imageContent", "#stickerContent"]
-
},
-
"location": {
-
"type": "ref",
-
"ref": "social.coves.actor.profile#geoLocation"
-
},
-
"translatedFrom": {
-
"type": "string",
-
"maxLength": 10,
-
"description": "Language code if auto-translated (ISO 639-1)"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
},
-
"textContent": {
-
"type": "object",
-
"required": ["text"],
-
"properties": {
-
"text": {
-
"type": "string",
-
"maxLength": 10000,
-
"description": "Comment text"
-
},
-
"facets": {
-
"type": "array",
-
"description": "Rich text annotations",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
}
-
}
-
},
-
"imageContent": {
-
"type": "object",
-
"required": ["image"],
-
"properties": {
-
"image": {
-
"type": "ref",
-
"ref": "social.coves.embed.images#image"
-
},
-
"caption": {
-
"type": "string",
-
"maxLength": 1000
-
}
-
}
-
},
-
"stickerContent": {
-
"type": "object",
-
"required": ["stickerId"],
-
"properties": {
-
"stickerId": {
-
"type": "string",
-
"description": "Reference to a sticker in a sticker pack"
-
},
-
"stickerPackId": {
-
"type": "string",
-
"description": "Reference to the sticker pack"
-
}
-
}
-
}
-
}
-
}
-75
internal/atproto/lexicon/social/coves/interaction/createComment.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.createComment",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Create a comment on a post or another comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["parent", "text"],
-
"properties": {
-
"parent": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment being replied to"
-
},
-
"text": {
-
"type": "string",
-
"maxGraphemes": 3000,
-
"maxLength": 30000,
-
"description": "Comment text"
-
},
-
"textFacets": {
-
"type": "array",
-
"description": "Rich text annotations",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the created comment"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the created comment"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "ParentNotFound",
-
"description": "Parent post or comment not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to comment"
-
},
-
{
-
"name": "ThreadLocked",
-
"description": "Comment thread is locked"
-
},
-
{
-
"name": "Banned",
-
"description": "User is banned from this community"
-
}
-
]
-
}
-
}
-
}
-41
internal/atproto/lexicon/social/coves/interaction/deleteComment.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.deleteComment",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Delete a comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the comment to delete"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {}
-
}
-
},
-
"errors": [
-
{
-
"name": "CommentNotFound",
-
"description": "Comment not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to delete this comment"
-
}
-
]
-
}
-
}
-
}
-118
internal/atproto/lexicon/social/coves/post/create.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.create",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Create a new post in a community",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["community"],
-
"properties": {
-
"community": {
-
"type": "string",
-
"format": "at-identifier",
-
"description": "DID or handle of the community to post in"
-
},
-
"title": {
-
"type": "string",
-
"maxGraphemes": 300,
-
"maxLength": 3000,
-
"description": "Post title (optional for microblog, image, and video posts)"
-
},
-
"content": {
-
"type": "string",
-
"maxLength": 50000,
-
"description": "Post content - main text for text posts, description for media, etc."
-
},
-
"facets": {
-
"type": "array",
-
"description": "Rich text annotations for content",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
},
-
"embed": {
-
"type": "union",
-
"description": "Embedded content - images, videos, external links, or quoted posts",
-
"refs": [
-
"social.coves.embed.images",
-
"social.coves.embed.video",
-
"social.coves.embed.external",
-
"social.coves.embed.post"
-
]
-
},
-
"originalAuthor": {
-
"type": "ref",
-
"ref": "social.coves.post.record#originalAuthor",
-
"description": "For microblog posts - information about the original author"
-
},
-
"federatedFrom": {
-
"type": "ref",
-
"ref": "social.coves.federation.post",
-
"description": "Reference to original federated post (for microblog posts)"
-
},
-
"contentLabels": {
-
"type": "array",
-
"description": "Self-applied content labels",
-
"items": {
-
"type": "string",
-
"knownValues": ["nsfw", "spoiler", "violence"],
-
"maxLength": 32
-
}
-
},
-
"location": {
-
"type": "ref",
-
"ref": "social.coves.actor.profile#geoLocation",
-
"description": "Geographic location where post was created"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the created post"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the created post"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "CommunityNotFound",
-
"description": "Community not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to post in this community"
-
},
-
{
-
"name": "Banned",
-
"description": "User is banned from this community"
-
},
-
{
-
"name": "InvalidContent",
-
"description": "Post content violates community rules"
-
},
-
{
-
"name": "ContentRuleViolation",
-
"description": "Post violates community content rules (e.g., embeds not allowed, text too short)"
-
}
-
]
-
}
-
}
-
}
-39
internal/atproto/lexicon/social/coves/post/crosspost.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.crosspost",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A record tracking crosspost relationships between posts",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["originalPost", "crosspostOf", "createdAt"],
-
"properties": {
-
"originalPost": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the original post in the crosspost chain"
-
},
-
"crosspostOf": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the immediate parent this is a crosspost of"
-
},
-
"allCrossposts": {
-
"type": "array",
-
"description": "Array of AT-URIs of all posts in the crosspost chain",
-
"items": {
-
"type": "string",
-
"format": "at-uri"
-
}
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
}
-
}
-
}
-41
internal/atproto/lexicon/social/coves/post/delete.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.delete",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Delete a post",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post to delete"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {}
-
}
-
},
-
"errors": [
-
{
-
"name": "PostNotFound",
-
"description": "Post not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to delete this post"
-
}
-
]
-
}
-
}
-
}
-294
internal/atproto/lexicon/social/coves/post/get.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.get",
-
"defs": {
-
"main": {
-
"type": "query",
-
"description": "Get posts by AT-URI. Supports batch fetching for feed hydration. Returns posts in same order as input URIs.",
-
"parameters": {
-
"type": "params",
-
"required": ["uris"],
-
"properties": {
-
"uris": {
-
"type": "array",
-
"description": "List of post AT-URIs to fetch (max 25)",
-
"items": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"maxLength": 25,
-
"minLength": 1
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["posts"],
-
"properties": {
-
"posts": {
-
"type": "array",
-
"description": "Array of post views. May include notFound/blocked entries for missing posts.",
-
"items": {
-
"type": "union",
-
"refs": ["#postView", "#notFoundPost", "#blockedPost"]
-
}
-
}
-
}
-
}
-
},
-
"errors": [
-
{"name": "InvalidRequest", "description": "Invalid URI format or empty array"}
-
]
-
},
-
"postView": {
-
"type": "object",
-
"required": ["uri", "cid", "author", "record", "community", "createdAt", "indexedAt"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid"
-
},
-
"author": {
-
"type": "ref",
-
"ref": "#authorView"
-
},
-
"record": {
-
"type": "unknown",
-
"description": "The actual post record (text, image, video, etc.)"
-
},
-
"community": {
-
"type": "ref",
-
"ref": "#communityRef"
-
},
-
"title": {
-
"type": "string"
-
},
-
"text": {
-
"type": "string"
-
},
-
"textFacets": {
-
"type": "array",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
},
-
"embed": {
-
"type": "union",
-
"description": "Embedded content (images, video, link preview, or quoted post)",
-
"refs": [
-
"social.coves.embed.images#view",
-
"social.coves.embed.video#view",
-
"social.coves.embed.external#view",
-
"social.coves.embed.record#view",
-
"social.coves.embed.recordWithMedia#view"
-
]
-
},
-
"language": {
-
"type": "string",
-
"format": "language"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
},
-
"editedAt": {
-
"type": "string",
-
"format": "datetime"
-
},
-
"indexedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When this post was indexed by the AppView"
-
},
-
"stats": {
-
"type": "ref",
-
"ref": "#postStats"
-
},
-
"viewer": {
-
"type": "ref",
-
"ref": "#viewerState"
-
}
-
}
-
},
-
"authorView": {
-
"type": "object",
-
"required": ["did", "handle"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
},
-
"handle": {
-
"type": "string",
-
"format": "handle"
-
},
-
"displayName": {
-
"type": "string"
-
},
-
"avatar": {
-
"type": "string",
-
"format": "uri"
-
},
-
"reputation": {
-
"type": "integer",
-
"description": "Author's reputation in the community"
-
}
-
}
-
},
-
"communityRef": {
-
"type": "object",
-
"required": ["did", "name"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
},
-
"name": {
-
"type": "string"
-
},
-
"avatar": {
-
"type": "string",
-
"format": "uri"
-
}
-
}
-
},
-
"notFoundPost": {
-
"type": "object",
-
"description": "Post was not found (deleted, never indexed, or invalid URI)",
-
"required": ["uri", "notFound"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"notFound": {
-
"type": "boolean",
-
"const": true
-
}
-
}
-
},
-
"blockedPost": {
-
"type": "object",
-
"description": "Post is blocked due to viewer blocking author/community, or community moderation",
-
"required": ["uri", "blocked"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"blocked": {
-
"type": "boolean",
-
"const": true
-
},
-
"blockedBy": {
-
"type": "string",
-
"enum": ["author", "community", "moderator"],
-
"description": "What caused the block: viewer blocked author, viewer blocked community, or post was removed by moderators"
-
},
-
"author": {
-
"type": "ref",
-
"ref": "#blockedAuthor"
-
},
-
"community": {
-
"type": "ref",
-
"ref": "#blockedCommunity"
-
}
-
}
-
},
-
"blockedAuthor": {
-
"type": "object",
-
"description": "Minimal author info for blocked posts",
-
"required": ["did"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
}
-
}
-
},
-
"blockedCommunity": {
-
"type": "object",
-
"description": "Minimal community info for blocked posts",
-
"required": ["did"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
},
-
"name": {
-
"type": "string"
-
}
-
}
-
},
-
"postStats": {
-
"type": "object",
-
"required": ["upvotes", "downvotes", "score", "commentCount"],
-
"properties": {
-
"upvotes": {
-
"type": "integer",
-
"minimum": 0
-
},
-
"downvotes": {
-
"type": "integer",
-
"minimum": 0
-
},
-
"score": {
-
"type": "integer",
-
"description": "Calculated score (upvotes - downvotes)"
-
},
-
"commentCount": {
-
"type": "integer",
-
"minimum": 0
-
},
-
"shareCount": {
-
"type": "integer",
-
"minimum": 0
-
},
-
"tagCounts": {
-
"type": "object",
-
"description": "Aggregate counts of tags applied by community members",
-
"additionalProperties": {
-
"type": "integer",
-
"minimum": 0
-
}
-
}
-
}
-
},
-
"viewerState": {
-
"type": "object",
-
"properties": {
-
"vote": {
-
"type": "string",
-
"enum": ["up", "down"],
-
"description": "Viewer's vote on this post"
-
},
-
"voteUri": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"saved": {
-
"type": "boolean"
-
},
-
"savedUri": {
-
"type": "string",
-
"format": "at-uri"
-
},
-
"tags": {
-
"type": "array",
-
"description": "Tags applied by the viewer to this post",
-
"items": {
-
"type": "string",
-
"maxLength": 32
-
}
-
}
-
}
-
}
-
}
-
}
-99
internal/atproto/lexicon/social/coves/post/getCrosspostChain.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.getCrosspostChain",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Get all crossposts in a crosspost chain for a given post",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of any post in the crosspost chain"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["crossposts"],
-
"properties": {
-
"crossposts": {
-
"type": "array",
-
"description": "All posts in the crosspost chain",
-
"items": {
-
"type": "ref",
-
"ref": "#crosspostView"
-
}
-
}
-
}
-
}
-
}
-
},
-
"crosspostView": {
-
"type": "object",
-
"required": ["uri", "community", "author", "createdAt"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post"
-
},
-
"community": {
-
"type": "object",
-
"required": ["uri", "name"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the community"
-
},
-
"name": {
-
"type": "string",
-
"description": "Display name of the community"
-
},
-
"handle": {
-
"type": "string",
-
"description": "Handle of the community"
-
}
-
}
-
},
-
"author": {
-
"type": "object",
-
"required": ["did", "handle"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
},
-
"handle": {
-
"type": "string"
-
},
-
"displayName": {
-
"type": "string"
-
},
-
"avatar": {
-
"type": "string",
-
"format": "uri"
-
}
-
}
-
},
-
"isOriginal": {
-
"type": "boolean",
-
"description": "Whether this is the original post in the chain"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
}
-
}
-129
internal/atproto/lexicon/social/coves/post/record.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.record",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A unified post record supporting multiple content types",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["$type", "community", "author", "createdAt"],
-
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.post.record",
-
"description": "The record type identifier"
-
},
-
"community": {
-
"type": "string",
-
"format": "at-identifier",
-
"description": "DID or handle of the community this was posted to"
-
},
-
"author": {
-
"type": "string",
-
"format": "did",
-
"description": "DID of the user who created this post. Server-populated from authenticated session; clients MUST NOT provide this field. Required for attribution, moderation, and accountability."
-
},
-
"title": {
-
"type": "string",
-
"maxGraphemes": 300,
-
"maxLength": 3000,
-
"description": "Post title (optional for microblog, image, and video posts)"
-
},
-
"content": {
-
"type": "string",
-
"maxLength": 50000,
-
"description": "Post content - main text for text posts, description for media, etc."
-
},
-
"facets": {
-
"type": "array",
-
"description": "Rich text annotations for content",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
},
-
"embed": {
-
"type": "union",
-
"description": "Embedded content - images, videos, external links, or quoted posts",
-
"refs": [
-
"social.coves.embed.images",
-
"social.coves.embed.video",
-
"social.coves.embed.external",
-
"social.coves.embed.post"
-
]
-
},
-
"originalAuthor": {
-
"type": "ref",
-
"ref": "#originalAuthor",
-
"description": "For microblog posts - information about the original author from federated platform"
-
},
-
"contentLabels": {
-
"type": "array",
-
"description": "Self-applied content labels",
-
"items": {
-
"type": "string",
-
"knownValues": ["nsfw", "spoiler", "violence"],
-
"maxLength": 32
-
}
-
},
-
"federatedFrom": {
-
"type": "ref",
-
"ref": "social.coves.federation.post",
-
"description": "Reference to original federated post (if applicable)"
-
},
-
"location": {
-
"type": "ref",
-
"ref": "social.coves.actor.profile#geoLocation",
-
"description": "Geographic location where post was created"
-
},
-
"crosspostOf": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "If this is a crosspost, AT-URI of the post this is a crosspost of"
-
},
-
"crosspostChain": {
-
"type": "array",
-
"description": "Array of AT-URIs of all posts in the crosspost chain (including this one)",
-
"items": {
-
"type": "string",
-
"format": "at-uri"
-
}
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
},
-
"originalAuthor": {
-
"type": "object",
-
"description": "Information about the original author from a federated platform",
-
"required": ["handle"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did",
-
"description": "Original author's DID (if available)"
-
},
-
"handle": {
-
"type": "string",
-
"maxLength": 253,
-
"description": "Original author's handle"
-
},
-
"displayName": {
-
"type": "string",
-
"maxLength": 640,
-
"description": "Original author's display name"
-
},
-
"avatar": {
-
"type": "string",
-
"format": "uri",
-
"description": "URL to original author's avatar"
-
}
-
}
-
}
-
}
-
}
-80
internal/atproto/lexicon/social/coves/post/search.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.search",
-
"defs": {
-
"main": {
-
"type": "query",
-
"description": "Search for posts",
-
"parameters": {
-
"type": "params",
-
"required": ["q"],
-
"properties": {
-
"q": {
-
"type": "string",
-
"description": "Search query"
-
},
-
"community": {
-
"type": "string",
-
"format": "at-identifier",
-
"description": "Filter by specific community"
-
},
-
"author": {
-
"type": "string",
-
"format": "at-identifier",
-
"description": "Filter by author"
-
},
-
"type": {
-
"type": "string",
-
"enum": ["text", "image", "video", "article", "microblog"],
-
"description": "Filter by post type"
-
},
-
"tags": {
-
"type": "array",
-
"items": {
-
"type": "string"
-
},
-
"description": "Filter by tags"
-
},
-
"sort": {
-
"type": "string",
-
"enum": ["relevance", "new", "top"],
-
"default": "relevance"
-
},
-
"timeframe": {
-
"type": "string",
-
"enum": ["hour", "day", "week", "month", "year", "all"],
-
"default": "all"
-
},
-
"limit": {
-
"type": "integer",
-
"minimum": 1,
-
"maximum": 100,
-
"default": 50
-
},
-
"cursor": {
-
"type": "string"
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["posts"],
-
"properties": {
-
"posts": {
-
"type": "array",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.post.getFeed#feedPost"
-
}
-
},
-
"cursor": {
-
"type": "string"
-
}
-
}
-
}
-
}
-
}
-
}
-
}
-104
internal/atproto/lexicon/social/coves/post/update.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.post.update",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Update an existing post",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post to update"
-
},
-
"title": {
-
"type": "string",
-
"maxGraphemes": 300,
-
"maxLength": 3000,
-
"description": "Updated title"
-
},
-
"content": {
-
"type": "string",
-
"maxLength": 50000,
-
"description": "Updated content - main text for text posts, description for media, etc."
-
},
-
"facets": {
-
"type": "array",
-
"description": "Updated rich text annotations for content",
-
"items": {
-
"type": "ref",
-
"ref": "social.coves.richtext.facet"
-
}
-
},
-
"embed": {
-
"type": "union",
-
"description": "Updated embedded content (note: changing embed type may be restricted)",
-
"refs": [
-
"social.coves.embed.images",
-
"social.coves.embed.video",
-
"social.coves.embed.external",
-
"social.coves.embed.post"
-
]
-
},
-
"contentLabels": {
-
"type": "array",
-
"description": "Updated content labels",
-
"items": {
-
"type": "string",
-
"knownValues": ["nsfw", "spoiler", "violence"],
-
"maxLength": 32
-
}
-
},
-
"editNote": {
-
"type": "string",
-
"maxLength": 300,
-
"description": "Optional note explaining the edit"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the updated post"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "New CID of the updated post"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "PostNotFound",
-
"description": "Post not found"
-
},
-
{
-
"name": "NotAuthorized",
-
"description": "User is not authorized to edit this post"
-
},
-
{
-
"name": "EditWindowExpired",
-
"description": "Edit window has expired (posts can only be edited within 24 hours)"
-
},
-
{
-
"name": "InvalidUpdate",
-
"description": "Invalid update operation (e.g., changing post type)"
-
}
-
]
-
}
-
}
-
}
+156
internal/atproto/lexicon/com/atproto/label/defs.json
···
+
{
+
"lexicon": 1,
+
"id": "com.atproto.label.defs",
+
"defs": {
+
"label": {
+
"type": "object",
+
"description": "Metadata tag on an atproto resource (eg, repo or record).",
+
"required": ["src", "uri", "val", "cts"],
+
"properties": {
+
"ver": {
+
"type": "integer",
+
"description": "The AT Protocol version of the label object."
+
},
+
"src": {
+
"type": "string",
+
"format": "did",
+
"description": "DID of the actor who created this label."
+
},
+
"uri": {
+
"type": "string",
+
"format": "uri",
+
"description": "AT URI of the record, repository (account), or other resource that this label applies to."
+
},
+
"cid": {
+
"type": "string",
+
"format": "cid",
+
"description": "Optionally, CID specifying the specific version of 'uri' resource this label applies to."
+
},
+
"val": {
+
"type": "string",
+
"maxLength": 128,
+
"description": "The short string name of the value or type of this label."
+
},
+
"neg": {
+
"type": "boolean",
+
"description": "If true, this is a negation label, overwriting a previous label."
+
},
+
"cts": {
+
"type": "string",
+
"format": "datetime",
+
"description": "Timestamp when this label was created."
+
},
+
"exp": {
+
"type": "string",
+
"format": "datetime",
+
"description": "Timestamp at which this label expires (no longer applies)."
+
},
+
"sig": {
+
"type": "bytes",
+
"description": "Signature of dag-cbor encoded label."
+
}
+
}
+
},
+
"selfLabels": {
+
"type": "object",
+
"description": "Metadata tags on an atproto record, published by the author within the record.",
+
"required": ["values"],
+
"properties": {
+
"values": {
+
"type": "array",
+
"items": { "type": "ref", "ref": "#selfLabel" },
+
"maxLength": 10
+
}
+
}
+
},
+
"selfLabel": {
+
"type": "object",
+
"description": "Metadata tag on an atproto record, published by the author within the record. Note that schemas should use #selfLabels, not #selfLabel.",
+
"required": ["val"],
+
"properties": {
+
"val": {
+
"type": "string",
+
"maxLength": 128,
+
"description": "The short string name of the value or type of this label."
+
}
+
}
+
},
+
"labelValueDefinition": {
+
"type": "object",
+
"description": "Declares a label value and its expected interpretations and behaviors.",
+
"required": ["identifier", "severity", "blurs", "locales"],
+
"properties": {
+
"identifier": {
+
"type": "string",
+
"description": "The value of the label being defined. Must only include lowercase ascii and the '-' character ([a-z-]+).",
+
"maxLength": 100,
+
"maxGraphemes": 100
+
},
+
"severity": {
+
"type": "string",
+
"description": "How should a client visually convey this label? 'inform' means neutral and informational; 'alert' means negative and warning; 'none' means show nothing.",
+
"knownValues": ["inform", "alert", "none"]
+
},
+
"blurs": {
+
"type": "string",
+
"description": "What should this label hide in the UI, if applied? 'content' hides all of the target; 'media' hides the images/video/audio; 'none' hides nothing.",
+
"knownValues": ["content", "media", "none"]
+
},
+
"defaultSetting": {
+
"type": "string",
+
"description": "The default setting for this label.",
+
"knownValues": ["ignore", "warn", "hide"],
+
"default": "warn"
+
},
+
"adultOnly": {
+
"type": "boolean",
+
"description": "Does the user need to have adult content enabled in order to configure this label?"
+
},
+
"locales": {
+
"type": "array",
+
"items": { "type": "ref", "ref": "#labelValueDefinitionStrings" }
+
}
+
}
+
},
+
"labelValueDefinitionStrings": {
+
"type": "object",
+
"description": "Strings which describe the label in the UI, localized into a specific language.",
+
"required": ["lang", "name", "description"],
+
"properties": {
+
"lang": {
+
"type": "string",
+
"description": "The code of the language these strings are written in.",
+
"format": "language"
+
},
+
"name": {
+
"type": "string",
+
"description": "A short human-readable name for the label.",
+
"maxGraphemes": 64,
+
"maxLength": 640
+
},
+
"description": {
+
"type": "string",
+
"description": "A longer description of what the label means and why it might be applied.",
+
"maxGraphemes": 10000,
+
"maxLength": 100000
+
}
+
}
+
},
+
"labelValue": {
+
"type": "string",
+
"knownValues": [
+
"!hide",
+
"!no-promote",
+
"!warn",
+
"!no-unauthenticated",
+
"dmca-violation",
+
"doxxing",
+
"porn",
+
"sexual",
+
"nudity",
+
"nsfl",
+
"gore"
+
]
+
}
+
}
+
}
+15
internal/atproto/lexicon/com/atproto/repo/strongRef.json
···
+
{
+
"lexicon": 1,
+
"id": "com.atproto.repo.strongRef",
+
"description": "A URI with a content-hash fingerprint.",
+
"defs": {
+
"main": {
+
"type": "object",
+
"required": ["uri", "cid"],
+
"properties": {
+
"uri": { "type": "string", "format": "at-uri" },
+
"cid": { "type": "string", "format": "cid" }
+
}
+
}
+
}
+
}
+4 -21
internal/atproto/lexicon/social/coves/interaction/vote.json internal/atproto/lexicon/social/coves/feed/vote.json
···
{
"lexicon": 1,
-
"id": "social.coves.interaction.vote",
+
"id": "social.coves.feed.vote",
"defs": {
"main": {
"type": "record",
-
"description": "A vote (upvote or downvote) on a post or comment",
+
"description": "Record declaring a vote (upvote or downvote) on a post or comment. Requires authentication.",
"key": "tid",
"record": {
"type": "object",
···
"properties": {
"subject": {
"type": "ref",
-
"ref": "#strongRef",
+
"ref": "com.atproto.repo.strongRef",
"description": "Strong reference to the post or comment being voted on"
},
"direction": {
"type": "string",
-
"enum": ["up", "down"],
+
"knownValues": ["up", "down"],
"description": "Vote direction: up for upvote, down for downvote"
},
"createdAt": {
···
}
}
}
-
},
-
"strongRef": {
-
"type": "object",
-
"description": "Strong reference to a record (AT-URI + CID)",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the record"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the record content"
-
}
-
}
}
}
}
+8 -5
internal/db/migrations/013_create_votes_table.sql
···
-- Votes are indexed from the firehose after being written to user repositories
CREATE TABLE votes (
id BIGSERIAL PRIMARY KEY,
-
uri TEXT UNIQUE NOT NULL, -- AT-URI (at://voter_did/social.coves.interaction.vote/rkey)
+
uri TEXT UNIQUE NOT NULL, -- AT-URI (at://voter_did/social.coves.feed.vote/rkey)
cid TEXT NOT NULL, -- Content ID
rkey TEXT NOT NULL, -- Record key (TID)
voter_did TEXT NOT NULL, -- User who voted (from AT-URI repo field)
···
-- Timestamps
created_at TIMESTAMPTZ NOT NULL, -- Voter's timestamp from record
indexed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- When indexed by AppView
-
deleted_at TIMESTAMPTZ, -- Soft delete (for firehose delete events)
+
deleted_at TIMESTAMPTZ -- Soft delete (for firehose delete events)
-
-- Foreign keys
-
CONSTRAINT fk_voter FOREIGN KEY (voter_did) REFERENCES users(did) ON DELETE CASCADE
+
-- NO foreign key constraint on voter_did to allow out-of-order indexing from Jetstream
+
-- Vote events may arrive before user events, which is acceptable since:
+
-- 1. Votes are authenticated by the user's PDS (security maintained)
+
-- 2. Orphaned votes from never-indexed users are harmless
+
-- 3. This prevents race conditions in the firehose consumer
);
-- Indexes for common query patterns
···
-- Comment on table
COMMENT ON TABLE votes IS 'Votes indexed from user repositories via Jetstream firehose consumer';
-
COMMENT ON COLUMN votes.uri IS 'AT-URI in format: at://voter_did/social.coves.interaction.vote/rkey';
+
COMMENT ON COLUMN votes.uri IS 'AT-URI in format: at://voter_did/social.coves.feed.vote/rkey';
COMMENT ON COLUMN votes.subject_uri IS 'Strong reference to post/comment being voted on';
COMMENT ON INDEX unique_voter_subject_active IS 'Ensures one active vote per user per subject (soft delete aware)';
+9
tests/lexicon-test-data/feed/vote-valid.json
···
+
{
+
"$type": "social.coves.feed.vote",
+
"subject": {
+
"uri": "at://did:plc:alice123/social.coves.community.post/3kbx2n5p",
+
"cid": "bafyreigj3fwnwjuzr35k2kuzmb5dixxczrzjhqkr5srlqplsh6gq3bj3si"
+
},
+
"direction": "up",
+
"createdAt": "2025-01-09T15:00:00Z"
+
}
-5
tests/lexicon-test-data/interaction/vote-valid.json
···
-
{
-
"$type": "social.coves.interaction.vote",
-
"subject": "at://did:plc:alice123/social.coves.post.text/3kbx2n5p",
-
"createdAt": "2025-01-09T15:00:00Z"
-
}
+3 -2
internal/atproto/lexicon/social/coves/community/getMembers.json
···
},
"sort": {
"type": "string",
-
"enum": ["reputation", "recent", "alphabetical"],
-
"default": "reputation"
+
"knownValues": ["reputation", "recent", "alphabetical"],
+
"default": "reputation",
+
"maxLength": 64
}
}
},
-33
internal/atproto/lexicon/social/coves/actor/block.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.block",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A block relationship where one user blocks another",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["subject", "createdAt"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "did",
-
"description": "DID of the user being blocked"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the block was created"
-
},
-
"reason": {
-
"type": "string",
-
"maxGraphemes": 300,
-
"maxLength": 3000,
-
"description": "Optional reason for blocking"
-
}
-
}
-
}
-
}
-
}
-
}
-59
internal/atproto/lexicon/social/coves/actor/blockUser.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.blockUser",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Block another user",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "did",
-
"description": "DID of the user to block"
-
},
-
"reason": {
-
"type": "string",
-
"maxGraphemes": 300,
-
"maxLength": 3000,
-
"description": "Optional reason for blocking"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the created block record"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the created block record"
-
},
-
"existing": {
-
"type": "boolean",
-
"description": "True if user was already blocked"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "SubjectNotFound",
-
"description": "Subject user not found"
-
}
-
]
-
}
-
}
-
}
+139
internal/atproto/lexicon/social/coves/actor/defs.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.actor.defs",
+
"defs": {
+
"profileView": {
+
"type": "object",
+
"description": "Basic profile view with essential information",
+
"required": ["did"],
+
"properties": {
+
"did": {
+
"type": "string",
+
"format": "did"
+
},
+
"handle": {
+
"type": "string",
+
"format": "handle",
+
"description": "Current handle resolved from DID"
+
},
+
"displayName": {
+
"type": "string",
+
"maxGraphemes": 64,
+
"maxLength": 640
+
},
+
"avatar": {
+
"type": "string",
+
"format": "uri",
+
"description": "URL to avatar image"
+
}
+
}
+
},
+
"profileViewDetailed": {
+
"type": "object",
+
"description": "Detailed profile view with stats and viewer state",
+
"required": ["did"],
+
"properties": {
+
"did": {
+
"type": "string",
+
"format": "did"
+
},
+
"handle": {
+
"type": "string",
+
"format": "handle",
+
"description": "Current handle resolved from DID"
+
},
+
"displayName": {
+
"type": "string",
+
"maxGraphemes": 64,
+
"maxLength": 640
+
},
+
"bio": {
+
"type": "string",
+
"maxGraphemes": 256,
+
"maxLength": 2560
+
},
+
"bioFacets": {
+
"type": "array",
+
"description": "Rich text annotations for bio",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.richtext.facet"
+
}
+
},
+
"avatar": {
+
"type": "string",
+
"format": "uri",
+
"description": "URL to avatar image"
+
},
+
"banner": {
+
"type": "string",
+
"format": "uri",
+
"description": "URL to banner image"
+
},
+
"createdAt": {
+
"type": "string",
+
"format": "datetime"
+
},
+
"stats": {
+
"type": "ref",
+
"ref": "#profileStats",
+
"description": "Aggregated statistics"
+
},
+
"viewer": {
+
"type": "ref",
+
"ref": "#viewerState",
+
"description": "Viewer's relationship to this profile"
+
}
+
}
+
},
+
"profileStats": {
+
"type": "object",
+
"description": "Aggregated statistics for a user profile",
+
"properties": {
+
"postCount": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Total number of posts created"
+
},
+
"commentCount": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Total number of comments made"
+
},
+
"communityCount": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Number of communities subscribed to"
+
},
+
"reputation": {
+
"type": "integer",
+
"description": "Global reputation score"
+
},
+
"membershipCount": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Number of communities with membership status"
+
}
+
}
+
},
+
"viewerState": {
+
"type": "object",
+
"description": "The viewing user's relationship to this profile",
+
"properties": {
+
"blocked": {
+
"type": "boolean",
+
"description": "Whether the viewer has blocked this user"
+
},
+
"blockedBy": {
+
"type": "boolean",
+
"description": "Whether the viewer is blocked by this user"
+
},
+
"blockUri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the block record if viewer blocked this user"
+
}
+
}
+
}
+
}
+
}
-85
internal/atproto/lexicon/social/coves/actor/getSaved.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.getSaved",
-
"defs": {
-
"main": {
-
"type": "query",
-
"description": "Get all saved posts and comments for the authenticated user",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {
-
"limit": {
-
"type": "integer",
-
"minimum": 1,
-
"maximum": 100,
-
"default": 50,
-
"description": "Number of items to return"
-
},
-
"cursor": {
-
"type": "string",
-
"description": "Cursor for pagination"
-
},
-
"type": {
-
"type": "string",
-
"enum": ["post", "comment"],
-
"description": "Filter by content type (optional)"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["savedItems"],
-
"properties": {
-
"savedItems": {
-
"type": "array",
-
"description": "All saved items for the user",
-
"items": {
-
"type": "ref",
-
"ref": "#savedItemView"
-
}
-
},
-
"cursor": {
-
"type": "string",
-
"description": "Cursor for next page"
-
}
-
}
-
}
-
}
-
},
-
"savedItemView": {
-
"type": "object",
-
"required": ["uri", "subject", "type", "savedAt"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the saved record"
-
},
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the saved post or comment"
-
},
-
"type": {
-
"type": "string",
-
"enum": ["post", "comment"],
-
"description": "Type of content that was saved"
-
},
-
"savedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the item was saved"
-
},
-
"note": {
-
"type": "string",
-
"description": "Optional note about why this was saved"
-
}
-
}
-
}
-
}
-
}
-198
internal/atproto/lexicon/social/coves/actor/preferences.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.preferences",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "User preferences and settings",
-
"key": "literal:self",
-
"record": {
-
"type": "object",
-
"properties": {
-
"feedPreferences": {
-
"type": "ref",
-
"ref": "#feedPreferences"
-
},
-
"contentFiltering": {
-
"type": "ref",
-
"ref": "#contentFiltering"
-
},
-
"notificationSettings": {
-
"type": "ref",
-
"ref": "#notificationSettings"
-
},
-
"privacySettings": {
-
"type": "ref",
-
"ref": "#privacySettings"
-
},
-
"displayPreferences": {
-
"type": "ref",
-
"ref": "#displayPreferences"
-
}
-
}
-
}
-
},
-
"feedPreferences": {
-
"type": "object",
-
"description": "Feed and content preferences",
-
"properties": {
-
"defaultFeed": {
-
"type": "string",
-
"enum": ["home", "all"],
-
"default": "home"
-
},
-
"defaultSort": {
-
"type": "string",
-
"enum": ["hot", "new", "top"],
-
"default": "hot",
-
"description": "Default sort order for community feeds"
-
},
-
"showNSFW": {
-
"type": "boolean",
-
"default": false
-
},
-
"blurNSFW": {
-
"type": "boolean",
-
"default": true,
-
"description": "Blur NSFW content until clicked"
-
},
-
"autoplayVideos": {
-
"type": "boolean",
-
"default": false
-
},
-
"infiniteScroll": {
-
"type": "boolean",
-
"default": true
-
}
-
}
-
},
-
"contentFiltering": {
-
"type": "object",
-
"description": "Content filtering preferences",
-
"properties": {
-
"blockedTags": {
-
"type": "array",
-
"items": {
-
"type": "string"
-
},
-
"description": "Tags to filter out from feeds"
-
},
-
"blockedCommunities": {
-
"type": "array",
-
"items": {
-
"type": "string",
-
"format": "did"
-
},
-
"description": "Communities to filter out from /all feeds"
-
},
-
"mutedWords": {
-
"type": "array",
-
"items": {
-
"type": "string"
-
},
-
"description": "Words to filter out from content"
-
},
-
"languageFilter": {
-
"type": "array",
-
"items": {
-
"type": "string",
-
"format": "language"
-
},
-
"description": "Only show content in these languages"
-
}
-
}
-
},
-
"notificationSettings": {
-
"type": "object",
-
"description": "Notification preferences",
-
"properties": {
-
"postReplies": {
-
"type": "boolean",
-
"default": true
-
},
-
"commentReplies": {
-
"type": "boolean",
-
"default": true
-
},
-
"mentions": {
-
"type": "boolean",
-
"default": true
-
},
-
"upvotes": {
-
"type": "boolean",
-
"default": false
-
},
-
"newFollowers": {
-
"type": "boolean",
-
"default": true
-
},
-
"communityInvites": {
-
"type": "boolean",
-
"default": true
-
},
-
"moderatorNotifications": {
-
"type": "boolean",
-
"default": true,
-
"description": "Notifications for moderator actions in your communities"
-
}
-
}
-
},
-
"privacySettings": {
-
"type": "object",
-
"description": "Privacy preferences",
-
"properties": {
-
"profileVisibility": {
-
"type": "string",
-
"enum": ["public", "authenticated", "followers"],
-
"default": "public"
-
},
-
"showSubscriptions": {
-
"type": "boolean",
-
"default": true
-
},
-
"showSavedPosts": {
-
"type": "boolean",
-
"default": false
-
},
-
"showVoteHistory": {
-
"type": "boolean",
-
"default": false
-
},
-
"allowDMs": {
-
"type": "string",
-
"enum": ["everyone", "followers", "none"],
-
"default": "everyone"
-
}
-
}
-
},
-
"displayPreferences": {
-
"type": "object",
-
"description": "Display and UI preferences",
-
"properties": {
-
"theme": {
-
"type": "string",
-
"enum": ["light", "dark", "auto"],
-
"default": "auto"
-
},
-
"compactView": {
-
"type": "boolean",
-
"default": false
-
},
-
"showAvatars": {
-
"type": "boolean",
-
"default": true
-
},
-
"showThumbnails": {
-
"type": "boolean",
-
"default": true
-
},
-
"postsPerPage": {
-
"type": "integer",
-
"minimum": 10,
-
"maximum": 100,
-
"default": 25
-
}
-
}
-
}
-
}
-
}
-63
internal/atproto/lexicon/social/coves/actor/saveItem.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.saveItem",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Save a post or comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject", "type"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment to save"
-
},
-
"type": {
-
"type": "string",
-
"enum": ["post", "comment"],
-
"description": "Type of content being saved"
-
},
-
"note": {
-
"type": "string",
-
"maxLength": 300,
-
"description": "Optional note about why this was saved"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["uri", "cid"],
-
"properties": {
-
"uri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the created saved record"
-
},
-
"cid": {
-
"type": "string",
-
"format": "cid",
-
"description": "CID of the created saved record"
-
},
-
"existing": {
-
"type": "boolean",
-
"description": "True if item was already saved"
-
}
-
}
-
}
-
},
-
"errors": [
-
{
-
"name": "SubjectNotFound",
-
"description": "The post or comment to save was not found"
-
}
-
]
-
}
-
}
-
}
-37
internal/atproto/lexicon/social/coves/actor/saved.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.saved",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A saved post or comment",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["subject", "type", "createdAt"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment being saved"
-
},
-
"type": {
-
"type": "string",
-
"enum": ["post", "comment"],
-
"description": "Type of content being saved"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the item was saved"
-
},
-
"note": {
-
"type": "string",
-
"maxLength": 300,
-
"description": "Optional note about why this was saved"
-
}
-
}
-
}
-
}
-
}
-
}
-39
internal/atproto/lexicon/social/coves/actor/subscription.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.subscription",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A subscription to a community",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["community", "createdAt"],
-
"properties": {
-
"community": {
-
"type": "string",
-
"format": "at-identifier",
-
"description": "DID or handle of the community"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the subscription started"
-
},
-
"endedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the subscription ended (null if current)"
-
},
-
"contentVisibility": {
-
"type": "integer",
-
"minimum": 1,
-
"maximum": 5,
-
"default": 3,
-
"description": "Content visibility level (1=only best content, 5=all content)"
-
}
-
}
-
}
-
}
-
}
-
}
-37
internal/atproto/lexicon/social/coves/actor/unblockUser.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.unblockUser",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Unblock a previously blocked user",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "did",
-
"description": "DID of the user to unblock"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {}
-
}
-
},
-
"errors": [
-
{
-
"name": "NotBlocked",
-
"description": "User is not currently blocked"
-
}
-
]
-
}
-
}
-
}
-37
internal/atproto/lexicon/social/coves/actor/unsaveItem.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.actor.unsaveItem",
-
"defs": {
-
"main": {
-
"type": "procedure",
-
"description": "Unsave a previously saved post or comment",
-
"input": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"required": ["subject"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment to unsave"
-
}
-
}
-
}
-
},
-
"output": {
-
"encoding": "application/json",
-
"schema": {
-
"type": "object",
-
"properties": {}
-
}
-
},
-
"errors": [
-
{
-
"name": "NotSaved",
-
"description": "Item is not currently saved"
-
}
-
]
-
}
-
}
-
}
+1 -172
internal/atproto/lexicon/social/coves/actor/profile.json
···
"key": "literal:self",
"record": {
"type": "object",
-
"required": ["handle", "createdAt"],
+
"required": ["createdAt"],
"properties": {
-
"handle": {
-
"type": "string",
-
"format": "handle",
-
"maxLength": 253,
-
"description": "User's handle"
-
},
"displayName": {
"type": "string",
"maxGraphemes": 64,
···
"accept": ["image/png", "image/jpeg", "image/webp"],
"maxSize": 2000000
},
-
"verified": {
-
"type": "boolean",
-
"default": false,
-
"description": "Whether the user has completed phone verification"
-
},
-
"verifiedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the user was verified"
-
},
-
"verificationExpiresAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When verification expires"
-
},
-
"federatedFrom": {
-
"type": "string",
-
"knownValues": ["bluesky", "lemmy", "mastodon", "coves"],
-
"description": "Platform user federated from"
-
},
-
"federatedIdentity": {
-
"type": "ref",
-
"ref": "#federatedIdentity",
-
"description": "Identity information from federated platform"
-
},
-
"location": {
-
"type": "ref",
-
"ref": "#geoLocation"
-
},
"createdAt": {
"type": "string",
"format": "datetime"
-
},
-
"moderatedCommunities": {
-
"type": "array",
-
"description": "Communities the user currently moderates",
-
"items": {
-
"type": "string",
-
"format": "did"
-
}
-
},
-
"moderationHistory": {
-
"type": "array",
-
"description": "Historical record of all moderation roles",
-
"items": {
-
"type": "ref",
-
"ref": "#moderationRole"
-
}
-
},
-
"violations": {
-
"type": "array",
-
"description": "Record of rule violations across communities",
-
"items": {
-
"type": "ref",
-
"ref": "#violation"
-
}
}
}
}
-
},
-
"moderationRole": {
-
"type": "object",
-
"required": ["communityDid", "role", "startedAt"],
-
"properties": {
-
"communityDid": {
-
"type": "string",
-
"format": "did",
-
"description": "Community where moderation role was held"
-
},
-
"role": {
-
"type": "string",
-
"knownValues": ["moderator", "admin"],
-
"description": "Type of moderation role"
-
},
-
"startedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the role began"
-
},
-
"endedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the role ended (null if current)"
-
}
-
}
-
},
-
"violation": {
-
"type": "object",
-
"required": ["communityDid", "ruleViolated", "timestamp", "severity"],
-
"properties": {
-
"communityDid": {
-
"type": "string",
-
"format": "did",
-
"description": "Community where violation occurred"
-
},
-
"ruleViolated": {
-
"type": "string",
-
"description": "Description of the rule that was violated"
-
},
-
"timestamp": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the violation occurred"
-
},
-
"severity": {
-
"type": "string",
-
"knownValues": ["minor", "moderate", "major", "severe"],
-
"description": "Severity level of the violation"
-
},
-
"resolution": {
-
"type": "string",
-
"description": "How the violation was resolved"
-
},
-
"postUri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "Optional reference to the violating content"
-
}
-
}
-
},
-
"federatedIdentity": {
-
"type": "object",
-
"description": "Verified identity from a federated platform",
-
"required": ["did", "handle", "verifiedAt"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did",
-
"description": "Original DID from the federated platform"
-
},
-
"handle": {
-
"type": "string",
-
"maxLength": 253,
-
"description": "Original handle from the federated platform"
-
},
-
"verifiedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "When the federated identity was verified via OAuth"
-
},
-
"lastSyncedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "Last time profile data was synced from the federated platform"
-
},
-
"homePDS": {
-
"type": "string",
-
"description": "Home PDS server URL for the federated account"
-
}
-
}
-
},
-
"geoLocation": {
-
"type": "object",
-
"description": "Geographic location information",
-
"properties": {
-
"country": {
-
"type": "string",
-
"maxLength": 2,
-
"description": "ISO 3166-1 alpha-2 country code"
-
},
-
"region": {
-
"type": "string",
-
"maxLength": 128,
-
"description": "State/province/region name"
-
},
-
"displayName": {
-
"type": "string",
-
"maxLength": 256,
-
"description": "Human-readable location name"
-
}
-
}
}
}
}
+3 -83
internal/atproto/lexicon/social/coves/community/get.json
···
"output": {
"encoding": "application/json",
"schema": {
-
"type": "object",
-
"required": ["did", "profile"],
-
"properties": {
-
"did": {
-
"type": "string",
-
"format": "did"
-
},
-
"profile": {
-
"type": "ref",
-
"ref": "social.coves.community.profile"
-
},
-
"stats": {
-
"type": "ref",
-
"ref": "#communityStats"
-
},
-
"viewer": {
-
"type": "ref",
-
"ref": "#viewerState",
-
"description": "Viewer's relationship to this community"
-
}
-
}
-
}
-
}
-
},
-
"communityStats": {
-
"type": "object",
-
"required": ["subscriberCount", "memberCount", "postCount", "activePostersCount"],
-
"properties": {
-
"subscriberCount": {
-
"type": "integer",
-
"minimum": 0,
-
"description": "Number of users subscribed to this community"
-
},
-
"memberCount": {
-
"type": "integer",
-
"minimum": 0,
-
"description": "Number of users with membership status"
-
},
-
"postCount": {
-
"type": "integer",
-
"minimum": 0,
-
"description": "Total number of posts in this community"
-
},
-
"activePostersCount": {
-
"type": "integer",
-
"minimum": 0,
-
"description": "Number of unique posters in the last 30 days"
-
},
-
"moderatorCount": {
-
"type": "integer",
-
"minimum": 0,
-
"description": "Number of active moderators"
-
}
-
}
-
},
-
"viewerState": {
-
"type": "object",
-
"description": "The viewing user's relationship to this community",
-
"properties": {
-
"subscribed": {
-
"type": "boolean",
-
"description": "Whether the viewer is subscribed"
-
},
-
"subscriptionUri": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the subscription record if subscribed"
-
},
-
"member": {
-
"type": "boolean",
-
"description": "Whether the viewer has membership status (AppView-computed)"
-
},
-
"reputation": {
-
"type": "integer",
-
"description": "Viewer's reputation in this community"
-
},
-
"moderator": {
-
"type": "boolean",
-
"description": "Whether the viewer is a moderator"
-
},
-
"banned": {
-
"type": "boolean",
-
"description": "Whether the viewer is banned from this community"
+
"type": "ref",
+
"ref": "social.coves.community.defs#communityViewDetailed",
+
"description": "Detailed community view with stats and viewer state"
}
}
}
-5
tests/lexicon-test-data/actor/block-invalid-did.json
···
-
{
-
"$type": "social.coves.actor.block",
-
"subject": "not-a-valid-did",
-
"createdAt": "2025-01-05T09:15:00Z"
-
}
-6
tests/lexicon-test-data/actor/block-valid.json
···
-
{
-
"$type": "social.coves.actor.block",
-
"subject": "did:plc:blockeduser123",
-
"createdAt": "2025-01-05T09:15:00Z",
-
"reason": "Repeated harassment and spam"
-
}
-7
tests/lexicon-test-data/actor/preferences-invalid-enum.json
···
-
{
-
"$type": "social.coves.actor.preferences",
-
"feedPreferences": {
-
"defaultFeed": "invalid-feed-type",
-
"defaultSort": "hot"
-
}
-
}
-40
tests/lexicon-test-data/actor/preferences-valid.json
···
-
{
-
"$type": "social.coves.actor.preferences",
-
"feedPreferences": {
-
"defaultFeed": "home",
-
"defaultSort": "hot",
-
"showNSFW": false,
-
"blurNSFW": true,
-
"autoplayVideos": true,
-
"infiniteScroll": true
-
},
-
"contentFiltering": {
-
"blockedTags": ["politics", "spoilers"],
-
"blockedCommunities": ["did:plc:controversialcommunity"],
-
"mutedWords": ["spam", "scam"],
-
"languageFilter": ["en", "es"]
-
},
-
"notificationSettings": {
-
"postReplies": true,
-
"commentReplies": true,
-
"mentions": true,
-
"upvotes": false,
-
"newFollowers": true,
-
"communityInvites": true,
-
"moderatorNotifications": true
-
},
-
"privacySettings": {
-
"profileVisibility": "public",
-
"showSubscriptions": true,
-
"showSavedPosts": false,
-
"showVoteHistory": false,
-
"allowDMs": "followers"
-
},
-
"displayPreferences": {
-
"theme": "dark",
-
"compactView": false,
-
"showAvatars": true,
-
"showThumbnails": true,
-
"postsPerPage": 25
-
}
-
}
-6
tests/lexicon-test-data/actor/profile-invalid-handle-format.json
···
-
{
-
"$type": "social.coves.actor.profile",
-
"handle": "invalid handle with spaces",
-
"displayName": "Test User",
-
"createdAt": "2024-01-01T00:00:00Z"
-
}
-4
tests/lexicon-test-data/actor/profile-invalid-missing-handle.json
···
-
{
-
"$type": "social.coves.actor.profile",
-
"displayName": "Missing Required Fields"
-
}
-1
tests/lexicon-test-data/actor/profile-valid.json
···
{
"$type": "social.coves.actor.profile",
-
"handle": "alice.example.com",
"displayName": "Alice Johnson",
"bio": "Software developer passionate about open-source",
"createdAt": "2024-01-15T10:30:00Z"
+9 -46
internal/atproto/lexicon/social/coves/richtext/facet.json
···
},
"mention": {
"type": "object",
-
"description": "Facet feature for user or community mentions",
-
"required": ["$type", "did"],
+
"description": "Facet feature for mention of a user or community. The text is usually a handle with '@' (user) or '!' (community) prefix, but the facet reference is a DID.",
+
"required": ["did"],
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#mention"
-
},
"did": {
"type": "string",
"format": "did",
-
"description": "DID of the mentioned user (@) or community (!)"
-
},
-
"handle": {
-
"type": "string",
-
"description": "Handle at time of mention (may change)"
+
"description": "DID of the mentioned user or community"
}
}
},
"link": {
"type": "object",
-
"description": "Facet feature for hyperlinks",
-
"required": ["$type", "uri"],
+
"description": "Facet feature for a URL. The text URL may have been simplified or truncated, but the facet reference should be a complete URL.",
+
"required": ["uri"],
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#link"
-
},
"uri": {
"type": "string",
"format": "uri",
···
},
"bold": {
"type": "object",
-
"description": "Bold text formatting",
-
"required": ["$type"],
-
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#bold"
-
}
-
}
+
"description": "Bold text formatting"
},
"italic": {
"type": "object",
-
"description": "Italic text formatting",
-
"required": ["$type"],
-
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#italic"
-
}
-
}
+
"description": "Italic text formatting"
},
"strikethrough": {
"type": "object",
-
"description": "Strikethrough text formatting",
-
"required": ["$type"],
-
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#strikethrough"
-
}
-
}
+
"description": "Strikethrough text formatting"
},
"spoiler": {
"type": "object",
"description": "Hidden/spoiler text that requires user interaction to reveal",
-
"required": ["$type"],
"properties": {
-
"$type": {
-
"type": "string",
-
"const": "social.coves.richtext.facet#spoiler"
-
},
"reason": {
"type": "string",
"maxLength": 128,
+
"maxGraphemes": 32,
"description": "Optional explanation of what's hidden"
}
}
+3 -5
internal/atproto/lexicon/social/coves/richtext/facet_test.go
···
},
"features": [{
"$type": "social.coves.richtext.facet#mention",
-
"did": "did:plc:example123",
-
"handle": "alice.bsky.social"
+
"did": "did:plc:example123"
}]
}`,
wantErr: false,
···
name: "mention",
typeName: "social.coves.richtext.facet#mention",
feature: map[string]interface{}{
-
"$type": "social.coves.richtext.facet#mention",
-
"did": "did:plc:example123",
-
"handle": "alice.bsky.social",
+
"$type": "social.coves.richtext.facet#mention",
+
"did": "did:plc:example123",
},
},
{
+1
internal/atproto/lexicon/social/coves/community/profile.json
···
"name": {
"type": "string",
"maxLength": 64,
+
"maxGraphemes": 64,
"description": "Short community name (local part of handle)"
},
"displayName": {
+1 -1
internal/atproto/lexicon/social/coves/community/search.json
···
"type": "array",
"items": {
"type": "ref",
-
"ref": "social.coves.community.list#communityView"
+
"ref": "social.coves.community.defs#communityView"
}
},
"cursor": {
+1
internal/atproto/lexicon/social/coves/embed/images.json
···
"alt": {
"type": "string",
"maxLength": 1000,
+
"maxGraphemes": 1000,
"description": "Alt text for accessibility"
},
"aspectRatio": {
+1 -4
internal/atproto/lexicon/social/coves/embed/video.json
···
"alt": {
"type": "string",
"maxLength": 1000,
+
"maxGraphemes": 1000,
"description": "Alt text describing video content"
},
"duration": {
"type": "integer",
"minimum": 0,
"description": "Duration in seconds"
-
},
-
"aspectRatio": {
-
"type": "ref",
-
"ref": "social.coves.embed.image#aspectRatio"
}
}
}
-32
internal/atproto/lexicon/social/coves/federation/post.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.federation.post",
-
"defs": {
-
"main": {
-
"type": "object",
-
"description": "Reference to original federated post",
-
"required": ["platform", "uri"],
-
"properties": {
-
"platform": {
-
"type": "string",
-
"knownValues": ["bluesky", "lemmy", "atproto"],
-
"description": "Platform the post originated from"
-
},
-
"uri": {
-
"type": "string",
-
"format": "uri",
-
"description": "Original URI of the post (at:// URI for atproto platforms)"
-
},
-
"id": {
-
"type": "string",
-
"description": "Platform-specific post ID"
-
},
-
"originalCreatedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "Timestamp when originally posted on source platform"
-
}
-
}
-
}
-
}
-
}
+5 -5
internal/atproto/lexicon/social/coves/feed/getAll.json
···
"properties": {
"sort": {
"type": "string",
-
"enum": ["hot", "top", "new"],
+
"knownValues": ["hot", "top", "new"],
"default": "hot",
"description": "Sort order for global feed"
},
"postType": {
"type": "string",
-
"enum": ["text", "article", "image", "video", "microblog"],
+
"knownValues": ["text", "article", "image", "video", "microblog"],
"description": "Filter by a single post type (computed from embed structure)"
},
"postTypes": {
"type": "array",
"items": {
"type": "string",
-
"enum": ["text", "article", "image", "video", "microblog"]
+
"knownValues": ["text", "article", "image", "video", "microblog"]
},
"description": "Filter by multiple post types (computed from embed structure)"
},
"timeframe": {
"type": "string",
-
"enum": ["hour", "day", "week", "month", "year", "all"],
+
"knownValues": ["hour", "day", "week", "month", "year", "all"],
"default": "day",
"description": "Timeframe for top sorting (only applies when sort=top)"
},
···
"type": "array",
"items": {
"type": "ref",
-
"ref": "social.coves.feed.getTimeline#feedViewPost"
+
"ref": "social.coves.feed.defs#feedViewPost"
}
},
"cursor": {
+3 -3
internal/atproto/lexicon/social/coves/feed/getCommunity.json
···
},
"sort": {
"type": "string",
-
"enum": ["hot", "top", "new"],
+
"knownValues": ["hot", "top", "new"],
"default": "hot",
"description": "Sort order for community feed"
},
"timeframe": {
"type": "string",
-
"enum": ["hour", "day", "week", "month", "year", "all"],
+
"knownValues": ["hour", "day", "week", "month", "year", "all"],
"default": "day",
"description": "Timeframe for top sorting (only applies when sort=top)"
},
···
"type": "array",
"items": {
"type": "ref",
-
"ref": "social.coves.feed.getTimeline#feedViewPost"
+
"ref": "social.coves.feed.defs#feedViewPost"
}
},
"cursor": {
+2 -2
internal/atproto/lexicon/social/coves/feed/getDiscover.json
···
"properties": {
"sort": {
"type": "string",
-
"enum": ["hot", "top", "new"],
+
"knownValues": ["hot", "top", "new"],
"default": "hot",
"description": "Sort order for discover feed"
},
"timeframe": {
"type": "string",
-
"enum": ["hour", "day", "week", "month", "year", "all"],
+
"knownValues": ["hour", "day", "week", "month", "year", "all"],
"default": "day",
"description": "Timeframe for top sorting (only applies when sort=top)"
},
+2 -2
internal/atproto/lexicon/social/coves/feed/getTimeline.json
···
"properties": {
"sort": {
"type": "string",
-
"enum": ["hot", "top", "new"],
+
"knownValues": ["hot", "top", "new"],
"default": "hot",
"description": "Sort order for timeline feed"
},
"timeframe": {
"type": "string",
-
"enum": ["hour", "day", "week", "month", "year", "all"],
+
"knownValues": ["hour", "day", "week", "month", "year", "all"],
"default": "day",
"description": "Timeframe for top sorting (only applies when sort=top)"
},
-31
internal/atproto/lexicon/social/coves/interaction/share.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.share",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "Sharing a post to another community or platform",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["subject", "createdAt"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post being shared"
-
},
-
"toCommunity": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "Community being shared to (if applicable)"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
}
-
}
-
}
-33
internal/atproto/lexicon/social/coves/interaction/tag.json
···
-
{
-
"lexicon": 1,
-
"id": "social.coves.interaction.tag",
-
"defs": {
-
"main": {
-
"type": "record",
-
"description": "A tag applied to a post or comment",
-
"key": "tid",
-
"record": {
-
"type": "object",
-
"required": ["subject", "tag", "createdAt"],
-
"properties": {
-
"subject": {
-
"type": "string",
-
"format": "at-uri",
-
"description": "AT-URI of the post or comment being tagged"
-
},
-
"tag": {
-
"type": "string",
-
"minLength": 1,
-
"maxLength": 50,
-
"knownValues": ["helpful", "insightful", "spam", "hostile", "offtopic", "misleading"],
-
"description": "Predefined tag or custom community tag"
-
},
-
"createdAt": {
-
"type": "string",
-
"format": "datetime"
-
}
-
}
-
}
-
}
-
}
-
}
-9
tests/lexicon-test-data/community/moderator-invalid-permissions.json
···
-
{
-
"$type": "social.coves.community.moderator",
-
"user": "did:plc:moderator123",
-
"community": "did:plc:community123",
-
"role": "moderator",
-
"permissions": ["remove_posts", "invalid-permission"],
-
"createdAt": "2024-06-15T10:00:00Z",
-
"createdBy": "did:plc:owner123"
-
}
-5
tests/lexicon-test-data/interaction/share-valid-no-community.json
···
-
{
-
"$type": "social.coves.interaction.share",
-
"subject": "at://did:plc:originalauthor/social.coves.post.record/3k7a3dmb5bk2c",
-
"createdAt": "2025-01-09T17:00:00Z"
-
}
-6
tests/lexicon-test-data/interaction/share-valid.json
···
-
{
-
"$type": "social.coves.interaction.share",
-
"subject": "at://did:plc:originalauthor/social.coves.post.record/3k7a3dmb5bk2c",
-
"community": "did:plc:targetcommunity",
-
"createdAt": "2025-01-09T17:00:00Z"
-
}
-6
tests/lexicon-test-data/interaction/tag-invalid-empty.json
···
-
{
-
"$type": "social.coves.interaction.tag",
-
"subject": "at://did:plc:author123/social.coves.post.record/3k7a3dmb5bk2c",
-
"tag": "",
-
"createdAt": "2025-01-09T17:15:00Z"
-
}
-6
tests/lexicon-test-data/interaction/tag-valid-custom.json
···
-
{
-
"$type": "social.coves.interaction.tag",
-
"subject": "at://did:plc:author123/social.coves.post.record/3k7a3dmb5bk2c",
-
"tag": "beginner-friendly",
-
"createdAt": "2025-01-09T17:15:00Z"
-
}
-6
tests/lexicon-test-data/interaction/tag-valid-known.json
···
-
{
-
"$type": "social.coves.interaction.tag",
-
"subject": "at://did:plc:author123/social.coves.post.record/3k7a3dmb5bk2c",
-
"tag": "nsfw",
-
"createdAt": "2025-01-09T17:15:00Z"
-
}
+13
scripts/validate-schemas.sh
···
+
#!/bin/bash
+
# Validate all lexicon schemas and test data
+
+
set -e
+
+
echo "๐Ÿ” Validating Coves lexicon schemas..."
+
echo ""
+
+
# Run the Go validation tool
+
go run ./cmd/validate-lexicon/main.go
+
+
echo ""
+
echo "โœ… Schema validation complete!"
+63
internal/db/migrations/016_create_comments_table.sql
···
+
-- +goose Up
+
-- Create comments table for AppView indexing
+
-- Comments are indexed from the firehose after being written to user repositories
+
CREATE TABLE comments (
+
id BIGSERIAL PRIMARY KEY,
+
uri TEXT UNIQUE NOT NULL, -- AT-URI (at://commenter_did/social.coves.feed.comment/rkey)
+
cid TEXT NOT NULL, -- Content ID
+
rkey TEXT NOT NULL, -- Record key (TID)
+
commenter_did TEXT NOT NULL, -- User who commented (from AT-URI repo field)
+
+
-- Threading structure (reply references)
+
root_uri TEXT NOT NULL, -- Strong reference to original post (at://...)
+
root_cid TEXT NOT NULL, -- CID of root post (version pinning)
+
parent_uri TEXT NOT NULL, -- Strong reference to immediate parent (post or comment)
+
parent_cid TEXT NOT NULL, -- CID of parent (version pinning)
+
+
-- Content (content is required per lexicon, others optional)
+
content TEXT NOT NULL, -- Comment text (max 3000 graphemes, 30000 bytes)
+
content_facets JSONB, -- Rich text facets (social.coves.richtext.facet)
+
embed JSONB, -- Embedded content (images, quoted posts)
+
content_labels JSONB, -- Self-applied labels (com.atproto.label.defs#selfLabels)
+
langs TEXT[], -- Languages (ISO 639-1, max 3)
+
+
-- Timestamps
+
created_at TIMESTAMPTZ NOT NULL, -- Commenter's timestamp from record
+
indexed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- When indexed by AppView
+
deleted_at TIMESTAMPTZ, -- Soft delete (for firehose delete events)
+
+
-- Stats (denormalized for performance)
+
upvote_count INT NOT NULL DEFAULT 0, -- Comments can be voted on (per vote lexicon)
+
downvote_count INT NOT NULL DEFAULT 0,
+
score INT NOT NULL DEFAULT 0, -- upvote_count - downvote_count (for sorting)
+
reply_count INT NOT NULL DEFAULT 0 -- Number of direct replies to this comment
+
+
-- NO foreign key constraint on commenter_did to allow out-of-order indexing from Jetstream
+
-- Comment events may arrive before user events, which is acceptable since:
+
-- 1. Comments are authenticated by the user's PDS (security maintained)
+
-- 2. Orphaned comments from never-indexed users are harmless
+
-- 3. This prevents race conditions in the firehose consumer
+
);
+
+
-- Indexes for threading queries (most important for comment UX)
+
CREATE INDEX idx_comments_root ON comments(root_uri, created_at DESC) WHERE deleted_at IS NULL;
+
CREATE INDEX idx_comments_parent ON comments(parent_uri, created_at DESC) WHERE deleted_at IS NULL;
+
CREATE INDEX idx_comments_parent_score ON comments(parent_uri, score DESC, created_at DESC) WHERE deleted_at IS NULL;
+
+
-- Indexes for user queries
+
CREATE INDEX idx_comments_commenter ON comments(commenter_did, created_at DESC);
+
CREATE INDEX idx_comments_uri ON comments(uri);
+
+
-- Index for vote targeting (when votes target comments)
+
CREATE INDEX idx_comments_uri_active ON comments(uri) WHERE deleted_at IS NULL;
+
+
-- Comment on table
+
COMMENT ON TABLE comments IS 'Comments indexed from user repositories via Jetstream firehose consumer';
+
COMMENT ON COLUMN comments.uri IS 'AT-URI in format: at://commenter_did/social.coves.feed.comment/rkey';
+
COMMENT ON COLUMN comments.root_uri IS 'Strong reference to the original post that started the thread';
+
COMMENT ON COLUMN comments.parent_uri IS 'Strong reference to immediate parent (post or comment)';
+
COMMENT ON COLUMN comments.score IS 'Computed as upvote_count - downvote_count for ranking replies';
+
COMMENT ON COLUMN comments.content_labels IS 'Self-applied labels per com.atproto.label.defs#selfLabels (JSONB: {"values":[{"val":"nsfw","neg":false}]})';
+
+
-- +goose Down
+
DROP TABLE IF EXISTS comments CASCADE;
+125
internal/atproto/jetstream/comment_jetstream_connector.go
···
+
package jetstream
+
+
import (
+
"context"
+
"encoding/json"
+
"fmt"
+
"log"
+
"sync"
+
"time"
+
+
"github.com/gorilla/websocket"
+
)
+
+
// CommentJetstreamConnector handles WebSocket connection to Jetstream for comment events
+
type CommentJetstreamConnector struct {
+
consumer *CommentEventConsumer
+
wsURL string
+
}
+
+
// NewCommentJetstreamConnector creates a new Jetstream WebSocket connector for comment events
+
func NewCommentJetstreamConnector(consumer *CommentEventConsumer, wsURL string) *CommentJetstreamConnector {
+
return &CommentJetstreamConnector{
+
consumer: consumer,
+
wsURL: wsURL,
+
}
+
}
+
+
// Start begins consuming events from Jetstream
+
// Runs indefinitely, reconnecting on errors
+
func (c *CommentJetstreamConnector) Start(ctx context.Context) error {
+
log.Printf("Starting Jetstream comment consumer: %s", c.wsURL)
+
+
for {
+
select {
+
case <-ctx.Done():
+
log.Println("Jetstream comment consumer shutting down")
+
return ctx.Err()
+
default:
+
if err := c.connect(ctx); err != nil {
+
log.Printf("Jetstream comment connection error: %v. Retrying in 5s...", err)
+
time.Sleep(5 * time.Second)
+
continue
+
}
+
}
+
}
+
}
+
+
// connect establishes WebSocket connection and processes events
+
func (c *CommentJetstreamConnector) connect(ctx context.Context) error {
+
conn, _, err := websocket.DefaultDialer.DialContext(ctx, c.wsURL, nil)
+
if err != nil {
+
return fmt.Errorf("failed to connect to Jetstream: %w", err)
+
}
+
defer func() {
+
if closeErr := conn.Close(); closeErr != nil {
+
log.Printf("Failed to close WebSocket connection: %v", closeErr)
+
}
+
}()
+
+
log.Println("Connected to Jetstream (comment consumer)")
+
+
// Set read deadline to detect connection issues
+
if err := conn.SetReadDeadline(time.Now().Add(60 * time.Second)); err != nil {
+
log.Printf("Failed to set read deadline: %v", err)
+
}
+
+
// Set pong handler to keep connection alive
+
conn.SetPongHandler(func(string) error {
+
if err := conn.SetReadDeadline(time.Now().Add(60 * time.Second)); err != nil {
+
log.Printf("Failed to set read deadline in pong handler: %v", err)
+
}
+
return nil
+
})
+
+
// Start ping ticker
+
ticker := time.NewTicker(30 * time.Second)
+
defer ticker.Stop()
+
+
done := make(chan struct{})
+
var closeOnce sync.Once // Ensure done channel is only closed once
+
+
// Ping goroutine
+
go func() {
+
for {
+
select {
+
case <-ticker.C:
+
if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(10*time.Second)); err != nil {
+
log.Printf("Failed to send ping: %v", err)
+
closeOnce.Do(func() { close(done) })
+
return
+
}
+
case <-done:
+
return
+
}
+
}
+
}()
+
+
// Read loop
+
for {
+
select {
+
case <-done:
+
return fmt.Errorf("connection closed by ping failure")
+
default:
+
}
+
+
_, message, err := conn.ReadMessage()
+
if err != nil {
+
closeOnce.Do(func() { close(done) })
+
return fmt.Errorf("read error: %w", err)
+
}
+
+
// Parse Jetstream event
+
var event JetstreamEvent
+
if err := json.Unmarshal(message, &event); err != nil {
+
log.Printf("Failed to parse Jetstream event: %v", err)
+
continue
+
}
+
+
// Process event through consumer
+
if err := c.consumer.HandleEvent(ctx, &event); err != nil {
+
log.Printf("Failed to handle comment event: %v", err)
+
// Continue processing other events even if one fails
+
}
+
}
+
}
+221
internal/atproto/lexicon/social/coves/community/comment/defs.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.community.comment.defs",
+
"defs": {
+
"commentView": {
+
"type": "object",
+
"description": "Base view for a single comment with voting, stats, and viewer state",
+
"required": ["uri", "cid", "author", "record", "post", "content", "createdAt", "indexedAt", "stats"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the comment record"
+
},
+
"cid": {
+
"type": "string",
+
"format": "cid",
+
"description": "CID of the comment record"
+
},
+
"author": {
+
"type": "ref",
+
"ref": "social.coves.community.post.get#authorView",
+
"description": "Comment author information"
+
},
+
"record": {
+
"type": "unknown",
+
"description": "The actual comment record verbatim"
+
},
+
"post": {
+
"type": "ref",
+
"ref": "#postRef",
+
"description": "Reference to the parent post"
+
},
+
"parent": {
+
"type": "ref",
+
"ref": "#commentRef",
+
"description": "Reference to parent comment if this is a nested reply"
+
},
+
"content": {
+
"type": "string",
+
"description": "Comment text content"
+
},
+
"contentFacets": {
+
"type": "array",
+
"description": "Rich text annotations for mentions, links, formatting",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.richtext.facet"
+
}
+
},
+
"embed": {
+
"type": "union",
+
"description": "Embedded content in the comment (images or quoted post)",
+
"refs": [
+
"social.coves.embed.images#view",
+
"social.coves.embed.post#view"
+
]
+
},
+
"createdAt": {
+
"type": "string",
+
"format": "datetime",
+
"description": "When the comment was created"
+
},
+
"indexedAt": {
+
"type": "string",
+
"format": "datetime",
+
"description": "When this comment was indexed by the AppView"
+
},
+
"stats": {
+
"type": "ref",
+
"ref": "#commentStats",
+
"description": "Comment statistics (votes, replies)"
+
},
+
"viewer": {
+
"type": "ref",
+
"ref": "#commentViewerState",
+
"description": "Viewer-specific state (vote, saved, etc.)"
+
}
+
}
+
},
+
"threadViewComment": {
+
"type": "object",
+
"description": "Wrapper for threaded comment structure, similar to Bluesky's threadViewPost pattern",
+
"required": ["comment"],
+
"properties": {
+
"comment": {
+
"type": "ref",
+
"ref": "#commentView",
+
"description": "The comment itself"
+
},
+
"replies": {
+
"type": "array",
+
"description": "Nested replies to this comment",
+
"items": {
+
"type": "union",
+
"refs": ["#threadViewComment", "#notFoundComment", "#blockedComment"]
+
}
+
},
+
"hasMore": {
+
"type": "boolean",
+
"description": "True if more replies exist but are not included in this response"
+
}
+
}
+
},
+
"commentRef": {
+
"type": "object",
+
"description": "Reference to a comment record",
+
"required": ["uri", "cid"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the comment"
+
},
+
"cid": {
+
"type": "string",
+
"format": "cid",
+
"description": "CID of the comment record"
+
}
+
}
+
},
+
"postRef": {
+
"type": "object",
+
"description": "Reference to a post record",
+
"required": ["uri", "cid"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the post"
+
},
+
"cid": {
+
"type": "string",
+
"format": "cid",
+
"description": "CID of the post record"
+
}
+
}
+
},
+
"notFoundComment": {
+
"type": "object",
+
"description": "Comment was not found (deleted, never indexed, or invalid URI)",
+
"required": ["uri", "notFound"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the missing comment"
+
},
+
"notFound": {
+
"type": "boolean",
+
"const": true,
+
"description": "Always true for not found comments"
+
}
+
}
+
},
+
"blockedComment": {
+
"type": "object",
+
"description": "Comment is blocked due to viewer blocking author or moderation action",
+
"required": ["uri", "blocked"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the blocked comment"
+
},
+
"blocked": {
+
"type": "boolean",
+
"const": true,
+
"description": "Always true for blocked comments"
+
},
+
"blockedBy": {
+
"type": "string",
+
"knownValues": ["author", "moderator"],
+
"description": "What caused the block: viewer blocked author, or comment was removed by moderators"
+
}
+
}
+
},
+
"commentStats": {
+
"type": "object",
+
"description": "Statistics for a comment",
+
"required": ["upvotes", "downvotes", "score", "replyCount"],
+
"properties": {
+
"upvotes": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Number of upvotes"
+
},
+
"downvotes": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Number of downvotes"
+
},
+
"score": {
+
"type": "integer",
+
"description": "Calculated score (upvotes - downvotes)"
+
},
+
"replyCount": {
+
"type": "integer",
+
"minimum": 0,
+
"description": "Number of direct replies to this comment"
+
}
+
}
+
},
+
"commentViewerState": {
+
"type": "object",
+
"description": "Viewer-specific state for a comment",
+
"properties": {
+
"vote": {
+
"type": "string",
+
"knownValues": ["up", "down"],
+
"description": "Viewer's vote on this comment"
+
},
+
"voteUri": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the viewer's vote record"
+
}
+
}
+
}
+
}
+
}
+86
internal/atproto/lexicon/social/coves/community/comment/getComments.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.community.comment.getComments",
+
"defs": {
+
"main": {
+
"type": "query",
+
"description": "Get comments for a post with threading and sorting support. Supports hot/top/new sorting, configurable nesting depth, and pagination.",
+
"parameters": {
+
"type": "params",
+
"required": ["post"],
+
"properties": {
+
"post": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "AT-URI of the post to get comments for"
+
},
+
"sort": {
+
"type": "string",
+
"default": "hot",
+
"knownValues": ["hot", "top", "new"],
+
"description": "Sort order: hot (trending), top (highest score), new (most recent)"
+
},
+
"timeframe": {
+
"type": "string",
+
"knownValues": ["hour", "day", "week", "month", "year", "all"],
+
"description": "Timeframe for 'top' sort. Ignored for other sort types."
+
},
+
"depth": {
+
"type": "integer",
+
"default": 10,
+
"minimum": 0,
+
"maximum": 100,
+
"description": "Maximum reply nesting depth to return. 0 returns only top-level comments."
+
},
+
"limit": {
+
"type": "integer",
+
"default": 50,
+
"minimum": 1,
+
"maximum": 100,
+
"description": "Maximum number of top-level comments to return per page"
+
},
+
"cursor": {
+
"type": "string",
+
"description": "Pagination cursor from previous response"
+
}
+
}
+
},
+
"output": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["comments", "post"],
+
"properties": {
+
"comments": {
+
"type": "array",
+
"description": "Top-level comments with nested replies up to requested depth",
+
"items": {
+
"type": "ref",
+
"ref": "social.coves.community.comment.defs#threadViewComment"
+
}
+
},
+
"post": {
+
"type": "ref",
+
"ref": "social.coves.community.post.get#postView",
+
"description": "The post these comments belong to"
+
},
+
"cursor": {
+
"type": "string",
+
"description": "Pagination cursor for fetching next page of top-level comments"
+
}
+
}
+
}
+
},
+
"errors": [
+
{
+
"name": "NotFound",
+
"description": "Post not found"
+
},
+
{
+
"name": "InvalidRequest",
+
"description": "Invalid parameters (malformed URI, invalid sort/timeframe combination, etc.)"
+
}
+
]
+
}
+
}
+
}
+11
internal/core/comments/interfaces.go
···
// Returns map[commentURI]*Vote for efficient lookups
// Future: Used when votes table is implemented
GetVoteStateForComments(ctx context.Context, viewerDID string, commentURIs []string) (map[string]interface{}, error)
+
+
// ListByParentsBatch retrieves direct replies to multiple parents in a single query
+
// Returns map[parentURI][]*Comment grouped by parent
+
// Used to prevent N+1 queries when loading nested replies
+
// Limits results per parent to avoid memory exhaustion
+
ListByParentsBatch(
+
ctx context.Context,
+
parentURIs []string,
+
sort string,
+
limitPerParent int,
+
) (map[string][]*Comment, error)
}
+6 -6
tests/lexicon_validation_test.go
···
// Test specific cross-references that should work
crossRefs := map[string]string{
-
"social.coves.richtext.facet#byteSlice": "byteSlice definition in facet schema",
-
"social.coves.community.rules#rule": "rule definition in community rules",
-
"social.coves.actor.defs#profileView": "profileView definition in actor defs",
-
"social.coves.actor.defs#profileStats": "profileStats definition in actor defs",
-
"social.coves.actor.defs#viewerState": "viewerState definition in actor defs",
-
"social.coves.community.defs#communityView": "communityView definition in community defs",
+
"social.coves.richtext.facet#byteSlice": "byteSlice definition in facet schema",
+
"social.coves.community.rules#rule": "rule definition in community rules",
+
"social.coves.actor.defs#profileView": "profileView definition in actor defs",
+
"social.coves.actor.defs#profileStats": "profileStats definition in actor defs",
+
"social.coves.actor.defs#viewerState": "viewerState definition in actor defs",
+
"social.coves.community.defs#communityView": "communityView definition in community defs",
"social.coves.community.defs#communityStats": "communityStats definition in community defs",
}
+5
internal/atproto/lexicon/social/coves/community/post/get.json
···
"type": "string",
"format": "did"
},
+
"handle": {
+
"type": "string",
+
"format": "handle",
+
"description": "Current handle resolved from DID"
+
},
"name": {
"type": "string"
},
+200
internal/core/unfurl/circuit_breaker.go
···
+
package unfurl
+
+
import (
+
"fmt"
+
"log"
+
"sync"
+
"time"
+
)
+
+
// circuitState represents the state of a circuit breaker
+
type circuitState int
+
+
const (
+
stateClosed circuitState = iota // Normal operation
+
stateOpen // Circuit is open (provider failing)
+
stateHalfOpen // Testing if provider recovered
+
)
+
+
// circuitBreaker tracks failures per provider and stops trying failing providers
+
type circuitBreaker struct {
+
failures map[string]int
+
lastFailure map[string]time.Time
+
state map[string]circuitState
+
lastStateLog map[string]time.Time
+
failureThreshold int
+
openDuration time.Duration
+
mu sync.RWMutex
+
}
+
+
// newCircuitBreaker creates a circuit breaker with default settings
+
func newCircuitBreaker() *circuitBreaker {
+
return &circuitBreaker{
+
failureThreshold: 3, // Open after 3 consecutive failures
+
openDuration: 5 * time.Minute, // Keep open for 5 minutes
+
failures: make(map[string]int),
+
lastFailure: make(map[string]time.Time),
+
state: make(map[string]circuitState),
+
lastStateLog: make(map[string]time.Time),
+
}
+
}
+
+
// canAttempt checks if we should attempt to call this provider
+
// Returns true if circuit is closed or half-open (ready to retry)
+
func (cb *circuitBreaker) canAttempt(provider string) (bool, error) {
+
cb.mu.RLock()
+
defer cb.mu.RUnlock()
+
+
state := cb.getState(provider)
+
+
switch state {
+
case stateClosed:
+
return true, nil
+
case stateOpen:
+
// Check if we should transition to half-open
+
lastFail := cb.lastFailure[provider]
+
if time.Since(lastFail) > cb.openDuration {
+
// Transition to half-open (allow one retry)
+
cb.mu.RUnlock()
+
cb.mu.Lock()
+
cb.state[provider] = stateHalfOpen
+
cb.logStateChange(provider, stateHalfOpen)
+
cb.mu.Unlock()
+
cb.mu.RLock()
+
return true, nil
+
}
+
// Still in open period
+
failCount := cb.failures[provider]
+
nextRetry := lastFail.Add(cb.openDuration)
+
return false, fmt.Errorf(
+
"circuit breaker open for provider '%s' (failures: %d, next retry: %s)",
+
provider,
+
failCount,
+
nextRetry.Format("15:04:05"),
+
)
+
case stateHalfOpen:
+
return true, nil
+
default:
+
return true, nil
+
}
+
}
+
+
// recordSuccess records a successful unfurl, resetting failure count
+
func (cb *circuitBreaker) recordSuccess(provider string) {
+
cb.mu.Lock()
+
defer cb.mu.Unlock()
+
+
oldState := cb.getState(provider)
+
+
// Reset failure tracking
+
delete(cb.failures, provider)
+
delete(cb.lastFailure, provider)
+
cb.state[provider] = stateClosed
+
+
// Log recovery if we were in a failure state
+
if oldState != stateClosed {
+
cb.logStateChange(provider, stateClosed)
+
}
+
}
+
+
// recordFailure records a failed unfurl attempt
+
func (cb *circuitBreaker) recordFailure(provider string, err error) {
+
cb.mu.Lock()
+
defer cb.mu.Unlock()
+
+
// Increment failure count
+
cb.failures[provider]++
+
cb.lastFailure[provider] = time.Now()
+
+
failCount := cb.failures[provider]
+
+
// Check if we should open the circuit
+
if failCount >= cb.failureThreshold {
+
oldState := cb.getState(provider)
+
cb.state[provider] = stateOpen
+
if oldState != stateOpen {
+
log.Printf(
+
"[UNFURL-CIRCUIT] Opening circuit for provider '%s' after %d consecutive failures. Last error: %v",
+
provider,
+
failCount,
+
err,
+
)
+
cb.lastStateLog[provider] = time.Now()
+
}
+
} else {
+
log.Printf(
+
"[UNFURL-CIRCUIT] Failure %d/%d for provider '%s': %v",
+
failCount,
+
cb.failureThreshold,
+
provider,
+
err,
+
)
+
}
+
}
+
+
// getState returns the current state (must be called with lock held)
+
func (cb *circuitBreaker) getState(provider string) circuitState {
+
if state, exists := cb.state[provider]; exists {
+
return state
+
}
+
return stateClosed
+
}
+
+
// logStateChange logs state transitions (must be called with lock held)
+
// Debounced to avoid log spam (max once per minute per provider)
+
func (cb *circuitBreaker) logStateChange(provider string, newState circuitState) {
+
lastLog, exists := cb.lastStateLog[provider]
+
if exists && time.Since(lastLog) < time.Minute {
+
return // Don't spam logs
+
}
+
+
var stateStr string
+
switch newState {
+
case stateClosed:
+
stateStr = "CLOSED (recovered)"
+
case stateOpen:
+
stateStr = "OPEN (failing)"
+
case stateHalfOpen:
+
stateStr = "HALF-OPEN (testing)"
+
}
+
+
log.Printf("[UNFURL-CIRCUIT] Circuit for provider '%s' is now %s", provider, stateStr)
+
cb.lastStateLog[provider] = time.Now()
+
}
+
+
// getStats returns current circuit breaker stats (for debugging/monitoring)
+
func (cb *circuitBreaker) getStats() map[string]interface{} {
+
cb.mu.RLock()
+
defer cb.mu.RUnlock()
+
+
stats := make(map[string]interface{})
+
+
// Collect all providers with any activity (state, failures, or both)
+
providers := make(map[string]bool)
+
for provider := range cb.state {
+
providers[provider] = true
+
}
+
for provider := range cb.failures {
+
providers[provider] = true
+
}
+
+
for provider := range providers {
+
state := cb.getState(provider)
+
var stateStr string
+
switch state {
+
case stateClosed:
+
stateStr = "closed"
+
case stateOpen:
+
stateStr = "open"
+
case stateHalfOpen:
+
stateStr = "half-open"
+
}
+
+
stats[provider] = map[string]interface{}{
+
"state": stateStr,
+
"failures": cb.failures[provider],
+
"last_failure": cb.lastFailure[provider],
+
}
+
}
+
return stats
+
}
+175
internal/core/unfurl/circuit_breaker_test.go
···
+
package unfurl
+
+
import (
+
"fmt"
+
"testing"
+
"time"
+
)
+
+
func TestCircuitBreaker_Basic(t *testing.T) {
+
cb := newCircuitBreaker()
+
+
provider := "test-provider"
+
+
// Should start closed (allow attempts)
+
canAttempt, err := cb.canAttempt(provider)
+
if !canAttempt {
+
t.Errorf("Expected circuit to be closed initially, but got error: %v", err)
+
}
+
+
// Record success
+
cb.recordSuccess(provider)
+
canAttempt, _ = cb.canAttempt(provider)
+
if !canAttempt {
+
t.Error("Expected circuit to remain closed after success")
+
}
+
}
+
+
func TestCircuitBreaker_OpensAfterFailures(t *testing.T) {
+
cb := newCircuitBreaker()
+
provider := "failing-provider"
+
+
// Record failures up to threshold
+
for i := 0; i < cb.failureThreshold; i++ {
+
cb.recordFailure(provider, fmt.Errorf("test error %d", i))
+
}
+
+
// Circuit should now be open
+
canAttempt, err := cb.canAttempt(provider)
+
if canAttempt {
+
t.Error("Expected circuit to be open after threshold failures")
+
}
+
if err == nil {
+
t.Error("Expected error when circuit is open")
+
}
+
}
+
+
func TestCircuitBreaker_RecoveryAfterSuccess(t *testing.T) {
+
cb := newCircuitBreaker()
+
provider := "recovery-provider"
+
+
// Record some failures
+
cb.recordFailure(provider, fmt.Errorf("error 1"))
+
cb.recordFailure(provider, fmt.Errorf("error 2"))
+
+
// Record success - should reset failure count
+
cb.recordSuccess(provider)
+
+
// Should be able to attempt again
+
canAttempt, err := cb.canAttempt(provider)
+
if !canAttempt {
+
t.Errorf("Expected circuit to be closed after success, but got error: %v", err)
+
}
+
+
// Failure count should be reset
+
if count := cb.failures[provider]; count != 0 {
+
t.Errorf("Expected failure count to be reset to 0, got %d", count)
+
}
+
}
+
+
func TestCircuitBreaker_HalfOpenTransition(t *testing.T) {
+
cb := newCircuitBreaker()
+
cb.openDuration = 100 * time.Millisecond // Short duration for testing
+
provider := "half-open-provider"
+
+
// Open the circuit
+
for i := 0; i < cb.failureThreshold; i++ {
+
cb.recordFailure(provider, fmt.Errorf("error %d", i))
+
}
+
+
// Should be open
+
canAttempt, _ := cb.canAttempt(provider)
+
if canAttempt {
+
t.Error("Expected circuit to be open")
+
}
+
+
// Wait for open duration
+
time.Sleep(150 * time.Millisecond)
+
+
// Should transition to half-open and allow one attempt
+
canAttempt, err := cb.canAttempt(provider)
+
if !canAttempt {
+
t.Errorf("Expected circuit to transition to half-open after duration, but got error: %v", err)
+
}
+
+
// State should be half-open
+
cb.mu.RLock()
+
state := cb.state[provider]
+
cb.mu.RUnlock()
+
+
if state != stateHalfOpen {
+
t.Errorf("Expected state to be half-open, got %v", state)
+
}
+
}
+
+
func TestCircuitBreaker_MultipleProviders(t *testing.T) {
+
cb := newCircuitBreaker()
+
+
// Open circuit for provider A
+
for i := 0; i < cb.failureThreshold; i++ {
+
cb.recordFailure("providerA", fmt.Errorf("error"))
+
}
+
+
// Provider A should be blocked
+
canAttemptA, _ := cb.canAttempt("providerA")
+
if canAttemptA {
+
t.Error("Expected providerA circuit to be open")
+
}
+
+
// Provider B should still be open (independent circuits)
+
canAttemptB, err := cb.canAttempt("providerB")
+
if !canAttemptB {
+
t.Errorf("Expected providerB circuit to be closed, but got error: %v", err)
+
}
+
}
+
+
func TestCircuitBreaker_GetStats(t *testing.T) {
+
cb := newCircuitBreaker()
+
+
// Record some activity
+
cb.recordFailure("provider1", fmt.Errorf("error 1"))
+
cb.recordFailure("provider1", fmt.Errorf("error 2"))
+
+
stats := cb.getStats()
+
+
// Should have stats for providers with failures
+
if providerStats, ok := stats["provider1"]; !ok {
+
t.Error("Expected stats for provider1")
+
} else {
+
// Check that failure count is tracked
+
statsMap := providerStats.(map[string]interface{})
+
if failures, ok := statsMap["failures"].(int); !ok || failures != 2 {
+
t.Errorf("Expected 2 failures for provider1, got %v", statsMap["failures"])
+
}
+
}
+
+
// Provider that succeeds is cleaned up from state
+
cb.recordSuccess("provider2")
+
_ = cb.getStats()
+
// Provider2 should not be in stats (or have state "closed" with 0 failures)
+
}
+
+
func TestCircuitBreaker_FailureThresholdExact(t *testing.T) {
+
cb := newCircuitBreaker()
+
provider := "exact-threshold-provider"
+
+
// Record failures just below threshold
+
for i := 0; i < cb.failureThreshold-1; i++ {
+
cb.recordFailure(provider, fmt.Errorf("error %d", i))
+
}
+
+
// Should still be closed
+
canAttempt, err := cb.canAttempt(provider)
+
if !canAttempt {
+
t.Errorf("Expected circuit to be closed below threshold, but got error: %v", err)
+
}
+
+
// One more failure should open it
+
cb.recordFailure(provider, fmt.Errorf("final error"))
+
+
// Should now be open
+
canAttempt, _ = cb.canAttempt(provider)
+
if canAttempt {
+
t.Error("Expected circuit to be open at threshold")
+
}
+
}
+202
internal/core/unfurl/kagi_test.go
···
+
package unfurl
+
+
import (
+
"context"
+
"net/http"
+
"net/http/httptest"
+
"testing"
+
"time"
+
+
"github.com/stretchr/testify/assert"
+
"github.com/stretchr/testify/require"
+
)
+
+
func TestFetchKagiKite_Success(t *testing.T) {
+
// Mock Kagi HTML response
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head>
+
<title>FAA orders 10% flight cuts at 40 airports - Kagi News</title>
+
<meta property="og:title" content="FAA orders 10% flight cuts" />
+
<meta property="og:description" content="Flight restrictions announced" />
+
</head>
+
<body>
+
<img src="https://kagiproxy.com/img/DHdCvN_NqVDWU3UyoNZSv86b" alt="Airport runway" />
+
</body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
require.NoError(t, err)
+
assert.Equal(t, "article", result.Type)
+
assert.Equal(t, "FAA orders 10% flight cuts", result.Title)
+
assert.Equal(t, "Flight restrictions announced", result.Description)
+
assert.Contains(t, result.ThumbnailURL, "kagiproxy.com")
+
assert.Equal(t, "kagi", result.Provider)
+
assert.Equal(t, "kite.kagi.com", result.Domain)
+
}
+
+
func TestFetchKagiKite_NoImage(t *testing.T) {
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head><title>Test Story</title></head>
+
<body><p>No images here</p></body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
assert.Error(t, err)
+
assert.Nil(t, result)
+
assert.Contains(t, err.Error(), "no image found")
+
}
+
+
func TestFetchKagiKite_FallbackToTitle(t *testing.T) {
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head><title>Fallback Title</title></head>
+
<body>
+
<img src="https://kagiproxy.com/img/test123" />
+
</body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
require.NoError(t, err)
+
assert.Equal(t, "Fallback Title", result.Title)
+
assert.Contains(t, result.ThumbnailURL, "kagiproxy.com")
+
}
+
+
func TestFetchKagiKite_ImageWithAltText(t *testing.T) {
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head><title>News Story</title></head>
+
<body>
+
<img src="https://kagiproxy.com/img/xyz789" alt="This is the alt text description" />
+
</body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
require.NoError(t, err)
+
assert.Equal(t, "News Story", result.Title)
+
assert.Equal(t, "This is the alt text description", result.Description)
+
assert.Contains(t, result.ThumbnailURL, "kagiproxy.com")
+
}
+
+
func TestFetchKagiKite_HTTPError(t *testing.T) {
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.WriteHeader(http.StatusNotFound)
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
assert.Error(t, err)
+
assert.Nil(t, result)
+
assert.Contains(t, err.Error(), "HTTP 404")
+
}
+
+
func TestFetchKagiKite_Timeout(t *testing.T) {
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
time.Sleep(2 * time.Second)
+
w.WriteHeader(http.StatusOK)
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 100*time.Millisecond, "TestBot/1.0")
+
+
assert.Error(t, err)
+
assert.Nil(t, result)
+
}
+
+
func TestFetchKagiKite_MultipleImages_PicksSecond(t *testing.T) {
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head><title>Story with multiple images</title></head>
+
<body>
+
<img src="https://kagiproxy.com/img/first123" alt="First image (header/logo)" />
+
<img src="https://kagiproxy.com/img/second456" alt="Second image" />
+
</body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
require.NoError(t, err)
+
// We skip the first image (often a header/logo) and use the second
+
assert.Contains(t, result.ThumbnailURL, "second456")
+
assert.Equal(t, "Second image", result.Description)
+
}
+
+
func TestFetchKagiKite_OnlyNonKagiImages_NoMatch(t *testing.T) {
+
mockHTML := `<!DOCTYPE html>
+
<html>
+
<head><title>Story with non-Kagi images</title></head>
+
<body>
+
<img src="https://example.com/img/test.jpg" alt="External image" />
+
</body>
+
</html>`
+
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(mockHTML))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
+
result, err := fetchKagiKite(ctx, server.URL, 5*time.Second, "TestBot/1.0")
+
+
assert.Error(t, err)
+
assert.Nil(t, result)
+
assert.Contains(t, err.Error(), "no image found")
+
}
+269
internal/core/unfurl/opengraph_test.go
···
+
package unfurl
+
+
import (
+
"context"
+
"net/http"
+
"net/http/httptest"
+
"testing"
+
"time"
+
+
"github.com/stretchr/testify/assert"
+
"github.com/stretchr/testify/require"
+
)
+
+
func TestParseOpenGraph_ValidTags(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<meta property="og:title" content="Test Article Title" />
+
<meta property="og:description" content="This is a test description" />
+
<meta property="og:image" content="https://example.com/image.jpg" />
+
<meta property="og:url" content="https://example.com/canonical" />
+
</head>
+
<body>
+
<p>Some content</p>
+
</body>
+
</html>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
assert.Equal(t, "Test Article Title", og.Title)
+
assert.Equal(t, "This is a test description", og.Description)
+
assert.Equal(t, "https://example.com/image.jpg", og.Image)
+
assert.Equal(t, "https://example.com/canonical", og.URL)
+
}
+
+
func TestParseOpenGraph_MissingImage(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<meta property="og:title" content="Article Without Image" />
+
<meta property="og:description" content="No image tag" />
+
</head>
+
<body></body>
+
</html>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
assert.Equal(t, "Article Without Image", og.Title)
+
assert.Equal(t, "No image tag", og.Description)
+
assert.Empty(t, og.Image, "Image should be empty when not provided")
+
}
+
+
func TestParseOpenGraph_FallbackToTitle(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<title>Page Title Fallback</title>
+
<meta name="description" content="Meta description fallback" />
+
</head>
+
<body></body>
+
</html>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
assert.Equal(t, "Page Title Fallback", og.Title, "Should fall back to <title>")
+
assert.Equal(t, "Meta description fallback", og.Description, "Should fall back to meta description")
+
}
+
+
func TestParseOpenGraph_PreferOpenGraphOverFallback(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<title>Page Title</title>
+
<meta name="description" content="Meta description" />
+
<meta property="og:title" content="OpenGraph Title" />
+
<meta property="og:description" content="OpenGraph Description" />
+
</head>
+
<body></body>
+
</html>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
assert.Equal(t, "OpenGraph Title", og.Title, "Should prefer og:title")
+
assert.Equal(t, "OpenGraph Description", og.Description, "Should prefer og:description")
+
}
+
+
func TestParseOpenGraph_MalformedHTML(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<meta property="og:title" content="Still Works" />
+
<meta property="og:description" content="Even with broken tags
+
</head>
+
<body>
+
<p>Unclosed paragraph
+
</body>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
// Best-effort parsing should still extract what it can
+
assert.NotEmpty(t, og.Title, "Should extract title despite malformed HTML")
+
}
+
+
func TestParseOpenGraph_Empty(t *testing.T) {
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head></head>
+
<body></body>
+
</html>
+
`
+
+
og, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
assert.Empty(t, og.Title)
+
assert.Empty(t, og.Description)
+
assert.Empty(t, og.Image)
+
}
+
+
func TestFetchOpenGraph_Success(t *testing.T) {
+
// Create test server with OpenGraph metadata
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
assert.Contains(t, r.Header.Get("User-Agent"), "CovesBot")
+
+
html := `
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<meta property="og:title" content="Test News Article" />
+
<meta property="og:description" content="Breaking news story" />
+
<meta property="og:image" content="https://example.com/news.jpg" />
+
<meta property="og:url" content="https://example.com/article/123" />
+
</head>
+
<body><p>Article content</p></body>
+
</html>
+
`
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(html))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
result, err := fetchOpenGraph(ctx, server.URL, 10*time.Second, "CovesBot/1.0")
+
require.NoError(t, err)
+
require.NotNil(t, result)
+
+
assert.Equal(t, "Test News Article", result.Title)
+
assert.Equal(t, "Breaking news story", result.Description)
+
assert.Equal(t, "https://example.com/news.jpg", result.ThumbnailURL)
+
assert.Equal(t, "article", result.Type)
+
assert.Equal(t, "opengraph", result.Provider)
+
}
+
+
func TestFetchOpenGraph_HTTPError(t *testing.T) {
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
w.WriteHeader(http.StatusNotFound)
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
result, err := fetchOpenGraph(ctx, server.URL, 10*time.Second, "CovesBot/1.0")
+
require.Error(t, err)
+
assert.Nil(t, result)
+
assert.Contains(t, err.Error(), "404")
+
}
+
+
func TestFetchOpenGraph_Timeout(t *testing.T) {
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
time.Sleep(2 * time.Second)
+
w.WriteHeader(http.StatusOK)
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
result, err := fetchOpenGraph(ctx, server.URL, 100*time.Millisecond, "CovesBot/1.0")
+
require.Error(t, err)
+
assert.Nil(t, result)
+
}
+
+
func TestFetchOpenGraph_NoMetadata(t *testing.T) {
+
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
html := `<html><head></head><body><p>No metadata</p></body></html>`
+
w.Header().Set("Content-Type", "text/html")
+
w.WriteHeader(http.StatusOK)
+
_, _ = w.Write([]byte(html))
+
}))
+
defer server.Close()
+
+
ctx := context.Background()
+
result, err := fetchOpenGraph(ctx, server.URL, 10*time.Second, "CovesBot/1.0")
+
require.NoError(t, err)
+
require.NotNil(t, result)
+
+
// Should still return a result with domain
+
assert.Equal(t, "article", result.Type)
+
assert.Equal(t, "opengraph", result.Provider)
+
assert.NotEmpty(t, result.Domain)
+
}
+
+
func TestIsOEmbedProvider(t *testing.T) {
+
tests := []struct {
+
url string
+
expected bool
+
}{
+
{"https://streamable.com/abc123", true},
+
{"https://www.youtube.com/watch?v=test", true},
+
{"https://youtu.be/test", true},
+
{"https://reddit.com/r/test/comments/123", true},
+
{"https://www.reddit.com/r/test/comments/123", true},
+
{"https://example.com/article", false},
+
{"https://news.ycombinator.com/item?id=123", false},
+
{"https://kite.kagi.com/search?q=test", false},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.url, func(t *testing.T) {
+
result := isOEmbedProvider(tt.url)
+
assert.Equal(t, tt.expected, result, "URL: %s", tt.url)
+
})
+
}
+
}
+
+
func TestIsSupported(t *testing.T) {
+
tests := []struct {
+
url string
+
expected bool
+
}{
+
{"https://example.com", true},
+
{"http://example.com", true},
+
{"https://news.site.com/article", true},
+
{"ftp://example.com", false},
+
{"not-a-url", false},
+
{"", false},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.url, func(t *testing.T) {
+
result := isSupported(tt.url)
+
assert.Equal(t, tt.expected, result, "URL: %s", tt.url)
+
})
+
}
+
}
+
+
func TestGetAttr(t *testing.T) {
+
html := `<meta property="og:title" content="Test Title" name="test" />`
+
doc, err := parseOpenGraph(html)
+
require.NoError(t, err)
+
+
// This is a simple test to verify the helper function works
+
// The actual usage is tested in the parseOpenGraph tests
+
assert.NotNil(t, doc)
+
}
+170
internal/core/unfurl/service.go
···
+
package unfurl
+
+
import (
+
"context"
+
"fmt"
+
"log"
+
"time"
+
)
+
+
// Service handles URL unfurling with caching
+
type Service interface {
+
UnfurlURL(ctx context.Context, url string) (*UnfurlResult, error)
+
IsSupported(url string) bool
+
}
+
+
type service struct {
+
repo Repository
+
circuitBreaker *circuitBreaker
+
userAgent string
+
timeout time.Duration
+
cacheTTL time.Duration
+
}
+
+
// NewService creates a new unfurl service
+
func NewService(repo Repository, opts ...ServiceOption) Service {
+
s := &service{
+
repo: repo,
+
timeout: 10 * time.Second,
+
userAgent: "CovesBot/1.0 (+https://coves.social)",
+
cacheTTL: 24 * time.Hour,
+
circuitBreaker: newCircuitBreaker(),
+
}
+
+
for _, opt := range opts {
+
opt(s)
+
}
+
+
return s
+
}
+
+
// ServiceOption configures the service
+
type ServiceOption func(*service)
+
+
// WithTimeout sets the HTTP timeout for oEmbed requests
+
func WithTimeout(timeout time.Duration) ServiceOption {
+
return func(s *service) {
+
s.timeout = timeout
+
}
+
}
+
+
// WithUserAgent sets the User-Agent header for oEmbed requests
+
func WithUserAgent(userAgent string) ServiceOption {
+
return func(s *service) {
+
s.userAgent = userAgent
+
}
+
}
+
+
// WithCacheTTL sets the cache TTL
+
func WithCacheTTL(ttl time.Duration) ServiceOption {
+
return func(s *service) {
+
s.cacheTTL = ttl
+
}
+
}
+
+
// IsSupported returns true if we can unfurl this URL
+
func (s *service) IsSupported(url string) bool {
+
return isSupported(url)
+
}
+
+
// UnfurlURL fetches metadata for a URL (with caching)
+
func (s *service) UnfurlURL(ctx context.Context, urlStr string) (*UnfurlResult, error) {
+
// 1. Check cache first
+
cached, err := s.repo.Get(ctx, urlStr)
+
if err == nil && cached != nil {
+
log.Printf("[UNFURL] Cache hit for %s (provider: %s)", urlStr, cached.Provider)
+
return cached, nil
+
}
+
+
// 2. Check if we support this URL
+
if !isSupported(urlStr) {
+
return nil, fmt.Errorf("unsupported URL: %s", urlStr)
+
}
+
+
var result *UnfurlResult
+
domain := extractDomain(urlStr)
+
+
// 3. Smart routing: Special handling for Kagi Kite (client-side rendered, no og:image tags)
+
if domain == "kite.kagi.com" {
+
provider := "kagi"
+
+
// Check circuit breaker
+
canAttempt, err := s.circuitBreaker.canAttempt(provider)
+
if !canAttempt {
+
log.Printf("[UNFURL] Skipping %s due to circuit breaker: %v", urlStr, err)
+
return nil, err
+
}
+
+
log.Printf("[UNFURL] Cache miss for %s, fetching via Kagi parser...", urlStr)
+
result, err = fetchKagiKite(ctx, urlStr, s.timeout, s.userAgent)
+
if err != nil {
+
s.circuitBreaker.recordFailure(provider, err)
+
return nil, err
+
}
+
+
s.circuitBreaker.recordSuccess(provider)
+
+
// Cache result
+
if cacheErr := s.repo.Set(ctx, urlStr, result, s.cacheTTL); cacheErr != nil {
+
log.Printf("[UNFURL] Warning: failed to cache result: %v", cacheErr)
+
}
+
return result, nil
+
}
+
+
// 4. Check if this is a known oEmbed provider
+
if isOEmbedProvider(urlStr) {
+
provider := domain // Use domain as provider name (e.g., "streamable.com", "youtube.com")
+
+
// Check circuit breaker
+
canAttempt, err := s.circuitBreaker.canAttempt(provider)
+
if !canAttempt {
+
log.Printf("[UNFURL] Skipping %s due to circuit breaker: %v", urlStr, err)
+
return nil, err
+
}
+
+
log.Printf("[UNFURL] Cache miss for %s, fetching from oEmbed...", urlStr)
+
+
// Fetch from oEmbed provider
+
oembed, err := fetchOEmbed(ctx, urlStr, s.timeout, s.userAgent)
+
if err != nil {
+
s.circuitBreaker.recordFailure(provider, err)
+
return nil, fmt.Errorf("failed to fetch oEmbed data: %w", err)
+
}
+
+
s.circuitBreaker.recordSuccess(provider)
+
+
// Convert to UnfurlResult
+
result = mapOEmbedToResult(oembed, urlStr)
+
} else {
+
provider := "opengraph"
+
+
// Check circuit breaker
+
canAttempt, err := s.circuitBreaker.canAttempt(provider)
+
if !canAttempt {
+
log.Printf("[UNFURL] Skipping %s due to circuit breaker: %v", urlStr, err)
+
return nil, err
+
}
+
+
log.Printf("[UNFURL] Cache miss for %s, fetching via OpenGraph...", urlStr)
+
+
// Fetch via OpenGraph
+
result, err = fetchOpenGraph(ctx, urlStr, s.timeout, s.userAgent)
+
if err != nil {
+
s.circuitBreaker.recordFailure(provider, err)
+
return nil, fmt.Errorf("failed to fetch OpenGraph data: %w", err)
+
}
+
+
s.circuitBreaker.recordSuccess(provider)
+
}
+
+
// 5. Store in cache
+
if cacheErr := s.repo.Set(ctx, urlStr, result, s.cacheTTL); cacheErr != nil {
+
// Log but don't fail - cache is best-effort
+
log.Printf("[UNFURL] Warning: Failed to cache result for %s: %v", urlStr, cacheErr)
+
}
+
+
log.Printf("[UNFURL] Successfully unfurled %s (provider: %s, type: %s)",
+
urlStr, result.Provider, result.Type)
+
+
return result, nil
+
}
+27
internal/core/unfurl/types.go
···
+
package unfurl
+
+
import "time"
+
+
// UnfurlResult represents the result of unfurling a URL
+
type UnfurlResult struct {
+
Type string `json:"type"` // "video", "article", "image", "website"
+
URI string `json:"uri"` // Original URL
+
Title string `json:"title"` // Page/video title
+
Description string `json:"description"` // Page/video description
+
ThumbnailURL string `json:"thumbnailUrl"` // Preview image URL
+
Provider string `json:"provider"` // "streamable", "youtube", "reddit"
+
Domain string `json:"domain"` // Domain of the URL
+
Width int `json:"width"` // Media width (if applicable)
+
Height int `json:"height"` // Media height (if applicable)
+
}
+
+
// CacheEntry represents a cached unfurl result with metadata
+
type CacheEntry struct {
+
FetchedAt time.Time `db:"fetched_at"`
+
ExpiresAt time.Time `db:"expires_at"`
+
CreatedAt time.Time `db:"created_at"`
+
ThumbnailURL *string `db:"thumbnail_url"`
+
URL string `db:"url"`
+
Provider string `db:"provider"`
+
Metadata UnfurlResult `db:"metadata"`
+
}
+14
internal/core/unfurl/errors.go
···
+
package unfurl
+
+
import "errors"
+
+
// Sentinel errors returned by the unfurl cache layer.

// ErrNotFound signals a missing or expired cache entry.
var ErrNotFound = errors.New("unfurl cache entry not found or expired")

// ErrInvalidURL signals a URL that could not be accepted.
var ErrInvalidURL = errors.New("invalid URL")

// ErrInvalidTTL signals a non-positive cache TTL.
var ErrInvalidTTL = errors.New("invalid TTL: must be positive")
+19
internal/core/unfurl/interfaces.go
···
+
package unfurl
+
+
import (
+
"context"
+
"time"
+
)
+
+
// Repository defines the interface for unfurl cache persistence
+
type Repository interface {
+
// Get retrieves a cached unfurl result for the given URL.
+
// Returns nil, nil if not found or expired (not an error condition).
+
// Returns error only on database failures.
+
Get(ctx context.Context, url string) (*UnfurlResult, error)
+
+
// Set stores an unfurl result in the cache with the specified TTL.
+
// If an entry already exists for the URL, it will be updated.
+
// The expires_at is calculated as NOW() + ttl.
+
Set(ctx context.Context, url string, result *UnfurlResult, ttl time.Duration) error
+
}
+117
internal/core/unfurl/repository.go
···
+
package unfurl
+
+
import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"time"
)
+
+
type postgresUnfurlRepo struct {
+
db *sql.DB
+
}
+
+
// NewRepository creates a new PostgreSQL unfurl cache repository
+
func NewRepository(db *sql.DB) Repository {
+
return &postgresUnfurlRepo{db: db}
+
}
+
+
// Get retrieves a cached unfurl result for the given URL.
+
// Returns nil, nil if not found or expired (not an error condition).
+
// Returns error only on database failures.
+
func (r *postgresUnfurlRepo) Get(ctx context.Context, url string) (*UnfurlResult, error) {
+
query := `
+
SELECT metadata, thumbnail_url, provider
+
FROM unfurl_cache
+
WHERE url = $1 AND expires_at > NOW()
+
`
+
+
var metadataJSON []byte
+
var thumbnailURL sql.NullString
+
var provider string
+
+
err := r.db.QueryRowContext(ctx, query, url).Scan(&metadataJSON, &thumbnailURL, &provider)
+
if err == sql.ErrNoRows {
+
// Not found or expired is not an error
+
return nil, nil
+
}
+
if err != nil {
+
return nil, fmt.Errorf("failed to get unfurl cache entry: %w", err)
+
}
+
+
// Unmarshal metadata JSONB to UnfurlResult
+
var result UnfurlResult
+
if err := json.Unmarshal(metadataJSON, &result); err != nil {
+
return nil, fmt.Errorf("failed to unmarshal metadata: %w", err)
+
}
+
+
// Ensure provider and thumbnailURL are set (may not be in metadata JSON)
+
result.Provider = provider
+
if thumbnailURL.Valid {
+
result.ThumbnailURL = thumbnailURL.String
+
}
+
+
return &result, nil
+
}
+
+
// Set stores an unfurl result in the cache with the specified TTL.
+
// If an entry already exists for the URL, it will be updated.
+
// The expires_at is calculated as NOW() + ttl.
+
func (r *postgresUnfurlRepo) Set(ctx context.Context, url string, result *UnfurlResult, ttl time.Duration) error {
+
// Marshal UnfurlResult to JSON for metadata column
+
metadataJSON, err := json.Marshal(result)
+
if err != nil {
+
return fmt.Errorf("failed to marshal metadata: %w", err)
+
}
+
+
// Store thumbnail_url separately for potential queries
+
var thumbnailURL sql.NullString
+
if result.ThumbnailURL != "" {
+
thumbnailURL.String = result.ThumbnailURL
+
thumbnailURL.Valid = true
+
}
+
+
// Convert Go duration to PostgreSQL interval string
+
// e.g., "1 hour", "24 hours", "7 days"
+
intervalStr := formatInterval(ttl)
+
+
query := `
+
INSERT INTO unfurl_cache (url, provider, metadata, thumbnail_url, expires_at)
+
VALUES ($1, $2, $3, $4, NOW() + $5::interval)
+
ON CONFLICT (url) DO UPDATE
+
SET provider = EXCLUDED.provider,
+
metadata = EXCLUDED.metadata,
+
thumbnail_url = EXCLUDED.thumbnail_url,
+
expires_at = EXCLUDED.expires_at,
+
fetched_at = NOW()
+
`
+
+
_, err = r.db.ExecContext(ctx, query, url, result.Provider, metadataJSON, thumbnailURL, intervalStr)
+
if err != nil {
+
return fmt.Errorf("failed to insert/update unfurl cache entry: %w", err)
+
}
+
+
return nil
+
}
+
+
// formatInterval converts a Go duration to a PostgreSQL interval string
+
// PostgreSQL accepts intervals like "1 hour", "24 hours", "7 days"
+
func formatInterval(d time.Duration) string {
+
seconds := int64(d.Seconds())
+
+
// Convert to appropriate unit for readability
+
switch {
+
case seconds >= 86400: // >= 1 day
+
days := seconds / 86400
+
return fmt.Sprintf("%d days", days)
+
case seconds >= 3600: // >= 1 hour
+
hours := seconds / 3600
+
return fmt.Sprintf("%d hours", hours)
+
case seconds >= 60: // >= 1 minute
+
minutes := seconds / 60
+
return fmt.Sprintf("%d minutes", minutes)
+
default:
+
return fmt.Sprintf("%d seconds", seconds)
+
}
+
}
+23
internal/db/migrations/017_create_unfurl_cache.sql
···
+
-- +goose Up
+
CREATE TABLE unfurl_cache (
+
url TEXT PRIMARY KEY,
+
provider TEXT NOT NULL,
+
metadata JSONB NOT NULL,
+
thumbnail_url TEXT,
+
fetched_at TIMESTAMP NOT NULL DEFAULT NOW(),
+
expires_at TIMESTAMP NOT NULL,
+
created_at TIMESTAMP NOT NULL DEFAULT NOW()
+
);
+
+
CREATE INDEX idx_unfurl_cache_expires ON unfurl_cache(expires_at);
+
+
COMMENT ON TABLE unfurl_cache IS 'Cache for oEmbed/URL unfurl results to reduce external API calls';
+
COMMENT ON COLUMN unfurl_cache.url IS 'The URL that was unfurled (primary key)';
+
COMMENT ON COLUMN unfurl_cache.provider IS 'Provider name (streamable, youtube, reddit, etc.)';
+
COMMENT ON COLUMN unfurl_cache.metadata IS 'Full unfurl result as JSON (title, description, type, etc.)';
+
COMMENT ON COLUMN unfurl_cache.thumbnail_url IS 'URL of the thumbnail image';
+
COMMENT ON COLUMN unfurl_cache.expires_at IS 'When this cache entry should be refetched (TTL-based)';
+
+
-- +goose Down
+
DROP INDEX IF EXISTS idx_unfurl_cache_expires;
+
DROP TABLE IF EXISTS unfurl_cache;
+9
internal/core/blobs/types.go
···
+
package blobs
+
+
// BlobRef represents a blob reference for atproto records
+
type BlobRef struct {
+
Type string `json:"$type"`
+
Ref map[string]string `json:"ref"`
+
MimeType string `json:"mimeType"`
+
Size int `json:"size"`
+
}
+7 -1
internal/core/posts/interfaces.go
···
package posts
-
import "context"
+
import (
+
"context"
+
)
+
+
// Service constructor accepts optional blobs.Service and unfurl.Service for embed enhancement.
+
// When unfurlService is provided, external embeds will be automatically enriched with metadata.
+
// When blobService is provided, thumbnails from unfurled URLs will be uploaded as blobs.
// Service defines the business logic interface for posts
// Coordinates between Repository, community service, and PDS
+81
internal/core/posts/blob_transform.go
···
+
package posts
+
+
import (
+
"fmt"
+
)
+
+
// TransformBlobRefsToURLs transforms all blob references in a PostView to PDS URLs
+
// This modifies the Embed field in-place, converting blob refs to direct URLs
+
// The transformation only affects external embeds with thumbnail blobs
+
func TransformBlobRefsToURLs(postView *PostView) {
+
if postView == nil || postView.Embed == nil {
+
return
+
}
+
+
// Get community PDS URL from post view
+
if postView.Community == nil || postView.Community.PDSURL == "" {
+
return // Cannot transform without PDS URL
+
}
+
+
communityDID := postView.Community.DID
+
pdsURL := postView.Community.PDSURL
+
+
// Check if embed is a map (should be for external embeds)
+
embedMap, ok := postView.Embed.(map[string]interface{})
+
if !ok {
+
return
+
}
+
+
// Check embed type
+
embedType, ok := embedMap["$type"].(string)
+
if !ok {
+
return
+
}
+
+
// Only transform external embeds
+
if embedType == "social.coves.embed.external" {
+
if external, ok := embedMap["external"].(map[string]interface{}); ok {
+
transformThumbToURL(external, communityDID, pdsURL)
+
}
+
}
+
}
+
+
// transformThumbToURL rewrites external["thumb"] in place, replacing an
// atproto blob reference with a direct PDS getBlob URL.
//
// The rewrite is skipped whenever the thumb is absent, already a URL
// string, or not shaped like a blob ref carrying a non-empty "$link" CID;
// in those cases the map is left untouched.
func transformThumbToURL(external map[string]interface{}, communityDID, pdsURL string) {
	raw, present := external["thumb"]
	if !present {
		return
	}

	// A string thumb means the transformation already happened (or the
	// record stored a plain URL); leave it alone.
	if _, alreadyURL := raw.(string); alreadyURL {
		return
	}

	blobRef, isMap := raw.(map[string]interface{})
	if !isMap {
		return
	}

	refField, hasRef := blobRef["ref"].(map[string]interface{})
	if !hasRef {
		return
	}

	cid, hasLink := refField["$link"].(string)
	if !hasLink || cid == "" {
		return
	}

	// Point at the community PDS blob endpoint:
	// {pds_url}/xrpc/com.atproto.sync.getBlob?did={community_did}&cid={cid}
	external["thumb"] = fmt.Sprintf(
		"%s/xrpc/com.atproto.sync.getBlob?did=%s&cid=%s",
		pdsURL, communityDID, cid,
	)
}
+312
internal/core/posts/blob_transform_test.go
···
+
package posts
+
+
import (
+
"testing"
+
+
"github.com/stretchr/testify/assert"
+
"github.com/stretchr/testify/require"
+
)
+
+
func TestTransformBlobRefsToURLs(t *testing.T) {
+
t.Run("transforms external embed thumb from blob to URL", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
},
+
"mimeType": "image/jpeg",
+
"size": 52813,
+
},
+
},
+
},
+
}
+
+
TransformBlobRefsToURLs(post)
+
+
// Verify embed is still a map
+
embedMap, ok := post.Embed.(map[string]interface{})
+
require.True(t, ok, "embed should still be a map")
+
+
// Verify external is still a map
+
external, ok := embedMap["external"].(map[string]interface{})
+
require.True(t, ok, "external should be a map")
+
+
// Verify thumb is now a URL string
+
thumbURL, ok := external["thumb"].(string)
+
require.True(t, ok, "thumb should be a string URL")
+
assert.Equal(t,
+
"http://localhost:3001/xrpc/com.atproto.sync.getBlob?did=did:plc:testcommunity&cid=bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
thumbURL)
+
})
+
+
t.Run("handles missing thumb gracefully", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
// No thumb field
+
},
+
},
+
}
+
+
// Should not panic
+
TransformBlobRefsToURLs(post)
+
+
// Verify external is unchanged
+
embedMap := post.Embed.(map[string]interface{})
+
external := embedMap["external"].(map[string]interface{})
+
_, hasThumb := external["thumb"]
+
assert.False(t, hasThumb, "thumb should not be added")
+
})
+
+
t.Run("handles already-transformed URL thumb", func(t *testing.T) {
+
expectedURL := "http://localhost:3001/xrpc/com.atproto.sync.getBlob?did=did:plc:test&cid=bafytest"
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": expectedURL, // Already a URL string
+
},
+
},
+
}
+
+
// Should not error or change the URL
+
TransformBlobRefsToURLs(post)
+
+
// Verify thumb is unchanged
+
embedMap := post.Embed.(map[string]interface{})
+
external := embedMap["external"].(map[string]interface{})
+
thumbURL, ok := external["thumb"].(string)
+
require.True(t, ok, "thumb should still be a string")
+
assert.Equal(t, expectedURL, thumbURL, "thumb URL should be unchanged")
+
})
+
+
t.Run("handles missing embed", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: nil,
+
}
+
+
// Should not panic
+
TransformBlobRefsToURLs(post)
+
+
// Verify embed is still nil
+
assert.Nil(t, post.Embed, "embed should remain nil")
+
})
+
+
t.Run("handles nil post", func(t *testing.T) {
+
// Should not panic
+
TransformBlobRefsToURLs(nil)
+
})
+
+
t.Run("handles missing community", func(t *testing.T) {
+
post := &PostView{
+
Community: nil,
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
},
+
},
+
},
+
},
+
}
+
+
// Should not panic or transform
+
TransformBlobRefsToURLs(post)
+
+
// Verify thumb is unchanged (still a blob)
+
embedMap := post.Embed.(map[string]interface{})
+
external := embedMap["external"].(map[string]interface{})
+
thumb, ok := external["thumb"].(map[string]interface{})
+
require.True(t, ok, "thumb should still be a map (blob ref)")
+
assert.Equal(t, "blob", thumb["$type"], "blob type should be unchanged")
+
})
+
+
t.Run("handles missing PDS URL", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "", // Empty PDS URL
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
},
+
},
+
},
+
},
+
}
+
+
// Should not panic or transform
+
TransformBlobRefsToURLs(post)
+
+
// Verify thumb is unchanged (still a blob)
+
embedMap := post.Embed.(map[string]interface{})
+
external := embedMap["external"].(map[string]interface{})
+
thumb, ok := external["thumb"].(map[string]interface{})
+
require.True(t, ok, "thumb should still be a map (blob ref)")
+
assert.Equal(t, "blob", thumb["$type"], "blob type should be unchanged")
+
})
+
+
t.Run("handles malformed blob ref gracefully", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.external",
+
"external": map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": "invalid-ref-format", // Should be a map with $link
+
},
+
},
+
},
+
}
+
+
// Should not panic
+
TransformBlobRefsToURLs(post)
+
+
// Verify thumb is unchanged (malformed blob)
+
embedMap := post.Embed.(map[string]interface{})
+
external := embedMap["external"].(map[string]interface{})
+
thumb, ok := external["thumb"].(map[string]interface{})
+
require.True(t, ok, "thumb should still be a map")
+
assert.Equal(t, "invalid-ref-format", thumb["ref"], "malformed ref should be unchanged")
+
})
+
+
t.Run("ignores non-external embed types", func(t *testing.T) {
+
post := &PostView{
+
Community: &CommunityRef{
+
DID: "did:plc:testcommunity",
+
PDSURL: "http://localhost:3001",
+
},
+
Embed: map[string]interface{}{
+
"$type": "social.coves.embed.images",
+
"images": []interface{}{
+
map[string]interface{}{
+
"image": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
},
+
},
+
},
+
},
+
},
+
}
+
+
// Should not transform non-external embeds
+
TransformBlobRefsToURLs(post)
+
+
// Verify images embed is unchanged
+
embedMap := post.Embed.(map[string]interface{})
+
images := embedMap["images"].([]interface{})
+
imageObj := images[0].(map[string]interface{})
+
imageBlob := imageObj["image"].(map[string]interface{})
+
assert.Equal(t, "blob", imageBlob["$type"], "image blob should be unchanged")
+
})
+
}
+
+
func TestTransformThumbToURL(t *testing.T) {
+
t.Run("transforms valid blob ref to URL", func(t *testing.T) {
+
external := map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
},
+
"mimeType": "image/jpeg",
+
"size": 52813,
+
},
+
}
+
+
transformThumbToURL(external, "did:plc:test", "http://localhost:3001")
+
+
thumbURL, ok := external["thumb"].(string)
+
require.True(t, ok, "thumb should be a string URL")
+
assert.Equal(t,
+
"http://localhost:3001/xrpc/com.atproto.sync.getBlob?did=did:plc:test&cid=bafyreib6tbnql2ux3whnfysbzabthaj2vvck53nimhbi5g5a7jgvgr5eqm",
+
thumbURL)
+
})
+
+
t.Run("does not transform if thumb is already string", func(t *testing.T) {
+
expectedURL := "http://localhost:3001/xrpc/com.atproto.sync.getBlob?did=did:plc:test&cid=bafytest"
+
external := map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": expectedURL,
+
}
+
+
transformThumbToURL(external, "did:plc:test", "http://localhost:3001")
+
+
thumbURL, ok := external["thumb"].(string)
+
require.True(t, ok, "thumb should still be a string")
+
assert.Equal(t, expectedURL, thumbURL, "thumb should be unchanged")
+
})
+
+
t.Run("does not transform if thumb is missing", func(t *testing.T) {
+
external := map[string]interface{}{
+
"uri": "https://example.com",
+
}
+
+
transformThumbToURL(external, "did:plc:test", "http://localhost:3001")
+
+
_, hasThumb := external["thumb"]
+
assert.False(t, hasThumb, "thumb should not be added")
+
})
+
+
t.Run("does not transform if CID is empty", func(t *testing.T) {
+
external := map[string]interface{}{
+
"uri": "https://example.com",
+
"thumb": map[string]interface{}{
+
"$type": "blob",
+
"ref": map[string]interface{}{
+
"$link": "", // Empty CID
+
},
+
},
+
}
+
+
transformThumbToURL(external, "did:plc:test", "http://localhost:3001")
+
+
// Verify thumb is unchanged
+
thumb, ok := external["thumb"].(map[string]interface{})
+
require.True(t, ok, "thumb should still be a map")
+
ref := thumb["ref"].(map[string]interface{})
+
assert.Equal(t, "", ref["$link"], "empty CID should be unchanged")
+
})
+
}
+13 -14
aggregators/kagi-news/src/coves_client.py
···
content: str,
facets: List[Dict],
title: Optional[str] = None,
-
embed: Optional[Dict] = None
+
embed: Optional[Dict] = None,
+
thumbnail_url: Optional[str] = None
) -> str:
"""
Create a post in a community.
···
facets: Rich text facets (formatting, links)
title: Optional post title
embed: Optional external embed
+
thumbnail_url: Optional thumbnail URL (for trusted aggregators only)
Returns:
AT Proto URI of created post (e.g., "at://did:plc:.../social.coves.post/...")
···
if embed:
post_data["embed"] = embed
+
# Add thumbnail URL at top level if provided (for trusted aggregators)
+
if thumbnail_url:
+
post_data["thumbnailUrl"] = thumbnail_url
+
# Use Coves-specific endpoint (not direct PDS write)
# This provides validation, authorization, and business logic
logger.info(f"Creating post in community: {community_handle}")
···
self,
uri: str,
title: str,
-
description: str,
-
thumb: Optional[str] = None
+
description: str
) -> Dict:
"""
Create external embed object for hot-linked content.
Args:
-
uri: External URL (story link)
-
title: Story title
-
description: Story description/summary
-
thumb: Optional thumbnail image URL
+
uri: URL of the external content
+
title: Title of the content
+
description: Description/summary
Returns:
-
External embed dictionary
+
Embed dictionary ready for post creation
"""
-
embed = {
+
return {
"$type": "social.coves.embed.external",
"external": {
"uri": uri,
···
}
}
-
if thumb:
-
embed["external"]["thumb"] = thumb
-
-
return embed
-
def _get_timestamp(self) -> str:
"""
Get current timestamp in ISO 8601 format.
+5 -14
aggregators/kagi-news/tests/test_e2e.py
···
Verifies:
- Embed structure matches social.coves.embed.external
- All required fields are present
-
- Optional thumbnail is included when provided
+
- Thumbnails are handled by server's unfurl service (not included in client)
"""
handle, password = aggregator_credentials
···
password=password
)
-
# Test with thumbnail
+
# Create external embed (server will handle thumbnail extraction)
embed = client.create_external_embed(
uri="https://example.com/story",
title="Test Story",
-
description="Test description",
-
thumb="https://example.com/image.jpg"
+
description="Test description"
)
assert embed["$type"] == "social.coves.embed.external"
assert embed["external"]["uri"] == "https://example.com/story"
assert embed["external"]["title"] == "Test Story"
assert embed["external"]["description"] == "Test description"
-
assert embed["external"]["thumb"] == "https://example.com/image.jpg"
-
-
# Test without thumbnail
-
embed_no_thumb = client.create_external_embed(
-
uri="https://example.com/story2",
-
title="Test Story 2",
-
description="Test description 2"
-
)
-
-
assert "thumb" not in embed_no_thumb["external"]
+
# Thumbnail is not included - server's unfurl service handles it
+
assert "thumb" not in embed["external"]
print("\nโœ… External embed format correct")
+4 -3
aggregators/kagi-news/tests/test_main.py
···
mock_client.create_post.return_value = "at://did:plc:test/social.coves.post/abc123"
# Mock create_external_embed to return proper embed structure
+
# Note: Thumbnails are handled by server's unfurl service, not client
mock_client.create_external_embed.return_value = {
"$type": "social.coves.embed.external",
"external": {
"uri": sample_story.link,
"title": sample_story.title,
-
"description": sample_story.summary,
-
"thumb": sample_story.image_url
+
"description": sample_story.summary
}
}
···
assert call_kwargs["embed"]["$type"] == "social.coves.embed.external"
assert call_kwargs["embed"]["external"]["uri"] == sample_story.link
assert call_kwargs["embed"]["external"]["title"] == sample_story.title
-
assert call_kwargs["embed"]["external"]["thumb"] == sample_story.image_url
+
# Thumbnail is not included - server's unfurl service handles it
+
assert "thumb" not in call_kwargs["embed"]["external"]
+134
scripts/post_streamable.py
···
+
#!/usr/bin/env python3
+
"""
+
Quick script to post a Streamable video to test-usnews community.
+
Uses the kagi-news CovesClient infrastructure.
+
"""
+
+
import sys
+
import os
+
+
# Add kagi-news src to path to use CovesClient
+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../aggregators/kagi-news'))
+
+
from src.coves_client import CovesClient
+
+
def main():
    """Post a Streamable video and a Reddit link to the test-usnews community.

    Authenticates against the local dev PDS with hard-coded test
    credentials, then creates two posts whose external embeds carry only
    a URI — the server-side unfurl service is expected to enrich them
    with title/description/thumbnail metadata.

    Returns 0 on success, 1 on any authentication or posting failure
    (suitable for sys.exit).
    """
    # Configuration
    COVES_API_URL = "http://localhost:8081"
    PDS_URL = "http://localhost:3001"

    # Use PDS instance credentials (from .env.dev)
    HANDLE = "testuser123.local.coves.dev"
    PASSWORD = "test-password-123"

    # Post details
    COMMUNITY_HANDLE = "test-usnews.community.coves.social"

    # Post 1: Streamable video
    STREAMABLE_URL = "https://streamable.com/7kpdft"
    STREAMABLE_TITLE = "NBACentral - \"Your son don't wanna be here, we know it's your last weekend. Enjoy ..."

    # Post 2: Reddit highlight
    REDDIT_URL = "https://www.reddit.com/r/nba/comments/1orfsgm/highlight_giannis_antetokounmpo_41_pts_15_reb_9/"
    REDDIT_TITLE = "[Highlight] Giannis Antetokounmpo (41 PTS, 15 REB, 9 AST) tallies his 56th career regular season game of 40+ points, passing Kareem Abdul-Jabbar for the most such games in franchise history. Milwaukee defeats Chicago 126-110 to win their NBA Cup opener."

    # Initialize client
    print(f"Initializing Coves client...")
    print(f" API URL: {COVES_API_URL}")
    print(f" PDS URL: {PDS_URL}")
    print(f" Handle: {HANDLE}")

    client = CovesClient(
        api_url=COVES_API_URL,
        handle=HANDLE,
        password=PASSWORD,
        pds_url=PDS_URL
    )

    # Authenticate
    print("\nAuthenticating...")
    try:
        client.authenticate()
        print(f"โœ“ Authenticated as {client.did}")
    except Exception as e:
        print(f"โœ— Authentication failed: {e}")
        return 1

    # Post 1: Streamable video
    print("\n" + "="*60)
    print("POST 1: STREAMABLE VIDEO")
    print("="*60)

    # Minimal embed: only the URI is supplied; server unfurl fills the rest.
    print("\nCreating minimal external embed (URI only)...")
    streamable_embed = {
        "$type": "social.coves.embed.external",
        "external": {
            "uri": STREAMABLE_URL
        }
    }
    print(f"โœ“ Embed created with URI only (unfurl service should enrich)")

    print(f"\nPosting to {COMMUNITY_HANDLE}...")
    print(f" Title: {STREAMABLE_TITLE}")
    print(f" Video: {STREAMABLE_URL}")

    try:
        post_uri = client.create_post(
            community_handle=COMMUNITY_HANDLE,
            title=STREAMABLE_TITLE,
            content="",
            facets=[],
            embed=streamable_embed
        )

        print(f"\nโœ“ Streamable post created successfully!")
        print(f" URI: {post_uri}")

    except Exception as e:
        print(f"\nโœ— Streamable post creation failed: {e}")
        import traceback
        traceback.print_exc()
        return 1

    # Post 2: Reddit highlight
    print("\n" + "="*60)
    print("POST 2: REDDIT HIGHLIGHT")
    print("="*60)

    # Same minimal-embed pattern as post 1.
    print("\nCreating minimal external embed (URI only)...")
    reddit_embed = {
        "$type": "social.coves.embed.external",
        "external": {
            "uri": REDDIT_URL
        }
    }
    print(f"โœ“ Embed created with URI only (unfurl service should enrich)")

    print(f"\nPosting to {COMMUNITY_HANDLE}...")
    print(f" Title: {REDDIT_TITLE}")
    print(f" URL: {REDDIT_URL}")

    try:
        post_uri = client.create_post(
            community_handle=COMMUNITY_HANDLE,
            title=REDDIT_TITLE,
            content="",
            facets=[],
            embed=reddit_embed
        )

        print(f"\nโœ“ Reddit post created successfully!")
        print(f" URI: {post_uri}")
        print(f"\n" + "="*60)
        print("Both posts created! Check them out at !test-usnews")
        print("="*60)
        # Success exit code only after BOTH posts are created.
        return 0

    except Exception as e:
        print(f"\nโœ— Reddit post creation failed: {e}")
        import traceback
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    sys.exit(main())
+6 -3
aggregators/kagi-news/src/html_parser.py
···
Perspective(
actor=p['actor'],
description=p['description'],
-
source_url=p['source_url']
+
source_url=p['source_url'],
+
source_name=p.get('source_name', '')
)
for p in parsed['perspectives']
]
···
actor, rest = full_text.split(':', 1)
actor = actor.strip()
-
# Find the <a> tag for source URL
+
# Find the <a> tag for source URL and name
a_tag = li.find('a')
source_url = a_tag['href'] if a_tag and a_tag.get('href') else ""
+
source_name = a_tag.get_text(strip=True) if a_tag else ""
# Extract description (between colon and source link)
# Remove the source citation part in parentheses
···
return {
'actor': actor,
'description': description,
-
'source_url': source_url
+
'source_url': source_url,
+
'source_name': source_name
}
def _extract_sources(self, soup: BeautifulSoup) -> List[Dict]:
+1
aggregators/kagi-news/src/models.py
···
actor: str
description: str
source_url: str
+
source_name: str = "" # Name of the source (e.g., "The Straits Times")
@dataclass
+11 -7
aggregators/kagi-news/src/richtext_formatter.py
···
builder.add_bold("Highlights:")
builder.add_text("\n")
for highlight in story.highlights:
-
builder.add_text(f"โ€ข {highlight}\n")
+
builder.add_text(f"โ€ข {highlight}\n\n")
builder.add_text("\n")
# Perspectives (if present)
···
# Bold the actor name
actor_with_colon = f"{perspective.actor}:"
builder.add_bold(actor_with_colon)
-
builder.add_text(f" {perspective.description} (")
+
builder.add_text(f" {perspective.description}")
-
# Add link to source
-
source_link_text = "Source"
-
builder.add_link(source_link_text, perspective.source_url)
-
builder.add_text(")\n")
+
# Add link to source if available
+
if perspective.source_url:
+
builder.add_text(" (")
+
source_link_text = perspective.source_name if perspective.source_name else "Source"
+
builder.add_link(source_link_text, perspective.source_url)
+
builder.add_text(")")
+
+
builder.add_text("\n\n")
builder.add_text("\n")
# Quote (if present)
···
for source in story.sources:
builder.add_text("โ€ข ")
builder.add_link(source.title, source.url)
-
builder.add_text(f" - {source.domain}\n")
+
builder.add_text(f" - {source.domain}\n\n")
builder.add_text("\n")
# Kagi News attribution
+1 -1
internal/atproto/lexicon/social/coves/feed/comment.json internal/atproto/lexicon/social/coves/community/comment.json
···
{
"lexicon": 1,
-
"id": "social.coves.feed.comment",
+
"id": "social.coves.community.comment",
"defs": {
"main": {
"type": "record",
+34
internal/db/migrations/018_migrate_comment_namespace.sql
···
+
-- +goose Up
+
-- Migration: Update comment URIs from social.coves.feed.comment to social.coves.community.comment
+
-- This updates the namespace for all comment records in the database.
+
-- Since we're pre-production, we're only updating the comments table (not votes).
+
+
-- Update main comment URIs
+
UPDATE comments
+
SET uri = REPLACE(uri, '/social.coves.feed.comment/', '/social.coves.community.comment/')
+
WHERE uri LIKE '%/social.coves.feed.comment/%';
+
+
-- Update root references (when root is a comment, not a post)
+
UPDATE comments
+
SET root_uri = REPLACE(root_uri, '/social.coves.feed.comment/', '/social.coves.community.comment/')
+
WHERE root_uri LIKE '%/social.coves.feed.comment/%';
+
+
-- Update parent references (when parent is a comment)
+
UPDATE comments
+
SET parent_uri = REPLACE(parent_uri, '/social.coves.feed.comment/', '/social.coves.community.comment/')
+
WHERE parent_uri LIKE '%/social.coves.feed.comment/%';
+
+
-- +goose Down
+
-- Rollback: Revert comment URIs from social.coves.community.comment to social.coves.feed.comment
+
+
UPDATE comments
+
SET uri = REPLACE(uri, '/social.coves.community.comment/', '/social.coves.feed.comment/')
+
WHERE uri LIKE '%/social.coves.community.comment/%';
+
+
UPDATE comments
+
SET root_uri = REPLACE(root_uri, '/social.coves.community.comment/', '/social.coves.feed.comment/')
+
WHERE root_uri LIKE '%/social.coves.community.comment/%';
+
+
UPDATE comments
+
SET parent_uri = REPLACE(parent_uri, '/social.coves.community.comment/', '/social.coves.feed.comment/')
+
WHERE parent_uri LIKE '%/social.coves.community.comment/%';
+2 -2
internal/core/comments/view_models.go
···
)
// CommentView represents the full view of a comment with all metadata
-
// Matches social.coves.feed.getComments#commentView lexicon
+
// Matches social.coves.community.comment.getComments#commentView lexicon
// Used in thread views and get endpoints
type CommentView struct {
Embed interface{} `json:"embed,omitempty"`
···
}
// ThreadViewComment represents a comment with its nested replies
-
// Matches social.coves.feed.getComments#threadViewComment lexicon
+
// Matches social.coves.community.comment.getComments#threadViewComment lexicon
// Supports recursive threading for comment trees
type ThreadViewComment struct {
Comment *CommentView `json:"comment"`
+1 -1
internal/validation/lexicon.go
···
// ValidateComment validates a comment record
func (v *LexiconValidator) ValidateComment(comment map[string]interface{}) error {
-
return v.ValidateRecord(comment, "social.coves.feed.comment")
+
return v.ValidateRecord(comment, "social.coves.community.comment")
}
// ValidateVote validates a vote record
+18 -15
docs/COMMENT_SYSTEM_IMPLEMENTATION.md
···
```json
{
"lexicon": 1,
-
"id": "social.coves.feed.comment",
+
"id": "social.coves.community.comment",
"defs": {
"main": {
"type": "record",
···
```sql
CREATE TABLE comments (
id BIGSERIAL PRIMARY KEY,
-
uri TEXT UNIQUE NOT NULL, -- AT-URI (at://commenter_did/social.coves.feed.comment/rkey)
+
uri TEXT UNIQUE NOT NULL, -- AT-URI (at://commenter_did/social.coves.community.comment/rkey)
cid TEXT NOT NULL, -- Content ID
rkey TEXT NOT NULL, -- Record key (TID)
commenter_did TEXT NOT NULL, -- User who commented (from AT-URI repo field)
···
return nil
}
-
if event.Commit.Collection == "social.coves.feed.comment" {
+
if event.Commit.Collection == "social.coves.community.comment" {
switch event.Commit.Operation {
case "create":
return c.createComment(ctx, event.Did, commit)
···
- Auto-reconnect on errors (5-second retry)
- Ping/pong keepalive (30-second ping, 60-second read deadline)
- Graceful shutdown via context cancellation
-
- Subscribes to: `wantedCollections=social.coves.feed.comment`
+
- Subscribes to: `wantedCollections=social.coves.community.comment`
---
···
// Start Jetstream consumer for comments
commentJetstreamURL := os.Getenv("COMMENT_JETSTREAM_URL")
if commentJetstreamURL == "" {
-
commentJetstreamURL = "ws://localhost:6008/subscribe?wantedCollections=social.coves.feed.comment"
+
commentJetstreamURL = "ws://localhost:6008/subscribe?wantedCollections=social.coves.community.comment"
}
commentEventConsumer := jetstream.NewCommentEventConsumer(commentRepo, db)
···
}()
log.Printf("Started Jetstream comment consumer: %s", commentJetstreamURL)
-
log.Println(" - Indexing: social.coves.feed.comment CREATE/UPDATE/DELETE operations")
+
log.Println(" - Indexing: social.coves.community.comment CREATE/UPDATE/DELETE operations")
log.Println(" - Updating: Post comment counts and comment reply counts atomically")
```
···
| Aspect | Votes | Comments |
|--------|-------|----------|
| **Location** | User repositories | User repositories |
-
| **Lexicon** | `social.coves.feed.vote` | `social.coves.feed.comment` |
+
| **Lexicon** | `social.coves.feed.vote` | `social.coves.community.comment` |
| **Operations** | CREATE, DELETE | CREATE, UPDATE, DELETE |
| **Mutability** | Immutable | Editable |
| **Foreign Keys** | None (out-of-order indexing) | None (out-of-order indexing) |
···
---
-
### ๐Ÿ“‹ Phase 4: Namespace Migration (Separate Task)
+
### โœ… Phase 4: Namespace Migration (COMPLETED)
+
+
**Completed:** 2025-11-16
**Scope:**
-
- Migrate existing `social.coves.feed.comment` records to `social.coves.community.comment`
-
- Update all AT-URIs in database
-
- Update Jetstream consumer collection filter
-
- Migration script with rollback capability
-
- Zero-downtime deployment strategy
+
- โœ… Migrated `social.coves.feed.comment` namespace to `social.coves.community.comment`
+
- โœ… Updated lexicon definitions (record and query schemas)
+
- โœ… Updated Jetstream consumer collection filter
+
- โœ… Updated all code references (consumer, service, validation layers)
+
- โœ… Updated integration tests and test data generation scripts
+
- โœ… Created database migration (018_migrate_comment_namespace.sql)
-
**Note:** Currently out of scope - will be tackled separately when needed.
+
**Note:** Since we're pre-production, no historical data migration was needed. Migration script updates URIs in comments table (uri, root_uri, parent_uri columns).
---
···
### Environment Variables
```bash
# Jetstream URL (optional, defaults to localhost:6008)
-
export COMMENT_JETSTREAM_URL="ws://localhost:6008/subscribe?wantedCollections=social.coves.feed.comment"
+
export COMMENT_JETSTREAM_URL="ws://localhost:6008/subscribe?wantedCollections=social.coves.community.comment"
# Database URL
export TEST_DATABASE_URL="postgres://test_user:test_password@localhost:5434/coves_test?sslmode=disable"
+3 -2
internal/core/unfurl/providers.go
···
// normalizeURL converts protocol-relative URLs to HTTPS
// Examples:
-
// "//example.com/image.jpg" -> "https://example.com/image.jpg"
-
// "https://example.com/image.jpg" -> "https://example.com/image.jpg" (unchanged)
+
//
+
// "//example.com/image.jpg" -> "https://example.com/image.jpg"
+
// "https://example.com/image.jpg" -> "https://example.com/image.jpg" (unchanged)
func normalizeURL(urlStr string) string {
if strings.HasPrefix(urlStr, "//") {
return "https:" + urlStr
+785
docs/federation-prd.md
···
+
# Federation PRD: Cross-Instance Posting (Beta)
+
+
**Status:** Planning - Beta
+
**Target:** Beta Release
+
**Owner:** TBD
+
**Last Updated:** 2025-11-16
+
+
---
+
+
## Overview
+
+
Enable Lemmy-style federation where users on any Coves instance can post to communities hosted on other instances, while maintaining community ownership and moderation control.
+
+
### Problem Statement
+
+
**Current (Alpha):**
+
- Posts to communities require community credentials
+
- Users can only post to communities on their home instance
+
- No true federation across instances
+
+
**Desired (Beta):**
+
- User A@coves.social can post to !gaming@covesinstance.com
+
- Communities maintain full moderation control
+
- Content lives in community repositories (not user repos)
+
- Seamless UX - users don't think about federation
+
+
---
+
+
## Goals
+
+
### Primary Goals
+
1. **Enable cross-instance posting** - Users can post to any community on any federated instance
+
2. **Preserve community ownership** - Posts live in community repos, not user repos
+
3. **atProto-native implementation** - Use `com.atproto.server.getServiceAuth` pattern
+
4. **Maintain security** - No compromise on auth, validation, or moderation
+
+
### Non-Goals (Future Versions)
+
- Automatic instance discovery (Beta: manual allowlist)
+
- Cross-instance moderation delegation
+
- Content mirroring/replication
+
- User migration between instances
+
+
---
+
+
## Technical Approach
+
+
### Architecture: atProto Service Auth
+
+
Use atProto's native service authentication delegation pattern:
+
+
```
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ User A โ”‚ โ”‚ coves.social โ”‚ โ”‚ covesinstanceโ”‚
+
โ”‚ @coves.soc โ”‚โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ถโ”‚ AppView โ”‚โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–ถโ”‚ .com PDS โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ (1) โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ (2) โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
JWT auth Request Service Auth Validate
+
โ”‚ โ”‚
+
โ”‚โ—€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
โ”‚ (3) Scoped Token
+
โ”‚
+
โ–ผ
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ covesinstance โ”‚
+
โ”‚ .com PDS โ”‚
+
โ”‚ Write Post โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
โ”‚
+
โ–ผ
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ Firehose โ”‚
+
โ”‚ (broadcasts) โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
โ”‚
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ–ผ โ–ผ
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ coves.social โ”‚ โ”‚covesinstance โ”‚
+
โ”‚ AppView โ”‚ โ”‚ .com AppViewโ”‚
+
โ”‚ (indexes) โ”‚ โ”‚ (indexes) โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
```
+
+
### Flow Breakdown
+
+
**Step 1: User Authentication (Unchanged)**
+
- User authenticates with their home instance (coves.social)
+
- Receives JWT token for API requests
+
+
**Step 2: Service Auth Request (New)**
+
- When posting to remote community, AppView requests service auth token
+
- Endpoint: `POST {remote-pds}/xrpc/com.atproto.server.getServiceAuth`
+
- Payload:
+
```json
+
{
+
"aud": "did:plc:community123", // Community DID
+
"exp": 1234567890, // Token expiration
+
"lxm": "social.coves.community.post.create" // Authorized method
+
}
+
```
+
+
**Step 3: Service Auth Validation (New - PDS Side)**
+
- Remote PDS validates request:
+
- Is requesting service trusted? (instance allowlist)
+
- Is user banned from community?
+
- Does community allow remote posts?
+
- Rate limiting checks
+
- Returns scoped token valid for specific community + operation
+
+
**Step 4: Post Creation (Modified)**
+
- AppView uses service auth token to write to remote PDS
+
- Same `com.atproto.repo.createRecord` endpoint as current implementation
+
- Post record written to community's repository
+
+
**Step 5: Indexing (Unchanged)**
+
- PDS broadcasts to firehose
+
- All AppViews index via Jetstream consumers
+
+
---
+
+
## Implementation Details
+
+
### Phase 1: Service Detection (Local vs Remote)
+
+
**File:** `internal/core/posts/service.go`
+
+
```go
+
func (s *postService) CreatePost(ctx context.Context, req CreatePostRequest) (*CreatePostResponse, error) {
+
// ... existing validation ...
+
+
community, err := s.communityService.GetByDID(ctx, communityDID)
+
if err != nil {
+
return nil, err
+
}
+
+
// NEW: Route based on community location
+
if s.isLocalCommunity(community) {
+
return s.createLocalPost(ctx, community, req)
+
}
+
return s.createFederatedPost(ctx, community, req)
+
}
+
+
func (s *postService) isLocalCommunity(community *communities.Community) bool {
+
localPDSHost := extractHost(s.pdsURL)
+
communityPDSHost := extractHost(community.PDSURL)
+
return localPDSHost == communityPDSHost
+
}
+
```
+
+
### Phase 2: Service Auth Client
+
+
**New File:** `internal/atproto/service_auth/client.go`
+
+
```go
+
type ServiceAuthClient interface {
+
// RequestServiceAuth obtains a scoped token for writing to remote community
+
RequestServiceAuth(ctx context.Context, opts ServiceAuthOptions) (*ServiceAuthToken, error)
+
}
+
+
type ServiceAuthOptions struct {
+
RemotePDSURL string // Remote PDS endpoint
+
CommunityDID string // Target community DID
+
UserDID string // Author DID (for validation)
+
Method string // "social.coves.community.post.create"
+
ExpiresIn int // Token lifetime (seconds)
+
}
+
+
type ServiceAuthToken struct {
+
Token string // JWT token for auth
+
ExpiresAt time.Time // When token expires
+
}
+
+
func (c *serviceAuthClient) RequestServiceAuth(ctx context.Context, opts ServiceAuthOptions) (*ServiceAuthToken, error) {
+
endpoint := fmt.Sprintf("%s/xrpc/com.atproto.server.getServiceAuth", opts.RemotePDSURL)
+
+
payload := map[string]interface{}{
+
"aud": opts.CommunityDID,
+
"exp": time.Now().Add(time.Duration(opts.ExpiresIn) * time.Second).Unix(),
+
"lxm": opts.Method,
+
}
+
+
// Sign request with our instance DID credentials
+
signedReq, err := c.signRequest(payload)
+
if err != nil {
+
return nil, fmt.Errorf("failed to sign service auth request: %w", err)
+
}
+
+
resp, err := c.httpClient.Post(endpoint, signedReq)
+
if err != nil {
+
return nil, fmt.Errorf("service auth request failed: %w", err)
+
}
+
+
return parseServiceAuthResponse(resp)
+
}
+
```
+
+
### Phase 3: Federated Post Creation
+
+
**File:** `internal/core/posts/service.go`
+
+
```go
+
func (s *postService) createFederatedPost(ctx context.Context, community *communities.Community, req CreatePostRequest) (*CreatePostResponse, error) {
+
// 1. Request service auth token from remote PDS
+
token, err := s.serviceAuthClient.RequestServiceAuth(ctx, service_auth.ServiceAuthOptions{
+
RemotePDSURL: community.PDSURL,
+
CommunityDID: community.DID,
+
UserDID: req.AuthorDID,
+
Method: "social.coves.community.post.create",
+
ExpiresIn: 300, // 5 minutes
+
})
+
if err != nil {
+
// Handle specific errors
+
if isUnauthorized(err) {
+
return nil, ErrNotAuthorizedRemote
+
}
+
if isBanned(err) {
+
return nil, ErrBannedRemote
+
}
+
return nil, fmt.Errorf("failed to obtain service auth: %w", err)
+
}
+
+
// 2. Build post record (same as local)
+
postRecord := PostRecord{
+
Type: "social.coves.community.post",
+
Community: community.DID,
+
Author: req.AuthorDID,
+
Title: req.Title,
+
Content: req.Content,
+
// ... other fields ...
+
CreatedAt: time.Now().UTC().Format(time.RFC3339),
+
}
+
+
// 3. Write to remote PDS using service auth token
+
uri, cid, err := s.createPostOnRemotePDS(ctx, community.PDSURL, community.DID, postRecord, token.Token)
+
if err != nil {
+
return nil, fmt.Errorf("failed to write to remote PDS: %w", err)
+
}
+
+
log.Printf("[FEDERATION] User %s posted to remote community %s: %s",
+
req.AuthorDID, community.DID, uri)
+
+
return &CreatePostResponse{
+
URI: uri,
+
CID: cid,
+
}, nil
+
}
+
+
func (s *postService) createPostOnRemotePDS(
+
ctx context.Context,
+
pdsURL string,
+
communityDID string,
+
record PostRecord,
+
serviceAuthToken string,
+
) (uri, cid string, err error) {
+
endpoint := fmt.Sprintf("%s/xrpc/com.atproto.repo.createRecord", pdsURL)
+
+
payload := map[string]interface{}{
+
"repo": communityDID,
+
"collection": "social.coves.community.post",
+
"record": record,
+
}
+
+
jsonData, _ := json.Marshal(payload)
+
req, _ := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewBuffer(jsonData))
+
+
// Use service auth token instead of community credentials
+
req.Header.Set("Authorization", "Bearer "+serviceAuthToken)
+
req.Header.Set("Content-Type", "application/json")
+
+
// ... execute request, parse response ...
+
return uri, cid, nil
+
}
+
```
+
+
### Phase 4: PDS Service Auth Validation (PDS Extension)
+
+
**Note:** This requires extending the PDS. Options:
+
1. Contribute to official atproto PDS
+
2. Run modified PDS fork
+
3. Use PDS middleware/proxy
+
+
**Conceptual Implementation:**
+
+
```go
+
// PDS validates service auth requests before issuing tokens
+
func (h *ServiceAuthHandler) HandleGetServiceAuth(w http.ResponseWriter, r *http.Request) {
+
var req ServiceAuthRequest
+
json.NewDecoder(r.Body).Decode(&req)
+
+
// 1. Verify requesting service is trusted
+
requestingDID := extractDIDFromJWT(r.Header.Get("Authorization"))
+
if !h.isTrustedInstance(requestingDID) {
+
writeError(w, http.StatusForbidden, "UntrustedInstance", "Instance not in allowlist")
+
return
+
}
+
+
// 2. Validate community exists on this PDS
+
community, err := h.getCommunityByDID(req.Aud)
+
if err != nil {
+
writeError(w, http.StatusNotFound, "CommunityNotFound", "Community not hosted here")
+
return
+
}
+
+
// 3. Check user not banned (query from AppView or local moderation records)
+
if h.isUserBanned(req.UserDID, req.Aud) {
+
writeError(w, http.StatusForbidden, "Banned", "User banned from community")
+
return
+
}
+
+
// 4. Check community settings (allows remote posts?)
+
if !community.AllowFederatedPosts {
+
writeError(w, http.StatusForbidden, "FederationDisabled", "Community doesn't accept federated posts")
+
return
+
}
+
+
// 5. Rate limiting (per user, per community, per instance)
+
if h.exceedsRateLimit(req.UserDID, req.Aud, requestingDID) {
+
writeError(w, http.StatusTooManyRequests, "RateLimited", "Too many requests")
+
return
+
}
+
+
// 6. Generate scoped token
+
token := h.issueServiceAuthToken(ServiceAuthTokenOptions{
+
Audience: req.Aud, // Community DID
+
Subject: requestingDID, // Requesting instance DID
+
Method: req.Lxm, // Authorized method
+
ExpiresAt: time.Unix(req.Exp, 0),
+
Scopes: []string{"write:posts"},
+
})
+
+
json.NewEncoder(w).Encode(map[string]string{
+
"token": token,
+
})
+
}
+
```
+
+
---
+
+
## Database Schema Changes
+
+
### New Table: `instance_federation`
+
+
Tracks trusted instances and federation settings:
+
+
```sql
+
CREATE TABLE instance_federation (
+
id SERIAL PRIMARY KEY,
+
instance_did TEXT NOT NULL UNIQUE,
+
instance_domain TEXT NOT NULL,
+
trust_level TEXT NOT NULL, -- 'trusted', 'limited', 'blocked'
+
allowed_methods TEXT[] NOT NULL DEFAULT '{}',
+
rate_limit_posts_per_hour INTEGER NOT NULL DEFAULT 100,
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+
notes TEXT
+
);
+
+
CREATE INDEX idx_instance_federation_did ON instance_federation(instance_did);
+
CREATE INDEX idx_instance_federation_trust ON instance_federation(trust_level);
+
```
+
+
### New Table: `federation_rate_limits`
+
+
Track federated post rate limits:
+
+
```sql
+
CREATE TABLE federation_rate_limits (
+
id SERIAL PRIMARY KEY,
+
user_did TEXT NOT NULL,
+
community_did TEXT NOT NULL,
+
instance_did TEXT NOT NULL,
+
window_start TIMESTAMPTZ NOT NULL,
+
post_count INTEGER NOT NULL DEFAULT 1,
+
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+
+
UNIQUE(user_did, community_did, instance_did, window_start)
+
);
+
+
CREATE INDEX idx_federation_rate_limits_lookup
+
ON federation_rate_limits(user_did, community_did, instance_did, window_start);
+
```
+
+
### Update Table: `communities`
+
+
Add federation settings:
+
+
```sql
+
ALTER TABLE communities
+
ADD COLUMN allow_federated_posts BOOLEAN NOT NULL DEFAULT true,
+
ADD COLUMN federation_mode TEXT NOT NULL DEFAULT 'open';
+
-- federation_mode: 'open' (any instance), 'allowlist' (trusted only), 'local' (no federation)
+
```
+
+
---
+
+
## Security Considerations
+
+
### 1. Instance Trust Model
+
+
**Allowlist Approach (Beta):**
+
- Manual approval of federated instances
+
- Admin UI to manage instance trust levels
+
- Default: block all, explicit allow
+
+
**Trust Levels:**
+
- `trusted` - Full federation, normal rate limits
+
- `limited` - Federation allowed, strict rate limits
+
- `blocked` - No federation
+
+
### 2. User Ban Synchronization
+
+
**Challenge:** Remote instance needs to check local bans
+
+
**Options:**
+
1. **Service auth validation** - PDS queries AppView for ban status
+
2. **Ban records in PDS** - Moderation records stored in community repo
+
3. **Cached ban list** - Remote instances cache ban lists (with TTL)
+
+
**Beta Approach:** Option 1 (service auth validation queries AppView)
+
+
### 3. Rate Limiting
+
+
**Multi-level rate limits:**
+
- Per user per community: 10 posts/hour
+
- Per instance per community: 100 posts/hour
+
- Per user across all communities: 50 posts/hour
+
+
**Implementation:** In-memory + PostgreSQL for persistence
+
+
### 4. Content Validation
+
+
**Same validation as local posts:**
+
- Lexicon validation
+
- Content length limits
+
- Embed validation
+
- Label validation
+
+
**Additional federation checks:**
+
- Verify author DID is valid
+
- Verify requesting instance signature
+
- Verify token scopes match operation
+
+
---
+
+
## API Changes
+
+
### New Endpoint: `social.coves.federation.getTrustedInstances`
+
+
**Purpose:** List instances this instance federates with
+
+
**Lexicon:**
+
```json
+
{
+
"lexicon": 1,
+
"id": "social.coves.federation.getTrustedInstances",
+
"defs": {
+
"main": {
+
"type": "query",
+
"output": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["instances"],
+
"properties": {
+
"instances": {
+
"type": "array",
+
"items": { "$ref": "#instanceView" }
+
}
+
}
+
}
+
}
+
},
+
"instanceView": {
+
"type": "object",
+
"required": ["did", "domain", "trustLevel"],
+
"properties": {
+
"did": { "type": "string" },
+
"domain": { "type": "string" },
+
"trustLevel": { "type": "string" },
+
"allowedMethods": { "type": "array", "items": { "type": "string" } }
+
}
+
}
+
}
+
}
+
```
+
+
### Modified Endpoint: `social.coves.community.post.create`
+
+
**Changes:**
+
- No API contract changes
+
- Internal routing: local vs federated
+
- New error codes:
+
- `FederationFailed` - Remote instance unreachable
+
- `RemoteNotAuthorized` - Remote instance rejected auth
+
- `RemoteBanned` - User banned on remote community
+
+
---
+
+
## User Experience
+
+
### Happy Path: Cross-Instance Post
+
+
1. User on coves.social navigates to !gaming@covesinstance.com
+
2. Clicks "Create Post"
+
3. Fills out post form (title, content, etc.)
+
4. Clicks "Submit"
+
5. **Behind the scenes:**
+
- coves.social requests service auth from covesinstance.com
+
- covesinstance.com validates and issues token
+
- coves.social writes post using token
+
- Post appears in feed within seconds (via firehose)
+
6. **User sees:** Post published successfully
+
7. Post appears in:
+
- covesinstance.com feeds (native community)
+
- coves.social discover/all feeds (indexed via firehose)
+
- User's profile on coves.social
+
+
### Error Cases
+
+
**User Banned:**
+
- Error: "You are banned from !gaming@covesinstance.com"
+
- Suggestion: "Contact community moderators for more information"
+
+
**Instance Blocked:**
+
- Error: "This community does not accept posts from your instance"
+
- Suggestion: "Contact community administrators or create a local account"
+
+
**Federation Unavailable:**
+
- Error: "Unable to connect to covesinstance.com. Try again later."
+
- Fallback: Allow saving as draft (future feature)
+
+
**Rate Limited:**
+
- Error: "You're posting too quickly. Please wait before posting again."
+
- Show: Countdown until next post allowed
+
+
---
+
+
## Testing Requirements
+
+
### Unit Tests
+
+
1. **Service Detection:**
+
- `isLocalCommunity()` correctly identifies local vs remote
+
- Handles edge cases (different ports, subdomains)
+
+
2. **Service Auth Client:**
+
- Correctly formats service auth requests
+
- Handles token expiration
+
- Retries on transient failures
+
+
3. **Federated Post Creation:**
+
- Uses service auth token instead of community credentials
+
- Falls back gracefully on errors
+
- Logs federation events
+
+
### Integration Tests
+
+
1. **Local Post (Regression):**
+
- Posting to local community still works
+
- No performance degradation
+
+
2. **Federated Post:**
+
- User can post to remote community
+
- Service auth token requested correctly
+
- Post written to remote PDS
+
- Post indexed by both AppViews
+
+
3. **Authorization Failures:**
+
- Banned users rejected at service auth stage
+
- Untrusted instances rejected
+
- Expired tokens rejected
+
+
4. **Rate Limiting:**
+
- Per-user rate limits enforced
+
- Per-instance rate limits enforced
+
- Rate limit resets correctly
+
+
### End-to-End Tests
+
+
1. **Cross-Instance User Journey:**
+
- Set up two instances (instance-a, instance-b)
+
- Create community on instance-b
+
- User on instance-a posts to instance-b community
+
- Verify post appears on both instances
+
+
2. **Moderation Enforcement:**
+
- Ban user on remote instance
+
- Verify user can't post from any instance
+
- Unban user
+
- Verify user can post again
+
+
3. **Instance Blocklist:**
+
- Block instance-a on instance-b
+
- Verify users from instance-a can't post to instance-b communities
+
- Unblock instance-a
+
- Verify posting works again
+
+
---
+
+
## Migration Path (Alpha โ†’ Beta)
+
+
### Phase 1: Backend Implementation (No User Impact)
+
1. Add service auth client
+
2. Add local vs remote detection
+
3. Deploy with feature flag `ENABLE_FEDERATION=false`
+
+
### Phase 2: Database Migration
+
1. Add federation tables
+
2. Seed with initial trusted instances (manual)
+
3. Add community federation flags (default: allow)
+
+
### Phase 3: Soft Launch
+
1. Enable federation for single test instance
+
2. Monitor service auth requests/errors
+
3. Validate rate limiting works
+
+
### Phase 4: Beta Rollout
+
1. Enable `ENABLE_FEDERATION=true` for all instances
+
2. Admin UI for managing trusted instances
+
3. Community settings for federation preferences
+
+
### Phase 5: Documentation & Onboarding
+
1. Instance operator guide: "How to federate with other instances"
+
2. Community moderator guide: "Federation settings"
+
3. User guide: "Posting across instances"
+
+
---
+
+
## Metrics & Success Criteria
+
+
### Performance Metrics
+
- Service auth request latency: p95 < 200ms
+
- Federated post creation time: p95 < 2 seconds (vs 500ms local)
+
- Service auth token cache hit rate: > 80%
+
+
### Adoption Metrics
+
- % of posts that are federated: Target 20% by end of Beta
+
- Number of federated instances: Target 5+ by end of Beta
+
- Cross-instance engagement (comments, votes): Monitor trend
+
+
### Reliability Metrics
+
- Service auth success rate: > 99%
+
- Federated post success rate: > 95%
+
- Service auth token validation errors: < 1%
+
+
### Security Metrics
+
- Unauthorized access attempts: Monitor & alert
+
- Rate limit triggers: Track per instance
+
- Ban evasion attempts: Zero tolerance
+
+
---
+
+
## Rollback Plan
+
+
If federation causes critical issues:
+
+
1. **Immediate:** Set `ENABLE_FEDERATION=false` via env var
+
2. **Fallback:** All posts route through local-only flow
+
3. **Investigation:** Review logs for service auth failures
+
4. **Fix Forward:** Deploy patch, re-enable gradually
+
+
**No data loss:** Posts are written to PDS, indexed via firehose regardless of federation method.
+
+
---
+
+
## Open Questions
+
+
1. **Instance Discovery:** How do users find communities on other instances?
+
- Beta: Manual (users share links)
+
- Future: Instance directory, community search across instances
+
+
2. **Service Auth Token Caching:** Should AppViews cache service auth tokens?
+
- Pros: Reduce latency, fewer PDS requests
+
- Cons: Stale permissions, ban enforcement delay
+
- **Decision needed:** Cache with short TTL (5 minutes)?
+
+
3. **PDS Implementation:** Who implements service auth validation?
+
- Option A: Contribute to official PDS (long timeline)
+
- Option B: Run forked PDS (maintenance burden)
+
- Option C: Proxy/middleware (added complexity)
+
- **Decision needed:** Start with Option B, migrate to Option A?
+
+
4. **Federation Symmetry:** If instance-a trusts instance-b, does instance-b auto-trust instance-a?
+
- Beta: No (asymmetric trust)
+
- Future: Mutual federation agreements?
+
+
5. **Cross-Instance Moderation:** Should bans propagate across instances?
+
- Beta: No (each instance decides)
+
- Future: Shared moderation lists?
+
+
---
+
+
## Future Enhancements (Post-Beta)
+
+
1. **Service Auth Token Caching:** Reduce latency for frequent posters
+
2. **Batch Service Auth:** Request tokens for multiple communities at once
+
3. **Instance Discovery API:** Automatic instance detection/registration
+
4. **Federation Analytics:** Dashboard showing cross-instance activity
+
5. **Moderation Sync:** Optional shared ban lists across trusted instances
+
6. **Content Mirroring:** Cache federated posts locally for performance
+
7. **User Migration:** Transfer account between instances
+
+
---
+
+
## Resources
+
+
### Documentation
+
- [atProto Service Auth Spec](https://atproto.com/specs/service-auth) (hypothetical - check actual docs)
+
- Lemmy Federation Architecture
+
- Mastodon Federation Implementation
+
+
### Code References
+
- `internal/core/posts/service.go` - Post creation service
+
- `internal/api/handlers/post/create.go` - Post creation handler
+
- `internal/atproto/jetstream/` - Firehose consumers
+
+
### Dependencies
+
- atproto SDK (for service auth)
+
- PDS v0.4+ (service auth support)
+
- PostgreSQL 14+ (for federation tables)
+
+
---
+
+
## Appendix A: Service Auth Request Example
+
+
**Request to Remote PDS:**
+
```http
+
POST https://covesinstance.com/xrpc/com.atproto.server.getServiceAuth
+
Authorization: Bearer {coves-social-instance-jwt}
+
Content-Type: application/json
+
+
{
+
"aud": "did:plc:community123",
+
"exp": 1700000000,
+
"lxm": "social.coves.community.post.create"
+
}
+
```
+
+
**Response:**
+
```http
+
HTTP/1.1 200 OK
+
Content-Type: application/json
+
+
{
+
"token": "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9..."
+
}
+
```
+
+
**Using Token to Create Post:**
+
```http
+
POST https://covesinstance.com/xrpc/com.atproto.repo.createRecord
+
Authorization: Bearer {service-auth-token}
+
Content-Type: application/json
+
+
{
+
"repo": "did:plc:community123",
+
"collection": "social.coves.community.post",
+
"record": {
+
"$type": "social.coves.community.post",
+
"community": "did:plc:community123",
+
"author": "did:plc:user456",
+
"title": "Hello from coves.social!",
+
"content": "This is a federated post",
+
"createdAt": "2024-11-16T12:00:00Z"
+
}
+
}
+
```
+
+
---
+
+
## Appendix B: Error Handling Matrix
+
+
| Error Condition | HTTP Status | Error Code | User Message | Retry Strategy |
+
|----------------|-------------|------------|--------------|----------------|
+
| Instance not trusted | 403 | `UntrustedInstance` | "This community doesn't accept posts from your instance" | No retry |
+
| User banned | 403 | `Banned` | "You are banned from this community" | No retry |
+
| Rate limit exceeded | 429 | `RateLimited` | "Too many posts. Try again in X minutes" | Exponential backoff |
+
| PDS unreachable | 503 | `ServiceUnavailable` | "Community temporarily unavailable" | Retry 3x with backoff |
+
| Invalid token | 401 | `InvalidToken` | "Session expired. Please try again" | Refresh token & retry |
+
| Community not found | 404 | `CommunityNotFound` | "Community not found" | No retry |
+
| Service auth failed | 500 | `FederationFailed` | "Unable to connect. Try again later" | Retry 2x |
+
+
---
+
+
**End of PRD**
+130 -28
docs/PRD_ALPHA_GO_LIVE.md
···
## ๐ŸŽฏ Major Progress Update
**โœ… ALL E2E TESTS COMPLETE!** (Completed 2025-11-16)
+
**โœ… BIDIRECTIONAL DID VERIFICATION COMPLETE!** (Completed 2025-11-16)
All 6 critical E2E test suites have been implemented and are passing:
- โœ… Full User Journey (signup โ†’ community โ†’ post โ†’ comment โ†’ vote)
···
**Time Saved**: ~7-12 hours through parallel agent implementation
**Test Quality**: Enhanced with comprehensive database record verification to catch race conditions
+
### Production Deployment Requirements
+
+
**Architecture**:
+
- **AppView Domain**: coves.social (instance identity, API, frontend)
+
- **PDS Domain**: coves.me (separate domain required - cannot be same as AppView)
+
- **Community Handles**: Use @coves.social (AppView domain)
+
- **Jetstream**: Connects to Bluesky's production firehose (wss://jetstream2.us-east.bsky.network)
+
+
**Required: .well-known/did.json at coves.social**:
+
```json
+
{
+
"id": "did:web:coves.social",
+
"alsoKnownAs": ["at://coves.social"],
+
"verificationMethod": [
+
{
+
"id": "did:web:coves.social#atproto",
+
"type": "Multikey",
+
"controller": "did:web:coves.social",
+
"publicKeyMultibase": "z..."
+
}
+
],
+
"service": [
+
{
+
"id": "#atproto_pds",
+
"type": "AtprotoPersonalDataServer",
+
"serviceEndpoint": "https://coves.me"
+
}
+
]
+
}
+
```
+
+
**Environment Variables**:
+
- AppView:
+
- `INSTANCE_DID=did:web:coves.social`
+
- `INSTANCE_DOMAIN=coves.social`
+
- `PDS_URL=https://coves.me` (separate domain)
+
- `SKIP_DID_WEB_VERIFICATION=false` (production)
+
- `JETSTREAM_URL=wss://jetstream2.us-east.bsky.network/subscribe`
+
+
**Verification**:
+
- `curl https://coves.social/.well-known/did.json` (should return DID document)
+
- `curl https://coves.me/xrpc/_health` (PDS health check)
+
## Overview
This document tracks the remaining work required to launch Coves alpha with real users. Focus is on critical functionality, security, and operational readiness.
···
### 1. Authentication & Security
+
#### Production PDS Deployment
+
**CRITICAL**: PDS must be on separate domain from AppView (coves.me, not coves.social)
+
+
- [ ] Deploy PDS to coves.me domain
+
- [ ] Set up DNS: A record for coves.me โ†’ server IP
+
- [ ] Configure SSL certificate for coves.me
+
- [ ] Deploy PDS container/service on port 2583
+
- [ ] Configure nginx/Caddy reverse proxy for coves.me โ†’ localhost:2583
+
- [ ] Set PDS_HOSTNAME=coves.me in PDS environment
+
- [ ] Mount persistent volume for PDS data (/pds/data)
+
- [ ] Verify PDS connectivity
+
- [ ] Test: `curl https://coves.me/xrpc/_health`
+
- [ ] Create test community account on PDS
+
- [ ] Verify JWKS endpoint: `curl https://coves.me/.well-known/jwks.json`
+
- [ ] Test community account token provisioning
+
- [ ] Configure AppView to use production PDS
+
- [ ] Set `PDS_URL=https://coves.me` in AppView .env
+
- [ ] Test community creation flow (provisions account on coves.me)
+
- [ ] Verify account provisioning works end-to-end
+
+
**Important**: Jetstream connects to Bluesky's production firehose, which automatically includes events from all production PDS instances (including coves.me once it's live)
+
+
**Estimated Effort**: 4-6 hours
+
**Risk**: Medium (infrastructure setup, DNS propagation)
+
#### JWT Signature Verification (Production Mode)
-
- [ ] Test with production PDS at `pds.bretton.dev`
-
- [ ] Create test account on production PDS
-
- [ ] Verify JWKS endpoint is accessible
+
- [ ] Test with production PDS at coves.me
+
- [ ] Verify JWKS endpoint is accessible: `https://coves.me/.well-known/jwks.json`
- [ ] Run `TestJWTSignatureVerification` against production PDS
- [ ] Confirm signature verification succeeds
-
- [ ] Test token refresh flow
+
- [ ] Test token refresh flow for community accounts
- [ ] Set `AUTH_SKIP_VERIFY=false` in production environment
- [ ] Verify all auth middleware tests pass with verification enabled
-
- [ ] Document production PDS requirements for communities
-
-
**Estimated Effort**: 2-3 hours
-
**Risk**: Medium (code implemented, needs validation)
-
-
#### did:web Verification
-
- [ ] Complete did:web domain verification implementation
-
- [ ] Test with real did:web identities
-
- [ ] Add security logging for verification failures
-
- [ ] Set `SKIP_DID_WEB_VERIFICATION=false` for production
**Estimated Effort**: 2-3 hours
-
**Risk**: Medium
+
**Risk**: Low (depends on PDS deployment)
+
+
#### did:web Verification โœ… COMPLETE
+
- [x] Complete did:web domain verification implementation (2025-11-16)
+
- [x] Implement Bluesky-compatible bidirectional verification
+
- [x] Add alsoKnownAs field verification in DID documents
+
- [x] Add security logging for verification failures
+
- [x] Update cache TTL to 24h (matches Bluesky recommendations)
+
- [x] Comprehensive test coverage with mock HTTP servers
+
- [ ] Set `SKIP_DID_WEB_VERIFICATION=false` for production (dev default: true)
+
- [ ] Deploy `.well-known/did.json` to production domain
+
+
**Implementation Details**:
+
- **Location**: [internal/atproto/jetstream/community_consumer.go](../internal/atproto/jetstream/community_consumer.go)
+
- **Verification Flow**: Domain matching + DID document fetch + alsoKnownAs validation
+
- **Security Model**: Matches Bluesky (DNS/HTTPS authority + bidirectional binding)
+
- **Performance**: Bounded LRU cache (1000 entries), rate limiting (10 req/s), 24h TTL
+
- **Impact**: AppView indexing and federation trust (not community creation API)
+
- **Tests**: `tests/integration/community_hostedby_security_test.go`
+
+
**Actual Effort**: 3 hours (implementation + testing)
+
**Risk**: โœ… Low (complete and tested)
### 2. DPoP Token Architecture Fix
···
- [ ] Common issues and fixes
- [ ] Emergency procedures (PDS down, database down, etc.)
- [ ] Create production environment checklist
-
- [ ] All environment variables set
-
- [ ] `AUTH_SKIP_VERIFY=false`
-
- [ ] `SKIP_DID_WEB_VERIFICATION=false`
-
- [ ] Database migrations applied
-
- [ ] PDS connectivity verified
-
- [ ] JWKS caching working
-
- [ ] Jetstream consumers running
+
- [ ] **Domain Setup**
+
- [ ] AppView domain (coves.social) DNS configured
+
- [ ] PDS domain (coves.me) DNS configured - MUST be separate domain
+
- [ ] SSL certificates for both domains
+
- [ ] Nginx/Caddy reverse proxy configured for both domains
+
- [ ] **AppView Environment Variables**
+
- [ ] `INSTANCE_DID=did:web:coves.social`
+
- [ ] `INSTANCE_DOMAIN=coves.social`
+
- [ ] `PDS_URL=https://coves.me` (separate domain)
+
- [ ] `AUTH_SKIP_VERIFY=false`
+
- [ ] `SKIP_DID_WEB_VERIFICATION=false`
+
- [ ] `JETSTREAM_URL=wss://jetstream2.us-east.bsky.network/subscribe`
+
- [ ] **PDS Environment Variables**
+
- [ ] `PDS_HOSTNAME=coves.me`
+
- [ ] `PDS_PORT=2583`
+
- [ ] Persistent storage mounted
+
- [ ] **Deployment Verification**
+
- [ ] Deploy `.well-known/did.json` to coves.social with `serviceEndpoint: https://coves.me`
+
- [ ] Verify: `curl https://coves.social/.well-known/did.json`
+
- [ ] Verify: `curl https://coves.me/xrpc/_health`
+
- [ ] Database migrations applied
+
- [ ] PDS connectivity verified from AppView
+
- [ ] JWKS caching working
+
- [ ] Jetstream consumer connected to Bluesky production firehose
+
- [ ] Test community creation end-to-end
- [ ] Monitoring and alerting active
**Estimated Effort**: 6-8 hours
···
## Timeline Estimate
### Week 1: Critical Blockers (P0)
-
- **Days 1-2**: Authentication (JWT + did:web verification)
+
- ~~**Days 1-2**: Authentication (JWT + did:web verification)~~ โœ… **did:web COMPLETED**
+
- **Day 1**: Production PDS deployment (coves.me domain setup)
+
- **Day 2**: JWT signature verification with production PDS
- **Day 3**: DPoP token architecture fix
- ~~**Day 4**: Handle resolution + comment count reconciliation~~ โœ… **COMPLETED**
- **Day 4-5**: Testing and bug fixes
-
**Total**: 15-20 hours (reduced from 20-25 due to completed items)
+
**Total**: 16-23 hours (added 4-6 hours for PDS deployment, reduced from original due to did:web completion)
### Week 2: Production Infrastructure (P1)
- **Days 6-7**: Monitoring + structured logging
···
**Total**: ~~20-25 hours~~ โ†’ **13 hours actual** (E2E tests) + 7-12 hours remaining (load testing, polish)
-
**Grand Total: ~~65-80 hours~~ โ†’ 50-65 hours remaining (approximately 1.5-2 weeks full-time)**
-
*(Originally 70-85 hours. Reduced by completed items: handle resolution, comment count reconciliation, and ALL E2E tests)*
+
**Grand Total: ~~65-80 hours~~ โ†’ 51-68 hours remaining (approximately 1.5-2 weeks full-time)**
+
*(Originally 70-85 hours. Adjusted for: +4-6 hours PDS deployment, -3 hours did:web completion, -13 hours E2E tests completion, -4 hours handle resolution and comment reconciliation)*
**โœ… Progress Update**: E2E testing section COMPLETE ahead of schedule - saved ~7-12 hours through parallel agent implementation
···
- [ ] All P0 blockers resolved
- โœ… Handle resolution (COMPLETE)
- โœ… Comment count reconciliation (COMPLETE)
+
- โœ… did:web verification (COMPLETE - needs production deployment)
+
- [ ] Production PDS deployed to coves.me (separate domain)
- [ ] JWT signature verification working with production PDS
- [ ] DPoP architecture fix implemented
-
- [ ] did:web verification complete
- [ ] Subscriptions/blocking work via client-write pattern
- [x] **All integration tests passing** โœ…
- [x] **E2E user journey test passing** โœ…
···
11. [ ] Go/no-go decision
12. [ ] Launch! ๐Ÿš€
-
**๐ŸŽ‰ Major Milestone**: All E2E tests complete! Test coverage now includes full user journey, blob uploads, concurrent operations, rate limiting, and error recovery.
+
**๐ŸŽ‰ Major Milestones**:
+
- All E2E tests complete! Test coverage now includes full user journey, blob uploads, concurrent operations, rate limiting, and error recovery.
+
- Bidirectional DID verification complete! Bluesky-compatible security model with alsoKnownAs validation, 24h cache TTL, and comprehensive test coverage.
+18
internal/api/routes/aggregator.go
···
import (
"Coves/internal/api/handlers/aggregator"
+
"Coves/internal/api/middleware"
+
"Coves/internal/atproto/identity"
"Coves/internal/core/aggregators"
+
"Coves/internal/core/users"
+
"net/http"
+
"time"
"github.com/go-chi/chi/v5"
)
···
func RegisterAggregatorRoutes(
r chi.Router,
aggregatorService aggregators.Service,
+
userService users.UserService,
+
identityResolver identity.Resolver,
) {
// Create query handlers
getServicesHandler := aggregator.NewGetServicesHandler(aggregatorService)
getAuthorizationsHandler := aggregator.NewGetAuthorizationsHandler(aggregatorService)
listForCommunityHandler := aggregator.NewListForCommunityHandler(aggregatorService)
+
// Create registration handler
+
registerHandler := aggregator.NewRegisterHandler(userService, identityResolver)
+
// Query endpoints (public - no auth required)
// GET /xrpc/social.coves.aggregator.getServices?dids=did:plc:abc,did:plc:def
// Following app.bsky.feed.getFeedGenerators pattern
···
// Lists aggregators authorized by a community
r.Get("/xrpc/social.coves.aggregator.listForCommunity", listForCommunityHandler.HandleListForCommunity)
+
// Registration endpoint (public - no auth required)
+
// Aggregators register themselves after creating their own PDS accounts
+
// POST /xrpc/social.coves.aggregator.register
+
// Rate limited to 10 requests per 10 minutes per IP to prevent abuse
+
registrationRateLimiter := middleware.NewRateLimiter(10, 10*time.Minute)
+
r.Post("/xrpc/social.coves.aggregator.register",
+
registrationRateLimiter.Middleware(http.HandlerFunc(registerHandler.HandleRegister)).ServeHTTP)
+
// Write endpoints (Phase 2 - require authentication and moderator permissions)
// TODO: Implement after Jetstream consumer is ready
// POST /xrpc/social.coves.aggregator.enable (requires auth + moderator)
+73
internal/atproto/lexicon/social/coves/aggregator/register.json
···
+
{
+
"lexicon": 1,
+
"id": "social.coves.aggregator.register",
+
"defs": {
+
"main": {
+
"type": "procedure",
+
"description": "Register an existing aggregator DID with this Coves instance. Aggregators must first create their own DID via PLC directory, then call this endpoint to register. Domain ownership is verified via .well-known/atproto-did file.",
+
"input": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["did", "domain"],
+
"properties": {
+
"did": {
+
"type": "string",
+
"format": "did",
+
"description": "DID of the aggregator (did:plc or did:web format)"
+
},
+
"domain": {
+
"type": "string",
+
"format": "uri",
+
"description": "Domain where the aggregator is hosted (e.g., 'rss-bot.example.com'). Must serve .well-known/atproto-did file containing the DID."
+
}
+
}
+
}
+
},
+
"output": {
+
"encoding": "application/json",
+
"schema": {
+
"type": "object",
+
"required": ["did", "handle"],
+
"properties": {
+
"did": {
+
"type": "string",
+
"format": "did",
+
"description": "DID of the registered aggregator"
+
},
+
"handle": {
+
"type": "string",
+
"description": "Handle extracted from DID document"
+
},
+
"message": {
+
"type": "string",
+
"description": "Success message with next steps"
+
}
+
}
+
}
+
},
+
"errors": [
+
{
+
"name": "InvalidDID",
+
"description": "DID format is invalid or not did:plc or did:web format"
+
},
+
{
+
"name": "DomainVerificationFailed",
+
"description": "Could not verify domain ownership via .well-known/atproto-did or DID mismatch"
+
},
+
{
+
"name": "AlreadyRegistered",
+
"description": "This aggregator DID is already registered with this instance"
+
},
+
{
+
"name": "DIDResolutionFailed",
+
"description": "Could not resolve DID document to extract handle and PDS URL"
+
},
+
{
+
"name": "RegistrationFailed",
+
"description": "Internal server error occurred during registration"
+
}
+
]
+
}
+
}
+
}
+591
docs/aggregators/SETUP_GUIDE.md
···
+
# Aggregator Setup Guide
+
+
This guide explains how to set up and register an aggregator with Coves instances.
+
+
## Table of Contents
+
+
- [Overview](#overview)
+
- [Architecture](#architecture)
+
- [Prerequisites](#prerequisites)
+
- [Quick Start](#quick-start)
+
- [Detailed Setup Steps](#detailed-setup-steps)
+
- [Authorization Process](#authorization-process)
+
- [Posting to Communities](#posting-to-communities)
+
- [Rate Limits](#rate-limits)
+
- [Security Best Practices](#security-best-practices)
+
- [Troubleshooting](#troubleshooting)
+
- [API Reference](#api-reference)
+
+
## Overview
+
+
**Aggregators** are automated services that post content to Coves communities. They are similar to Bluesky's feed generators and labelers - self-managed external services that integrate with the platform.
+
+
**Key characteristics**:
+
- Self-owned: You create and manage your own PDS account
+
- Domain-verified: Prove ownership via `.well-known/atproto-did`
+
- Community-authorized: Moderators grant posting permission per-community
+
- Rate-limited: 10 posts per hour per community
+
+
**Example use cases**:
+
- RSS feed aggregators (tech news, blog posts)
+
- Social media cross-posters (Twitter โ†’ Coves)
+
- Event notifications (GitHub releases, weather alerts)
+
- Content curation bots (daily links, summaries)
+
+
## Architecture
+
+
### Data Flow
+
+
```
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ 1. One-Time Setup โ”‚
+
โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค
+
โ”‚ Aggregator creates PDS account โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Proves domain ownership (.well-known) โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Registers with Coves (enters users table) โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Writes service declaration โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Jetstream indexes into aggregators table โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ 2. Per-Community Authorization โ”‚
+
โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค
+
โ”‚ Moderator writes authorization record โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Jetstream indexes into aggregator_authorizations โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
+
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”
+
โ”‚ 3. Posting (Ongoing) โ”‚
+
โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค
+
โ”‚ Aggregator calls post creation endpoint โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Handler validates: โ”‚
+
โ”‚ - Author in users table โœ“ โ”‚
+
โ”‚ - Author in aggregators table โœ“ โ”‚
+
โ”‚ - Authorization exists โœ“ โ”‚
+
โ”‚ - Rate limit not exceeded โœ“ โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Post written to community's PDS โ”‚
+
โ”‚ โ†“ โ”‚
+
โ”‚ Jetstream indexes post โ”‚
+
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜
+
```
+
+
### Database Tables
+
+
**users** - All actors (users, communities, aggregators)
+
```sql
+
CREATE TABLE users (
+
did TEXT PRIMARY KEY,
+
handle TEXT NOT NULL,
+
pds_url TEXT,
+
indexed_at TIMESTAMPTZ
+
);
+
```
+
+
**aggregators** - Aggregator-specific metadata
+
```sql
+
CREATE TABLE aggregators (
+
did TEXT PRIMARY KEY,
+
display_name TEXT NOT NULL,
+
description TEXT,
+
avatar_url TEXT,
+
config_schema JSONB,
+
source_url TEXT,
+
maintainer_did TEXT,
+
record_uri TEXT NOT NULL UNIQUE,
+
record_cid TEXT NOT NULL,
+
created_at TIMESTAMPTZ,
+
indexed_at TIMESTAMPTZ
+
);
+
```
+
+
**aggregator_authorizations** - Community authorizations
+
```sql
+
CREATE TABLE aggregator_authorizations (
+
id BIGSERIAL PRIMARY KEY,
+
aggregator_did TEXT NOT NULL,
+
community_did TEXT NOT NULL,
+
enabled BOOLEAN NOT NULL DEFAULT true,
+
config JSONB,
+
created_by TEXT,
+
record_uri TEXT NOT NULL UNIQUE,
+
record_cid TEXT NOT NULL,
+
UNIQUE(aggregator_did, community_did)
+
);
+
```
+
+
## Prerequisites
+
+
1. **Domain ownership**: You must own a domain where you can host static files over HTTPS
+
2. **Web server**: Ability to serve the `.well-known/atproto-did` file
+
3. **Development tools**: `curl`, `jq`, basic shell scripting knowledge
+
4. **Email address**: For creating the PDS account
+
+
**Optional**:
+
- Custom avatar image (PNG/JPEG/WebP, max 1MB)
+
- GitHub repository for source code transparency
+
+
## Quick Start
+
+
We provide automated setup scripts:
+
+
```bash
+
cd scripts/aggregator-setup
+
+
# Make scripts executable
+
chmod +x *.sh
+
+
# Run setup scripts in order
+
./1-create-pds-account.sh
+
./2-setup-wellknown.sh
+
# (Upload .well-known to your web server)
+
./3-register-with-coves.sh
+
./4-create-service-declaration.sh
+
```
+
+
See [scripts/aggregator-setup/README.md](../../scripts/aggregator-setup/README.md) for detailed script documentation.
+
+
## Detailed Setup Steps
+
+
### Step 1: Create PDS Account
+
+
Your aggregator needs its own atProto identity (DID). The easiest way is to create an account on an existing PDS.
+
+
**Using an existing PDS (recommended)**:
+
+
```bash
+
curl -X POST https://bsky.social/xrpc/com.atproto.server.createAccount \
+
-H "Content-Type: application/json" \
+
-d '{
+
"handle": "mynewsbot.bsky.social",
+
"email": "bot@example.com",
+
"password": "secure-password-here"
+
}'
+
```
+
+
**Response**:
+
```json
+
{
+
"accessJwt": "eyJ...",
+
"refreshJwt": "eyJ...",
+
"handle": "mynewsbot.bsky.social",
+
"did": "did:plc:abc123...",
+
"didDoc": {...}
+
}
+
```
+
+
**Save these credentials securely!** You'll need the DID and access token for all subsequent operations.
+
+
**Alternative**: Run your own PDS or use `did:web` (advanced).
+
+
### Step 2: Prove Domain Ownership
+
+
To register with Coves, you must prove you own a domain by serving your DID at `https://yourdomain.com/.well-known/atproto-did`.
+
+
**Create the file**:
+
+
```bash
+
mkdir -p .well-known
+
echo "did:plc:abc123..." > .well-known/atproto-did
+
```
+
+
**Upload to your web server** so it's accessible at:
+
```
+
https://rss-bot.example.com/.well-known/atproto-did
+
```
+
+
**Verify it works**:
+
```bash
+
curl https://rss-bot.example.com/.well-known/atproto-did
+
# Should return: did:plc:abc123...
+
```
+
+
**Nginx configuration example**:
+
```nginx
+
location /.well-known/atproto-did {
+
alias /var/www/.well-known/atproto-did;
+
default_type text/plain;
+
add_header Access-Control-Allow-Origin *;
+
}
+
```
+
+
### Step 3: Register with Coves
+
+
Call the registration endpoint to register your aggregator DID with the Coves instance.
+
+
**Endpoint**: `POST /xrpc/social.coves.aggregator.register`
+
+
**Request**:
+
```bash
+
curl -X POST https://api.coves.social/xrpc/social.coves.aggregator.register \
+
-H "Content-Type: application/json" \
+
-d '{
+
"did": "did:plc:abc123...",
+
"domain": "rss-bot.example.com"
+
}'
+
```
+
+
**Response** (Success):
+
```json
+
{
+
"did": "did:plc:abc123...",
+
"handle": "mynewsbot.bsky.social",
+
"message": "Aggregator registered successfully. Next step: create a service declaration record at at://did:plc:abc123.../social.coves.aggregator.service/self"
+
}
+
```
+
+
**What happens**:
+
1. Coves fetches `https://rss-bot.example.com/.well-known/atproto-did`
+
2. Verifies it contains your DID
+
3. Resolves your DID to get handle and PDS URL
+
4. Inserts you into the `users` table
+
+
**You're now registered!** But you need to create a service declaration next.
+
+
### Step 4: Create Service Declaration
+
+
Write a `social.coves.aggregator.service` record to your repository. This contains metadata about your aggregator and gets indexed by Coves' Jetstream consumer.
+
+
**Endpoint**: `POST https://your-pds.com/xrpc/com.atproto.repo.createRecord`
+
+
**Request**:
+
```bash
+
curl -X POST https://bsky.social/xrpc/com.atproto.repo.createRecord \
+
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
+
-H "Content-Type: application/json" \
+
-d '{
+
"repo": "did:plc:abc123...",
+
"collection": "social.coves.aggregator.service",
+
"rkey": "self",
+
"record": {
+
"$type": "social.coves.aggregator.service",
+
"did": "did:plc:abc123...",
+
"displayName": "RSS News Aggregator",
+
"description": "Aggregates tech news from various RSS feeds",
+
"sourceUrl": "https://github.com/yourname/rss-aggregator",
+
"maintainer": "did:plc:your-personal-did",
+
"createdAt": "2024-01-15T12:00:00Z"
+
}
+
}'
+
```
+
+
**Response**:
+
```json
+
{
+
"uri": "at://did:plc:abc123.../social.coves.aggregator.service/self",
+
"cid": "bafyrei..."
+
}
+
```
+
+
**Optional fields**:
+
- `avatar`: Blob reference to avatar image
+
- `configSchema`: JSON Schema for community-specific configuration
+
+
**Wait 5-10 seconds** for Jetstream to index your service declaration into the `aggregators` table.
+
+
## Authorization Process
+
+
Before you can post to a community, a moderator must authorize your aggregator.
+
+
### How Authorization Works
+
+
1. **Moderator decision**: Community moderator evaluates your aggregator
+
2. **Authorization record**: Moderator writes `social.coves.aggregator.authorization` to community's repo
+
3. **Jetstream indexing**: Record gets indexed into `aggregator_authorizations` table
+
4. **Posting enabled**: You can now post to that community
+
+
### Authorization Record Structure
+
+
**Location**: `at://{community_did}/social.coves.aggregator.authorization/{rkey}`
+
+
**Example**:
+
```json
+
{
+
"$type": "social.coves.aggregator.authorization",
+
"aggregatorDid": "did:plc:abc123...",
+
"communityDid": "did:plc:community123...",
+
"enabled": true,
+
"createdBy": "did:plc:moderator...",
+
"createdAt": "2024-01-15T12:00:00Z",
+
"config": {
+
"maxPostsPerHour": 5,
+
"allowedCategories": ["tech", "news"]
+
}
+
}
+
```
+
+
### Checking Your Authorizations
+
+
**Endpoint**: `GET /xrpc/social.coves.aggregator.getAuthorizations`
+
+
```bash
+
curl "https://api.coves.social/xrpc/social.coves.aggregator.getAuthorizations?aggregatorDid=did:plc:abc123...&enabledOnly=true"
+
```
+
+
**Response**:
+
```json
+
{
+
"authorizations": [
+
{
+
"aggregatorDid": "did:plc:abc123...",
+
"communityDid": "did:plc:community123...",
+
"communityHandle": "~tech@coves.social",
+
"enabled": true,
+
"createdAt": "2024-01-15T12:00:00Z",
+
"config": {...}
+
}
+
]
+
}
+
```
+
+
## Posting to Communities
+
+
Once authorized, you can post to communities using the standard post creation endpoint.
+
+
### Create Post
+
+
**Endpoint**: `POST /xrpc/social.coves.community.post.create`
+
+
**Request**:
+
```bash
+
curl -X POST https://api.coves.social/xrpc/social.coves.community.post.create \
+
-H "Authorization: Bearer YOUR_ACCESS_TOKEN" \
+
-H "Content-Type: application/json" \
+
-d '{
+
"communityDid": "did:plc:community123...",
+
"post": {
+
"text": "New blog post: Understanding atProto Identity\nhttps://example.com/post",
+
"createdAt": "2024-01-15T12:00:00Z",
+
"facets": [
+
{
+
"index": { "byteStart": 50, "byteEnd": 75 },
+
"features": [
+
{
+
"$type": "social.coves.richtext.facet#link",
+
"uri": "https://example.com/post"
+
}
+
]
+
}
+
]
+
}
+
}'
+
```
+
+
**Response**:
+
```json
+
{
+
"uri": "at://did:plc:abc123.../social.coves.community.post/3k...",
+
"cid": "bafyrei..."
+
}
+
```
+
+
### Post Validation
+
+
The handler validates:
+
1. **Authentication**: Valid JWT token
+
2. **Author exists**: DID in `users` table
+
3. **Is aggregator**: DID in `aggregators` table
+
4. **Authorization**: Active authorization for (aggregator, community)
+
5. **Rate limit**: Less than 10 posts/hour to this community
+
6. **Content**: Valid post structure per lexicon
+
+
## Rate Limits
+
+
**Per-community rate limit**: 10 posts per hour
+
+
This is tracked in the `aggregator_posts` table and enforced at the handler level.
+
+
**Why?**: Prevents spam while allowing useful bot activity.
+
+
**Best practices**:
+
- Batch similar content
+
- Post only high-quality content
+
- Respect community guidelines
+
- Monitor your posting rate
+
+
## Security Best Practices
+
+
### Credential Management
+
+
โœ… **DO**:
+
- Store credentials in environment variables or secret management
+
- Use HTTPS for all API calls
+
- Rotate access tokens regularly (use refresh tokens)
+
- Keep `aggregator-config.env` out of version control
+
+
โŒ **DON'T**:
+
- Hardcode credentials in source code
+
- Commit credentials to Git
+
- Share access tokens publicly
+
- Reuse personal credentials for bots
+
+
### Domain Security
+
+
โœ… **DO**:
+
- Use HTTPS for `.well-known` endpoint
+
- Keep domain under your control
+
- Monitor for unauthorized changes
+
- Use DNSSEC if possible
+
+
โŒ **DON'T**:
+
- Use HTTP (will fail verification)
+
- Use shared/untrusted hosting
+
- Allow others to modify `.well-known` files
+
- Use expired SSL certificates
+
+
### Content Security
+
+
โœ… **DO**:
+
- Validate all external content before posting
+
- Sanitize URLs and text
+
- Rate-limit your own posting
+
- Implement circuit breakers for failures
+
+
โŒ **DON'T**:
+
- Post unvalidated user input
+
- Include malicious links
+
- Spam communities
+
- Bypass rate limits
+
+
## Troubleshooting
+
+
### Registration Errors
+
+
#### Error: "DomainVerificationFailed"
+
+
**Cause**: `.well-known/atproto-did` not accessible or contains wrong DID
+
+
**Solutions**:
+
1. Verify file is accessible: `curl https://yourdomain.com/.well-known/atproto-did`
+
2. Check content matches your DID exactly (no extra whitespace)
+
3. Ensure HTTPS is working (not HTTP)
+
4. Check web server logs for access errors
+
5. Verify firewall rules allow HTTPS traffic
+
+
#### Error: "AlreadyRegistered"
+
+
**Cause**: This DID is already registered with this Coves instance
+
+
**Solutions**:
+
- This is safe to ignore if you're re-running setup
+
- If you need to update info, just create a new service declaration
+
- Contact instance admin if you need to remove registration
+
+
#### Error: "DIDResolutionFailed"
+
+
**Cause**: Could not resolve DID document from PLC directory
+
+
**Solutions**:
+
1. Verify DID exists: `curl https://plc.directory/{your-did}`
+
2. Wait 30 seconds and retry (PLC propagation delay)
+
3. Check PDS is accessible
+
4. Verify DID format is correct (must start with `did:plc:` or `did:web:`)
+
+
### Posting Errors
+
+
#### Error: "NotAuthorized"
+
+
**Cause**: No active authorization for this (aggregator, community) pair
+
+
**Solutions**:
+
1. Check authorizations: `GET /xrpc/social.coves.aggregator.getAuthorizations`
+
2. Contact community moderator to request authorization
+
3. Verify authorization wasn't disabled
+
4. Wait for Jetstream to index authorization (5-10 seconds)
+
+
#### Error: "RateLimitExceeded"
+
+
**Cause**: Exceeded 10 posts/hour to this community
+
+
**Solutions**:
+
1. Wait for the rate limit window to reset
+
2. Batch posts to stay under limit
+
3. Distribute posts across multiple communities
+
4. Implement posting queue in your aggregator
+
+
### Service Declaration Not Appearing
+
+
**Symptoms**: Service declaration created but not in `aggregators` table
+
+
**Solutions**:
+
1. Wait 5-10 seconds for Jetstream to index
+
2. Check Jetstream consumer logs for errors
+
3. Verify record was created: Check PDS at `at://your-did/social.coves.aggregator.service/self`
+
4. Verify `$type` field is exactly `"social.coves.aggregator.service"`
+
5. Check `displayName` is not empty (required field)
+
+
## API Reference
+
+
### Registration Endpoint
+
+
**`POST /xrpc/social.coves.aggregator.register`**
+
+
**Input**:
+
```typescript
+
{
+
did: string // DID of aggregator (did:plc or did:web)
+
domain: string // Domain serving .well-known/atproto-did
+
}
+
```
+
+
**Output**:
+
```typescript
+
{
+
did: string // Registered DID
+
handle: string // Handle from DID document
+
message: string // Next steps message
+
}
+
```
+
+
**Errors**:
+
- `InvalidDID`: DID format invalid
+
- `DomainVerificationFailed`: .well-known verification failed
+
- `AlreadyRegistered`: DID already registered
+
- `DIDResolutionFailed`: Could not resolve DID
+
+
### Query Endpoints
+
+
**`GET /xrpc/social.coves.aggregator.getServices`**
+
+
Get aggregator service details.
+
+
**Parameters**:
+
- `dids`: Array of DIDs (comma-separated)
+
+
**`GET /xrpc/social.coves.aggregator.getAuthorizations`**
+
+
List communities that authorized an aggregator.
+
+
**Parameters**:
+
- `aggregatorDid`: Aggregator DID
+
- `enabledOnly`: Filter to enabled only (default: false)
+
+
**`GET /xrpc/social.coves.aggregator.listForCommunity`**
+
+
List aggregators authorized by a community.
+
+
**Parameters**:
+
- `communityDid`: Community DID
+
- `enabledOnly`: Filter to enabled only (default: false)
+
+
## Further Reading
+
+
- [Aggregator PRD](PRD_AGGREGATORS.md) - Architecture and design decisions
+
- [atProto Guide](../../ATPROTO_GUIDE.md) - atProto fundamentals
+
- [Communities PRD](../PRD_COMMUNITIES.md) - Community system overview
+
- [Setup Scripts README](../../scripts/aggregator-setup/README.md) - Script documentation
+
+
## Support
+
+
For issues or questions:
+
+
1. Check this guide's troubleshooting section
+
2. Review the PRD and architecture docs
+
3. Check Coves GitHub issues
+
4. Ask in Coves developer community
+95
scripts/aggregator-setup/1-create-pds-account.sh
···
+
#!/bin/bash

# Script: 1-create-pds-account.sh
# Purpose: Create a PDS account for your aggregator
#
# This script helps you create an account on a PDS (Personal Data Server).
# The PDS will automatically create a DID:PLC for you.
#
# Requires: curl, jq

set -e

# Fail fast with a clear message if a required tool is missing.
for tool in curl jq; do
    if ! command -v "$tool" > /dev/null 2>&1; then
        echo "Error: required tool '$tool' is not installed"
        exit 1
    fi
done

echo "================================================"
echo "Step 1: Create PDS Account for Your Aggregator"
echo "================================================"
echo ""

# Get PDS URL (defaults to Bluesky's public PDS)
read -p "Enter PDS URL (default: https://bsky.social): " PDS_URL
PDS_URL=${PDS_URL:-https://bsky.social}

# Get credentials (-s keeps the password off the terminal)
read -p "Enter desired handle (e.g., mynewsbot.bsky.social): " HANDLE
read -p "Enter email: " EMAIL
read -sp "Enter password: " PASSWORD
echo ""

# Validate inputs
if [ -z "$HANDLE" ] || [ -z "$EMAIL" ] || [ -z "$PASSWORD" ]; then
    echo "Error: All fields are required"
    exit 1
fi

echo ""
echo "Creating account on $PDS_URL..."

# Build the JSON payload with jq so special characters in the
# credentials (quotes, backslashes) cannot break or inject JSON.
PAYLOAD=$(jq -n \
    --arg handle "$HANDLE" \
    --arg email "$EMAIL" \
    --arg password "$PASSWORD" \
    '{handle: $handle, email: $email, password: $password}')

# Create account via com.atproto.server.createAccount
RESPONSE=$(curl -s -X POST "$PDS_URL/xrpc/com.atproto.server.createAccount" \
  -H "Content-Type: application/json" \
  -d "$PAYLOAD")

# Check if successful (the XRPC endpoint returns an "error" field on failure)
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
    echo "Error creating account:"
    echo "$RESPONSE" | jq '.'
    exit 1
fi

# Extract DID and access token
DID=$(echo "$RESPONSE" | jq -r '.did')
ACCESS_JWT=$(echo "$RESPONSE" | jq -r '.accessJwt')
REFRESH_JWT=$(echo "$RESPONSE" | jq -r '.refreshJwt')

if [ -z "$DID" ] || [ "$DID" = "null" ]; then
    echo "Error: Failed to extract DID from response"
    echo "$RESPONSE" | jq '.'
    exit 1
fi

echo ""
echo "โœ“ Account created successfully!"
echo ""
echo "=== Save these credentials ===="
echo "DID: $DID"
echo "Handle: $HANDLE"
echo "PDS URL: $PDS_URL"
echo "Email: $EMAIL"
echo "Password: [hidden]"
echo "Access JWT: $ACCESS_JWT"
echo "Refresh JWT: $REFRESH_JWT"
echo "==============================="
echo ""

# Save to config file (read by the later setup scripts)
CONFIG_FILE="aggregator-config.env"
cat > "$CONFIG_FILE" <<EOF
# Aggregator Account Configuration
# Generated: $(date)

AGGREGATOR_DID="$DID"
AGGREGATOR_HANDLE="$HANDLE"
AGGREGATOR_PDS_URL="$PDS_URL"
AGGREGATOR_EMAIL="$EMAIL"
AGGREGATOR_PASSWORD="$PASSWORD"
AGGREGATOR_ACCESS_JWT="$ACCESS_JWT"
AGGREGATOR_REFRESH_JWT="$REFRESH_JWT"
EOF

# Restrict permissions: the file contains the account password and tokens.
chmod 600 "$CONFIG_FILE"

echo "โœ“ Configuration saved to $CONFIG_FILE"
echo ""
echo "IMPORTANT: Keep this file secure! It contains your credentials."
echo ""
echo "Next step: Run ./2-setup-wellknown.sh"
+93
scripts/aggregator-setup/2-setup-wellknown.sh
···
+
#!/bin/bash

# Script: 2-setup-wellknown.sh
# Purpose: Generate .well-known/atproto-did file for domain verification
#
# This script creates the .well-known/atproto-did file that proves you own your domain.
# You'll need to host this file at https://yourdomain.com/.well-known/atproto-did

set -e

echo "================================================"
echo "Step 2: Setup .well-known/atproto-did"
echo "================================================"
echo ""

# Load config if available
if [ -f "aggregator-config.env" ]; then
    source aggregator-config.env
    echo "โœ“ Loaded configuration from aggregator-config.env"
    echo "  DID: $AGGREGATOR_DID"
    echo ""
else
    echo "Configuration file not found. Please run 1-create-pds-account.sh first."
    exit 1
fi

# Get domain
read -p "Enter your aggregator's domain (e.g., rss-bot.example.com): " DOMAIN

if [ -z "$DOMAIN" ]; then
    echo "Error: Domain is required"
    exit 1
fi

# Normalize: accept a pasted URL by stripping the scheme and any trailing
# slash; later scripts expect a bare hostname when building the
# https://$DOMAIN/.well-known/atproto-did URL.
DOMAIN=${DOMAIN#https://}
DOMAIN=${DOMAIN#http://}
DOMAIN=${DOMAIN%/}

# Save domain to config idempotently: drop any previous AGGREGATOR_DOMAIN
# entry first so re-running this script does not accumulate duplicates.
grep -v '^AGGREGATOR_DOMAIN=' aggregator-config.env > aggregator-config.env.tmp || true
mv aggregator-config.env.tmp aggregator-config.env
echo "AGGREGATOR_DOMAIN=\"$DOMAIN\"" >> aggregator-config.env

echo ""
echo "Creating .well-known directory..."
mkdir -p .well-known

# Create the atproto-did file (plain text, DID only)
echo "$AGGREGATOR_DID" > .well-known/atproto-did

echo "โœ“ Created .well-known/atproto-did with content: $AGGREGATOR_DID"
echo ""

echo "================================================"
echo "Next Steps:"
echo "================================================"
echo ""
echo "1. Upload the .well-known directory to your web server"
echo "   The file must be accessible at:"
echo "   https://$DOMAIN/.well-known/atproto-did"
echo ""
echo "2. Verify it's working by running:"
echo "   curl https://$DOMAIN/.well-known/atproto-did"
echo "   (Should return: $AGGREGATOR_DID)"
echo ""
echo "3. Once verified, run: ./3-register-with-coves.sh"
echo ""

# Create nginx example
cat > nginx-example.conf <<EOF
# Example nginx configuration for serving .well-known
# Add this to your nginx server block:

location /.well-known/atproto-did {
    alias /path/to/your/.well-known/atproto-did;
    default_type text/plain;
    add_header Access-Control-Allow-Origin *;
}
EOF

echo "โœ“ Created nginx-example.conf for reference"
echo ""

# Create Apache example
cat > apache-example.conf <<EOF
# Example Apache configuration for serving .well-known
# Add this to your Apache virtual host:

Alias /.well-known /path/to/your/.well-known
<Directory /path/to/your/.well-known>
    Options None
    AllowOverride None
    Require all granted
    Header set Access-Control-Allow-Origin "*"
</Directory>
EOF

echo "โœ“ Created apache-example.conf for reference"
+103
scripts/aggregator-setup/3-register-with-coves.sh
···
+
#!/bin/bash

# Script: 3-register-with-coves.sh
# Purpose: Register your aggregator with a Coves instance
#
# This script calls the social.coves.aggregator.register XRPC endpoint
# to register your aggregator DID with the Coves instance.

set -e

echo "================================================"
echo "Step 3: Register with Coves Instance"
echo "================================================"
echo ""

# Load config if available
if [ -f "aggregator-config.env" ]; then
    source aggregator-config.env
    echo "โœ“ Loaded configuration from aggregator-config.env"
    echo "  DID: $AGGREGATOR_DID"
    echo "  Domain: $AGGREGATOR_DOMAIN"
    echo ""
else
    echo "Configuration file not found. Please run previous scripts first."
    exit 1
fi

# Validate domain is set
if [ -z "$AGGREGATOR_DOMAIN" ]; then
    echo "Error: AGGREGATOR_DOMAIN not set. Please run 2-setup-wellknown.sh first."
    exit 1
fi

# Get Coves instance URL
read -p "Enter Coves instance URL (default: https://api.coves.social): " COVES_URL
COVES_URL=${COVES_URL:-https://api.coves.social}

echo ""
echo "Verifying .well-known/atproto-did is accessible..."

# Verify .well-known is accessible.
# -f makes curl exit non-zero on HTTP errors (404/500), so an HTML error
# page body is never mistaken for the DID file contents.
WELLKNOWN_URL="https://$AGGREGATOR_DOMAIN/.well-known/atproto-did"
WELLKNOWN_CONTENT=$(curl -sf "$WELLKNOWN_URL" || echo "ERROR")

# Trim whitespace so a trailing newline or stray spaces in the hosted file
# don't cause a false mismatch against the DID.
WELLKNOWN_CONTENT=$(echo "$WELLKNOWN_CONTENT" | tr -d '[:space:]')

if [ "$WELLKNOWN_CONTENT" = "ERROR" ]; then
    echo "โœ— Error: Could not access $WELLKNOWN_URL"
    echo "  Please ensure the file is uploaded and accessible."
    exit 1
elif [ "$WELLKNOWN_CONTENT" != "$AGGREGATOR_DID" ]; then
    echo "โœ— Error: .well-known/atproto-did contains wrong DID"
    echo "  Expected: $AGGREGATOR_DID"
    echo "  Got: $WELLKNOWN_CONTENT"
    exit 1
fi

echo "โœ“ .well-known/atproto-did is correctly configured"
echo ""

echo "Registering with $COVES_URL..."

# Build the payload with jq for safe JSON quoting, then call the
# registration endpoint.
PAYLOAD=$(jq -n \
    --arg did "$AGGREGATOR_DID" \
    --arg domain "$AGGREGATOR_DOMAIN" \
    '{did: $did, domain: $domain}')

RESPONSE=$(curl -s -X POST "$COVES_URL/xrpc/social.coves.aggregator.register" \
  -H "Content-Type: application/json" \
  -d "$PAYLOAD")

# Check if successful (the endpoint returns an "error" field on failure)
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
    echo "โœ— Registration failed:"
    echo "$RESPONSE" | jq '.'
    exit 1
fi

# Extract response
REGISTERED_DID=$(echo "$RESPONSE" | jq -r '.did')
REGISTERED_HANDLE=$(echo "$RESPONSE" | jq -r '.handle')
MESSAGE=$(echo "$RESPONSE" | jq -r '.message')

if [ -z "$REGISTERED_DID" ] || [ "$REGISTERED_DID" = "null" ]; then
    echo "โœ— Error: Unexpected response format"
    echo "$RESPONSE" | jq '.'
    exit 1
fi

echo ""
echo "โœ“ Registration successful!"
echo ""
echo "=== Registration Details ===="
echo "DID: $REGISTERED_DID"
echo "Handle: $REGISTERED_HANDLE"
echo "Message: $MESSAGE"
echo "============================="
echo ""

# Save Coves URL to config idempotently: drop any previous entry first so
# re-running this script does not accumulate duplicates.
grep -v '^COVES_INSTANCE_URL=' aggregator-config.env > aggregator-config.env.tmp || true
mv aggregator-config.env.tmp aggregator-config.env
echo "COVES_INSTANCE_URL=\"$COVES_URL\"" >> aggregator-config.env

echo "โœ“ Updated aggregator-config.env with Coves instance URL"
echo ""
echo "Next step: Run ./4-create-service-declaration.sh"
+125
scripts/aggregator-setup/4-create-service-declaration.sh
···
+
#!/bin/bash
+
+
# Script: 4-create-service-declaration.sh
+
# Purpose: Create aggregator service declaration record
+
#
+
# This script writes a social.coves.aggregator.service record to your aggregator's repository.
+
# This record contains metadata about your aggregator (name, description, etc.) and will be
+
# indexed by Coves' Jetstream consumer into the aggregators table.
+
+
set -e
+
+
echo "================================================"
+
echo "Step 4: Create Service Declaration"
+
echo "================================================"
+
echo ""
+
+
# Load config if available
+
if [ -f "aggregator-config.env" ]; then
+
source aggregator-config.env
+
echo "โœ“ Loaded configuration from aggregator-config.env"
+
echo " DID: $AGGREGATOR_DID"
+
echo " PDS URL: $AGGREGATOR_PDS_URL"
+
echo ""
+
else
+
echo "Configuration file not found. Please run previous scripts first."
+
exit 1
+
fi
+
+
# Validate required fields
+
if [ -z "$AGGREGATOR_ACCESS_JWT" ]; then
+
echo "Error: AGGREGATOR_ACCESS_JWT not set. Please run 1-create-pds-account.sh first."
+
exit 1
+
fi
+
+
echo "Enter aggregator metadata:"
+
echo ""
+
+
# Get metadata from user
+
read -p "Display Name (e.g., 'RSS News Aggregator'): " DISPLAY_NAME
+
read -p "Description: " DESCRIPTION
+
read -p "Source URL (e.g., 'https://github.com/yourname/aggregator'): " SOURCE_URL
+
read -p "Maintainer DID (your personal DID, optional): " MAINTAINER_DID
+
+
if [ -z "$DISPLAY_NAME" ]; then
+
echo "Error: Display name is required"
+
exit 1
+
fi
+
+
echo ""
+
echo "Creating service declaration record..."
+
+
# Build the service record
+
SERVICE_RECORD=$(cat <<EOF
+
{
+
"\$type": "social.coves.aggregator.service",
+
"did": "$AGGREGATOR_DID",
+
"displayName": "$DISPLAY_NAME",
+
"description": "$DESCRIPTION",
+
"sourceUrl": "$SOURCE_URL",
+
"maintainer": "$MAINTAINER_DID",
+
"createdAt": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+
}
+
EOF
+
)
+
+
# Call com.atproto.repo.createRecord
+
RESPONSE=$(curl -s -X POST "$AGGREGATOR_PDS_URL/xrpc/com.atproto.repo.createRecord" \
+
-H "Authorization: Bearer $AGGREGATOR_ACCESS_JWT" \
+
-H "Content-Type: application/json" \
+
-d "{
+
\"repo\": \"$AGGREGATOR_DID\",
+
\"collection\": \"social.coves.aggregator.service\",
+
\"rkey\": \"self\",
+
\"record\": $SERVICE_RECORD
+
}")
+
+
# Check if successful
+
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
+
echo "โœ— Failed to create service declaration:"
+
echo "$RESPONSE" | jq '.'
+
exit 1
+
fi
+
+
# Extract response
+
RECORD_URI=$(echo "$RESPONSE" | jq -r '.uri')
+
RECORD_CID=$(echo "$RESPONSE" | jq -r '.cid')
+
+
if [ -z "$RECORD_URI" ] || [ "$RECORD_URI" = "null" ]; then
+
echo "โœ— Error: Unexpected response format"
+
echo "$RESPONSE" | jq '.'
+
exit 1
+
fi
+
+
echo ""
+
echo "โœ“ Service declaration created successfully!"
+
echo ""
+
echo "=== Record Details ===="
+
echo "URI: $RECORD_URI"
+
echo "CID: $RECORD_CID"
+
echo "======================="
+
echo ""
+
+
# Save to config
+
echo "" >> aggregator-config.env
+
echo "SERVICE_DECLARATION_URI=\"$RECORD_URI\"" >> aggregator-config.env
+
echo "SERVICE_DECLARATION_CID=\"$RECORD_CID\"" >> aggregator-config.env
+
+
echo "โœ“ Updated aggregator-config.env"
+
echo ""
+
echo "================================================"
+
echo "Setup Complete!"
+
echo "================================================"
+
echo ""
+
echo "Your aggregator is now registered with Coves!"
+
echo ""
+
echo "Next steps:"
+
echo "1. Wait a few seconds for Jetstream to index your service declaration"
+
echo "2. Verify your aggregator appears in the aggregators list"
+
echo "3. Community moderators can now authorize your aggregator"
+
echo "4. Once authorized, you can start posting to communities"
+
echo ""
+
echo "To test posting, use the Coves XRPC endpoint:"
+
echo " POST $COVES_INSTANCE_URL/xrpc/social.coves.community.post.create"
+
echo ""
+
echo "See docs/aggregators/SETUP_GUIDE.md for more information"
+252
scripts/aggregator-setup/README.md
···
+
# Aggregator Setup Scripts
+
+
This directory contains scripts to help you set up and register your aggregator with Coves instances.
+
+
## Overview
+
+
Aggregators are automated services that post content to Coves communities. They are similar to Bluesky's feed generators and labelers. To use aggregators with Coves, you need to:
+
+
1. Create a PDS account for your aggregator (gets you a DID)
+
2. Prove you own a domain via `.well-known/atproto-did`
+
3. Register with a Coves instance
+
4. Create a service declaration record
+
+
These scripts automate this process for you.
+
+
## Prerequisites
+
+
- **Domain ownership**: You must own a domain where you can host the `.well-known/atproto-did` file
+
- **Web server**: Ability to serve static files over HTTPS
+
- **Tools**: `curl`, `jq` (for JSON processing)
+
- **Account**: Email address for creating the PDS account
+
+
## Quick Start
+
+
### Interactive Setup (Recommended)
+
+
Run the scripts in order:
+
+
```bash
+
# Make scripts executable
+
chmod +x *.sh
+
+
# Step 1: Create PDS account
+
./1-create-pds-account.sh
+
+
# Step 2: Generate .well-known file
+
./2-setup-wellknown.sh
+
+
# Step 3: Register with Coves (after uploading .well-known)
+
./3-register-with-coves.sh
+
+
# Step 4: Create service declaration
+
./4-create-service-declaration.sh
+
```
+
+
### Automated Setup Example
+
+
For a reference implementation of automated setup, see the Kagi News aggregator at [aggregators/kagi-news/scripts/setup.sh](../../aggregators/kagi-news/scripts/setup.sh).
+
+
The Kagi script shows how to automate all 4 steps (with the manual .well-known upload step in between).
+
+
## Script Reference
+
+
### 1-create-pds-account.sh
+
+
**Purpose**: Creates a PDS account for your aggregator
+
+
**Prompts for**:
+
- PDS URL (default: https://bsky.social)
+
- Handle (e.g., mynewsbot.bsky.social)
+
- Email
+
- Password
+
+
**Outputs**:
+
- `aggregator-config.env` - Configuration file with DID and credentials
+
- Prints your DID and access tokens
+
+
**Notes**:
+
- Keep the config file secure! It contains your credentials
+
- The PDS automatically generates a DID:PLC for you
+
- You can use any PDS service, not just bsky.social
+
+
### 2-setup-wellknown.sh
+
+
**Purpose**: Generates the `.well-known/atproto-did` file for domain verification
+
+
**Prompts for**:
+
- Your domain (e.g., rss-bot.example.com)
+
+
**Outputs**:
+
- `.well-known/atproto-did` - File containing your DID
+
- `nginx-example.conf` - Example nginx configuration
+
- `apache-example.conf` - Example Apache configuration
+
+
**Manual step required**:
+
Upload the `.well-known` directory to your web server. The file must be accessible at:
+
```
+
https://yourdomain.com/.well-known/atproto-did
+
```
+
+
**Verify it works**:
+
```bash
+
curl https://yourdomain.com/.well-known/atproto-did
+
# Should return your DID (e.g., did:plc:abc123...)
+
```
+
+
### 3-register-with-coves.sh
+
+
**Purpose**: Registers your aggregator with a Coves instance
+
+
**Prompts for**:
+
- Coves instance URL (default: https://api.coves.social)
+
+
**Prerequisites**:
+
- `.well-known/atproto-did` must be accessible from your domain
+
- Scripts 1 and 2 must be completed
+
+
**What it does**:
+
1. Verifies your `.well-known/atproto-did` is accessible
+
2. Calls `social.coves.aggregator.register` XRPC endpoint
+
3. Coves verifies domain ownership
+
4. Inserts your aggregator into the `users` table
+
+
**Outputs**:
+
- Updates `aggregator-config.env` with Coves instance URL
+
- Prints registration confirmation
+
+
### 4-create-service-declaration.sh
+
+
**Purpose**: Creates the service declaration record in your repository
+
+
**Prompts for**:
+
- Display name (e.g., "RSS News Aggregator")
+
- Description
+
- Source URL (GitHub repo, etc.)
+
- Maintainer DID (optional)
+
+
**What it does**:
+
1. Creates a `social.coves.aggregator.service` record at `at://your-did/social.coves.aggregator.service/self`
+
2. Jetstream consumer will index this into the `aggregators` table
+
3. Communities can now discover and authorize your aggregator
+
+
**Outputs**:
+
- Updates `aggregator-config.env` with record URI and CID
+
- Prints record details
+
+
## Configuration File
+
+
After running the scripts, you'll have an `aggregator-config.env` file with:
+
+
```bash
+
AGGREGATOR_DID="did:plc:..."
+
AGGREGATOR_HANDLE="mynewsbot.bsky.social"
+
AGGREGATOR_PDS_URL="https://bsky.social"
+
AGGREGATOR_EMAIL="bot@example.com"
+
AGGREGATOR_PASSWORD="..."
+
AGGREGATOR_ACCESS_JWT="..."
+
AGGREGATOR_REFRESH_JWT="..."
+
AGGREGATOR_DOMAIN="rss-bot.example.com"
+
COVES_INSTANCE_URL="https://api.coves.social"
+
SERVICE_DECLARATION_URI="at://did:plc:.../social.coves.aggregator.service/self"
+
SERVICE_DECLARATION_CID="..."
+
```
+
+
**Use this in your aggregator code** to authenticate and post.
+
+
## What Happens Next?
+
+
After completing all 4 steps:
+
+
1. **Your aggregator is registered** in the Coves instance's `users` table
+
2. **Your service declaration is indexed** in the `aggregators` table (takes a few seconds)
+
3. **Community moderators can now authorize** your aggregator for their communities
+
4. **Once authorized**, your aggregator can post to those communities
+
+
## Creating an Authorization
+
+
Authorizations are created by community moderators, not by aggregators. The moderator writes a `social.coves.aggregator.authorization` record to their community's repository.
+
+
See `docs/aggregators/SETUP_GUIDE.md` for more information on the authorization process.
+
+
## Posting to Communities
+
+
Once authorized, your aggregator can post using:
+
+
```bash
+
curl -X POST https://api.coves.social/xrpc/social.coves.community.post.create \
+
-H "Authorization: Bearer $AGGREGATOR_ACCESS_JWT" \
+
-H "Content-Type: application/json" \
+
-d '{
+
"communityDid": "did:plc:...",
+
"post": {
+
"text": "Your post content",
+
"createdAt": "2024-01-15T12:00:00Z"
+
}
+
}'
+
```
+
+
## Troubleshooting
+
+
### Error: "DomainVerificationFailed"
+
+
- Verify `.well-known/atproto-did` is accessible: `curl https://yourdomain.com/.well-known/atproto-did`
+
- Check the content matches your DID exactly (no extra whitespace)
+
- Ensure HTTPS is working (not HTTP)
+
- Check CORS headers if accessing from browser
+
+
### Error: "AlreadyRegistered"
+
+
- You've already registered this DID with this Coves instance
+
- This is safe to ignore if you're re-running the setup
+
+
### Error: "DIDResolutionFailed"
+
+
- Your DID might be invalid or not found in the PLC directory
+
- Verify your DID exists: `curl https://plc.directory/<your-did>`
+
- Wait a few seconds and try again (PLC directory might be propagating)
+
+
### Service declaration not appearing
+
+
- Wait 5-10 seconds for Jetstream consumer to index it
+
- Check the Jetstream logs for errors
+
- Verify the record was created: Check your PDS at `at://your-did/social.coves.aggregator.service/self`
+
+
## Example: Kagi News Aggregator
+
+
For a complete reference implementation, see the Kagi News aggregator at `aggregators/kagi-news/`.
+
+
The Kagi aggregator includes an automated setup script at [aggregators/kagi-news/scripts/setup.sh](../../aggregators/kagi-news/scripts/setup.sh) that demonstrates how to:
+
+
- Automate the entire registration process
+
- Use environment variables for configuration
+
- Handle errors gracefully
+
- Integrate the setup into your aggregator project
+
+
This shows how you can package scripts 1-4 into a single automated flow for your specific aggregator.
+
+
## Security Notes
+
+
- **Never commit `aggregator-config.env`** to version control
+
- Store credentials securely (use environment variables or secret management)
+
- Rotate access tokens regularly
+
- Use HTTPS for all API calls
+
- Validate community authorization before posting
+
+
## More Information
+
+
- [Aggregator Setup Guide](../../docs/aggregators/SETUP_GUIDE.md)
+
- [Aggregator PRD](../../docs/aggregators/PRD_AGGREGATORS.md)
+
- [atProto Identity Guide](../../ATPROTO_GUIDE.md)
+
- [Coves Communities PRD](../../docs/PRD_COMMUNITIES.md)
+
+
## Support
+
+
If you encounter issues:
+
+
1. Check the troubleshooting section above
+
2. Review the full documentation in `docs/aggregators/`
+
3. Open an issue on GitHub with:
+
- Which script failed
+
- Error message
+
- Your domain (without credentials)
+195
aggregators/kagi-news/scripts/setup.sh
···
+
#!/bin/bash
+
+
# Script: setup-kagi-aggregator.sh
+
# Purpose: Complete setup script for Kagi News RSS aggregator
+
#
+
# This is a reference implementation showing automated setup for a specific aggregator.
+
# Other aggregator developers can use this as a template.
+
+
set -e
+
+
echo "================================================"
+
echo "Kagi News RSS Aggregator - Automated Setup"
+
echo "================================================"
+
echo ""
+
+
# Configuration for Kagi aggregator
+
AGGREGATOR_NAME="kagi-news-bot"
+
DISPLAY_NAME="Kagi News RSS"
+
DESCRIPTION="Aggregates tech news from Kagi RSS feeds and posts to relevant communities"
+
SOURCE_URL="https://github.com/coves-social/kagi-aggregator"
+
+
# Check if config already exists
+
if [ -f "kagi-aggregator-config.env" ]; then
+
echo "Configuration file already exists. Loading existing configuration..."
+
source kagi-aggregator-config.env
+
SKIP_ACCOUNT_CREATION=true
+
else
+
SKIP_ACCOUNT_CREATION=false
+
fi
+
+
# Get runtime configuration
+
if [ "$SKIP_ACCOUNT_CREATION" = false ]; then
+
read -p "Enter PDS URL (default: https://bsky.social): " PDS_URL
+
PDS_URL=${PDS_URL:-https://bsky.social}
+
+
read -p "Enter email for bot account: " EMAIL
+
read -sp "Enter password for bot account: " PASSWORD
+
echo ""
+
+
# Generate handle
+
TIMESTAMP=$(date +%s)
+
HANDLE="$AGGREGATOR_NAME-$TIMESTAMP.bsky.social"
+
+
echo ""
+
echo "Creating PDS account..."
+
echo "Handle: $HANDLE"
+
+
# Create account
+
RESPONSE=$(curl -s -X POST "$PDS_URL/xrpc/com.atproto.server.createAccount" \
+
-H "Content-Type: application/json" \
+
-d "{
+
\"handle\": \"$HANDLE\",
+
\"email\": \"$EMAIL\",
+
\"password\": \"$PASSWORD\"
+
}")
+
+
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
+
echo "โœ— Error creating account:"
+
echo "$RESPONSE" | jq '.'
+
exit 1
+
fi
+
+
DID=$(echo "$RESPONSE" | jq -r '.did')
+
ACCESS_JWT=$(echo "$RESPONSE" | jq -r '.accessJwt')
+
REFRESH_JWT=$(echo "$RESPONSE" | jq -r '.refreshJwt')
+
+
echo "โœ“ Account created: $DID"
+
+
# Save configuration
+
cat > kagi-aggregator-config.env <<EOF
+
# Kagi Aggregator Configuration
+
AGGREGATOR_DID="$DID"
+
AGGREGATOR_HANDLE="$HANDLE"
+
AGGREGATOR_PDS_URL="$PDS_URL"
+
AGGREGATOR_EMAIL="$EMAIL"
+
AGGREGATOR_PASSWORD="$PASSWORD"
+
AGGREGATOR_ACCESS_JWT="$ACCESS_JWT"
+
AGGREGATOR_REFRESH_JWT="$REFRESH_JWT"
+
EOF
+
+
echo "โœ“ Configuration saved to kagi-aggregator-config.env"
+
fi
+
+
# Get domain and Coves instance
+
read -p "Enter aggregator domain (e.g., kagi-news.example.com): " DOMAIN
+
read -p "Enter Coves instance URL (default: https://api.coves.social): " COVES_URL
+
COVES_URL=${COVES_URL:-https://api.coves.social}
+
+
# Setup .well-known
+
echo ""
+
echo "Setting up .well-known/atproto-did..."
+
mkdir -p .well-known
+
echo "$DID" > .well-known/atproto-did
+
echo "โœ“ Created .well-known/atproto-did"
+
+
echo ""
+
echo "================================================"
+
echo "IMPORTANT: Manual Step Required"
+
echo "================================================"
+
echo ""
+
echo "Upload the .well-known directory to your web server at:"
+
echo " https://$DOMAIN/.well-known/atproto-did"
+
echo ""
+
read -p "Press Enter when the file is uploaded and accessible..."
+
+
# Verify .well-known
+
echo ""
+
echo "Verifying .well-known/atproto-did..."
+
WELLKNOWN_CONTENT=$(curl -s "https://$DOMAIN/.well-known/atproto-did" || echo "ERROR")
+
+
if [ "$WELLKNOWN_CONTENT" != "$DID" ]; then
+
echo "โœ— Error: .well-known/atproto-did not accessible or contains wrong DID"
+
echo " Expected: $DID"
+
echo " Got: $WELLKNOWN_CONTENT"
+
exit 1
+
fi
+
+
echo "โœ“ .well-known/atproto-did verified"
+
+
# Register with Coves
+
echo ""
+
echo "Registering with Coves instance..."
+
RESPONSE=$(curl -s -X POST "$COVES_URL/xrpc/social.coves.aggregator.register" \
+
-H "Content-Type: application/json" \
+
-d "{
+
\"did\": \"$DID\",
+
\"domain\": \"$DOMAIN\"
+
}")
+
+
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
+
echo "โœ— Registration failed:"
+
echo "$RESPONSE" | jq '.'
+
exit 1
+
fi
+
+
echo "โœ“ Registered with Coves"
+
+
# Create service declaration
+
echo ""
+
echo "Creating service declaration..."
+
SERVICE_RECORD=$(cat <<EOF
+
{
+
"\$type": "social.coves.aggregator.service",
+
"did": "$DID",
+
"displayName": "$DISPLAY_NAME",
+
"description": "$DESCRIPTION",
+
"sourceUrl": "$SOURCE_URL",
+
"createdAt": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+
}
+
EOF
+
)
+
+
RESPONSE=$(curl -s -X POST "$PDS_URL/xrpc/com.atproto.repo.createRecord" \
+
-H "Authorization: Bearer $ACCESS_JWT" \
+
-H "Content-Type: application/json" \
+
-d "{
+
\"repo\": \"$DID\",
+
\"collection\": \"social.coves.aggregator.service\",
+
\"rkey\": \"self\",
+
\"record\": $SERVICE_RECORD
+
}")
+
+
if echo "$RESPONSE" | jq -e '.error' > /dev/null 2>&1; then
+
echo "โœ— Failed to create service declaration:"
+
echo "$RESPONSE" | jq '.'
+
exit 1
+
fi
+
+
RECORD_URI=$(echo "$RESPONSE" | jq -r '.uri')
+
echo "โœ“ Service declaration created: $RECORD_URI"
+
+
# Save final configuration
+
cat >> kagi-aggregator-config.env <<EOF
+
+
# Setup completed on $(date)
+
AGGREGATOR_DOMAIN="$DOMAIN"
+
COVES_INSTANCE_URL="$COVES_URL"
+
SERVICE_DECLARATION_URI="$RECORD_URI"
+
EOF
+
+
echo ""
+
echo "================================================"
+
echo "โœ“ Kagi Aggregator Setup Complete!"
+
echo "================================================"
+
echo ""
+
echo "Configuration saved to: kagi-aggregator-config.env"
+
echo ""
+
echo "Your aggregator is now registered and ready to use."
+
echo ""
+
echo "Next steps:"
+
echo "1. Start your aggregator bot: npm start (or appropriate command)"
+
echo "2. Community moderators can authorize your aggregator"
+
echo "3. Once authorized, your bot can start posting"
+
echo ""
+
echo "See docs/aggregators/SETUP_GUIDE.md for more information"
+55
aggregators/kagi-news/.dockerignore
···
+
# Git
+
.git
+
.gitignore
+
+
# Python
+
__pycache__
+
*.py[cod]
+
*$py.class
+
*.so
+
.Python
+
venv/
+
*.egg-info
+
dist/
+
build/
+
+
# Testing
+
.pytest_cache/
+
.coverage
+
htmlcov/
+
.tox/
+
.hypothesis/
+
+
# IDE
+
.vscode/
+
.idea/
+
*.swp
+
*.swo
+
*~
+
+
# Environment
+
.env.local
+
.env.*.local
+
+
# Data and logs
+
data/
+
*.log
+
+
# Documentation
+
README.md
+
docs/
+
+
# Docker
+
Dockerfile
+
docker-compose.yml
+
.dockerignore
+
+
# Development
+
tests/
+
pytest.ini
+
mypy.ini
+
.mypy_cache/
+
+
# OS
+
.DS_Store
+
Thumbs.db
+53
aggregators/kagi-news/Dockerfile
···
+
# Kagi News RSS Aggregator
+
# Production-ready Docker image with cron scheduler
+
+
FROM python:3.11-slim
+
+
# Install cron and other utilities
+
RUN apt-get update && apt-get install -y \
+
cron \
+
curl \
+
procps \
+
&& rm -rf /var/lib/apt/lists/*
+
+
# Set working directory
+
WORKDIR /app
+
+
# Copy requirements first for better caching
+
COPY requirements.txt .
+
+
# Install Python dependencies (exclude dev/test deps in production)
+
RUN pip install --no-cache-dir \
+
feedparser==6.0.11 \
+
beautifulsoup4==4.12.3 \
+
requests==2.31.0 \
+
atproto==0.0.55 \
+
pyyaml==6.0.1
+
+
# Copy application code
+
COPY src/ ./src/
+
COPY config.yaml ./
+
+
# Copy crontab file
+
COPY crontab /etc/cron.d/kagi-aggregator
+
+
# Give execution rights on the cron job and apply it
+
RUN chmod 0644 /etc/cron.d/kagi-aggregator && \
+
crontab /etc/cron.d/kagi-aggregator
+
+
# Create log file to be able to run tail
+
RUN touch /var/log/cron.log
+
+
# Copy entrypoint script
+
COPY docker-entrypoint.sh /usr/local/bin/
+
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
+
+
# Health check - verify cron is running
+
HEALTHCHECK --interval=60s --timeout=10s --start-period=10s --retries=3 \
+
CMD pgrep cron || exit 1
+
+
# Run the entrypoint script
+
ENTRYPOINT ["docker-entrypoint.sh"]
+
+
# Default command: tail the cron log
+
CMD ["tail", "-f", "/var/log/cron.log"]
+48
aggregators/kagi-news/docker-compose.yml
···
+
services:
+
kagi-aggregator:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
container_name: kagi-news-aggregator
+
restart: unless-stopped
+
+
# Environment variables - override in .env file or here
+
environment:
+
# Required: Aggregator credentials
+
- AGGREGATOR_HANDLE=${AGGREGATOR_HANDLE}
+
- AGGREGATOR_PASSWORD=${AGGREGATOR_PASSWORD}
+
+
# Optional: Override Coves API URL
+
- COVES_API_URL=${COVES_API_URL:-https://api.coves.social}
+
+
# Optional: Run immediately on startup (useful for testing)
+
- RUN_ON_STARTUP=${RUN_ON_STARTUP:-false}
+
+
# Mount config file if you want to modify it without rebuilding
+
volumes:
+
- ./config.yaml:/app/config.yaml:ro
+
- ./data:/app/data # For state persistence (if implemented)
+
+
# Use env_file to load credentials from .env
+
env_file:
+
- .env
+
+
# Logging configuration
+
logging:
+
driver: "json-file"
+
options:
+
max-size: "10m"
+
max-file: "3"
+
+
# Health check
+
healthcheck:
+
test: ["CMD", "pgrep", "cron"]
+
interval: 60s
+
timeout: 10s
+
retries: 3
+
start_period: 10s
+
+
# Optional: Networks for multi-container setups
+
# networks:
+
# coves:
+
# external: true
+41
aggregators/kagi-news/docker-entrypoint.sh
···
+
#!/bin/bash
+
set -e
+
+
echo "Starting Kagi News RSS Aggregator..."
+
echo "========================================="
+
+
# Load environment variables if .env file exists
+
if [ -f /app/.env ]; then
+
echo "Loading environment variables from .env"
+
export $(grep -v '^#' /app/.env | xargs)
+
fi
+
+
# Validate required environment variables
+
if [ -z "$AGGREGATOR_HANDLE" ] || [ -z "$AGGREGATOR_PASSWORD" ]; then
+
echo "ERROR: Missing required environment variables!"
+
echo "Please set AGGREGATOR_HANDLE and AGGREGATOR_PASSWORD"
+
exit 1
+
fi
+
+
echo "Aggregator Handle: $AGGREGATOR_HANDLE"
+
echo "Cron schedule loaded from /etc/cron.d/kagi-aggregator"
+
+
# Start cron in the background
+
echo "Starting cron daemon..."
+
cron
+
+
# Optional: Run aggregator immediately on startup (for testing)
+
if [ "$RUN_ON_STARTUP" = "true" ]; then
+
echo "Running aggregator immediately (RUN_ON_STARTUP=true)..."
+
cd /app && python -m src.main
+
fi
+
+
echo "========================================="
+
echo "Kagi News Aggregator is running!"
+
echo "Cron schedule: Daily at 1 PM UTC"
+
echo "Logs will appear below:"
+
echo "========================================="
+
echo ""
+
+
# Execute the command passed to docker run (defaults to tail -f /var/log/cron.log)
+
exec "$@"
+20
.beads/.gitignore
···
+
# SQLite databases
+
*.db
+
*.db-journal
+
*.db-wal
+
*.db-shm
+
+
# Daemon runtime files
+
daemon.lock
+
daemon.log
+
daemon.pid
+
bd.sock
+
+
# Legacy database files
+
db.sqlite
+
bd.db
+
+
# Keep JSONL exports and config (source of truth for git)
+
!*.jsonl
+
!metadata.json
+
!config.json
+56
.beads/config.yaml
···
+
# Beads Configuration File
+
# This file configures default behavior for all bd commands in this repository
+
# All settings can also be set via environment variables (BD_* prefix)
+
# or overridden with command-line flags
+
+
# Issue prefix for this repository (used by bd init)
+
# If not set, bd init will auto-detect from directory name
+
# Example: issue-prefix: "myproject" creates issues like "myproject-1", "myproject-2", etc.
+
# issue-prefix: ""
+
+
# Use no-db mode: load from JSONL, no SQLite, write back after each command
+
# When true, bd will use .beads/issues.jsonl as the source of truth
+
# instead of SQLite database
+
# no-db: false
+
+
# Disable daemon for RPC communication (forces direct database access)
+
# no-daemon: false
+
+
# Disable auto-flush of database to JSONL after mutations
+
# no-auto-flush: false
+
+
# Disable auto-import from JSONL when it's newer than database
+
# no-auto-import: false
+
+
# Enable JSON output by default
+
# json: false
+
+
# Default actor for audit trails (overridden by BD_ACTOR or --actor)
+
# actor: ""
+
+
# Path to database (overridden by BEADS_DB or --db)
+
# db: ""
+
+
# Auto-start daemon if not running (can also use BEADS_AUTO_START_DAEMON)
+
# auto-start-daemon: true
+
+
# Debounce interval for auto-flush (can also use BEADS_FLUSH_DEBOUNCE)
+
# flush-debounce: "5s"
+
+
# Multi-repo configuration (experimental - bd-307)
+
# Allows hydrating from multiple repositories and routing writes to the correct JSONL
+
# repos:
+
# primary: "." # Primary repo (where this database lives)
+
# additional: # Additional repos to hydrate from (read-only)
+
# - ~/beads-planning # Personal planning repo
+
# - ~/work-planning # Work planning repo
+
+
# Integration settings (access with 'bd config get/set')
+
# These are stored in the database, not in this file:
+
# - jira.url
+
# - jira.project
+
# - linear.url
+
# - linear.api-key
+
# - github.org
+
# - github.repo
+
# - sync.branch - Git branch for beads commits (use BEADS_SYNC_BRANCH env var or bd config set)
+4
.beads/metadata.json
···
+
{
+
"database": "beads.db",
+
"jsonl_export": "beads.jsonl"
+
}
+3
.gitattributes
···
+
+
# Use bd merge for beads JSONL files
+
.beads/beads.jsonl merge=beads
+131
AGENTS.md
···
+
# AI Agent Guidelines for Coves
+
+
## Issue Tracking with bd (beads)
+
+
**IMPORTANT**: This project uses **bd (beads)** for ALL issue tracking. Do NOT use markdown TODOs, task lists, or other tracking methods.
+
+
### Why bd?
+
+
- Dependency-aware: Track blockers and relationships between issues
+
- Git-friendly: Auto-syncs to JSONL for version control
+
- Agent-optimized: JSON output, ready work detection, discovered-from links
+
- Prevents duplicate tracking systems and confusion
+
+
### Quick Start
+
+
**Check for ready work:**
+
```bash
+
bd ready --json
+
```
+
+
**Create new issues:**
+
```bash
+
bd create "Issue title" -t bug|feature|task -p 0-4 --json
+
bd create "Issue title" -p 1 --deps discovered-from:bd-123 --json
+
```
+
+
**Claim and update:**
+
```bash
+
bd update bd-42 --status in_progress --json
+
bd update bd-42 --priority 1 --json
+
```
+
+
**Complete work:**
+
```bash
+
bd close bd-42 --reason "Completed" --json
+
```
+
+
### Issue Types
+
+
- `bug` - Something broken
+
- `feature` - New functionality
+
- `task` - Work item (tests, docs, refactoring)
+
- `epic` - Large feature with subtasks
+
- `chore` - Maintenance (dependencies, tooling)
+
+
### Priorities
+
+
- `0` - Critical (security, data loss, broken builds)
+
- `1` - High (major features, important bugs)
+
- `2` - Medium (default, nice-to-have)
+
- `3` - Low (polish, optimization)
+
- `4` - Backlog (future ideas)
+
+
### Workflow for AI Agents
+
+
1. **Check ready work**: `bd ready` shows unblocked issues
+
2. **Claim your task**: `bd update <id> --status in_progress`
+
3. **Work on it**: Implement, test, document
+
4. **Discover new work?** Create linked issue:
+
- `bd create "Found bug" -p 1 --deps discovered-from:<parent-id>`
+
5. **Complete**: `bd close <id> --reason "Done"`
+
6. **Commit together**: Always commit the `.beads/beads.jsonl` file together with the code changes so issue state stays in sync with code state
+
+
### Auto-Sync
+
+
bd automatically syncs with git:
+
- Exports to `.beads/beads.jsonl` after changes (5s debounce)
+
- Imports from JSONL when newer (e.g., after `git pull`)
+
- No manual export/import needed!
+
+
### MCP Server (Recommended)
+
+
If using Claude or MCP-compatible clients, install the beads MCP server:
+
+
```bash
+
pip install beads-mcp
+
```
+
+
Add to MCP config (e.g., `~/.config/claude/config.json`):
+
```json
+
{
+
"beads": {
+
"command": "beads-mcp",
+
"args": []
+
}
+
}
+
```
+
+
Then use `mcp__beads__*` functions instead of CLI commands.
+
+
### Managing AI-Generated Planning Documents
+
+
AI assistants often create planning and design documents during development:
+
- PLAN.md, IMPLEMENTATION.md, ARCHITECTURE.md
+
- DESIGN.md, CODEBASE_SUMMARY.md, INTEGRATION_PLAN.md
+
- TESTING_GUIDE.md, TECHNICAL_DESIGN.md, and similar files
+
+
**Best Practice: Use a dedicated directory for these ephemeral files**
+
+
**Recommended approach:**
+
- Create a `history/` directory in the project root
+
- Store ALL AI-generated planning/design docs in `history/`
+
- Keep the repository root clean and focused on permanent project files
+
- Only access `history/` when explicitly asked to review past planning
+
+
**Example .gitignore entry (optional):**
+
```
+
# AI planning documents (ephemeral)
+
history/
+
```
+
+
**Benefits:**
+
- โœ… Clean repository root
+
- โœ… Clear separation between ephemeral and permanent documentation
+
- โœ… Easy to exclude from version control if desired
+
- โœ… Preserves planning history for archeological research
+
- โœ… Reduces noise when browsing the project
+
+
### Important Rules
+
+
- โœ… Use bd for ALL task tracking
+
- โœ… Always use `--json` flag for programmatic use
+
- โœ… Link discovered work with `discovered-from` dependencies
+
- โœ… Check `bd ready` before asking "what should I work on?"
+
- โœ… Store AI planning docs in `history/` directory
+
- โŒ Do NOT create markdown TODO lists
+
- โŒ Do NOT use external issue trackers
+
- โŒ Do NOT duplicate tracking systems
+
- โŒ Do NOT clutter repo root with planning documents
+
+
For more details, see the [beads repository](https://github.com/steveyegge/beads).
+15
CLAUDE.md
···
- Security is built-in, not bolted-on
- Test-driven: write the test, then make it pass
- ASK QUESTIONS if you need context surrounding the product — DON'T ASSUME
+
## No Stubs, No Shortcuts
- **NEVER** use `unimplemented!()`, `todo!()`, or stub implementations
- **NEVER** leave placeholder code or incomplete implementations
···
- Every feature must be complete before moving on
- E2E tests must test REAL infrastructure - not mocks
+
## Issue Tracking
+
+
**This project uses [bd (beads)](https://github.com/steveyegge/beads) for ALL issue tracking.**
+
+
- Use `bd` commands, NOT markdown TODOs or task lists
+
- Check `bd ready` for unblocked work
+
- Always commit `.beads/beads.jsonl` with code changes
+
- See [AGENTS.md](AGENTS.md) for full workflow details
+
+
Quick commands:
+
- `bd ready --json` - Show ready work
+
- `bd create "Title" -t bug|feature|task -p 0-4 --json` - Create issue
+
- `bd update <id> --status in_progress --json` - Claim work
+
- `bd close <id> --reason "Done" --json` - Complete work
## Break Down Complex Tasks
- Large files or complex features should be broken into manageable chunks
- If a file is too large, discuss breaking it into smaller modules
+8 -8
internal/core/communities/community.go
···
// ListCommunitiesRequest represents query parameters for listing communities
type ListCommunitiesRequest struct {
-
Visibility string `json:"visibility,omitempty"`
-
HostedBy string `json:"hostedBy,omitempty"`
-
SortBy string `json:"sortBy,omitempty"`
-
SortOrder string `json:"sortOrder,omitempty"`
-
Limit int `json:"limit"`
-
Offset int `json:"offset"`
+
Sort string `json:"sort,omitempty"` // Enum: popular, active, new, alphabetical
+
Visibility string `json:"visibility,omitempty"` // Filter: public, unlisted, private
+
Category string `json:"category,omitempty"` // Optional: filter by category (future)
+
Language string `json:"language,omitempty"` // Optional: filter by language (future)
+
Limit int `json:"limit"` // 1-100, default 50
+
Offset int `json:"offset"` // Pagination offset
}
// SearchCommunitiesRequest represents query parameters for searching communities
···
name := c.Handle[:communityIndex]
// Extract instance domain (everything after ".community.")
-
// len(".community.") = 11
-
instanceDomain := c.Handle[communityIndex+11:]
+
communitySegment := ".community."
+
instanceDomain := c.Handle[communityIndex+len(communitySegment):]
return fmt.Sprintf("!%s@%s", name, instanceDomain)
}
+2 -2
internal/core/communities/interfaces.go
···
UpdateCredentials(ctx context.Context, did, accessToken, refreshToken string) error
// Listing & Search
-
List(ctx context.Context, req ListCommunitiesRequest) ([]*Community, int, error) // Returns communities + total count
+
List(ctx context.Context, req ListCommunitiesRequest) ([]*Community, error)
Search(ctx context.Context, req SearchCommunitiesRequest) ([]*Community, int, error)
// Subscriptions (lightweight feed follows)
···
CreateCommunity(ctx context.Context, req CreateCommunityRequest) (*Community, error)
GetCommunity(ctx context.Context, identifier string) (*Community, error) // identifier can be DID or handle
UpdateCommunity(ctx context.Context, req UpdateCommunityRequest) (*Community, error)
-
ListCommunities(ctx context.Context, req ListCommunitiesRequest) ([]*Community, int, error)
+
ListCommunities(ctx context.Context, req ListCommunitiesRequest) ([]*Community, error)
SearchCommunities(ctx context.Context, req SearchCommunitiesRequest) ([]*Community, int, error)
// Subscription operations (write-forward: creates record in user's PDS)
+57
scripts/backup.sh
···
+
#!/bin/bash
+
# Coves Database Backup Script
+
# Usage: ./scripts/backup.sh
+
#
+
# Creates timestamped PostgreSQL backups in ./backups/
+
# Retention: Keeps last 30 days of backups
+
+
set -e
+
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+
BACKUP_DIR="$PROJECT_DIR/backups"
+
COMPOSE_FILE="$PROJECT_DIR/docker-compose.prod.yml"
+
+
# Load environment
+
set -a
+
source "$PROJECT_DIR/.env.prod"
+
set +a
+
+
# Colors
+
GREEN='\033[0;32m'
+
YELLOW='\033[1;33m'
+
NC='\033[0m'
+
+
log() { echo -e "${GREEN}[BACKUP]${NC} $1"; }
+
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
+
+
# Create backup directory
+
mkdir -p "$BACKUP_DIR"
+
+
# Generate timestamp
+
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+
BACKUP_FILE="$BACKUP_DIR/coves_${TIMESTAMP}.sql.gz"
+
+
log "Starting backup..."
+
+
# Run pg_dump inside container
+
docker compose -f "$COMPOSE_FILE" exec -T postgres \
+
pg_dump -U "$POSTGRES_USER" -d "$POSTGRES_DB" --clean --if-exists \
+
| gzip > "$BACKUP_FILE"
+
+
# Get file size
+
SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
+
+
log "โœ… Backup complete: $BACKUP_FILE ($SIZE)"
+
+
# Cleanup old backups (keep last 30 days)
+
log "Cleaning up backups older than 30 days..."
+
find "$BACKUP_DIR" -name "coves_*.sql.gz" -mtime +30 -delete
+
+
# List recent backups
+
log ""
+
log "Recent backups:"
+
ls -lh "$BACKUP_DIR"/*.sql.gz 2>/dev/null | tail -5
+
+
log ""
+
log "To restore: gunzip -c $BACKUP_FILE | docker compose -f docker-compose.prod.yml exec -T postgres psql -U $POSTGRES_USER -d $POSTGRES_DB"
+133
scripts/deploy.sh
···
+
#!/bin/bash
+
# Coves Deployment Script
+
# Usage: ./scripts/deploy.sh [service]
+
#
+
# Examples:
+
# ./scripts/deploy.sh # Deploy all services
+
# ./scripts/deploy.sh appview # Deploy only AppView
+
# ./scripts/deploy.sh --pull # Pull from git first, then deploy
+
+
set -e
+
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+
COMPOSE_FILE="$PROJECT_DIR/docker-compose.prod.yml"
+
+
# Colors for output
+
RED='\033[0;31m'
+
GREEN='\033[0;32m'
+
YELLOW='\033[1;33m'
+
NC='\033[0m' # No Color
+
+
log() {
+
echo -e "${GREEN}[DEPLOY]${NC} $1"
+
}
+
+
warn() {
+
echo -e "${YELLOW}[WARN]${NC} $1"
+
}
+
+
error() {
+
echo -e "${RED}[ERROR]${NC} $1"
+
exit 1
+
}
+
+
# Parse arguments
+
PULL_GIT=false
+
SERVICE=""
+
+
for arg in "$@"; do
+
case $arg in
+
--pull)
+
PULL_GIT=true
+
;;
+
*)
+
SERVICE="$arg"
+
;;
+
esac
+
done
+
+
cd "$PROJECT_DIR"
+
+
# Load environment variables
+
if [ ! -f ".env.prod" ]; then
+
error ".env.prod not found! Copy from .env.prod.example and configure secrets."
+
fi
+
+
log "Loading environment from .env.prod..."
+
set -a
+
source .env.prod
+
set +a
+
+
# Optional: Pull from git
+
if [ "$PULL_GIT" = true ]; then
+
log "Pulling latest code from git..."
+
git fetch origin
+
git pull origin main
+
fi
+
+
# Check database connectivity before deployment
+
log "Checking database connectivity..."
+
if docker compose -f "$COMPOSE_FILE" exec -T postgres pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB" > /dev/null 2>&1; then
+
log "Database is ready"
+
else
+
warn "Database not ready yet - it will start with the deployment"
+
fi
+
+
# Build and deploy
+
if [ -n "$SERVICE" ]; then
+
log "Building $SERVICE..."
+
docker compose -f "$COMPOSE_FILE" build --no-cache "$SERVICE"
+
+
log "Deploying $SERVICE..."
+
docker compose -f "$COMPOSE_FILE" up -d "$SERVICE"
+
else
+
log "Building all services..."
+
docker compose -f "$COMPOSE_FILE" build --no-cache
+
+
log "Deploying all services..."
+
docker compose -f "$COMPOSE_FILE" up -d
+
fi
+
+
# Health check
+
log "Waiting for services to be healthy..."
+
sleep 10
+
+
# Wait for database to be ready before running migrations
+
log "Waiting for database..."
+
for i in {1..30}; do
+
if docker compose -f "$COMPOSE_FILE" exec -T postgres pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB" > /dev/null 2>&1; then
+
break
+
fi
+
sleep 1
+
done
+
+
# Run database migrations
+
# The AppView runs migrations on startup, but we can also trigger them explicitly
+
log "Running database migrations..."
+
if docker compose -f "$COMPOSE_FILE" exec -T appview /app/coves-server migrate 2>/dev/null; then
+
log "โœ… Migrations completed"
+
else
+
warn "โš ๏ธ Migration command not available or failed - AppView will run migrations on startup"
+
fi
+
+
# Check AppView health
+
if docker compose -f "$COMPOSE_FILE" exec -T appview wget --spider -q http://localhost:8080/xrpc/_health 2>/dev/null; then
+
log "โœ… AppView is healthy"
+
else
+
warn "โš ๏ธ AppView health check failed - check logs with: docker compose -f docker-compose.prod.yml logs appview"
+
fi
+
+
# Check PDS health
+
if docker compose -f "$COMPOSE_FILE" exec -T pds wget --spider -q http://localhost:3000/xrpc/_health 2>/dev/null; then
+
log "โœ… PDS is healthy"
+
else
+
warn "โš ๏ธ PDS health check failed - check logs with: docker compose -f docker-compose.prod.yml logs pds"
+
fi
+
+
log "Deployment complete!"
+
log ""
+
log "Useful commands:"
+
log " View logs: docker compose -f docker-compose.prod.yml logs -f"
+
log " Check status: docker compose -f docker-compose.prod.yml ps"
+
log " Rollback: docker compose -f docker-compose.prod.yml down && git checkout HEAD~1 && ./scripts/deploy.sh"
+149
scripts/generate-did-keys.sh
···
+
#!/bin/bash
+
# Generate cryptographic keys for Coves did:web DID document
+
#
+
# This script generates a secp256k1 (K-256) key pair as required by atproto.
+
# Reference: https://atproto.com/specs/cryptography
+
#
+
# Key format:
+
# - Curve: secp256k1 (K-256) - same as Bitcoin/Ethereum
+
# - Type: Multikey
+
# - Encoding: publicKeyMultibase with base58btc ('z' prefix)
+
# - Multicodec: 0xe7 for secp256k1 compressed public key
+
#
+
# Output:
+
# - Private key (hex) for PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX
+
# - Public key (multibase) for did.json publicKeyMultibase field
+
# - Complete did.json file
+
+
set -e
+
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+
OUTPUT_DIR="$PROJECT_DIR/static/.well-known"
+
+
# Colors
+
GREEN='\033[0;32m'
+
YELLOW='\033[1;33m'
+
RED='\033[0;31m'
+
NC='\033[0m'
+
+
log() { echo -e "${GREEN}[KEYGEN]${NC} $1"; }
+
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
+
error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
+
+
# Check for required tools
+
if ! command -v openssl &> /dev/null; then
+
error "openssl is required but not installed"
+
fi
+
+
if ! command -v python3 &> /dev/null; then
+
error "python3 is required for base58 encoding"
+
fi
+
+
# Check for base58 library
+
if ! python3 -c "import base58" 2>/dev/null; then
+
warn "Installing base58 Python library..."
+
pip3 install base58 || error "Failed to install base58. Run: pip3 install base58"
+
fi
+
+
log "Generating secp256k1 key pair for did:web..."
+
+
# Generate private key
+
PRIVATE_KEY_PEM=$(mktemp)
+
openssl ecparam -name secp256k1 -genkey -noout -out "$PRIVATE_KEY_PEM" 2>/dev/null
+
+
# Extract private key as hex (for PDS config)
+
PRIVATE_KEY_HEX=$(openssl ec -in "$PRIVATE_KEY_PEM" -text -noout 2>/dev/null | \
+
grep -A 3 "priv:" | tail -n 3 | tr -d ' :\n' | tr -d '\r')
+
+
# Extract public key as compressed format
+
# OpenSSL outputs the public key, we need to get the compressed form
+
PUBLIC_KEY_HEX=$(openssl ec -in "$PRIVATE_KEY_PEM" -pubout -conv_form compressed -outform DER 2>/dev/null | \
+
tail -c 33 | xxd -p | tr -d '\n')
+
+
# Clean up temp file
+
rm -f "$PRIVATE_KEY_PEM"
+
+
# Encode public key as multibase with multicodec
+
# Multicodec 0xe7 = secp256k1 compressed public key
+
# Then base58btc encode with 'z' prefix
+
PUBLIC_KEY_MULTIBASE=$(python3 << EOF
+
import base58
+
+
# Compressed public key bytes
+
pub_hex = "$PUBLIC_KEY_HEX"
+
pub_bytes = bytes.fromhex(pub_hex)
+
+
# Prepend multicodec 0xe7 for secp256k1-pub
+
# 0xe7 as varint is just 0xe7 (single byte, < 128)
+
multicodec = bytes([0xe7, 0x01]) # 0xe701 for secp256k1-pub compressed
+
key_with_codec = multicodec + pub_bytes
+
+
# Base58btc encode
+
encoded = base58.b58encode(key_with_codec).decode('ascii')
+
+
# Add 'z' prefix for multibase
+
print('z' + encoded)
+
EOF
+
)
+
+
log "Keys generated successfully!"
+
echo ""
+
echo "============================================"
+
echo " PRIVATE KEY (keep secret!)"
+
echo "============================================"
+
echo ""
+
echo "Add this to your .env.prod file:"
+
echo ""
+
echo "PDS_ROTATION_KEY=$PRIVATE_KEY_HEX"
+
echo ""
+
echo "============================================"
+
echo " PUBLIC KEY (for did.json)"
+
echo "============================================"
+
echo ""
+
echo "publicKeyMultibase: $PUBLIC_KEY_MULTIBASE"
+
echo ""
+
+
# Generate the did.json file
+
log "Generating did.json..."
+
+
mkdir -p "$OUTPUT_DIR"
+
+
cat > "$OUTPUT_DIR/did.json" << EOF
+
{
+
"id": "did:web:coves.social",
+
"alsoKnownAs": ["at://coves.social"],
+
"verificationMethod": [
+
{
+
"id": "did:web:coves.social#atproto",
+
"type": "Multikey",
+
"controller": "did:web:coves.social",
+
"publicKeyMultibase": "$PUBLIC_KEY_MULTIBASE"
+
}
+
],
+
"service": [
+
{
+
"id": "#atproto_pds",
+
"type": "AtprotoPersonalDataServer",
+
"serviceEndpoint": "https://coves.me"
+
}
+
]
+
}
+
EOF
+
+
log "Created: $OUTPUT_DIR/did.json"
+
echo ""
+
echo "============================================"
+
echo " NEXT STEPS"
+
echo "============================================"
+
echo ""
+
echo "1. Copy the PDS_ROTATION_KEY value to your .env.prod file"
+
echo ""
+
echo "2. Verify the did.json looks correct:"
+
echo " cat $OUTPUT_DIR/did.json"
+
echo ""
+
echo "3. After deployment, verify it's accessible:"
+
echo " curl https://coves.social/.well-known/did.json"
+
echo ""
+
warn "IMPORTANT: Keep the private key secret! Only share the public key."
+
warn "The did.json file with the public key IS safe to commit to git."
+106
scripts/setup-production.sh
···
+
#!/bin/bash
+
# Coves Production Setup Script
+
# Run this once on a fresh server to set up everything
+
#
+
# Prerequisites:
+
# - Docker and docker-compose installed
+
# - Git installed
+
# - .env.prod file configured
+
+
set -e
+
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+
+
# Colors
+
GREEN='\033[0;32m'
+
YELLOW='\033[1;33m'
+
RED='\033[0;31m'
+
NC='\033[0m'
+
+
log() { echo -e "${GREEN}[SETUP]${NC} $1"; }
+
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
+
error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
+
+
cd "$PROJECT_DIR"
+
+
# Check prerequisites
+
log "Checking prerequisites..."
+
+
if ! command -v docker &> /dev/null; then
+
error "Docker is not installed. Install with: curl -fsSL https://get.docker.com | sh"
+
fi
+
+
if ! docker compose version &> /dev/null; then
+
error "docker compose is not available. Install with: apt install docker-compose-plugin"
+
fi
+
+
# Check for .env.prod
+
if [ ! -f ".env.prod" ]; then
+
error ".env.prod not found! Copy from .env.prod.example and configure secrets."
+
fi
+
+
# Load environment
+
set -a
+
source .env.prod
+
set +a
+
+
# Create required directories
+
log "Creating directories..."
+
mkdir -p backups
+
mkdir -p static/.well-known
+
+
# Check for did.json
+
if [ ! -f "static/.well-known/did.json" ]; then
+
warn "static/.well-known/did.json not found!"
+
warn "Run ./scripts/generate-did-keys.sh to create it."
+
fi
+
+
# Note: Caddy logs are written to Docker volume (caddy-data)
+
# If you need host-accessible logs, uncomment and run as root:
+
# mkdir -p /var/log/caddy && chown 1000:1000 /var/log/caddy
+
+
# Pull Docker images
+
log "Pulling Docker images..."
+
docker compose -f docker-compose.prod.yml pull postgres pds caddy
+
+
# Build AppView
+
log "Building AppView..."
+
docker compose -f docker-compose.prod.yml build appview
+
+
# Start services
+
log "Starting services..."
+
docker compose -f docker-compose.prod.yml up -d
+
+
# Wait for PostgreSQL
+
log "Waiting for PostgreSQL to be ready..."
+
until docker compose -f docker-compose.prod.yml exec -T postgres pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB" > /dev/null 2>&1; do
+
sleep 2
+
done
+
log "PostgreSQL is ready!"
+
+
# Run migrations
+
log "Running database migrations..."
+
# The AppView runs migrations on startup, but you can also run them manually:
+
# docker compose -f docker-compose.prod.yml exec appview /app/coves-server migrate
+
+
# Final status
+
log ""
+
log "============================================"
+
log " Coves Production Setup Complete!"
+
log "============================================"
+
log ""
+
log "Services running:"
+
docker compose -f docker-compose.prod.yml ps
+
log ""
+
log "Next steps:"
+
log " 1. Configure DNS for coves.social and coves.me"
+
log " 2. Run ./scripts/generate-did-keys.sh to create DID keys"
+
log " 3. Test health endpoints:"
+
log " curl https://coves.social/xrpc/_health"
+
log " curl https://coves.me/xrpc/_health"
+
log ""
+
log "Useful commands:"
+
log " View logs: docker compose -f docker-compose.prod.yml logs -f"
+
log " Deploy update: ./scripts/deploy.sh appview"
+
log " Backup DB: ./scripts/backup.sh"
+19
static/.well-known/did.json.template
···
+
{
+
"id": "did:web:coves.social",
+
"alsoKnownAs": ["at://coves.social"],
+
"verificationMethod": [
+
{
+
"id": "did:web:coves.social#atproto",
+
"type": "Multikey",
+
"controller": "did:web:coves.social",
+
"publicKeyMultibase": "REPLACE_WITH_YOUR_PUBLIC_KEY"
+
}
+
],
+
"service": [
+
{
+
"id": "#atproto_pds",
+
"type": "AtprotoPersonalDataServer",
+
"serviceEndpoint": "https://coves.me"
+
}
+
]
+
}
+18
static/client-metadata.json
···
+
{
+
"client_id": "https://coves.social/client-metadata.json",
+
"client_name": "Coves",
+
"client_uri": "https://coves.social",
+
"logo_uri": "https://coves.social/logo.png",
+
"tos_uri": "https://coves.social/terms",
+
"policy_uri": "https://coves.social/privacy",
+
"redirect_uris": [
+
"https://coves.social/oauth/callback",
+
"social.coves:/oauth/callback"
+
],
+
"scope": "atproto transition:generic",
+
"grant_types": ["authorization_code", "refresh_token"],
+
"response_types": ["code"],
+
"application_type": "native",
+
"token_endpoint_auth_method": "none",
+
"dpop_bound_access_tokens": true
+
}
+97
static/oauth/callback.html
···
+
<!DOCTYPE html>
+
<html>
+
<head>
+
<meta charset="utf-8">
+
<meta name="viewport" content="width=device-width, initial-scale=1">
+
<meta http-equiv="Content-Security-Policy" content="default-src 'self'; script-src 'unsafe-inline'; style-src 'unsafe-inline'">
+
<title>Authorization Successful - Coves</title>
+
<style>
+
body {
+
font-family: system-ui, -apple-system, sans-serif;
+
display: flex;
+
align-items: center;
+
justify-content: center;
+
min-height: 100vh;
+
margin: 0;
+
background: #f5f5f5;
+
}
+
.container {
+
text-align: center;
+
padding: 2rem;
+
background: white;
+
border-radius: 8px;
+
box-shadow: 0 2px 8px rgba(0,0,0,0.1);
+
max-width: 400px;
+
}
+
.success { color: #22c55e; font-size: 3rem; margin-bottom: 1rem; }
+
h1 { margin: 0 0 0.5rem; color: #1f2937; font-size: 1.5rem; }
+
p { color: #6b7280; margin: 0.5rem 0; }
+
a {
+
display: inline-block;
+
margin-top: 1rem;
+
padding: 0.75rem 1.5rem;
+
background: #3b82f6;
+
color: white;
+
text-decoration: none;
+
border-radius: 6px;
+
font-weight: 500;
+
}
+
a:hover { background: #2563eb; }
+
</style>
+
</head>
+
<body>
+
<div class="container">
+
<div class="success">โœ“</div>
+
<h1>Authorization Successful!</h1>
+
<p id="status">Returning to Coves...</p>
+
<a href="#" id="manualLink">Open Coves</a>
+
</div>
+
<script>
+
(function() {
+
// Parse and sanitize query params - only allow expected OAuth parameters
+
const urlParams = new URLSearchParams(window.location.search);
+
const safeParams = new URLSearchParams();
+
+
// Whitelist only expected OAuth callback parameters
+
const code = urlParams.get('code');
+
const state = urlParams.get('state');
+
const error = urlParams.get('error');
+
const errorDescription = urlParams.get('error_description');
+
const iss = urlParams.get('iss');
+
+
if (code) safeParams.set('code', code);
+
if (state) safeParams.set('state', state);
+
if (error) safeParams.set('error', error);
+
if (errorDescription) safeParams.set('error_description', errorDescription);
+
if (iss) safeParams.set('iss', iss);
+
+
const sanitizedQuery = safeParams.toString() ? '?' + safeParams.toString() : '';
+
+
const userAgent = navigator.userAgent || '';
+
const isAndroid = /Android/i.test(userAgent);
+
+
// Build deep link based on platform
+
let deepLink;
+
if (isAndroid) {
+
// Android: Intent URL format
+
const pathAndQuery = '/oauth/callback' + sanitizedQuery;
+
deepLink = 'intent:/' + pathAndQuery + '#Intent;scheme=social.coves;package=social.coves;end';
+
} else {
+
// iOS: Custom scheme
+
deepLink = 'social.coves:/oauth/callback' + sanitizedQuery;
+
}
+
+
// Update manual link
+
document.getElementById('manualLink').href = deepLink;
+
+
// Attempt automatic redirect
+
window.location.href = deepLink;
+
+
// Update status after 2 seconds if redirect didn't work
+
setTimeout(function() {
+
document.getElementById('status').textContent = 'Click the button above to continue';
+
}, 2000);
+
})();
+
</script>
+
</body>
+
</html>
+2 -1
Dockerfile
···
COPY --from=builder /build/coves-server /app/coves-server
# Copy migrations (needed for goose)
-
COPY --from=builder /build/internal/db/migrations /app/migrations
+
# Must maintain path structure as app looks for internal/db/migrations
+
COPY --from=builder /build/internal/db/migrations /app/internal/db/migrations
# Set ownership
RUN chown -R coves:coves /app
+187
scripts/derive-did-from-key.sh
···
+
#!/bin/bash
+
# Derive public key from existing PDS_ROTATION_KEY and create did.json
+
#
+
# This script takes your existing private key and derives the public key from it.
+
# Use this if you already have a PDS running with a rotation key but need to
+
# create/fix the did.json file.
+
#
+
# Usage: ./scripts/derive-did-from-key.sh
+
+
set -e
+
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
+
OUTPUT_DIR="$PROJECT_DIR/static/.well-known"
+
+
# Colors
+
GREEN='\033[0;32m'
+
YELLOW='\033[1;33m'
+
RED='\033[0;31m'
+
NC='\033[0m'
+
+
log() { echo -e "${GREEN}[DERIVE]${NC} $1"; }
+
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
+
error() { echo -e "${RED}[ERROR]${NC} $1"; exit 1; }
+
+
# Check for required tools
+
if ! command -v openssl &> /dev/null; then
+
error "openssl is required but not installed"
+
fi
+
+
if ! command -v python3 &> /dev/null; then
+
error "python3 is required for base58 encoding"
+
fi
+
+
# Check for base58 library
+
if ! python3 -c "import base58" 2>/dev/null; then
+
warn "Installing base58 Python library..."
+
pip3 install base58 || error "Failed to install base58. Run: pip3 install base58"
+
fi
+
+
# Load environment to get the existing key
+
if [ -f "$PROJECT_DIR/.env.prod" ]; then
+
source "$PROJECT_DIR/.env.prod"
+
elif [ -f "$PROJECT_DIR/.env" ]; then
+
source "$PROJECT_DIR/.env"
+
else
+
error "No .env.prod or .env file found"
+
fi
+
+
if [ -z "$PDS_ROTATION_KEY" ]; then
+
error "PDS_ROTATION_KEY not found in environment"
+
fi
+
+
# Validate key format (should be 64 hex chars)
+
if [[ ! "$PDS_ROTATION_KEY" =~ ^[0-9a-fA-F]{64}$ ]]; then
+
error "PDS_ROTATION_KEY is not a valid 64-character hex string"
+
fi
+
+
log "Deriving public key from existing PDS_ROTATION_KEY..."
+
+
# Create a temporary PEM file from the hex private key
+
TEMP_DIR=$(mktemp -d)
+
PRIVATE_KEY_HEX="$PDS_ROTATION_KEY"
+
+
# Convert hex private key to PEM format
+
# secp256k1 curve OID: 1.3.132.0.10
+
python3 > "$TEMP_DIR/private.pem" << EOF
+
import binascii
+
+
# Private key in hex
+
priv_hex = "$PRIVATE_KEY_HEX"
+
priv_bytes = binascii.unhexlify(priv_hex)
+
+
# secp256k1 OID
+
oid = bytes([0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a])
+
+
# Build the EC private key structure
+
# SEQUENCE { version INTEGER, privateKey OCTET STRING, [0] OID, [1] publicKey }
+
# We'll use a simpler approach: just the private key with curve params
+
+
# EC PARAMETERS for secp256k1
+
ec_params = bytes([
+
0x30, 0x07, # SEQUENCE, 7 bytes
+
0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a # OID for secp256k1
+
])
+
+
# EC PRIVATE KEY structure
+
# SEQUENCE { version, privateKey, [0] parameters }
+
inner = bytes([0x02, 0x01, 0x01]) # version = 1
+
inner += bytes([0x04, 0x20]) + priv_bytes # OCTET STRING with 32-byte key
+
inner += bytes([0xa0, 0x07]) + bytes([0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, 0x0a]) # [0] OID
+
+
# Wrap in SEQUENCE
+
key_der = bytes([0x30, len(inner)]) + inner
+
+
# Base64 encode
+
import base64
+
key_b64 = base64.b64encode(key_der).decode('ascii')
+
+
# Format as PEM
+
print("-----BEGIN EC PRIVATE KEY-----")
+
for i in range(0, len(key_b64), 64):
+
print(key_b64[i:i+64])
+
print("-----END EC PRIVATE KEY-----")
+
EOF
+
+
# Extract the compressed public key
+
PUBLIC_KEY_HEX=$(openssl ec -in "$TEMP_DIR/private.pem" -pubout -conv_form compressed -outform DER 2>/dev/null | \
+
tail -c 33 | xxd -p | tr -d '\n')
+
+
# Clean up
+
rm -rf "$TEMP_DIR"
+
+
if [ -z "$PUBLIC_KEY_HEX" ] || [ ${#PUBLIC_KEY_HEX} -ne 66 ]; then
+
error "Failed to derive public key. Got: $PUBLIC_KEY_HEX"
+
fi
+
+
log "Derived public key: ${PUBLIC_KEY_HEX:0:8}...${PUBLIC_KEY_HEX: -8}"
+
+
# Encode public key as multibase with multicodec
+
PUBLIC_KEY_MULTIBASE=$(python3 << EOF
+
import base58
+
+
# Compressed public key bytes
+
pub_hex = "$PUBLIC_KEY_HEX"
+
pub_bytes = bytes.fromhex(pub_hex)
+
+
# Prepend multicodec 0xe7 for secp256k1-pub
+
# 0xe7 as varint is just 0xe7 (single byte, < 128)
+
multicodec = bytes([0xe7, 0x01]) # 0xe701 for secp256k1-pub compressed
+
key_with_codec = multicodec + pub_bytes
+
+
# Base58btc encode
+
encoded = base58.b58encode(key_with_codec).decode('ascii')
+
+
# Add 'z' prefix for multibase
+
print('z' + encoded)
+
EOF
+
)
+
+
log "Public key multibase: $PUBLIC_KEY_MULTIBASE"
+
+
# Generate the did.json file
+
log "Generating did.json..."
+
+
mkdir -p "$OUTPUT_DIR"
+
+
cat > "$OUTPUT_DIR/did.json" << EOF
+
{
+
"id": "did:web:coves.social",
+
"alsoKnownAs": ["at://coves.social"],
+
"verificationMethod": [
+
{
+
"id": "did:web:coves.social#atproto",
+
"type": "Multikey",
+
"controller": "did:web:coves.social",
+
"publicKeyMultibase": "$PUBLIC_KEY_MULTIBASE"
+
}
+
],
+
"service": [
+
{
+
"id": "#atproto_pds",
+
"type": "AtprotoPersonalDataServer",
+
"serviceEndpoint": "https://coves.me"
+
}
+
]
+
}
+
EOF
+
+
log "Created: $OUTPUT_DIR/did.json"
+
echo ""
+
echo "============================================"
+
echo " DID Document Generated Successfully!"
+
echo "============================================"
+
echo ""
+
echo "Public key multibase: $PUBLIC_KEY_MULTIBASE"
+
echo ""
+
echo "Next steps:"
+
echo " 1. Copy this file to your production server:"
+
echo " scp $OUTPUT_DIR/did.json user@server:/opt/coves/static/.well-known/"
+
echo ""
+
echo " 2. Or if running on production, restart Caddy:"
+
echo " docker compose -f docker-compose.prod.yml restart caddy"
+
echo ""
+
echo " 3. Verify it's accessible:"
+
echo " curl https://coves.social/.well-known/did.json"
+
echo ""
+3 -2
internal/api/routes/community.go
···
// RegisterCommunityRoutes registers community-related XRPC endpoints on the router
// Implements social.coves.community.* lexicon endpoints
-
func RegisterCommunityRoutes(r chi.Router, service communities.Service, authMiddleware *middleware.AtProtoAuthMiddleware) {
+
// allowedCommunityCreators restricts who can create communities. If empty, anyone can create.
+
func RegisterCommunityRoutes(r chi.Router, service communities.Service, authMiddleware *middleware.AtProtoAuthMiddleware, allowedCommunityCreators []string) {
// Initialize handlers
-
createHandler := community.NewCreateHandler(service)
+
createHandler := community.NewCreateHandler(service, allowedCommunityCreators)
getHandler := community.NewGetHandler(service)
updateHandler := community.NewUpdateHandler(service)
listHandler := community.NewListHandler(service)
+1 -2
internal/api/handlers/aggregator/get_authorizations.go
···
package aggregator
import (
+
"Coves/internal/core/aggregators"
"encoding/json"
"log"
"net/http"
"strconv"
-
-
"Coves/internal/core/aggregators"
)
// GetAuthorizationsHandler handles listing authorizations for an aggregator
+1 -2
internal/api/handlers/aggregator/list_for_community.go
···
package aggregator
import (
+
"Coves/internal/core/aggregators"
"encoding/json"
"log"
"net/http"
"strconv"
-
-
"Coves/internal/core/aggregators"
)
// ListForCommunityHandler handles listing aggregators for a community
+1 -2
internal/api/handlers/comments/errors.go
···
package comments
import (
+
"Coves/internal/core/comments"
"encoding/json"
"log"
"net/http"
-
-
"Coves/internal/core/comments"
)
// errorResponse represents a standardized JSON error response
+1 -2
internal/api/handlers/comments/service_adapter.go
···
package comments
import (
-
"net/http"
-
"Coves/internal/core/comments"
+
"net/http"
)
// ServiceAdapter adapts the core comments.Service to the handler's Service interface
+2 -3
internal/api/handlers/community/block.go
···
package community
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/communities"
"encoding/json"
"log"
"net/http"
-
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/communities"
)
// BlockHandler handles community blocking operations
+1 -2
internal/api/handlers/community/list.go
···
package community
import (
+
"Coves/internal/core/communities"
"encoding/json"
"net/http"
"strconv"
-
-
"Coves/internal/core/communities"
)
// ListHandler handles listing communities
+2 -3
internal/api/handlers/communityFeed/get_community.go
···
package communityFeed
import (
+
"Coves/internal/core/communityFeeds"
+
"Coves/internal/core/posts"
"encoding/json"
"log"
"net/http"
"strconv"
-
-
"Coves/internal/core/communityFeeds"
-
"Coves/internal/core/posts"
)
// GetCommunityHandler handles community feed retrieval
+2 -3
internal/api/handlers/discover/get_discover.go
···
package discover
import (
+
"Coves/internal/core/discover"
+
"Coves/internal/core/posts"
"encoding/json"
"log"
"net/http"
"strconv"
-
-
"Coves/internal/core/discover"
-
"Coves/internal/core/posts"
)
// GetDiscoverHandler handles discover feed retrieval
+2 -3
internal/api/handlers/post/create.go
···
package post
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/posts"
"encoding/json"
"log"
"net/http"
"strings"
-
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/posts"
)
// CreateHandler handles post creation requests
+2 -3
internal/api/handlers/post/errors.go
···
package post
import (
+
"Coves/internal/core/aggregators"
+
"Coves/internal/core/posts"
"encoding/json"
"log"
"net/http"
-
-
"Coves/internal/core/aggregators"
-
"Coves/internal/core/posts"
)
type errorResponse struct {
+3 -4
internal/api/handlers/timeline/get_timeline.go
···
package timeline
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/timeline"
"encoding/json"
"log"
"net/http"
"strconv"
"strings"
-
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/timeline"
)
// GetTimelineHandler handles timeline feed retrieval
+1 -2
internal/atproto/jetstream/aggregator_consumer.go
···
package jetstream
import (
+
"Coves/internal/core/aggregators"
"context"
"encoding/json"
"fmt"
"log"
"time"
-
-
"Coves/internal/core/aggregators"
)
// AggregatorEventConsumer consumes aggregator-related events from Jetstream
+2 -3
internal/atproto/jetstream/comment_consumer.go
···
package jetstream
import (
+
"Coves/internal/atproto/utils"
+
"Coves/internal/core/comments"
"context"
"database/sql"
"encoding/json"
···
"strings"
"time"
-
"Coves/internal/atproto/utils"
-
"Coves/internal/core/comments"
-
"github.com/lib/pq"
)
+3 -4
internal/atproto/jetstream/community_consumer.go
···
package jetstream
import (
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/utils"
+
"Coves/internal/core/communities"
"context"
"encoding/json"
"fmt"
···
"strings"
"time"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/utils"
-
"Coves/internal/core/communities"
-
lru "github.com/hashicorp/golang-lru/v2"
"golang.org/x/net/publicsuffix"
"golang.org/x/time/rate"
+3 -4
internal/atproto/jetstream/post_consumer.go
···
package jetstream
import (
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
"context"
"database/sql"
"encoding/json"
···
"log"
"strings"
"time"
-
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
)
// PostEventConsumer consumes post-related events from Jetstream
+3 -4
internal/atproto/jetstream/vote_consumer.go
···
package jetstream
import (
+
"Coves/internal/atproto/utils"
+
"Coves/internal/core/users"
+
"Coves/internal/core/votes"
"context"
"database/sql"
"fmt"
"log"
"strings"
"time"
-
-
"Coves/internal/atproto/utils"
-
"Coves/internal/core/users"
-
"Coves/internal/core/votes"
)
// VoteEventConsumer consumes vote-related events from Jetstream
+1 -2
internal/core/blobs/service.go
···
package blobs
import (
+
"Coves/internal/core/communities"
"bytes"
"context"
"encoding/json"
···
"log"
"net/http"
"time"
-
-
"Coves/internal/core/communities"
)
// Service defines the interface for blob operations
+3 -4
internal/core/comments/comment_service.go
···
package comments
import (
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
"context"
"encoding/json"
"errors"
···
"net/url"
"strings"
"time"
-
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
)
const (
+5 -6
internal/core/comments/comment_service_test.go
···
package comments
import (
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
"context"
"errors"
"testing"
"time"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
-
"github.com/stretchr/testify/assert"
)
···
return nil
}
-
func (m *mockCommunityRepo) List(ctx context.Context, req communities.ListCommunitiesRequest) ([]*communities.Community, int, error) {
-
return nil, 0, nil
+
func (m *mockCommunityRepo) List(ctx context.Context, req communities.ListCommunitiesRequest) ([]*communities.Community, error) {
+
return nil, nil
}
func (m *mockCommunityRepo) Search(ctx context.Context, req communities.SearchCommunitiesRequest) ([]*communities.Community, int, error) {
+1 -2
internal/core/communities/service.go
···
package communities
import (
+
"Coves/internal/atproto/utils"
"bytes"
"context"
"encoding/json"
···
"strings"
"sync"
"time"
-
-
"Coves/internal/atproto/utils"
)
// Community handle validation regex (DNS-valid handle: name.community.instance.com)
+5 -6
internal/core/posts/service.go
···
package posts
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/aggregators"
+
"Coves/internal/core/blobs"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/unfurl"
"bytes"
"context"
"encoding/json"
···
"net/http"
"os"
"time"
-
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/aggregators"
-
"Coves/internal/core/blobs"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/unfurl"
)
type postService struct {
+1 -2
internal/db/postgres/aggregator_repo.go
···
package postgres
import (
+
"Coves/internal/core/aggregators"
"context"
"database/sql"
"fmt"
"strings"
"time"
-
-
"Coves/internal/core/aggregators"
)
type postgresAggregatorRepo struct {
+1 -2
internal/db/postgres/comment_repo.go
···
package postgres
import (
+
"Coves/internal/core/comments"
"context"
"database/sql"
"encoding/base64"
···
"log"
"strings"
-
"Coves/internal/core/comments"
-
"github.com/lib/pq"
)
+1 -2
internal/db/postgres/feed_repo.go
···
package postgres
import (
+
"Coves/internal/core/communityFeeds"
"context"
"database/sql"
"fmt"
-
-
"Coves/internal/core/communityFeeds"
)
type postgresFeedRepo struct {
+1 -2
internal/db/postgres/feed_repo_base.go
···
package postgres
import (
+
"Coves/internal/core/posts"
"crypto/hmac"
"crypto/sha256"
"database/sql"
···
"strconv"
"strings"
"time"
-
-
"Coves/internal/core/posts"
)
// feedRepoBase contains shared logic for timeline and discover feed repositories
+1 -2
internal/db/postgres/post_repo.go
···
package postgres
import (
+
"Coves/internal/core/posts"
"context"
"database/sql"
"fmt"
"strings"
-
-
"Coves/internal/core/posts"
)
type postgresPostRepo struct {
+1 -2
internal/db/postgres/vote_repo.go
···
package postgres
import (
+
"Coves/internal/core/votes"
"context"
"database/sql"
"fmt"
"strings"
-
-
"Coves/internal/core/votes"
)
type postgresVoteRepo struct {
+1 -2
internal/db/postgres/vote_repo_test.go
···
package postgres
import (
+
"Coves/internal/core/votes"
"context"
"database/sql"
"os"
"testing"
"time"
-
"Coves/internal/core/votes"
-
_ "github.com/lib/pq"
"github.com/pressly/goose/v3"
"github.com/stretchr/testify/assert"
+7 -8
tests/e2e/error_recovery_test.go
···
package e2e
import (
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"context"
"database/sql"
"fmt"
···
"testing"
"time"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
-
_ "github.com/lib/pq"
"github.com/pressly/goose/v3"
)
···
testCases := []struct {
name string
-
event jetstream.JetstreamEvent
shouldLog string
+
event jetstream.JetstreamEvent
}{
{
name: "Nil identity data",
···
if shouldFail.Load() {
t.Logf("Mock PDS: Simulating unavailability (request #%d)", requestCount.Load())
w.WriteHeader(http.StatusServiceUnavailable)
-
w.Write([]byte(`{"error":"ServiceUnavailable","message":"PDS temporarily unavailable"}`))
+
_, _ = w.Write([]byte(`{"error":"ServiceUnavailable","message":"PDS temporarily unavailable"}`))
return
}
t.Logf("Mock PDS: Serving request successfully (request #%d)", requestCount.Load())
// Simulate successful PDS response
w.WriteHeader(http.StatusOK)
-
w.Write([]byte(`{"did":"did:plc:pdstest123","handle":"pds.test"}`))
+
_, _ = w.Write([]byte(`{"did":"did:plc:pdstest123","handle":"pds.test"}`))
}))
defer mockPDS.Close()
+3 -4
tests/integration/aggregator_test.go
···
package integration
import (
+
"Coves/internal/core/aggregators"
+
"Coves/internal/core/communities"
+
"Coves/internal/db/postgres"
"context"
"encoding/json"
"fmt"
"testing"
"time"
-
-
"Coves/internal/core/aggregators"
-
"Coves/internal/core/communities"
-
"Coves/internal/db/postgres"
)
// TestAggregatorRepository_Create tests basic aggregator creation
+13 -14
tests/integration/blob_upload_e2e_test.go
···
package integration
import (
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/blobs"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"bytes"
"context"
"encoding/json"
···
"testing"
"time"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/blobs"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
···
if err != nil {
t.Skipf("PDS not running at %s: %v. Run 'make dev-up' to start PDS.", pdsURL, err)
}
-
defer healthResp.Body.Close()
+
defer func() { _ = healthResp.Body.Close() }()
if healthResp.StatusCode != http.StatusOK {
t.Skipf("PDS health check failed at %s: status %d", pdsURL, healthResp.StatusCode)
}
···
if err != nil {
t.Skipf("PDS not running at %s: %v. Run 'make dev-up' to start PDS.", pdsURL, err)
}
-
defer healthResp.Body.Close()
+
defer func() { _ = healthResp.Body.Close() }()
if healthResp.StatusCode != http.StatusOK {
t.Skipf("PDS health check failed at %s: status %d", pdsURL, healthResp.StatusCode)
}
···
t.Run("Accept matching image formats with correct MIME types", func(t *testing.T) {
testCases := []struct {
+
createFunc func(*testing.T, int, int, color.Color) []byte
format string
mimeType string
-
createFunc func(*testing.T, int, int, color.Color) []byte
}{
-
{"PNG", "image/png", createTestPNG},
-
{"JPEG", "image/jpeg", createTestJPEG},
+
{createTestPNG, "PNG", "image/png"},
+
{createTestJPEG, "JPEG", "image/jpeg"},
// Note: WebP requires external library (golang.org/x/image/webp)
// For now, we test that the MIME type is accepted even with PNG data
// In production, actual WebP validation would happen at PDS
-
{"WebP (MIME only)", "image/webp", createTestPNG},
+
{createTestPNG, "WebP (MIME only)", "image/webp"},
}
for _, tc := range testCases {
+14 -14
tests/integration/block_handle_resolution_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/community"
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/communities"
"bytes"
"context"
"encoding/json"
···
"net/http/httptest"
"testing"
-
"Coves/internal/api/handlers/community"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/communities"
postgresRepo "Coves/internal/db/postgres"
)
···
// We expect 401 (no auth) but verify the error is NOT "Community not found"
// If handle resolution worked, we'd get past that validation
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotFound {
t.Errorf("Handle resolution failed - got 404 CommunityNotFound")
···
// Expected: 401 Unauthorized (because we didn't add auth context)
if resp.StatusCode != http.StatusUnauthorized {
var errorResp map[string]interface{}
-
json.NewDecoder(resp.Body).Decode(&errorResp)
+
_ = json.NewDecoder(resp.Body).Decode(&errorResp)
t.Logf("Response status: %d, body: %+v", resp.StatusCode, errorResp)
}
})
···
blockHandler.HandleBlock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotFound {
t.Errorf("@-prefixed handle resolution failed - got 404 CommunityNotFound")
···
blockHandler.HandleBlock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotFound {
t.Errorf("Scoped format resolution failed - got 404 CommunityNotFound")
···
blockHandler.HandleBlock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotFound {
t.Errorf("DID resolution failed - got 404 CommunityNotFound")
···
blockHandler.HandleBlock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
// Should return 400 Bad Request for validation errors
if resp.StatusCode != http.StatusBadRequest {
···
}
var errorResp map[string]interface{}
-
json.NewDecoder(resp.Body).Decode(&errorResp)
+
_ = json.NewDecoder(resp.Body).Decode(&errorResp)
if errorCode, ok := errorResp["error"].(string); !ok || errorCode != "InvalidRequest" {
t.Errorf("Expected error code 'InvalidRequest', got %v", errorResp["error"])
···
blockHandler.HandleBlock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
// Expected: 401 (auth check happens before resolution)
// In a real scenario with auth, invalid handle would return 404
···
blockHandler.HandleUnblock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
// Should NOT be 404 (handle resolution should work)
if resp.StatusCode == http.StatusNotFound {
···
// Expected: 401 (no auth context)
if resp.StatusCode != http.StatusUnauthorized {
var errorResp map[string]interface{}
-
json.NewDecoder(resp.Body).Decode(&errorResp)
+
_ = json.NewDecoder(resp.Body).Decode(&errorResp)
t.Logf("Response: status=%d, body=%+v", resp.StatusCode, errorResp)
}
})
···
blockHandler.HandleUnblock(w, req)
resp := w.Result()
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
// Expected: 401 (auth check happens before resolution)
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusNotFound {
+3 -4
tests/integration/comment_consumer_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/comments"
+
"Coves/internal/db/postgres"
"context"
"fmt"
"testing"
"time"
-
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/comments"
-
"Coves/internal/db/postgres"
)
func TestCommentConsumer_CreateComment(t *testing.T) {
+3 -4
tests/integration/comment_query_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/comments"
+
"Coves/internal/db/postgres"
"context"
"database/sql"
"encoding/json"
···
"testing"
"time"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/comments"
-
"Coves/internal/db/postgres"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+4 -5
tests/integration/comment_vote_test.go
···
package integration
import (
-
"context"
-
"fmt"
-
"testing"
-
"time"
-
"Coves/internal/atproto/jetstream"
"Coves/internal/core/comments"
"Coves/internal/core/users"
"Coves/internal/db/postgres"
+
"context"
+
"fmt"
+
"testing"
+
"time"
)
// TestCommentVote_CreateAndUpdate tests voting on comments and vote count updates
+2 -3
tests/integration/community_blocking_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/communities"
"context"
"database/sql"
"fmt"
"testing"
"time"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
postgresRepo "Coves/internal/db/postgres"
)
+13 -14
tests/integration/community_e2e_test.go
···
package integration
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/api/routes"
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/atproto/utils"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"bytes"
"context"
"database/sql"
···
"testing"
"time"
-
"Coves/internal/api/middleware"
-
"Coves/internal/api/routes"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/atproto/utils"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
-
"github.com/go-chi/chi/v5"
"github.com/gorilla/websocket"
_ "github.com/lib/pq"
···
}
var listResp struct {
-
Communities []communities.Community `json:"communities"`
Cursor string `json:"cursor"`
+
Communities []communities.Community `json:"communities"`
}
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
···
}
var listResp struct {
-
Communities []communities.Community `json:"communities"`
Cursor string `json:"cursor"`
+
Communities []communities.Community `json:"communities"`
}
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
t.Fatalf("Failed to decode response: %v", err)
···
}
var listResp struct {
-
Communities []communities.Community `json:"communities"`
Cursor string `json:"cursor"`
+
Communities []communities.Community `json:"communities"`
}
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
t.Fatalf("Failed to decode response: %v", err)
···
}
var listResp struct {
-
Communities []communities.Community `json:"communities"`
Cursor string `json:"cursor"`
+
Communities []communities.Community `json:"communities"`
}
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
t.Fatalf("Failed to decode response: %v", err)
···
}
var listResp struct {
-
Communities []communities.Community `json:"communities"`
Cursor string `json:"cursor"`
+
Communities []communities.Community `json:"communities"`
}
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
t.Fatalf("Failed to decode response: %v", err)
+2 -3
tests/integration/community_repo_test.go
···
package integration
import (
+
"Coves/internal/core/communities"
+
"Coves/internal/db/postgres"
"context"
"fmt"
"testing"
"time"
-
-
"Coves/internal/core/communities"
-
"Coves/internal/db/postgres"
)
func TestCommunityRepository_Create(t *testing.T) {
+3 -4
tests/integration/community_v2_validation_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/communities"
+
"Coves/internal/db/postgres"
"context"
"fmt"
"testing"
"time"
-
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
"Coves/internal/db/postgres"
)
// TestCommunityConsumer_V2RKeyValidation tests that only V2 communities (rkey="self") are accepted
+6 -7
tests/integration/concurrent_scenarios_test.go
···
package integration
import (
-
"context"
-
"fmt"
-
"sync"
-
"testing"
-
"time"
-
"Coves/internal/atproto/jetstream"
"Coves/internal/core/comments"
"Coves/internal/core/communities"
"Coves/internal/core/users"
"Coves/internal/db/postgres"
+
"context"
+
"fmt"
+
"sync"
+
"testing"
+
"time"
)
// TestConcurrentVoting_MultipleUsersOnSamePost tests race conditions when multiple users
···
wg.Add(numAttempts)
type result struct {
-
success bool
err error
+
success bool
}
results := make(chan result, numAttempts)
+2 -3
tests/integration/discover_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/discover"
+
"Coves/internal/db/postgres"
"context"
"encoding/json"
"fmt"
···
"testing"
"time"
-
"Coves/internal/api/handlers/discover"
-
"Coves/internal/db/postgres"
-
discoverCore "Coves/internal/core/discover"
"github.com/stretchr/testify/assert"
+4 -5
tests/integration/feed_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/communityFeed"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/communityFeeds"
+
"Coves/internal/db/postgres"
"context"
"encoding/json"
"fmt"
···
"testing"
"time"
-
"Coves/internal/api/handlers/communityFeed"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/communityFeeds"
-
"Coves/internal/db/postgres"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+3 -4
tests/integration/jwt_verification_test.go
···
package integration
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/atproto/auth"
"fmt"
"net/http"
"net/http/httptest"
···
"strings"
"testing"
"time"
-
-
"Coves/internal/api/middleware"
-
"Coves/internal/atproto/auth"
)
// TestJWTSignatureVerification tests end-to-end JWT signature verification
···
// Check if JWKS is available (production PDS) or symmetric secret (dev PDS)
jwksResp, _ := http.Get(pdsURL + "/oauth/jwks")
if jwksResp != nil {
-
defer jwksResp.Body.Close()
+
defer func() { _ = jwksResp.Body.Close() }()
}
t.Run("JWT parsing and middleware integration", func(t *testing.T) {
+3 -4
tests/integration/post_consumer_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"context"
"fmt"
"testing"
"time"
-
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
)
// TestPostConsumer_CommentCountReconciliation tests that post comment_count
+8 -9
tests/integration/post_e2e_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/post"
+
"Coves/internal/api/middleware"
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"bytes"
"context"
"database/sql"
···
"testing"
"time"
-
"Coves/internal/api/handlers/post"
-
"Coves/internal/api/middleware"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
-
"github.com/gorilla/websocket"
_ "github.com/lib/pq"
"github.com/pressly/goose/v3"
+5 -6
tests/integration/post_handler_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/post"
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/db/postgres"
"bytes"
"encoding/json"
"net/http"
···
"strings"
"testing"
-
"Coves/internal/api/handlers/post"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/db/postgres"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+5 -6
tests/integration/post_thumb_validation_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/post"
+
"Coves/internal/api/middleware"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/db/postgres"
"bytes"
"context"
"encoding/json"
···
"net/http/httptest"
"testing"
-
"Coves/internal/api/handlers/post"
-
"Coves/internal/api/middleware"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
-
"Coves/internal/db/postgres"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+5 -6
tests/integration/post_unfurl_test.go
···
package integration
import (
-
"context"
-
"encoding/json"
-
"fmt"
-
"testing"
-
"time"
-
"Coves/internal/api/middleware"
"Coves/internal/atproto/identity"
"Coves/internal/atproto/jetstream"
···
"Coves/internal/core/unfurl"
"Coves/internal/core/users"
"Coves/internal/db/postgres"
+
"context"
+
"encoding/json"
+
"fmt"
+
"testing"
+
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+2 -3
tests/integration/subscription_indexing_test.go
···
package integration
import (
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/communities"
"context"
"database/sql"
"fmt"
"testing"
"time"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
postgresRepo "Coves/internal/db/postgres"
)
+3 -4
tests/integration/timeline_test.go
···
package integration
import (
+
"Coves/internal/api/handlers/timeline"
+
"Coves/internal/api/middleware"
+
"Coves/internal/db/postgres"
"context"
"encoding/json"
"fmt"
···
"testing"
"time"
-
"Coves/internal/api/handlers/timeline"
-
"Coves/internal/api/middleware"
-
"Coves/internal/db/postgres"
-
timelineCore "Coves/internal/core/timeline"
"github.com/stretchr/testify/assert"
+11 -11
tests/integration/user_journey_e2e_test.go
···
package integration
import (
+
"Coves/internal/api/middleware"
+
"Coves/internal/api/routes"
+
"Coves/internal/atproto/identity"
+
"Coves/internal/atproto/jetstream"
+
"Coves/internal/core/communities"
+
"Coves/internal/core/posts"
+
"Coves/internal/core/users"
+
"Coves/internal/db/postgres"
"bytes"
"context"
"database/sql"
···
"testing"
"time"
-
"Coves/internal/api/middleware"
-
"Coves/internal/api/routes"
-
"Coves/internal/atproto/identity"
-
"Coves/internal/atproto/jetstream"
-
"Coves/internal/core/communities"
-
"Coves/internal/core/posts"
timelineCore "Coves/internal/core/timeline"
-
"Coves/internal/core/users"
-
"Coves/internal/db/postgres"
"github.com/go-chi/chi/v5"
"github.com/gorilla/websocket"
···
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
require.Equal(t, http.StatusOK, resp.StatusCode, "Community creation should succeed")
···
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
require.Equal(t, http.StatusOK, resp.StatusCode, "Post creation should succeed")
···
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
-
defer resp.Body.Close()
+
defer func() { _ = resp.Body.Close() }()
require.Equal(t, http.StatusOK, resp.StatusCode, "Subscription should succeed")
+23
static/.well-known/did.json
···
+
{
+
"@context": [
+
"https://www.w3.org/ns/did/v1",
+
"https://w3id.org/security/multikey/v1"
+
],
+
"id": "did:web:coves.social",
+
"alsoKnownAs": ["at://coves.social"],
+
"verificationMethod": [
+
{
+
"id": "did:web:coves.social#atproto",
+
"type": "Multikey",
+
"controller": "did:web:coves.social",
+
"publicKeyMultibase": "zQ3shu1T3Y3MYoC1n7fCqkZqyrk8FiY3PV3BYM2JwyqcXFY6s"
+
}
+
],
+
"service": [
+
{
+
"id": "#atproto_pds",
+
"type": "AtprotoPersonalDataServer",
+
"serviceEndpoint": "https://pds.coves.me"
+
}
+
]
+
}
+1 -1
docs/E2E_TESTING.md
···
Query via API:
```bash
-
curl "http://localhost:8081/xrpc/social.coves.actor.getProfile?actor=alice.local.coves.dev"
+
curl "http://localhost:8081/xrpc/social.coves.actor.getprofile?actor=alice.local.coves.dev"
```
Expected response:
+3 -3
internal/api/routes/user.go
···
func RegisterUserRoutes(r chi.Router, service users.UserService) {
h := NewUserHandler(service)
-
// social.coves.actor.getProfile - query endpoint
-
r.Get("/xrpc/social.coves.actor.getProfile", h.GetProfile)
+
// social.coves.actor.getprofile - query endpoint
+
r.Get("/xrpc/social.coves.actor.getprofile", h.GetProfile)
// social.coves.actor.signup - procedure endpoint
r.Post("/xrpc/social.coves.actor.signup", h.Signup)
}
-
// GetProfile handles social.coves.actor.getProfile
+
// GetProfile handles social.coves.actor.getprofile
// Query endpoint that retrieves a user profile by DID or handle
func (h *UserHandler) GetProfile(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
+44 -5
internal/atproto/lexicon/social/coves/embed/external.json
···
"defs": {
"main": {
"type": "object",
-
"description": "External link embed with preview metadata and provider support",
+
"description": "External link embed with optional aggregated sources for megathreads",
"required": ["external"],
"properties": {
"external": {
···
},
"external": {
"type": "object",
-
"description": "External link metadata",
+
"description": "Primary external link metadata",
"required": ["uri"],
"properties": {
"uri": {
"type": "string",
"format": "uri",
-
"description": "URI of the external content"
+
"description": "URI of the primary external content"
},
"title": {
"type": "string",
···
"type": "blob",
"accept": ["image/png", "image/jpeg", "image/webp"],
"maxSize": 1000000,
-
"description": "Thumbnail image for the link"
+
"description": "Thumbnail image for the post (applies to primary link)"
},
"domain": {
"type": "string",
-
"description": "Domain of the linked content"
+
"maxLength": 253,
+
"description": "Domain of the linked content (e.g., nytimes.com)"
},
"embedType": {
"type": "string",
···
},
"provider": {
"type": "string",
+
"maxLength": 100,
"description": "Service provider name (e.g., imgur, streamable)"
},
"images": {
···
"type": "integer",
"minimum": 0,
"description": "Total number of items if more than displayed (for galleries)"
+
},
+
"sources": {
+
"type": "array",
+
"description": "Aggregated source links for megathreads. Each source references an original article and optionally the Coves post that shared it",
+
"maxLength": 50,
+
"items": {
+
"type": "ref",
+
"ref": "#source"
+
}
+
}
+
}
+
},
+
"source": {
+
"type": "object",
+
"description": "A source link aggregated into a megathread",
+
"required": ["uri"],
+
"properties": {
+
"uri": {
+
"type": "string",
+
"format": "uri",
+
"description": "URI of the source article"
+
},
+
"title": {
+
"type": "string",
+
"maxLength": 500,
+
"maxGraphemes": 500,
+
"description": "Title of the source article"
+
},
+
"domain": {
+
"type": "string",
+
"maxLength": 253,
+
"description": "Domain of the source (e.g., nytimes.com)"
+
},
+
"sourcePost": {
+
"type": "ref",
+
"ref": "com.atproto.repo.strongRef",
+
"description": "Reference to the Coves post that originally shared this link. Used for feed deprioritization of rolled-up posts"
}
}
}
+52
internal/atproto/auth/combined_key_fetcher.go
···
+
package auth
+
+
import (
+
"context"
+
"fmt"
+
"strings"
+
+
indigoIdentity "github.com/bluesky-social/indigo/atproto/identity"
+
)
+
+
// CombinedKeyFetcher handles JWT public key fetching for both:
+
// - DID issuers (did:plc:, did:web:) โ†’ resolves via DID document
+
// - URL issuers (https://) โ†’ fetches via JWKS endpoint (legacy/fallback)
+
//
+
// For atproto service authentication, the issuer is typically the user's DID,
+
// and the signing key is published in their DID document.
+
type CombinedKeyFetcher struct {
+
didFetcher *DIDKeyFetcher
+
jwksFetcher JWKSFetcher
+
}
+
+
// NewCombinedKeyFetcher creates a key fetcher that supports both DID and URL issuers.
+
// Parameters:
+
// - directory: Indigo's identity directory for DID resolution
+
// - jwksFetcher: fallback JWKS fetcher for URL issuers (can be nil if not needed)
+
func NewCombinedKeyFetcher(directory indigoIdentity.Directory, jwksFetcher JWKSFetcher) *CombinedKeyFetcher {
+
return &CombinedKeyFetcher{
+
didFetcher: NewDIDKeyFetcher(directory),
+
jwksFetcher: jwksFetcher,
+
}
+
}
+
+
// FetchPublicKey fetches the public key for verifying a JWT.
+
// Routes to the appropriate fetcher based on issuer format:
+
// - DID (did:plc:, did:web:) โ†’ DIDKeyFetcher
+
// - URL (https://) โ†’ JWKSFetcher
+
func (f *CombinedKeyFetcher) FetchPublicKey(ctx context.Context, issuer, token string) (interface{}, error) {
+
// Check if issuer is a DID
+
if strings.HasPrefix(issuer, "did:") {
+
return f.didFetcher.FetchPublicKey(ctx, issuer, token)
+
}
+
+
// Check if issuer is a URL (https:// or http:// in dev)
+
if strings.HasPrefix(issuer, "https://") || strings.HasPrefix(issuer, "http://") {
+
if f.jwksFetcher == nil {
+
return nil, fmt.Errorf("URL issuer %s requires JWKS fetcher, but none configured", issuer)
+
}
+
return f.jwksFetcher.FetchPublicKey(ctx, issuer, token)
+
}
+
+
return nil, fmt.Errorf("unsupported issuer format: %s (expected DID or URL)", issuer)
+
}
+116
internal/atproto/auth/did_key_fetcher.go
···
+
package auth
+
+
import (
+
"context"
+
"crypto/ecdsa"
+
"crypto/elliptic"
+
"encoding/base64"
+
"fmt"
+
"math/big"
+
"strings"
+
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
+
indigoIdentity "github.com/bluesky-social/indigo/atproto/identity"
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
)
+
+
// DIDKeyFetcher fetches public keys from DID documents for JWT verification.
+
// This is the primary method for atproto service authentication, where:
+
// - The JWT issuer is the user's DID (e.g., did:plc:abc123)
+
// - The signing key is published in the user's DID document
+
// - Verification happens by resolving the DID and checking the signature
+
type DIDKeyFetcher struct {
+
directory indigoIdentity.Directory
+
}
+
+
// NewDIDKeyFetcher creates a new DID-based key fetcher.
+
func NewDIDKeyFetcher(directory indigoIdentity.Directory) *DIDKeyFetcher {
+
return &DIDKeyFetcher{
+
directory: directory,
+
}
+
}
+
+
// FetchPublicKey fetches the public key for verifying a JWT from the issuer's DID document.
+
// For DID issuers (did:plc: or did:web:), resolves the DID and extracts the signing key.
+
// Returns an *ecdsa.PublicKey suitable for use with jwt-go.
+
func (f *DIDKeyFetcher) FetchPublicKey(ctx context.Context, issuer, token string) (interface{}, error) {
+
// Only handle DID issuers
+
if !strings.HasPrefix(issuer, "did:") {
+
return nil, fmt.Errorf("DIDKeyFetcher only handles DID issuers, got: %s", issuer)
+
}
+
+
// Parse the DID
+
did, err := syntax.ParseDID(issuer)
+
if err != nil {
+
return nil, fmt.Errorf("invalid DID format: %w", err)
+
}
+
+
// Resolve the DID to get the identity (includes public keys)
+
ident, err := f.directory.LookupDID(ctx, did)
+
if err != nil {
+
return nil, fmt.Errorf("failed to resolve DID %s: %w", issuer, err)
+
}
+
+
// Get the atproto signing key from the DID document
+
pubKey, err := ident.PublicKey()
+
if err != nil {
+
return nil, fmt.Errorf("failed to get public key from DID document: %w", err)
+
}
+
+
// Convert to JWK format to extract coordinates
+
jwk, err := pubKey.JWK()
+
if err != nil {
+
return nil, fmt.Errorf("failed to convert public key to JWK: %w", err)
+
}
+
+
// Convert atcrypto JWK to Go ecdsa.PublicKey
+
return atcryptoJWKToECDSA(jwk)
+
}
+
+
// atcryptoJWKToECDSA converts an atcrypto.JWK to a Go ecdsa.PublicKey
+
func atcryptoJWKToECDSA(jwk *atcrypto.JWK) (*ecdsa.PublicKey, error) {
+
if jwk.KeyType != "EC" {
+
return nil, fmt.Errorf("unsupported JWK key type: %s (expected EC)", jwk.KeyType)
+
}
+
+
// Decode X and Y coordinates (base64url, no padding)
+
xBytes, err := base64.RawURLEncoding.DecodeString(jwk.X)
+
if err != nil {
+
return nil, fmt.Errorf("invalid JWK X coordinate encoding: %w", err)
+
}
+
yBytes, err := base64.RawURLEncoding.DecodeString(jwk.Y)
+
if err != nil {
+
return nil, fmt.Errorf("invalid JWK Y coordinate encoding: %w", err)
+
}
+
+
var ecCurve elliptic.Curve
+
switch jwk.Curve {
+
case "P-256":
+
ecCurve = elliptic.P256()
+
case "P-384":
+
ecCurve = elliptic.P384()
+
case "P-521":
+
ecCurve = elliptic.P521()
+
case "secp256k1":
+
// secp256k1 (K-256) is used by some atproto implementations
+
// Go's standard library doesn't include secp256k1, but we can still
+
// construct the key - jwt-go may not support it directly
+
return nil, fmt.Errorf("secp256k1 curve requires special handling for JWT verification")
+
default:
+
return nil, fmt.Errorf("unsupported JWK curve: %s", jwk.Curve)
+
}
+
+
// Create the public key
+
pubKey := &ecdsa.PublicKey{
+
Curve: ecCurve,
+
X: new(big.Int).SetBytes(xBytes),
+
Y: new(big.Int).SetBytes(yBytes),
+
}
+
+
// Validate point is on curve
+
if !ecCurve.IsOnCurve(pubKey.X, pubKey.Y) {
+
return nil, fmt.Errorf("invalid public key: point not on curve")
+
}
+
+
return pubKey, nil
+
}
+24 -3
cmd/server/main.go
···
commentsAPI "Coves/internal/api/handlers/comments"
postgresRepo "Coves/internal/db/postgres"
+
+
indigoIdentity "github.com/bluesky-social/indigo/atproto/identity"
)
func main() {
···
log.Println(" Set AUTH_SKIP_VERIFY=false for production")
}
-
jwksCacheTTL := 1 * time.Hour // Cache public keys for 1 hour
+
// Initialize Indigo directory for DID resolution (used by auth)
+
plcURL := os.Getenv("PLC_DIRECTORY_URL")
+
if plcURL == "" {
+
plcURL = "https://plc.directory"
+
}
+
indigoDir := &indigoIdentity.BaseDirectory{
+
PLCURL: plcURL,
+
HTTPClient: http.Client{Timeout: 10 * time.Second},
+
}
+
+
// Initialize JWT config early to cache HS256_ISSUERS and PDS_JWT_SECRET
+
// This avoids reading env vars on every request
+
auth.InitJWTConfig()
+
+
// Create combined key fetcher for both DID and URL issuers
+
// - DID issuers (did:plc:, did:web:) โ†’ resolved via DID document keys (ES256)
+
// - URL issuers โ†’ JWKS endpoint (fallback for legacy tokens)
+
jwksCacheTTL := 1 * time.Hour
jwksFetcher := auth.NewCachedJWKSFetcher(jwksCacheTTL)
-
authMiddleware := middleware.NewAtProtoAuthMiddleware(jwksFetcher, skipVerify)
-
log.Println("โœ… atProto auth middleware initialized")
+
keyFetcher := auth.NewCombinedKeyFetcher(indigoDir, jwksFetcher)
+
+
authMiddleware := middleware.NewAtProtoAuthMiddleware(keyFetcher, skipVerify)
+
log.Println("โœ… atProto auth middleware initialized (DID + JWKS key resolution)")
// Initialize repositories and services
userRepo := postgresRepo.NewUserRepository(db)
+5
.env.dev
···
# When false, verifies JWT signature against issuer's JWKS
AUTH_SKIP_VERIFY=true
+
# HS256 Issuers: PDSes allowed to use HS256 (shared secret) authentication
+
# Must share PDS_JWT_SECRET with Coves instance. External PDSes use ES256 via DID resolution.
+
# For local dev, allow the local PDS or turn AUTH_SKIP_VERIFY = true
+
HS256_ISSUERS=http://localhost:3001
+
# Logging
LOG_LEVEL=debug
LOG_ENABLED=true
+484
internal/atproto/auth/dpop.go
···
+
package auth
+
+
import (
+
"crypto/ecdsa"
+
"crypto/elliptic"
+
"crypto/sha256"
+
"encoding/base64"
+
"encoding/json"
+
"fmt"
+
"math/big"
+
"strings"
+
"sync"
+
"time"
+
+
indigoCrypto "github.com/bluesky-social/indigo/atproto/atcrypto"
+
"github.com/golang-jwt/jwt/v5"
+
)
+
+
// NonceCache provides replay protection for DPoP proofs by tracking seen jti values.
+
// This prevents an attacker from reusing a captured DPoP proof within the validity window.
+
// Per RFC 9449 Section 11.1, servers SHOULD prevent replay attacks.
+
type NonceCache struct {
+
seen map[string]time.Time // jti -> expiration time
+
stopCh chan struct{}
+
maxAge time.Duration // How long to keep entries
+
cleanup time.Duration // How often to clean up expired entries
+
mu sync.RWMutex
+
}
+
+
// NewNonceCache creates a new nonce cache for DPoP replay protection.
+
// maxAge should match or exceed DPoPVerifier.MaxProofAge.
+
func NewNonceCache(maxAge time.Duration) *NonceCache {
+
nc := &NonceCache{
+
seen: make(map[string]time.Time),
+
maxAge: maxAge,
+
cleanup: maxAge / 2, // Clean up at half the max age
+
stopCh: make(chan struct{}),
+
}
+
+
// Start background cleanup goroutine
+
go nc.cleanupLoop()
+
+
return nc
+
}
+
+
// CheckAndStore checks if a jti has been seen before and stores it if not.
+
// Returns true if the jti is fresh (not a replay), false if it's a replay.
+
func (nc *NonceCache) CheckAndStore(jti string) bool {
+
nc.mu.Lock()
+
defer nc.mu.Unlock()
+
+
now := time.Now()
+
expiry := now.Add(nc.maxAge)
+
+
// Check if already seen
+
if existingExpiry, seen := nc.seen[jti]; seen {
+
// Still valid (not expired) - this is a replay
+
if existingExpiry.After(now) {
+
return false
+
}
+
// Expired entry - allow reuse and update expiry
+
}
+
+
// Store the new jti
+
nc.seen[jti] = expiry
+
return true
+
}
+
+
// cleanupLoop periodically removes expired entries from the cache.
// It runs in its own goroutine (started by NewNonceCache) and exits when
// stopCh is closed via Stop.
func (nc *NonceCache) cleanupLoop() {
	ticker := time.NewTicker(nc.cleanup)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			nc.cleanupExpired()
		case <-nc.stopCh:
			return
		}
	}
}
+
+
// cleanupExpired removes expired entries from the cache
+
func (nc *NonceCache) cleanupExpired() {
+
nc.mu.Lock()
+
defer nc.mu.Unlock()
+
+
now := time.Now()
+
for jti, expiry := range nc.seen {
+
if expiry.Before(now) {
+
delete(nc.seen, jti)
+
}
+
}
+
}
+
+
// Stop stops the cleanup goroutine. Call this when done with the cache.
// Must be called at most once: a second call would close an already-closed
// channel and panic.
func (nc *NonceCache) Stop() {
	close(nc.stopCh)
}
+
+
// Size returns the number of entries in the cache (for testing/monitoring).
// The count may include entries that have expired but not yet been swept.
func (nc *NonceCache) Size() int {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return len(nc.seen)
}
+
+
// DPoPClaims represents the claims in a DPoP proof JWT (RFC 9449 Section 4.2).
type DPoPClaims struct {
	jwt.RegisteredClaims

	// HTTPMethod is the HTTP method of the request (e.g., "GET", "POST").
	HTTPMethod string `json:"htm"`

	// HTTPURI is the HTTP URI of the request (without query and fragment parts).
	HTTPURI string `json:"htu"`

	// AccessTokenHash is the hash of the access token
	// (optional, for token binding).
	AccessTokenHash string `json:"ath,omitempty"`
}
+
+
// DPoPProof represents a parsed and verified DPoP proof.
type DPoPProof struct {
	RawPublicJWK map[string]interface{} // the jwk header exactly as presented in the proof
	Claims       *DPoPClaims            // validated claims (jti/htm/htu/iat)
	PublicKey    interface{}            // *ecdsa.PublicKey or similar, parsed from the jwk header
	Thumbprint   string                 // RFC 7638 JWK thumbprint (base64url, unpadded)
}
+
+
// DPoPVerifier verifies DPoP proofs for OAuth token binding (RFC 9449).
type DPoPVerifier struct {
	// ValidateNonce is an optional custom nonce validation function (for
	// server-issued nonces). NOTE(review): not invoked anywhere in this file —
	// confirm callers before relying on it.
	ValidateNonce func(nonce string) bool

	// NonceCache provides jti replay protection (optional but recommended).
	// If nil, jti replay protection is disabled.
	NonceCache *NonceCache

	// MaxClockSkew is the maximum allowed clock skew when checking that a
	// proof's iat timestamp is not in the future.
	MaxClockSkew time.Duration

	// MaxProofAge is the maximum age of a DPoP proof (limits the window in
	// which a captured proof could be replayed).
	MaxProofAge time.Duration
}
+
+
// NewDPoPVerifier creates a DPoP verifier with sensible defaults including replay protection
+
func NewDPoPVerifier() *DPoPVerifier {
+
maxProofAge := 5 * time.Minute
+
return &DPoPVerifier{
+
MaxClockSkew: 30 * time.Second,
+
MaxProofAge: maxProofAge,
+
NonceCache: NewNonceCache(maxProofAge),
+
}
+
}
+
+
// NewDPoPVerifierWithoutReplayProtection creates a DPoP verifier without replay protection.
+
// This should only be used in testing or when replay protection is handled externally.
+
func NewDPoPVerifierWithoutReplayProtection() *DPoPVerifier {
+
return &DPoPVerifier{
+
MaxClockSkew: 30 * time.Second,
+
MaxProofAge: 5 * time.Minute,
+
NonceCache: nil, // No replay protection
+
}
+
}
+
+
// Stop stops background goroutines (the replay cache sweeper, if any).
// Call this when shutting down. Safe to call when replay protection is
// disabled (nil NonceCache).
func (v *DPoPVerifier) Stop() {
	if v.NonceCache != nil {
		v.NonceCache.Stop()
	}
}
+
+
// VerifyDPoPProof verifies a DPoP proof JWT and returns the parsed proof.
//
// Verification proceeds in two phases:
//  1. An unverified parse to extract the header (typ, alg, jwk) so the
//     embedded public key can be recovered and thumbprinted.
//  2. A full signature verification of the same compact JWT against that
//     embedded key, followed by claim validation (jti/htm/htu/iat/replay).
//
// httpMethod and httpURI describe the incoming HTTP request the proof must
// be bound to. Returns an error on any parse, signature, or claim failure.
func (v *DPoPVerifier) VerifyDPoPProof(dpopProof, httpMethod, httpURI string) (*DPoPProof, error) {
	// Parse the DPoP JWT without verification first to extract the header.
	parser := jwt.NewParser(jwt.WithoutClaimsValidation())
	token, _, err := parser.ParseUnverified(dpopProof, &DPoPClaims{})
	if err != nil {
		return nil, fmt.Errorf("failed to parse DPoP proof: %w", err)
	}

	// Per RFC 9449, a DPoP proof must carry typ "dpop+jwt".
	header, ok := token.Header["typ"].(string)
	if !ok || header != "dpop+jwt" {
		return nil, fmt.Errorf("invalid DPoP proof: typ must be 'dpop+jwt', got '%s'", header)
	}

	alg, ok := token.Header["alg"].(string)
	if !ok {
		return nil, fmt.Errorf("invalid DPoP proof: missing alg header")
	}

	// Extract the public JWK from the header; the proof is self-signed with
	// the key it carries.
	jwkRaw, ok := token.Header["jwk"]
	if !ok {
		return nil, fmt.Errorf("invalid DPoP proof: missing jwk header")
	}

	jwkMap, ok := jwkRaw.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("invalid DPoP proof: jwk must be an object")
	}

	// Parse the public key from JWK.
	publicKey, err := parseJWKToPublicKey(jwkMap)
	if err != nil {
		return nil, fmt.Errorf("invalid DPoP proof JWK: %w", err)
	}

	// Calculate the RFC 7638 JWK thumbprint (used later for cnf.jkt binding).
	thumbprint, err := CalculateJWKThumbprint(jwkMap)
	if err != nil {
		return nil, fmt.Errorf("failed to calculate JWK thumbprint: %w", err)
	}

	// Now verify the signature against the embedded key. The keyfunc also
	// pins the signing method so an attacker cannot swap algorithms.
	verifiedToken, err := jwt.ParseWithClaims(dpopProof, &DPoPClaims{}, func(token *jwt.Token) (interface{}, error) {
		// Verify the signing method matches what we expect.
		switch alg {
		case "ES256":
			if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok {
				return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
			}
		case "ES384", "ES512":
			if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok {
				return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
			}
		case "RS256", "RS384", "RS512", "PS256", "PS384", "PS512":
			// RSA methods - we primarily support ES256 for atproto.
			return nil, fmt.Errorf("RSA algorithms not yet supported for DPoP: %s", alg)
		default:
			return nil, fmt.Errorf("unsupported DPoP algorithm: %s", alg)
		}
		return publicKey, nil
	})
	if err != nil {
		return nil, fmt.Errorf("DPoP proof signature verification failed: %w", err)
	}

	claims, ok := verifiedToken.Claims.(*DPoPClaims)
	if !ok {
		return nil, fmt.Errorf("invalid DPoP claims type")
	}

	// Validate jti/htm/htu/iat and perform replay detection.
	if err := v.validateDPoPClaims(claims, httpMethod, httpURI); err != nil {
		return nil, err
	}

	return &DPoPProof{
		Claims:       claims,
		PublicKey:    publicKey,
		Thumbprint:   thumbprint,
		RawPublicJWK: jwkMap,
	}, nil
}
+
+
// validateDPoPClaims validates the DPoP proof claims against the incoming
// request: jti presence, HTTP method (htm, case-insensitive), HTTP URI (htu,
// ignoring query/fragment), iat freshness within MaxClockSkew/MaxProofAge,
// and — when a NonceCache is configured — jti replay detection.
func (v *DPoPVerifier) validateDPoPClaims(claims *DPoPClaims, expectedMethod, expectedURI string) error {
	// Validate jti (unique identifier) is present; required for replay checks.
	if claims.ID == "" {
		return fmt.Errorf("DPoP proof missing jti claim")
	}

	// Validate htm (HTTP method); methods compare case-insensitively.
	if !strings.EqualFold(claims.HTTPMethod, expectedMethod) {
		return fmt.Errorf("DPoP proof htm mismatch: expected %s, got %s", expectedMethod, claims.HTTPMethod)
	}

	// Validate htu (HTTP URI) - compare without query/fragment per RFC 9449.
	expectedURIBase := stripQueryFragment(expectedURI)
	claimURIBase := stripQueryFragment(claims.HTTPURI)
	if expectedURIBase != claimURIBase {
		return fmt.Errorf("DPoP proof htu mismatch: expected %s, got %s", expectedURIBase, claimURIBase)
	}

	// Validate iat (issued at) is present and recent.
	if claims.IssuedAt == nil {
		return fmt.Errorf("DPoP proof missing iat claim")
	}

	now := time.Now()
	iat := claims.IssuedAt.Time

	// Check clock skew (not too far in the future).
	if iat.After(now.Add(v.MaxClockSkew)) {
		return fmt.Errorf("DPoP proof iat is in the future")
	}

	// Check proof age (not too old).
	if now.Sub(iat) > v.MaxProofAge {
		return fmt.Errorf("DPoP proof is too old (issued %v ago, max %v)", now.Sub(iat), v.MaxProofAge)
	}

	// SECURITY: Check for replay attack using jti.
	// Per RFC 9449 Section 11.1, servers SHOULD prevent replay attacks.
	// Note this check runs last, so a rejected proof does not consume the jti.
	if v.NonceCache != nil {
		if !v.NonceCache.CheckAndStore(claims.ID) {
			return fmt.Errorf("DPoP proof replay detected: jti %s already used", claims.ID)
		}
	}

	return nil
}
+
+
// VerifyTokenBinding verifies that the DPoP proof binds to the access token
+
// by comparing the proof's thumbprint to the token's cnf.jkt claim
+
func (v *DPoPVerifier) VerifyTokenBinding(proof *DPoPProof, expectedThumbprint string) error {
+
if proof.Thumbprint != expectedThumbprint {
+
return fmt.Errorf("DPoP proof thumbprint mismatch: token expects %s, proof has %s",
+
expectedThumbprint, proof.Thumbprint)
+
}
+
return nil
+
}
+
+
// CalculateJWKThumbprint calculates the JWK thumbprint per RFC 7638
+
// The thumbprint is the base64url-encoded SHA-256 hash of the canonical JWK representation
+
func CalculateJWKThumbprint(jwk map[string]interface{}) (string, error) {
+
kty, ok := jwk["kty"].(string)
+
if !ok {
+
return "", fmt.Errorf("JWK missing kty")
+
}
+
+
// Build the canonical JWK representation based on key type
+
// Per RFC 7638, only specific members are included, in lexicographic order
+
var canonical map[string]string
+
+
switch kty {
+
case "EC":
+
crv, ok := jwk["crv"].(string)
+
if !ok {
+
return "", fmt.Errorf("EC JWK missing crv")
+
}
+
x, ok := jwk["x"].(string)
+
if !ok {
+
return "", fmt.Errorf("EC JWK missing x")
+
}
+
y, ok := jwk["y"].(string)
+
if !ok {
+
return "", fmt.Errorf("EC JWK missing y")
+
}
+
// Lexicographic order: crv, kty, x, y
+
canonical = map[string]string{
+
"crv": crv,
+
"kty": kty,
+
"x": x,
+
"y": y,
+
}
+
case "RSA":
+
e, ok := jwk["e"].(string)
+
if !ok {
+
return "", fmt.Errorf("RSA JWK missing e")
+
}
+
n, ok := jwk["n"].(string)
+
if !ok {
+
return "", fmt.Errorf("RSA JWK missing n")
+
}
+
// Lexicographic order: e, kty, n
+
canonical = map[string]string{
+
"e": e,
+
"kty": kty,
+
"n": n,
+
}
+
case "OKP":
+
crv, ok := jwk["crv"].(string)
+
if !ok {
+
return "", fmt.Errorf("OKP JWK missing crv")
+
}
+
x, ok := jwk["x"].(string)
+
if !ok {
+
return "", fmt.Errorf("OKP JWK missing x")
+
}
+
// Lexicographic order: crv, kty, x
+
canonical = map[string]string{
+
"crv": crv,
+
"kty": kty,
+
"x": x,
+
}
+
default:
+
return "", fmt.Errorf("unsupported JWK key type: %s", kty)
+
}
+
+
// Serialize to JSON (Go's json.Marshal produces lexicographically ordered keys for map[string]string)
+
canonicalJSON, err := json.Marshal(canonical)
+
if err != nil {
+
return "", fmt.Errorf("failed to serialize canonical JWK: %w", err)
+
}
+
+
// SHA-256 hash
+
hash := sha256.Sum256(canonicalJSON)
+
+
// Base64url encode (no padding)
+
thumbprint := base64.RawURLEncoding.EncodeToString(hash[:])
+
+
return thumbprint, nil
+
}
+
+
// parseJWKToPublicKey parses a JWK map (from a DPoP proof header) into a Go
// public key, delegating JWK validation to indigo's atcrypto package and then
// converting the result to a *ecdsa.PublicKey.
func parseJWKToPublicKey(jwkMap map[string]interface{}) (interface{}, error) {
	// Convert map to JSON bytes for indigo's parser.
	jwkBytes, err := json.Marshal(jwkMap)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize JWK: %w", err)
	}

	// Try to parse with indigo's crypto package.
	pubKey, err := indigoCrypto.ParsePublicJWKBytes(jwkBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to parse JWK: %w", err)
	}

	// Round-trip through indigo's JWK form so the conversion below only has
	// to deal with one normalized representation.
	jwk, err := pubKey.JWK()
	if err != nil {
		return nil, fmt.Errorf("failed to get JWK from public key: %w", err)
	}

	// Use our existing conversion function.
	return atcryptoJWKToECDSAFromIndigoJWK(jwk)
}
+
+
// atcryptoJWKToECDSAFromIndigoJWK converts an indigo JWK to Go ecdsa.PublicKey
+
func atcryptoJWKToECDSAFromIndigoJWK(jwk *indigoCrypto.JWK) (*ecdsa.PublicKey, error) {
+
if jwk.KeyType != "EC" {
+
return nil, fmt.Errorf("unsupported JWK key type: %s (expected EC)", jwk.KeyType)
+
}
+
+
xBytes, err := base64.RawURLEncoding.DecodeString(jwk.X)
+
if err != nil {
+
return nil, fmt.Errorf("invalid JWK X coordinate: %w", err)
+
}
+
yBytes, err := base64.RawURLEncoding.DecodeString(jwk.Y)
+
if err != nil {
+
return nil, fmt.Errorf("invalid JWK Y coordinate: %w", err)
+
}
+
+
var curve ecdsa.PublicKey
+
switch jwk.Curve {
+
case "P-256":
+
curve.Curve = ecdsaP256Curve()
+
case "P-384":
+
curve.Curve = ecdsaP384Curve()
+
case "P-521":
+
curve.Curve = ecdsaP521Curve()
+
default:
+
return nil, fmt.Errorf("unsupported curve: %s", jwk.Curve)
+
}
+
+
curve.X = new(big.Int).SetBytes(xBytes)
+
curve.Y = new(big.Int).SetBytes(yBytes)
+
+
return &curve, nil
+
}
+
+
// Helper functions for elliptic curves
+
func ecdsaP256Curve() elliptic.Curve { return elliptic.P256() }
+
func ecdsaP384Curve() elliptic.Curve { return elliptic.P384() }
+
func ecdsaP521Curve() elliptic.Curve { return elliptic.P521() }
+
+
// stripQueryFragment returns the URI with any query string and fragment
// removed, per the RFC 9449 rule that htu comparison ignores both parts.
func stripQueryFragment(uri string) string {
	base, _, _ := strings.Cut(uri, "?")
	base, _, _ = strings.Cut(base, "#")
	return base
}
+
+
// ExtractCnfJkt extracts the cnf.jkt (confirmation key thumbprint) from JWT claims
+
func ExtractCnfJkt(claims *Claims) (string, error) {
+
if claims.Confirmation == nil {
+
return "", fmt.Errorf("token missing cnf claim (no DPoP binding)")
+
}
+
+
jkt, ok := claims.Confirmation["jkt"].(string)
+
if !ok || jkt == "" {
+
return "", fmt.Errorf("token cnf claim missing jkt (DPoP key thumbprint)")
+
}
+
+
return jkt, nil
+
}
+921
internal/atproto/auth/dpop_test.go
···
+
package auth
+
+
import (
+
"crypto/ecdsa"
+
"crypto/elliptic"
+
"crypto/rand"
+
"crypto/sha256"
+
"encoding/base64"
+
"encoding/json"
+
"strings"
+
"testing"
+
"time"
+
+
"github.com/golang-jwt/jwt/v5"
+
"github.com/google/uuid"
+
)
+
+
// === Test Helpers ===
+
+
// testECKey holds a test ES256 key pair together with its JWK form and
// RFC 7638 thumbprint, for building DPoP proofs in tests.
type testECKey struct {
	privateKey *ecdsa.PrivateKey      // signing key for test proofs
	publicKey  *ecdsa.PublicKey       // corresponding public key
	jwk        map[string]interface{} // public key as a JWK, usable as a "jwk" header value
	thumbprint string                 // base64url RFC 7638 thumbprint of jwk
}
+
+
// generateTestES256Key generates a fresh P-256 key pair plus its JWK
// representation and RFC 7638 thumbprint. Fails the test on any error.
func generateTestES256Key(t *testing.T) *testECKey {
	t.Helper()

	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("Failed to generate test key: %v", err)
	}

	// Encode public key coordinates as base64url.
	xBytes := privateKey.PublicKey.X.Bytes()
	yBytes := privateKey.PublicKey.Y.Bytes()

	// P-256 coordinates must be exactly 32 bytes; big.Int.Bytes drops
	// leading zeros, so left-pad if needed.
	xBytes = padTo32Bytes(xBytes)
	yBytes = padTo32Bytes(yBytes)

	x := base64.RawURLEncoding.EncodeToString(xBytes)
	y := base64.RawURLEncoding.EncodeToString(yBytes)

	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   x,
		"y":   y,
	}

	// Calculate thumbprint once so tests can assert token binding against it.
	thumbprint, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("Failed to calculate thumbprint: %v", err)
	}

	return &testECKey{
		privateKey: privateKey,
		publicKey:  &privateKey.PublicKey,
		jwk:        jwk,
		thumbprint: thumbprint,
	}
}
+
+
// padTo32Bytes left-pads a byte slice with zeros to 32 bytes (required for
// P-256 coordinates). Slices already 32 bytes or longer are returned as-is.
func padTo32Bytes(b []byte) []byte {
	if len(b) >= 32 {
		return b
	}
	return append(make([]byte, 32-len(b)), b...)
}
+
+
// createDPoPProof builds a signed DPoP proof JWT (typ "dpop+jwt", ES256) for
// the given HTTP method/URI, issued-at time, and jti. Fails the test if
// signing fails.
func createDPoPProof(t *testing.T, key *testECKey, method, uri string, iat time.Time, jti string) string {
	t.Helper()

	claims := &DPoPClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			ID:       jti,
			IssuedAt: jwt.NewNumericDate(iat),
		},
		HTTPMethod: method,
		HTTPURI:    uri,
	}

	token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
	// DPoP proofs carry their public key in the header and a dedicated typ.
	token.Header["typ"] = "dpop+jwt"
	token.Header["jwk"] = key.jwk

	tokenString, err := token.SignedString(key.privateKey)
	if err != nil {
		t.Fatalf("Failed to create DPoP proof: %v", err)
	}

	return tokenString
}
+
+
// === JWK Thumbprint Tests (RFC 7638) ===

// Happy path: an EC P-256 JWK yields a well-formed 43-char base64url thumbprint.
func TestCalculateJWKThumbprint_EC_P256(t *testing.T) {
	// Test with known values from RFC 7638 Appendix A (adapted for P-256)
	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "WKn-ZIGevcwGIyyrzFoZNBdaq9_TsqzGl96oc0CWuis",
		"y":   "y77t-RvAHRKTsSGdIYUfweuOvwrvDD-Q3Hv5J0fSKbE",
	}

	thumbprint, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("CalculateJWKThumbprint failed: %v", err)
	}

	if thumbprint == "" {
		t.Error("Expected non-empty thumbprint")
	}

	// Verify it's valid base64url
	_, err = base64.RawURLEncoding.DecodeString(thumbprint)
	if err != nil {
		t.Errorf("Thumbprint is not valid base64url: %v", err)
	}

	// Verify length (SHA-256 produces 32 bytes = 43 base64url chars)
	if len(thumbprint) != 43 {
		t.Errorf("Expected thumbprint length 43, got %d", len(thumbprint))
	}
}

// Determinism: the same JWK must always hash to the same thumbprint.
func TestCalculateJWKThumbprint_Deterministic(t *testing.T) {
	// Same key should produce same thumbprint
	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "test-x-coordinate",
		"y":   "test-y-coordinate",
	}

	thumbprint1, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("First CalculateJWKThumbprint failed: %v", err)
	}

	thumbprint2, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("Second CalculateJWKThumbprint failed: %v", err)
	}

	if thumbprint1 != thumbprint2 {
		t.Errorf("Thumbprints are not deterministic: %s != %s", thumbprint1, thumbprint2)
	}
}

// Distinct keys must not collide (sanity check, not a cryptographic proof).
func TestCalculateJWKThumbprint_DifferentKeys(t *testing.T) {
	// Different keys should produce different thumbprints
	jwk1 := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "coordinate-x-1",
		"y":   "coordinate-y-1",
	}

	jwk2 := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "coordinate-x-2",
		"y":   "coordinate-y-2",
	}

	thumbprint1, err := CalculateJWKThumbprint(jwk1)
	if err != nil {
		t.Fatalf("First CalculateJWKThumbprint failed: %v", err)
	}

	thumbprint2, err := CalculateJWKThumbprint(jwk2)
	if err != nil {
		t.Fatalf("Second CalculateJWKThumbprint failed: %v", err)
	}

	if thumbprint1 == thumbprint2 {
		t.Error("Different keys produced same thumbprint (collision)")
	}
}

// Missing required member: kty is mandatory for every key type.
func TestCalculateJWKThumbprint_MissingKty(t *testing.T) {
	jwk := map[string]interface{}{
		"crv": "P-256",
		"x":   "test-x",
		"y":   "test-y",
	}

	_, err := CalculateJWKThumbprint(jwk)
	if err == nil {
		t.Error("Expected error for missing kty, got nil")
	}
	if err != nil && !contains(err.Error(), "missing kty") {
		t.Errorf("Expected error about missing kty, got: %v", err)
	}
}

// Missing required member: crv is mandatory for EC keys.
func TestCalculateJWKThumbprint_EC_MissingCrv(t *testing.T) {
	jwk := map[string]interface{}{
		"kty": "EC",
		"x":   "test-x",
		"y":   "test-y",
	}

	_, err := CalculateJWKThumbprint(jwk)
	if err == nil {
		t.Error("Expected error for missing crv, got nil")
	}
	if err != nil && !contains(err.Error(), "missing crv") {
		t.Errorf("Expected error about missing crv, got: %v", err)
	}
}

// Missing required member: x is mandatory for EC keys.
func TestCalculateJWKThumbprint_EC_MissingX(t *testing.T) {
	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"y":   "test-y",
	}

	_, err := CalculateJWKThumbprint(jwk)
	if err == nil {
		t.Error("Expected error for missing x, got nil")
	}
	if err != nil && !contains(err.Error(), "missing x") {
		t.Errorf("Expected error about missing x, got: %v", err)
	}
}

// Missing required member: y is mandatory for EC keys.
func TestCalculateJWKThumbprint_EC_MissingY(t *testing.T) {
	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "test-x",
	}

	_, err := CalculateJWKThumbprint(jwk)
	if err == nil {
		t.Error("Expected error for missing y, got nil")
	}
	if err != nil && !contains(err.Error(), "missing y") {
		t.Errorf("Expected error about missing y, got: %v", err)
	}
}

// RSA keys use the (e, kty, n) canonical member set.
func TestCalculateJWKThumbprint_RSA(t *testing.T) {
	// Test RSA key thumbprint calculation
	jwk := map[string]interface{}{
		"kty": "RSA",
		"e":   "AQAB",
		"n":   "test-modulus",
	}

	thumbprint, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("CalculateJWKThumbprint failed for RSA: %v", err)
	}

	if thumbprint == "" {
		t.Error("Expected non-empty thumbprint for RSA key")
	}
}

// OKP keys (e.g. Ed25519) use the (crv, kty, x) canonical member set.
func TestCalculateJWKThumbprint_OKP(t *testing.T) {
	// Test OKP (Octet Key Pair) thumbprint calculation
	jwk := map[string]interface{}{
		"kty": "OKP",
		"crv": "Ed25519",
		"x":   "test-x-coordinate",
	}

	thumbprint, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("CalculateJWKThumbprint failed for OKP: %v", err)
	}

	if thumbprint == "" {
		t.Error("Expected non-empty thumbprint for OKP key")
	}
}

// Unknown key types are rejected rather than hashed incorrectly.
func TestCalculateJWKThumbprint_UnsupportedKeyType(t *testing.T) {
	jwk := map[string]interface{}{
		"kty": "UNKNOWN",
	}

	_, err := CalculateJWKThumbprint(jwk)
	if err == nil {
		t.Error("Expected error for unsupported key type, got nil")
	}
	if err != nil && !contains(err.Error(), "unsupported JWK key type") {
		t.Errorf("Expected error about unsupported key type, got: %v", err)
	}
}

// Recomputes the expected hash from the canonical JSON by hand to pin the
// RFC 7638 lexicographic-ordering behavior.
func TestCalculateJWKThumbprint_CanonicalJSON(t *testing.T) {
	// RFC 7638 requires lexicographic ordering of keys in canonical JSON
	// This test verifies that the canonical JSON is correctly ordered

	jwk := map[string]interface{}{
		"kty": "EC",
		"crv": "P-256",
		"x":   "x-coord",
		"y":   "y-coord",
	}

	// The canonical JSON should be: {"crv":"P-256","kty":"EC","x":"x-coord","y":"y-coord"}
	// (lexicographically ordered: crv, kty, x, y)

	canonical := map[string]string{
		"crv": "P-256",
		"kty": "EC",
		"x":   "x-coord",
		"y":   "y-coord",
	}

	canonicalJSON, err := json.Marshal(canonical)
	if err != nil {
		t.Fatalf("Failed to marshal canonical JSON: %v", err)
	}

	expectedHash := sha256.Sum256(canonicalJSON)
	expectedThumbprint := base64.RawURLEncoding.EncodeToString(expectedHash[:])

	actualThumbprint, err := CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("CalculateJWKThumbprint failed: %v", err)
	}

	if actualThumbprint != expectedThumbprint {
		t.Errorf("Thumbprint doesn't match expected canonical JSON hash\nExpected: %s\nGot: %s",
			expectedThumbprint, actualThumbprint)
	}
}
+
+
// === DPoP Proof Verification Tests ===
+
+
func TestVerifyDPoPProof_Valid(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
result, err := verifier.VerifyDPoPProof(proof, method, uri)
+
if err != nil {
+
t.Fatalf("VerifyDPoPProof failed for valid proof: %v", err)
+
}
+
+
if result == nil {
+
t.Fatal("Expected non-nil proof result")
+
}
+
+
if result.Claims.HTTPMethod != method {
+
t.Errorf("Expected method %s, got %s", method, result.Claims.HTTPMethod)
+
}
+
+
if result.Claims.HTTPURI != uri {
+
t.Errorf("Expected URI %s, got %s", uri, result.Claims.HTTPURI)
+
}
+
+
if result.Claims.ID != jti {
+
t.Errorf("Expected jti %s, got %s", jti, result.Claims.ID)
+
}
+
+
if result.Thumbprint != key.thumbprint {
+
t.Errorf("Expected thumbprint %s, got %s", key.thumbprint, result.Thumbprint)
+
}
+
}
+
+
func TestVerifyDPoPProof_InvalidSignature(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
wrongKey := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
// Create proof with one key
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
// Parse and modify to use wrong key's JWK in header (signature won't match)
+
parts := splitJWT(proof)
+
header := parseJWTHeader(t, parts[0])
+
header["jwk"] = wrongKey.jwk
+
modifiedHeader := encodeJSON(t, header)
+
tamperedProof := modifiedHeader + "." + parts[1] + "." + parts[2]
+
+
_, err := verifier.VerifyDPoPProof(tamperedProof, method, uri)
+
if err == nil {
+
t.Error("Expected error for invalid signature, got nil")
+
}
+
if err != nil && !contains(err.Error(), "signature verification failed") {
+
t.Errorf("Expected signature verification error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_WrongHTTPMethod(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
wrongMethod := "GET"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
_, err := verifier.VerifyDPoPProof(proof, wrongMethod, uri)
+
if err == nil {
+
t.Error("Expected error for HTTP method mismatch, got nil")
+
}
+
if err != nil && !contains(err.Error(), "htm mismatch") {
+
t.Errorf("Expected htm mismatch error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_WrongURI(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
wrongURI := "https://api.example.com/different"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
_, err := verifier.VerifyDPoPProof(proof, method, wrongURI)
+
if err == nil {
+
t.Error("Expected error for URI mismatch, got nil")
+
}
+
if err != nil && !contains(err.Error(), "htu mismatch") {
+
t.Errorf("Expected htu mismatch error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_URIWithQuery(t *testing.T) {
+
// URI comparison should strip query and fragment
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
baseURI := "https://api.example.com/resource"
+
uriWithQuery := baseURI + "?param=value"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, baseURI, iat, jti)
+
+
// Should succeed because query is stripped
+
_, err := verifier.VerifyDPoPProof(proof, method, uriWithQuery)
+
if err != nil {
+
t.Fatalf("VerifyDPoPProof failed for URI with query: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_URIWithFragment(t *testing.T) {
+
// URI comparison should strip query and fragment
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
baseURI := "https://api.example.com/resource"
+
uriWithFragment := baseURI + "#section"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, baseURI, iat, jti)
+
+
// Should succeed because fragment is stripped
+
_, err := verifier.VerifyDPoPProof(proof, method, uriWithFragment)
+
if err != nil {
+
t.Fatalf("VerifyDPoPProof failed for URI with fragment: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_ExpiredProof(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
// Proof issued 10 minutes ago (exceeds default MaxProofAge of 5 minutes)
+
iat := time.Now().Add(-10 * time.Minute)
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
_, err := verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for expired proof, got nil")
+
}
+
if err != nil && !contains(err.Error(), "too old") {
+
t.Errorf("Expected 'too old' error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_FutureProof(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
// Proof issued 1 minute in the future (exceeds MaxClockSkew)
+
iat := time.Now().Add(1 * time.Minute)
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
_, err := verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for future proof, got nil")
+
}
+
if err != nil && !contains(err.Error(), "in the future") {
+
t.Errorf("Expected 'in the future' error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_WithinClockSkew(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
// Proof issued 15 seconds in the future (within MaxClockSkew of 30s)
+
iat := time.Now().Add(15 * time.Second)
+
jti := uuid.New().String()
+
+
proof := createDPoPProof(t, key, method, uri, iat, jti)
+
+
_, err := verifier.VerifyDPoPProof(proof, method, uri)
+
if err != nil {
+
t.Fatalf("VerifyDPoPProof failed for proof within clock skew: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_MissingJti(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
+
claims := &DPoPClaims{
+
RegisteredClaims: jwt.RegisteredClaims{
+
// No ID (jti)
+
IssuedAt: jwt.NewNumericDate(iat),
+
},
+
HTTPMethod: method,
+
HTTPURI: uri,
+
}
+
+
token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
+
token.Header["typ"] = "dpop+jwt"
+
token.Header["jwk"] = key.jwk
+
+
proof, err := token.SignedString(key.privateKey)
+
if err != nil {
+
t.Fatalf("Failed to create test proof: %v", err)
+
}
+
+
_, err = verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for missing jti, got nil")
+
}
+
if err != nil && !contains(err.Error(), "missing jti") {
+
t.Errorf("Expected missing jti error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_MissingTypHeader(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
claims := &DPoPClaims{
+
RegisteredClaims: jwt.RegisteredClaims{
+
ID: jti,
+
IssuedAt: jwt.NewNumericDate(iat),
+
},
+
HTTPMethod: method,
+
HTTPURI: uri,
+
}
+
+
token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
+
// Don't set typ header
+
token.Header["jwk"] = key.jwk
+
+
proof, err := token.SignedString(key.privateKey)
+
if err != nil {
+
t.Fatalf("Failed to create test proof: %v", err)
+
}
+
+
_, err = verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for missing typ header, got nil")
+
}
+
if err != nil && !contains(err.Error(), "typ must be 'dpop+jwt'") {
+
t.Errorf("Expected typ header error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_WrongTypHeader(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
claims := &DPoPClaims{
+
RegisteredClaims: jwt.RegisteredClaims{
+
ID: jti,
+
IssuedAt: jwt.NewNumericDate(iat),
+
},
+
HTTPMethod: method,
+
HTTPURI: uri,
+
}
+
+
token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
+
token.Header["typ"] = "JWT" // Wrong typ
+
token.Header["jwk"] = key.jwk
+
+
proof, err := token.SignedString(key.privateKey)
+
if err != nil {
+
t.Fatalf("Failed to create test proof: %v", err)
+
}
+
+
_, err = verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for wrong typ header, got nil")
+
}
+
if err != nil && !contains(err.Error(), "typ must be 'dpop+jwt'") {
+
t.Errorf("Expected typ header error, got: %v", err)
+
}
+
}
+
+
func TestVerifyDPoPProof_MissingJWK(t *testing.T) {
+
verifier := NewDPoPVerifier()
+
key := generateTestES256Key(t)
+
+
method := "POST"
+
uri := "https://api.example.com/resource"
+
iat := time.Now()
+
jti := uuid.New().String()
+
+
claims := &DPoPClaims{
+
RegisteredClaims: jwt.RegisteredClaims{
+
ID: jti,
+
IssuedAt: jwt.NewNumericDate(iat),
+
},
+
HTTPMethod: method,
+
HTTPURI: uri,
+
}
+
+
token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
+
token.Header["typ"] = "dpop+jwt"
+
// Don't include JWK
+
+
proof, err := token.SignedString(key.privateKey)
+
if err != nil {
+
t.Fatalf("Failed to create test proof: %v", err)
+
}
+
+
_, err = verifier.VerifyDPoPProof(proof, method, uri)
+
if err == nil {
+
t.Error("Expected error for missing jwk header, got nil")
+
}
+
if err != nil && !contains(err.Error(), "missing jwk") {
+
t.Errorf("Expected missing jwk error, got: %v", err)
+
}
+
}
+
+
// TestVerifyDPoPProof_CustomTimeSettings verifies that a verifier constructed
// with custom MaxClockSkew/MaxProofAge honors those values: an iat 50 seconds
// in the future is accepted when MaxClockSkew is one minute.
func TestVerifyDPoPProof_CustomTimeSettings(t *testing.T) {
	// Construct the struct literal directly (not NewDPoPVerifier) to override
	// the time tolerances. NOTE(review): this bypasses any other defaults the
	// constructor sets (e.g. a replay cache) — confirm that is intended.
	verifier := &DPoPVerifier{
		MaxClockSkew: 1 * time.Minute,
		MaxProofAge:  10 * time.Minute,
	}
	key := generateTestES256Key(t)

	method := "POST"
	uri := "https://api.example.com/resource"
	// Proof issued 50 seconds in the future (within custom MaxClockSkew)
	iat := time.Now().Add(50 * time.Second)
	jti := uuid.New().String()

	proof := createDPoPProof(t, key, method, uri, iat, jti)

	_, err := verifier.VerifyDPoPProof(proof, method, uri)
	if err != nil {
		t.Fatalf("VerifyDPoPProof failed with custom time settings: %v", err)
	}
}
+
+
// TestVerifyDPoPProof_HTTPMethodCaseInsensitive verifies that a proof created
// with a lowercase htm ("post") matches a request verified with "POST".
func TestVerifyDPoPProof_HTTPMethodCaseInsensitive(t *testing.T) {
	// HTTP method comparison should be case-insensitive per spec
	// NOTE(review): HTTP method names are case-sensitive per RFC 9110 and htm
	// carries the request method verbatim — confirm the verifier's
	// case-insensitive comparison is a deliberate leniency.
	verifier := NewDPoPVerifier()
	key := generateTestES256Key(t)

	method := "post" // lowercase in the proof's htm claim
	uri := "https://api.example.com/resource"
	iat := time.Now()
	jti := uuid.New().String()

	proof := createDPoPProof(t, key, method, uri, iat, jti)

	// Verify with uppercase method
	_, err := verifier.VerifyDPoPProof(proof, "POST", uri)
	if err != nil {
		t.Fatalf("VerifyDPoPProof failed for case-insensitive method: %v", err)
	}
}
+
+
// === Token Binding Verification Tests ===
+
+
// TestVerifyTokenBinding_Matching covers the happy path: a proof signed with a
// key whose thumbprint equals the expected jkt passes VerifyTokenBinding.
func TestVerifyTokenBinding_Matching(t *testing.T) {
	verifier := NewDPoPVerifier()
	key := generateTestES256Key(t)

	method := "POST"
	uri := "https://api.example.com/resource"
	iat := time.Now()
	jti := uuid.New().String()

	proof := createDPoPProof(t, key, method, uri, iat, jti)

	// First verify the proof itself; the result carries the proof's key.
	result, err := verifier.VerifyDPoPProof(proof, method, uri)
	if err != nil {
		t.Fatalf("VerifyDPoPProof failed: %v", err)
	}

	// Verify token binding with matching thumbprint
	err = verifier.VerifyTokenBinding(result, key.thumbprint)
	if err != nil {
		t.Fatalf("VerifyTokenBinding failed for matching thumbprint: %v", err)
	}
}
+
+
// TestVerifyTokenBinding_Mismatch verifies that binding a proof to a DIFFERENT
// key's thumbprint fails — the core defense against stolen-token replay.
func TestVerifyTokenBinding_Mismatch(t *testing.T) {
	verifier := NewDPoPVerifier()
	key := generateTestES256Key(t)
	wrongKey := generateTestES256Key(t) // unrelated key; its jkt must not match

	method := "POST"
	uri := "https://api.example.com/resource"
	iat := time.Now()
	jti := uuid.New().String()

	proof := createDPoPProof(t, key, method, uri, iat, jti)

	result, err := verifier.VerifyDPoPProof(proof, method, uri)
	if err != nil {
		t.Fatalf("VerifyDPoPProof failed: %v", err)
	}

	// Verify token binding with wrong thumbprint
	err = verifier.VerifyTokenBinding(result, wrongKey.thumbprint)
	if err == nil {
		t.Error("Expected error for thumbprint mismatch, got nil")
	}
	if err != nil && !contains(err.Error(), "thumbprint mismatch") {
		t.Errorf("Expected thumbprint mismatch error, got: %v", err)
	}
}
+
+
// === ExtractCnfJkt Tests ===
+
+
func TestExtractCnfJkt_Valid(t *testing.T) {
+
expectedJkt := "test-thumbprint-123"
+
claims := &Claims{
+
Confirmation: map[string]interface{}{
+
"jkt": expectedJkt,
+
},
+
}
+
+
jkt, err := ExtractCnfJkt(claims)
+
if err != nil {
+
t.Fatalf("ExtractCnfJkt failed for valid claims: %v", err)
+
}
+
+
if jkt != expectedJkt {
+
t.Errorf("Expected jkt %s, got %s", expectedJkt, jkt)
+
}
+
}
+
+
// TestExtractCnfJkt_MissingCnf: a claims object with no cnf member must fail
// with a "missing cnf claim" error.
func TestExtractCnfJkt_MissingCnf(t *testing.T) {
	claims := &Claims{
		// No Confirmation
	}

	_, err := ExtractCnfJkt(claims)
	if err == nil {
		t.Error("Expected error for missing cnf, got nil")
	}
	if err != nil && !contains(err.Error(), "missing cnf claim") {
		t.Errorf("Expected missing cnf error, got: %v", err)
	}
}
+
+
// TestExtractCnfJkt_NilCnf: an explicitly nil cnf map must behave exactly like
// a missing cnf (same "missing cnf claim" error).
func TestExtractCnfJkt_NilCnf(t *testing.T) {
	claims := &Claims{
		Confirmation: nil,
	}

	_, err := ExtractCnfJkt(claims)
	if err == nil {
		t.Error("Expected error for nil cnf, got nil")
	}
	if err != nil && !contains(err.Error(), "missing cnf claim") {
		t.Errorf("Expected missing cnf error, got: %v", err)
	}
}
+
+
// TestExtractCnfJkt_MissingJkt: a cnf map that lacks the "jkt" key must fail
// with a "missing jkt" error.
func TestExtractCnfJkt_MissingJkt(t *testing.T) {
	claims := &Claims{
		Confirmation: map[string]interface{}{
			"other": "value", // cnf present, but no jkt member
		},
	}

	_, err := ExtractCnfJkt(claims)
	if err == nil {
		t.Error("Expected error for missing jkt, got nil")
	}
	if err != nil && !contains(err.Error(), "missing jkt") {
		t.Errorf("Expected missing jkt error, got: %v", err)
	}
}
+
+
// TestExtractCnfJkt_EmptyJkt: an empty-string jkt is treated the same as a
// missing one (rejected with "missing jkt").
func TestExtractCnfJkt_EmptyJkt(t *testing.T) {
	claims := &Claims{
		Confirmation: map[string]interface{}{
			"jkt": "",
		},
	}

	_, err := ExtractCnfJkt(claims)
	if err == nil {
		t.Error("Expected error for empty jkt, got nil")
	}
	if err != nil && !contains(err.Error(), "missing jkt") {
		t.Errorf("Expected missing jkt error, got: %v", err)
	}
}
+
+
// TestExtractCnfJkt_WrongType: a non-string jkt value (here an int) is
// rejected with the same "missing jkt" error as an absent one.
func TestExtractCnfJkt_WrongType(t *testing.T) {
	claims := &Claims{
		Confirmation: map[string]interface{}{
			"jkt": 123, // Not a string
		},
	}

	_, err := ExtractCnfJkt(claims)
	if err == nil {
		t.Error("Expected error for wrong type jkt, got nil")
	}
	if err != nil && !contains(err.Error(), "missing jkt") {
		t.Errorf("Expected missing jkt error, got: %v", err)
	}
}
+
+
// === Helper Functions for Tests ===
+
+
// splitJWT splits a compact-serialized JWT into its three dot-separated
// parts (header, payload, signature).
//
// Fix: the previous implementation sliced on strings.IndexByte /
// LastIndexByte without checking for -1, so a token with fewer than two
// dots caused a slice-bounds panic; it also rescanned the string four
// times. Malformed input now returns the token as a single element
// instead of panicking, and each index is computed once.
func splitJWT(token string) []string {
	first := strings.IndexByte(token, '.')
	last := strings.LastIndexByte(token, '.')
	if first < 0 || first == last {
		// Fewer than two dots: not a three-part JWT.
		return []string{token}
	}
	return []string{
		token[:first],
		token[first+1 : last],
		token[last+1:],
	}
}
+
+
// parseJWTHeader parses a base64url-encoded JWT header
// (RawURLEncoding = unpadded base64url, as used in JWS compact form) into a
// generic map. Decode or JSON errors fail the calling test immediately.
func parseJWTHeader(t *testing.T, encoded string) map[string]interface{} {
	t.Helper()
	decoded, err := base64.RawURLEncoding.DecodeString(encoded)
	if err != nil {
		t.Fatalf("Failed to decode header: %v", err)
	}

	var header map[string]interface{}
	if err := json.Unmarshal(decoded, &header); err != nil {
		t.Fatalf("Failed to unmarshal header: %v", err)
	}

	return header
}
+
+
// encodeJSON marshals v to JSON and returns the unpadded base64url encoding,
// the format used for JWT header/payload segments. Marshal failures abort the
// calling test.
func encodeJSON(t *testing.T, v interface{}) string {
	t.Helper()

	raw, marshalErr := json.Marshal(v)
	if marshalErr != nil {
		t.Fatalf("Failed to marshal JSON: %v", marshalErr)
	}

	return base64.RawURLEncoding.EncodeToString(raw)
}
+148 -6
internal/api/middleware/auth.go
···
import (
"Coves/internal/atproto/auth"
"context"
+
"fmt"
"log"
"net/http"
"strings"
···
UserDIDKey contextKey = "user_did"
JWTClaimsKey contextKey = "jwt_claims"
UserAccessToken contextKey = "user_access_token"
+
DPoPProofKey contextKey = "dpop_proof"
)
// AtProtoAuthMiddleware enforces atProto OAuth authentication for protected routes
// Validates JWT Bearer tokens from the Authorization header
// Supports DPoP (RFC 9449) for token binding verification
type AtProtoAuthMiddleware struct {
	jwksFetcher  auth.JWKSFetcher   // resolves issuer signing keys for JWT verification
	dpopVerifier *auth.DPoPVerifier // verifies DPoP proofs; owns replay-cache state (see Stop)
	skipVerify   bool               // For Phase 1 testing only
}
// NewAtProtoAuthMiddleware creates a new atProto auth middleware
// skipVerify: if true, only parses JWT without signature verification (Phase 1)
//
// if false, performs full signature verification (Phase 2)
//
// IMPORTANT: Call Stop() when shutting down to clean up background goroutines.
func NewAtProtoAuthMiddleware(jwksFetcher auth.JWKSFetcher, skipVerify bool) *AtProtoAuthMiddleware {
	return &AtProtoAuthMiddleware{
		jwksFetcher: jwksFetcher,
		// Each middleware instance owns its own DPoP verifier (and its
		// replay-protection cache) — see Stop for cleanup.
		dpopVerifier: auth.NewDPoPVerifier(),
		skipVerify:   skipVerify,
	}
}
+
+
// Stop stops background goroutines. Call this when shutting down the server.
// This prevents goroutine leaks from the DPoP verifier's replay protection cache.
func (m *AtProtoAuthMiddleware) Stop() {
	// nil-guard: a zero-value middleware (no verifier) is safe to Stop.
	if m.dpopVerifier != nil {
		m.dpopVerifier.Stop()
	}
}
···
}
} else {
// Phase 2: Full verification with signature check
+
//
+
// SECURITY: The access token MUST be verified before trusting any claims.
+
// DPoP is an ADDITIONAL security layer, not a replacement for signature verification.
claims, err = auth.VerifyJWT(r.Context(), token, m.jwksFetcher)
if err != nil {
-
// Try to extract issuer for better logging
+
// Token verification failed - REJECT
+
// DO NOT fall back to DPoP-only verification, as that would trust unverified claims
issuer := "unknown"
if parsedClaims, parseErr := auth.ParseJWT(token); parseErr == nil {
issuer = parsedClaims.Issuer
···
writeAuthError(w, "Invalid or expired token")
return
}
+
+
// Token signature verified - now check if DPoP binding is required
+
// If the token has a cnf.jkt claim, DPoP proof is REQUIRED
+
dpopHeader := r.Header.Get("DPoP")
+
hasCnfJkt := claims.Confirmation != nil && claims.Confirmation["jkt"] != nil
+
+
if hasCnfJkt {
+
// Token has DPoP binding - REQUIRE valid DPoP proof
+
if dpopHeader == "" {
+
log.Printf("[AUTH_FAILURE] type=missing_dpop ip=%s method=%s path=%s error=token has cnf.jkt but no DPoP header",
+
r.RemoteAddr, r.Method, r.URL.Path)
+
writeAuthError(w, "DPoP proof required")
+
return
+
}
+
+
proof, err := m.verifyDPoPBinding(r, claims, dpopHeader)
+
if err != nil {
+
log.Printf("[AUTH_FAILURE] type=dpop_verification_failed ip=%s method=%s path=%s error=%v",
+
r.RemoteAddr, r.Method, r.URL.Path, err)
+
writeAuthError(w, "Invalid DPoP proof")
+
return
+
}
+
+
// Store verified DPoP proof in context
+
ctx := context.WithValue(r.Context(), DPoPProofKey, proof)
+
r = r.WithContext(ctx)
+
} else if dpopHeader != "" {
+
// DPoP header present but token doesn't have cnf.jkt - this is suspicious
+
// Log warning but don't reject (could be a misconfigured client)
+
log.Printf("[AUTH_WARNING] type=unexpected_dpop ip=%s method=%s path=%s warning=DPoP header present but token has no cnf.jkt",
+
r.RemoteAddr, r.Method, r.URL.Path)
+
}
}
// Extract user DID from 'sub' claim
···
claims, err = auth.ParseJWT(token)
} else {
// Phase 2: Full verification
+
// SECURITY: Token MUST be verified before trusting claims
claims, err = auth.VerifyJWT(r.Context(), token, m.jwksFetcher)
}
···
return
}
-
// Inject user info and access token into context
+
// Check DPoP binding if token has cnf.jkt (after successful verification)
+
// SECURITY: If token has cnf.jkt but no DPoP header, we cannot trust it
+
// (could be a stolen token). Continue as unauthenticated.
+
if !m.skipVerify {
+
dpopHeader := r.Header.Get("DPoP")
+
hasCnfJkt := claims.Confirmation != nil && claims.Confirmation["jkt"] != nil
+
+
if hasCnfJkt {
+
if dpopHeader == "" {
+
// Token requires DPoP binding but no proof provided
+
// Cannot trust this token - continue without auth
+
log.Printf("[AUTH_WARNING] Optional auth: token has cnf.jkt but no DPoP header - treating as unauthenticated (potential token theft)")
+
next.ServeHTTP(w, r)
+
return
+
}
+
+
proof, err := m.verifyDPoPBinding(r, claims, dpopHeader)
+
if err != nil {
+
// DPoP verification failed - cannot trust this token
+
log.Printf("[AUTH_WARNING] Optional auth: DPoP verification failed - treating as unauthenticated: %v", err)
+
next.ServeHTTP(w, r)
+
return
+
}
+
+
// DPoP verified - inject proof into context
+
ctx := context.WithValue(r.Context(), UserDIDKey, claims.Subject)
+
ctx = context.WithValue(ctx, JWTClaimsKey, claims)
+
ctx = context.WithValue(ctx, UserAccessToken, token)
+
ctx = context.WithValue(ctx, DPoPProofKey, proof)
+
next.ServeHTTP(w, r.WithContext(ctx))
+
return
+
}
+
}
+
+
// No DPoP binding required - inject user info and access token into context
ctx := context.WithValue(r.Context(), UserDIDKey, claims.Subject)
ctx = context.WithValue(ctx, JWTClaimsKey, claims)
ctx = context.WithValue(ctx, UserAccessToken, token)
···
return token
}
+
// GetDPoPProof extracts the DPoP proof from the request context
// Returns nil if no DPoP proof was verified
// (the type assertion's ok-result is discarded, so a missing or
// wrongly-typed context value yields nil rather than a panic).
func GetDPoPProof(r *http.Request) *auth.DPoPProof {
	proof, _ := r.Context().Value(DPoPProofKey).(*auth.DPoPProof)
	return proof
}
+
+
// verifyDPoPBinding verifies DPoP proof binding for an ALREADY VERIFIED token.
//
// SECURITY: This function ONLY verifies the DPoP proof and its binding to the token.
// The access token MUST be signature-verified BEFORE calling this function.
// DPoP is an ADDITIONAL security layer, not a replacement for signature verification.
//
// This prevents token theft attacks by proving the client possesses the private key
// corresponding to the public key thumbprint in the token's cnf.jkt claim.
//
// Returns the verified proof, or an error if the token lacks cnf.jkt, the
// proof is invalid, or the proof's key does not match the token's thumbprint.
func (m *AtProtoAuthMiddleware) verifyDPoPBinding(r *http.Request, claims *auth.Claims, dpopProofHeader string) (*auth.DPoPProof, error) {
	// Extract the cnf.jkt claim from the already-verified token
	jkt, err := auth.ExtractCnfJkt(claims)
	if err != nil {
		return nil, fmt.Errorf("token requires DPoP but missing cnf.jkt: %w", err)
	}

	// Build the HTTP URI for DPoP verification
	// Use the full URL including scheme and host
	scheme := strings.TrimSpace(r.URL.Scheme)
	if forwardedProto := r.Header.Get("X-Forwarded-Proto"); forwardedProto != "" {
		// Forwarded proto may contain a comma-separated list; use the first entry
		parts := strings.Split(forwardedProto, ",")
		if len(parts) > 0 && strings.TrimSpace(parts[0]) != "" {
			scheme = strings.ToLower(strings.TrimSpace(parts[0]))
		}
	}
	if scheme == "" {
		// No explicit scheme: infer from whether TLS terminated locally.
		if r.TLS != nil {
			scheme = "https"
		} else {
			scheme = "http"
		}
	}
	scheme = strings.ToLower(scheme)
	// r.URL.Path drops query/fragment here, matching the verifier's htu input.
	// NOTE(review): X-Forwarded-Host is not consulted — r.Host must already be
	// the externally visible host when running behind a proxy; confirm the
	// deployment preserves the Host header.
	httpURI := scheme + "://" + r.Host + r.URL.Path

	// Verify the DPoP proof
	proof, err := m.dpopVerifier.VerifyDPoPProof(dpopProofHeader, r.Method, httpURI)
	if err != nil {
		return nil, fmt.Errorf("DPoP proof verification failed: %w", err)
	}

	// Verify the binding between the proof and the token
	if err := m.dpopVerifier.VerifyTokenBinding(proof, jkt); err != nil {
		return nil, fmt.Errorf("DPoP binding verification failed: %w", err)
	}

	return proof, nil
}
+
// writeAuthError writes a JSON error response for authentication failures
func writeAuthError(w http.ResponseWriter, message string) {
w.Header().Set("Content-Type", "application/json")
+416
internal/api/middleware/auth_test.go
···
package middleware
import (
+
"Coves/internal/atproto/auth"
"context"
+
"crypto/ecdsa"
+
"crypto/elliptic"
+
"crypto/rand"
+
"encoding/base64"
"fmt"
"net/http"
"net/http/httptest"
···
"time"
"github.com/golang-jwt/jwt/v5"
+
"github.com/google/uuid"
)
// mockJWKSFetcher is a test double for JWKSFetcher
···
t.Errorf("expected nil claims, got %+v", claims)
}
}
+
+
// TestGetDPoPProof_NotAuthenticated tests that GetDPoPProof returns nil when no DPoP was verified
+
func TestGetDPoPProof_NotAuthenticated(t *testing.T) {
+
req := httptest.NewRequest("GET", "/test", nil)
+
proof := GetDPoPProof(req)
+
+
if proof != nil {
+
t.Errorf("expected nil proof, got %+v", proof)
+
}
+
}
+
+
// TestRequireAuth_WithDPoP_SecurityModel tests the correct DPoP security model:
// Token MUST be verified first, then DPoP is checked as an additional layer.
// DPoP is NOT a fallback for failed token verification.
func TestRequireAuth_WithDPoP_SecurityModel(t *testing.T) {
	// Generate an ECDSA key pair for DPoP
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("failed to generate key: %v", err)
	}

	// Calculate JWK thumbprint for cnf.jkt
	jwk := ecdsaPublicKeyToJWK(&privateKey.PublicKey)
	thumbprint, err := auth.CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("failed to calculate thumbprint: %v", err)
	}

	t.Run("DPoP_is_NOT_fallback_for_failed_verification", func(t *testing.T) {
		// SECURITY TEST: When token verification fails, DPoP should NOT be used as fallback
		// This prevents an attacker from forging a token with their own cnf.jkt

		// Create a DPoP-bound access token (unsigned - will fail verification)
		claims := auth.Claims{
			RegisteredClaims: jwt.RegisteredClaims{
				Subject:   "did:plc:attacker",
				Issuer:    "https://external.pds.local",
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
			Scope: "atproto",
			Confirmation: map[string]interface{}{
				"jkt": thumbprint,
			},
		}

		token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
		tokenString, _ := token.SignedString(jwt.UnsafeAllowNoneSignatureType)

		// Create valid DPoP proof (attacker has the private key)
		dpopProof := createDPoPProof(t, privateKey, "GET", "https://test.local/api/endpoint")

		// Mock fetcher that fails (simulating external PDS without JWKS)
		fetcher := &mockJWKSFetcher{shouldFail: true}
		middleware := NewAtProtoAuthMiddleware(fetcher, false) // skipVerify=false
		// NOTE(review): middleware.Stop() is not deferred here; the verifier's
		// background goroutine lives for the remainder of the test binary.

		handler := middleware.RequireAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			t.Error("SECURITY VULNERABILITY: handler was called despite token verification failure")
		}))

		req := httptest.NewRequest("GET", "https://test.local/api/endpoint", nil)
		req.Header.Set("Authorization", "Bearer "+tokenString)
		req.Header.Set("DPoP", dpopProof)
		w := httptest.NewRecorder()

		handler.ServeHTTP(w, req)

		// MUST reject - token verification failed, DPoP cannot substitute for signature verification
		if w.Code != http.StatusUnauthorized {
			t.Errorf("SECURITY: expected 401 for unverified token, got %d", w.Code)
		}
	})

	t.Run("DPoP_required_when_cnf_jkt_present_in_verified_token", func(t *testing.T) {
		// When token has cnf.jkt, DPoP header MUST be present
		// This test uses skipVerify=true to simulate a verified token

		claims := auth.Claims{
			RegisteredClaims: jwt.RegisteredClaims{
				Subject:   "did:plc:test123",
				Issuer:    "https://test.pds.local",
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
			Scope: "atproto",
			Confirmation: map[string]interface{}{
				"jkt": thumbprint,
			},
		}

		token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
		tokenString, _ := token.SignedString(jwt.UnsafeAllowNoneSignatureType)

		// NO DPoP header - should fail when skipVerify is false
		// Note: with skipVerify=true, DPoP is not checked
		fetcher := &mockJWKSFetcher{}
		middleware := NewAtProtoAuthMiddleware(fetcher, true) // skipVerify=true for parsing

		handlerCalled := false
		handler := middleware.RequireAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			handlerCalled = true
			w.WriteHeader(http.StatusOK)
		}))

		req := httptest.NewRequest("GET", "https://test.local/api/endpoint", nil)
		req.Header.Set("Authorization", "Bearer "+tokenString)
		// No DPoP header
		w := httptest.NewRecorder()

		handler.ServeHTTP(w, req)

		// With skipVerify=true, DPoP is not checked, so this should succeed
		if !handlerCalled {
			t.Error("handler should be called when skipVerify=true")
		}
	})
}
+
+
// TestRequireAuth_TokenVerificationFails_DPoPNotUsedAsFallback is the key security test.
// It ensures that DPoP cannot be used as a fallback when token signature verification fails.
func TestRequireAuth_TokenVerificationFails_DPoPNotUsedAsFallback(t *testing.T) {
	// Generate a key pair (attacker's key)
	attackerKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	jwk := ecdsaPublicKeyToJWK(&attackerKey.PublicKey)
	thumbprint, _ := auth.CalculateJWKThumbprint(jwk)

	// Create a FORGED token claiming to be the victim
	claims := auth.Claims{
		RegisteredClaims: jwt.RegisteredClaims{
			Subject:   "did:plc:victim_user", // Attacker claims to be victim
			Issuer:    "https://untrusted.pds",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
			IssuedAt:  jwt.NewNumericDate(time.Now()),
		},
		Scope: "atproto",
		Confirmation: map[string]interface{}{
			"jkt": thumbprint, // Attacker uses their own key
		},
	}

	token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
	tokenString, _ := token.SignedString(jwt.UnsafeAllowNoneSignatureType)

	// Attacker creates a valid DPoP proof with their key
	dpopProof := createDPoPProof(t, attackerKey, "POST", "https://api.example.com/protected")

	// Fetcher fails (external PDS without JWKS)
	fetcher := &mockJWKSFetcher{shouldFail: true}
	middleware := NewAtProtoAuthMiddleware(fetcher, false) // skipVerify=false - REAL verification

	handler := middleware.RequireAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t.Fatalf("CRITICAL SECURITY FAILURE: Request authenticated as %s despite forged token!",
			GetUserDID(r))
	}))

	req := httptest.NewRequest("POST", "https://api.example.com/protected", nil)
	req.Header.Set("Authorization", "Bearer "+tokenString)
	req.Header.Set("DPoP", dpopProof)
	w := httptest.NewRecorder()

	handler.ServeHTTP(w, req)

	// MUST reject - the token signature was never verified
	if w.Code != http.StatusUnauthorized {
		t.Errorf("SECURITY VULNERABILITY: Expected 401, got %d. Token was not properly verified!", w.Code)
	}
}
+
+
// TestVerifyDPoPBinding_UsesForwardedProto ensures we honor the external HTTPS
// scheme when TLS is terminated upstream and X-Forwarded-Proto is present.
func TestVerifyDPoPBinding_UsesForwardedProto(t *testing.T) {
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		t.Fatalf("failed to generate key: %v", err)
	}

	jwk := ecdsaPublicKeyToJWK(&privateKey.PublicKey)
	thumbprint, err := auth.CalculateJWKThumbprint(jwk)
	if err != nil {
		t.Fatalf("failed to calculate thumbprint: %v", err)
	}

	// Token bound to the key above via cnf.jkt.
	claims := &auth.Claims{
		RegisteredClaims: jwt.RegisteredClaims{
			Subject:   "did:plc:test123",
			Issuer:    "https://test.pds.local",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
			IssuedAt:  jwt.NewNumericDate(time.Now()),
		},
		Scope: "atproto",
		Confirmation: map[string]interface{}{
			"jkt": thumbprint,
		},
	}

	middleware := NewAtProtoAuthMiddleware(&mockJWKSFetcher{}, false)
	defer middleware.Stop()

	// The proof is bound to the EXTERNAL https URI while the request itself
	// arrives over plain http, carrying the proxy's X-Forwarded-Proto header.
	externalURI := "https://api.example.com/protected/resource"
	dpopProof := createDPoPProof(t, privateKey, "GET", externalURI)

	req := httptest.NewRequest("GET", "http://internal-service/protected/resource", nil)
	req.Host = "api.example.com"
	req.Header.Set("X-Forwarded-Proto", "https")

	proof, err := middleware.verifyDPoPBinding(req, claims, dpopProof)
	if err != nil {
		t.Fatalf("expected DPoP verification to succeed with forwarded proto, got %v", err)
	}

	if proof == nil || proof.Claims == nil {
		t.Fatal("expected DPoP proof to be returned")
	}
}
+
+
// TestMiddlewareStop tests that the middleware can be stopped properly
+
func TestMiddlewareStop(t *testing.T) {
+
fetcher := &mockJWKSFetcher{}
+
middleware := NewAtProtoAuthMiddleware(fetcher, false)
+
+
// Stop should not panic and should clean up resources
+
middleware.Stop()
+
+
// Calling Stop again should also be safe (idempotent-ish)
+
// Note: The underlying DPoPVerifier.Stop() closes a channel, so this might panic
+
// if not handled properly. We test that at least one Stop works.
+
}
+
+
// TestOptionalAuth_DPoPBoundToken_NoDPoPHeader tests that OptionalAuth treats
// tokens with cnf.jkt but no DPoP header as unauthenticated (potential token theft)
func TestOptionalAuth_DPoPBoundToken_NoDPoPHeader(t *testing.T) {
	// Generate a key pair for DPoP binding
	privateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	jwk := ecdsaPublicKeyToJWK(&privateKey.PublicKey)
	thumbprint, _ := auth.CalculateJWKThumbprint(jwk)

	// Create a DPoP-bound token (has cnf.jkt)
	claims := auth.Claims{
		RegisteredClaims: jwt.RegisteredClaims{
			Subject:   "did:plc:user123",
			Issuer:    "https://test.pds.local",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
			IssuedAt:  jwt.NewNumericDate(time.Now()),
		},
		Scope: "atproto",
		Confirmation: map[string]interface{}{
			"jkt": thumbprint,
		},
	}

	token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
	tokenString, _ := token.SignedString(jwt.UnsafeAllowNoneSignatureType)

	// Use skipVerify=true to simulate a verified token
	// (In production, skipVerify would be false and VerifyJWT would be called)
	// However, for this test we need skipVerify=false to trigger DPoP checking
	// But the fetcher will fail, so let's use skipVerify=true and verify the logic
	// Actually, the DPoP check only happens when skipVerify=false

	t.Run("with_skipVerify_false", func(t *testing.T) {
		// This will fail at JWT verification level, but that's expected
		// The important thing is the code path for DPoP checking
		fetcher := &mockJWKSFetcher{shouldFail: true}
		middleware := NewAtProtoAuthMiddleware(fetcher, false)
		defer middleware.Stop()

		handlerCalled := false
		var capturedDID string
		handler := middleware.OptionalAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			handlerCalled = true
			capturedDID = GetUserDID(r)
			w.WriteHeader(http.StatusOK)
		}))

		req := httptest.NewRequest("GET", "/test", nil)
		req.Header.Set("Authorization", "Bearer "+tokenString)
		// Deliberately NOT setting DPoP header
		w := httptest.NewRecorder()

		handler.ServeHTTP(w, req)

		// Handler should be called (optional auth doesn't block)
		if !handlerCalled {
			t.Error("handler should be called")
		}

		// But since JWT verification fails, user should not be authenticated
		if capturedDID != "" {
			t.Errorf("expected empty DID when verification fails, got %s", capturedDID)
		}
	})

	t.Run("with_skipVerify_true_dpop_not_checked", func(t *testing.T) {
		// When skipVerify=true, DPoP is not checked (Phase 1 mode)
		fetcher := &mockJWKSFetcher{}
		middleware := NewAtProtoAuthMiddleware(fetcher, true)
		defer middleware.Stop()

		handlerCalled := false
		var capturedDID string
		handler := middleware.OptionalAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			handlerCalled = true
			capturedDID = GetUserDID(r)
			w.WriteHeader(http.StatusOK)
		}))

		req := httptest.NewRequest("GET", "/test", nil)
		req.Header.Set("Authorization", "Bearer "+tokenString)
		// No DPoP header
		w := httptest.NewRecorder()

		handler.ServeHTTP(w, req)

		if !handlerCalled {
			t.Error("handler should be called")
		}

		// With skipVerify=true, DPoP check is bypassed - token is trusted
		if capturedDID != "did:plc:user123" {
			t.Errorf("expected DID when skipVerify=true, got %s", capturedDID)
		}
	})
}
+
+
// TestDPoPReplayProtection tests that the same DPoP proof cannot be used twice
+
func TestDPoPReplayProtection(t *testing.T) {
+
// This tests the NonceCache functionality
+
cache := auth.NewNonceCache(5 * time.Minute)
+
defer cache.Stop()
+
+
jti := "unique-proof-id-123"
+
+
// First use should succeed
+
if !cache.CheckAndStore(jti) {
+
t.Error("First use of jti should succeed")
+
}
+
+
// Second use should fail (replay detected)
+
if cache.CheckAndStore(jti) {
+
t.Error("SECURITY: Replay attack not detected - same jti accepted twice")
+
}
+
+
// Different jti should succeed
+
if !cache.CheckAndStore("different-jti-456") {
+
t.Error("Different jti should succeed")
+
}
+
}
+
+
// Helper: createDPoPProof creates a DPoP proof JWT for testing
// The proof is signed with privateKey (ES256), carries htm/htu for the given
// method and uri, and embeds the public key as a "jwk" JOSE header.
func createDPoPProof(t *testing.T, privateKey *ecdsa.PrivateKey, method, uri string) string {
	// Create JWK from public key
	jwk := ecdsaPublicKeyToJWK(&privateKey.PublicKey)

	// Create DPoP claims with UUID for jti to ensure uniqueness across tests
	claims := auth.DPoPClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			IssuedAt: jwt.NewNumericDate(time.Now()),
			ID:       uuid.New().String(),
		},
		HTTPMethod: method,
		HTTPURI:    uri,
	}

	// Create token with custom header
	token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
	token.Header["typ"] = "dpop+jwt" // required typ per RFC 9449
	token.Header["jwk"] = jwk

	// Sign with private key
	signedToken, err := token.SignedString(privateKey)
	if err != nil {
		t.Fatalf("failed to sign DPoP proof: %v", err)
	}

	return signedToken
}
+
+
// Helper: ecdsaPublicKeyToJWK converts an ECDSA public key to JWK map
+
func ecdsaPublicKeyToJWK(pubKey *ecdsa.PublicKey) map[string]interface{} {
+
// Get curve name
+
var crv string
+
switch pubKey.Curve {
+
case elliptic.P256():
+
crv = "P-256"
+
case elliptic.P384():
+
crv = "P-384"
+
case elliptic.P521():
+
crv = "P-521"
+
default:
+
panic("unsupported curve")
+
}
+
+
// Encode coordinates
+
xBytes := pubKey.X.Bytes()
+
yBytes := pubKey.Y.Bytes()
+
+
// Ensure proper byte length (pad if needed)
+
keySize := (pubKey.Curve.Params().BitSize + 7) / 8
+
xPadded := make([]byte, keySize)
+
yPadded := make([]byte, keySize)
+
copy(xPadded[keySize-len(xBytes):], xBytes)
+
copy(yPadded[keySize-len(yBytes):], yBytes)
+
+
return map[string]interface{}{
+
"kty": "EC",
+
"crv": crv,
+
"x": base64.RawURLEncoding.EncodeToString(xPadded),
+
"y": base64.RawURLEncoding.EncodeToString(yPadded),
+
}
+
}
+4 -1
internal/atproto/auth/jwt.go
···
// Claims represents the standard JWT claims we care about
type Claims struct {
	jwt.RegisteredClaims
	// Confirmation claim for DPoP token binding (RFC 9449)
	// Contains "jkt" (JWK thumbprint) when token is bound to a DPoP key
	Confirmation map[string]interface{} `json:"cnf,omitempty"`
	// Scope is the OAuth scope string granted with the token (e.g. "atproto").
	Scope string `json:"scope,omitempty"`
}
// stripBearerPrefix removes the "Bearer " prefix from a token string
+134 -2
internal/atproto/auth/README.md
···
5. Find matching key by `kid` from JWT header
6. Cache the JWKS for 1 hour
+
## DPoP Token Binding
+
+
DPoP (Demonstrating Proof-of-Possession) binds access tokens to client-controlled cryptographic keys, preventing token theft and replay attacks.
+
+
### What is DPoP?
+
+
DPoP is an OAuth extension (RFC 9449) that adds proof-of-possession semantics to bearer tokens. When a PDS issues a DPoP-bound access token:
+
+
1. Access token contains `cnf.jkt` claim (JWK thumbprint of client's public key)
+
2. Client creates a DPoP proof JWT signed with their private key
+
3. Server verifies the proof signature and checks it matches the token's `cnf.jkt`
+
+
### CRITICAL: DPoP Security Model
+
+
> โš ๏ธ **DPoP is an ADDITIONAL security layer, NOT a replacement for token signature verification.**
+
+
The correct verification order is:
+
1. **ALWAYS verify the access token signature first** (via JWKS, HS256 shared secret, or DID resolution)
+
2. **If the verified token has `cnf.jkt`, REQUIRE valid DPoP proof**
+
3. **NEVER use DPoP as a fallback when signature verification fails**
+
+
**Why This Matters**: An attacker could create a fake token with `sub: "did:plc:victim"` and their own `cnf.jkt`, then present a valid DPoP proof signed with their key. If we accept DPoP as a fallback, the attacker can impersonate any user.
+
+
### How DPoP Works
+
+
```
┌─────────────┐                     ┌─────────────┐
│   Client    │                     │   Server    │
│             │                     │   (Coves)   │
└─────────────┘                     └─────────────┘
      │                                   │
      │ 1. Authorization: Bearer <token>  │
      │    DPoP: <proof-jwt>              │
      │──────────────────────────────────>│
      │                                   │
      │                                   │ 2. VERIFY token signature
      │                                   │    (REQUIRED - no fallback!)
      │                                   │
      │                                   │ 3. If token has cnf.jkt:
      │                                   │    - Verify DPoP proof
      │                                   │    - Check thumbprint match
      │                                   │
      │              200 OK               │
      │<──────────────────────────────────│
```
+
+
### When DPoP is Required
+
+
DPoP verification is **REQUIRED** when:
+
- Access token signature has been verified AND
+
- Access token contains `cnf.jkt` claim (DPoP-bound)
+
+
If the token has `cnf.jkt` but no DPoP header is present, the request is **REJECTED**.
+
+
### Replay Protection
+
+
DPoP proofs include a unique `jti` (JWT ID) claim. The server tracks seen `jti` values to prevent replay attacks:
+
+
```go
+
// Create a verifier with replay protection (default)
+
verifier := auth.NewDPoPVerifier()
+
defer verifier.Stop() // Stop cleanup goroutine on shutdown
+
+
// The verifier automatically rejects reused jti values within the proof validity window (5 minutes)
+
```
+
+
### DPoP Implementation
+
+
The `dpop.go` module provides:
+
+
```go
+
// Create a verifier with replay protection
+
verifier := auth.NewDPoPVerifier()
+
defer verifier.Stop()
+
+
// Verify the DPoP proof
+
proof, err := verifier.VerifyDPoPProof(dpopHeader, "POST", "https://coves.social/xrpc/...")
+
if err != nil {
+
// Invalid proof (includes replay detection)
+
}
+
+
// Verify it binds to the VERIFIED access token
+
expectedThumbprint, err := auth.ExtractCnfJkt(claims)
+
if err != nil {
+
// Token not DPoP-bound
+
}
+
+
if err := verifier.VerifyTokenBinding(proof, expectedThumbprint); err != nil {
+
// Proof doesn't match token
+
}
+
```
+
+
### DPoP Proof Format
+
+
The DPoP header contains a JWT with:
+
+
**Header**:
+
- `typ`: `"dpop+jwt"` (required)
+
- `alg`: `"ES256"` (or other supported algorithm)
+
- `jwk`: Client's public key (JWK format)
+
+
**Claims**:
+
- `jti`: Unique proof identifier (tracked for replay protection)
+
- `htm`: HTTP method (e.g., `"POST"`)
+
- `htu`: HTTP URI (without query/fragment)
+
- `iat`: Issued-at timestamp (must be recent — within the 5-minute proof validity window)
- NOTE(review): RFC 9449 also defines an `ath` (access-token hash) claim that binds a proof to a specific access token — confirm whether this implementation validates it.
+
+
**Example**:
+
```json
+
{
+
"typ": "dpop+jwt",
+
"alg": "ES256",
+
"jwk": {
+
"kty": "EC",
+
"crv": "P-256",
+
"x": "...",
+
"y": "..."
+
}
+
}
+
{
+
"jti": "unique-id-123",
+
"htm": "POST",
+
"htu": "https://coves.social/xrpc/social.coves.community.create",
+
"iat": 1700000000
+
}
+
```
+
## Security Considerations
### โœ… Implemented
···
- Required claims validation (sub, iss)
- Key caching with TTL
- Secure error messages (no internal details leaked)
+
- **DPoP proof verification** (proof-of-possession for token binding)
+
- **DPoP thumbprint validation** (prevents token theft attacks)
+
- **DPoP freshness checks** (5-minute proof validity window)
+
- **DPoP replay protection** (jti tracking with in-memory cache)
+
- **Secure DPoP model** (DPoP required AFTER signature verification, never as fallback)
### โš ๏ธ Not Yet Implemented
-
- DPoP validation (for replay attack prevention)
+
- Server-issued DPoP nonces (additional replay protection)
- Scope validation (checking `scope` claim)
- Audience validation (checking `aud` claim)
- Rate limiting per DID
···
## Future Enhancements
-
- [ ] DPoP proof validation
+
- [ ] DPoP nonce validation (server-managed nonce for additional replay protection)
- [ ] Scope-based authorization
- [ ] Audience claim validation
- [ ] Token revocation support
+4 -1
.gitignore
···
# Build artifacts
/validate-lexicon
-
/bin/
+
/bin/
+
+
# Go build cache
+
.cache/
+5 -6
go.mod
···
module Coves
-
go 1.24.0
+
go 1.25
require (
-
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe
+
github.com/bluesky-social/indigo v0.0.0-20251127021457-6f2658724b36
github.com/go-chi/chi/v5 v5.2.1
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/gorilla/websocket v1.5.3
···
github.com/lestrrat-go/jwx/v2 v2.0.12
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.22.1
-
github.com/stretchr/testify v1.9.0
+
github.com/stretchr/testify v1.10.0
+
github.com/xeipuuv/gojsonschema v1.2.0
golang.org/x/net v0.46.0
golang.org/x/time v0.3.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
-
github.com/carlmjohnson/versioninfo v0.22.5 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+
github.com/earthboundkid/versioninfo/v2 v2.24.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
···
github.com/segmentio/asm v1.2.0 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
-
github.com/stretchr/objx v0.5.2 // indirect
github.com/whyrusleeping/cbor-gen v0.2.1-0.20241030202151-b7a6831be65e // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
+6 -8
go.sum
···
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe h1:VBhaqE5ewQgXbY5SfSWFZC/AwHFo7cHxZKFYi2ce9Yo=
-
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe/go.mod h1:RuQVrCGm42QNsgumKaR6se+XkFKfCPNwdCiTvqKRUck=
-
github.com/carlmjohnson/versioninfo v0.22.5 h1:O00sjOLUAFxYQjlN/bzYTuZiS0y6fWDQjMRvwtKgwwc=
-
github.com/carlmjohnson/versioninfo v0.22.5/go.mod h1:QT9mph3wcVfISUKd0i9sZfVrPviHuSF+cUtLjm2WSf8=
+
github.com/bluesky-social/indigo v0.0.0-20251127021457-6f2658724b36 h1:Vc+l4sltxQfBT8qC3dm87PRYInmxlGyF1dmpjaW0WkU=
+
github.com/bluesky-social/indigo v0.0.0-20251127021457-6f2658724b36/go.mod h1:Pm2I1+iDXn/hLbF7XCg/DsZi6uDCiOo7hZGWprSM7k0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
···
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+
github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/nbWyCm58eY3oUg=
+
github.com/earthboundkid/versioninfo/v2 v2.24.1/go.mod h1:VcWEooDEuyUJnMfbdTh0uFN4cfEIg+kHMuWB2CDCLjw=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
···
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
-
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
···
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=