An atproto PDS written in Go


.github/workflows/docker-image.yml  +63 -7
···
push:
  branches:
    - main
+  tags:
+    - 'v*'

env:
  REGISTRY: ghcr.io
···
jobs:
  build-and-push-image:
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - arch: amd64
+            runner: ubuntu-latest
+          - arch: arm64
+            runner: ubuntu-24.04-arm
+    runs-on: ${{ matrix.runner }}
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
-    #
+    outputs:
+      digest-amd64: ${{ matrix.arch == 'amd64' && steps.push.outputs.digest || '' }}
+      digest-arm64: ${{ matrix.arch == 'arm64' && steps.push.outputs.digest || '' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
+
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
-        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
+
      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
···
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
-            type=sha
-            type=sha,format=long
+            type=raw,value=latest,enable={{is_default_branch}},suffix=-${{ matrix.arch }}
+            type=sha,suffix=-${{ matrix.arch }}
+            type=sha,format=long,suffix=-${{ matrix.arch }}
+            type=semver,pattern={{version}},suffix=-${{ matrix.arch }}
+            type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.arch }}
+
      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        id: push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
+
+  publish-manifest:
+    needs: build-and-push-image
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      attestations: write
+      id-token: write
+    steps:
+      - name: Log in to the Container registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=raw,value=latest,enable={{is_default_branch}}
+            type=sha
+            type=sha,format=long
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+
+      - name: Create and push manifest
+        run: |
+          # Split tags into an array
+          readarray -t tags <<< "${{ steps.meta.outputs.tags }}"
+
+          # Create and push manifest for each tag
+          for tag in "${tags[@]}"; do
+            docker buildx imagetools create -t "$tag" \
+              "${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-amd64 }}" \
+              "${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-arm64 }}"
+          done
+
      # This step generates an artifact attestation for the image, which is an unforgeable statement about where and how it was built. It increases supply chain security for people who consume the image. For more information, see "[AUTOTITLE](/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds)."
      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v1
        with:
          subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}}
-          subject-digest: ${{ steps.push.outputs.digest }}
+          subject-digest: ${{ needs.build-and-push-image.outputs.digest-amd64 }}
          push-to-registry: true
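To double-check the stitched result once a build has published, something along these lines should work (image path follows the `ghcr.io/haileyok/cocoon` naming used elsewhere in this PR; the exact tags depend on what the metadata step produced):

```bash
# Inspect the combined manifest list; expect both linux/amd64 and linux/arm64
# entries pointing at the per-arch digests stitched together by publish-manifest.
docker buildx imagetools inspect ghcr.io/haileyok/cocoon:latest
```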
.gitignore  +1
···
.DS_Store
data/
keys/
+dist/
Caddyfile.postgres  +10
···
+{$COCOON_HOSTNAME} {
+	reverse_proxy cocoon:8080
+
+	encode gzip
+
+	log {
+		output file /data/access.log
+		format json
+	}
+}
Dockerfile  +1 -1
···
### Run stage
FROM debian:bookworm-slim AS run
-RUN apt-get update && apt-get install -y dumb-init runit ca-certificates && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y dumb-init runit ca-certificates curl && rm -rf /var/lib/apt/lists/*
ENTRYPOINT ["dumb-init", "--"]
WORKDIR /
Makefile  +37 -1
···
GIT_COMMIT := $(shell git rev-parse --short=9 HEAD)
VERSION := $(if $(GIT_TAG),$(GIT_TAG),dev-$(GIT_COMMIT))
+
+# Build output directory
+BUILD_DIR := dist
+
+# Platforms to build for
+PLATFORMS := \
+	linux/amd64 \
+	linux/arm64 \
+	linux/arm \
+	darwin/amd64 \
+	darwin/arm64 \
+	windows/amd64 \
+	windows/arm64 \
+	freebsd/amd64 \
+	freebsd/arm64 \
+	openbsd/amd64 \
+	openbsd/arm64
+
.PHONY: help
help: ## Print info about all commands
	@echo "Commands:"
···
build: ## Build all executables
	go build -ldflags "-X main.Version=$(VERSION)" -o cocoon ./cmd/cocoon
+
+.PHONY: build-all
+build-all: ## Build binaries for all architectures
+	@echo "Building for all architectures..."
+	@mkdir -p $(BUILD_DIR)
+	@$(foreach platform,$(PLATFORMS), \
+		$(eval OS := $(word 1,$(subst /, ,$(platform)))) \
+		$(eval ARCH := $(word 2,$(subst /, ,$(platform)))) \
+		$(eval EXT := $(if $(filter windows,$(OS)),.exe,)) \
+		$(eval OUTPUT := $(BUILD_DIR)/cocoon-$(VERSION)-$(OS)-$(ARCH)$(EXT)) \
+		echo "Building $(OS)/$(ARCH)..."; \
+		GOOS=$(OS) GOARCH=$(ARCH) go build -ldflags "-X main.Version=$(VERSION)" -o $(OUTPUT) ./cmd/cocoon && \
+		echo "  ✓ $(OUTPUT)" || echo "  ✗ Failed: $(OS)/$(ARCH)"; \
+	)
+	@echo "Done! Binaries are in $(BUILD_DIR)/"
+
+.PHONY: clean-dist
+clean-dist: ## Remove all built binaries
+	rm -rf $(BUILD_DIR)
+
.PHONY: run
run:
	go build -ldflags "-X main.Version=dev-local" -o cocoon ./cmd/cocoon && ./cocoon run
···
.PHONY: docker-build
docker-build:
-	docker build -t cocoon .
+	docker build -t cocoon .
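Example usage of the new targets (a sketch; actual file names depend on `VERSION` and the `OUTPUT` pattern above):

```bash
make build-all    # cross-compiles into dist/, e.g. dist/cocoon-dev-abc123def-linux-amd64
make clean-dist   # removes the dist/ directory
```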
README.md  +59 -6
···
docker-compose up -d
```
+
+**For PostgreSQL deployment:**
+```bash
+# Add POSTGRES_PASSWORD to your .env file first!
+docker-compose -f docker-compose.postgres.yaml up -d
+```
+
5. **Get your invite code**
   On first run, an invite code is automatically created. View it with:
···
### Optional Configuration

+#### Database Configuration
+
+By default, Cocoon uses SQLite, which requires no additional setup. For production deployments with higher traffic, you can use PostgreSQL:
+
+```bash
+# Database type: sqlite (default) or postgres
+COCOON_DB_TYPE="postgres"
+
+# PostgreSQL connection string (required if db-type is postgres)
+# Format: postgres://user:password@host:port/database?sslmode=disable
+COCOON_DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"
+
+# Or use the standard DATABASE_URL environment variable
+DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"
+```
+
+For SQLite (default):
+```bash
+COCOON_DB_TYPE="sqlite"
+COCOON_DB_NAME="/data/cocoon/cocoon.db"
+```
+
+> **Note**: When using PostgreSQL, database backups to S3 are not handled by Cocoon. Use `pg_dump` or your database provider's backup solution instead.
+
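A minimal external-backup sketch, assuming the `postgres` service, user, and database names from `docker-compose.postgres.yaml` in this PR:

```bash
# Dump the compose-managed database to a dated, compressed file
docker compose -f docker-compose.postgres.yaml exec -T postgres \
  pg_dump -U cocoon -d cocoon | gzip > "cocoon-$(date +%F).sql.gz"
```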
#### SMTP Email Settings
```bash
COCOON_SMTP_USER="your-smtp-username"
···
```
#### S3 Storage
+
+Cocoon supports S3-compatible storage for both database backups (SQLite only) and blob storage (images, videos, etc.):
+
```bash
+# Enable S3 backups (SQLite databases only - hourly backups)
COCOON_S3_BACKUPS_ENABLED=true
+
+# Enable S3 for blob storage (images, videos, etc.)
+# When enabled, blobs are stored in S3 instead of the database
COCOON_S3_BLOBSTORE_ENABLED=true
+
+# S3 configuration (works with AWS S3, MinIO, Cloudflare R2, etc.)
COCOON_S3_REGION="us-east-1"
COCOON_S3_BUCKET="your-bucket"
COCOON_S3_ENDPOINT="https://s3.amazonaws.com"
COCOON_S3_ACCESS_KEY="your-access-key"
COCOON_S3_SECRET_KEY="your-secret-key"
+
+# Optional: CDN/public URL for blob redirects
+# When set, com.atproto.sync.getBlob redirects to this URL instead of proxying
+COCOON_S3_CDN_URL="https://cdn.example.com"
```
+**Blob Storage Options:**
+- `COCOON_S3_BLOBSTORE_ENABLED=false` (default): Blobs stored in the database
+- `COCOON_S3_BLOBSTORE_ENABLED=true`: Blobs stored in S3 bucket under `blobs/{did}/{cid}`
+
+**Blob Serving Options:**
+- Without `COCOON_S3_CDN_URL`: Blobs are proxied through the PDS server
+- With `COCOON_S3_CDN_URL`: `getBlob` returns a 302 redirect to `{CDN_URL}/blobs/{did}/{cid}`
+
+> **Tip**: For Cloudflare R2, you can use the public bucket URL as the CDN URL. For AWS S3, you can use CloudFront or the S3 bucket URL directly if public access is enabled.
+
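A quick way to see which serving mode is active (hostname, `$DID`, and `$CID` are placeholders):

```bash
# With COCOON_S3_CDN_URL set, expect a 302 with a Location header pointing at
# {CDN_URL}/blobs/{did}/{cid}; without it, the PDS streams the blob bytes itself.
curl -sI "https://pds.example.com/xrpc/com.atproto.sync.getBlob?did=$DID&cid=$CID"
```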
### Management Commands
Create an invite code:
···
- [x] `com.atproto.repo.getRecord`
- [x] `com.atproto.repo.importRepo` (Works "okay". Use with extreme caution.)
- [x] `com.atproto.repo.listRecords`
-- [ ] `com.atproto.repo.listMissingBlobs`
+- [x] `com.atproto.repo.listMissingBlobs`
### Server
···
- [x] `com.atproto.server.createInviteCode`
- [x] `com.atproto.server.createInviteCodes`
- [x] `com.atproto.server.deactivateAccount`
-- [ ] `com.atproto.server.deleteAccount`
+- [x] `com.atproto.server.deleteAccount`
- [x] `com.atproto.server.deleteSession`
- [x] `com.atproto.server.describeServer`
- [ ] `com.atproto.server.getAccountInviteCodes`
-- [ ] `com.atproto.server.getServiceAuth`
+- [x] `com.atproto.server.getServiceAuth`
- ~~[ ] `com.atproto.server.listAppPasswords`~~ - not going to add app passwords
- [x] `com.atproto.server.refreshSession`
-- [ ] `com.atproto.server.requestAccountDelete`
+- [x] `com.atproto.server.requestAccountDelete`
- [x] `com.atproto.server.requestEmailConfirmation`
- [x] `com.atproto.server.requestEmailUpdate`
- [x] `com.atproto.server.requestPasswordReset`
-- [ ] `com.atproto.server.reserveSigningKey`
+- [x] `com.atproto.server.reserveSigningKey`
- [x] `com.atproto.server.resetPassword`
- ~~[ ] `com.atproto.server.revokeAppPassword`~~ - not going to add app passwords
- [x] `com.atproto.server.updateEmail`
···
### Other
-- [ ] `com.atproto.label.queryLabels`
+- [x] `com.atproto.label.queryLabels`
- [x] `com.atproto.moderation.createReport` (Note: this should be handled by proxying, not actually implemented in the PDS)
- [x] `app.bsky.actor.getPreferences`
- [x] `app.bsky.actor.putPreferences`
cmd/cocoon/main.go  +52 -4
···
	"github.com/lestrrat-go/jwx/v2/jwk"
	"github.com/urfave/cli/v2"
	"golang.org/x/crypto/bcrypt"
+	"gorm.io/driver/postgres"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)
···
			EnvVars: []string{"COCOON_DB_NAME"},
		},
		&cli.StringFlag{
+			Name:    "db-type",
+			Value:   "sqlite",
+			Usage:   "Database type: sqlite or postgres",
+			EnvVars: []string{"COCOON_DB_TYPE"},
+		},
+		&cli.StringFlag{
+			Name:    "database-url",
+			Aliases: []string{"db-url"},
+			Usage:   "PostgreSQL connection string (required if db-type is postgres)",
+			EnvVars: []string{"COCOON_DATABASE_URL", "DATABASE_URL"},
+		},
+		&cli.StringFlag{
			Name:    "did",
			EnvVars: []string{"COCOON_DID"},
		},
···
			Name:    "admin-password",
			EnvVars: []string{"COCOON_ADMIN_PASSWORD"},
		},
+		&cli.BoolFlag{
+			Name:    "require-invite",
+			EnvVars: []string{"COCOON_REQUIRE_INVITE"},
+			Value:   true,
+		},
		&cli.StringFlag{
			Name:    "smtp-user",
			EnvVars: []string{"COCOON_SMTP_USER"},
···
			EnvVars: []string{"COCOON_S3_SECRET_KEY"},
		},
		&cli.StringFlag{
+			Name:    "s3-cdn-url",
+			EnvVars: []string{"COCOON_S3_CDN_URL"},
+			Usage:   "Public URL for S3 blob redirects (e.g., https://cdn.example.com). When set, getBlob redirects to this URL instead of proxying.",
+		},
+		&cli.StringFlag{
			Name:    "session-secret",
			EnvVars: []string{"COCOON_SESSION_SECRET"},
		},
···
		s, err := server.New(&server.Args{
			Addr:            cmd.String("addr"),
			DbName:          cmd.String("db-name"),
+			DbType:          cmd.String("db-type"),
+			DatabaseURL:     cmd.String("database-url"),
			Did:             cmd.String("did"),
			Hostname:        cmd.String("hostname"),
			RotationKeyPath: cmd.String("rotation-key-path"),
···
			Version:       Version,
			Relays:        cmd.StringSlice("relays"),
			AdminPassword: cmd.String("admin-password"),
+			RequireInvite: cmd.Bool("require-invite"),
			SmtpUser:      cmd.String("smtp-user"),
			SmtpPass:      cmd.String("smtp-pass"),
			SmtpHost:      cmd.String("smtp-host"),
···
				Endpoint:  cmd.String("s3-endpoint"),
				AccessKey: cmd.String("s3-access-key"),
				SecretKey: cmd.String("s3-secret-key"),
+				CDNUrl:    cmd.String("s3-cdn-url"),
			},
			SessionSecret:     cmd.String("session-secret"),
			BlockstoreVariant: server.MustReturnBlockstoreVariant(cmd.String("blockstore-variant")),
···
				},
			},
			Action: func(cmd *cli.Context) error {
-				db, err := newDb()
+				db, err := newDb(cmd)
				if err != nil {
					return err
				}
···
			Action: func(cmd *cli.Context) error {
-				db, err := newDb()
+				db, err := newDb(cmd)
				if err != nil {
					return err
				}
···
	},
}

-func newDb() (*gorm.DB, error) {
-	return gorm.Open(sqlite.Open("cocoon.db"), &gorm.Config{})
+func newDb(cmd *cli.Context) (*gorm.DB, error) {
+	dbType := cmd.String("db-type")
+	if dbType == "" {
+		dbType = "sqlite"
+	}
+
+	switch dbType {
+	case "postgres":
+		// the database-url flag is backed by both COCOON_DATABASE_URL and DATABASE_URL
+		databaseURL := cmd.String("database-url")
+		if databaseURL == "" {
+			return nil, fmt.Errorf("COCOON_DATABASE_URL or DATABASE_URL must be set when using postgres")
+		}
+		return gorm.Open(postgres.Open(databaseURL), &gorm.Config{})
+	default:
+		dbName := cmd.String("db-name")
+		if dbName == "" {
+			dbName = "cocoon.db"
+		}
+		return gorm.Open(sqlite.Open(dbName), &gorm.Config{})
+	}
}
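For example, the same binary can be switched to PostgreSQL entirely through the environment (a sketch; the connection string is a placeholder):

```bash
COCOON_DB_TYPE=postgres \
COCOON_DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable" \
./cocoon run
```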
docker-compose.postgres.yaml  +158
···
+# Docker Compose with PostgreSQL
+#
+# Usage:
+#   docker-compose -f docker-compose.postgres.yaml up -d
+#
+# This file extends the base docker-compose.yaml with a PostgreSQL database.
+# Set the following in your .env file:
+#   COCOON_DB_TYPE=postgres
+#   POSTGRES_PASSWORD=your-secure-password
+
+version: '3.8'
+
+services:
+  postgres:
+    image: postgres:16-alpine
+    container_name: cocoon-postgres
+    environment:
+      POSTGRES_USER: cocoon
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
+      POSTGRES_DB: cocoon
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U cocoon -d cocoon"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    restart: unless-stopped
+
+  init-keys:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: ghcr.io/haileyok/cocoon:latest
+    container_name: cocoon-init-keys
+    volumes:
+      - ./keys:/keys
+      - ./data:/data/cocoon
+      - ./init-keys.sh:/init-keys.sh:ro
+    environment:
+      COCOON_DID: ${COCOON_DID}
+      COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+      COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+      COCOON_JWK_PATH: /keys/jwk.key
+      COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+      COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+      COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+    entrypoint: ["/bin/sh", "/init-keys.sh"]
+    restart: "no"
+
+  cocoon:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: ghcr.io/haileyok/cocoon:latest
+    container_name: cocoon-pds
+    depends_on:
+      init-keys:
+        condition: service_completed_successfully
+      postgres:
+        condition: service_healthy
+    ports:
+      - "8080:8080"
+    volumes:
+      - ./data:/data/cocoon
+      - ./keys/rotation.key:/keys/rotation.key:ro
+      - ./keys/jwk.key:/keys/jwk.key:ro
+    environment:
+      # Required settings
+      COCOON_DID: ${COCOON_DID}
+      COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+      COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+      COCOON_JWK_PATH: /keys/jwk.key
+      COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+      COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+      COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+      COCOON_SESSION_SECRET: ${COCOON_SESSION_SECRET}
+
+      # Database configuration - PostgreSQL
+      COCOON_ADDR: ":8080"
+      COCOON_DB_TYPE: postgres
+      COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable
+      COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite}
+
+      # Optional: SMTP settings for email
+      COCOON_SMTP_USER: ${COCOON_SMTP_USER:-}
+      COCOON_SMTP_PASS: ${COCOON_SMTP_PASS:-}
+      COCOON_SMTP_HOST: ${COCOON_SMTP_HOST:-}
+      COCOON_SMTP_PORT: ${COCOON_SMTP_PORT:-}
+      COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-}
+      COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-}
+
+      # Optional: S3 configuration
+      COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false}
+      COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false}
+      COCOON_S3_REGION: ${COCOON_S3_REGION:-}
+      COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-}
+      COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-}
+      COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-}
+      COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-}
+
+      # Optional: Fallback proxy
+      COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-}
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/xrpc/_health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 40s
+
+  create-invite:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: ghcr.io/haileyok/cocoon:latest
+    container_name: cocoon-create-invite
+    volumes:
+      - ./keys:/keys
+      - ./create-initial-invite.sh:/create-initial-invite.sh:ro
+    environment:
+      COCOON_DID: ${COCOON_DID}
+      COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+      COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+      COCOON_JWK_PATH: /keys/jwk.key
+      COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+      COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+      COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+      COCOON_DB_TYPE: postgres
+      COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable
+    depends_on:
+      cocoon:
+        condition: service_healthy
+    entrypoint: ["/bin/sh", "/create-initial-invite.sh"]
+    restart: "no"
+
+  caddy:
+    image: caddy:2-alpine
+    container_name: cocoon-caddy
+    ports:
+      - "80:80"
+      - "443:443"
+    volumes:
+      - ./Caddyfile.postgres:/etc/caddy/Caddyfile:ro
+      - caddy_data:/data
+      - caddy_config:/config
+    restart: unless-stopped
+    environment:
+      COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+      CADDY_ACME_EMAIL: ${COCOON_CONTACT_EMAIL:-}
+
+volumes:
+  postgres_data:
+    driver: local
+  caddy_data:
+    driver: local
+  caddy_config:
+    driver: local
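A plausible first run with this file, assuming `create-initial-invite.sh` prints the invite code to its stdout:

```bash
docker compose -f docker-compose.postgres.yaml up -d
# The one-shot create-invite service runs once cocoon is healthy
docker compose -f docker-compose.postgres.yaml logs create-invite
```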
docker-compose.yaml  +7 -2
···
      # Server configuration
      COCOON_ADDR: ":8080"
-      COCOON_DB_NAME: /data/cocoon/cocoon.db
+      COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite}
+      COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db}
+      COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-}
      COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite}
      # Optional: SMTP settings for email
···
      COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-}
      COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-}
      COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-}
+      COCOON_S3_CDN_URL: ${COCOON_S3_CDN_URL:-}
      # Optional: Fallback proxy
      COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-}
···
      COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
      COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
      COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
-      COCOON_DB_NAME: /data/cocoon/cocoon.db
+      COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite}
+      COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db}
+      COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-}
    depends_on:
      - init-keys
    entrypoint: ["/bin/sh", "/create-initial-invite.sh"]
models/models.go  +9
···
	PasswordResetCodeExpiresAt *time.Time
	PlcOperationCode           *string
	PlcOperationCodeExpiresAt  *time.Time
+	AccountDeleteCode          *string
+	AccountDeleteCodeExpiresAt *time.Time
	Password                   string
	SigningKey                 []byte
	Rev                        string
···
	Idx  int `gorm:"primaryKey"`
	Data []byte
}
+
+type ReservedKey struct {
+	KeyDid     string    `gorm:"primaryKey"`
+	Did        *string   `gorm:"index"`
+	PrivateKey []byte
+	CreatedAt  time.Time `gorm:"index"`
+}
oauth/dpop/nonce.go  +3 -2
···
}

func (n *Nonce) Check(nonce string) bool {
-	n.mu.RLock()
-	defer n.mu.RUnlock()
+	n.mu.Lock()
+	defer n.mu.Unlock()
+	n.rotate()
	return nonce == n.prev || nonce == n.curr || nonce == n.next
}
server/handle_label_query_labels.go  +34
···
+package server
+
+import (
+	"github.com/labstack/echo/v4"
+)
+
+type Label struct {
+	Ver *int    `json:"ver,omitempty"`
+	Src string  `json:"src"`
+	Uri string  `json:"uri"`
+	Cid *string `json:"cid,omitempty"`
+	Val string  `json:"val"`
+	Neg *bool   `json:"neg,omitempty"`
+	Cts string  `json:"cts"`
+	Exp *string `json:"exp,omitempty"`
+	Sig []byte  `json:"sig,omitempty"`
+}
+
+type ComAtprotoLabelQueryLabelsResponse struct {
+	Cursor *string `json:"cursor,omitempty"`
+	Labels []Label `json:"labels"`
+}
+
+func (s *Server) handleLabelQueryLabels(e echo.Context) error {
+	svc := e.Request().Header.Get("atproto-proxy")
+	if svc != "" || s.config.FallbackProxy != "" {
+		return s.handleProxy(e)
+	}
+
+	return e.JSON(200, ComAtprotoLabelQueryLabelsResponse{
+		Cursor: nil,
+		Labels: []Label{},
+	})
+}
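Behavior sketch for the new endpoint when neither an `atproto-proxy` header nor a fallback proxy is in play (hostname is a placeholder):

```bash
# A PDS does not label content itself, so the local answer is an empty page
curl -s "https://pds.example.com/xrpc/com.atproto.label.queryLabels?uriPatterns=*"
# -> {"labels":[]}
```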
server/handle_oauth_par.go  +5
···
	dpopProof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, "https://"+s.config.Hostname+e.Request().URL.String(), e.Request().Header, nil)
	if err != nil {
		if errors.Is(err, dpop.ErrUseDpopNonce) {
+			nonce := s.oauthProvider.NextNonce()
+			if nonce != "" {
+				e.Response().Header().Set("DPoP-Nonce", nonce)
+				e.Response().Header().Add("access-control-expose-headers", "DPoP-Nonce")
+			}
			return e.JSON(400, map[string]string{
				"error": "use_dpop_nonce",
			})
server/handle_oauth_token.go  +5
···
	proof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, e.Request().URL.String(), e.Request().Header, nil)
	if err != nil {
		if errors.Is(err, dpop.ErrUseDpopNonce) {
+			nonce := s.oauthProvider.NextNonce()
+			if nonce != "" {
+				e.Response().Header().Set("DPoP-Nonce", nonce)
+				e.Response().Header().Add("access-control-expose-headers", "DPoP-Nonce")
+			}
			return e.JSON(400, map[string]string{
				"error": "use_dpop_nonce",
			})
server/handle_repo_list_missing_blobs.go  +112
···
+package server
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/bluesky-social/indigo/atproto/atdata"
+	"github.com/haileyok/cocoon/internal/helpers"
+	"github.com/haileyok/cocoon/models"
+	"github.com/ipfs/go-cid"
+	"github.com/labstack/echo/v4"
+)
+
+type ComAtprotoRepoListMissingBlobsResponse struct {
+	Cursor *string                                    `json:"cursor,omitempty"`
+	Blobs  []ComAtprotoRepoListMissingBlobsRecordBlob `json:"blobs"`
+}
+
+type ComAtprotoRepoListMissingBlobsRecordBlob struct {
+	Cid       string `json:"cid"`
+	RecordUri string `json:"recordUri"`
+}
+
+func (s *Server) handleListMissingBlobs(e echo.Context) error {
+	urepo := e.Get("repo").(*models.RepoActor)
+
+	limitStr := e.QueryParam("limit")
+	cursor := e.QueryParam("cursor")
+
+	limit := 500
+	if limitStr != "" {
+		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 {
+			limit = l
+		}
+	}
+
+	var records []models.Record
+	if err := s.db.Raw("SELECT * FROM records WHERE did = ?", nil, urepo.Repo.Did).Scan(&records).Error; err != nil {
+		s.logger.Error("failed to get records for listMissingBlobs", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	type blobRef struct {
+		cid       cid.Cid
+		recordUri string
+	}
+	var allBlobRefs []blobRef
+
+	for _, rec := range records {
+		blobs := getBlobsFromRecord(rec.Value)
+		recordUri := fmt.Sprintf("at://%s/%s/%s", urepo.Repo.Did, rec.Nsid, rec.Rkey)
+		for _, b := range blobs {
+			allBlobRefs = append(allBlobRefs, blobRef{cid: cid.Cid(b.Ref), recordUri: recordUri})
+		}
+	}
+
+	missingBlobs := make([]ComAtprotoRepoListMissingBlobsRecordBlob, 0)
+	seenCids := make(map[string]bool)
+
+	for _, ref := range allBlobRefs {
+		cidStr := ref.cid.String()
+
+		if seenCids[cidStr] {
+			continue
+		}
+
+		if cursor != "" && cidStr <= cursor {
+			continue
+		}
+
+		var count int64
+		if err := s.db.Raw("SELECT COUNT(*) FROM blobs WHERE did = ? AND cid = ?", nil, urepo.Repo.Did, ref.cid.Bytes()).Scan(&count).Error; err != nil {
+			continue
+		}
+
+		if count == 0 {
+			missingBlobs = append(missingBlobs, ComAtprotoRepoListMissingBlobsRecordBlob{
+				Cid:       cidStr,
+				RecordUri: ref.recordUri,
+			})
+			seenCids[cidStr] = true
+
+			if len(missingBlobs) >= limit {
+				break
+			}
+		}
+	}
+
+	var nextCursor *string
+	if len(missingBlobs) > 0 && len(missingBlobs) >= limit {
+		lastCid := missingBlobs[len(missingBlobs)-1].Cid
+		nextCursor = &lastCid
+	}
+
+	return e.JSON(200, ComAtprotoRepoListMissingBlobsResponse{
+		Cursor: nextCursor,
+		Blobs:  missingBlobs,
+	})
+}
+
+func getBlobsFromRecord(data []byte) []atdata.Blob {
+	if len(data) == 0 {
+		return nil
+	}
+
+	decoded, err := atdata.UnmarshalCBOR(data)
+	if err != nil {
+		return nil
+	}
+
+	return atdata.ExtractBlobs(decoded)
+}
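Usage sketch; the route is registered behind the session middleware in `server.go`, so an access token is required (values are placeholders):

```bash
curl -s -H "Authorization: Bearer $ACCESS_JWT" \
  "https://pds.example.com/xrpc/com.atproto.repo.listMissingBlobs?limit=100"
```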
server/handle_server_create_account.go  +47 -15
···
	Handle     string  `json:"handle" validate:"required,atproto-handle"`
	Did        *string `json:"did" validate:"atproto-did"`
	Password   string  `json:"password" validate:"required"`
-	InviteCode string  `json:"inviteCode" validate:"required"`
+	InviteCode string  `json:"inviteCode" validate:"omitempty"`
}

type ComAtprotoServerCreateAccountResponse struct {
···
	}

	var ic models.InviteCode
-	if err := s.db.Raw("SELECT * FROM invite_codes WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
-		if err == gorm.ErrRecordNotFound {
+	if s.config.RequireInvite {
+		if strings.TrimSpace(request.InviteCode) == "" {
			return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
		}
-		s.logger.Error("error getting invite code from db", "error", err)
-		return helpers.ServerError(e, nil)
-	}
+
+		if err := s.db.Raw("SELECT * FROM invite_codes WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
+			if err == gorm.ErrRecordNotFound {
+				return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+			}
+			s.logger.Error("error getting invite code from db", "error", err)
+			return helpers.ServerError(e, nil)
+		}
-	if ic.RemainingUseCount < 1 {
-		return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+		if ic.RemainingUseCount < 1 {
+			return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+		}
	}

	// see if the email is already taken
···
	// TODO: unsupported domains
-	k, err := atcrypto.GeneratePrivateKeyK256()
-	if err != nil {
-		s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.createAccount", "error", err)
-		return helpers.ServerError(e, nil)
+	var k *atcrypto.PrivateKeyK256
+
+	if signupDid != "" {
+		reservedKey, err := s.getReservedKey(signupDid)
+		if err != nil {
+			s.logger.Error("error looking up reserved key", "error", err)
+		}
+		if reservedKey != nil {
+			k, err = atcrypto.ParsePrivateBytesK256(reservedKey.PrivateKey)
+			if err != nil {
+				s.logger.Error("error parsing reserved key", "error", err)
+				k = nil
+			} else {
+				defer func() {
+					if delErr := s.deleteReservedKey(reservedKey.KeyDid, reservedKey.Did); delErr != nil {
+						s.logger.Error("error deleting reserved key", "error", delErr)
+					}
+				}()
+			}
+		}
+	}
+
+	if k == nil {
+		k, err = atcrypto.GeneratePrivateKeyK256()
+		if err != nil {
+			s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.createAccount", "error", err)
+			return helpers.ServerError(e, nil)
+		}
	}

	if signupDid == "" {
···
		})
	}

-	if err := s.db.Raw("UPDATE invite_codes SET remaining_use_count = remaining_use_count - 1 WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
-		s.logger.Error("error decrementing use count", "error", err)
-		return helpers.ServerError(e, nil)
+	if s.config.RequireInvite {
+		if err := s.db.Raw("UPDATE invite_codes SET remaining_use_count = remaining_use_count - 1 WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
+			s.logger.Error("error decrementing use count", "error", err)
+			return helpers.ServerError(e, nil)
+		}
	}

	sess, err := s.createSession(&urepo)
server/handle_server_delete_account.go  +145
···
+package server
+
+import (
+	"context"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/bluesky-social/indigo/api/atproto"
+	"github.com/bluesky-social/indigo/events"
+	"github.com/bluesky-social/indigo/util"
+	"github.com/haileyok/cocoon/internal/helpers"
+	"github.com/labstack/echo/v4"
+	"golang.org/x/crypto/bcrypt"
+)
+
+type ComAtprotoServerDeleteAccountRequest struct {
+	Did      string `json:"did" validate:"required"`
+	Password string `json:"password" validate:"required"`
+	Token    string `json:"token" validate:"required"`
+}
+
+func (s *Server) handleServerDeleteAccount(e echo.Context) error {
+	var req ComAtprotoServerDeleteAccountRequest
+	if err := e.Bind(&req); err != nil {
+		s.logger.Error("error binding", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := e.Validate(&req); err != nil {
+		s.logger.Error("error validating", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	urepo, err := s.getRepoActorByDid(req.Did)
+	if err != nil {
+		s.logger.Error("error getting repo", "error", err)
+		return echo.NewHTTPError(400, "account not found")
+	}
+
+	if err := bcrypt.CompareHashAndPassword([]byte(urepo.Repo.Password), []byte(req.Password)); err != nil {
+		s.logger.Error("password mismatch", "error", err)
+		return echo.NewHTTPError(401, "Invalid did or password")
+	}
+
+	if urepo.Repo.AccountDeleteCode == nil || urepo.Repo.AccountDeleteCodeExpiresAt == nil {
+		s.logger.Error("no deletion token found for account")
+		return echo.NewHTTPError(400, map[string]interface{}{
+			"error":   "InvalidToken",
+			"message": "Token is invalid",
+		})
+	}
+
+	if *urepo.Repo.AccountDeleteCode != req.Token {
+		s.logger.Error("deletion token mismatch")
+		return echo.NewHTTPError(400, map[string]interface{}{
+			"error":   "InvalidToken",
+			"message": "Token is invalid",
+		})
+	}
+
+	if time.Now().UTC().After(*urepo.Repo.AccountDeleteCodeExpiresAt) {
+		s.logger.Error("deletion token expired")
+		return echo.NewHTTPError(400, map[string]interface{}{
+			"error":   "ExpiredToken",
+			"message": "Token is expired",
+		})
+	}
+
+	tx := s.db.BeginDangerously()
+	if tx.Error != nil {
+		s.logger.Error("error starting transaction", "error", tx.Error)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM blocks WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting blocks", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM records WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting records", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM blobs WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting blobs", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM tokens WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting tokens", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM refresh_tokens WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting refresh tokens", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM reserved_keys WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting reserved keys", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM invite_codes WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting invite codes", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM actors WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting actor", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Exec("DELETE FROM repos WHERE did = ?", nil, req.Did).Error; err != nil {
+		tx.Rollback()
+		s.logger.Error("error deleting repo", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if err := tx.Commit().Error; err != nil {
+		s.logger.Error("error committing transaction", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
+		RepoAccount: &atproto.SyncSubscribeRepos_Account{
+			Active: false,
+			Did:    req.Did,
+			Status: to.StringPtr("deleted"),
+			Seq:    time.Now().UnixMicro(),
+			Time:   time.Now().Format(util.ISO8601),
+		},
+	})
+
+	return e.NoContent(200)
+}
server/handle_server_describe_server.go  +1 -1
···
func (s *Server) handleDescribeServer(e echo.Context) error {
	return e.JSON(200, ComAtprotoServerDescribeServerResponse{
-		InviteCodeRequired:        true,
+		InviteCodeRequired:        s.config.RequireInvite,
		PhoneVerificationRequired: false,
		AvailableUserDomains:      []string{"." + s.config.Hostname}, // TODO: more
		Links: ComAtprotoServerDescribeServerResponseLinks{
server/handle_server_get_service_auth.go  +10 -3
···
	Aud string `query:"aud" validate:"required,atproto-did"`
	// exp should be a float, as some clients will send a non-integer expiration
	Exp float64 `query:"exp"`
-	Lxm string  `query:"lxm" validate:"required,atproto-nsid"`
+	Lxm string  `query:"lxm"`
}

func (s *Server) handleServerGetServiceAuth(e echo.Context) error {
···
		return helpers.InputError(e, to.StringPtr("may not generate auth tokens recursively"))
	}

-	maxExp := now + (60 * 30)
+	var maxExp int64
+	if req.Lxm != "" {
+		maxExp = now + (60 * 60)
+	} else {
+		maxExp = now + 60
+	}

	if exp > maxExp {
		return helpers.InputError(e, to.StringPtr("expiration too big. smoller please"))
	}
···
	payload := map[string]any{
		"iss": repo.Repo.Did,
		"aud": req.Aud,
-		"lxm": req.Lxm,
		"jti": uuid.NewString(),
		"exp": exp,
		"iat": now,
+	}
+	if req.Lxm != "" {
+		payload["lxm"] = req.Lxm
	}

	pj, err := json.Marshal(payload)
	if err != nil {
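Request sketch reflecting the new policy: with `lxm` the token may expire up to an hour out, without it only a minute (values are placeholders):

```bash
curl -s -H "Authorization: Bearer $ACCESS_JWT" \
  "https://pds.example.com/xrpc/com.atproto.server.getServiceAuth?aud=did:web:api.bsky.app&lxm=app.bsky.feed.getFeedSkeleton"
```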
server/handle_server_request_account_delete.go  +49
···
+package server
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/haileyok/cocoon/internal/helpers"
+	"github.com/haileyok/cocoon/models"
+	"github.com/labstack/echo/v4"
+)
+
+func (s *Server) handleServerRequestAccountDelete(e echo.Context) error {
+	urepo := e.Get("repo").(*models.RepoActor)
+
+	token := fmt.Sprintf("%s-%s", helpers.RandomVarchar(5), helpers.RandomVarchar(5))
+	expiresAt := time.Now().UTC().Add(15 * time.Minute)
+
+	if err := s.db.Exec("UPDATE repos SET account_delete_code = ?, account_delete_code_expires_at = ? WHERE did = ?", nil, token, expiresAt, urepo.Repo.Did).Error; err != nil {
+		s.logger.Error("error setting deletion token", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if urepo.Email != "" {
+		if err := s.sendAccountDeleteEmail(urepo.Email, urepo.Actor.Handle, token); err != nil {
+			s.logger.Error("error sending account deletion email", "error", err)
+		}
+	}
+
+	return e.NoContent(200)
+}
+
+func (s *Server) sendAccountDeleteEmail(email, handle, token string) error {
+	if s.mail == nil {
+		return nil
+	}
+
+	s.mailLk.Lock()
+	defer s.mailLk.Unlock()
+
+	s.mail.To(email)
+	s.mail.Subject("Account Deletion Request for " + s.config.Hostname)
+	s.mail.Plain().Set(fmt.Sprintf("Hello %s. Your account deletion code is %s. This code will expire in fifteen minutes. If you did not request this, please ignore this email.", handle, token))
+
+	if err := s.mail.Send(); err != nil {
+		return err
+	}
+
+	return nil
+}
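The two endpoints combine into the usual two-step flow; a sketch with placeholder values (the token shape follows the `RandomVarchar(5)-RandomVarchar(5)` format above):

```bash
# Step 1: request a deletion code (emailed to the account) over an authed session
curl -s -X POST -H "Authorization: Bearer $ACCESS_JWT" \
  https://pds.example.com/xrpc/com.atproto.server.requestAccountDelete

# Step 2: confirm with did + password + emailed token
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"did":"did:plc:example","password":"hunter2","token":"ABCDE-FGHIJ"}' \
  https://pds.example.com/xrpc/com.atproto.server.deleteAccount
```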
server/handle_server_reserve_signing_key.go  +95
···
+package server
+
+import (
+	"time"
+
+	"github.com/bluesky-social/indigo/atproto/atcrypto"
+	"github.com/haileyok/cocoon/internal/helpers"
+	"github.com/haileyok/cocoon/models"
+	"github.com/labstack/echo/v4"
+)
+
+type ServerReserveSigningKeyRequest struct {
+	Did *string `json:"did"`
+}
+
+type ServerReserveSigningKeyResponse struct {
+	SigningKey string `json:"signingKey"`
+}
+
+func (s *Server) handleServerReserveSigningKey(e echo.Context) error {
+	var req ServerReserveSigningKeyRequest
+	if err := e.Bind(&req); err != nil {
+		s.logger.Error("could not bind reserve signing key request", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	if req.Did != nil && *req.Did != "" {
+		var existing models.ReservedKey
+		if err := s.db.Raw("SELECT * FROM reserved_keys WHERE did = ?", nil, *req.Did).Scan(&existing).Error; err == nil && existing.KeyDid != "" {
+			return e.JSON(200, ServerReserveSigningKeyResponse{
+				SigningKey: existing.KeyDid,
+			})
+		}
+	}
+
+	k, err := atcrypto.GeneratePrivateKeyK256()
+	if err != nil {
+		s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	pubKey, err := k.PublicKey()
+	if err != nil {
+		s.logger.Error("error getting public key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	keyDid := pubKey.DIDKey()
+
+	reservedKey := models.ReservedKey{
+		KeyDid:     keyDid,
+		Did:        req.Did,
+		PrivateKey: k.Bytes(),
+		CreatedAt:  time.Now(),
+	}
+
+	if err := s.db.Create(&reservedKey, nil).Error; err != nil {
+		s.logger.Error("error storing reserved key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	s.logger.Info("reserved signing key", "keyDid", keyDid, "forDid", req.Did)
+
+	return e.JSON(200, ServerReserveSigningKeyResponse{
+		SigningKey: keyDid,
+	})
+}
+
+func (s *Server) getReservedKey(keyDidOrDid string) (*models.ReservedKey, error) {
+	var reservedKey models.ReservedKey
+
+	if err := s.db.Raw("SELECT * FROM reserved_keys WHERE key_did = ?", nil, keyDidOrDid).Scan(&reservedKey).Error; err == nil && reservedKey.KeyDid != "" {
+		return &reservedKey, nil
+	}
+
+	if err := s.db.Raw("SELECT * FROM reserved_keys WHERE did = ?", nil, keyDidOrDid).Scan(&reservedKey).Error; err == nil && reservedKey.KeyDid != "" {
+		return &reservedKey, nil
+	}
+
+	return nil, nil
+}
+
+func (s *Server) deleteReservedKey(keyDid string, did *string) error {
+	if err := s.db.Exec("DELETE FROM reserved_keys WHERE key_did = ?", nil, keyDid).Error; err != nil {
+		return err
+	}
+
+	if did != nil && *did != "" {
+		if err := s.db.Exec("DELETE FROM reserved_keys WHERE did = ?", nil, *did).Error; err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
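Request sketch (hostname and DID are placeholders; the response carries the reserved key as a `did:key` string, which `createAccount` will later consume via `getReservedKey`):

```bash
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"did":"did:plc:example"}' \
  https://pds.example.com/xrpc/com.atproto.server.reserveSigningKey
# -> {"signingKey":"did:key:zQ3sh..."}
```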
server/handle_sync_get_blob.go  +9 -2
···
			buf.Write(p.Data)
		}
	} else if blob.Storage == "s3" {
-		if !(s.s3Config != nil && s.s3Config.BlobstoreEnabled) {
+		if !(s.s3Config != nil && s.s3Config.BlobstoreEnabled) {
			s.logger.Error("s3 storage disabled")
			return helpers.ServerError(e, nil)
+		}
+
+		blobKey := fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())
+
+		if s.s3Config.CDNUrl != "" {
+			redirectUrl := fmt.Sprintf("%s/%s", s.s3Config.CDNUrl, blobKey)
+			return e.Redirect(302, redirectUrl)
		}

		config := &aws.Config{
···
		svc := s3.New(sess)

		if result, err := svc.GetObject(&s3.GetObjectInput{
			Bucket: aws.String(s.s3Config.Bucket),
-			Key:    aws.String(fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())),
+			Key:    aws.String(blobKey),
		}); err != nil {
			s.logger.Error("error getting blob from s3", "error", err)
			return helpers.ServerError(e, nil)
server/handle_well_known.go  +33
···
import (
	"fmt"
+	"strings"

	"github.com/Azure/go-autorest/autorest/to"
+	"github.com/haileyok/cocoon/internal/helpers"
	"github.com/labstack/echo/v4"
+	"gorm.io/gorm"
)

var (
···
		},
	})
+}
+
+func (s *Server) handleAtprotoDid(e echo.Context) error {
+	host := e.Request().Host
+	if host == "" {
+		return helpers.InputError(e, to.StringPtr("Invalid handle."))
+	}
+
+	host = strings.Split(host, ":")[0]
+	host = strings.ToLower(strings.TrimSpace(host))
+
+	if host == s.config.Hostname {
+		return e.String(200, s.config.Did)
+	}
+
+	suffix := "." + s.config.Hostname
+	if !strings.HasSuffix(host, suffix) {
+		return e.NoContent(404)
+	}
+
+	actor, err := s.getActorByHandle(host)
+	if err != nil {
+		if err == gorm.ErrRecordNotFound {
+			return e.NoContent(404)
+		}
+		s.logger.Error("error looking up actor by handle", "error", err)
+		return helpers.ServerError(e, nil)
+	}
+
+	return e.String(200, actor.Did)
}

func (s *Server) handleOauthProtectedResource(e echo.Context) error {
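A hedged verification of the new handle endpoint (hostnames and DID are placeholders):

```bash
# A handle hosted under the PDS hostname should return its DID as plain text
curl -s https://alice.pds.example.com/.well-known/atproto-did
# -> did:plc:example
```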
server/middleware.go  +9 -2
···
	proof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, "https://"+s.config.Hostname+e.Request().URL.String(), e.Request().Header, to.StringPtr(accessToken))
	if err != nil {
		if errors.Is(err, dpop.ErrUseDpopNonce) {
-			return e.JSON(400, map[string]string{
+			e.Response().Header().Set("WWW-Authenticate", `DPoP error="use_dpop_nonce"`)
+			e.Response().Header().Add("access-control-expose-headers", "WWW-Authenticate")
+			return e.JSON(401, map[string]string{
				"error": "use_dpop_nonce",
			})
		}
···
	}

	if time.Now().After(oauthToken.ExpiresAt) {
-		return helpers.ExpiredTokenError(e)
+		e.Response().Header().Set("WWW-Authenticate", `DPoP error="invalid_token", error_description="Token expired"`)
+		e.Response().Header().Add("access-control-expose-headers", "WWW-Authenticate")
+		return e.JSON(401, map[string]string{
+			"error":             "invalid_token",
+			"error_description": "Token expired",
+		})
	}

	repo, err := s.getRepoActorByDid(oauthToken.Sub)
server/server.go  +46 -3
···
	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
	slogecho "github.com/samber/slog-echo"
+	"gorm.io/driver/postgres"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)
···
	Bucket    string
	AccessKey string
	SecretKey string
+	CDNUrl    string
}

type Server struct {
···
	requestCrawlMu sync.Mutex
	dbName         string
+	dbType         string
	s3Config       *S3Config
}

type Args struct {
	Addr        string
	DbName      string
+	DbType      string
+	DatabaseURL string
	Logger      *slog.Logger
	Version     string
	Did         string
···
	ContactEmail  string
	Relays        []string
	AdminPassword string
+	RequireInvite bool
	SmtpUser      string
	SmtpPass      string
···
	EnforcePeering    bool
	Relays            []string
	AdminPassword     string
+	RequireInvite     bool
	SmtpEmail         string
	SmtpName          string
	BlockstoreVariant BlockstoreVariant
···
		IdleTimeout:  5 * time.Minute,
	}

-	gdb, err := gorm.Open(sqlite.Open(args.DbName), &gorm.Config{})
-	if err != nil {
-		return nil, err
+	dbType := args.DbType
+	if dbType == "" {
+		dbType = "sqlite"
+	}
+
+	var gdb *gorm.DB
+	var err error
+	switch dbType {
+	case "postgres":
+		if args.DatabaseURL == "" {
+			return nil, fmt.Errorf("database-url must be set when using postgres")
+		}
+		gdb, err = gorm.Open(postgres.Open(args.DatabaseURL), &gorm.Config{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to connect to postgres: %w", err)
+		}
+		args.Logger.Info("connected to PostgreSQL database")
+	default:
+		gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to open sqlite database: %w", err)
+		}
+		args.Logger.Info("connected to SQLite database", "path", args.DbName)
	}

	dbw := db.NewDB(gdb)
···
		EnforcePeering:    false,
		Relays:            args.Relays,
		AdminPassword:     args.AdminPassword,
+		RequireInvite:     args.RequireInvite,
		SmtpName:          args.SmtpName,
		SmtpEmail:         args.SmtpEmail,
		BlockstoreVariant: args.BlockstoreVariant,
···
		passport: identity.NewPassport(h, identity.NewMemCache(10_000)),
		dbName:   args.DbName,
+		dbType:   dbType,
		s3Config: args.S3Config,
		oauthProvider: provider.NewProvider(provider.Args{
···
	s.echo.GET("/", s.handleRoot)
	s.echo.GET("/xrpc/_health", s.handleHealth)
	s.echo.GET("/.well-known/did.json", s.handleWellKnown)
+	s.echo.GET("/.well-known/atproto-did", s.handleAtprotoDid)
	s.echo.GET("/.well-known/oauth-protected-resource", s.handleOauthProtectedResource)
	s.echo.GET("/.well-known/oauth-authorization-server", s.handleOauthAuthorizationServer)
	s.echo.GET("/robots.txt", s.handleRobots)
···
	s.echo.POST("/xrpc/com.atproto.server.createAccount", s.handleCreateAccount)
	s.echo.POST("/xrpc/com.atproto.server.createSession", s.handleCreateSession)
	s.echo.GET("/xrpc/com.atproto.server.describeServer", s.handleDescribeServer)
+	s.echo.POST("/xrpc/com.atproto.server.reserveSigningKey", s.handleServerReserveSigningKey)
	s.echo.GET("/xrpc/com.atproto.repo.describeRepo", s.handleDescribeRepo)
	s.echo.GET("/xrpc/com.atproto.sync.listRepos", s.handleListRepos)
···
	s.echo.GET("/xrpc/com.atproto.sync.listBlobs", s.handleSyncListBlobs)
	s.echo.GET("/xrpc/com.atproto.sync.getBlob", s.handleSyncGetBlob)
+	// labels
+	s.echo.GET("/xrpc/com.atproto.label.queryLabels", s.handleLabelQueryLabels)
+
	// account
	s.echo.GET("/account", s.handleAccount)
	s.echo.POST("/account/revoke", s.handleAccountRevoke)
···
	s.echo.GET("/xrpc/com.atproto.server.checkAccountStatus", s.handleServerCheckAccountStatus, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
	s.echo.POST("/xrpc/com.atproto.server.deactivateAccount", s.handleServerDeactivateAccount, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
	s.echo.POST("/xrpc/com.atproto.server.activateAccount", s.handleServerActivateAccount, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+	s.echo.POST("/xrpc/com.atproto.server.requestAccountDelete", s.handleServerRequestAccountDelete, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+	s.echo.POST("/xrpc/com.atproto.server.deleteAccount", s.handleServerDeleteAccount)
	// repo
+	s.echo.GET("/xrpc/com.atproto.repo.listMissingBlobs", s.handleListMissingBlobs, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
	s.echo.POST("/xrpc/com.atproto.repo.createRecord", s.handleCreateRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
	s.echo.POST("/xrpc/com.atproto.repo.putRecord", s.handlePutRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
	s.echo.POST("/xrpc/com.atproto.repo.deleteRecord", s.handleDeleteRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
···
		&models.Record{},
		&models.Blob{},
		&models.BlobPart{},
+		&models.ReservedKey{},
		&provider.OauthToken{},
		&provider.OauthAuthorizationRequest{},
	)
···
}

func (s *Server) doBackup() {
+	if s.dbType == "postgres" {
+		s.logger.Info("skipping S3 backup - PostgreSQL backups should be handled externally (pg_dump, managed database backups, etc.)")
+		return
+	}
+
	start := time.Now()
	s.logger.Info("beginning backup to s3...")