An atproto PDS written in Go


Changed files
+3139 -690
.github
workflows
blockstore
cmd
cocoon
identity
internal
db
helpers
models
oauth
plc
recording_blockstore
server
sqlite_blockstore
+1 -1
.env.example
···
COCOON_RELAYS=https://bsky.network
# Generate with `openssl rand -hex 16`
COCOON_ADMIN_PASSWORD=
-
# openssl rand -hex 32
+
# Generate with `openssl rand -hex 32`
COCOON_SESSION_SECRET=
+116
.github/workflows/docker-image.yml
···
+
name: Docker image
+
+
on:
+
workflow_dispatch:
+
push:
+
branches:
+
- main
+
tags:
+
- 'v*'
+
+
env:
+
REGISTRY: ghcr.io
+
IMAGE_NAME: ${{ github.repository }}
+
+
jobs:
+
build-and-push-image:
+
strategy:
+
matrix:
+
include:
+
- arch: amd64
+
runner: ubuntu-latest
+
- arch: arm64
+
runner: ubuntu-24.04-arm
+
runs-on: ${{ matrix.runner }}
+
# Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
+
permissions:
+
contents: read
+
packages: write
+
attestations: write
+
id-token: write
+
outputs:
+
digest-amd64: ${{ matrix.arch == 'amd64' && steps.push.outputs.digest || '' }}
+
digest-arm64: ${{ matrix.arch == 'arm64' && steps.push.outputs.digest || '' }}
+
steps:
+
- name: Checkout repository
+
uses: actions/checkout@v4
+
+
# Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
+
- name: Log in to the Container registry
+
uses: docker/login-action@v3
+
with:
+
registry: ${{ env.REGISTRY }}
+
username: ${{ github.actor }}
+
password: ${{ secrets.GITHUB_TOKEN }}
+
+
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
+
- name: Extract metadata (tags, labels) for Docker
+
id: meta
+
uses: docker/metadata-action@v5
+
with:
+
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
tags: |
+
type=raw,value=latest,enable={{is_default_branch}},suffix=-${{ matrix.arch }}
+
type=sha,suffix=-${{ matrix.arch }}
+
type=sha,format=long,suffix=-${{ matrix.arch }}
+
type=semver,pattern={{version}},suffix=-${{ matrix.arch }}
+
type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.arch }}
+
+
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
+
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
+
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
+
- name: Build and push Docker image
+
id: push
+
uses: docker/build-push-action@v6
+
with:
+
context: .
+
push: true
+
tags: ${{ steps.meta.outputs.tags }}
+
labels: ${{ steps.meta.outputs.labels }}
+
+
publish-manifest:
+
needs: build-and-push-image
+
runs-on: ubuntu-latest
+
permissions:
+
packages: write
+
attestations: write
+
id-token: write
+
steps:
+
- name: Log in to the Container registry
+
uses: docker/login-action@v3
+
with:
+
registry: ${{ env.REGISTRY }}
+
username: ${{ github.actor }}
+
password: ${{ secrets.GITHUB_TOKEN }}
+
+
- name: Extract metadata (tags, labels) for Docker
+
id: meta
+
uses: docker/metadata-action@v5
+
with:
+
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
tags: |
+
type=raw,value=latest,enable={{is_default_branch}}
+
type=sha
+
type=sha,format=long
+
type=semver,pattern={{version}}
+
type=semver,pattern={{major}}.{{minor}}
+
+
- name: Create and push manifest
+
run: |
+
# Split tags into an array
+
readarray -t tags <<< "${{ steps.meta.outputs.tags }}"
+
+
# Create and push manifest for each tag
+
for tag in "${tags[@]}"; do
+
docker buildx imagetools create -t "$tag" \
+
"${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-amd64 }}" \
+
"${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-arm64 }}"
+
done
+
+
# This step generates an artifact attestation for the image, which is an unforgeable statement about where and how it was built. It increases supply chain security for people who consume the image. For more information, see "[Using artifact attestations to establish provenance for builds](/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds)."
+
- name: Generate artifact attestation
+
uses: actions/attest-build-provenance@v1
+
with:
+
subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
subject-digest: ${{ needs.build-and-push-image.outputs.digest-amd64 }}
+
push-to-registry: true
+3
.gitignore
···
*.key
*.secret
.DS_Store
+
data/
+
keys/
+
dist/
+10
Caddyfile
···
+
{$COCOON_HOSTNAME} {
+
reverse_proxy localhost:8080
+
+
encode gzip
+
+
log {
+
output file /data/access.log
+
format json
+
}
+
}
+10
Caddyfile.postgres
···
+
{$COCOON_HOSTNAME} {
+
reverse_proxy cocoon:8080
+
+
encode gzip
+
+
log {
+
output file /data/access.log
+
format json
+
}
+
}
+25
Dockerfile
···
+
### Compile stage
+
FROM golang:1.25.1-bookworm AS build-env
+
+
ADD . /dockerbuild
+
WORKDIR /dockerbuild
+
+
RUN GIT_VERSION=$(git describe --tags --long --always || echo "dev-local") && \
+
go mod tidy && \
+
go build -ldflags "-X main.Version=$GIT_VERSION" -o cocoon ./cmd/cocoon
+
+
### Run stage
+
FROM debian:bookworm-slim AS run
+
+
RUN apt-get update && apt-get install -y dumb-init runit ca-certificates curl && rm -rf /var/lib/apt/lists/*
+
ENTRYPOINT ["dumb-init", "--"]
+
+
WORKDIR /
+
RUN mkdir -p data/cocoon
+
COPY --from=build-env /dockerbuild/cocoon /
+
+
CMD ["/cocoon", "run"]
+
+
LABEL org.opencontainers.image.source=https://github.com/haileyok/cocoon
+
LABEL org.opencontainers.image.description="Cocoon ATProto PDS"
+
LABEL org.opencontainers.image.licenses=MIT
+40
Makefile
···
GIT_COMMIT := $(shell git rev-parse --short=9 HEAD)
VERSION := $(if $(GIT_TAG),$(GIT_TAG),dev-$(GIT_COMMIT))
+
# Build output directory
+
BUILD_DIR := dist
+
+
# Platforms to build for
+
PLATFORMS := \
+
linux/amd64 \
+
linux/arm64 \
+
linux/arm \
+
darwin/amd64 \
+
darwin/arm64 \
+
windows/amd64 \
+
windows/arm64 \
+
freebsd/amd64 \
+
freebsd/arm64 \
+
openbsd/amd64 \
+
openbsd/arm64
+
.PHONY: help
help: ## Print info about all commands
@echo "Commands:"
···
build: ## Build all executables
go build -ldflags "-X main.Version=$(VERSION)" -o cocoon ./cmd/cocoon
+
.PHONY: build-release
+
build-all: ## Build binaries for all architectures
+
@echo "Building for all architectures..."
+
@mkdir -p $(BUILD_DIR)
+
@$(foreach platform,$(PLATFORMS), \
+
$(eval OS := $(word 1,$(subst /, ,$(platform)))) \
+
$(eval ARCH := $(word 2,$(subst /, ,$(platform)))) \
+
$(eval EXT := $(if $(filter windows,$(OS)),.exe,)) \
+
$(eval OUTPUT := $(BUILD_DIR)/cocoon-$(VERSION)-$(OS)-$(ARCH)$(EXT)) \
+
echo "Building $(OS)/$(ARCH)..."; \
+
GOOS=$(OS) GOARCH=$(ARCH) go build -ldflags "-X main.Version=$(VERSION)" -o $(OUTPUT) ./cmd/cocoon && \
+
echo " โœ“ $(OUTPUT)" || echo " โœ— Failed: $(OS)/$(ARCH)"; \
+
)
+
@echo "Done! Binaries are in $(BUILD_DIR)/"
+
+
.PHONY: clean-dist
+
clean-dist: ## Remove all built binaries
+
rm -rf $(BUILD_DIR)
+
.PHONY: run
run:
go build -ldflags "-X main.Version=dev-local" -o cocoon ./cmd/cocoon && ./cocoon run
···
.env:
if [ ! -f ".env" ]; then cp example.dev.env .env; fi
+
+
.PHONY: docker-build
+
docker-build:
+
docker build -t cocoon .
+248 -60
README.md
···
# Cocoon
> [!WARNING]
-
You should not use this PDS. You should not rely on this code as a reference for a PDS implementation. You should not trust this code. Using this PDS implementation may result in data loss, corruption, etc.
+
I migrated my main account to this PDS and have been running it for months now without issue; however, I am still not responsible if things go awry, particularly during account migration. Please use caution.
Cocoon is a PDS implementation in Go. It is highly experimental, and is not ready for any production use.
-
### Impmlemented Endpoints
+
## Quick Start with Docker Compose
+
+
### Prerequisites
+
+
- Docker and Docker Compose installed
+
- A domain name pointing to your server (for automatic HTTPS)
+
- Ports 80 and 443 open in your firewall, e.g. UFW (see the example below)
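If you use UFW, opening these ports looks roughly like the following (adapt this to whatever firewall you actually run):

```bash
# Hypothetical UFW commands; substitute your own firewall tooling as needed
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
```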
+
+
### Installation
+
+
1. **Clone the repository**
+
```bash
+
git clone https://github.com/haileyok/cocoon.git
+
cd cocoon
+
```
+
+
2. **Create your configuration file**
+
```bash
+
cp .env.example .env
+
```
+
+
3. **Edit `.env` with your settings**
+
+
Required settings:
+
```bash
+
COCOON_DID="did:web:your-domain.com"
+
COCOON_HOSTNAME="your-domain.com"
+
COCOON_CONTACT_EMAIL="you@example.com"
+
COCOON_RELAYS="https://bsky.network"
+
+
# Generate with: openssl rand -hex 16
+
COCOON_ADMIN_PASSWORD="your-secure-password"
+
+
# Generate with: openssl rand -hex 32
+
COCOON_SESSION_SECRET="your-session-secret"
+
```
+
+
4. **Start the services**
+
```bash
+
# Pull pre-built image from GitHub Container Registry
+
docker-compose pull
+
docker-compose up -d
+
```
+
+
Or build locally:
+
```bash
+
docker-compose build
+
docker-compose up -d
+
```
+
+
**For PostgreSQL deployment:**
+
```bash
+
# Add POSTGRES_PASSWORD to your .env file first!
+
docker-compose -f docker-compose.postgres.yaml up -d
+
```
+
+
5. **Get your invite code**
+
+
On first run, an invite code is automatically created. View it with:
+
```bash
+
docker-compose logs create-invite
+
```
+
+
Or check the saved file:
+
```bash
+
cat keys/initial-invite-code.txt
+
```
+
+
**IMPORTANT**: Save this invite code! You'll need it to create your first account.
+
+
6. **Monitor the services**
+
```bash
+
docker-compose logs -f
+
```
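Once the containers report healthy, you can also hit the health endpoint directly. This is a minimal check that assumes the default `:8080` address used by the compose files:

```bash
# The compose healthchecks poll this same endpoint; a 200 response means the PDS is up
curl -f http://localhost:8080/xrpc/_health
```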
+
+
### What Gets Set Up
+
+
The Docker Compose setup includes:
+
+
- **init-keys**: Automatically generates cryptographic keys (rotation key and JWK) on first run
+
- **cocoon**: The main PDS service running on port 8080
+
- **create-invite**: Automatically creates an initial invite code after Cocoon starts (first run only)
+
- **caddy**: Reverse proxy with automatic HTTPS via Let's Encrypt
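If automatic HTTPS doesn't come up as expected, the Caddy container's logs show the ACME/Let's Encrypt activity:

```bash
# Follow only the reverse proxy's logs to debug certificate issuance
docker-compose logs -f caddy
```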
+
+
### Data Persistence
+
+
The following directories will be created automatically:
+
+
- `./keys/` - Cryptographic keys (generated automatically)
+
- `rotation.key` - PDS rotation key
+
- `jwk.key` - JWK private key
+
- `initial-invite-code.txt` - Your first invite code (first run only)
+
- `./data/` - SQLite database and blockstore
+
- Docker volumes for Caddy configuration and certificates
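Because `keys/` and `data/` live on the host, a plain tarball is one simple way to snapshot them before an upgrade. This is only a sketch, not a substitute for the S3 backup options described below:

```bash
# Rough snapshot of the host-mounted state (keys and SQLite data) before upgrading
tar czf cocoon-backup-$(date +%F).tar.gz keys/ data/
```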
+
+
### Optional Configuration
+
+
#### Database Configuration
+
+
By default, Cocoon uses SQLite, which requires no additional setup. For production deployments with higher traffic, you can use PostgreSQL:
+
+
```bash
+
# Database type: sqlite (default) or postgres
+
COCOON_DB_TYPE="postgres"
+
+
# PostgreSQL connection string (required if db-type is postgres)
+
# Format: postgres://user:password@host:port/database?sslmode=disable
+
COCOON_DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"
+
+
# Or use the standard DATABASE_URL environment variable
+
DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"
+
```
+
+
For SQLite (default):
+
```bash
+
COCOON_DB_TYPE="sqlite"
+
COCOON_DB_NAME="/data/cocoon/cocoon.db"
+
```
+
+
> **Note**: When using PostgreSQL, database backups to S3 are not handled by Cocoon. Use `pg_dump` or your database provider's backup solution instead.
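For example, a one-off manual dump from the bundled Postgres container (using the service name and credentials from `docker-compose.postgres.yaml`) might look like this:

```bash
# Sketch of a manual backup; the container name, user, and database come from the compose file
docker exec cocoon-postgres pg_dump -U cocoon cocoon > cocoon-$(date +%F).sql
```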
+
+
#### SMTP Email Settings
+
```bash
+
COCOON_SMTP_USER="your-smtp-username"
+
COCOON_SMTP_PASS="your-smtp-password"
+
COCOON_SMTP_HOST="smtp.example.com"
+
COCOON_SMTP_PORT="587"
+
COCOON_SMTP_EMAIL="noreply@example.com"
+
COCOON_SMTP_NAME="Cocoon PDS"
+
```
+
+
#### S3 Storage
+
+
Cocoon supports S3-compatible storage for both database backups (SQLite only) and blob storage (images, videos, etc.):
+
+
```bash
+
# Enable S3 backups (SQLite databases only - hourly backups)
+
COCOON_S3_BACKUPS_ENABLED=true
+
+
# Enable S3 for blob storage (images, videos, etc.)
+
# When enabled, blobs are stored in S3 instead of the database
+
COCOON_S3_BLOBSTORE_ENABLED=true
+
+
# S3 configuration (works with AWS S3, MinIO, Cloudflare R2, etc.)
+
COCOON_S3_REGION="us-east-1"
+
COCOON_S3_BUCKET="your-bucket"
+
COCOON_S3_ENDPOINT="https://s3.amazonaws.com"
+
COCOON_S3_ACCESS_KEY="your-access-key"
+
COCOON_S3_SECRET_KEY="your-secret-key"
+
+
# Optional: CDN/public URL for blob redirects
+
# When set, com.atproto.sync.getBlob redirects to this URL instead of proxying
+
COCOON_S3_CDN_URL="https://cdn.example.com"
+
```
+
+
**Blob Storage Options:**
+
- `COCOON_S3_BLOBSTORE_ENABLED=false` (default): Blobs stored in the database
+
- `COCOON_S3_BLOBSTORE_ENABLED=true`: Blobs stored in S3 bucket under `blobs/{did}/{cid}`
+
+
**Blob Serving Options:**
+
- Without `COCOON_S3_CDN_URL`: Blobs are proxied through the PDS server
+
- With `COCOON_S3_CDN_URL`: `getBlob` returns a 302 redirect to `{CDN_URL}/blobs/{did}/{cid}`
+
+
> **Tip**: For Cloudflare R2, you can use the public bucket URL as the CDN URL. For AWS S3, you can use CloudFront or the S3 bucket URL directly if public access is enabled.
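To verify the redirect behaviour, inspect the response headers of a `getBlob` call; with `COCOON_S3_CDN_URL` set you should see a `302` whose `Location` points at `{CDN_URL}/blobs/{did}/{cid}`. The DID and CID below are placeholders:

```bash
# Expect a 302 and a Location header pointing at the CDN when the CDN URL is configured
curl -sI "https://your-domain.com/xrpc/com.atproto.sync.getBlob?did=did:plc:example&cid=<blob-cid>"
```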
+
+
### Management Commands
+
+
Create an invite code:
+
```bash
+
docker exec cocoon-pds /cocoon create-invite-code --uses 1
+
```
+
+
Reset a user's password:
+
```bash
+
docker exec cocoon-pds /cocoon reset-password --did "did:plc:xxx"
+
```
+
+
### Updating
+
+
```bash
+
docker-compose pull
+
docker-compose up -d
+
```
+
+
## Implemented Endpoints
> [!NOTE]
-
Just because something is implemented doesn't mean it is finisehd. Tons of these are returning bad errors, don't do validation properly, etc. I'll make a "second pass" checklist at some point to do all of that.
+
Just because something is implemented doesn't mean it is finished. Tons of these are returning bad errors, don't do validation properly, etc. I'll make a "second pass" checklist at some point to do all of that.
-
#### Identity
-
- [ ] com.atproto.identity.getRecommendedDidCredentials
-
- [ ] com.atproto.identity.requestPlcOperationSignature
-
- [x] com.atproto.identity.resolveHandle
-
- [ ] com.atproto.identity.signPlcOperation
-
- [ ] com.atproto.identity.submitPlcOperatioin
-
- [x] com.atproto.identity.updateHandle
+
### Identity
-
#### Repo
-
- [x] com.atproto.repo.applyWrites
-
- [x] com.atproto.repo.createRecord
-
- [x] com.atproto.repo.putRecord
-
- [x] com.atproto.repo.deleteRecord
-
- [x] com.atproto.repo.describeRepo
-
- [x] com.atproto.repo.getRecord
-
- [x] com.atproto.repo.importRepo (Works "okay". You still have to handle PLC operations on your own when migrating. Use with extreme caution.)
-
- [x] com.atproto.repo.listRecords
-
- [ ] com.atproto.repo.listMissingBlobs
+
- [x] `com.atproto.identity.getRecommendedDidCredentials`
+
- [x] `com.atproto.identity.requestPlcOperationSignature`
+
- [x] `com.atproto.identity.resolveHandle`
+
- [x] `com.atproto.identity.signPlcOperation`
+
- [x] `com.atproto.identity.submitPlcOperation`
+
- [x] `com.atproto.identity.updateHandle`
-
#### Server
-
- [ ] com.atproto.server.activateAccount
-
- [x] com.atproto.server.checkAccountStatus
-
- [x] com.atproto.server.confirmEmail
-
- [x] com.atproto.server.createAccount
-
- [x] com.atproto.server.createInviteCode
-
- [x] com.atproto.server.createInviteCodes
-
- [ ] com.atproto.server.deactivateAccount
-
- [ ] com.atproto.server.deleteAccount
-
- [x] com.atproto.server.deleteSession
-
- [x] com.atproto.server.describeServer
-
- [ ] com.atproto.server.getAccountInviteCodes
-
- [ ] com.atproto.server.getServiceAuth
-
- ~[ ] com.atproto.server.listAppPasswords~ - not going to add app passwords
-
- [x] com.atproto.server.refreshSession
-
- [ ] com.atproto.server.requestAccountDelete
-
- [x] com.atproto.server.requestEmailConfirmation
-
- [x] com.atproto.server.requestEmailUpdate
-
- [x] com.atproto.server.requestPasswordReset
-
- [ ] com.atproto.server.reserveSigningKey
-
- [x] com.atproto.server.resetPassword
-
- ~[ ] com.atproto.server.revokeAppPassword~ - not going to add app passwords
-
- [x] com.atproto.server.updateEmail
+
### Repo
+
+
- [x] `com.atproto.repo.applyWrites`
+
- [x] `com.atproto.repo.createRecord`
+
- [x] `com.atproto.repo.putRecord`
+
- [x] `com.atproto.repo.deleteRecord`
+
- [x] `com.atproto.repo.describeRepo`
+
- [x] `com.atproto.repo.getRecord`
+
- [x] `com.atproto.repo.importRepo` (Works "okay". Use with extreme caution.)
+
- [x] `com.atproto.repo.listRecords`
+
- [x] `com.atproto.repo.listMissingBlobs`
+
+
### Server
+
+
- [x] `com.atproto.server.activateAccount`
+
- [x] `com.atproto.server.checkAccountStatus`
+
- [x] `com.atproto.server.confirmEmail`
+
- [x] `com.atproto.server.createAccount`
+
- [x] `com.atproto.server.createInviteCode`
+
- [x] `com.atproto.server.createInviteCodes`
+
- [x] `com.atproto.server.deactivateAccount`
+
- [x] `com.atproto.server.deleteAccount`
+
- [x] `com.atproto.server.deleteSession`
+
- [x] `com.atproto.server.describeServer`
+
- [ ] `com.atproto.server.getAccountInviteCodes`
+
- [x] `com.atproto.server.getServiceAuth`
+
- ~~[ ] `com.atproto.server.listAppPasswords`~~ - not going to add app passwords
+
- [x] `com.atproto.server.refreshSession`
+
- [x] `com.atproto.server.requestAccountDelete`
+
- [x] `com.atproto.server.requestEmailConfirmation`
+
- [x] `com.atproto.server.requestEmailUpdate`
+
- [x] `com.atproto.server.requestPasswordReset`
+
- [x] `com.atproto.server.reserveSigningKey`
+
- [x] `com.atproto.server.resetPassword`
+
- ~~[ ] `com.atproto.server.revokeAppPassword`~~ - not going to add app passwords
+
- [x] `com.atproto.server.updateEmail`
+
+
### Sync
-
#### Sync
-
- [x] com.atproto.sync.getBlob
-
- [x] com.atproto.sync.getBlocks
-
- [x] com.atproto.sync.getLatestCommit
-
- [x] com.atproto.sync.getRecord
-
- [x] com.atproto.sync.getRepoStatus
-
- [x] com.atproto.sync.getRepo
-
- [x] com.atproto.sync.listBlobs
-
- [x] com.atproto.sync.listRepos
-
- ~[ ] com.atproto.sync.notifyOfUpdate~ - BGS doesn't even have this implemented lol
-
- [x] com.atproto.sync.requestCrawl
-
- [x] com.atproto.sync.subscribeRepos
+
- [x] `com.atproto.sync.getBlob`
+
- [x] `com.atproto.sync.getBlocks`
+
- [x] `com.atproto.sync.getLatestCommit`
+
- [x] `com.atproto.sync.getRecord`
+
- [x] `com.atproto.sync.getRepoStatus`
+
- [x] `com.atproto.sync.getRepo`
+
- [x] `com.atproto.sync.listBlobs`
+
- [x] `com.atproto.sync.listRepos`
+
- ~~[ ] `com.atproto.sync.notifyOfUpdate`~~ - BGS doesn't even have this implemented lol
+
- [x] `com.atproto.sync.requestCrawl`
+
- [x] `com.atproto.sync.subscribeRepos`
-
#### Other
-
- [ ] com.atproto.label.queryLabels
-
- [ ] com.atproto.moderation.createReport
-
- [x] app.bsky.actor.getPreferences
-
- [x] app.bsky.actor.putPreferences
+
### Other
+
- [x] `com.atproto.label.queryLabels`
+
- [x] `com.atproto.moderation.createReport` (Note: this should be handled by proxying, not actually implemented in the PDS)
+
- [x] `app.bsky.actor.getPreferences`
+
- [x] `app.bsky.actor.putPreferences`
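As a quick smoke test against a running instance, `describeServer` needs no authentication and should return basic metadata about the PDS (the hostname below is a placeholder):

```bash
# Unauthenticated smoke test of one of the implemented endpoints
curl -s "https://your-domain.com/xrpc/com.atproto.server.describeServer"
```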
## License
-163
blockstore/blockstore.go
···
-
package blockstore
-
-
import (
-
"context"
-
"fmt"
-
-
"github.com/bluesky-social/indigo/atproto/syntax"
-
"github.com/haileyok/cocoon/internal/db"
-
"github.com/haileyok/cocoon/models"
-
blocks "github.com/ipfs/go-block-format"
-
"github.com/ipfs/go-cid"
-
"gorm.io/gorm/clause"
-
)
-
-
type SqliteBlockstore struct {
-
db *db.DB
-
did string
-
readonly bool
-
inserts map[cid.Cid]blocks.Block
-
}
-
-
func New(did string, db *db.DB) *SqliteBlockstore {
-
return &SqliteBlockstore{
-
did: did,
-
db: db,
-
readonly: false,
-
inserts: map[cid.Cid]blocks.Block{},
-
}
-
}
-
-
func NewReadOnly(did string, db *db.DB) *SqliteBlockstore {
-
return &SqliteBlockstore{
-
did: did,
-
db: db,
-
readonly: true,
-
inserts: map[cid.Cid]blocks.Block{},
-
}
-
}
-
-
func (bs *SqliteBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
-
var block models.Block
-
-
maybeBlock, ok := bs.inserts[cid]
-
if ok {
-
return maybeBlock, nil
-
}
-
-
if err := bs.db.Raw("SELECT * FROM blocks WHERE did = ? AND cid = ?", nil, bs.did, cid.Bytes()).Scan(&block).Error; err != nil {
-
return nil, err
-
}
-
-
b, err := blocks.NewBlockWithCid(block.Value, cid)
-
if err != nil {
-
return nil, err
-
}
-
-
return b, nil
-
}
-
-
func (bs *SqliteBlockstore) Put(ctx context.Context, block blocks.Block) error {
-
bs.inserts[block.Cid()] = block
-
-
if bs.readonly {
-
return nil
-
}
-
-
b := models.Block{
-
Did: bs.did,
-
Cid: block.Cid().Bytes(),
-
Rev: syntax.NewTIDNow(0).String(), // TODO: WARN, this is bad. don't do this
-
Value: block.RawData(),
-
}
-
-
if err := bs.db.Create(&b, []clause.Expression{clause.OnConflict{
-
Columns: []clause.Column{{Name: "did"}, {Name: "cid"}},
-
UpdateAll: true,
-
}}).Error; err != nil {
-
return err
-
}
-
-
return nil
-
}
-
-
func (bs *SqliteBlockstore) DeleteBlock(context.Context, cid.Cid) error {
-
panic("not implemented")
-
}
-
-
func (bs *SqliteBlockstore) Has(context.Context, cid.Cid) (bool, error) {
-
panic("not implemented")
-
}
-
-
func (bs *SqliteBlockstore) GetSize(context.Context, cid.Cid) (int, error) {
-
panic("not implemented")
-
}
-
-
func (bs *SqliteBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
-
tx := bs.db.BeginDangerously()
-
-
for _, block := range blocks {
-
bs.inserts[block.Cid()] = block
-
-
if bs.readonly {
-
continue
-
}
-
-
b := models.Block{
-
Did: bs.did,
-
Cid: block.Cid().Bytes(),
-
Rev: syntax.NewTIDNow(0).String(), // TODO: WARN, this is bad. don't do this
-
Value: block.RawData(),
-
}
-
-
if err := tx.Clauses(clause.OnConflict{
-
Columns: []clause.Column{{Name: "did"}, {Name: "cid"}},
-
UpdateAll: true,
-
}).Create(&b).Error; err != nil {
-
tx.Rollback()
-
return err
-
}
-
}
-
-
if bs.readonly {
-
return nil
-
}
-
-
tx.Commit()
-
-
return nil
-
}
-
-
func (bs *SqliteBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
-
panic("not implemented")
-
}
-
-
func (bs *SqliteBlockstore) HashOnRead(enabled bool) {
-
panic("not implemented")
-
}
-
-
func (bs *SqliteBlockstore) UpdateRepo(ctx context.Context, root cid.Cid, rev string) error {
-
if err := bs.db.Exec("UPDATE repos SET root = ?, rev = ? WHERE did = ?", nil, root.Bytes(), rev, bs.did).Error; err != nil {
-
return err
-
}
-
-
return nil
-
}
-
-
func (bs *SqliteBlockstore) Execute(ctx context.Context) error {
-
if !bs.readonly {
-
return fmt.Errorf("blockstore was not readonly")
-
}
-
-
bs.readonly = false
-
for _, b := range bs.inserts {
-
bs.Put(ctx, b)
-
}
-
bs.readonly = true
-
-
return nil
-
}
-
-
func (bs *SqliteBlockstore) GetLog() map[cid.Cid]blocks.Block {
-
return bs.inserts
-
}
+104 -52
cmd/cocoon/main.go
···
"os"
"time"
-
"github.com/bluesky-social/indigo/atproto/crypto"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/server"
···
"github.com/lestrrat-go/jwx/v2/jwk"
"github.com/urfave/cli/v2"
"golang.org/x/crypto/bcrypt"
+
"gorm.io/driver/postgres"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
···
EnvVars: []string{"COCOON_DB_NAME"},
},
&cli.StringFlag{
-
Name: "did",
-
Required: true,
-
EnvVars: []string{"COCOON_DID"},
+
Name: "db-type",
+
Value: "sqlite",
+
Usage: "Database type: sqlite or postgres",
+
EnvVars: []string{"COCOON_DB_TYPE"},
},
&cli.StringFlag{
-
Name: "hostname",
-
Required: true,
-
EnvVars: []string{"COCOON_HOSTNAME"},
+
Name: "database-url",
+
Aliases: []string{"db-url"},
+
Usage: "PostgreSQL connection string (required if db-type is postgres)",
+
EnvVars: []string{"COCOON_DATABASE_URL", "DATABASE_URL"},
+
},
+
&cli.StringFlag{
+
Name: "did",
+
EnvVars: []string{"COCOON_DID"},
},
&cli.StringFlag{
-
Name: "rotation-key-path",
-
Required: true,
-
EnvVars: []string{"COCOON_ROTATION_KEY_PATH"},
+
Name: "hostname",
+
EnvVars: []string{"COCOON_HOSTNAME"},
},
&cli.StringFlag{
-
Name: "jwk-path",
-
Required: true,
-
EnvVars: []string{"COCOON_JWK_PATH"},
+
Name: "rotation-key-path",
+
EnvVars: []string{"COCOON_ROTATION_KEY_PATH"},
},
&cli.StringFlag{
-
Name: "contact-email",
-
Required: true,
-
EnvVars: []string{"COCOON_CONTACT_EMAIL"},
+
Name: "jwk-path",
+
EnvVars: []string{"COCOON_JWK_PATH"},
+
},
+
&cli.StringFlag{
+
Name: "contact-email",
+
EnvVars: []string{"COCOON_CONTACT_EMAIL"},
},
&cli.StringSliceFlag{
-
Name: "relays",
-
Required: true,
-
EnvVars: []string{"COCOON_RELAYS"},
+
Name: "relays",
+
EnvVars: []string{"COCOON_RELAYS"},
},
&cli.StringFlag{
-
Name: "admin-password",
-
Required: true,
-
EnvVars: []string{"COCOON_ADMIN_PASSWORD"},
+
Name: "admin-password",
+
EnvVars: []string{"COCOON_ADMIN_PASSWORD"},
+
},
+
&cli.BoolFlag{
+
Name: "require-invite",
+
EnvVars: []string{"COCOON_REQUIRE_INVITE"},
+
Value: true,
},
&cli.StringFlag{
-
Name: "smtp-user",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_USER"},
+
Name: "smtp-user",
+
EnvVars: []string{"COCOON_SMTP_USER"},
},
&cli.StringFlag{
-
Name: "smtp-pass",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_PASS"},
+
Name: "smtp-pass",
+
EnvVars: []string{"COCOON_SMTP_PASS"},
},
&cli.StringFlag{
-
Name: "smtp-host",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_HOST"},
+
Name: "smtp-host",
+
EnvVars: []string{"COCOON_SMTP_HOST"},
},
&cli.StringFlag{
-
Name: "smtp-port",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_PORT"},
+
Name: "smtp-port",
+
EnvVars: []string{"COCOON_SMTP_PORT"},
},
&cli.StringFlag{
-
Name: "smtp-email",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_EMAIL"},
+
Name: "smtp-email",
+
EnvVars: []string{"COCOON_SMTP_EMAIL"},
},
&cli.StringFlag{
-
Name: "smtp-name",
-
Required: false,
-
EnvVars: []string{"COCOON_SMTP_NAME"},
+
Name: "smtp-name",
+
EnvVars: []string{"COCOON_SMTP_NAME"},
},
&cli.BoolFlag{
Name: "s3-backups-enabled",
EnvVars: []string{"COCOON_S3_BACKUPS_ENABLED"},
},
+
&cli.BoolFlag{
+
Name: "s3-blobstore-enabled",
+
EnvVars: []string{"COCOON_S3_BLOBSTORE_ENABLED"},
+
},
&cli.StringFlag{
Name: "s3-region",
EnvVars: []string{"COCOON_S3_REGION"},
···
EnvVars: []string{"COCOON_S3_SECRET_KEY"},
},
&cli.StringFlag{
+
Name: "s3-cdn-url",
+
EnvVars: []string{"COCOON_S3_CDN_URL"},
+
Usage: "Public URL for S3 blob redirects (e.g., https://cdn.example.com). When set, getBlob redirects to this URL instead of proxying.",
+
},
+
&cli.StringFlag{
Name: "session-secret",
EnvVars: []string{"COCOON_SESSION_SECRET"},
},
+
&cli.StringFlag{
+
Name: "blockstore-variant",
+
EnvVars: []string{"COCOON_BLOCKSTORE_VARIANT"},
+
Value: "sqlite",
+
},
+
&cli.StringFlag{
+
Name: "fallback-proxy",
+
EnvVars: []string{"COCOON_FALLBACK_PROXY"},
+
},
},
Commands: []*cli.Command{
runServe,
···
Usage: "Start the cocoon PDS",
Flags: []cli.Flag{},
Action: func(cmd *cli.Context) error {
+
s, err := server.New(&server.Args{
Addr: cmd.String("addr"),
DbName: cmd.String("db-name"),
+
DbType: cmd.String("db-type"),
+
DatabaseURL: cmd.String("database-url"),
Did: cmd.String("did"),
Hostname: cmd.String("hostname"),
RotationKeyPath: cmd.String("rotation-key-path"),
···
Version: Version,
Relays: cmd.StringSlice("relays"),
AdminPassword: cmd.String("admin-password"),
+
RequireInvite: cmd.Bool("require-invite"),
SmtpUser: cmd.String("smtp-user"),
SmtpPass: cmd.String("smtp-pass"),
SmtpHost: cmd.String("smtp-host"),
···
SmtpEmail: cmd.String("smtp-email"),
SmtpName: cmd.String("smtp-name"),
S3Config: &server.S3Config{
-
BackupsEnabled: cmd.Bool("s3-backups-enabled"),
-
Region: cmd.String("s3-region"),
-
Bucket: cmd.String("s3-bucket"),
-
Endpoint: cmd.String("s3-endpoint"),
-
AccessKey: cmd.String("s3-access-key"),
-
SecretKey: cmd.String("s3-secret-key"),
+
BackupsEnabled: cmd.Bool("s3-backups-enabled"),
+
BlobstoreEnabled: cmd.Bool("s3-blobstore-enabled"),
+
Region: cmd.String("s3-region"),
+
Bucket: cmd.String("s3-bucket"),
+
Endpoint: cmd.String("s3-endpoint"),
+
AccessKey: cmd.String("s3-access-key"),
+
SecretKey: cmd.String("s3-secret-key"),
+
CDNUrl: cmd.String("s3-cdn-url"),
},
-
SessionSecret: cmd.String("session-secret"),
+
SessionSecret: cmd.String("session-secret"),
+
BlockstoreVariant: server.MustReturnBlockstoreVariant(cmd.String("blockstore-variant")),
+
FallbackProxy: cmd.String("fallback-proxy"),
})
if err != nil {
fmt.Printf("error creating cocoon: %v", err)
···
},
},
Action: func(cmd *cli.Context) error {
-
key, err := crypto.GeneratePrivateKeyK256()
+
key, err := atcrypto.GeneratePrivateKeyK256()
if err != nil {
return err
}
···
},
},
Action: func(cmd *cli.Context) error {
-
db, err := newDb()
+
db, err := newDb(cmd)
if err != nil {
return err
}
···
},
},
Action: func(cmd *cli.Context) error {
-
db, err := newDb()
+
db, err := newDb(cmd)
if err != nil {
return err
}
···
},
}
-
func newDb() (*gorm.DB, error) {
-
return gorm.Open(sqlite.Open("cocoon.db"), &gorm.Config{})
+
func newDb(cmd *cli.Context) (*gorm.DB, error) {
+
dbType := cmd.String("db-type")
+
if dbType == "" {
+
dbType = "sqlite"
+
}
+
+
switch dbType {
+
case "postgres":
+
databaseURL := cmd.String("database-url")
+
if databaseURL == "" {
+
databaseURL = cmd.String("database-url")
+
}
+
if databaseURL == "" {
+
return nil, fmt.Errorf("COCOON_DATABASE_URL or DATABASE_URL must be set when using postgres")
+
}
+
return gorm.Open(postgres.Open(databaseURL), &gorm.Config{})
+
default:
+
dbName := cmd.String("db-name")
+
if dbName == "" {
+
dbName = "cocoon.db"
+
}
+
return gorm.Open(sqlite.Open(dbName), &gorm.Config{})
+
}
}
+56
create-initial-invite.sh
···
+
#!/bin/sh
+
+
INVITE_FILE="/keys/initial-invite-code.txt"
+
MARKER="/keys/.invite_created"
+
+
# Check if invite code was already created
+
if [ -f "$MARKER" ]; then
+
echo "โœ“ Initial invite code already created"
+
exit 0
+
fi
+
+
echo "Waiting for database to be ready..."
+
sleep 10
+
+
# Try to create invite code - retry until database is ready
+
MAX_ATTEMPTS=30
+
ATTEMPT=0
+
INVITE_CODE=""
+
+
while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do
+
ATTEMPT=$((ATTEMPT + 1))
+
OUTPUT=$(/cocoon create-invite-code --uses 1 2>&1)
+
INVITE_CODE=$(echo "$OUTPUT" | grep -oE '[a-zA-Z0-9]{8}-[a-zA-Z0-9]{8}' || echo "")
+
+
if [ -n "$INVITE_CODE" ]; then
+
break
+
fi
+
+
if [ $((ATTEMPT % 5)) -eq 0 ]; then
+
echo " Waiting for database... ($ATTEMPT/$MAX_ATTEMPTS)"
+
fi
+
sleep 2
+
done
+
+
if [ -n "$INVITE_CODE" ]; then
+
echo ""
+
echo "โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•—"
+
echo "โ•‘ SAVE THIS INVITE CODE! โ•‘"
+
echo "โ•‘ โ•‘"
+
echo "โ•‘ $INVITE_CODE โ•‘"
+
echo "โ•‘ โ•‘"
+
echo "โ•‘ Use this to create your first โ•‘"
+
echo "โ•‘ account on your PDS. โ•‘"
+
echo "โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•"
+
echo ""
+
+
echo "$INVITE_CODE" > "$INVITE_FILE"
+
echo "โœ“ Invite code saved to: $INVITE_FILE"
+
+
touch "$MARKER"
+
echo "โœ“ Initial setup complete!"
+
else
+
echo "โœ— Failed to create invite code"
+
echo "Output: $OUTPUT"
+
exit 1
+
fi
+45
cspell.json
···
+
{
+
"version": "0.2",
+
"language": "en",
+
"words": [
+
"atproto",
+
"bsky",
+
"Cocoon",
+
"PDS",
+
"Plc",
+
"plc",
+
"repo",
+
"InviteCodes",
+
"InviteCode",
+
"Invite",
+
"Signin",
+
"Signout",
+
"JWKS",
+
"dpop",
+
"BGS",
+
"pico",
+
"picocss",
+
"par",
+
"blobs",
+
"blob",
+
"did",
+
"DID",
+
"OAuth",
+
"oauth",
+
"par",
+
"Cocoon",
+
"memcache",
+
"db",
+
"helpers",
+
"middleware",
+
"repo",
+
"static",
+
"pico",
+
"picocss",
+
"MIT",
+
"Go"
+
],
+
"ignorePaths": [
+
"server/static/pico.css"
+
]
+
}
+158
docker-compose.postgres.yaml
···
+
# Docker Compose with PostgreSQL
+
#
+
# Usage:
+
# docker-compose -f docker-compose.postgres.yaml up -d
+
#
+
# This file extends the base docker-compose.yaml with a PostgreSQL database.
+
# Set the following in your .env file:
+
# COCOON_DB_TYPE=postgres
+
# POSTGRES_PASSWORD=your-secure-password
+
+
version: '3.8'
+
+
services:
+
postgres:
+
image: postgres:16-alpine
+
container_name: cocoon-postgres
+
environment:
+
POSTGRES_USER: cocoon
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
+
POSTGRES_DB: cocoon
+
volumes:
+
- postgres_data:/var/lib/postgresql/data
+
healthcheck:
+
test: ["CMD-SHELL", "pg_isready -U cocoon -d cocoon"]
+
interval: 10s
+
timeout: 5s
+
retries: 5
+
restart: unless-stopped
+
+
init-keys:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-init-keys
+
volumes:
+
- ./keys:/keys
+
- ./data:/data/cocoon
+
- ./init-keys.sh:/init-keys.sh:ro
+
environment:
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
entrypoint: ["/bin/sh", "/init-keys.sh"]
+
restart: "no"
+
+
cocoon:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-pds
+
depends_on:
+
init-keys:
+
condition: service_completed_successfully
+
postgres:
+
condition: service_healthy
+
ports:
+
- "8080:8080"
+
volumes:
+
- ./data:/data/cocoon
+
- ./keys/rotation.key:/keys/rotation.key:ro
+
- ./keys/jwk.key:/keys/jwk.key:ro
+
environment:
+
# Required settings
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
COCOON_SESSION_SECRET: ${COCOON_SESSION_SECRET}
+
+
# Database configuration - PostgreSQL
+
COCOON_ADDR: ":8080"
+
COCOON_DB_TYPE: postgres
+
COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable
+
COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite}
+
+
# Optional: SMTP settings for email
+
COCOON_SMTP_USER: ${COCOON_SMTP_USER:-}
+
COCOON_SMTP_PASS: ${COCOON_SMTP_PASS:-}
+
COCOON_SMTP_HOST: ${COCOON_SMTP_HOST:-}
+
COCOON_SMTP_PORT: ${COCOON_SMTP_PORT:-}
+
COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-}
+
COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-}
+
+
# Optional: S3 configuration
+
COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false}
+
COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false}
+
COCOON_S3_REGION: ${COCOON_S3_REGION:-}
+
COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-}
+
COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-}
+
COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-}
+
COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-}
+
+
# Optional: Fallback proxy
+
COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-}
+
restart: unless-stopped
+
healthcheck:
+
test: ["CMD", "curl", "-f", "http://localhost:8080/xrpc/_health"]
+
interval: 30s
+
timeout: 10s
+
retries: 3
+
start_period: 40s
+
+
create-invite:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-create-invite
+
volumes:
+
- ./keys:/keys
+
- ./create-initial-invite.sh:/create-initial-invite.sh:ro
+
environment:
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
COCOON_DB_TYPE: postgres
+
COCOON_DATABASE_URL: postgres://cocoon:${POSTGRES_PASSWORD}@postgres:5432/cocoon?sslmode=disable
+
depends_on:
+
cocoon:
+
condition: service_healthy
+
entrypoint: ["/bin/sh", "/create-initial-invite.sh"]
+
restart: "no"
+
+
caddy:
+
image: caddy:2-alpine
+
container_name: cocoon-caddy
+
ports:
+
- "80:80"
+
- "443:443"
+
volumes:
+
- ./Caddyfile.postgres:/etc/caddy/Caddyfile:ro
+
- caddy_data:/data
+
- caddy_config:/config
+
restart: unless-stopped
+
environment:
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
CADDY_ACME_EMAIL: ${COCOON_CONTACT_EMAIL:-}
+
+
volumes:
+
postgres_data:
+
driver: local
+
caddy_data:
+
driver: local
+
caddy_config:
+
driver: local
+130
docker-compose.yaml
···
+
version: '3.8'
+
+
services:
+
init-keys:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-init-keys
+
volumes:
+
- ./keys:/keys
+
- ./data:/data/cocoon
+
- ./init-keys.sh:/init-keys.sh:ro
+
environment:
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
entrypoint: ["/bin/sh", "/init-keys.sh"]
+
restart: "no"
+
+
cocoon:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-pds
+
network_mode: host
+
depends_on:
+
init-keys:
+
condition: service_completed_successfully
+
volumes:
+
- ./data:/data/cocoon
+
- ./keys/rotation.key:/keys/rotation.key:ro
+
- ./keys/jwk.key:/keys/jwk.key:ro
+
environment:
+
# Required settings
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
COCOON_SESSION_SECRET: ${COCOON_SESSION_SECRET}
+
+
# Server configuration
+
COCOON_ADDR: ":8080"
+
COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite}
+
COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db}
+
COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-}
+
COCOON_BLOCKSTORE_VARIANT: ${COCOON_BLOCKSTORE_VARIANT:-sqlite}
+
+
# Optional: SMTP settings for email
+
COCOON_SMTP_USER: ${COCOON_SMTP_USER:-}
+
COCOON_SMTP_PASS: ${COCOON_SMTP_PASS:-}
+
COCOON_SMTP_HOST: ${COCOON_SMTP_HOST:-}
+
COCOON_SMTP_PORT: ${COCOON_SMTP_PORT:-}
+
COCOON_SMTP_EMAIL: ${COCOON_SMTP_EMAIL:-}
+
COCOON_SMTP_NAME: ${COCOON_SMTP_NAME:-}
+
+
# Optional: S3 configuration
+
COCOON_S3_BACKUPS_ENABLED: ${COCOON_S3_BACKUPS_ENABLED:-false}
+
COCOON_S3_BLOBSTORE_ENABLED: ${COCOON_S3_BLOBSTORE_ENABLED:-false}
+
COCOON_S3_REGION: ${COCOON_S3_REGION:-}
+
COCOON_S3_BUCKET: ${COCOON_S3_BUCKET:-}
+
COCOON_S3_ENDPOINT: ${COCOON_S3_ENDPOINT:-}
+
COCOON_S3_ACCESS_KEY: ${COCOON_S3_ACCESS_KEY:-}
+
COCOON_S3_SECRET_KEY: ${COCOON_S3_SECRET_KEY:-}
+
COCOON_S3_CDN_URL: ${COCOON_S3_CDN_URL:-}
+
+
# Optional: Fallback proxy
+
COCOON_FALLBACK_PROXY: ${COCOON_FALLBACK_PROXY:-}
+
restart: unless-stopped
+
healthcheck:
+
test: ["CMD", "curl", "-f", "http://localhost:8080/xrpc/_health"]
+
interval: 30s
+
timeout: 10s
+
retries: 3
+
start_period: 40s
+
+
create-invite:
+
build:
+
context: .
+
dockerfile: Dockerfile
+
image: ghcr.io/haileyok/cocoon:latest
+
container_name: cocoon-create-invite
+
network_mode: host
+
volumes:
+
- ./keys:/keys
+
- ./create-initial-invite.sh:/create-initial-invite.sh:ro
+
environment:
+
COCOON_DID: ${COCOON_DID}
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
COCOON_ROTATION_KEY_PATH: /keys/rotation.key
+
COCOON_JWK_PATH: /keys/jwk.key
+
COCOON_CONTACT_EMAIL: ${COCOON_CONTACT_EMAIL}
+
COCOON_RELAYS: ${COCOON_RELAYS:-https://bsky.network}
+
COCOON_ADMIN_PASSWORD: ${COCOON_ADMIN_PASSWORD}
+
COCOON_DB_TYPE: ${COCOON_DB_TYPE:-sqlite}
+
COCOON_DB_NAME: ${COCOON_DB_NAME:-/data/cocoon/cocoon.db}
+
COCOON_DATABASE_URL: ${COCOON_DATABASE_URL:-}
+
depends_on:
+
- init-keys
+
entrypoint: ["/bin/sh", "/create-initial-invite.sh"]
+
restart: "no"
+
+
caddy:
+
image: caddy:2-alpine
+
container_name: cocoon-caddy
+
network_mode: host
+
volumes:
+
- ./Caddyfile:/etc/caddy/Caddyfile:ro
+
- caddy_data:/data
+
- caddy_config:/config
+
restart: unless-stopped
+
environment:
+
COCOON_HOSTNAME: ${COCOON_HOSTNAME}
+
CADDY_ACME_EMAIL: ${COCOON_CONTACT_EMAIL:-}
+
+
volumes:
+
data:
+
driver: local
+
caddy_data:
+
driver: local
+
caddy_config:
+
driver: local
+3 -2
go.mod
···
require (
github.com/Azure/go-autorest/autorest/to v0.4.1
github.com/aws/aws-sdk-go v1.55.7
-
github.com/bluesky-social/indigo v0.0.0-20250414202759-826fcdeaa36b
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792
github.com/domodwyer/mailyak/v3 v3.6.2
github.com/go-pkgz/expirable-cache/v3 v3.0.0
···
github.com/google/uuid v1.4.0
github.com/gorilla/sessions v1.4.0
github.com/gorilla/websocket v1.5.1
+
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/ipfs/go-block-format v0.2.0
github.com/ipfs/go-cid v0.4.1
+
github.com/ipfs/go-ipfs-blockstore v1.3.1
github.com/ipfs/go-ipld-cbor v0.1.0
github.com/ipld/go-car v0.6.1-0.20230509095817-92d28eb23ba4
github.com/joho/godotenv v1.5.1
···
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-blockservice v0.5.2 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect
-
github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
+4 -4
go.sum
···
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-
github.com/bluesky-social/indigo v0.0.0-20250414202759-826fcdeaa36b h1:elwfbe+W7GkUmPKFX1h7HaeHvC/kC0XJWfiEHC62xPg=
-
github.com/bluesky-social/indigo v0.0.0-20250414202759-826fcdeaa36b/go.mod h1:yjdhLA1LkK8VDS/WPUoYPo25/Hq/8rX38Ftr67EsqKY=
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe h1:VBhaqE5ewQgXbY5SfSWFZC/AwHFo7cHxZKFYi2ce9Yo=
+
github.com/bluesky-social/indigo v0.0.0-20251009212240-20524de167fe/go.mod h1:RuQVrCGm42QNsgumKaR6se+XkFKfCPNwdCiTvqKRUck=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
···
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b h1:wDUNC2eKiL35DbLvsDhiblTUXHxcOPwQSCzi7xpQUN4=
+
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
···
github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
github.com/ipfs/go-blockservice v0.5.2 h1:in9Bc+QcXwd1apOVM7Un9t8tixPKdaHQFdLSUM1Xgk8=
github.com/ipfs/go-blockservice v0.5.2/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk=
-
github.com/ipfs/go-bs-sqlite3 v0.0.0-20221122195556-bfcee1be620d h1:9V+GGXCuOfDiFpdAHz58q9mKLg447xp0cQKvqQrAwYE=
-
github.com/ipfs/go-bs-sqlite3 v0.0.0-20221122195556-bfcee1be620d/go.mod h1:pMbnFyNAGjryYCLCe59YDLRv/ujdN+zGJBT1umlvYRM=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+74 -55
identity/identity.go
···
"github.com/bluesky-social/indigo/util"
)
-
func ResolveHandle(ctx context.Context, cli *http.Client, handle string) (string, error) {
-
if cli == nil {
-
cli = util.RobustHTTPClient()
+
func ResolveHandleFromTXT(ctx context.Context, handle string) (string, error) {
+
name := fmt.Sprintf("_atproto.%s", handle)
+
recs, err := net.LookupTXT(name)
+
if err != nil {
+
return "", fmt.Errorf("handle could not be resolved via txt: %w", err)
+
}
+
+
for _, rec := range recs {
+
if strings.HasPrefix(rec, "did=") {
+
maybeDid := strings.Split(rec, "did=")[1]
+
if _, err := syntax.ParseDID(maybeDid); err == nil {
+
return maybeDid, nil
+
}
+
}
+
}
+
+
return "", fmt.Errorf("handle could not be resolved via txt: no record found")
+
}
+
+
func ResolveHandleFromWellKnown(ctx context.Context, cli *http.Client, handle string) (string, error) {
+
ustr := fmt.Sprintf("https://%s/.well-known/atproto-did", handle)
+
req, err := http.NewRequestWithContext(
+
ctx,
+
"GET",
+
ustr,
+
nil,
+
)
+
if err != nil {
+
return "", fmt.Errorf("handle could not be resolved via web: %w", err)
}
-
var did string
+
resp, err := cli.Do(req)
+
if err != nil {
+
return "", fmt.Errorf("handle could not be resolved via web: %w", err)
+
}
+
defer resp.Body.Close()
-
_, err := syntax.ParseHandle(handle)
+
b, err := io.ReadAll(resp.Body)
if err != nil {
-
return "", err
+
return "", fmt.Errorf("handle could not be resolved via web: %w", err)
}
-
recs, err := net.LookupTXT(fmt.Sprintf("_atproto.%s", handle))
-
if err == nil {
-
for _, rec := range recs {
-
if strings.HasPrefix(rec, "did=") {
-
did = strings.Split(rec, "did=")[1]
-
break
-
}
-
}
-
} else {
-
fmt.Printf("erorr getting txt records: %v\n", err)
+
if resp.StatusCode != http.StatusOK {
+
return "", fmt.Errorf("handle could not be resolved via web: invalid status code %d", resp.StatusCode)
}
-
if did == "" {
-
req, err := http.NewRequestWithContext(
-
ctx,
-
"GET",
-
fmt.Sprintf("https://%s/.well-known/atproto-did", handle),
-
nil,
-
)
-
if err != nil {
-
return "", nil
-
}
+
maybeDid := string(b)
-
resp, err := http.DefaultClient.Do(req)
-
if err != nil {
-
return "", nil
-
}
-
defer resp.Body.Close()
+
if _, err := syntax.ParseDID(maybeDid); err != nil {
+
return "", fmt.Errorf("handle could not be resolved via web: invalid did in document")
+
}
-
if resp.StatusCode != http.StatusOK {
-
io.Copy(io.Discard, resp.Body)
-
return "", fmt.Errorf("unable to resolve handle")
-
}
+
return maybeDid, nil
+
}
-
b, err := io.ReadAll(resp.Body)
-
if err != nil {
-
return "", err
-
}
+
func ResolveHandle(ctx context.Context, cli *http.Client, handle string) (string, error) {
+
if cli == nil {
+
cli = util.RobustHTTPClient()
+
}
-
maybeDid := string(b)
+
_, err := syntax.ParseHandle(handle)
+
if err != nil {
+
return "", err
+
}
-
if _, err := syntax.ParseDID(maybeDid); err != nil {
-
return "", fmt.Errorf("unable to resolve handle")
-
}
+
if maybeDidFromTxt, err := ResolveHandleFromTXT(ctx, handle); err == nil {
+
return maybeDidFromTxt, nil
+
}
-
did = maybeDid
+
if maybeDidFromWeb, err := ResolveHandleFromWellKnown(ctx, cli, handle); err == nil {
+
return maybeDidFromWeb, nil
}
-
return did, nil
+
return "", fmt.Errorf("handle could not be resolved")
+
}
+
+
func DidToDocUrl(did string) (string, error) {
+
if strings.HasPrefix(did, "did:plc:") {
+
return fmt.Sprintf("https://plc.directory/%s", did), nil
+
} else if after, ok := strings.CutPrefix(did, "did:web:"); ok {
+
return fmt.Sprintf("https://%s/.well-known/did.json", after), nil
+
} else {
+
return "", fmt.Errorf("did was not a supported did type")
+
}
}
func FetchDidDoc(ctx context.Context, cli *http.Client, did string) (*DidDoc, error) {
···
cli = util.RobustHTTPClient()
}
-
var ustr string
-
if strings.HasPrefix(did, "did:plc:") {
-
ustr = fmt.Sprintf("https://plc.directory/%s", did)
-
} else if strings.HasPrefix(did, "did:web:") {
-
ustr = fmt.Sprintf("https://%s/.well-known/did.json", strings.TrimPrefix(did, "did:web:"))
-
} else {
-
return nil, fmt.Errorf("did was not a supported did type")
+
ustr, err := DidToDocUrl(did)
+
if err != nil {
+
return nil, err
}
req, err := http.NewRequestWithContext(ctx, "GET", ustr, nil)
···
return nil, err
}
-
resp, err := http.DefaultClient.Do(req)
+
resp, err := cli.Do(req)
if err != nil {
return nil, err
}
···
if resp.StatusCode != 200 {
io.Copy(io.Discard, resp.Body)
-
return nil, fmt.Errorf("could not find identity in plc registry")
+
return nil, fmt.Errorf("unable to find did doc at url. did: %s. url: %s", did, ustr)
}
var diddoc DidDoc
···
return nil, err
}
-
resp, err := http.DefaultClient.Do(req)
+
resp, err := cli.Do(req)
if err != nil {
return nil, err
}
+15 -5
identity/passport.go
···
type Passport struct {
h *http.Client
bc BackingCache
-
lk sync.Mutex
+
mu sync.RWMutex
}
func NewPassport(h *http.Client, bc BackingCache) *Passport {
···
return &Passport{
h: h,
bc: bc,
-
lk: sync.Mutex{},
}
}
···
skipCache, _ := ctx.Value("skip-cache").(bool)
if !skipCache {
+
p.mu.RLock()
cached, ok := p.bc.GetDoc(did)
+
p.mu.RUnlock()
+
if ok {
return cached, nil
}
}
-
p.lk.Lock() // this is pretty pathetic, and i should rethink this. but for now, fuck it
-
defer p.lk.Unlock()
-
doc, err := FetchDidDoc(ctx, p.h, did)
if err != nil {
return nil, err
}
+
p.mu.Lock()
p.bc.PutDoc(did, doc)
+
p.mu.Unlock()
return doc, nil
}
···
skipCache, _ := ctx.Value("skip-cache").(bool)
if !skipCache {
+
p.mu.RLock()
cached, ok := p.bc.GetDid(handle)
+
p.mu.RUnlock()
+
if ok {
return cached, nil
}
···
return "", err
}
+
p.mu.Lock()
p.bc.PutDid(handle, did)
+
p.mu.Unlock()
return did, nil
}
func (p *Passport) BustDoc(ctx context.Context, did string) error {
+
p.mu.Lock()
+
defer p.mu.Unlock()
return p.bc.BustDoc(did)
}
func (p *Passport) BustDid(ctx context.Context, handle string) error {
+
p.mu.Lock()
+
defer p.mu.Unlock()
return p.bc.BustDid(handle)
}
+1 -1
identity/types.go
···
Context []string `json:"@context"`
Id string `json:"id"`
AlsoKnownAs []string `json:"alsoKnownAs"`
-
VerificationMethods []DidDocVerificationMethod `json:"verificationMethods"`
+
VerificationMethods []DidDocVerificationMethod `json:"verificationMethod"`
Service []DidDocService `json:"service"`
}
+34
init-keys.sh
···
+
#!/bin/sh
+
set -e
+
+
mkdir -p /keys
+
mkdir -p /data/cocoon
+
+
if [ ! -f /keys/rotation.key ]; then
+
echo "Generating rotation key..."
+
/cocoon create-rotation-key --out /keys/rotation.key 2>/dev/null || true
+
if [ -f /keys/rotation.key ]; then
+
echo "โœ“ Rotation key generated at /keys/rotation.key"
+
else
+
echo "โœ— Failed to generate rotation key"
+
exit 1
+
fi
+
else
+
echo "โœ“ Rotation key already exists"
+
fi
+
+
if [ ! -f /keys/jwk.key ]; then
+
echo "Generating JWK..."
+
/cocoon create-private-jwk --out /keys/jwk.key 2>/dev/null || true
+
if [ -f /keys/jwk.key ]; then
+
echo "โœ“ JWK generated at /keys/jwk.key"
+
else
+
echo "โœ— Failed to generate JWK"
+
exit 1
+
fi
+
else
+
echo "โœ“ JWK already exists"
+
fi
+
+
echo ""
+
echo "โœ“ Key initialization complete!"
+6
internal/db/db.go
···
return db.cli.Clauses(clauses...).Create(value)
}
+
func (db *DB) Save(value any, clauses []clause.Expression) *gorm.DB {
+
db.mu.Lock()
+
defer db.mu.Unlock()
+
return db.cli.Clauses(clauses...).Save(value)
+
}
+
func (db *DB) Exec(sql string, clauses []clause.Expression, values ...any) *gorm.DB {
db.mu.Lock()
defer db.mu.Unlock()
+29
internal/helpers/helpers.go
···
"math/rand"
"net/url"
+
"github.com/Azure/go-autorest/autorest/to"
"github.com/labstack/echo/v4"
"github.com/lestrrat-go/jwx/v2/jwk"
)
···
msg += ". " + *suffix
}
return genericError(e, 400, msg)
+
}
+
+
func UnauthorizedError(e echo.Context, suffix *string) error {
+
msg := "Unauthorized"
+
if suffix != nil {
+
msg += ". " + *suffix
+
}
+
return genericError(e, 401, msg)
+
}
+
+
func ForbiddenError(e echo.Context, suffix *string) error {
+
msg := "Forbidden"
+
if suffix != nil {
+
msg += ". " + *suffix
+
}
+
return genericError(e, 403, msg)
+
}
+
+
func InvalidTokenError(e echo.Context) error {
+
return InputError(e, to.StringPtr("InvalidToken"))
+
}
+
+
func ExpiredTokenError(e echo.Context) error {
+
// WARN: See https://github.com/bluesky-social/atproto/discussions/3319
+
return e.JSON(400, map[string]string{
+
"error": "ExpiredToken",
+
"message": "*",
+
})
}
func genericError(e echo.Context, code int, msg string) error {
+28 -2
models/models.go
···
"context"
"time"
-
"github.com/bluesky-social/indigo/atproto/crypto"
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
)
type Repo struct {
···
EmailUpdateCodeExpiresAt *time.Time
PasswordResetCode *string
PasswordResetCodeExpiresAt *time.Time
+
PlcOperationCode *string
+
PlcOperationCodeExpiresAt *time.Time
+
AccountDeleteCode *string
+
AccountDeleteCodeExpiresAt *time.Time
Password string
SigningKey []byte
Rev string
Root []byte
Preferences []byte
+
Deactivated bool
}
func (r *Repo) SignFor(ctx context.Context, did string, msg []byte) ([]byte, error) {
-
k, err := crypto.ParsePrivateBytesK256(r.SigningKey)
+
k, err := atcrypto.ParsePrivateBytesK256(r.SigningKey)
if err != nil {
return nil, err
}
···
return sig, nil
}
+
func (r *Repo) Status() *string {
+
var status *string
+
if r.Deactivated {
+
status = to.StringPtr("deactivated")
+
}
+
return status
+
}
+
+
func (r *Repo) Active() bool {
+
return r.Status() == nil
+
}
+
type Actor struct {
Did string `gorm:"primaryKey"`
Handle string `gorm:"uniqueIndex"`
···
Did string `gorm:"index;index:idx_blob_did_cid"`
Cid []byte `gorm:"index;index:idx_blob_did_cid"`
RefCount int
+
Storage string `gorm:"default:sqlite"`
}
type BlobPart struct {
···
Idx int `gorm:"primaryKey"`
Data []byte
}
+
+
type ReservedKey struct {
+
KeyDid string `gorm:"primaryKey"`
+
Did *string `gorm:"index"`
+
PrivateKey []byte
+
CreatedAt time.Time `gorm:"index"`
+
}
+48 -24
oauth/client/manager.go
···
cli *http.Client
logger *slog.Logger
jwksCache cache.Cache[string, jwk.Key]
-
metadataCache cache.Cache[string, Metadata]
+
metadataCache cache.Cache[string, *Metadata]
}
type ManagerArgs struct {
···
}
jwksCache := cache.NewCache[string, jwk.Key]().WithLRU().WithMaxKeys(500).WithTTL(5 * time.Minute)
-
metadataCache := cache.NewCache[string, Metadata]().WithLRU().WithMaxKeys(500).WithTTL(5 * time.Minute)
+
metadataCache := cache.NewCache[string, *Metadata]().WithLRU().WithMaxKeys(500).WithTTL(5 * time.Minute)
return &Manager{
cli: args.Cli,
···
}
var jwks jwk.Key
-
if metadata.JWKS != nil {
-
// TODO: this is kinda bad but whatever for now. there could obviously be more than one jwk, and we need to
-
// make sure we use the right one
-
k, err := helpers.ParseJWKFromBytes((*metadata.JWKS)[0])
-
if err != nil {
-
return nil, err
-
}
-
jwks = k
-
} else if metadata.JWKSURI != nil {
-
maybeJwks, err := cm.getClientJwks(ctx, clientId, *metadata.JWKSURI)
-
if err != nil {
-
return nil, err
-
}
+
if metadata.TokenEndpointAuthMethod == "private_key_jwt" {
+
if metadata.JWKS != nil && len(metadata.JWKS.Keys) > 0 {
+
// TODO: this is kinda bad but whatever for now. there could obviously be more than one jwk, and we need to
+
// make sure we use the right one
+
b, err := json.Marshal(metadata.JWKS.Keys[0])
+
if err != nil {
+
return nil, err
+
}
-
jwks = maybeJwks
+
k, err := helpers.ParseJWKFromBytes(b)
+
if err != nil {
+
return nil, err
+
}
+
+
jwks = k
+
} else if metadata.JWKS != nil {
+
} else if metadata.JWKSURI != nil {
+
maybeJwks, err := cm.getClientJwks(ctx, clientId, *metadata.JWKSURI)
+
if err != nil {
+
return nil, err
+
}
+
+
jwks = maybeJwks
+
} else {
+
return nil, fmt.Errorf("no valid jwks found in oauth client metadata")
+
}
}
return &Client{
···
}
func (cm *Manager) getClientMetadata(ctx context.Context, clientId string) (*Metadata, error) {
-
metadataCached, ok := cm.metadataCache.Get(clientId)
+
cached, ok := cm.metadataCache.Get(clientId)
if !ok {
req, err := http.NewRequestWithContext(ctx, "GET", clientId, nil)
if err != nil {
···
return nil, err
}
+
cm.metadataCache.Set(clientId, validated, 10*time.Minute)
+
return validated, nil
} else {
-
return &metadataCached, nil
+
return cached, nil
}
}
···
return nil, fmt.Errorf("error unmarshaling metadata: %w", err)
}
+
if metadata.ClientURI == "" {
+
u, err := url.Parse(metadata.ClientID)
+
if err != nil {
+
return nil, fmt.Errorf("unable to parse client id: %w", err)
+
}
+
u.RawPath = ""
+
u.RawQuery = ""
+
metadata.ClientURI = u.String()
+
}
+
u, err := url.Parse(metadata.ClientURI)
if err != nil {
return nil, fmt.Errorf("unable to parse client uri: %w", err)
+
}
+
+
if metadata.ClientName == "" {
+
metadata.ClientName = metadata.ClientURI
}
if isLocalHostname(u.Hostname()) {
-
return nil, errors.New("`client_uri` hostname is invalid")
+
return nil, fmt.Errorf("`client_uri` hostname is invalid: %s", u.Hostname())
}
if metadata.Scope == "" {
···
return nil, errors.New("private_key_jwt auth method requires jwks or jwks_uri")
}
-
if metadata.JWKS != nil && len(*metadata.JWKS) == 0 {
+
if metadata.JWKS != nil && len(metadata.JWKS.Keys) == 0 {
return nil, errors.New("private_key_jwt auth method requires atleast one key in jwks")
}
···
return nil, errors.New("at least one `redirect_uri` is required")
}
-
if metadata.ApplicationType == "native" && metadata.TokenEndpointAuthMethod == "none" {
+
if metadata.ApplicationType == "native" && metadata.TokenEndpointAuthMethod != "none" {
return nil, errors.New("native clients must authenticate using `none` method")
}
···
if u.Scheme != "http" {
return nil, fmt.Errorf("loopback redirect uri %s must use http", ruri)
}
-
-
break
case u.Scheme == "http":
return nil, errors.New("only loopbvack redirect uris are allowed to use the `http` scheme")
case u.Scheme == "https":
if isLocalHostname(u.Hostname()) {
return nil, fmt.Errorf("redirect uri %s's domain must not be a local hostname", ruri)
}
-
break
case strings.Contains(u.Scheme, "."):
if metadata.ApplicationType != "native" {
return nil, errors.New("private-use uri scheme redirect uris are only allowed for native apps")
+20 -16
oauth/client/metadata.go
···
package client
type Metadata struct {
-
ClientID string `json:"client_id"`
-
ClientName string `json:"client_name"`
-
ClientURI string `json:"client_uri"`
-
LogoURI string `json:"logo_uri"`
-
TOSURI string `json:"tos_uri"`
-
PolicyURI string `json:"policy_uri"`
-
RedirectURIs []string `json:"redirect_uris"`
-
GrantTypes []string `json:"grant_types"`
-
ResponseTypes []string `json:"response_types"`
-
ApplicationType string `json:"application_type"`
-
DpopBoundAccessTokens bool `json:"dpop_bound_access_tokens"`
-
JWKSURI *string `json:"jwks_uri,omitempty"`
-
JWKS *[][]byte `json:"jwks,omitempty"`
-
Scope string `json:"scope"`
-
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method"`
-
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg"`
+
ClientID string `json:"client_id"`
+
ClientName string `json:"client_name"`
+
ClientURI string `json:"client_uri"`
+
LogoURI string `json:"logo_uri"`
+
TOSURI string `json:"tos_uri"`
+
PolicyURI string `json:"policy_uri"`
+
RedirectURIs []string `json:"redirect_uris"`
+
GrantTypes []string `json:"grant_types"`
+
ResponseTypes []string `json:"response_types"`
+
ApplicationType string `json:"application_type"`
+
DpopBoundAccessTokens bool `json:"dpop_bound_access_tokens"`
+
JWKSURI *string `json:"jwks_uri,omitempty"`
+
JWKS *MetadataJwks `json:"jwks,omitempty"`
+
Scope string `json:"scope"`
+
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method"`
+
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg"`
+
}
+
+
type MetadataJwks struct {
+
Keys []any `json:"keys"`
}
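With `jwks` now modeled as an object carrying a `keys` array (the standard JWK Set shape) instead of a slice of raw byte blobs, a confidential client's metadata document unmarshals directly into the struct. A small sketch, using the field names exactly as tagged above and an obviously fake key; the metadata URL and values are illustrative only:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/haileyok/cocoon/oauth/client"
)

func exampleParseMetadata() error {
	doc := []byte(`{
	  "client_id": "https://app.example.com/client-metadata.json",
	  "client_name": "Example App",
	  "client_uri": "https://app.example.com",
	  "redirect_uris": ["https://app.example.com/callback"],
	  "grant_types": ["authorization_code", "refresh_token"],
	  "response_types": ["code"],
	  "application_type": "web",
	  "dpop_bound_access_tokens": true,
	  "scope": "atproto",
	  "token_endpoint_auth_method": "private_key_jwt",
	  "jwks": {"keys": [{"kty": "EC", "crv": "P-256", "x": "...", "y": "..."}]}
	}`)

	// The jwks object decodes into *MetadataJwks, so key count checks become
	// len(md.JWKS.Keys) rather than dereferencing a byte-slice pointer.
	var md client.Metadata
	if err := json.Unmarshal(doc, &md); err != nil {
		return err
	}
	fmt.Println(md.ClientName, len(md.JWKS.Keys)) // "Example App 1"
	return nil
}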
+6 -2
oauth/dpop/manager.go
···
Hostname string
}
+
var (
+
ErrUseDpopNonce = errors.New("use_dpop_nonce")
+
)
+
func NewManager(args ManagerArgs) *Manager {
if args.Logger == nil {
args.Logger = slog.Default()
···
nonce, _ := claims["nonce"].(string)
if nonce == "" {
// WARN: this _must_ be `use_dpop_nonce` so that clients know they should make another request
-
return nil, errors.New("use_dpop_nonce")
+
return nil, ErrUseDpopNonce
}
if nonce != "" && !dm.nonce.Check(nonce) {
// WARN: this _must_ be `use_dpop_nonce` so that clients will fetch a new nonce
-
return nil, errors.New("use_dpop_nonce")
+
return nil, ErrUseDpopNonce
}
ath, _ := claims["ath"].(string)
+3 -2
oauth/dpop/nonce.go
···
}
func (n *Nonce) Check(nonce string) bool {
-
n.mu.RLock()
-
defer n.mu.RUnlock()
+
n.mu.Lock()
+
defer n.mu.Unlock()
+
n.rotate()
return nonce == n.prev || nonce == n.curr || nonce == n.next
}
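Since the check now rotates the nonce window in place, it has to hold the exclusive lock rather than a read lock; with `RLock` two concurrent checks could both rotate and race on the shared fields. A minimal sketch of the pattern (the field names, rotation interval, and value generator are assumptions, not the actual implementation):

package example

import (
	"crypto/rand"
	"encoding/hex"
	"sync"
	"time"
)

// rotatingNonce is an illustrative stand-in for the real Nonce type.
type rotatingNonce struct {
	mu               sync.Mutex
	prev, curr, next string
	rotatedAt        time.Time
	interval         time.Duration
}

func randomValue() string {
	b := make([]byte, 16)
	_, _ = rand.Read(b) // illustrative; real code should handle the error
	return hex.EncodeToString(b)
}

// Check both rotates the window (a write) and compares against it, so it
// must hold the exclusive lock for the whole call; a read lock would let two
// goroutines rotate concurrently and race on prev/curr/next.
func (n *rotatingNonce) Check(nonce string) bool {
	n.mu.Lock()
	defer n.mu.Unlock()
	if time.Since(n.rotatedAt) >= n.interval {
		n.prev, n.curr, n.next = n.curr, n.next, randomValue()
		n.rotatedAt = time.Now()
	}
	return nonce == n.prev || nonce == n.curr || nonce == n.next
}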
+32
oauth/helpers.go
···
"errors"
"fmt"
"net/url"
+
"time"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/oauth/constants"
+
"github.com/haileyok/cocoon/oauth/provider"
)
func GenerateCode() string {
···
return reqId, nil
}
+
+
type SessionAgeResult struct {
+
SessionAge time.Duration
+
RefreshAge time.Duration
+
SessionExpired bool
+
RefreshExpired bool
+
}
+
+
func GetSessionAgeFromToken(t provider.OauthToken) SessionAgeResult {
+
sessionLifetime := constants.PublicClientSessionLifetime
+
refreshLifetime := constants.PublicClientRefreshLifetime
+
if t.ClientAuth.Method != "none" {
+
sessionLifetime = constants.ConfidentialClientSessionLifetime
+
refreshLifetime = constants.ConfidentialClientRefreshLifetime
+
}
+
+
res := SessionAgeResult{}
+
+
res.SessionAge = time.Since(t.CreatedAt)
+
if res.SessionAge > sessionLifetime {
+
res.SessionExpired = true
+
}
+
+
res.RefreshAge = time.Since(t.UpdatedAt)
+
if res.RefreshAge > refreshLifetime {
+
res.RefreshExpired = true
+
}
+
+
return res
+
}
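Inside the server package, a refresh-grant path can consume the result roughly like this; the wrapper function itself is hypothetical, while the real call sites appear in handle_oauth_token.go and handle_account.go later in this diff:

// checkTokenFreshness sketches how a handler folds SessionAgeResult into the
// usual error responses; helpers, to, oauth, and provider are the same
// packages used elsewhere in this diff.
func (s *Server) checkTokenFreshness(e echo.Context, token provider.OauthToken) error {
	ageRes := oauth.GetSessionAgeFromToken(token)
	switch {
	case ageRes.SessionExpired:
		// the session has outlived its client-class lifetime entirely
		return helpers.InputError(e, to.StringPtr("Session expired"))
	case ageRes.RefreshExpired:
		// only the refresh window lapsed; the user must re-authorize
		return helpers.InputError(e, to.StringPtr("Refresh token expired"))
	default:
		return nil // still fresh; a new token pair may be minted
	}
}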
+2
oauth/provider/models.go
···
Code string `gorm:"index"`
Token string `gorm:"uniqueIndex"`
RefreshToken string `gorm:"uniqueIndex"`
+
Ip string
}
type OauthAuthorizationRequest struct {
···
Sub *string
Code *string
Accepted *bool
+
Ip string
}
+36 -20
plc/client.go
···
"net/url"
"strings"
-
"github.com/bluesky-social/indigo/atproto/crypto"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/bluesky-social/indigo/util"
"github.com/haileyok/cocoon/identity"
)
···
h *http.Client
service string
pdsHostname string
-
rotationKey *crypto.PrivateKeyK256
+
rotationKey *atcrypto.PrivateKeyK256
}
type ClientArgs struct {
···
args.H = util.RobustHTTPClient()
}
-
rk, err := crypto.ParsePrivateBytesK256([]byte(args.RotationKey))
+
rk, err := atcrypto.ParsePrivateBytesK256([]byte(args.RotationKey))
if err != nil {
return nil, err
}
···
}, nil
}
-
func (c *Client) CreateDID(sigkey *crypto.PrivateKeyK256, recovery string, handle string) (string, *Operation, error) {
-
pubsigkey, err := sigkey.PublicKey()
+
func (c *Client) CreateDID(sigkey *atcrypto.PrivateKeyK256, recovery string, handle string) (string, *Operation, error) {
+
creds, err := c.CreateDidCredentials(sigkey, recovery, handle)
if err != nil {
return "", nil, err
}
-
pubrotkey, err := c.rotationKey.PublicKey()
+
op := Operation{
+
Type: "plc_operation",
+
VerificationMethods: creds.VerificationMethods,
+
RotationKeys: creds.RotationKeys,
+
AlsoKnownAs: creds.AlsoKnownAs,
+
Services: creds.Services,
+
Prev: nil,
+
}
+
+
if err := c.SignOp(sigkey, &op); err != nil {
+
return "", nil, err
+
}
+
+
did, err := DidFromOp(&op)
if err != nil {
return "", nil, err
}
+
return did, &op, nil
+
}
+
+
func (c *Client) CreateDidCredentials(sigkey *atcrypto.PrivateKeyK256, recovery string, handle string) (*DidCredentials, error) {
+
pubsigkey, err := sigkey.PublicKey()
+
if err != nil {
+
return nil, err
+
}
+
+
pubrotkey, err := c.rotationKey.PublicKey()
+
if err != nil {
+
return nil, err
+
}
+
// todo
rotationKeys := []string{pubrotkey.DIDKey()}
if recovery != "" {
···
}(recovery)
}
-
op := Operation{
-
Type: "plc_operation",
+
creds := DidCredentials{
VerificationMethods: map[string]string{
"atproto": pubsigkey.DIDKey(),
},
···
Endpoint: "https://" + c.pdsHostname,
},
},
-
Prev: nil,
}
-
if err := c.SignOp(sigkey, &op); err != nil {
-
return "", nil, err
-
}
-
-
did, err := DidFromOp(&op)
-
if err != nil {
-
return "", nil, err
-
}
-
-
return did, &op, nil
+
return &creds, nil
}
-
func (c *Client) SignOp(sigkey *crypto.PrivateKeyK256, op *Operation) error {
+
func (c *Client) SignOp(sigkey *atcrypto.PrivateKeyK256, op *Operation) error {
b, err := op.MarshalCBOR()
if err != nil {
return err
+10 -2
plc/types.go
···
import (
"encoding/json"
-
"github.com/bluesky-social/indigo/atproto/data"
+
"github.com/bluesky-social/indigo/atproto/atdata"
"github.com/haileyok/cocoon/identity"
cbg "github.com/whyrusleeping/cbor-gen"
)
+
+
+
type DidCredentials struct {
+
VerificationMethods map[string]string `json:"verificationMethods"`
+
RotationKeys []string `json:"rotationKeys"`
+
AlsoKnownAs []string `json:"alsoKnownAs"`
+
Services map[string]identity.OperationService `json:"services"`
+
}
type Operation struct {
Type string `json:"type"`
···
return nil, err
}
-
b, err = data.MarshalCBOR(m)
+
b, err = atdata.MarshalCBOR(m)
if err != nil {
return nil, err
}
+85
recording_blockstore/recording_blockstore.go
···
+
package recording_blockstore
+
+
import (
+
"context"
+
"fmt"
+
+
blockformat "github.com/ipfs/go-block-format"
+
"github.com/ipfs/go-cid"
+
blockstore "github.com/ipfs/go-ipfs-blockstore"
+
)
+
+
type RecordingBlockstore struct {
+
base blockstore.Blockstore
+
+
inserts map[cid.Cid]blockformat.Block
+
reads map[cid.Cid]blockformat.Block
+
}
+
+
func New(base blockstore.Blockstore) *RecordingBlockstore {
+
return &RecordingBlockstore{
+
base: base,
+
inserts: make(map[cid.Cid]blockformat.Block),
+
reads: make(map[cid.Cid]blockformat.Block),
+
}
+
}
+
+
func (bs *RecordingBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
+
return bs.base.Has(ctx, c)
+
}
+
+
func (bs *RecordingBlockstore) Get(ctx context.Context, c cid.Cid) (blockformat.Block, error) {
+
b, err := bs.base.Get(ctx, c)
+
if err != nil {
+
return nil, err
+
}
+
bs.reads[c] = b
+
return b, nil
+
}
+
+
func (bs *RecordingBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
+
return bs.base.GetSize(ctx, c)
+
}
+
+
func (bs *RecordingBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error {
+
return bs.base.DeleteBlock(ctx, c)
+
}
+
+
func (bs *RecordingBlockstore) Put(ctx context.Context, block blockformat.Block) error {
+
if err := bs.base.Put(ctx, block); err != nil {
+
return err
+
}
+
bs.inserts[block.Cid()] = block
+
return nil
+
}
+
+
func (bs *RecordingBlockstore) PutMany(ctx context.Context, blocks []blockformat.Block) error {
+
if err := bs.base.PutMany(ctx, blocks); err != nil {
+
return err
+
}
+
+
for _, b := range blocks {
+
bs.inserts[b.Cid()] = b
+
}
+
+
return nil
+
}
+
+
func (bs *RecordingBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+
return nil, fmt.Errorf("iteration not allowed on recording blockstore")
+
}
+
+
func (bs *RecordingBlockstore) HashOnRead(enabled bool) {
+
}
+
+
func (bs *RecordingBlockstore) GetWriteLog() map[cid.Cid]blockformat.Block {
+
return bs.inserts
+
}
+
+
func (bs *RecordingBlockstore) GetReadLog() []blockformat.Block {
+
var blocks []blockformat.Block
+
for _, b := range bs.reads {
+
blocks = append(blocks, b)
+
}
+
return blocks
+
}
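The intended use (a sketch of an assumed call site, not anything in this diff) is to wrap an existing blockstore, run a repo operation against the wrapper, and then read back exactly which blocks were written, without diffing the underlying store afterwards:

package example

import (
	"fmt"

	"github.com/haileyok/cocoon/recording_blockstore"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
)

// collectWrites wraps base in a RecordingBlockstore, runs op against the
// wrapper, and returns a description of every block the operation wrote.
func collectWrites(base blockstore.Blockstore, op func(blockstore.Blockstore) error) ([]string, error) {
	rbs := recording_blockstore.New(base)
	if err := op(rbs); err != nil {
		return nil, err
	}
	var out []string
	for c, blk := range rbs.GetWriteLog() {
		out = append(out, fmt.Sprintf("%s (%d bytes)", c.String(), len(blk.RawData())))
	}
	return out, nil
}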
+30
server/blockstore_variant.go
···
+
package server
+
+
import (
+
"github.com/haileyok/cocoon/sqlite_blockstore"
+
blockstore "github.com/ipfs/go-ipfs-blockstore"
+
)
+
+
type BlockstoreVariant int
+
+
const (
+
BlockstoreVariantSqlite = iota
+
)
+
+
func MustReturnBlockstoreVariant(maybeBsv string) BlockstoreVariant {
+
switch maybeBsv {
+
case "sqlite":
+
return BlockstoreVariantSqlite
+
default:
+
panic("invalid blockstore variant provided")
+
}
+
}
+
+
func (s *Server) getBlockstore(did string) blockstore.Blockstore {
+
switch s.config.BlockstoreVariant {
+
case BlockstoreVariantSqlite:
+
return sqlite_blockstore.New(did, s.db)
+
default:
+
return sqlite_blockstore.New(did, s.db)
+
}
+
}
+37 -7
server/handle_account.go
···
import (
"time"
+
"github.com/haileyok/cocoon/oauth"
+
"github.com/haileyok/cocoon/oauth/constants"
"github.com/haileyok/cocoon/oauth/provider"
+
"github.com/hako/durafmt"
"github.com/labstack/echo/v4"
)
func (s *Server) handleAccount(e echo.Context) error {
+
ctx := e.Request().Context()
repo, sess, err := s.getSessionRepoOrErr(e)
if err != nil {
return e.Redirect(303, "/account/signin")
}
-
now := time.Now()
+
oldestPossibleSession := time.Now().Add(constants.ConfidentialClientSessionLifetime)
var tokens []provider.OauthToken
-
if err := s.db.Raw("SELECT * FROM oauth_tokens WHERE sub = ? AND expires_at >= ? ORDER BY created_at ASC", nil, repo.Repo.Did, now).Scan(&tokens).Error; err != nil {
+
if err := s.db.Raw("SELECT * FROM oauth_tokens WHERE sub = ? AND created_at < ? ORDER BY created_at ASC", nil, repo.Repo.Did, oldestPossibleSession).Scan(&tokens).Error; err != nil {
s.logger.Error("couldnt fetch oauth sessions for account", "did", repo.Repo.Did, "error", err)
sess.AddFlash("Unable to fetch sessions. See server logs for more details.", "error")
sess.Save(e.Request(), e.Response())
···
})
}
+
var filtered []provider.OauthToken
+
for _, t := range tokens {
+
ageRes := oauth.GetSessionAgeFromToken(t)
+
if ageRes.SessionExpired {
+
continue
+
}
+
filtered = append(filtered, t)
+
}
+
+
now := time.Now()
+
tokenInfo := []map[string]string{}
for _, t := range filtered {
+
ageRes := oauth.GetSessionAgeFromToken(t)
+
maxTime := constants.PublicClientSessionLifetime
+
if t.ClientAuth.Method != "none" {
+
maxTime = constants.ConfidentialClientSessionLifetime
+
}
+
+
var clientName string
+
metadata, err := s.oauthProvider.ClientManager.GetClient(ctx, t.ClientId)
+
if err != nil {
+
clientName = t.ClientId
+
} else {
+
clientName = metadata.Metadata.ClientName
+
}
+
tokenInfo = append(tokenInfo, map[string]string{
-
"ClientId": t.ClientId,
-
"CreatedAt": t.CreatedAt.Format("02 Jan 06 15:04 MST"),
-
"UpdatedAt": t.CreatedAt.Format("02 Jan 06 15:04 MST"),
-
"ExpiresAt": t.CreatedAt.Format("02 Jan 06 15:04 MST"),
-
"Token": t.Token,
+
"ClientName": clientName,
+
"Age": durafmt.Parse(ageRes.SessionAge).LimitFirstN(2).String(),
+
"LastUpdated": durafmt.Parse(now.Sub(t.UpdatedAt)).LimitFirstN(2).String(),
+
"ExpiresIn": durafmt.Parse(t.CreatedAt.Add(maxTime).Sub(now)).LimitFirstN(2).String(),
+
"Token": t.Token,
+
"Ip": t.Ip,
})
}
+1 -1
server/handle_actor_get_preferences.go
···
err := json.Unmarshal(repo.Preferences, &prefs)
if err != nil || prefs["preferences"] == nil {
prefs = map[string]any{
-
"preferences": map[string]any{},
+
"preferences": []any{},
}
}
+29
server/handle_identity_request_plc_operation.go
···
+
package server
+
+
import (
+
"fmt"
+
"time"
+
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/labstack/echo/v4"
+
)
+
+
func (s *Server) handleIdentityRequestPlcOperationSignature(e echo.Context) error {
+
urepo := e.Get("repo").(*models.RepoActor)
+
+
code := fmt.Sprintf("%s-%s", helpers.RandomVarchar(5), helpers.RandomVarchar(5))
+
eat := time.Now().Add(10 * time.Minute).UTC()
+
+
if err := s.db.Exec("UPDATE repos SET plc_operation_code = ?, plc_operation_code_expires_at = ? WHERE did = ?", nil, code, eat, urepo.Repo.Did).Error; err != nil {
+
s.logger.Error("error updating user", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := s.sendPlcTokenReset(urepo.Email, urepo.Handle, code); err != nil {
+
s.logger.Error("error sending mail", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
return e.NoContent(200)
+
}
+103
server/handle_identity_sign_plc_operation.go
···
+
package server
+
+
import (
+
"context"
+
"strings"
+
"time"
+
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
+
"github.com/haileyok/cocoon/identity"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/haileyok/cocoon/plc"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ComAtprotoSignPlcOperationRequest struct {
+
Token string `json:"token"`
+
VerificationMethods *map[string]string `json:"verificationMethods"`
+
RotationKeys *[]string `json:"rotationKeys"`
+
AlsoKnownAs *[]string `json:"alsoKnownAs"`
+
Services *map[string]identity.OperationService `json:"services"`
+
}
+
+
type ComAtprotoSignPlcOperationResponse struct {
+
Operation plc.Operation `json:"operation"`
+
}
+
+
func (s *Server) handleSignPlcOperation(e echo.Context) error {
+
repo := e.Get("repo").(*models.RepoActor)
+
+
var req ComAtprotoSignPlcOperationRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("error binding", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if !strings.HasPrefix(repo.Repo.Did, "did:plc:") {
+
return helpers.InputError(e, nil)
+
}
+
+
if repo.PlcOperationCode == nil || repo.PlcOperationCodeExpiresAt == nil {
+
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
}
+
+
if *repo.PlcOperationCode != req.Token {
+
return helpers.InvalidTokenError(e)
+
}
+
+
if time.Now().UTC().After(*repo.PlcOperationCodeExpiresAt) {
+
return helpers.ExpiredTokenError(e)
+
}
+
+
ctx := context.WithValue(e.Request().Context(), "skip-cache", true)
+
log, err := identity.FetchDidAuditLog(ctx, nil, repo.Repo.Did)
+
if err != nil {
+
s.logger.Error("error fetching doc", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
latest := log[len(log)-1]
+
+
op := plc.Operation{
+
Type: "plc_operation",
+
VerificationMethods: latest.Operation.VerificationMethods,
+
RotationKeys: latest.Operation.RotationKeys,
+
AlsoKnownAs: latest.Operation.AlsoKnownAs,
+
Services: latest.Operation.Services,
+
Prev: &latest.Cid,
+
}
+
if req.VerificationMethods != nil {
+
op.VerificationMethods = *req.VerificationMethods
+
}
+
if req.RotationKeys != nil {
+
op.RotationKeys = *req.RotationKeys
+
}
+
if req.AlsoKnownAs != nil {
+
op.AlsoKnownAs = *req.AlsoKnownAs
+
}
+
if req.Services != nil {
+
op.Services = *req.Services
+
}
+
+
k, err := atcrypto.ParsePrivateBytesK256(repo.SigningKey)
+
if err != nil {
+
s.logger.Error("error parsing signing key", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := s.plcClient.SignOp(k, &op); err != nil {
+
s.logger.Error("error signing plc operation", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := s.db.Exec("UPDATE repos SET plc_operation_code = NULL, plc_operation_code_expires_at = NULL WHERE did = ?", nil, repo.Repo.Did).Error; err != nil {
+
s.logger.Error("error updating repo", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
return e.JSON(200, ComAtprotoSignPlcOperationResponse{
+
Operation: op,
+
})
+
}
+87
server/handle_identity_submit_plc_operation.go
···
+
package server
+
+
import (
+
"context"
+
"slices"
+
"strings"
+
"time"
+
+
"github.com/bluesky-social/indigo/api/atproto"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
+
"github.com/bluesky-social/indigo/events"
+
"github.com/bluesky-social/indigo/util"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/haileyok/cocoon/plc"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ComAtprotoSubmitPlcOperationRequest struct {
+
Operation plc.Operation `json:"operation"`
+
}
+
+
func (s *Server) handleSubmitPlcOperation(e echo.Context) error {
+
repo := e.Get("repo").(*models.RepoActor)
+
+
var req ComAtprotoSubmitPlcOperationRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("error binding", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := e.Validate(req); err != nil {
+
return helpers.InputError(e, nil)
+
}
+
if !strings.HasPrefix(repo.Repo.Did, "did:plc:") {
+
return helpers.InputError(e, nil)
+
}
+
+
op := req.Operation
+
+
k, err := atcrypto.ParsePrivateBytesK256(repo.SigningKey)
+
if err != nil {
+
s.logger.Error("error parsing key", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
required, err := s.plcClient.CreateDidCredentials(k, "", repo.Actor.Handle)
+
if err != nil {
+
s.logger.Error("error crating did credentials", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
for _, expectedKey := range required.RotationKeys {
+
if !slices.Contains(op.RotationKeys, expectedKey) {
+
return helpers.InputError(e, nil)
+
}
+
}
+
if op.Services["atproto_pds"].Type != "AtprotoPersonalDataServer" {
+
return helpers.InputError(e, nil)
+
}
+
if op.Services["atproto_pds"].Endpoint != required.Services["atproto_pds"].Endpoint {
+
return helpers.InputError(e, nil)
+
}
+
if op.VerificationMethods["atproto"] != required.VerificationMethods["atproto"] {
+
return helpers.InputError(e, nil)
+
}
+
if len(op.AlsoKnownAs) == 0 || op.AlsoKnownAs[0] != required.AlsoKnownAs[0] {
+
return helpers.InputError(e, nil)
+
}
+
+
if err := s.plcClient.SendOperation(e.Request().Context(), repo.Repo.Did, &op); err != nil {
+
return err
+
}
+
+
if err := s.passport.BustDoc(context.TODO(), repo.Repo.Did); err != nil {
+
s.logger.Warn("error busting did doc", "error", err)
+
}
+
+
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
+
RepoIdentity: &atproto.SyncSubscribeRepos_Identity{
+
Did: repo.Repo.Did,
+
Seq: time.Now().UnixMicro(), // TODO: no
+
Time: time.Now().Format(util.ISO8601),
+
},
+
})
+
+
return nil
+
}
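Taken together with the two handlers above, the update flow a client walks through is: request an emailed signature token, ask the PDS to build and sign a new PLC operation, then submit it back. A rough sketch of that sequence against the standard XRPC procedures (payload shapes follow the request structs in this diff; the helper and host values are illustrative):

package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postXrpc is a tiny helper: authenticated JSON POST to an XRPC procedure.
func postXrpc(pds, accessJwt, nsid string, body, out any) error {
	b, err := json.Marshal(body)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", pds+"/xrpc/"+nsid, bytes.NewReader(b))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+accessJwt)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("%s: unexpected status %d", nsid, resp.StatusCode)
	}
	if out != nil {
		return json.NewDecoder(resp.Body).Decode(out)
	}
	return nil
}

// Step 1: ask the PDS to email the signature token.
func requestPlcToken(pds, accessJwt string) error {
	return postXrpc(pds, accessJwt, "com.atproto.identity.requestPlcOperationSignature", map[string]any{}, nil)
}

// Steps 2 and 3: once the emailed token is in hand, have the PDS sign the
// new operation (changes may carry rotationKeys, alsoKnownAs, services, or
// verificationMethods) and then submit it.
func signAndSubmitPlcOperation(pds, accessJwt, emailedToken string, changes map[string]any) error {
	signReq := map[string]any{"token": emailedToken}
	for k, v := range changes {
		signReq[k] = v
	}
	var signed struct {
		Operation json.RawMessage `json:"operation"`
	}
	if err := postXrpc(pds, accessJwt, "com.atproto.identity.signPlcOperation", signReq, &signed); err != nil {
		return err
	}
	return postXrpc(pds, accessJwt, "com.atproto.identity.submitPlcOperation", map[string]any{"operation": signed.Operation}, nil)
}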
+2 -11
server/handle_identity_update_handle.go
···
"github.com/Azure/go-autorest/autorest/to"
"github.com/bluesky-social/indigo/api/atproto"
-
"github.com/bluesky-social/indigo/atproto/crypto"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/bluesky-social/indigo/events"
"github.com/bluesky-social/indigo/util"
"github.com/haileyok/cocoon/identity"
···
Prev: &latest.Cid,
}
-
k, err := crypto.ParsePrivateBytesK256(repo.SigningKey)
+
k, err := atcrypto.ParsePrivateBytesK256(repo.SigningKey)
if err != nil {
s.logger.Error("error parsing signing key", "error", err)
return helpers.ServerError(e, nil)
···
if err := s.passport.BustDoc(context.TODO(), repo.Repo.Did); err != nil {
s.logger.Warn("error busting did doc", "error", err)
}
-
-
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
-
RepoHandle: &atproto.SyncSubscribeRepos_Handle{
-
Did: repo.Repo.Did,
-
Handle: req.Handle,
-
Seq: time.Now().UnixMicro(), // TODO: no
-
Time: time.Now().Format(util.ISO8601),
-
},
-
})
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
RepoIdentity: &atproto.SyncSubscribeRepos_Identity{
+3 -4
server/handle_import_repo.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/bluesky-social/indigo/repo"
-
"github.com/haileyok/cocoon/blockstore"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
blocks "github.com/ipfs/go-block-format"
···
return helpers.ServerError(e, nil)
}
-
bs := blockstore.New(urepo.Repo.Did, s.db)
+
bs := s.getBlockstore(urepo.Repo.Did)
cs, err := car.NewCarReader(bytes.NewReader(b))
if err != nil {
···
Value: b.RawData(),
}
-
if err := tx.Create(rec).Error; err != nil {
+
if err := tx.Save(rec).Error; err != nil {
return err
}
···
return helpers.ServerError(e, nil)
}
-
if err := bs.UpdateRepo(context.TODO(), root, rev); err != nil {
+
if err := s.UpdateRepo(context.TODO(), urepo.Repo.Did, root, rev); err != nil {
s.logger.Error("error updating repo after commit", "error", err)
return helpers.ServerError(e, nil)
}
+34
server/handle_label_query_labels.go
···
+
package server
+
+
import (
+
"github.com/labstack/echo/v4"
+
)
+
+
type Label struct {
+
Ver *int `json:"ver,omitempty"`
+
Src string `json:"src"`
+
Uri string `json:"uri"`
+
Cid *string `json:"cid,omitempty"`
+
Val string `json:"val"`
+
Neg *bool `json:"neg,omitempty"`
+
Cts string `json:"cts"`
+
Exp *string `json:"exp,omitempty"`
+
Sig []byte `json:"sig,omitempty"`
+
}
+
+
type ComAtprotoLabelQueryLabelsResponse struct {
+
Cursor *string `json:"cursor,omitempty"`
+
Labels []Label `json:"labels"`
+
}
+
+
func (s *Server) handleLabelQueryLabels(e echo.Context) error {
+
svc := e.Request().Header.Get("atproto-proxy")
+
if svc != "" || s.config.FallbackProxy != "" {
+
return s.handleProxy(e)
+
}
+
+
return e.JSON(200, ComAtprotoLabelQueryLabelsResponse{
+
Cursor: nil,
+
Labels: []Label{},
+
})
+
}
+1 -1
server/handle_oauth_authorize.go
···
code := oauth.GenerateCode()
-
if err := s.db.Exec("UPDATE oauth_authorization_requests SET sub = ?, code = ?, accepted = ? WHERE request_id = ?", nil, repo.Repo.Did, code, true, reqId).Error; err != nil {
+
if err := s.db.Exec("UPDATE oauth_authorization_requests SET sub = ?, code = ?, accepted = ?, ip = ? WHERE request_id = ?", nil, repo.Repo.Did, code, true, e.RealIP(), reqId).Error; err != nil {
s.logger.Error("error updating authorization request", "error", err)
return helpers.ServerError(e, nil)
}
+14 -2
server/handle_oauth_par.go
···
package server
import (
+
"errors"
"time"
"github.com/Azure/go-autorest/autorest/to"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/oauth"
"github.com/haileyok/cocoon/oauth/constants"
+
"github.com/haileyok/cocoon/oauth/dpop"
"github.com/haileyok/cocoon/oauth/provider"
"github.com/labstack/echo/v4"
)
···
// TODO: this seems wrong. should be a way to get the entire request url i believe, but this will work for now
dpopProof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, "https://"+s.config.Hostname+e.Request().URL.String(), e.Request().Header, nil)
if err != nil {
+
if errors.Is(err, dpop.ErrUseDpopNonce) {
+
nonce := s.oauthProvider.NextNonce()
+
if nonce != "" {
+
e.Response().Header().Set("DPoP-Nonce", nonce)
+
e.Response().Header().Add("access-control-expose-headers", "DPoP-Nonce")
+
}
+
return e.JSON(400, map[string]string{
+
"error": "use_dpop_nonce",
+
})
+
}
s.logger.Error("error getting dpop proof", "error", err)
-
return helpers.InputError(e, to.StringPtr(err.Error()))
+
return helpers.InputError(e, nil)
}
client, clientAuth, err := s.oauthProvider.AuthenticateClient(e.Request().Context(), parRequest.AuthenticateClientRequestBase, dpopProof, &provider.AuthenticateClientOptions{
···
AllowMissingDpopProof: true,
})
if err != nil {
-
s.logger.Error("error authenticating client", "error", err)
+
s.logger.Error("error authenticating client", "client_id", parRequest.ClientID, "error", err)
return helpers.InputError(e, to.StringPtr(err.Error()))
}
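Both OAuth handlers now answer a missing or stale DPoP nonce with a 400 `use_dpop_nonce` body plus a fresh `DPoP-Nonce` response header, which is the standard RFC 9449 retry signal. A sketch of how a client is expected to react (the proof-building callback is assumed, not something from this codebase):

package example

import (
	"io"
	"net/http"
	"strings"
)

// doWithDpop sends a DPoP-bound request; if the server demands a nonce, it
// rebuilds the proof with the DPoP-Nonce header value and retries once.
func doWithDpop(cli *http.Client, method, url, body string, buildProof func(nonce string) string) (*http.Response, error) {
	send := func(nonce string) (*http.Response, error) {
		req, err := http.NewRequest(method, url, strings.NewReader(body))
		if err != nil {
			return nil, err
		}
		req.Header.Set("DPoP", buildProof(nonce))
		return cli.Do(req)
	}

	resp, err := send("")
	if err != nil {
		return nil, err
	}
	// A stricter client would also check the body for {"error":"use_dpop_nonce"};
	// the header's presence on a 400 is the signal this sketch keys on.
	if resp.StatusCode == 400 {
		if nonce := resp.Header.Get("DPoP-Nonce"); nonce != "" {
			io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
			return send(nonce)
		}
	}
	return resp, nil
}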
+18 -12
server/handle_oauth_token.go
···
"bytes"
"crypto/sha256"
"encoding/base64"
+
"errors"
"fmt"
"slices"
"time"
···
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/oauth"
"github.com/haileyok/cocoon/oauth/constants"
+
"github.com/haileyok/cocoon/oauth/dpop"
"github.com/haileyok/cocoon/oauth/provider"
"github.com/labstack/echo/v4"
)
···
proof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, e.Request().URL.String(), e.Request().Header, nil)
if err != nil {
+
if errors.Is(err, dpop.ErrUseDpopNonce) {
+
nonce := s.oauthProvider.NextNonce()
+
if nonce != "" {
+
e.Response().Header().Set("DPoP-Nonce", nonce)
+
e.Response().Header().Add("access-control-expose-headers", "DPoP-Nonce")
+
}
+
return e.JSON(400, map[string]string{
+
"error": "use_dpop_nonce",
+
})
+
}
s.logger.Error("error getting dpop proof", "error", err)
-
return helpers.InputError(e, to.StringPtr(err.Error()))
+
return helpers.InputError(e, nil)
}
client, clientAuth, err := s.oauthProvider.AuthenticateClient(e.Request().Context(), req.AuthenticateClientRequestBase, proof, &provider.AuthenticateClientOptions{
AllowMissingDpopProof: true,
})
if err != nil {
-
s.logger.Error("error authenticating client", "error", err)
+
s.logger.Error("error authenticating client", "client_id", req.ClientID, "error", err)
return helpers.InputError(e, to.StringPtr(err.Error()))
}
···
Code: *authReq.Code,
Token: accessString,
RefreshToken: refreshToken,
+
Ip: authReq.Ip,
}, nil).Error; err != nil {
s.logger.Error("error creating token in db", "error", err)
return helpers.ServerError(e, nil)
···
return helpers.InputError(e, to.StringPtr("dpop proof does not match expected jkt"))
}
-
sessionLifetime := constants.PublicClientSessionLifetime
-
refreshLifetime := constants.PublicClientRefreshLifetime
-
if clientAuth.Method != "none" {
-
sessionLifetime = constants.ConfidentialClientSessionLifetime
-
refreshLifetime = constants.ConfidentialClientRefreshLifetime
-
}
+
ageRes := oauth.GetSessionAgeFromToken(oauthToken)
-
sessionAge := time.Since(oauthToken.CreatedAt)
-
if sessionAge > sessionLifetime {
+
if ageRes.SessionExpired {
return helpers.InputError(e, to.StringPtr("Session expired"))
}
-
refreshAge := time.Since(oauthToken.UpdatedAt)
-
if refreshAge > refreshLifetime {
+
if ageRes.RefreshExpired {
return helpers.InputError(e, to.StringPtr("Refresh token expired"))
}
+43 -18
server/handle_proxy.go
···
secp256k1secec "gitlab.com/yawning/secp256k1-voi/secec"
)
-
func (s *Server) handleProxy(e echo.Context) error {
-
repo, isAuthed := e.Get("repo").(*models.RepoActor)
-
-
pts := strings.Split(e.Request().URL.Path, "/")
-
if len(pts) != 3 {
-
return fmt.Errorf("incorrect number of parts")
-
}
-
+
func (s *Server) getAtprotoProxyEndpointFromRequest(e echo.Context) (string, string, error) {
svc := e.Request().Header.Get("atproto-proxy")
-
if svc == "" {
-
svc = "did:web:api.bsky.app#bsky_appview" // TODO: should be a config var probably
+
if svc == "" && s.config.FallbackProxy != "" {
+
svc = s.config.FallbackProxy
}
svcPts := strings.Split(svc, "#")
if len(svcPts) != 2 {
-
return fmt.Errorf("invalid service header")
+
return "", "", fmt.Errorf("invalid service header")
}
svcDid := svcPts[0]
···
doc, err := s.passport.FetchDoc(e.Request().Context(), svcDid)
if err != nil {
-
return err
+
return "", "", err
}
var endpoint string
···
}
}
+
return endpoint, svcDid, nil
+
}
+
+
func (s *Server) handleProxy(e echo.Context) error {
+
lgr := s.logger.With("handler", "handleProxy")
+
+
repo, isAuthed := e.Get("repo").(*models.RepoActor)
+
+
pts := strings.Split(e.Request().URL.Path, "/")
+
if len(pts) != 3 {
+
return fmt.Errorf("incorrect number of parts")
+
}
+
+
endpoint, svcDid, err := s.getAtprotoProxyEndpointFromRequest(e)
+
if err != nil {
+
lgr.Error("could not get atproto proxy", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
requrl := e.Request().URL
requrl.Host = strings.TrimPrefix(endpoint, "https://")
requrl.Scheme = "https"
···
}
hj, err := json.Marshal(header)
if err != nil {
-
s.logger.Error("error marshaling header", "error", err)
+
lgr.Error("error marshaling header", "error", err)
return helpers.ServerError(e, nil)
}
encheader := strings.TrimRight(base64.RawURLEncoding.EncodeToString(hj), "=")
+
// When proxying app.bsky.feed.getFeed the token is actually issued for the
+
// underlying feed generator and the app view passes it on. This allows the
+
// getFeed implementation to pass in the desired lxm and aud for the token
+
// and then just delegate to the general proxying logic
+
lxm, proxyTokenLxmExists := e.Get("proxyTokenLxm").(string)
+
if !proxyTokenLxmExists || lxm == "" {
+
lxm = pts[2]
+
}
+
aud, proxyTokenAudExists := e.Get("proxyTokenAud").(string)
+
if !proxyTokenAudExists || aud == "" {
+
aud = svcDid
+
}
+
payload := map[string]any{
"iss": repo.Repo.Did,
-
"aud": svcDid,
-
"lxm": pts[2],
+
"aud": aud,
+
"lxm": lxm,
"jti": uuid.NewString(),
"exp": time.Now().Add(1 * time.Minute).UTC().Unix(),
}
pj, err := json.Marshal(payload)
if err != nil {
-
s.logger.Error("error marashaling payload", "error", err)
+
lgr.Error("error marashaling payload", "error", err)
return helpers.ServerError(e, nil)
}
···
sk, err := secp256k1secec.NewPrivateKey(repo.SigningKey)
if err != nil {
-
s.logger.Error("can't load private key", "error", err)
+
lgr.Error("can't load private key", "error", err)
return err
}
R, S, _, err := sk.SignRaw(rand.Reader, hash[:])
if err != nil {
-
s.logger.Error("error signing", "error", err)
+
lgr.Error("error signing", "error", err)
}
rBytes := R.Bytes()
+35
server/handle_proxy_get_feed.go
···
+
package server
+
+
import (
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/bluesky-social/indigo/api/atproto"
+
"github.com/bluesky-social/indigo/api/bsky"
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
"github.com/bluesky-social/indigo/xrpc"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/labstack/echo/v4"
+
)
+
+
func (s *Server) handleProxyBskyFeedGetFeed(e echo.Context) error {
+
feedUri, err := syntax.ParseATURI(e.QueryParam("feed"))
+
if err != nil {
+
return helpers.InputError(e, to.StringPtr("invalid feed uri"))
+
}
+
+
appViewEndpoint, _, err := s.getAtprotoProxyEndpointFromRequest(e)
+
if err != nil {
+
e.Logger().Error("could not get atproto proxy", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
appViewClient := xrpc.Client{
+
Host: appViewEndpoint,
+
}
+
feedRecord, err := atproto.RepoGetRecord(e.Request().Context(), &appViewClient, "", feedUri.Collection().String(), feedUri.Authority().String(), feedUri.RecordKey().String())
+
if err != nil {
+
e.Logger().Error("could not fetch feed generator record", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
feedGenerator, ok := feedRecord.Value.Val.(*bsky.FeedGenerator)
+
if !ok {
+
return helpers.InputError(e, to.StringPtr("record is not a feed generator"))
+
}
+
feedGeneratorDid := feedGenerator.Did
+
+
e.Set("proxyTokenLxm", "app.bsky.feed.getFeedSkeleton")
+
e.Set("proxyTokenAud", feedGeneratorDid)
+
+
return s.handleProxy(e)
+
}
+2 -2
server/handle_repo_get_record.go
···
package server
import (
-
"github.com/bluesky-social/indigo/atproto/data"
+
"github.com/bluesky-social/indigo/atproto/atdata"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/haileyok/cocoon/models"
"github.com/labstack/echo/v4"
···
return err
}
-
val, err := data.UnmarshalCBOR(record.Value)
+
val, err := atdata.UnmarshalCBOR(record.Value)
if err != nil {
return s.handleProxy(e) // TODO: this should be getting handled like...if we don't find it in the db. why doesn't it throw error up there?
}
+112
server/handle_repo_list_missing_blobs.go
···
+
package server
+
+
import (
+
"fmt"
+
"strconv"
+
+
"github.com/bluesky-social/indigo/atproto/atdata"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/ipfs/go-cid"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ComAtprotoRepoListMissingBlobsResponse struct {
+
Cursor *string `json:"cursor,omitempty"`
+
Blobs []ComAtprotoRepoListMissingBlobsRecordBlob `json:"blobs"`
+
}
+
+
type ComAtprotoRepoListMissingBlobsRecordBlob struct {
+
Cid string `json:"cid"`
+
RecordUri string `json:"recordUri"`
+
}
+
+
func (s *Server) handleListMissingBlobs(e echo.Context) error {
+
urepo := e.Get("repo").(*models.RepoActor)
+
+
limitStr := e.QueryParam("limit")
+
cursor := e.QueryParam("cursor")
+
+
limit := 500
+
if limitStr != "" {
+
if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 {
+
limit = l
+
}
+
}
+
+
var records []models.Record
+
if err := s.db.Raw("SELECT * FROM records WHERE did = ?", nil, urepo.Repo.Did).Scan(&records).Error; err != nil {
+
s.logger.Error("failed to get records for listMissingBlobs", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
type blobRef struct {
+
cid cid.Cid
+
recordUri string
+
}
+
var allBlobRefs []blobRef
+
+
for _, rec := range records {
+
blobs := getBlobsFromRecord(rec.Value)
+
recordUri := fmt.Sprintf("at://%s/%s/%s", urepo.Repo.Did, rec.Nsid, rec.Rkey)
+
for _, b := range blobs {
+
allBlobRefs = append(allBlobRefs, blobRef{cid: cid.Cid(b.Ref), recordUri: recordUri})
+
}
+
}
+
+
missingBlobs := make([]ComAtprotoRepoListMissingBlobsRecordBlob, 0)
+
seenCids := make(map[string]bool)
+
+
for _, ref := range allBlobRefs {
+
cidStr := ref.cid.String()
+
+
if seenCids[cidStr] {
+
continue
+
}
+
+
if cursor != "" && cidStr <= cursor {
+
continue
+
}
+
+
var count int64
+
if err := s.db.Raw("SELECT COUNT(*) FROM blobs WHERE did = ? AND cid = ?", nil, urepo.Repo.Did, ref.cid.Bytes()).Scan(&count).Error; err != nil {
+
continue
+
}
+
+
if count == 0 {
+
missingBlobs = append(missingBlobs, ComAtprotoRepoListMissingBlobsRecordBlob{
+
Cid: cidStr,
+
RecordUri: ref.recordUri,
+
})
+
seenCids[cidStr] = true
+
+
if len(missingBlobs) >= limit {
+
break
+
}
+
}
+
}
+
+
var nextCursor *string
+
if len(missingBlobs) > 0 && len(missingBlobs) >= limit {
+
lastCid := missingBlobs[len(missingBlobs)-1].Cid
+
nextCursor = &lastCid
+
}
+
+
return e.JSON(200, ComAtprotoRepoListMissingBlobsResponse{
+
Cursor: nextCursor,
+
Blobs: missingBlobs,
+
})
+
}
+
+
func getBlobsFromRecord(data []byte) []atdata.Blob {
+
if len(data) == 0 {
+
return nil
+
}
+
+
decoded, err := atdata.UnmarshalCBOR(data)
+
if err != nil {
+
return nil
+
}
+
+
return atdata.ExtractBlobs(decoded)
+
}
+2 -2
server/handle_repo_list_records.go
···
"strconv"
"github.com/Azure/go-autorest/autorest/to"
-
"github.com/bluesky-social/indigo/atproto/data"
+
"github.com/bluesky-social/indigo/atproto/atdata"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
···
items := []ComAtprotoRepoListRecordsRecordItem{}
for _, r := range records {
-
val, err := data.UnmarshalCBOR(r.Value)
+
val, err := atdata.UnmarshalCBOR(r.Value)
if err != nil {
return err
}
+2 -2
server/handle_repo_list_repos.go
···
Did: r.Did,
Head: c.String(),
Rev: r.Rev,
-
Active: true,
-
Status: nil,
+
Active: r.Active(),
+
Status: r.Status(),
})
}
+50 -8
server/handle_repo_upload_blob.go
···
import (
"bytes"
+
"fmt"
"io"
+
"github.com/aws/aws-sdk-go/aws"
+
"github.com/aws/aws-sdk-go/aws/credentials"
+
"github.com/aws/aws-sdk-go/aws/session"
+
"github.com/aws/aws-sdk-go/service/s3"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
"github.com/ipfs/go-cid"
···
mime = "application/octet-stream"
}
+
storage := "sqlite"
+
s3Upload := s.s3Config != nil && s.s3Config.BlobstoreEnabled
+
if s3Upload {
+
storage = "s3"
+
}
blob := models.Blob{
Did: urepo.Repo.Did,
RefCount: 0,
CreatedAt: s.repoman.clock.Next().String(),
+
Storage: storage,
}
if err := s.db.Create(&blob, nil).Error; err != nil {
···
read += n
fulldata.Write(data)
-
blobPart := models.BlobPart{
-
BlobID: blob.ID,
-
Idx: part,
-
Data: data,
-
}
+
if !s3Upload {
+
blobPart := models.BlobPart{
+
BlobID: blob.ID,
+
Idx: part,
+
Data: data,
+
}
-
if err := s.db.Create(&blobPart, nil).Error; err != nil {
-
s.logger.Error("error adding blob part to db", "error", err)
-
return helpers.ServerError(e, nil)
+
if err := s.db.Create(&blobPart, nil).Error; err != nil {
+
s.logger.Error("error adding blob part to db", "error", err)
+
return helpers.ServerError(e, nil)
+
}
}
part++
···
if err != nil {
s.logger.Error("error creating cid prefix", "error", err)
return helpers.ServerError(e, nil)
+
}
+
+
if s3Upload {
+
config := &aws.Config{
+
Region: aws.String(s.s3Config.Region),
+
Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""),
+
}
+
+
if s.s3Config.Endpoint != "" {
+
config.Endpoint = aws.String(s.s3Config.Endpoint)
+
config.S3ForcePathStyle = aws.Bool(true)
+
}
+
+
sess, err := session.NewSession(config)
+
if err != nil {
+
s.logger.Error("error creating aws session", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
svc := s3.New(sess)
+
+
if _, err := svc.PutObject(&s3.PutObjectInput{
+
Bucket: aws.String(s.s3Config.Bucket),
+
Key: aws.String(fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())),
+
Body: bytes.NewReader(fulldata.Bytes()),
+
}); err != nil {
+
s.logger.Error("error uploading blob to s3", "error", err)
+
return helpers.ServerError(e, nil)
+
}
}
if err := s.db.Exec("UPDATE blobs SET cid = ? WHERE id = ?", nil, c.Bytes(), blob.ID).Error; err != nil {
+45
server/handle_server_activate_account.go
···
+
package server
+
+
import (
+
"context"
+
"time"
+
+
"github.com/bluesky-social/indigo/api/atproto"
+
"github.com/bluesky-social/indigo/events"
+
"github.com/bluesky-social/indigo/util"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ComAtprotoServerActivateAccountRequest struct {
+
// NOTE: this implementation will not pay attention to this value
+
DeleteAfter time.Time `json:"deleteAfter"`
+
}
+
+
func (s *Server) handleServerActivateAccount(e echo.Context) error {
+
var req ComAtprotoServerActivateAccountRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("error binding", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
urepo := e.Get("repo").(*models.RepoActor)
+
+
if err := s.db.Exec("UPDATE repos SET deactivated = ? WHERE did = ?", nil, false, urepo.Repo.Did).Error; err != nil {
+
s.logger.Error("error updating account status to deactivated", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
+
RepoAccount: &atproto.SyncSubscribeRepos_Account{
+
Active: true,
+
Did: urepo.Repo.Did,
+
Status: nil,
+
Seq: time.Now().UnixMicro(), // TODO: bad puppy
+
Time: time.Now().Format(util.ISO8601),
+
},
+
})
+
+
return e.NoContent(200)
+
}
+2 -2
server/handle_server_confirm_email.go
···
}
if urepo.EmailVerificationCode == nil || urepo.EmailVerificationCodeExpiresAt == nil {
-
return helpers.InputError(e, to.StringPtr("ExpiredToken"))
+
return helpers.ExpiredTokenError(e)
}
if *urepo.EmailVerificationCode != req.Token {
···
}
if time.Now().UTC().After(*urepo.EmailVerificationCodeExpiresAt) {
-
return helpers.InputError(e, to.StringPtr("ExpiredToken"))
+
return helpers.ExpiredTokenError(e)
}
now := time.Now().UTC()
+95 -63
server/handle_server_create_account.go
···
"github.com/Azure/go-autorest/autorest/to"
"github.com/bluesky-social/indigo/api/atproto"
-
"github.com/bluesky-social/indigo/atproto/crypto"
-
"github.com/bluesky-social/indigo/atproto/syntax"
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
"github.com/bluesky-social/indigo/events"
"github.com/bluesky-social/indigo/repo"
"github.com/bluesky-social/indigo/util"
-
"github.com/haileyok/cocoon/blockstore"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
"github.com/labstack/echo/v4"
···
Handle string `json:"handle" validate:"required,atproto-handle"`
Did *string `json:"did" validate:"atproto-did"`
Password string `json:"password" validate:"required"`
-
InviteCode string `json:"inviteCode" validate:"required"`
+
InviteCode string `json:"inviteCode" validate:"omitempty"`
}
type ComAtprotoServerCreateAccountResponse struct {
···
func (s *Server) handleCreateAccount(e echo.Context) error {
var request ComAtprotoServerCreateAccountRequest
-
var signupDid string
-
customDidHeader := e.Request().Header.Get("authorization")
-
if customDidHeader != "" {
-
pts := strings.Split(customDidHeader, " ")
-
if len(pts) != 2 {
-
return helpers.InputError(e, to.StringPtr("InvalidDid"))
-
}
-
-
_, err := syntax.ParseDID(pts[1])
-
if err != nil {
-
return helpers.InputError(e, to.StringPtr("InvalidDid"))
-
}
-
-
signupDid = pts[1]
-
}
-
if err := e.Bind(&request); err != nil {
s.logger.Error("error receiving request", "endpoint", "com.atproto.server.createAccount", "error", err)
return helpers.ServerError(e, nil)
···
}
}
}
+
+
var signupDid string
+
if request.Did != nil {
+
signupDid = *request.Did
+
+
token := strings.TrimSpace(strings.Replace(e.Request().Header.Get("authorization"), "Bearer ", "", 1))
+
if token == "" {
+
return helpers.UnauthorizedError(e, to.StringPtr("must authenticate to use an existing did"))
+
}
+
authDid, err := s.validateServiceAuth(e.Request().Context(), token, "com.atproto.server.createAccount")
+
+
if err != nil {
+
s.logger.Warn("error validating authorization token", "endpoint", "com.atproto.server.createAccount", "error", err)
+
return helpers.UnauthorizedError(e, to.StringPtr("invalid authorization token"))
+
}
+
+
if authDid != signupDid {
+
return helpers.ForbiddenError(e, to.StringPtr("auth did did not match signup did"))
+
}
+
}
// see if the handle is already taken
-
_, err := s.getActorByHandle(request.Handle)
+
actor, err := s.getActorByHandle(request.Handle)
if err != nil && err != gorm.ErrRecordNotFound {
s.logger.Error("error looking up handle in db", "endpoint", "com.atproto.server.createAccount", "error", err)
return helpers.ServerError(e, nil)
}
-
if err == nil {
+
if err == nil && actor.Did != signupDid {
return helpers.InputError(e, to.StringPtr("HandleNotAvailable"))
}
-
if did, err := s.passport.ResolveHandle(e.Request().Context(), request.Handle); err == nil && did != "" {
+
if did, err := s.passport.ResolveHandle(e.Request().Context(), request.Handle); err == nil && did != signupDid {
return helpers.InputError(e, to.StringPtr("HandleNotAvailable"))
}
var ic models.InviteCode
-
if err := s.db.Raw("SELECT * FROM invite_codes WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
-
if err == gorm.ErrRecordNotFound {
+
if s.config.RequireInvite {
+
if strings.TrimSpace(request.InviteCode) == "" {
return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
}
-
s.logger.Error("error getting invite code from db", "error", err)
-
return helpers.ServerError(e, nil)
-
}
+
+
if err := s.db.Raw("SELECT * FROM invite_codes WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
+
if err == gorm.ErrRecordNotFound {
+
return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+
}
+
s.logger.Error("error getting invite code from db", "error", err)
+
return helpers.ServerError(e, nil)
+
}
-
if ic.RemainingUseCount < 1 {
-
return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+
if ic.RemainingUseCount < 1 {
+
return helpers.InputError(e, to.StringPtr("InvalidInviteCode"))
+
}
}
// see if the email is already taken
-
_, err = s.getRepoByEmail(request.Email)
+
existingRepo, err := s.getRepoByEmail(request.Email)
if err != nil && err != gorm.ErrRecordNotFound {
s.logger.Error("error looking up email in db", "endpoint", "com.atproto.server.createAccount", "error", err)
return helpers.ServerError(e, nil)
}
-
if err == nil {
+
if err == nil && existingRepo.Did != signupDid {
return helpers.InputError(e, to.StringPtr("EmailNotAvailable"))
}
// TODO: unsupported domains
-
k, err := crypto.GeneratePrivateKeyK256()
-
if err != nil {
-
s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.createAccount", "error", err)
-
return helpers.ServerError(e, nil)
+
var k *atcrypto.PrivateKeyK256
+
+
if signupDid != "" {
+
reservedKey, err := s.getReservedKey(signupDid)
+
if err != nil {
+
s.logger.Error("error looking up reserved key", "error", err)
+
}
+
if reservedKey != nil {
+
k, err = atcrypto.ParsePrivateBytesK256(reservedKey.PrivateKey)
+
if err != nil {
+
s.logger.Error("error parsing reserved key", "error", err)
+
k = nil
+
} else {
+
defer func() {
+
if delErr := s.deleteReservedKey(reservedKey.KeyDid, reservedKey.Did); delErr != nil {
+
s.logger.Error("error deleting reserved key", "error", delErr)
+
}
+
}()
+
}
+
}
+
}
+
+
if k == nil {
+
k, err = atcrypto.GeneratePrivateKeyK256()
+
if err != nil {
+
s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.createAccount", "error", err)
+
return helpers.ServerError(e, nil)
+
}
}
if signupDid == "" {
···
SigningKey: k.Bytes(),
}
-
actor := models.Actor{
-
Did: signupDid,
-
Handle: request.Handle,
-
}
+
if actor == nil {
+
actor = &models.Actor{
+
Did: signupDid,
+
Handle: request.Handle,
+
}
-
if err := s.db.Create(&urepo, nil).Error; err != nil {
-
s.logger.Error("error inserting new repo", "error", err)
-
return helpers.ServerError(e, nil)
-
}
-
-
if err := s.db.Create(&actor, nil).Error; err != nil {
-
s.logger.Error("error inserting new actor", "error", err)
-
return helpers.ServerError(e, nil)
+
if err := s.db.Create(&urepo, nil).Error; err != nil {
+
s.logger.Error("error inserting new repo", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := s.db.Create(&actor, nil).Error; err != nil {
+
s.logger.Error("error inserting new actor", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
} else {
+
if err := s.db.Save(&actor, nil).Error; err != nil {
+
s.logger.Error("error inserting new actor", "error", err)
+
return helpers.ServerError(e, nil)
+
}
}
-
if customDidHeader == "" {
-
bs := blockstore.New(signupDid, s.db)
+
if request.Did == nil || *request.Did == "" {
+
bs := s.getBlockstore(signupDid)
r := repo.NewRepo(context.TODO(), signupDid, bs)
root, rev, err := r.Commit(context.TODO(), urepo.SignFor)
···
return helpers.ServerError(e, nil)
}
-
if err := bs.UpdateRepo(context.TODO(), root, rev); err != nil {
+
if err := s.UpdateRepo(context.TODO(), urepo.Did, root, rev); err != nil {
s.logger.Error("error updating repo after commit", "error", err)
return helpers.ServerError(e, nil)
}
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
-
RepoHandle: &atproto.SyncSubscribeRepos_Handle{
-
Did: urepo.Did,
-
Handle: request.Handle,
-
Seq: time.Now().UnixMicro(), // TODO: no
-
Time: time.Now().Format(util.ISO8601),
-
},
-
})
-
-
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
RepoIdentity: &atproto.SyncSubscribeRepos_Identity{
Did: urepo.Did,
Handle: to.StringPtr(request.Handle),
···
})
}
-
if err := s.db.Raw("UPDATE invite_codes SET remaining_use_count = remaining_use_count - 1 WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
-
s.logger.Error("error decrementing use count", "error", err)
-
return helpers.ServerError(e, nil)
+
if s.config.RequireInvite {
+
if err := s.db.Raw("UPDATE invite_codes SET remaining_use_count = remaining_use_count - 1 WHERE code = ?", nil, request.InviteCode).Scan(&ic).Error; err != nil {
+
s.logger.Error("error decrementing use count", "error", err)
+
return helpers.ServerError(e, nil)
+
}
}
sess, err := s.createSession(&urepo)
+2 -2
server/handle_server_create_session.go
···
Email: repo.Email,
EmailConfirmed: repo.EmailConfirmedAt != nil,
EmailAuthFactor: false,
-
Active: true, // TODO: eventually do takedowns
-
Status: nil, // TODO eventually do takedowns
+
Active: repo.Active(),
+
Status: repo.Status(),
})
}
+46
server/handle_server_deactivate_account.go
···
+
package server
+
+
import (
+
"context"
+
"time"
+
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/bluesky-social/indigo/api/atproto"
+
"github.com/bluesky-social/indigo/events"
+
"github.com/bluesky-social/indigo/util"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ComAtprotoServerDeactivateAccountRequest struct {
+
// NOTE: this implementation will not pay attention to this value
+
DeleteAfter time.Time `json:"deleteAfter"`
+
}
+
+
func (s *Server) handleServerDeactivateAccount(e echo.Context) error {
+
var req ComAtprotoServerDeactivateAccountRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("error binding", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
urepo := e.Get("repo").(*models.RepoActor)
+
+
if err := s.db.Exec("UPDATE repos SET deactivated = ? WHERE did = ?", nil, true, urepo.Repo.Did).Error; err != nil {
+
s.logger.Error("error updating account status to deactivated", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
+
RepoAccount: &atproto.SyncSubscribeRepos_Account{
+
Active: false,
+
Did: urepo.Repo.Did,
+
Status: to.StringPtr("deactivated"),
+
Seq: time.Now().UnixMicro(), // TODO: bad puppy
+
Time: time.Now().Format(util.ISO8601),
+
},
+
})
+
+
return e.NoContent(200)
+
}
+145
server/handle_server_delete_account.go
···
+
package server
+
+
import (
+
"context"
+
"time"
+
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/bluesky-social/indigo/api/atproto"
+
"github.com/bluesky-social/indigo/events"
+
"github.com/bluesky-social/indigo/util"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/labstack/echo/v4"
+
"golang.org/x/crypto/bcrypt"
+
)
+
+
type ComAtprotoServerDeleteAccountRequest struct {
+
Did string `json:"did" validate:"required"`
+
Password string `json:"password" validate:"required"`
+
Token string `json:"token" validate:"required"`
+
}
+
+
func (s *Server) handleServerDeleteAccount(e echo.Context) error {
+
var req ComAtprotoServerDeleteAccountRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("error binding", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := e.Validate(&req); err != nil {
+
s.logger.Error("error validating", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
urepo, err := s.getRepoActorByDid(req.Did)
+
if err != nil {
+
s.logger.Error("error getting repo", "error", err)
+
return echo.NewHTTPError(400, "account not found")
+
}
+
+
if err := bcrypt.CompareHashAndPassword([]byte(urepo.Repo.Password), []byte(req.Password)); err != nil {
+
s.logger.Error("password mismatch", "error", err)
+
return echo.NewHTTPError(401, "Invalid did or password")
+
}
+
+
if urepo.Repo.AccountDeleteCode == nil || urepo.Repo.AccountDeleteCodeExpiresAt == nil {
+
s.logger.Error("no deletion token found for account")
+
return echo.NewHTTPError(400, map[string]interface{}{
+
"error": "InvalidToken",
+
"message": "Token is invalid",
+
})
+
}
+
+
if *urepo.Repo.AccountDeleteCode != req.Token {
+
s.logger.Error("deletion token mismatch")
+
return echo.NewHTTPError(400, map[string]interface{}{
+
"error": "InvalidToken",
+
"message": "Token is invalid",
+
})
+
}
+
+
if time.Now().UTC().After(*urepo.Repo.AccountDeleteCodeExpiresAt) {
+
s.logger.Error("deletion token expired")
+
return echo.NewHTTPError(400, map[string]interface{}{
+
"error": "ExpiredToken",
+
"message": "Token is expired",
+
})
+
}
+
+
tx := s.db.BeginDangerously()
+
if tx.Error != nil {
+
s.logger.Error("error starting transaction", "error", tx.Error)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM blocks WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting blocks", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM records WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting records", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM blobs WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting blobs", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM tokens WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting tokens", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM refresh_tokens WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting refresh tokens", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM reserved_keys WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting reserved keys", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM invite_codes WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting invite codes", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM actors WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting actor", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Exec("DELETE FROM repos WHERE did = ?", nil, req.Did).Error; err != nil {
+
tx.Rollback()
+
s.logger.Error("error deleting repo", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if err := tx.Commit().Error; err != nil {
+
s.logger.Error("error committing transaction", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
s.evtman.AddEvent(context.TODO(), &events.XRPCStreamEvent{
+
RepoAccount: &atproto.SyncSubscribeRepos_Account{
+
Active: false,
+
Did: req.Did,
+
Status: to.StringPtr("deleted"),
+
Seq: time.Now().UnixMicro(),
+
Time: time.Now().Format(util.ISO8601),
+
},
+
})
+
+
return e.NoContent(200)
+
}
+1 -1
server/handle_server_describe_server.go
···
func (s *Server) handleDescribeServer(e echo.Context) error {
return e.JSON(200, ComAtprotoServerDescribeServerResponse{
-
InviteCodeRequired: true,
+
InviteCodeRequired: s.config.RequireInvite,
PhoneVerificationRequired: false,
AvailableUserDomains: []string{"." + s.config.Hostname}, // TODO: more
Links: ComAtprotoServerDescribeServerResponseLinks{
+17 -8
server/handle_server_get_service_auth.go
···
type ServerGetServiceAuthRequest struct {
Aud string `query:"aud" validate:"required,atproto-did"`
-
Exp int64 `query:"exp"`
-
Lxm string `query:"lxm" validate:"required,atproto-nsid"`
+
// exp should be a float, as some clients will send a non-integer expiration
+
Exp float64 `query:"exp"`
+
Lxm string `query:"lxm"`
}
func (s *Server) handleServerGetServiceAuth(e echo.Context) error {
···
return helpers.InputError(e, nil)
}
+
exp := int64(req.Exp)
now := time.Now().Unix()
-
if req.Exp == 0 {
-
req.Exp = now + 60 // default
+
if exp == 0 {
+
exp = now + 60 // default
}
if req.Lxm == "com.atproto.server.getServiceAuth" {
return helpers.InputError(e, to.StringPtr("may not generate auth tokens recursively"))
}
-
maxExp := now + (60 * 30)
-
if req.Exp > maxExp {
+
var maxExp int64
+
if req.Lxm != "" {
+
maxExp = now + (60 * 60)
+
} else {
+
maxExp = now + 60
+
}
+
if exp > maxExp {
return helpers.InputError(e, to.StringPtr("expiration too big. smoller please"))
}
···
payload := map[string]any{
"iss": repo.Repo.Did,
"aud": req.Aud,
-
"lxm": req.Lxm,
"jti": uuid.NewString(),
-
"exp": req.Exp,
+
"exp": exp,
"iat": now,
+
}
+
if req.Lxm != "" {
+
payload["lxm"] = req.Lxm
}
pj, err := json.Marshal(payload)
if err != nil {
+2 -2
server/handle_server_get_session.go
···
Email: repo.Email,
EmailConfirmed: repo.EmailConfirmedAt != nil,
EmailAuthFactor: false, // TODO: todo todo
-
Active: true,
-
Status: nil,
+
Active: repo.Active(),
+
Status: repo.Status(),
})
}
+2 -2
server/handle_server_refresh_session.go
···
RefreshJwt: sess.RefreshToken,
Handle: repo.Handle,
Did: repo.Repo.Did,
-
Active: true,
-
Status: nil,
+
Active: repo.Active(),
+
Status: repo.Status(),
})
}
+49
server/handle_server_request_account_delete.go
···
+
package server
+
+
import (
+
"fmt"
+
"time"
+
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/labstack/echo/v4"
+
)
+
+
func (s *Server) handleServerRequestAccountDelete(e echo.Context) error {
+
urepo := e.Get("repo").(*models.RepoActor)
+
+
token := fmt.Sprintf("%s-%s", helpers.RandomVarchar(5), helpers.RandomVarchar(5))
+
expiresAt := time.Now().UTC().Add(15 * time.Minute)
+
+
if err := s.db.Exec("UPDATE repos SET account_delete_code = ?, account_delete_code_expires_at = ? WHERE did = ?", nil, token, expiresAt, urepo.Repo.Did).Error; err != nil {
+
s.logger.Error("error setting deletion token", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if urepo.Email != "" {
+
if err := s.sendAccountDeleteEmail(urepo.Email, urepo.Actor.Handle, token); err != nil {
+
s.logger.Error("error sending account deletion email", "error", err)
+
}
+
}
+
+
return e.NoContent(200)
+
}
+
+
func (s *Server) sendAccountDeleteEmail(email, handle, token string) error {
+
if s.mail == nil {
+
return nil
+
}
+
+
s.mailLk.Lock()
+
defer s.mailLk.Unlock()
+
+
s.mail.To(email)
+
s.mail.Subject("Account Deletion Request for " + s.config.Hostname)
+
s.mail.Plain().Set(fmt.Sprintf("Hello %s. Your account deletion code is %s. This code will expire in fifteen minutes. If you did not request this, please ignore this email.", handle, token))
+
+
if err := s.mail.Send(); err != nil {
+
return err
+
}
+
+
return nil
+
}
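
Taken together with the deleteAccount handler earlier in this diff, the flow is: the client requests a code, the PDS emails it, and the client confirms the deletion. A rough client sketch, assuming the standard com.atproto.server.deleteAccount field names and placeholder host, DID, password, and token; errors are elided for brevity.

package main

import (
	"bytes"
	"encoding/json"
	"net/http"
)

func main() {
	pds := "https://pds.example.com" // placeholder PDS host

	// Step 1: ask the PDS to email a deletion code (requires an authed session).
	req, _ := http.NewRequest("POST", pds+"/xrpc/com.atproto.server.requestAccountDelete", nil)
	req.Header.Set("Authorization", "Bearer <accessJwt>") // placeholder access token
	http.DefaultClient.Do(req)

	// Step 2: confirm with the emailed token via com.atproto.server.deleteAccount.
	body, _ := json.Marshal(map[string]string{
		"did":      "did:plc:example", // placeholder
		"password": "correct-horse",   // placeholder
		"token":    "ABCDE-FGHIJ",     // code in the format generated above
	})
	http.Post(pds+"/xrpc/com.atproto.server.deleteAccount", "application/json", bytes.NewReader(body))
}
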
+95
server/handle_server_reserve_signing_key.go
···
+
package server
+
+
import (
+
"time"
+
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
+
"github.com/haileyok/cocoon/internal/helpers"
+
"github.com/haileyok/cocoon/models"
+
"github.com/labstack/echo/v4"
+
)
+
+
type ServerReserveSigningKeyRequest struct {
+
Did *string `json:"did"`
+
}
+
+
type ServerReserveSigningKeyResponse struct {
+
SigningKey string `json:"signingKey"`
+
}
+
+
func (s *Server) handleServerReserveSigningKey(e echo.Context) error {
+
var req ServerReserveSigningKeyRequest
+
if err := e.Bind(&req); err != nil {
+
s.logger.Error("could not bind reserve signing key request", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
if req.Did != nil && *req.Did != "" {
+
var existing models.ReservedKey
+
if err := s.db.Raw("SELECT * FROM reserved_keys WHERE did = ?", nil, *req.Did).Scan(&existing).Error; err == nil && existing.KeyDid != "" {
+
return e.JSON(200, ServerReserveSigningKeyResponse{
+
SigningKey: existing.KeyDid,
+
})
+
}
+
}
+
+
k, err := atcrypto.GeneratePrivateKeyK256()
+
if err != nil {
+
s.logger.Error("error creating signing key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
pubKey, err := k.PublicKey()
+
if err != nil {
+
s.logger.Error("error getting public key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
keyDid := pubKey.DIDKey()
+
+
reservedKey := models.ReservedKey{
+
KeyDid: keyDid,
+
Did: req.Did,
+
PrivateKey: k.Bytes(),
+
CreatedAt: time.Now(),
+
}
+
+
if err := s.db.Create(&reservedKey, nil).Error; err != nil {
+
s.logger.Error("error storing reserved key", "endpoint", "com.atproto.server.reserveSigningKey", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
s.logger.Info("reserved signing key", "keyDid", keyDid, "forDid", req.Did)
+
+
return e.JSON(200, ServerReserveSigningKeyResponse{
+
SigningKey: keyDid,
+
})
+
}
+
+
func (s *Server) getReservedKey(keyDidOrDid string) (*models.ReservedKey, error) {
+
var reservedKey models.ReservedKey
+
+
if err := s.db.Raw("SELECT * FROM reserved_keys WHERE key_did = ?", nil, keyDidOrDid).Scan(&reservedKey).Error; err == nil && reservedKey.KeyDid != "" {
+
return &reservedKey, nil
+
}
+
+
if err := s.db.Raw("SELECT * FROM reserved_keys WHERE did = ?", nil, keyDidOrDid).Scan(&reservedKey).Error; err == nil && reservedKey.KeyDid != "" {
+
return &reservedKey, nil
+
}
+
+
return nil, nil
+
}
+
+
func (s *Server) deleteReservedKey(keyDid string, did *string) error {
+
if err := s.db.Exec("DELETE FROM reserved_keys WHERE key_did = ?", nil, keyDid).Error; err != nil {
+
return err
+
}
+
+
if did != nil && *did != "" {
+
if err := s.db.Exec("DELETE FROM reserved_keys WHERE did = ?", nil, *did).Error; err != nil {
+
return err
+
}
+
}
+
+
return nil
+
}
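
To show what the reserved key actually is, here is a standalone sketch using the same indigo atcrypto calls the handler above uses: mint a K-256 key and derive the did:key that gets returned as signingKey.

package main

import (
	"fmt"

	"github.com/bluesky-social/indigo/atproto/atcrypto"
)

func main() {
	k, err := atcrypto.GeneratePrivateKeyK256()
	if err != nil {
		panic(err)
	}
	pub, err := k.PublicKey()
	if err != nil {
		panic(err)
	}
	// The handler persists k.Bytes() in reserved_keys and returns this value as "signingKey".
	fmt.Println(pub.DIDKey())
}
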
+2 -2
server/handle_server_reset_password.go
···
}
if *urepo.PasswordResetCode != req.Token {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
if time.Now().UTC().After(*urepo.PasswordResetCodeExpiresAt) {
-
return helpers.InputError(e, to.StringPtr("ExpiredToken"))
+
return helpers.ExpiredTokenError(e)
}
hash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
+3 -4
server/handle_server_update_email.go
···
import (
"time"
-
"github.com/Azure/go-autorest/autorest/to"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
"github.com/labstack/echo/v4"
···
}
if urepo.EmailUpdateCode == nil || urepo.EmailUpdateCodeExpiresAt == nil {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
if *urepo.EmailUpdateCode != req.Token {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
if time.Now().UTC().After(*urepo.EmailUpdateCodeExpiresAt) {
-
return helpers.InputError(e, to.StringPtr("ExpiredToken"))
+
return helpers.ExpiredTokenError(e)
}
if err := s.db.Exec("UPDATE repos SET email_update_code = NULL, email_update_code_expires_at = NULL, email_confirmed_at = NULL, email = ? WHERE did = ?", nil, req.Email, urepo.Repo.Did).Error; err != nil {
+91 -8
server/handle_sync_get_blob.go
···
import (
"bytes"
+
"fmt"
+
"io"
+
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/aws/aws-sdk-go/aws"
+
"github.com/aws/aws-sdk-go/aws/credentials"
+
"github.com/aws/aws-sdk-go/aws/session"
+
"github.com/aws/aws-sdk-go/service/s3"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
"github.com/ipfs/go-cid"
···
return helpers.InputError(e, nil)
}
+
urepo, err := s.getRepoActorByDid(did)
+
if err != nil {
+
s.logger.Error("could not find user for requested blob", "error", err)
+
return helpers.InputError(e, nil)
+
}
+
+
status := urepo.Status()
+
if status != nil {
+
if *status == "deactivated" {
+
return helpers.InputError(e, to.StringPtr("RepoDeactivated"))
+
}
+
}
+
var blob models.Blob
if err := s.db.Raw("SELECT * FROM blobs WHERE did = ? AND cid = ?", nil, did, c.Bytes()).Scan(&blob).Error; err != nil {
s.logger.Error("error looking up blob", "error", err)
···
buf := new(bytes.Buffer)
-
var parts []models.BlobPart
-
if err := s.db.Raw("SELECT * FROM blob_parts WHERE blob_id = ? ORDER BY idx", nil, blob.ID).Scan(&parts).Error; err != nil {
-
s.logger.Error("error getting blob parts", "error", err)
-
return helpers.ServerError(e, nil)
-
}
+
if blob.Storage == "sqlite" {
+
var parts []models.BlobPart
+
if err := s.db.Raw("SELECT * FROM blob_parts WHERE blob_id = ? ORDER BY idx", nil, blob.ID).Scan(&parts).Error; err != nil {
+
s.logger.Error("error getting blob parts", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
// TODO: we can just stream this, don't need to make a buffer
+
for _, p := range parts {
+
buf.Write(p.Data)
+
}
+
} else if blob.Storage == "s3" {
+
if !(s.s3Config != nil && s.s3Config.BlobstoreEnabled) {
+
s.logger.Error("s3 storage disabled")
+
return helpers.ServerError(e, nil)
+
}
+
+
blobKey := fmt.Sprintf("blobs/%s/%s", urepo.Repo.Did, c.String())
+
+
if s.s3Config.CDNUrl != "" {
+
redirectUrl := fmt.Sprintf("%s/%s", s.s3Config.CDNUrl, blobKey)
+
return e.Redirect(302, redirectUrl)
+
}
+
+
config := &aws.Config{
+
Region: aws.String(s.s3Config.Region),
+
Credentials: credentials.NewStaticCredentials(s.s3Config.AccessKey, s.s3Config.SecretKey, ""),
+
}
+
+
if s.s3Config.Endpoint != "" {
+
config.Endpoint = aws.String(s.s3Config.Endpoint)
+
config.S3ForcePathStyle = aws.Bool(true)
+
}
+
+
sess, err := session.NewSession(config)
+
if err != nil {
+
s.logger.Error("error creating aws session", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
svc := s3.New(sess)
+
if result, err := svc.GetObject(&s3.GetObjectInput{
+
Bucket: aws.String(s.s3Config.Bucket),
+
Key: aws.String(blobKey),
+
}); err != nil {
+
s.logger.Error("error getting blob from s3", "error", err)
+
return helpers.ServerError(e, nil)
+
} else {
+
read := 0
+
part := 0
+
partBuf := make([]byte, 0x10000)
+
+
for {
+
n, err := io.ReadFull(result.Body, partBuf)
+
if err == io.ErrUnexpectedEOF || err == io.EOF {
+
if n == 0 {
+
break
+
}
+
} else if err != nil && err != io.ErrUnexpectedEOF {
+
s.logger.Error("error reading blob", "error", err)
+
return helpers.ServerError(e, nil)
+
}
-
// TODO: we can just stream this, don't need to make a buffer
-
for _, p := range parts {
-
buf.Write(p.Data)
+
data := partBuf[:n]
+
read += n
+
buf.Write(data)
+
part++
+
}
+
}
+
} else {
+
s.logger.Error("unknown storage", "storage", blob.Storage)
+
return helpers.ServerError(e, nil)
}
e.Response().Header().Set(echo.HeaderContentDisposition, "attachment; filename="+c.String())
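
To make the S3 branch above concrete, a tiny sketch of the object layout it assumes and the CDN redirect it prefers when CDNUrl is set; the DID, CID, and CDN host are placeholders.

package main

import "fmt"

func main() {
	did := "did:plc:example"  // placeholder repo DID
	c := "bafkreigexamplecid" // placeholder blob CID
	key := fmt.Sprintf("blobs/%s/%s", did, c)

	// With CDNUrl configured the handler 302-redirects to the CDN instead of proxying from S3.
	fmt.Println("https://cdn.example.com/" + key)
}
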
+14 -12
server/handle_sync_get_blocks.go
···
import (
"bytes"
-
"context"
-
"strings"
"github.com/bluesky-social/indigo/carstore"
-
"github.com/haileyok/cocoon/blockstore"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
···
"github.com/labstack/echo/v4"
)
+
type ComAtprotoSyncGetBlocksRequest struct {
+
Did string `query:"did"`
+
Cids []string `query:"cids"`
+
}
+
func (s *Server) handleGetBlocks(e echo.Context) error {
-
did := e.QueryParam("did")
-
cidsstr := e.QueryParam("cids")
-
if did == "" {
+
ctx := e.Request().Context()
+
+
var req ComAtprotoSyncGetBlocksRequest
+
if err := e.Bind(&req); err != nil {
return helpers.InputError(e, nil)
}
-
cidstrs := strings.Split(cidsstr, ",")
-
cids := []cid.Cid{}
+
var cids []cid.Cid
-
for _, cs := range cidstrs {
+
for _, cs := range req.Cids {
c, err := cid.Cast([]byte(cs))
if err != nil {
return err
···
cids = append(cids, c)
}
-
urepo, err := s.getRepoActorByDid(did)
+
urepo, err := s.getRepoActorByDid(req.Did)
if err != nil {
return helpers.ServerError(e, nil)
}
···
return helpers.ServerError(e, nil)
}
-
bs := blockstore.New(urepo.Repo.Did, s.db)
+
bs := s.getBlockstore(urepo.Repo.Did)
for _, c := range cids {
-
b, err := bs.Get(context.TODO(), c)
+
b, err := bs.Get(ctx, c)
if err != nil {
return err
}
+2 -2
server/handle_sync_get_repo_status.go
···
return e.JSON(200, ComAtprotoSyncGetRepoStatusResponse{
Did: urepo.Repo.Did,
-
Active: true,
-
Status: nil,
+
Active: urepo.Active(),
+
Status: urepo.Status(),
Rev: &urepo.Rev,
})
}
+14
server/handle_sync_list_blobs.go
···
package server
import (
+
"github.com/Azure/go-autorest/autorest/to"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
"github.com/ipfs/go-cid"
···
cursorquery = "AND created_at < ?"
}
params = append(params, limit)
+
+
urepo, err := s.getRepoActorByDid(did)
+
if err != nil {
+
s.logger.Error("could not find user for requested blobs", "error", err)
+
return helpers.InputError(e, nil)
+
}
+
+
status := urepo.Status()
+
if status != nil {
+
if *status == "deactivated" {
+
return helpers.InputError(e, to.StringPtr("RepoDeactivated"))
+
}
+
}
var blobs []models.Blob
if err := s.db.Raw("SELECT * FROM blobs WHERE did = ? "+cursorquery+" ORDER BY created_at DESC LIMIT ?", nil, params...).Scan(&blobs).Error; err != nil {
+31 -29
server/handle_sync_subscribe_repos.go
···
package server
import (
-
"fmt"
-
"net/http"
+
"context"
+
"time"
"github.com/bluesky-social/indigo/events"
"github.com/bluesky-social/indigo/lex/util"
···
"github.com/labstack/echo/v4"
)
-
var upgrader = websocket.Upgrader{
-
ReadBufferSize: 1024,
-
WriteBufferSize: 1024,
-
CheckOrigin: func(r *http.Request) bool {
-
return true
-
},
-
}
+
func (s *Server) handleSyncSubscribeRepos(e echo.Context) error {
+
ctx := e.Request().Context()
+
logger := s.logger.With("component", "subscribe-repos-websocket")
-
func (s *Server) handleSyncSubscribeRepos(e echo.Context) error {
conn, err := websocket.Upgrade(e.Response().Writer, e.Request(), e.Response().Header(), 1<<10, 1<<10)
if err != nil {
+
logger.Error("unable to establish websocket with relay", "err", err)
return err
}
-
s.logger.Info("new connection", "ua", e.Request().UserAgent())
-
-
ctx := e.Request().Context()
-
ident := e.RealIP() + "-" + e.Request().UserAgent()
+
logger = logger.With("ident", ident)
+
logger.Info("new connection established")
evts, cancel, err := s.evtman.Subscribe(ctx, ident, func(evt *events.XRPCStreamEvent) bool {
return true
···
for evt := range evts {
wc, err := conn.NextWriter(websocket.BinaryMessage)
if err != nil {
-
return err
+
logger.Error("error writing message to relay", "err", err)
+
break
}
-
var obj util.CBOR
+
if ctx.Err() != nil {
+
logger.Error("context error", "err", err)
+
break
+
}
+
var obj util.CBOR
switch {
case evt.Error != nil:
header.Op = events.EvtKindErrorFrame
···
case evt.RepoCommit != nil:
header.MsgType = "#commit"
obj = evt.RepoCommit
-
case evt.RepoHandle != nil:
-
header.MsgType = "#handle"
-
obj = evt.RepoHandle
case evt.RepoIdentity != nil:
header.MsgType = "#identity"
obj = evt.RepoIdentity
···
case evt.RepoInfo != nil:
header.MsgType = "#info"
obj = evt.RepoInfo
-
case evt.RepoMigrate != nil:
-
header.MsgType = "#migrate"
-
obj = evt.RepoMigrate
-
case evt.RepoTombstone != nil:
-
header.MsgType = "#tombstone"
-
obj = evt.RepoTombstone
default:
-
return fmt.Errorf("unrecognized event kind")
+
logger.Warn("unrecognized event kind")
+
return nil
}
if err := header.MarshalCBOR(wc); err != nil {
-
return fmt.Errorf("failed to write header: %w", err)
+
logger.Error("failed to write header to relay", "err", err)
+
break
}
if err := obj.MarshalCBOR(wc); err != nil {
-
return fmt.Errorf("failed to write event: %w", err)
+
logger.Error("failed to write event to relay", "err", err)
+
break
}
if err := wc.Close(); err != nil {
-
return fmt.Errorf("failed to flush-close our event write: %w", err)
+
logger.Error("failed to flush-close our event write", "err", err)
+
break
}
+
}
+
+
// we should tell the relay to request a new crawl at this point if we got disconnected
+
// use a new context since the old one might be cancelled at this point
+
ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+
defer cancel()
+
if err := s.requestCrawl(ctx); err != nil {
+
logger.Error("error requesting crawls", "err", err)
}
return nil
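
From the other side of this websocket, a consumer dials com.atproto.sync.subscribeRepos and reads binary frames. A minimal gorilla/websocket sketch with a placeholder host; decoding the CBOR header and event body is out of scope here.

package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("wss://pds.example.com/xrpc/com.atproto.sync.subscribeRepos", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		_, frame, err := conn.ReadMessage()
		if err != nil {
			log.Fatal(err)
		}
		// Each binary frame is a CBOR header (#commit, #identity, #account, ...) followed by the event body.
		fmt.Println("received frame of", len(frame), "bytes")
	}
}
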
+33
server/handle_well_known.go
···
import (
"fmt"
+
"strings"
"github.com/Azure/go-autorest/autorest/to"
+
"github.com/haileyok/cocoon/internal/helpers"
"github.com/labstack/echo/v4"
+
"gorm.io/gorm"
)
var (
···
},
},
})
+
}
+
+
func (s *Server) handleAtprotoDid(e echo.Context) error {
+
host := e.Request().Host
+
if host == "" {
+
return helpers.InputError(e, to.StringPtr("Invalid handle."))
+
}
+
+
host = strings.Split(host, ":")[0]
+
host = strings.ToLower(strings.TrimSpace(host))
+
+
if host == s.config.Hostname {
+
return e.String(200, s.config.Did)
+
}
+
+
suffix := "." + s.config.Hostname
+
if !strings.HasSuffix(host, suffix) {
+
return e.NoContent(404)
+
}
+
+
actor, err := s.getActorByHandle(host)
+
if err != nil {
+
if err == gorm.ErrRecordNotFound {
+
return e.NoContent(404)
+
}
+
s.logger.Error("error looking up actor by handle", "error", err)
+
return helpers.ServerError(e, nil)
+
}
+
+
return e.String(200, actor.Did)
}
func (s *Server) handleOauthProtectedResource(e echo.Context) error {
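
For reference, handle verification against the new route works by fetching the well-known path on the handle's host and expecting the bare DID in the body. A small resolver sketch with a placeholder handle:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("https://alice.pds.example.com/.well-known/atproto-did")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	did, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(did)) // the handler returns the bare DID, e.g. did:plc:...
}
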
+19
server/mail.go
···
return nil
}
+
func (s *Server) sendPlcTokenReset(email, handle, code string) error {
+
if s.mail == nil {
+
return nil
+
}
+
+
s.mailLk.Lock()
+
defer s.mailLk.Unlock()
+
+
s.mail.To(email)
+
s.mail.Subject("PLC token for " + s.config.Hostname)
+
s.mail.Plain().Set(fmt.Sprintf("Hello %s. Your PLC operation code is %s. This code will expire in ten minutes.", handle, code))
+
+
if err := s.mail.Send(); err != nil {
+
return err
+
}
+
+
return nil
+
}
+
func (s *Server) sendEmailUpdate(email, handle, code string) error {
if s.mail == nil {
return nil
+26 -13
server/middleware.go
···
import (
"crypto/sha256"
"encoding/base64"
+
"errors"
"fmt"
"strings"
"time"
···
"github.com/golang-jwt/jwt/v4"
"github.com/haileyok/cocoon/internal/helpers"
"github.com/haileyok/cocoon/models"
+
"github.com/haileyok/cocoon/oauth/dpop"
"github.com/haileyok/cocoon/oauth/provider"
"github.com/labstack/echo/v4"
"gitlab.com/yawning/secp256k1-voi"
···
token, _, err := new(jwt.Parser).ParseUnverified(tokenstr, jwt.MapClaims{})
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
var did string
···
})
if err != nil {
s.logger.Error("error parsing jwt", "error", err)
-
// NOTE: https://github.com/bluesky-social/atproto/discussions/3319
-
return e.JSON(400, map[string]string{"error": "ExpiredToken", "message": "token has expired"})
+
return helpers.ExpiredTokenError(e)
}
if !token.Valid {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
} else {
kpts := strings.Split(tokenstr, ".")
···
scope, _ := claims["scope"].(string)
if isRefresh && scope != "com.atproto.refresh" {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
} else if !hasLxm && !isRefresh && scope != "com.atproto.access" {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
table := "tokens"
···
var result Result
if err := s.db.Raw("SELECT EXISTS(SELECT 1 FROM "+table+" WHERE token = ?) AS found", nil, tokenstr).Scan(&result).Error; err != nil {
if err == gorm.ErrRecordNotFound {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
s.logger.Error("error getting token from db", "error", err)
···
}
if !result.Found {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
}
···
}
if exp < float64(time.Now().UTC().Unix()) {
-
return helpers.InputError(e, to.StringPtr("ExpiredToken"))
+
return helpers.ExpiredTokenError(e)
}
if repo == nil {
···
e.Set("token", tokenstr)
if err := next(e); err != nil {
-
e.Error(err)
+
return helpers.InvalidTokenError(e)
}
return nil
···
proof, err := s.oauthProvider.DpopManager.CheckProof(e.Request().Method, "https://"+s.config.Hostname+e.Request().URL.String(), e.Request().Header, to.StringPtr(accessToken))
if err != nil {
+
if errors.Is(err, dpop.ErrUseDpopNonce) {
+
e.Response().Header().Set("WWW-Authenticate", `DPoP error="use_dpop_nonce"`)
+
e.Response().Header().Add("access-control-expose-headers", "WWW-Authenticate")
+
return e.JSON(401, map[string]string{
+
"error": "use_dpop_nonce",
+
})
+
}
s.logger.Error("invalid dpop proof", "error", err)
-
return helpers.InputError(e, to.StringPtr(err.Error()))
+
return helpers.InputError(e, nil)
}
var oauthToken provider.OauthToken
···
}
if oauthToken.Token == "" {
-
return helpers.InputError(e, to.StringPtr("InvalidToken"))
+
return helpers.InvalidTokenError(e)
}
if *oauthToken.Parameters.DpopJkt != proof.JKT {
···
}
if time.Now().After(oauthToken.ExpiresAt) {
-
return e.JSON(400, map[string]string{"error": "ExpiredToken", "message": "token has expired"})
+
e.Response().Header().Set("WWW-Authenticate", `DPoP error="invalid_token", error_description="Token expired"`)
+
e.Response().Header().Add("access-control-expose-headers", "WWW-Authenticate")
+
return e.JSON(401, map[string]string{
+
"error": "invalid_token",
+
"error_description": "Token expired",
+
})
}
repo, err := s.getRepoActorByDid(oauthToken.Sub)
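
A short sketch, not part of the diff, of how a client can detect the use_dpop_nonce challenge emitted above and decide to retry the request with a fresh DPoP proof; minting the new proof itself is out of scope.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// needsDpopNonceRetry reports whether a response is the use_dpop_nonce challenge.
func needsDpopNonceRetry(resp *http.Response) bool {
	return resp.StatusCode == http.StatusUnauthorized &&
		strings.Contains(resp.Header.Get("WWW-Authenticate"), `error="use_dpop_nonce"`)
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusUnauthorized,
		Header:     http.Header{"Www-Authenticate": []string{`DPoP error="use_dpop_nonce"`}},
	}
	fmt.Println(needsDpopNonceRetry(resp)) // true
}
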
+27 -21
server/repo.go
···
"github.com/Azure/go-autorest/autorest/to"
"github.com/bluesky-social/indigo/api/atproto"
-
"github.com/bluesky-social/indigo/atproto/data"
+
"github.com/bluesky-social/indigo/atproto/atdata"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/bluesky-social/indigo/carstore"
"github.com/bluesky-social/indigo/events"
lexutil "github.com/bluesky-social/indigo/lex/util"
"github.com/bluesky-social/indigo/repo"
-
"github.com/bluesky-social/indigo/util"
-
"github.com/haileyok/cocoon/blockstore"
"github.com/haileyok/cocoon/internal/db"
"github.com/haileyok/cocoon/models"
+
"github.com/haileyok/cocoon/recording_blockstore"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
···
}
func (mm *MarshalableMap) MarshalCBOR(w io.Writer) error {
-
data, err := data.MarshalCBOR(*mm)
+
data, err := atdata.MarshalCBOR(*mm)
if err != nil {
return err
}
···
return nil, err
}
-
dbs := blockstore.New(urepo.Did, rm.db)
-
r, err := repo.OpenRepo(context.TODO(), dbs, rootcid)
+
dbs := rm.s.getBlockstore(urepo.Did)
+
bs := recording_blockstore.New(dbs)
+
r, err := repo.OpenRepo(context.TODO(), bs, rootcid)
entries := []models.Record{}
var results []ApplyWriteResult
···
if err != nil {
return nil, err
}
-
out, err := data.UnmarshalJSON(j)
+
out, err := atdata.UnmarshalJSON(j)
if err != nil {
return nil, err
}
mm := MarshalableMap(out)
+
+
// HACK: if a record doesn't contain a $type, we can manually set it here based on the op's collection
+
if mm["$type"] == "" {
+
mm["$type"] = op.Collection
+
}
+
nc, err := r.PutRecord(context.TODO(), op.Collection+"/"+*op.Rkey, &mm)
if err != nil {
return nil, err
}
-
d, err := data.MarshalCBOR(mm)
+
d, err := atdata.MarshalCBOR(mm)
if err != nil {
return nil, err
}
···
if err != nil {
return nil, err
}
-
out, err := data.UnmarshalJSON(j)
+
out, err := atdata.UnmarshalJSON(j)
if err != nil {
return nil, err
}
···
if err != nil {
return nil, err
}
-
d, err := data.MarshalCBOR(mm)
+
d, err := atdata.MarshalCBOR(mm)
if err != nil {
return nil, err
}
···
}
}
-
for _, op := range dbs.GetLog() {
+
for _, op := range bs.GetWriteLog() {
if _, err := carstore.LdWrite(buf, op.Cid().Bytes(), op.RawData()); err != nil {
return nil, err
}
···
Rev: rev,
Since: &urepo.Rev,
Commit: lexutil.LexLink(newroot),
-
Time: time.Now().Format(util.ISO8601),
+
Time: time.Now().Format(time.RFC3339Nano),
Ops: ops,
TooBig: false,
},
})
-
if err := dbs.UpdateRepo(context.TODO(), newroot, rev); err != nil {
+
if err := rm.s.UpdateRepo(context.TODO(), urepo.Did, newroot, rev); err != nil {
return nil, err
}
···
return cid.Undef, nil, err
}
-
dbs := blockstore.New(urepo.Did, rm.db)
-
bs := util.NewLoggingBstore(dbs)
+
dbs := rm.s.getBlockstore(urepo.Did)
+
bs := recording_blockstore.New(dbs)
r, err := repo.OpenRepo(context.TODO(), bs, c)
if err != nil {
···
return cid.Undef, nil, err
}
-
return c, bs.GetLoggedBlocks(), nil
+
return c, bs.GetReadLog(), nil
}
func (rm *RepoMan) incrementBlobRefs(urepo models.Repo, cbor []byte) ([]cid.Cid, error) {
···
func getBlobCidsFromCbor(cbor []byte) ([]cid.Cid, error) {
var cids []cid.Cid
-
decoded, err := data.UnmarshalCBOR(cbor)
+
decoded, err := atdata.UnmarshalCBOR(cbor)
if err != nil {
return nil, fmt.Errorf("error unmarshaling cbor: %w", err)
}
-
var deepiter func(interface{}) error
-
deepiter = func(item interface{}) error {
+
var deepiter func(any) error
+
deepiter = func(item any) error {
switch val := item.(type) {
-
case map[string]interface{}:
+
case map[string]any:
if val["$type"] == "blob" {
if ref, ok := val["ref"].(string); ok {
c, err := cid.Parse(ref)
···
return deepiter(v)
}
}
-
case []interface{}:
+
case []any:
for _, v := range val {
deepiter(v)
}
+132 -34
server/server.go
···
"github.com/haileyok/cocoon/oauth/dpop"
"github.com/haileyok/cocoon/oauth/provider"
"github.com/haileyok/cocoon/plc"
+
"github.com/ipfs/go-cid"
echo_session "github.com/labstack/echo-contrib/session"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
slogecho "github.com/samber/slog-echo"
+
"gorm.io/driver/postgres"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
···
)
type S3Config struct {
-
BackupsEnabled bool
-
Endpoint string
-
Region string
-
Bucket string
-
AccessKey string
-
SecretKey string
+
BackupsEnabled bool
+
BlobstoreEnabled bool
+
Endpoint string
+
Region string
+
Bucket string
+
AccessKey string
+
SecretKey string
+
CDNUrl string
}
type Server struct {
···
oauthProvider *provider.Provider
evtman *events.EventManager
passport *identity.Passport
+
fallbackProxy string
+
+
lastRequestCrawl time.Time
+
requestCrawlMu sync.Mutex
dbName string
+
dbType string
s3Config *S3Config
}
type Args struct {
Addr string
DbName string
+
DbType string
+
DatabaseURL string
Logger *slog.Logger
Version string
Did string
···
ContactEmail string
Relays []string
AdminPassword string
+
RequireInvite bool
SmtpUser string
SmtpPass string
···
S3Config *S3Config
SessionSecret string
+
+
BlockstoreVariant BlockstoreVariant
+
FallbackProxy string
}
type config struct {
-
Version string
-
Did string
-
Hostname string
-
ContactEmail string
-
EnforcePeering bool
-
Relays []string
-
AdminPassword string
-
SmtpEmail string
-
SmtpName string
+
Version string
+
Did string
+
Hostname string
+
ContactEmail string
+
EnforcePeering bool
+
Relays []string
+
AdminPassword string
+
RequireInvite bool
+
SmtpEmail string
+
SmtpName string
+
BlockstoreVariant BlockstoreVariant
+
FallbackProxy string
}
type CustomValidator struct {
···
IdleTimeout: 5 * time.Minute,
}
-
gdb, err := gorm.Open(sqlite.Open("cocoon.db"), &gorm.Config{})
-
if err != nil {
-
return nil, err
+
dbType := args.DbType
+
if dbType == "" {
+
dbType = "sqlite"
+
}
+
+
var gdb *gorm.DB
+
var err error
+
switch dbType {
+
case "postgres":
+
if args.DatabaseURL == "" {
+
return nil, fmt.Errorf("database-url must be set when using postgres")
+
}
+
gdb, err = gorm.Open(postgres.Open(args.DatabaseURL), &gorm.Config{})
+
if err != nil {
+
return nil, fmt.Errorf("failed to connect to postgres: %w", err)
+
}
+
args.Logger.Info("connected to PostgreSQL database")
+
default:
+
gdb, err = gorm.Open(sqlite.Open(args.DbName), &gorm.Config{})
+
if err != nil {
+
return nil, fmt.Errorf("failed to open sqlite database: %w", err)
+
}
+
args.Logger.Info("connected to SQLite database", "path", args.DbName)
}
dbw := db.NewDB(gdb)
···
plcClient: plcClient,
privateKey: &pkey,
config: &config{
-
Version: args.Version,
-
Did: args.Did,
-
Hostname: args.Hostname,
-
ContactEmail: args.ContactEmail,
-
EnforcePeering: false,
-
Relays: args.Relays,
-
AdminPassword: args.AdminPassword,
-
SmtpName: args.SmtpName,
-
SmtpEmail: args.SmtpEmail,
+
Version: args.Version,
+
Did: args.Did,
+
Hostname: args.Hostname,
+
ContactEmail: args.ContactEmail,
+
EnforcePeering: false,
+
Relays: args.Relays,
+
AdminPassword: args.AdminPassword,
+
RequireInvite: args.RequireInvite,
+
SmtpName: args.SmtpName,
+
SmtpEmail: args.SmtpEmail,
+
BlockstoreVariant: args.BlockstoreVariant,
+
FallbackProxy: args.FallbackProxy,
},
evtman: events.NewEventManager(events.NewMemPersister()),
passport: identity.NewPassport(h, identity.NewMemCache(10_000)),
dbName: args.DbName,
+
dbType: dbType,
s3Config: args.S3Config,
oauthProvider: provider.NewProvider(provider.Args{
···
// TODO: should validate these args
if args.SmtpUser == "" || args.SmtpPass == "" || args.SmtpHost == "" || args.SmtpPort == "" || args.SmtpEmail == "" || args.SmtpName == "" {
-
args.Logger.Warn("not enough smpt args were provided. mailing will not work for your server.")
+
args.Logger.Warn("not enough smtp args were provided. mailing will not work for your server.")
} else {
mail := mailyak.New(args.SmtpHost+":"+args.SmtpPort, smtp.PlainAuth("", args.SmtpUser, args.SmtpPass, args.SmtpHost))
mail.From(s.config.SmtpEmail)
···
s.echo.GET("/", s.handleRoot)
s.echo.GET("/xrpc/_health", s.handleHealth)
s.echo.GET("/.well-known/did.json", s.handleWellKnown)
+
s.echo.GET("/.well-known/atproto-did", s.handleAtprotoDid)
s.echo.GET("/.well-known/oauth-protected-resource", s.handleOauthProtectedResource)
s.echo.GET("/.well-known/oauth-authorization-server", s.handleOauthAuthorizationServer)
s.echo.GET("/robots.txt", s.handleRobots)
···
// public
s.echo.GET("/xrpc/com.atproto.identity.resolveHandle", s.handleResolveHandle)
s.echo.POST("/xrpc/com.atproto.server.createAccount", s.handleCreateAccount)
-
s.echo.POST("/xrpc/com.atproto.server.createAccount", s.handleCreateAccount)
s.echo.POST("/xrpc/com.atproto.server.createSession", s.handleCreateSession)
s.echo.GET("/xrpc/com.atproto.server.describeServer", s.handleDescribeServer)
+
s.echo.POST("/xrpc/com.atproto.server.reserveSigningKey", s.handleServerReserveSigningKey)
s.echo.GET("/xrpc/com.atproto.repo.describeRepo", s.handleDescribeRepo)
s.echo.GET("/xrpc/com.atproto.sync.listRepos", s.handleListRepos)
···
s.echo.GET("/xrpc/com.atproto.sync.subscribeRepos", s.handleSyncSubscribeRepos)
s.echo.GET("/xrpc/com.atproto.sync.listBlobs", s.handleSyncListBlobs)
s.echo.GET("/xrpc/com.atproto.sync.getBlob", s.handleSyncGetBlob)
+
+
// labels
+
s.echo.GET("/xrpc/com.atproto.label.queryLabels", s.handleLabelQueryLabels)
// account
s.echo.GET("/account", s.handleAccount)
···
s.echo.GET("/xrpc/com.atproto.server.getSession", s.handleGetSession, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.server.refreshSession", s.handleRefreshSession, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.server.deleteSession", s.handleDeleteSession, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.GET("/xrpc/com.atproto.identity.getRecommendedDidCredentials", s.handleGetRecommendedDidCredentials, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.identity.updateHandle", s.handleIdentityUpdateHandle, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.identity.requestPlcOperationSignature", s.handleIdentityRequestPlcOperationSignature, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.identity.signPlcOperation", s.handleSignPlcOperation, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.identity.submitPlcOperation", s.handleSubmitPlcOperation, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.server.confirmEmail", s.handleServerConfirmEmail, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.server.requestEmailConfirmation", s.handleServerRequestEmailConfirmation, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.server.requestPasswordReset", s.handleServerRequestPasswordReset) // AUTH NOT REQUIRED FOR THIS ONE
···
s.echo.POST("/xrpc/com.atproto.server.updateEmail", s.handleServerUpdateEmail, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.GET("/xrpc/com.atproto.server.getServiceAuth", s.handleServerGetServiceAuth, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.GET("/xrpc/com.atproto.server.checkAccountStatus", s.handleServerCheckAccountStatus, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.server.deactivateAccount", s.handleServerDeactivateAccount, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.server.activateAccount", s.handleServerActivateAccount, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.server.requestAccountDelete", s.handleServerRequestAccountDelete, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.POST("/xrpc/com.atproto.server.deleteAccount", s.handleServerDeleteAccount)
// repo
+
s.echo.GET("/xrpc/com.atproto.repo.listMissingBlobs", s.handleListMissingBlobs, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.repo.createRecord", s.handleCreateRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.repo.putRecord", s.handlePutRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/com.atproto.repo.deleteRecord", s.handleDeleteRecord, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
···
// stupid silly endpoints
s.echo.GET("/xrpc/app.bsky.actor.getPreferences", s.handleActorGetPreferences, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
s.echo.POST("/xrpc/app.bsky.actor.putPreferences", s.handleActorPutPreferences, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
+
s.echo.GET("/xrpc/app.bsky.feed.getFeed", s.handleProxyBskyFeedGetFeed, s.handleLegacySessionMiddleware, s.handleOauthSessionMiddleware)
// admin routes
s.echo.POST("/xrpc/com.atproto.server.createInviteCode", s.handleCreateInviteCode, s.handleAdminMiddleware)
···
&models.Record{},
&models.Blob{},
&models.BlobPart{},
+
&models.ReservedKey{},
&provider.OauthToken{},
&provider.OauthAuthorizationRequest{},
)
···
go s.backupRoutine()
+
go func() {
+
if err := s.requestCrawl(ctx); err != nil {
+
s.logger.Error("error requesting crawls", "err", err)
+
}
+
}()
+
+
<-ctx.Done()
+
+
fmt.Println("shut down")
+
+
return nil
+
}
+
+
func (s *Server) requestCrawl(ctx context.Context) error {
+
logger := s.logger.With("component", "request-crawl")
+
s.requestCrawlMu.Lock()
+
defer s.requestCrawlMu.Unlock()
+
+
logger.Info("requesting crawl with configured relays")
+
+
if time.Now().Sub(s.lastRequestCrawl) <= 1*time.Minute {
+
return fmt.Errorf("a crawl request has already been made within the last minute")
+
}
+
for _, relay := range s.config.Relays {
+
logger := logger.With("relay", relay)
+
logger.Info("requesting crawl from relay")
cli := xrpc.Client{Host: relay}
-
atproto.SyncRequestCrawl(ctx, &cli, &atproto.SyncRequestCrawl_Input{
+
if err := atproto.SyncRequestCrawl(ctx, &cli, &atproto.SyncRequestCrawl_Input{
Hostname: s.config.Hostname,
-
})
+
}); err != nil {
+
logger.Error("error requesting crawl", "err", err)
+
} else {
+
logger.Info("crawl requested successfully")
+
}
}
-
<-ctx.Done()
-
-
fmt.Println("shut down")
+
s.lastRequestCrawl = time.Now()
return nil
}
func (s *Server) doBackup() {
+
if s.dbType == "postgres" {
+
s.logger.Info("skipping S3 backup - PostgreSQL backups should be handled externally (pg_dump, managed database backups, etc.)")
+
return
+
}
+
start := time.Now()
s.logger.Info("beginning backup to s3...")
···
go s.doBackup()
}
}
+
+
func (s *Server) UpdateRepo(ctx context.Context, did string, root cid.Cid, rev string) error {
+
if err := s.db.Exec("UPDATE repos SET root = ?, rev = ? WHERE did = ?", nil, root.Bytes(), rev, did).Error; err != nil {
+
return err
+
}
+
+
return nil
+
}
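
The driver selection added above boils down to a small decision that can be sketched in isolation: open Postgres when a connection string is provided, otherwise fall back to a local SQLite file. The DSN and file name here are placeholders.

package main

import (
	"log"

	"gorm.io/driver/postgres"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func open(databaseURL, sqlitePath string) (*gorm.DB, error) {
	if databaseURL != "" {
		return gorm.Open(postgres.Open(databaseURL), &gorm.Config{})
	}
	return gorm.Open(sqlite.Open(sqlitePath), &gorm.Config{})
}

func main() {
	db, err := open("", "cocoon.db") // placeholder values
	if err != nil {
		log.Fatal(err)
	}
	_ = db
}
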
+91
server/service_auth.go
···
+
package server
+
+
import (
+
"context"
+
"fmt"
+
"strings"
+
+
"github.com/bluesky-social/indigo/atproto/atcrypto"
+
"github.com/bluesky-social/indigo/atproto/identity"
+
atproto_identity "github.com/bluesky-social/indigo/atproto/identity"
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
"github.com/golang-jwt/jwt/v4"
+
)
+
+
type ES256KSigningMethod struct {
+
alg string
+
}
+
+
func (m *ES256KSigningMethod) Alg() string {
+
return m.alg
+
}
+
+
func (m *ES256KSigningMethod) Verify(signingString string, signature string, key interface{}) error {
+
signatureBytes, err := jwt.DecodeSegment(signature)
+
if err != nil {
+
return err
+
}
+
return key.(atcrypto.PublicKey).HashAndVerifyLenient([]byte(signingString), signatureBytes)
+
}
+
+
func (m *ES256KSigningMethod) Sign(signingString string, key interface{}) (string, error) {
+
return "", fmt.Errorf("unimplemented")
+
}
+
+
func init() {
+
ES256K := ES256KSigningMethod{alg: "ES256K"}
+
jwt.RegisterSigningMethod(ES256K.Alg(), func() jwt.SigningMethod {
+
return &ES256K
+
})
+
}
+
+
func (s *Server) validateServiceAuth(ctx context.Context, rawToken string, nsid string) (string, error) {
+
token := strings.TrimSpace(rawToken)
+
+
parsedToken, err := jwt.ParseWithClaims(token, jwt.MapClaims{}, func(token *jwt.Token) (interface{}, error) {
+
did := syntax.DID(token.Claims.(jwt.MapClaims)["iss"].(string))
+
didDoc, err := s.passport.FetchDoc(ctx, did.String())
+
if err != nil {
+
return nil, fmt.Errorf("unable to resolve did %s: %s", did, err)
+
}
+
+
verificationMethods := make([]atproto_identity.DocVerificationMethod, len(didDoc.VerificationMethods))
+
for i, verificationMethod := range didDoc.VerificationMethods {
+
verificationMethods[i] = atproto_identity.DocVerificationMethod{
+
ID: verificationMethod.Id,
+
Type: verificationMethod.Type,
+
PublicKeyMultibase: verificationMethod.PublicKeyMultibase,
+
Controller: verificationMethod.Controller,
+
}
+
}
+
services := make([]atproto_identity.DocService, len(didDoc.Service))
+
for i, service := range didDoc.Service {
+
services[i] = atproto_identity.DocService{
+
ID: service.Id,
+
Type: service.Type,
+
ServiceEndpoint: service.ServiceEndpoint,
+
}
+
}
+
parsedIdentity := atproto_identity.ParseIdentity(&identity.DIDDocument{
+
DID: did,
+
AlsoKnownAs: didDoc.AlsoKnownAs,
+
VerificationMethod: verificationMethods,
+
Service: services,
+
})
+
+
key, err := parsedIdentity.PublicKey()
+
if err != nil {
+
return nil, fmt.Errorf("signing key not found for did %s: %s", did, err)
+
}
+
return key, nil
+
})
+
if err != nil {
+
return "", fmt.Errorf("invalid token: %s", err)
+
}
+
+
claims := parsedToken.Claims.(jwt.MapClaims)
+
if claims["lxm"] != nsid {
+
return "", fmt.Errorf("bad jwt lexicon method (\"lxm\"). must match: %s", nsid)
+
}
+
return claims["iss"].(string), nil
+
}
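
For illustration, the claim shape validateServiceAuth expects (iss/aud/lxm/exp/jti, as built by the getServiceAuth handler earlier in this diff), parsed here without verification just to show the fields. The token string is a placeholder and will not parse successfully.

package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	tokenstr := "<service-auth-jwt>" // placeholder; a real token comes from getServiceAuth

	token, _, err := new(jwt.Parser).ParseUnverified(tokenstr, jwt.MapClaims{})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	claims := token.Claims.(jwt.MapClaims)
	fmt.Println(claims["iss"], claims["aud"], claims["lxm"], claims["exp"], claims["jti"])
}
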
+5 -4
server/templates/account.html
···
</div>
{{ else }} {{ range .Tokens }}
<div class="base-container">
-
<h4>{{ .ClientId }}</h4>
-
<p>Created: {{ .CreatedAt }}</p>
-
<p>Updated: {{ .UpdatedAt }}</p>
-
<p>Expires: {{ .ExpiresAt }}</p>
+
<h4>{{ .ClientName }}</h4>
+
<p>Session Age: {{ .Age}}</p>
+
<p>Last Updated: {{ .LastUpdated }} ago</p>
+
<p>Expires In: {{ .ExpiresIn }}</p>
+
<p>IP Address: {{ .Ip }}</p>
<form action="/account/revoke" method="post">
<input type="hidden" name="token" value="{{ .Token }}" />
<button type="submit" value="">Revoke</button>
+137
sqlite_blockstore/sqlite_blockstore.go
···
+
package sqlite_blockstore
+
+
import (
+
"context"
+
"fmt"
+
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
"github.com/haileyok/cocoon/internal/db"
+
"github.com/haileyok/cocoon/models"
+
blocks "github.com/ipfs/go-block-format"
+
"github.com/ipfs/go-cid"
+
"gorm.io/gorm/clause"
+
)
+
+
type SqliteBlockstore struct {
+
db *db.DB
+
did string
+
readonly bool
+
inserts map[cid.Cid]blocks.Block
+
}
+
+
func New(did string, db *db.DB) *SqliteBlockstore {
+
return &SqliteBlockstore{
+
did: did,
+
db: db,
+
readonly: false,
+
inserts: map[cid.Cid]blocks.Block{},
+
}
+
}
+
+
func NewReadOnly(did string, db *db.DB) *SqliteBlockstore {
+
return &SqliteBlockstore{
+
did: did,
+
db: db,
+
readonly: true,
+
inserts: map[cid.Cid]blocks.Block{},
+
}
+
}
+
+
func (bs *SqliteBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
+
var block models.Block
+
+
maybeBlock, ok := bs.inserts[cid]
+
if ok {
+
return maybeBlock, nil
+
}
+
+
if err := bs.db.Raw("SELECT * FROM blocks WHERE did = ? AND cid = ?", nil, bs.did, cid.Bytes()).Scan(&block).Error; err != nil {
+
return nil, err
+
}
+
+
b, err := blocks.NewBlockWithCid(block.Value, cid)
+
if err != nil {
+
return nil, err
+
}
+
+
return b, nil
+
}
+
+
func (bs *SqliteBlockstore) Put(ctx context.Context, block blocks.Block) error {
+
bs.inserts[block.Cid()] = block
+
+
if bs.readonly {
+
return nil
+
}
+
+
b := models.Block{
+
Did: bs.did,
+
Cid: block.Cid().Bytes(),
+
Rev: syntax.NewTIDNow(0).String(), // TODO: WARN, this is bad. don't do this
+
Value: block.RawData(),
+
}
+
+
if err := bs.db.Create(&b, []clause.Expression{clause.OnConflict{
+
Columns: []clause.Column{{Name: "did"}, {Name: "cid"}},
+
UpdateAll: true,
+
}}).Error; err != nil {
+
return err
+
}
+
+
return nil
+
}
+
+
func (bs *SqliteBlockstore) DeleteBlock(context.Context, cid.Cid) error {
+
panic("not implemented")
+
}
+
+
func (bs *SqliteBlockstore) Has(context.Context, cid.Cid) (bool, error) {
+
panic("not implemented")
+
}
+
+
func (bs *SqliteBlockstore) GetSize(context.Context, cid.Cid) (int, error) {
+
panic("not implemented")
+
}
+
+
func (bs *SqliteBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
+
tx := bs.db.BeginDangerously()
+
+
for _, block := range blocks {
+
bs.inserts[block.Cid()] = block
+
+
if bs.readonly {
+
continue
+
}
+
+
b := models.Block{
+
Did: bs.did,
+
Cid: block.Cid().Bytes(),
+
Rev: syntax.NewTIDNow(0).String(), // TODO: WARN, this is bad. don't do this
+
Value: block.RawData(),
+
}
+
+
if err := tx.Clauses(clause.OnConflict{
+
Columns: []clause.Column{{Name: "did"}, {Name: "cid"}},
+
UpdateAll: true,
+
}).Create(&b).Error; err != nil {
+
tx.Rollback()
+
return err
+
}
+
}
+
+
if bs.readonly {
+
return nil
+
}
+
+
tx.Commit()
+
+
return nil
+
}
+
+
func (bs *SqliteBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+
return nil, fmt.Errorf("iteration not allowed on sqlite blockstore")
+
}
+
+
func (bs *SqliteBlockstore) HashOnRead(enabled bool) {
+
panic("not implemented")
+
}
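
As a final illustration, a standalone sketch of the unit this store persists: a raw byte payload addressed by its CID, as produced by go-block-format. No database is touched here; the comment notes what Put above would write.

package main

import (
	"fmt"

	blocks "github.com/ipfs/go-block-format"
)

func main() {
	// Put persists blk.Cid().Bytes() and blk.RawData() into the blocks table, keyed by DID.
	blk := blocks.NewBlock([]byte("hello, blockstore"))
	fmt.Println(blk.Cid(), len(blk.RawData()))
}
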