Monorepo for Tangled — https://tangled.org

Compare changes


+79 -20
api/tangled/cbor_gen.go
···
cw := cbg.NewCborWriter(w)
-
fieldCount := 9
+
fieldCount := 10
if t.Body == nil {
fieldCount--
}
if t.Mentions == nil {
+
fieldCount--
+
}
+
+
if t.Patch == nil {
fieldCount--
···
// t.Patch (string) (string)
-
if len("patch") > 1000000 {
-
return xerrors.Errorf("Value in field \"patch\" was too long")
-
}
+
if t.Patch != nil {
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
-
return err
-
}
-
if _, err := cw.WriteString(string("patch")); err != nil {
-
return err
-
}
+
if len("patch") > 1000000 {
+
return xerrors.Errorf("Value in field \"patch\" was too long")
+
}
-
if len(t.Patch) > 1000000 {
-
return xerrors.Errorf("Value in field t.Patch was too long")
-
}
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("patch")); err != nil {
+
return err
+
}
+
+
if t.Patch == nil {
+
if _, err := cw.Write(cbg.CborNull); err != nil {
+
return err
+
}
+
} else {
+
if len(*t.Patch) > 1000000 {
+
return xerrors.Errorf("Value in field t.Patch was too long")
+
}
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil {
-
return err
-
}
-
if _, err := cw.WriteString(string(t.Patch)); err != nil {
-
return err
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(*t.Patch)); err != nil {
+
return err
+
}
+
}
// t.Title (string) (string)
···
return err
}
+
// t.PatchBlob (util.LexBlob) (struct)
+
if len("patchBlob") > 1000000 {
+
return xerrors.Errorf("Value in field \"patchBlob\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("patchBlob")); err != nil {
+
return err
+
}
+
+
if err := t.PatchBlob.MarshalCBOR(cw); err != nil {
+
return err
+
}
+
// t.References ([]string) (slice)
if t.References != nil {
···
case "patch":
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
b, err := cr.ReadByte()
if err != nil {
return err
}
+
if b != cbg.CborNull[0] {
+
if err := cr.UnreadByte(); err != nil {
+
return err
+
}
-
t.Patch = string(sval)
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.Patch = (*string)(&sval)
+
}
// t.Title (string) (string)
case "title":
···
t.CreatedAt = string(sval)
+
}
+
// t.PatchBlob (util.LexBlob) (struct)
+
case "patchBlob":
+
+
{
+
+
b, err := cr.ReadByte()
+
if err != nil {
+
return err
+
}
+
if b != cbg.CborNull[0] {
+
if err := cr.UnreadByte(); err != nil {
+
return err
+
}
+
t.PatchBlob = new(util.LexBlob)
+
if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil {
+
return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err)
+
}
+
}
+
// t.References ([]string) (slice)
case "references":
+12 -9
api/tangled/repopull.go
···
}
//
// RECORDTYPE: RepoPull
type RepoPull struct {
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
-
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
-
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
-
Patch string `json:"patch" cborgen:"patch"`
-
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
-
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
-
Target *RepoPull_Target `json:"target" cborgen:"target"`
-
Title string `json:"title" cborgen:"title"`
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
+
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+
// patch: (deprecated) use patchBlob instead
+
Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
+
// patchBlob: patch content
+
PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"`
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
+
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
+
Target *RepoPull_Target `json:"target" cborgen:"target"`
+
Title string `json:"title" cborgen:"title"`
}
// RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
+2
appview/db/follow.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
for rows.Next() {
var follow models.Follow
var followedAt string
+1
appview/db/issues.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
for rows.Next() {
var comment models.IssueComment
+1 -1
appview/db/language.go
···
whereClause,
)
rows, err := e.Query(query, args...)
-
if err != nil {
return nil, fmt.Errorf("failed to execute query: %w ", err)
}
+
defer rows.Close()
var langs []models.RepoLanguage
for rows.Next() {
+5
appview/db/profile.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
profileMap := make(map[string]*models.Profile)
for rows.Next() {
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
idxs := make(map[string]int)
for did := range profileMap {
idxs[did] = 0
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
idxs = make(map[string]int)
for did := range profileMap {
idxs[did] = 0
+1
appview/db/registration.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
for rows.Next() {
var createdAt string
+12 -1
appview/db/repos.go
···
limitClause,
)
rows, err := e.Query(repoQuery, args...)
-
if err != nil {
return nil, fmt.Errorf("failed to execute repo query: %w ", err)
}
+
defer rows.Close()
for rows.Next() {
var repo models.Repo
···
if err != nil {
return nil, fmt.Errorf("failed to execute labels query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat, labelat string
if err := rows.Scan(&repoat, &labelat); err != nil {
···
from repo_languages
where repo_at in (%s)
and is_default_ref = 1
+
and language <> ''
)
where rn = 1
`,
···
if err != nil {
return nil, fmt.Errorf("failed to execute lang query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat, lang string
if err := rows.Scan(&repoat, &lang); err != nil {
···
if err != nil {
return nil, fmt.Errorf("failed to execute star-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var count int
···
if err != nil {
return nil, fmt.Errorf("failed to execute issue-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var open, closed int
···
if err != nil {
return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var open, merged, closed, deleted int
+1
appview/db/star.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
starMap := make(map[string][]models.Star)
for rows.Next() {
+1 -1
appview/models/pull.go
···
Repo *Repo
}
+
// NOTE: This method does not include patch blob in returned atproto record
func (p Pull) AsRecord() tangled.RepoPull {
var source *tangled.RepoPull_Source
if p.PullSource != nil {
···
Repo: p.RepoAt.String(),
Branch: p.TargetBranch,
},
-
Patch: p.LatestPatch(),
Source: source,
}
return record
-1
appview/notify/merged_notifier.go
···
v.Call(in)
}(n)
}
-
wg.Wait()
}
func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) {
+6 -1
appview/pages/funcmap.go
···
"github.com/dustin/go-humanize"
"github.com/go-enry/go-enry/v2"
"github.com/yuin/goldmark"
+
emoji "github.com/yuin/goldmark-emoji"
"tangled.org/core/appview/filetree"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages/markup"
···
},
"description": func(text string) template.HTML {
p.rctx.RendererType = markup.RendererTypeDefault
-
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New())
+
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New(
+
goldmark.WithExtensions(
+
emoji.Emoji,
+
),
+
))
sanitized := p.rctx.SanitizeDescription(htmlString)
return template.HTML(sanitized)
},
+13 -3
appview/pages/markup/extension/atlink.go
···
return KindAt
}
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
type atParser struct{}
···
if m == nil {
return nil
}
+
+
// Check for all links in the markdown to see if the handle found is inside one
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
+
for _, linkMatch := range linksIndexes {
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
+
return nil
+
}
+
}
+
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
block.Advance(m[1])
node := &AtNode{}
···
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
if entering {
-
w.WriteString(`<a href="/@`)
+
w.WriteString(`<a href="/`)
w.WriteString(n.(*AtNode).Handle)
-
w.WriteString(`" class="mention font-bold">`)
+
w.WriteString(`" class="mention">`)
} else {
w.WriteString("</a>")
}
+2
appview/pages/markup/markdown.go
···
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
"github.com/alecthomas/chroma/v2/styles"
"github.com/yuin/goldmark"
+
"github.com/yuin/goldmark-emoji"
highlighting "github.com/yuin/goldmark-highlighting/v2"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/extension"
···
),
callout.CalloutExtention,
textension.AtExt,
+
emoji.Emoji,
),
goldmark.WithParserOptions(
parser.WithAutoHeadingID(),
+121
appview/pages/markup/markdown_test.go
···
+
package markup
+
+
import (
+
"bytes"
+
"testing"
+
)
+
+
func TestAtExtension_Rendering(t *testing.T) {
+
tests := []struct {
+
name string
+
markdown string
+
expected string
+
}{
+
{
+
name: "renders simple at mention",
+
markdown: "Hello @user.tngl.sh!",
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
+
},
+
{
+
name: "renders multiple at mentions",
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
+
},
+
{
+
name: "renders at mention in parentheses",
+
markdown: "Check this out (@user.tngl.sh)",
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
+
},
+
{
+
name: "does not render email",
+
markdown: "Contact me at test@example.com",
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
+
},
+
{
+
name: "renders at mention with hyphen",
+
markdown: "Follow @user-name.tngl.sh",
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
+
},
+
{
+
name: "renders at mention with numbers",
+
markdown: "@user123.test456.social",
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
+
},
+
{
+
name: "at mention at start of line",
+
markdown: "@user.tngl.sh is cool",
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
+
},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.name, func(t *testing.T) {
+
md := NewMarkdown()
+
+
var buf bytes.Buffer
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
+
t.Fatalf("failed to convert markdown: %v", err)
+
}
+
+
result := buf.String()
+
if result != tt.expected+"\n" {
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
+
}
+
})
+
}
+
}
+
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
+
tests := []struct {
+
name string
+
markdown string
+
contains string
+
}{
+
{
+
name: "at mention with bold",
+
markdown: "**Hello @user.tngl.sh**",
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
+
},
+
{
+
name: "at mention with italic",
+
markdown: "*Check @user.tngl.sh*",
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
+
},
+
{
+
name: "at mention in list",
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
+
},
+
{
+
name: "at mention in link",
+
markdown: "[@regnault.dev](https://regnault.dev)",
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
+
},
+
{
+
name: "at mention in link again",
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
+
},
+
{
+
name: "at mention in link again, multiline",
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
+
},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.name, func(t *testing.T) {
+
md := NewMarkdown()
+
+
var buf bytes.Buffer
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
+
t.Fatalf("failed to convert markdown: %v", err)
+
}
+
+
result := buf.String()
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
+
}
+
})
+
}
+
}
+1 -1
appview/pages/pages.go
···
}
func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error {
-
return p.executePlain("fragments/starBtn", w, params)
+
return p.executePlain("fragments/starBtn-oob", w, params)
}
type RepoIndexParams struct {
+1 -1
appview/pages/templates/banner.html
···
<div class="mx-6">
These services may not be fully accessible until upgraded.
<a class="underline text-red-800 dark:text-red-200"
-
href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md">
+
href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles">
Click to read the upgrade guide</a>.
</div>
</details>
+5
appview/pages/templates/fragments/starBtn-oob.html
···
+
{{ define "fragments/starBtn-oob" }}
+
<div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'>
+
{{ template "fragments/starBtn" . }}
+
</div>
+
{{ end }}
+1 -3
appview/pages/templates/fragments/starBtn.html
···
{{ define "fragments/starBtn" }}
+
{{/* NOTE: this fragment is always replaced with hx-swap-oob */}}
<button
id="starBtn"
class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group"
···
{{ end }}
hx-trigger="click"
-
hx-target="this"
-
hx-swap="outerHTML"
-
hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'
hx-disabled-elt="#starBtn"
>
{{ if .IsStarred }}
+1 -1
appview/pages/templates/knots/index.html
···
{{ define "docsButton" }}
<a
class="btn flex items-center gap-2"
-
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+
href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
{{ i "book" "size-4" }}
docs
</a>
+2 -2
appview/pages/templates/layouts/fragments/footer.html
···
<div class="flex flex-col gap-1">
<div class="{{ $headerStyle }}">resources</div>
<a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a>
-
<a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a>
+
<a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a>
<a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a>
<a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a>
</div>
···
<div class="flex flex-col gap-1">
<div class="{{ $headerStyle }}">resources</div>
<a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a>
-
<a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a>
+
<a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a>
<a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a>
<a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a>
</div>
+1 -1
appview/pages/templates/repo/empty.html
···
{{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
{{ $knot := .RepoInfo.Knot }}
{{ if eq $knot "knot1.tangled.sh" }}
-
{{ $knot = "tangled.sh" }}
+
{{ $knot = "tangled.org" }}
{{ end }}
<div class="w-full flex place-content-center">
<div class="py-6 w-fit flex flex-col gap-4">
+6 -6
appview/pages/templates/repo/fragments/backlinks.html
···
<div class="flex gap-2 items-center">
{{ if .State.IsClosed }}
<span class="text-gray-500 dark:text-gray-400">
-
{{ i "ban" "w-4 h-4" }}
+
{{ i "ban" "size-3" }}
</span>
{{ else if eq .Kind.String "issues" }}
<span class="text-green-600 dark:text-green-500">
-
{{ i "circle-dot" "w-4 h-4" }}
+
{{ i "circle-dot" "size-3" }}
</span>
{{ else if .State.IsOpen }}
<span class="text-green-600 dark:text-green-500">
-
{{ i "git-pull-request" "w-4 h-4" }}
+
{{ i "git-pull-request" "size-3" }}
</span>
{{ else if .State.IsMerged }}
<span class="text-purple-600 dark:text-purple-500">
-
{{ i "git-merge" "w-4 h-4" }}
+
{{ i "git-merge" "size-3" }}
</span>
{{ else }}
<span class="text-gray-600 dark:text-gray-300">
-
{{ i "git-pull-request-closed" "w-4 h-4" }}
+
{{ i "git-pull-request-closed" "size-3" }}
</span>
{{ end }}
-
<a href="{{ . }}"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
+
<a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
</div>
{{ if not (eq $.RepoInfo.FullName $repoUrl) }}
<div>
+1 -1
appview/pages/templates/repo/fragments/diff.html
···
{{ else }}
{{ range $idx, $hunk := $diff }}
{{ with $hunk }}
-
<details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
+
<details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
<summary class="list-none cursor-pointer sticky top-0">
<div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between">
<div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
+1 -1
appview/pages/templates/repo/pipelines/pipelines.html
···
</p>
<p>
<span class="{{ $bullet }}">2</span>Configure your CI/CD
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
+
<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
</p>
<p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
</div>
+1 -1
appview/pages/templates/repo/settings/pipelines.html
···
<p class="text-gray-500 dark:text-gray-400">
Choose a spindle to execute your workflows on. Only repository owners
can configure spindles. Spindles can be selfhosted,
-
<a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+
<a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
click to learn more.
</a>
</p>
+1 -1
appview/pages/templates/spindles/index.html
···
{{ define "docsButton" }}
<a
class="btn flex items-center gap-2"
-
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+
href="https://docs.tangled.org/spindles.html#self-hosting-guide">
{{ i "book" "size-4" }}
docs
</a>
+1 -1
appview/pages/templates/strings/string.html
···
<span class="select-none">/</span>
<a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a>
</div>
-
<div class="flex gap-2 text-base">
+
<div class="flex gap-2 items-stretch text-base">
{{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }}
<a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group"
hx-boost="true"
+2 -2
appview/pages/templates/user/fragments/followCard.html
···
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" />
</div>
-
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full">
+
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0">
<div class="flex-1 min-h-0 justify-around flex flex-col">
<a href="/{{ $userIdent }}">
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span>
</a>
{{ with .Profile }}
-
<p class="text-sm pb-2 md:pb-2">{{.Description}}</p>
+
<p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p>
{{ end }}
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full">
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+56 -36
appview/pulls/pulls.go
···
return
}
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+
if err != nil {
+
log.Println("failed to upload patch", err)
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
+
return
+
}
+
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
Collection: tangled.RepoPullNSID,
Repo: user.Did,
···
Repo: string(repo.RepoAt()),
Branch: targetBranch,
},
-
Patch: patch,
+
PatchBlob: blob.Blob,
Source: recordPullSource,
CreatedAt: time.Now().Format(time.RFC3339),
},
···
// apply all record creations at once
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
for _, p := range stack {
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch()))
+
if err != nil {
+
log.Println("failed to upload patch blob", err)
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
+
return
+
}
+
record := p.AsRecord()
-
write := comatproto.RepoApplyWrites_Input_Writes_Elem{
+
record.PatchBlob = blob.Blob
+
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
Collection: tangled.RepoPullNSID,
Rkey: &p.Rkey,
···
Val: &record,
},
},
-
}
-
writes = append(writes, &write)
+
})
_, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{
Repo: user.Did,
···
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
return
}
+
if err = tx.Commit(); err != nil {
log.Println("failed to create pull request", err)
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
return
+
}
+
+
// notify about each pull
+
//
+
// this is performed after tx.Commit, because it could result in a locked DB otherwise
+
for _, p := range stack {
+
s.notifier.NewPull(r.Context(), p)
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
···
return
}
-
var recordPullSource *tangled.RepoPull_Source
-
if pull.IsBranchBased() {
-
recordPullSource = &tangled.RepoPull_Source{
-
Branch: pull.PullSource.Branch,
-
Sha: sourceRev,
-
}
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+
if err != nil {
+
log.Println("failed to upload patch blob", err)
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+
return
+
}
-
if pull.IsForkBased() {
-
repoAt := pull.PullSource.RepoAt.String()
-
recordPullSource = &tangled.RepoPull_Source{
-
Branch: pull.PullSource.Branch,
-
Repo: &repoAt,
-
Sha: sourceRev,
-
}
-
}
+
record := pull.AsRecord()
+
record.PatchBlob = blob.Blob
+
record.CreatedAt = time.Now().Format(time.RFC3339)
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
Collection: tangled.RepoPullNSID,
···
Rkey: pull.Rkey,
SwapRecord: ex.Cid,
Record: &lexutil.LexiconTypeDecoder{
-
Val: &tangled.RepoPull{
-
Title: pull.Title,
-
Target: &tangled.RepoPull_Target{
-
Repo: string(repo.RepoAt()),
-
Branch: pull.TargetBranch,
-
},
-
Patch: patch, // new patch
-
Source: recordPullSource,
-
CreatedAt: time.Now().Format(time.RFC3339),
-
},
+
Val: &record,
},
})
if err != nil {
···
defer tx.Rollback()
+
client, err := s.oauth.AuthorizedClient(r)
+
if err != nil {
+
log.Println("failed to authorize client")
+
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
+
return
+
}
+
// pds updates to make
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
···
return
}
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+
if err != nil {
+
log.Println("failed to upload patch blob", err)
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+
return
+
}
record := p.AsRecord()
+
record.PatchBlob = blob.Blob
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
Collection: tangled.RepoPullNSID,
···
return
}
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+
if err != nil {
+
log.Println("failed to upload patch blob", err)
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+
return
+
}
record := np.AsRecord()
-
+
record.PatchBlob = blob.Blob
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{
Collection: tangled.RepoPullNSID,
···
if err != nil {
log.Println("failed to resubmit pull", err)
s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.")
-
return
-
}
-
-
client, err := s.oauth.AuthorizedClient(r)
-
if err != nil {
-
log.Println("failed to authorize client")
-
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
return
+17
appview/state/git_http.go
···
}
+
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
+
if !ok {
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
+
return
+
}
+
repo := r.Context().Value("repo").(*models.Repo)
+
+
scheme := "https"
+
if s.config.Core.Dev {
+
scheme = "http"
+
}
+
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
+
s.proxyRequest(w, r, targetURL)
+
}
+
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
user, ok := r.Context().Value("resolvedId").(identity.Identity)
if !ok {
+1
appview/state/router.go
···
// These routes get proxied to the knot
r.Get("/info/refs", s.InfoRefs)
+
r.Post("/git-upload-archive", s.UploadArchive)
r.Post("/git-upload-pack", s.UploadPack)
r.Post("/git-receive-pack", s.ReceivePack)
+1529
docs/DOCS.md
···
+
---
+
title: Tangled docs
+
author: The Tangled Contributors
+
date: Sun, 21 Dec 2025
+
---
+
+
# Introduction
+
+
Tangled is a decentralized code hosting and collaboration
+
platform. Every component of Tangled is open-source and
+
self-hostable. [tangled.org](https://tangled.org) also
+
provides hosting and CI services that are free to use.
+
+
There are several models for decentralized code
+
collaboration platforms, ranging from ActivityPub’s
+
(Forgejo) federated model, to Radicle’s entirely P2P model.
+
Our approach attempts to be the best of both worlds by
+
adopting the AT Protocol—a protocol for building decentralized
+
social applications with a central identity.
+
+
Central to this is the idea of “knots”. Knots are
+
lightweight, headless servers that enable users to host Git
+
repositories with ease. Knots are designed for either single
+
or multi-tenant use, which is perfect for self-hosting on a
+
Raspberry Pi at home, or larger “community” servers. By
+
default, Tangled provides managed knots where you can host
+
your repositories for free.
+
+
The appview at tangled.org acts as a consolidated "view"
+
into the whole network, allowing users to access, clone and
+
contribute to repositories hosted across different knots
+
seamlessly.
+
+
# Quick start guide
+
+
## Login or sign up
+
+
You can [log in](https://tangled.org) using your AT Protocol
+
account. If you are unclear on what that means, simply head
+
to the [signup](https://tangled.org/signup) page and create
+
an account. By doing so, you will be choosing Tangled as
+
your account provider (you will be granted a handle of the
+
form `user.tngl.sh`).
+
+
In the AT Protocol network, users are free to choose their account
+
provider (known as a "Personal Data Service", or PDS), and
+
log in to applications that support AT accounts.
+
+
You can think of it as "one account for all of the atmosphere"!
+
+
If you already have an AT account (you may have one if you
+
signed up to Bluesky, for example), you can log in with the
+
same handle on Tangled (so just use `user.bsky.social` on
+
the login page).
+
+
## Add an SSH key
+
+
Once you are logged in, you can start creating repositories
+
and pushing code. Tangled supports pushing git repositories
+
over SSH.
+
+
First, you'll need to generate an SSH key if you don't
+
already have one:
+
+
```bash
+
ssh-keygen -t ed25519 -C "foo@bar.com"
+
```
+
+
When prompted, save the key to the default location
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
+
+
Copy your public key to your clipboard:
+
+
```bash
+
# on X11
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
+
+
# on wayland
+
cat ~/.ssh/id_ed25519.pub | wl-copy
+
+
# on macos
+
cat ~/.ssh/id_ed25519.pub | pbcopy
+
```
+
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
+
paste your public key, give it a descriptive name, and hit
+
save.
+
+
## Create a repository
+
+
Once your SSH key is added, create your first repository:
+
+
1. Hit the green `+` icon on the topbar, and select
+
repository
+
2. Enter a repository name
+
3. Add a description
+
4. Choose a knotserver to host this repository on
+
5. Hit create
+
+
Knots are self-hostable, lightweight Git servers that can
+
host your repository. Unlike traditional code forges, your
+
code can live on any server. Read the [Knots](TODO) section
+
for more.
+
+
## Configure SSH
+
+
To ensure Git uses the correct SSH key and connects smoothly
+
to Tangled, add this configuration to your `~/.ssh/config`
+
file:
+
+
```
+
Host tangled.org
+
Hostname tangled.org
+
User git
+
IdentityFile ~/.ssh/id_ed25519
+
AddressFamily inet
+
```
+
+
This tells SSH to use your specific key when connecting to
+
Tangled and prevents authentication issues if you have
+
multiple SSH keys.
+
+
Note that this configuration only works for knotservers that
+
are hosted by tangled.org. If you use a custom knot, refer
+
to the [Knots](TODO) section.
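
For a self-hosted knot, an equivalent entry works the same
way; it just points at your knot's hostname instead (a
sketch, with `knot.example.com` as a placeholder):

```
Host knot.example.com
    Hostname knot.example.com
    User git
    IdentityFile ~/.ssh/id_ed25519
```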
+
+
## Push your first repository
+
+
Initialize a new Git repository:
+
+
```bash
+
mkdir my-project
+
cd my-project
+
+
git init
+
echo "# My Project" > README.md
+
```
+
+
Add some content and push!
+
+
```bash
+
git add README.md
+
git commit -m "Initial commit"
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
+
git push -u origin main
+
```
+
+
That's it! Your code is now hosted on Tangled.
+
+
## Migrating an existing repository
+
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
+
any other Git forge to Tangled is straightforward. You'll
+
simply change your repository's remote URL. At the moment,
+
Tangled does not have any tooling to migrate data such as
+
GitHub issues or pull requests.
+
+
First, create a new repository on tangled.org as described
+
in the [Quick Start Guide](#create-a-repository).
+
+
Navigate to your existing local repository:
+
+
```bash
+
cd /path/to/your/existing/repo
+
```
+
+
You can inspect your existing Git remote like so:
+
+
```bash
+
git remote -v
+
```
+
+
You'll see something like:
+
+
```
+
origin git@github.com:username/my-project (fetch)
+
origin git@github.com:username/my-project (push)
+
```
+
+
Update the remote URL to point to tangled:
+
+
```bash
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
+
```
+
+
Verify the change:
+
+
```bash
+
git remote -v
+
```
+
+
You should now see:
+
+
```
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
+
origin git@tangled.org:user.tngl.sh/my-project (push)
+
```
+
+
Push all your branches and tags to Tangled:
+
+
```bash
+
git push -u origin --all
+
git push -u origin --tags
+
```
+
+
Your repository is now migrated to Tangled! All commit
+
history, branches, and tags have been preserved.
+
+
## Mirroring a repository to Tangled
+
+
If you want to maintain your repository on multiple forges
+
simultaneously, for example, keeping your primary repository
+
on GitHub while mirroring to Tangled for backup or
+
redundancy, you can do so by adding multiple remotes.
+
+
You can configure your local repository to push to both
+
Tangled and, say, GitHub. You may already have the following
+
setup:
+
+
```
+
$ git remote -v
+
origin git@github.com:username/my-project (fetch)
+
origin git@github.com:username/my-project (push)
+
```
+
+
Now add Tangled as an additional push URL to the same
+
remote:
+
+
```bash
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
+
```
+
+
You also need to re-add the original URL as a push
+
destination (Git replaces the push URL when you use `--add`
+
the first time):
+
+
```bash
+
git remote set-url --add --push origin git@github.com:username/my-project
+
```
+
+
Verify your configuration:
+
+
```
+
$ git remote -v
+
origin git@github.com:username/my-project (fetch)
+
origin git@tangled.org:user.tngl.sh/my-project (push)
+
origin git@github.com:username/my-project (push)
+
```
+
+
Notice that there's one fetch URL (the primary remote) and
+
two push URLs. Now, whenever you push, Git will
+
automatically push to both remotes:
+
+
```bash
+
git push origin main
+
```
+
+
This single command pushes your `main` branch to both GitHub
+
and Tangled simultaneously.
+
+
To push all branches and tags:
+
+
```bash
+
git push origin --all
+
git push origin --tags
+
```
+
+
If you prefer more control over which remote you push to,
+
you can maintain separate remotes:
+
+
```bash
+
git remote add github git@github.com:username/my-project
+
git remote add tangled git@tangled.org:username/my-project
+
```
+
+
Then push to each explicitly:
+
+
```bash
+
git push github main
+
git push tangled main
+
```
+
+
# Knot self-hosting guide
+
+
So you want to run your own knot server? Great! Here are a few prerequisites:
+
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
+
2. A (sub)domain name. People generally use `knot.example.com`.
+
3. A valid SSL certificate for your domain.
+
+
## NixOS
+
+
Refer to the [knot
+
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
+
for a full list of options. Sample configurations:
+
+
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
+
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
+
+
## Docker
+
+
Refer to
+
[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
+
Note that this is community maintained.
+
+
## Manual setup
+
+
First, clone this repository:
+
+
```
+
git clone https://tangled.org/@tangled.org/core
+
```
+
+
Then, build the `knot` CLI. This is the knot administration
+
and operation tool. For the purpose of this guide, we're
+
only concerned with these subcommands:
+
+
* `knot server`: the main knot server process, typically
+
run as a supervised service
+
* `knot guard`: handles role-based access control for git
+
over SSH (you'll never have to run this yourself)
+
* `knot keys`: fetches SSH keys associated with your knot;
+
we'll use this to generate the SSH
+
`AuthorizedKeysCommand`
+
+
```
+
cd core
+
export CGO_ENABLED=1
+
go build -o knot ./cmd/knot
+
```
+
+
Next, move the `knot` binary to a location owned by `root` --
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
+
+
```
+
sudo mv knot /usr/local/bin/knot
+
sudo chown root:root /usr/local/bin/knot
+
```
+
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
+
specific permissions](https://stackoverflow.com/a/27638306). The
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
+
retrieve a user's public SSH keys dynamically for authentication. Let's
+
set that up.
+
+
```
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
+
Match User git
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
+
AuthorizedKeysCommandUser nobody
+
EOF
+
```
+
+
Then, reload `sshd`:
+
+
```
+
sudo systemctl reload ssh
+
```
+
+
Next, create the `git` user. We'll use the `git` user's home directory
+
to store repositories:
+
+
```
+
sudo adduser git
+
```
+
+
Create `/home/git/.knot.env` with the following, updating the values as
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
+
DID; you can find your DID on the [Settings](https://tangled.org/settings) page.
+
+
```
+
KNOT_REPO_SCAN_PATH=/home/git
+
KNOT_SERVER_HOSTNAME=knot.example.com
+
APPVIEW_ENDPOINT=https://tangled.org
+
KNOT_SERVER_OWNER=did:plc:foobar
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
+
```
+
+
If you run a Linux distribution that uses systemd, you can use the provided
+
service file to run the server. Copy
+
[`knotserver.service`](/systemd/knotserver.service)
+
to `/etc/systemd/system/`. Then, run:
+
+
```
+
systemctl enable knotserver
+
systemctl start knotserver
+
```
+
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
+
knot. Here's an example configuration for Nginx:
+
+
```
+
server {
+
listen 80;
+
listen [::]:80;
+
server_name knot.example.com;
+
+
location / {
+
proxy_pass http://localhost:5555;
+
proxy_set_header Host $host;
+
proxy_set_header X-Real-IP $remote_addr;
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
proxy_set_header X-Forwarded-Proto $scheme;
+
}
+
+
# wss endpoint for git events
+
location /events {
+
proxy_set_header X-Forwarded-For $remote_addr;
+
proxy_set_header Host $http_host;
+
proxy_set_header Upgrade websocket;
+
proxy_set_header Connection Upgrade;
+
proxy_pass http://localhost:5555;
+
}
+
# additional config for SSL/TLS goes here.
+
}
+
+
```
+
+
Remember to use Let's Encrypt or similar to procure a certificate for your
+
knot domain.
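
For example, with [certbot](https://certbot.eff.org) and its
Nginx plugin (assuming both are installed on the server), a
certificate for the placeholder domain above can be obtained
with:

```bash
sudo certbot --nginx -d knot.example.com
```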
+
+
You should now have a running knot server! You can finalize
+
your registration by hitting the `verify` button on the
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
+
a record on your PDS to announce the existence of the knot.
+
+
### Custom paths
+
+
(This section applies to manual setup only. Docker users should edit the mounts
+
in `docker-compose.yml` instead.)
+
+
Right now, the database and repositories of your knot live in `/home/git`. You
+
can move these paths if you'd like to store them in another folder. Be careful
+
when adjusting these paths:
+
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
+
any possible side effects. Remember to restart it once you're done.
+
* Make backups before moving in case something goes wrong.
+
* Make sure the `git` user can read and write from the new paths.
+
+
#### Database
+
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
+
and we want to move it to `/home/git/database/knotserver.db`.
+
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
+
and `.db-wal` files if they exist.
+
+
```
+
mkdir /home/git/database
+
cp /home/git/knotserver.db* /home/git/database
+
```
+
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
+
the new file path (_not_ the directory):
+
+
```
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
+
```
+
+
#### Repositories
+
+
As an example, let's say the repositories are currently in `/home/git`, and we
+
want to move them into `/home/git/repositories`.
+
+
Create the new folder, then move the existing repositories (if there are any):
+
+
```
+
mkdir /home/git/repositories
+
# move all DIDs into the new folder; these will vary for you!
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
+
```
+
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
+
to the new directory:
+
+
```
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
+
```
+
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
+
repository path:
+
+
```
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
+
Match User git
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
+
AuthorizedKeysCommandUser nobody
+
EOF
+
```
+
+
Make sure to restart your SSH server!
+
+
#### MOTD (message of the day)
+
+
To configure the MOTD ("Welcome to this knot!" by default), edit the
+
`/home/git/motd` file:
+
+
```
+
printf "Hi from this knot!\n" > /home/git/motd
+
```
+
+
Note that you should add a newline at the end if setting a non-empty message
+
since the knot won't do this for you.
+
+
# Spindles
+
+
## Pipelines
+
+
Spindle workflows allow you to write CI/CD pipelines in a
+
simple format. They're located in the `.tangled/workflows`
+
directory at the root of your repository, and are defined
+
using YAML.
+
+
The fields are:
+
+
- [Trigger](#trigger): A **required** field that defines
+
when a workflow should be triggered.
+
- [Engine](#engine): A **required** field that defines which
+
engine a workflow should run on.
+
- [Clone options](#clone-options): An **optional** field
+
that defines how the repository should be cloned.
+
- [Dependencies](#dependencies): An **optional** field that
+
allows you to list dependencies you may need.
+
- [Environment](#environment): An **optional** field that
+
allows you to define environment variables.
+
- [Steps](#steps): An **optional** field that allows you to
+
define what steps should run in the workflow.
+
+
### Trigger
+
+
The first thing to add to a workflow is the trigger, which
+
defines when a workflow runs. This is defined using a `when`
+
field, which takes in a list of conditions. Each condition
+
has the following fields:
+
+
- `event`: This is a **required** field that defines when
+
your workflow should run. It's a list that can take one or
+
more of the following values:
+
- `push`: The workflow should run every time a commit is
+
pushed to the repository.
+
- `pull_request`: The workflow should run every time a
+
pull request is made or updated.
+
- `manual`: The workflow can be triggered manually.
+
- `branch`: Defines which branches the workflow should run
+
for. If used with the `push` event, commits to the
+
branch(es) listed here will trigger the workflow. If used
+
with the `pull_request` event, updates to pull requests
+
targeting the branch(es) listed here will trigger the
+
workflow. This field has no effect with the `manual`
+
event. Supports glob patterns using `*` and `**` (e.g.,
+
`main`, `develop`, `release-*`). Either `branch` or `tag`
+
(or both) must be specified for `push` events.
+
- `tag`: Defines which tags the workflow should run for.
+
Only used with the `push` event - when tags matching the
+
pattern(s) listed here are pushed, the workflow will
+
trigger. This field has no effect with `pull_request` or
+
`manual` events. Supports glob patterns using `*` and `**`
+
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
+
`tag` (or both) must be specified for `push` events.
+
+
For example, if you'd like to define a workflow that runs
+
when commits are pushed to the `main` and `develop`
+
branches, or when pull requests that target the `main`
+
branch are updated, or manually, you can do so with:
+
+
```yaml
+
when:
+
- event: ["push", "manual"]
+
branch: ["main", "develop"]
+
- event: ["pull_request"]
+
branch: ["main"]
+
```
+
+
You can also trigger workflows on tag pushes. For instance,
+
to run a deployment workflow when tags matching `v*` are
+
pushed:
+
+
```yaml
+
when:
+
- event: ["push"]
+
tag: ["v*"]
+
```
+
+
You can even combine branch and tag patterns in a single
+
constraint (the workflow triggers if either matches):
+
+
```yaml
+
when:
+
- event: ["push"]
+
branch: ["main", "release-*"]
+
tag: ["v*", "stable"]
+
```
+
+
### Engine
+
+
Next is the engine on which the workflow should run, defined
+
using the **required** `engine` field. The currently
+
supported engines are:
+
+
- `nixery`: This uses an instance of
+
[Nixery](https://nixery.dev) to run steps, which allows
+
you to add [dependencies](#dependencies) from
+
Nixpkgs (https://github.com/NixOS/nixpkgs). You can
+
search for packages on https://search.nixos.org, and
+
there's a pretty good chance the package(s) you're looking
+
for will be there.
+
+
Example:
+
+
```yaml
+
engine: "nixery"
+
```
+
+
### Clone options
+
+
When a workflow starts, the first step is to clone the
+
repository. You can customize this behavior using the
+
**optional** `clone` field. It has the following fields:
+
+
- `skip`: Setting this to `true` will skip cloning the
+
repository. This can be useful if your workflow is doing
+
something that doesn't require anything from the
+
repository itself. This is `false` by default.
+
- `depth`: This sets the number of commits, or the "clone
+
depth", to fetch from the repository. For example, if you
+
set this to 2, the last 2 commits will be fetched. By
+
default, the depth is set to 1, meaning only the most
+
recent commit will be fetched, which is the commit that
+
triggered the workflow.
+
- `submodules`: If you use Git submodules
+
(https://git-scm.com/book/en/v2/Git-Tools-Submodules)
+
in your repository, setting this field to `true` will
+
recursively fetch all submodules. This is `false` by
+
default.
+
+
The default settings are:
+
+
```yaml
+
clone:
+
skip: false
+
depth: 1
+
submodules: false
+
```
+
+
### Dependencies
+
+
Usually when you're running a workflow, you'll need
+
additional dependencies. The `dependencies` field lets you
+
define which dependencies to get, and from where. It's a
+
key-value map, with the key being the registry to fetch
+
dependencies from, and the value being the list of
+
dependencies to fetch.
+
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a
+
package called `my_pkg` you've made from your own registry
+
at your repository at
+
`https://tangled.org/@example.com/my_pkg`. You can define
+
those dependencies like so:
+
+
```yaml
+
dependencies:
+
# nixpkgs
+
nixpkgs:
+
- nodejs
+
- go
+
# custom registry
+
git+https://tangled.org/@example.com/my_pkg:
+
- my_pkg
+
```
+
+
Now these dependencies are available to use in your
+
workflow!
+
+
### Environment
+
+
The `environment` field allows you to define environment
+
variables that will be available throughout the entire
+
workflow. **Do not put secrets here, these environment
+
variables are visible to anyone viewing the repository. You
+
can add secrets for pipelines in your repository's
+
settings.**
+
+
Example:
+
+
```yaml
+
environment:
+
GOOS: "linux"
+
GOARCH: "arm64"
+
NODE_ENV: "production"
+
MY_ENV_VAR: "MY_ENV_VALUE"
+
```
+
+
### Steps
+
+
The `steps` field allows you to define what steps should run
+
in the workflow. It's a list of step objects, each with the
+
following fields:
+
+
- `name`: This field allows you to give your step a name.
+
This name is visible in your workflow runs, and is used to
+
describe what the step is doing.
+
- `command`: This field allows you to define a command to
+
run in that step. The step is run in a Bash shell, and the
+
logs from the command will be visible in the pipelines
+
page on the Tangled website. The
+
[dependencies](#dependencies) you added will be available
+
to use here.
+
- `environment`: Similar to the global
+
[environment](#environment) config, this **optional**
+
field is a key-value map that allows you to set
+
environment variables for the step. **Do not put secrets
+
here, these environment variables are visible to anyone
+
viewing the repository. You can add secrets for pipelines
+
in your repository's settings.**
+
+
Example:
+
+
```yaml
+
steps:
+
- name: "Build backend"
+
command: "go build"
+
environment:
+
GOOS: "darwin"
+
GOARCH: "arm64"
+
- name: "Build frontend"
+
command: "npm run build"
+
environment:
+
NODE_ENV: "production"
+
```
+
+
### Complete workflow
+
+
```yaml
+
# .tangled/workflows/build.yml
+
+
when:
+
- event: ["push", "manual"]
+
branch: ["main", "develop"]
+
- event: ["pull_request"]
+
branch: ["main"]
+
+
engine: "nixery"
+
+
# using the default values
+
clone:
+
skip: false
+
depth: 1
+
submodules: false
+
+
dependencies:
+
# nixpkgs
+
nixpkgs:
+
- nodejs
+
- go
+
# custom registry
+
git+https://tangled.org/@example.com/my_pkg:
+
- my_pkg
+
+
environment:
+
GOOS: "linux"
+
GOARCH: "arm64"
+
NODE_ENV: "production"
+
MY_ENV_VAR: "MY_ENV_VALUE"
+
+
steps:
+
- name: "Build backend"
+
command: "go build"
+
environment:
+
GOOS: "darwin"
+
GOARCH: "arm64"
+
- name: "Build frontend"
+
command: "npm run build"
+
environment:
+
NODE_ENV: "production"
+
```
+
+
If you want another example of a workflow, you can look at
+
the one [Tangled uses to build the
+
project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
+
+
## Self-hosting guide
+
+
### Prerequisites
+
+
* Go
+
* Docker (the only supported backend currently)
+
+
### Configuration
+
+
Spindle is configured using environment variables. The following environment variables are available:
+
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
+
+
### Running spindle
+
+
1. **Set the environment variables.** For example:
+
+
```shell
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
+
export SPINDLE_SERVER_OWNER="your-did"
+
```
+
+
2. **Build the Spindle binary.**
+
+
```shell
+
cd core
+
go mod download
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
+
```
+
+
3. **Create the log directory.**
+
+
```shell
+
sudo mkdir -p /var/log/spindle
+
sudo chown $USER:$USER -R /var/log/spindle
+
```
+
+
4. **Run the Spindle binary.**
+
+
```shell
+
./cmd/spindle/spindle
+
```
+
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
+
+
## Architecture
+
+
Spindle is a small CI runner service. Here's a high-level overview of how it operates:
+
+
* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
+
* When a new repo record comes through (typically when you add a spindle to a
+
repo from the settings), spindle then resolves the underlying knot and
+
subscribes to repo events (see:
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
+
* The spindle engine then handles execution of the pipeline, with results and
+
logs beamed on the spindle event stream over WebSocket.
+
+
### The engine
+
+
At present, the only supported backend is Docker (and Podman, if Docker
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
+
executes each step in the pipeline in a fresh container, with state persisted
+
across steps within the `/tangled/workspace` directory.
+
+
The base image for the container is constructed on the fly using
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
+
used packages.
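
As a rough sketch (not Spindle's actual invocation), the
execution model is comparable to running each step in its
own container against a shared workspace volume, with images
assembled by Nixery; the image names and repository URL
below are placeholders:

```bash
# each step gets a fresh container; state survives in the shared volume
docker volume create workspace

# step 1: clone the repository into the workspace
docker run --rm -v workspace:/tangled/workspace nixery.dev/shell/git \
  git clone https://tangled.org/@example.com/repo /tangled/workspace/repo

# step 2: build in a brand-new container, reusing the cloned workspace
docker run --rm -v workspace:/tangled/workspace -w /tangled/workspace/repo \
  nixery.dev/shell/go go build ./...
```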
+
+
The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
+
+
## Secrets with openbao
+
+
This document covers setting up spindle to use OpenBao for secrets
+
management via OpenBao Proxy instead of the default SQLite backend.
+
+
### Overview
+
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
+
authentication automatically using AppRole credentials, while spindle
+
connects to the local proxy instead of directly to the OpenBao server.
+
+
This approach provides better security, automatic token renewal, and
+
simplified application code.
+
+
### Installation
+
+
Install OpenBao from Nixpkgs:
+
+
```bash
+
nix shell nixpkgs#openbao # for a local server
+
```
+
+
### Setup
+
+
The setup process is documented for both local development and production.
+
+
#### Local development
+
+
Start OpenBao in dev mode:
+
+
```bash
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
+
```
+
+
This starts OpenBao on `http://localhost:8201` with a root token.
+
+
Set up environment for bao CLI:
+
+
```bash
+
export BAO_ADDR=http://localhost:8201
+
export BAO_TOKEN=root
+
```
+
+
#### Production
+
+
You would typically use a systemd service with a
+
configuration file. Refer to
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
+
for how this can be achieved using Nix.
+
+
Then, initialize the bao server:
+
+
```bash
+
bao operator init -key-shares=1 -key-threshold=1
+
```
+
+
This will print out an unseal key and a root token. Save them
+
somewhere (like a password manager). Then unseal the vault
+
to begin setting it up:
+
+
```bash
+
bao operator unseal <unseal_key>
+
```
+
+
All steps below remain the same across both dev and
+
production setups.
+
+
#### Configure openbao server
+
+
Create the spindle KV mount:
+
+
```bash
+
bao secrets enable -path=spindle -version=2 kv
+
```
+
+
Set up AppRole authentication and policy:
+
+
Create a policy file `spindle-policy.hcl`:
+
+
```hcl
+
# Full access to spindle KV v2 data
+
path "spindle/data/*" {
+
capabilities = ["create", "read", "update", "delete"]
+
}
+
+
# Access to metadata for listing and management
+
path "spindle/metadata/*" {
+
capabilities = ["list", "read", "delete", "update"]
+
}
+
+
# Allow listing at root level
+
path "spindle/" {
+
capabilities = ["list"]
+
}
+
+
# Required for connection testing and health checks
+
path "auth/token/lookup-self" {
+
capabilities = ["read"]
+
}
+
```
+
+
Apply the policy and create an AppRole:
+
+
```bash
+
bao policy write spindle-policy spindle-policy.hcl
+
bao auth enable approle
+
bao write auth/approle/role/spindle \
+
token_policies="spindle-policy" \
+
token_ttl=1h \
+
token_max_ttl=4h \
+
bind_secret_id=true \
+
secret_id_ttl=0 \
+
secret_id_num_uses=0
+
```
+
+
Get the credentials:
+
+
```bash
+
# Get role ID (static)
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
+
+
# Generate secret ID
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
+
+
echo "Role ID: $ROLE_ID"
+
echo "Secret ID: $SECRET_ID"
+
```
+
+
#### Create proxy configuration
+
+
Create the credential files:
+
+
```bash
+
# Create directory for OpenBao files
+
mkdir -p /tmp/openbao
+
+
# Save credentials
+
echo "$ROLE_ID" > /tmp/openbao/role-id
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
+
```
+
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
+
+
```hcl
+
# OpenBao server connection
+
vault {
+
address = "http://localhost:8200"
+
}
+
+
# Auto-Auth using AppRole
+
auto_auth {
+
method "approle" {
+
mount_path = "auth/approle"
+
config = {
+
role_id_file_path = "/tmp/openbao/role-id"
+
secret_id_file_path = "/tmp/openbao/secret-id"
+
}
+
}
+
+
# Optional: write token to file for debugging
+
sink "file" {
+
config = {
+
path = "/tmp/openbao/token"
+
mode = 0640
+
}
+
}
+
}
+
+
# Proxy listener for spindle
+
listener "tcp" {
+
address = "127.0.0.1:8201"
+
tls_disable = true
+
}
+
+
# Enable API proxy with auto-auth token
+
api_proxy {
+
use_auto_auth_token = true
+
}
+
+
# Enable response caching
+
cache {
+
use_auto_auth_token = true
+
}
+
+
# Logging
+
log_level = "info"
+
```
+
+
#### Start the proxy
+
+
Start OpenBao Proxy:
+
+
```bash
+
bao proxy -config=/tmp/openbao/proxy.hcl
+
```
+
+
The proxy will authenticate with OpenBao and start listening on
+
`127.0.0.1:8201`.
+
+
#### Configure spindle
+
+
Set these environment variables for spindle:
+
+
```bash
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
+
```
+
+
On startup, spindle will now connect to the local proxy,
+
which handles all authentication automatically.
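+
+
To see what this looks like on the wire, here is a
+
sketch of reading a secret through the proxy with plain
+
`curl`; the repo path and secret name are placeholders,
+
and no token is supplied because the proxy injects one:
+
+
```bash
+
curl -H "X-Vault-Request: true" \
+
  http://127.0.0.1:8201/v1/spindle/data/repos/did_plc_alice_myrepo/SECRET_NAME
+
```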
+
+
### Production setup for proxy
+
+
For production, you'll want to run the proxy as a service.
+
+
Place your production configuration in
+
`/etc/openbao/proxy.hcl` with proper TLS settings for the
+
vault connection.
+
+
### Verifying setup
+
+
Test the proxy directly:
+
+
```bash
+
# Check proxy health
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
+
+
# Test token lookup through proxy
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
+
```
+
+
Test OpenBao operations through the server:
+
+
```bash
+
# List all secrets
+
bao kv list spindle/
+
+
# Add a test secret via the spindle API, then check it exists
+
bao kv list spindle/repos/
+
+
# Get a specific secret
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
+
```
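+
+
For a sanity check that bypasses spindle entirely, you
+
can write and read back a throwaway secret with the
+
`bao` CLI (the path below is just an example):
+
+
```bash
+
bao kv put spindle/repos/test TOKEN=hunter2
+
bao kv get spindle/repos/test
+
bao kv delete spindle/repos/test
+
```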
+
+
### How it works
+
+
- Spindle connects to OpenBao Proxy on localhost (typically
+
port 8200 or 8201)
+
- The proxy authenticates with OpenBao using AppRole
+
credentials
+
- All spindle requests go through the proxy, which injects
+
authentication tokens
+
- Secrets are stored at
+
`spindle/repos/{sanitized_repo_path}/{secret_key}`
+
- Repository paths like `did:plc:alice/myrepo` become
+
`did_plc_alice_myrepo` (see the sketch after this list)
+
- The proxy handles all token renewal automatically
+
- Spindle no longer manages tokens or authentication
+
directly
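+
+
Assuming the sanitization rule is simply "replace `:`
+
and `/` with underscores", the mapping is easy to
+
reproduce by hand:
+
+
```bash
+
echo 'did:plc:alice/myrepo' | tr ':/' '__'
+
# did_plc_alice_myrepo
+
```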
+
+
### Troubleshooting
+
+
**Connection refused**: Check that the OpenBao Proxy is
+
running and listening on the configured address.
+
+
**403 errors**: Verify the AppRole credentials are correct
+
and the policy has the necessary permissions.
+
+
**404 route errors**: The spindle KV mount probably doesn't
+
exist—run the mount creation step again.
+
+
**Proxy authentication failures**: Check the proxy logs and
+
verify the role-id and secret-id files are readable and
+
contain valid credentials.
+
+
**Secret not found after writing**: This can indicate policy
+
permission issues. Verify the policy includes both
+
`spindle/data/*` and `spindle/metadata/*` paths with
+
appropriate capabilities.
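+
+
One quick check is to print the installed policy back
+
out and compare it against the file you wrote earlier:
+
+
```bash
+
bao policy read spindle-policy
+
```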
+
+
Check proxy logs:
+
+
```bash
+
# If running as systemd service
+
journalctl -u openbao-proxy -f
+
+
# If running directly, check the console output
+
```
+
+
Test AppRole authentication manually:
+
+
```bash
+
bao write auth/approle/login \
+
role_id="$(cat /tmp/openbao/role-id)" \
+
secret_id="$(cat /tmp/openbao/secret-id)"
+
```
+
+
# Migrating knots and spindles
+
+
Sometimes, backwards-incompatible changes are made to the
+
knot/spindle XRPC APIs. If you host a knot or a spindle, you
+
will need to follow this guide to upgrade. Typically, this
+
only requires you to deploy the newest version.
+
+
This document is laid out in reverse-chronological order.
+
Newer migration guides are listed first, and older guides
+
are further down the page.
+
+
## Upgrading from v1.8.x
+
+
After v1.8.2, the HTTP API for knots and spindles has been
+
deprecated and replaced with XRPC. Repositories on outdated
+
knots will not be viewable from the appview. Upgrading is
+
straightforward, however.
+
+
For knots:
+
+
- Upgrade to the latest tag (v1.9.0 or above)
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
+
hit the "retry" button to verify your knot
+
+
For spindles:
+
+
- Upgrade to the latest tag (v1.9.0 or above)
+
- Head to the [spindle
+
dashboard](https://tangled.org/settings/spindles) and hit the
+
"retry" button to verify your spindle
+
+
## Upgrading from v1.7.x
+
+
After v1.7.0, knot secrets have been deprecated. You no
+
longer need a secret from the appview to run a knot. All
+
authorized commands to knots are managed via [Inter-Service
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
+
Knots will be read-only until upgraded.
+
+
Upgrading is quite easy, in essence:
+
+
- `KNOT_SERVER_SECRET` is no more; you can remove this
+
environment variable entirely
+
- `KNOT_SERVER_OWNER` is now required on boot; set this to
+
your DID. You can find your DID in the
+
[settings](https://tangled.org/settings) page.
+
- Restart your knot once you have updated the environment
+
variables
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
+
hit the "retry" button to verify your knot. This simply
+
writes a `sh.tangled.knot` record to your PDS.
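+
+
For a plain environment-file setup, the change amounts
+
to something like this (the DID below is a placeholder):
+
+
```bash
+
# before: remove the deprecated secret
+
# KNOT_SERVER_SECRET=...
+
+
# after: declare the owner DID instead
+
KNOT_SERVER_OWNER=did:plc:foo
+
```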
+
+
If you use the Nix module, simply bump the flake to the
+
latest revision, and change your config block like so:
+
+
```diff
+
services.tangled.knot = {
+
enable = true;
+
server = {
+
- secretFile = /path/to/secret;
+
+ owner = "did:plc:foo";
+
};
+
};
+
```
+
+
# Hacking on Tangled
+
+
We highly recommend [installing
+
Nix](https://nixos.org/download/) (the package manager)
+
before working on the codebase. The Nix flake provides a lot
+
of helpers to get started and, most importantly, builds and
+
dev shells are entirely deterministic.
+
+
To set up your dev environment:
+
+
```bash
+
nix develop
+
```
+
+
Non-Nix users can look at the `devShell` attribute in the
+
`flake.nix` file to determine necessary dependencies.
+
+
## Running the appview
+
+
The Nix flake also exposes a few `app` attributes (run `nix
+
flake show` to see a full list of what the flake provides);
+
one of the apps runs the appview with the `air`
+
live-reloader:
+
+
```bash
+
TANGLED_DEV=true nix run .#watch-appview
+
+
# TANGLED_DB_PATH might be of interest to point to
+
# different sqlite DBs
+
+
# in a separate shell, you can live-reload tailwind
+
nix run .#watch-tailwind
+
```
+
+
To authenticate with the appview, you will need Redis and
+
OAuth JWKs to be set up:
+
+
```
+
# OAuth JWKs should already be set up by the Nix devshell:
+
echo $TANGLED_OAUTH_CLIENT_SECRET
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
+
+
echo $TANGLED_OAUTH_CLIENT_KID
+
1761667908
+
+
# if not, you can set it up yourself:
+
goat key generate -t P-256
+
Key Type: P-256 / secp256r1 / ES256 private key
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
+
+
# the secret key from above
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
+
+
# Run Redis in a new shell to store OAuth sessions
+
redis-server
+
```
+
+
## Running knots and spindles
+
+
An end-to-end knot setup requires setting up a machine with
+
`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
+
quite cumbersome. So the Nix flake provides a
+
`nixosConfiguration` to do so.
+
+
<details>
+
<summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
+
+
In order to build Tangled's dev VM on macOS, you will
+
first need to set up a Linux Nix builder. The recommended
+
way to do so is to run a [`darwin.linux-builder`
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
+
and to register it in `nix.conf` as a builder for Linux
+
with the same architecture as your Mac (`linux-aarch64` if
+
you are using Apple Silicon).
+
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
+
> the Tangled repo so that it doesn't conflict with the other VM. For example,
+
> you can do
+
>
+
> ```shell
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
+
> ```
+
>
+
> to store the builder VM in a temporary dir.
+
>
+
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
+
> avoid subtle problems.
+
+
Alternatively, you can use any other method to set up a
+
Linux machine with Nix installed that you can `sudo ssh`
+
into (in other words, the root user on your Mac has to be able
+
to ssh into the Linux machine without entering a password)
+
and that has the same architecture as your Mac. See
+
[remote builder
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
+
for how to register such a builder in `nix.conf`.
+
+
> WARNING: If you'd like to use
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
+
> ssh` works can be tricky. It seems to be [possible with
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
+
+
</details>
+
+
To begin, grab your DID from http://localhost:3000/settings.
+
Then, set `TANGLED_VM_KNOT_OWNER` and
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
+
lightweight NixOS VM like so:
+
+
```bash
+
nix run --impure .#vm
+
+
# type `poweroff` at the shell to exit the VM
+
```
+
+
This starts a knot on port 6444 and a spindle on port
+
6555, with `ssh` exposed on port 2222.
+
+
Once the services are running, head to
+
http://localhost:3000/settings/knots and hit "Verify". It should
+
verify the ownership of the services instantly if everything
+
went smoothly.
+
+
You can push repositories to this VM with this ssh config
+
block on your main machine:
+
+
```bash
+
Host nixos-shell
+
Hostname localhost
+
Port 2222
+
User git
+
IdentityFile ~/.ssh/my_tangled_key
+
```
+
+
Set up a remote called `local-dev` on a git repo:
+
+
```bash
+
git remote add local-dev git@nixos-shell:user/repo
+
git push local-dev main
+
```
+
+
The above VM should already be running a spindle on
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
+
hit "Verify". You can then configure each repository to use
+
this spindle and run CI jobs.
+
+
Of interest when debugging spindles:
+
+
```
+
# Service logs from journald:
+
journalctl -xeu spindle
+
+
# CI job logs from disk:
+
ls /var/log/spindle
+
+
# Debugging spindle database:
+
sqlite3 /var/lib/spindle/spindle.db
+
+
# litecli has a nicer REPL interface:
+
litecli /var/lib/spindle/spindle.db
+
```
+
+
If for any reason you wish to disable either one of the
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
+
`services.tangled.spindle.enable` (or
+
`services.tangled.knot.enable`) to `false`.
+
+
# Contribution guide
+
+
## Commit guidelines
+
+
We follow a commit style similar to the Go project. Please keep commits:
+
+
* **atomic**: each commit should represent one logical change
+
* **descriptive**: the commit message should clearly describe what the
+
change does and why it's needed
+
+
### Message format
+
+
```
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
+
+
Optional longer description can go here, if necessary. Explain what the
+
change does and why, especially if not obvious. Reference relevant
+
issues or PRs when applicable. These can be links for now since we don't
+
auto-link issues/PRs yet.
+
```
+
+
Here are some examples:
+
+
```
+
appview/state: fix token expiry check in middleware
+
+
The previous check did not account for clock drift, leading to premature
+
token invalidation.
+
```
+
+
```
+
knotserver/git/service: improve error checking in upload-pack
+
```
+
+
+
### General notes
+
+
- PRs get merged "as-is" (fast-forward)—like applying a patch-series
+
using `git am`. At present, there is no squashing—so please author
+
your commits as they would appear on `master`, following the above
+
guidelines.
+
- If there is a lot of nesting, for example "appview:
+
pages/templates/repo/fragments: ...", these can be truncated down to
+
just "appview: repo/fragments: ...". If the change affects a lot of
+
subdirectories, you may abbreviate to just the top-level names, e.g.
+
"appview: ..." or "knotserver: ...".
+
- Keep commits lowercased with no trailing period.
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
+
"fixed bug" or "fixes bug").
+
- Try to keep the summary line under 72 characters, but we aren't too
+
fussed about this.
+
- Follow the same formatting for PR titles if filled manually.
+
- Don't include unrelated changes in the same commit.
+
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
+
before submitting if necessary.
+
+
## Code formatting
+
+
We use a variety of tools to format our code, and multiplex them with
+
[`treefmt`](https://treefmt.com). All you need to do to format your changes
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
+
+
## Proposals for bigger changes
+
+
Small fixes like typos, minor bugs, or trivial refactors can be
+
submitted directly as PRs.
+
+
For larger changes—especially those introducing new features, significant
+
refactoring, or altering system behavior—please open a proposal first. This
+
helps us evaluate the scope, design, and potential impact before implementation.
+
+
Create a new issue titled:
+
+
```
+
proposal: <affected scope>: <summary of change>
+
```
+
+
In the description, explain:
+
+
- What the change is
+
- Why it's needed
+
- How you plan to implement it (roughly)
+
- Any open questions or tradeoffs
+
+
We'll use the issue thread to discuss and refine the idea before moving
+
forward.
+
+
## Developer Certificate of Origin (DCO)
+
+
We require all contributors to certify that they have the right to
+
submit the code they're contributing. To do this, we follow the
+
[Developer Certificate of Origin
+
(DCO)](https://developercertificate.org/).
+
+
By signing your commits, you're stating that the contribution is your
+
own work, or that you have the right to submit it under the project's
+
license. This helps us keep things clean and legally sound.
+
+
To sign your commit, just add the `-s` flag when committing:
+
+
```sh
+
git commit -s -m "your commit message"
+
```
+
+
This appends a line like:
+
+
```
+
Signed-off-by: Your Name <your.email@example.com>
+
```
+
+
We won't merge commits if they aren't signed off. If you forget, you can
+
amend the last commit like this:
+
+
```sh
+
git commit --amend -s
+
```
+
+
If you're submitting a PR with multiple commits, make sure each one is
+
signed.
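+
+
If several commits on your branch are missing the
+
trailer, `git rebase` can add it to all of them in one
+
pass (note that this rewrites history):
+
+
```sh
+
# add Signed-off-by to every commit since master
+
git rebase --signoff master
+
```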
+
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
+
to make it sign off commits in the Tangled repo:
+
+
```shell
+
# Safety check, should say "No matching config key..."
+
jj config list templates.commit_trailers
+
# The command below may need to be adjusted if the command above returned something.
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
+
```
+
+
Refer to the [jujutsu
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
+
for more information.
-136
docs/contributing.md
···
-
# tangled contributing guide
-
-
## commit guidelines
-
-
We follow a commit style similar to the Go project. Please keep commits:
-
-
* **atomic**: each commit should represent one logical change
-
* **descriptive**: the commit message should clearly describe what the
-
change does and why it's needed
-
-
### message format
-
-
```
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
-
-
-
Optional longer description can go here, if necessary. Explain what the
-
change does and why, especially if not obvious. Reference relevant
-
issues or PRs when applicable. These can be links for now since we don't
-
auto-link issues/PRs yet.
-
```
-
-
Here are some examples:
-
-
```
-
appview/state: fix token expiry check in middleware
-
-
The previous check did not account for clock drift, leading to premature
-
token invalidation.
-
```
-
-
```
-
knotserver/git/service: improve error checking in upload-pack
-
```
-
-
-
### general notes
-
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
-
using `git am`. At present, there is no squashing -- so please author
-
your commits as they would appear on `master`, following the above
-
guidelines.
-
- If there is a lot of nesting, for example "appview:
-
pages/templates/repo/fragments: ...", these can be truncated down to
-
just "appview: repo/fragments: ...". If the change affects a lot of
-
subdirectories, you may abbreviate to just the top-level names, e.g.
-
"appview: ..." or "knotserver: ...".
-
- Keep commits lowercased with no trailing period.
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
-
"fixed bug" or "fixes bug").
-
- Try to keep the summary line under 72 characters, but we aren't too
-
fussed about this.
-
- Follow the same formatting for PR titles if filled manually.
-
- Don't include unrelated changes in the same commit.
-
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
-
before submitting if necessary.
-
-
## code formatting
-
-
We use a variety of tools to format our code, and multiplex them with
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
-
-
## proposals for bigger changes
-
-
Small fixes like typos, minor bugs, or trivial refactors can be
-
submitted directly as PRs.
-
-
For larger changes—especially those introducing new features, significant
-
refactoring, or altering system behavior—please open a proposal first. This
-
helps us evaluate the scope, design, and potential impact before implementation.
-
-
### proposal format
-
-
Create a new issue titled:
-
-
```
-
proposal: <affected scope>: <summary of change>
-
```
-
-
In the description, explain:
-
-
- What the change is
-
- Why it's needed
-
- How you plan to implement it (roughly)
-
- Any open questions or tradeoffs
-
-
We'll use the issue thread to discuss and refine the idea before moving
-
forward.
-
-
## developer certificate of origin (DCO)
-
-
We require all contributors to certify that they have the right to
-
submit the code they're contributing. To do this, we follow the
-
[Developer Certificate of Origin
-
(DCO)](https://developercertificate.org/).
-
-
By signing your commits, you're stating that the contribution is your
-
own work, or that you have the right to submit it under the project's
-
license. This helps us keep things clean and legally sound.
-
-
To sign your commit, just add the `-s` flag when committing:
-
-
```sh
-
git commit -s -m "your commit message"
-
```
-
-
This appends a line like:
-
-
```
-
Signed-off-by: Your Name <your.email@example.com>
-
```
-
-
We won't merge commits if they aren't signed off. If you forget, you can
-
amend the last commit like this:
-
-
```sh
-
git commit --amend -s
-
```
-
-
If you're submitting a PR with multiple commits, make sure each one is
-
signed.
-
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
-
to make it sign off commits in the tangled repo:
-
-
```shell
-
# Safety check, should say "No matching config key..."
-
jj config list templates.commit_trailers
-
# The command below may need to be adjusted if the command above returned something.
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
-
```
-
-
Refer to the [jj
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
-
for more information.
-172
docs/hacking.md
···
-
# hacking on tangled
-
-
We highly recommend [installing
-
nix](https://nixos.org/download/) (the package manager)
-
before working on the codebase. The nix flake provides a lot
-
of helpers to get started and most importantly, builds and
-
dev shells are entirely deterministic.
-
-
To set up your dev environment:
-
-
```bash
-
nix develop
-
```
-
-
Non-nix users can look at the `devShell` attribute in the
-
`flake.nix` file to determine necessary dependencies.
-
-
## running the appview
-
-
The nix flake also exposes a few `app` attributes (run `nix
-
flake show` to see a full list of what the flake provides),
-
one of the apps runs the appview with the `air`
-
live-reloader:
-
-
```bash
-
TANGLED_DEV=true nix run .#watch-appview
-
-
# TANGLED_DB_PATH might be of interest to point to
-
# different sqlite DBs
-
-
# in a separate shell, you can live-reload tailwind
-
nix run .#watch-tailwind
-
```
-
-
To authenticate with the appview, you will need redis and
-
OAUTH JWKs to be setup:
-
-
```
-
# oauth jwks should already be setup by the nix devshell:
-
echo $TANGLED_OAUTH_CLIENT_SECRET
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
-
-
echo $TANGLED_OAUTH_CLIENT_KID
-
1761667908
-
-
# if not, you can set it up yourself:
-
goat key generate -t P-256
-
Key Type: P-256 / secp256r1 / ES256 private key
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
-
-
# the secret key from above
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
-
-
# run redis in at a new shell to store oauth sessions
-
redis-server
-
```
-
-
## running knots and spindles
-
-
An end-to-end knot setup requires setting up a machine with
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
-
quite cumbersome. So the nix flake provides a
-
`nixosConfiguration` to do so.
-
-
<details>
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
-
-
In order to build Tangled's dev VM on macOS, you will
-
first need to set up a Linux Nix builder. The recommended
-
way to do so is to run a [`darwin.linux-builder`
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
-
and to register it in `nix.conf` as a builder for Linux
-
with the same architecture as your Mac (`linux-aarch64` if
-
you are using Apple Silicon).
-
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
-
> you can do
-
>
-
> ```shell
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
-
> ```
-
>
-
> to store the builder VM in a temporary dir.
-
>
-
> You should read and follow [all the other intructions][darwin builder vm] to
-
> avoid subtle problems.
-
-
Alternatively, you can use any other method to set up a
-
Linux machine with `nix` installed that you can `sudo ssh`
-
into (in other words, root user on your Mac has to be able
-
to ssh into the Linux machine without entering a password)
-
and that has the same architecture as your Mac. See
-
[remote builder
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
-
for how to register such a builder in `nix.conf`.
-
-
> WARNING: If you'd like to use
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
-
> ssh` works can be tricky. It seems to be [possible with
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
-
-
</details>
-
-
To begin, grab your DID from http://localhost:3000/settings.
-
Then, set `TANGLED_VM_KNOT_OWNER` and
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
-
lightweight NixOS VM like so:
-
-
```bash
-
nix run --impure .#vm
-
-
# type `poweroff` at the shell to exit the VM
-
```
-
-
This starts a knot on port 6444, a spindle on port 6555
-
with `ssh` exposed on port 2222.
-
-
Once the services are running, head to
-
http://localhost:3000/settings/knots and hit verify. It should
-
verify the ownership of the services instantly if everything
-
went smoothly.
-
-
You can push repositories to this VM with this ssh config
-
block on your main machine:
-
-
```bash
-
Host nixos-shell
-
Hostname localhost
-
Port 2222
-
User git
-
IdentityFile ~/.ssh/my_tangled_key
-
```
-
-
Set up a remote called `local-dev` on a git repo:
-
-
```bash
-
git remote add local-dev git@nixos-shell:user/repo
-
git push local-dev main
-
```
-
-
### running a spindle
-
-
The above VM should already be running a spindle on
-
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
-
hit verify. You can then configure each repository to use
-
this spindle and run CI jobs.
-
-
Of interest when debugging spindles:
-
-
```
-
# service logs from journald:
-
journalctl -xeu spindle
-
-
# CI job logs from disk:
-
ls /var/log/spindle
-
-
# debugging spindle db:
-
sqlite3 /var/lib/spindle/spindle.db
-
-
# litecli has a nicer REPL interface:
-
litecli /var/lib/spindle/spindle.db
-
```
-
-
If for any reason you wish to disable either one of the
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
-
`services.tangled.spindle.enable` (or
-
`services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
···
+
{
+
"text-color": null,
+
"background-color": null,
+
"line-number-color": null,
+
"line-number-background-color": null,
+
"text-styles": {
+
"Annotation": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"ControlFlow": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Error": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Alert": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Preprocessor": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Information": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Warning": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Documentation": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"DataType": {
+
"text-color": "#8f4e8b",
+
"background-color": null,
+
"bold": false,
+
"italic": false,
+
"underline": false
+
},
+
"Comment": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"CommentVar": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Keyword": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
}
+
}
+
}
+
-214
docs/knot-hosting.md
···
-
# knot self-hosting guide
-
-
So you want to run your own knot server? Great! Here are a few prerequisites:
-
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
-
2. A (sub)domain name. People generally use `knot.example.com`.
-
3. A valid SSL certificate for your domain.
-
-
There's a couple of ways to get started:
-
* NixOS: refer to
-
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
-
* Docker: Documented at
-
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
-
(community maintained: support is not guaranteed!)
-
* Manual: Documented below.
-
-
## manual setup
-
-
First, clone this repository:
-
-
```
-
git clone https://tangled.org/@tangled.org/core
-
```
-
-
Then, build the `knot` CLI. This is the knot administration and operation tool.
-
For the purpose of this guide, we're only concerned with these subcommands:
-
-
* `knot server`: the main knot server process, typically run as a
-
supervised service
-
* `knot guard`: handles role-based access control for git over SSH
-
(you'll never have to run this yourself)
-
* `knot keys`: fetches SSH keys associated with your knot; we'll use
-
this to generate the SSH `AuthorizedKeysCommand`
-
-
```
-
cd core
-
export CGO_ENABLED=1
-
go build -o knot ./cmd/knot
-
```
-
-
Next, move the `knot` binary to a location owned by `root` --
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
-
-
```
-
sudo mv knot /usr/local/bin/knot
-
sudo chown root:root /usr/local/bin/knot
-
```
-
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
-
specific permissions](https://stackoverflow.com/a/27638306). The
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
-
retrieve a user's public SSH keys dynamically for authentication. Let's
-
set that up.
-
-
```
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
-
Match User git
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
-
AuthorizedKeysCommandUser nobody
-
EOF
-
```
-
-
Then, reload `sshd`:
-
-
```
-
sudo systemctl reload ssh
-
```
-
-
Next, create the `git` user. We'll use the `git` user's home directory
-
to store repositories:
-
-
```
-
sudo adduser git
-
```
-
-
Create `/home/git/.knot.env` with the following, updating the values as
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
-
-
```
-
KNOT_REPO_SCAN_PATH=/home/git
-
KNOT_SERVER_HOSTNAME=knot.example.com
-
APPVIEW_ENDPOINT=https://tangled.sh
-
KNOT_SERVER_OWNER=did:plc:foobar
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
-
```
-
-
If you run a Linux distribution that uses systemd, you can use the provided
-
service file to run the server. Copy
-
[`knotserver.service`](/systemd/knotserver.service)
-
to `/etc/systemd/system/`. Then, run:
-
-
```
-
systemctl enable knotserver
-
systemctl start knotserver
-
```
-
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
-
knot. Here's an example configuration for Nginx:
-
-
```
-
server {
-
listen 80;
-
listen [::]:80;
-
server_name knot.example.com;
-
-
location / {
-
proxy_pass http://localhost:5555;
-
proxy_set_header Host $host;
-
proxy_set_header X-Real-IP $remote_addr;
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-
proxy_set_header X-Forwarded-Proto $scheme;
-
}
-
-
# wss endpoint for git events
-
location /events {
-
proxy_set_header X-Forwarded-For $remote_addr;
-
proxy_set_header Host $http_host;
-
proxy_set_header Upgrade websocket;
-
proxy_set_header Connection Upgrade;
-
proxy_pass http://localhost:5555;
-
}
-
# additional config for SSL/TLS go here.
-
}
-
-
```
-
-
Remember to use Let's Encrypt or similar to procure a certificate for your
-
knot domain.
-
-
You should now have a running knot server! You can finalize
-
your registration by hitting the `verify` button on the
-
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
-
a record on your PDS to announce the existence of the knot.
-
-
### custom paths
-
-
(This section applies to manual setup only. Docker users should edit the mounts
-
in `docker-compose.yml` instead.)
-
-
Right now, the database and repositories of your knot lives in `/home/git`. You
-
can move these paths if you'd like to store them in another folder. Be careful
-
when adjusting these paths:
-
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
-
any possible side effects. Remember to restart it once you're done.
-
* Make backups before moving in case something goes wrong.
-
* Make sure the `git` user can read and write from the new paths.
-
-
#### database
-
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
-
and we want to move it to `/home/git/database/knotserver.db`.
-
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
-
and `.db-wal` files if they exist.
-
-
```
-
mkdir /home/git/database
-
cp /home/git/knotserver.db* /home/git/database
-
```
-
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
-
the new file path (_not_ the directory):
-
-
```
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
-
```
-
-
#### repositories
-
-
As an example, let's say the repositories are currently in `/home/git`, and we
-
want to move them into `/home/git/repositories`.
-
-
Create the new folder, then move the existing repositories (if there are any):
-
-
```
-
mkdir /home/git/repositories
-
# move all DIDs into the new folder; these will vary for you!
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
-
```
-
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
-
to the new directory:
-
-
```
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
-
```
-
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
-
repository path:
-
-
```
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
-
Match User git
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
-
AuthorizedKeysCommandUser nobody
-
EOF
-
```
-
-
Make sure to restart your SSH server!
-
-
#### MOTD (message of the day)
-
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
-
`/home/git/motd` file:
-
-
```
-
printf "Hi from this knot!\n" > /home/git/motd
-
```
-
-
Note that you should add a newline at the end if setting a non-empty message
-
since the knot won't do this for you.
-59
docs/migrations.md
···
-
# Migrations
-
-
This document is laid out in reverse-chronological order.
-
Newer migration guides are listed first, and older guides
-
are further down the page.
-
-
## Upgrading from v1.8.x
-
-
After v1.8.2, the HTTP API for knot and spindles have been
-
deprecated and replaced with XRPC. Repositories on outdated
-
knots will not be viewable from the appview. Upgrading is
-
straightforward however.
-
-
For knots:
-
-
- Upgrade to latest tag (v1.9.0 or above)
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
-
hit the "retry" button to verify your knot
-
-
For spindles:
-
-
- Upgrade to latest tag (v1.9.0 or above)
-
- Head to the [spindle
-
dashboard](https://tangled.org/settings/spindles) and hit the
-
"retry" button to verify your spindle
-
-
## Upgrading from v1.7.x
-
-
After v1.7.0, knot secrets have been deprecated. You no
-
longer need a secret from the appview to run a knot. All
-
authorized commands to knots are managed via [Inter-Service
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
-
Knots will be read-only until upgraded.
-
-
Upgrading is quite easy, in essence:
-
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
-
environment variable entirely
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
-
your DID. You can find your DID in the
-
[settings](https://tangled.org/settings) page.
-
- Restart your knot once you have replaced the environment
-
variable
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
-
hit the "retry" button to verify your knot. This simply
-
writes a `sh.tangled.knot` record to your PDS.
-
-
If you use the nix module, simply bump the flake to the
-
latest revision, and change your config block like so:
-
-
```diff
-
services.tangled.knot = {
-
enable = true;
-
server = {
-
- secretFile = /path/to/secret;
-
+ owner = "did:plc:foo";
-
};
-
};
-
```
-25
docs/spindle/architecture.md
···
-
# spindle architecture
-
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
-
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
-
* when a new repo record comes through (typically when you add a spindle to a
-
repo from the settings), spindle then resolves the underlying knot and
-
subscribes to repo events (see:
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
-
* the spindle engine then handles execution of the pipeline, with results and
-
logs beamed on the spindle event stream over wss
-
-
### the engine
-
-
At present, the only supported backend is Docker (and Podman, if Docker
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
-
executes each step in the pipeline in a fresh container, with state persisted
-
across steps within the `/tangled/workspace` directory.
-
-
The base image for the container is constructed on the fly using
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
-
used packages.
-
-
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
···
-
# spindle self-hosting guide
-
-
## prerequisites
-
-
* Go
-
* Docker (the only supported backend currently)
-
-
## configuration
-
-
Spindle is configured using environment variables. The following environment variables are available:
-
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
-
-
## running spindle
-
-
1. **Set the environment variables.** For example:
-
-
```shell
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
-
export SPINDLE_SERVER_OWNER="your-did"
-
```
-
-
2. **Build the Spindle binary.**
-
-
```shell
-
cd core
-
go mod download
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
-
```
-
-
3. **Create the log directory.**
-
-
```shell
-
sudo mkdir -p /var/log/spindle
-
sudo chown $USER:$USER -R /var/log/spindle
-
```
-
-
4. **Run the Spindle binary.**
-
-
```shell
-
./cmd/spindle/spindle
-
```
-
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
···
-
# spindle secrets with openbao
-
-
This document covers setting up Spindle to use OpenBao for secrets
-
management via OpenBao Proxy instead of the default SQLite backend.
-
-
## overview
-
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
-
authentication automatically using AppRole credentials, while Spindle
-
connects to the local proxy instead of directly to the OpenBao server.
-
-
This approach provides better security, automatic token renewal, and
-
simplified application code.
-
-
## installation
-
-
Install OpenBao from nixpkgs:
-
-
```bash
-
nix shell nixpkgs#openbao # for a local server
-
```
-
-
## setup
-
-
The setup process can is documented for both local development and production.
-
-
### local development
-
-
Start OpenBao in dev mode:
-
-
```bash
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
-
```
-
-
This starts OpenBao on `http://localhost:8201` with a root token.
-
-
Set up environment for bao CLI:
-
-
```bash
-
export BAO_ADDR=http://localhost:8200
-
export BAO_TOKEN=root
-
```
-
-
### production
-
-
You would typically use a systemd service with a configuration file. Refer to
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
-
achieved using Nix.
-
-
Then, initialize the bao server:
-
```bash
-
bao operator init -key-shares=1 -key-threshold=1
-
```
-
-
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
-
```bash
-
bao operator unseal <unseal_key>
-
```
-
-
All steps below remain the same across both dev and production setups.
-
-
### configure openbao server
-
-
Create the spindle KV mount:
-
-
```bash
-
bao secrets enable -path=spindle -version=2 kv
-
```
-
-
Set up AppRole authentication and policy:
-
-
Create a policy file `spindle-policy.hcl`:
-
-
```hcl
-
# Full access to spindle KV v2 data
-
path "spindle/data/*" {
-
capabilities = ["create", "read", "update", "delete"]
-
}
-
-
# Access to metadata for listing and management
-
path "spindle/metadata/*" {
-
capabilities = ["list", "read", "delete", "update"]
-
}
-
-
# Allow listing at root level
-
path "spindle/" {
-
capabilities = ["list"]
-
}
-
-
# Required for connection testing and health checks
-
path "auth/token/lookup-self" {
-
capabilities = ["read"]
-
}
-
```
-
-
Apply the policy and create an AppRole:
-
-
```bash
-
bao policy write spindle-policy spindle-policy.hcl
-
bao auth enable approle
-
bao write auth/approle/role/spindle \
-
token_policies="spindle-policy" \
-
token_ttl=1h \
-
token_max_ttl=4h \
-
bind_secret_id=true \
-
secret_id_ttl=0 \
-
secret_id_num_uses=0
-
```
-
-
Get the credentials:
-
-
```bash
-
# Get role ID (static)
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
-
-
# Generate secret ID
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
-
-
echo "Role ID: $ROLE_ID"
-
echo "Secret ID: $SECRET_ID"
-
```
-
-
### create proxy configuration
-
-
Create the credential files:
-
-
```bash
-
# Create directory for OpenBao files
-
mkdir -p /tmp/openbao
-
-
# Save credentials
-
echo "$ROLE_ID" > /tmp/openbao/role-id
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
-
```
-
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
-
-
```hcl
-
# OpenBao server connection
-
vault {
-
address = "http://localhost:8200"
-
}
-
-
# Auto-Auth using AppRole
-
auto_auth {
-
method "approle" {
-
mount_path = "auth/approle"
-
config = {
-
role_id_file_path = "/tmp/openbao/role-id"
-
secret_id_file_path = "/tmp/openbao/secret-id"
-
}
-
}
-
-
# Optional: write token to file for debugging
-
sink "file" {
-
config = {
-
path = "/tmp/openbao/token"
-
mode = 0640
-
}
-
}
-
}
-
-
# Proxy listener for Spindle
-
listener "tcp" {
-
address = "127.0.0.1:8201"
-
tls_disable = true
-
}
-
-
# Enable API proxy with auto-auth token
-
api_proxy {
-
use_auto_auth_token = true
-
}
-
-
# Enable response caching
-
cache {
-
use_auto_auth_token = true
-
}
-
-
# Logging
-
log_level = "info"
-
```
-
-
### start the proxy
-
-
Start OpenBao Proxy:
-
-
```bash
-
bao proxy -config=/tmp/openbao/proxy.hcl
-
```
-
-
The proxy will authenticate with OpenBao and start listening on
-
`127.0.0.1:8201`.
-
-
### configure spindle
-
-
Set these environment variables for Spindle:
-
-
```bash
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
-
```
-
-
Start Spindle:
-
-
Spindle will now connect to the local proxy, which handles all
-
authentication automatically.
-
-
## production setup for proxy
-
-
For production, you'll want to run the proxy as a service:
-
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
-
proper TLS settings for the vault connection.
-
-
## verifying setup
-
-
Test the proxy directly:
-
-
```bash
-
# Check proxy health
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
-
-
# Test token lookup through proxy
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
-
```
-
-
Test OpenBao operations through the server:
-
-
```bash
-
# List all secrets
-
bao kv list spindle/
-
-
# Add a test secret via Spindle API, then check it exists
-
bao kv list spindle/repos/
-
-
# Get a specific secret
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
-
```
-
-
## how it works
-
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
-
- The proxy authenticates with OpenBao using AppRole credentials
-
- All Spindle requests go through the proxy, which injects authentication tokens
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
-
- The proxy handles all token renewal automatically
-
- Spindle no longer manages tokens or authentication directly
-
-
## troubleshooting
-
-
**Connection refused**: Check that the OpenBao Proxy is running and
-
listening on the configured address.
-
-
**403 errors**: Verify the AppRole credentials are correct and the policy
-
has the necessary permissions.
-
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
-
the mount creation step again.
-
-
**Proxy authentication failures**: Check the proxy logs and verify the
-
role-id and secret-id files are readable and contain valid credentials.
-
-
**Secret not found after writing**: This can indicate policy permission
-
issues. Verify the policy includes both `spindle/data/*` and
-
`spindle/metadata/*` paths with appropriate capabilities.
-
-
Check proxy logs:
-
-
```bash
-
# If running as systemd service
-
journalctl -u openbao-proxy -f
-
-
# If running directly, check the console output
-
```
-
-
Test AppRole authentication manually:
-
-
```bash
-
bao write auth/approle/login \
-
role_id="$(cat /tmp/openbao/role-id)" \
-
secret_id="$(cat /tmp/openbao/secret-id)"
-
```
-183
docs/spindle/pipeline.md
···
-
# spindle pipelines
-
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
-
-
The fields are:
-
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
-
-
## Trigger
-
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
-
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
-
- `push`: The workflow should run every time a commit is pushed to the repository.
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
-
- `manual`: The workflow can be triggered manually.
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
-
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
-
-
```yaml
-
when:
-
- event: ["push", "manual"]
-
branch: ["main", "develop"]
-
- event: ["pull_request"]
-
branch: ["main"]
-
```
-
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
-
-
```yaml
-
when:
-
- event: ["push"]
-
tag: ["v*"]
-
```
-
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
-
-
```yaml
-
when:
-
- event: ["push"]
-
branch: ["main", "release-*"]
-
tag: ["v*", "stable"]
-
```
-
-
## Engine
-
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
-
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
-
-
Example:
-
-
```yaml
-
engine: "nixery"
-
```
-
-
## Clone options
-
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
-
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
-
-
The default settings are:
-
-
```yaml
-
clone:
-
skip: false
-
depth: 1
-
submodules: false
-
```
-
-
## Dependencies
-
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
-
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
-
-
```yaml
-
dependencies:
-
# nixpkgs
-
nixpkgs:
-
- nodejs
-
- go
-
# custom registry
-
git+https://tangled.org/@example.com/my_pkg:
-
- my_pkg
-
```
-
-
Now these dependencies are available to use in your workflow!
-
-
## Environment
-
-
The `environment` field allows you define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
-
-
Example:
-
-
```yaml
-
environment:
-
GOOS: "linux"
-
GOARCH: "arm64"
-
NODE_ENV: "production"
-
MY_ENV_VAR: "MY_ENV_VALUE"
-
```
-
-
## Steps
-
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
-
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
-
-
Example:
-
-
```yaml
-
steps:
-
- name: "Build backend"
-
command: "go build"
-
environment:
-
GOOS: "darwin"
-
GOARCH: "arm64"
-
- name: "Build frontend"
-
command: "npm run build"
-
environment:
-
NODE_ENV: "production"
-
```
-
-
## Complete workflow
-
-
```yaml
-
# .tangled/workflows/build.yml
-
-
when:
-
- event: ["push", "manual"]
-
branch: ["main", "develop"]
-
- event: ["pull_request"]
-
branch: ["main"]
-
-
engine: "nixery"
-
-
# using the default values
-
clone:
-
skip: false
-
depth: 1
-
submodules: false
-
-
dependencies:
-
# nixpkgs
-
nixpkgs:
-
- nodejs
-
- go
-
# custom registry
-
git+https://tangled.org/@example.com/my_pkg:
-
- my_pkg
-
-
environment:
-
GOOS: "linux"
-
GOARCH: "arm64"
-
NODE_ENV: "production"
-
MY_ENV_VAR: "MY_ENV_VALUE"
-
-
steps:
-
- name: "Build backend"
-
command: "go build"
-
environment:
-
GOOS: "darwin"
-
GOARCH: "arm64"
-
- name: "Build frontend"
-
command: "npm run build"
-
environment:
-
NODE_ENV: "production"
-
```
-
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
···
+
svg {
+
width: 16px;
+
height: 16px;
+
}
+
+
:root {
+
--syntax-alert: #d20f39;
+
--syntax-annotation: #fe640b;
+
--syntax-attribute: #df8e1d;
+
--syntax-basen: #40a02b;
+
--syntax-builtin: #1e66f5;
+
--syntax-controlflow: #8839ef;
+
--syntax-char: #04a5e5;
+
--syntax-constant: #fe640b;
+
--syntax-comment: #9ca0b0;
+
--syntax-commentvar: #7c7f93;
+
--syntax-documentation: #9ca0b0;
+
--syntax-datatype: #df8e1d;
+
--syntax-decval: #40a02b;
+
--syntax-error: #d20f39;
+
--syntax-extension: #4c4f69;
+
--syntax-float: #40a02b;
+
--syntax-function: #1e66f5;
+
--syntax-import: #40a02b;
+
--syntax-information: #04a5e5;
+
--syntax-keyword: #8839ef;
+
--syntax-operator: #179299;
+
--syntax-other: #8839ef;
+
--syntax-preprocessor: #ea76cb;
+
--syntax-specialchar: #04a5e5;
+
--syntax-specialstring: #ea76cb;
+
--syntax-string: #40a02b;
+
--syntax-variable: #8839ef;
+
--syntax-verbatimstring: #40a02b;
+
--syntax-warning: #df8e1d;
+
}
+
+
@media (prefers-color-scheme: dark) {
+
:root {
+
--syntax-alert: #f38ba8;
+
--syntax-annotation: #fab387;
+
--syntax-attribute: #f9e2af;
+
--syntax-basen: #a6e3a1;
+
--syntax-builtin: #89b4fa;
+
--syntax-controlflow: #cba6f7;
+
--syntax-char: #89dceb;
+
--syntax-constant: #fab387;
+
--syntax-comment: #6c7086;
+
--syntax-commentvar: #585b70;
+
--syntax-documentation: #6c7086;
+
--syntax-datatype: #f9e2af;
+
--syntax-decval: #a6e3a1;
+
--syntax-error: #f38ba8;
+
--syntax-extension: #cdd6f4;
+
--syntax-float: #a6e3a1;
+
--syntax-function: #89b4fa;
+
--syntax-import: #a6e3a1;
+
--syntax-information: #89dceb;
+
--syntax-keyword: #cba6f7;
+
--syntax-operator: #94e2d5;
+
--syntax-other: #cba6f7;
+
--syntax-preprocessor: #f5c2e7;
+
--syntax-specialchar: #89dceb;
+
--syntax-specialstring: #f5c2e7;
+
--syntax-string: #a6e3a1;
+
--syntax-variable: #cba6f7;
+
--syntax-verbatimstring: #a6e3a1;
+
--syntax-warning: #f9e2af;
+
}
+
}
+
+
/* pandoc syntax highlighting classes */
+
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
+
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
+
code span.at { color: var(--syntax-attribute); } /* attribute */
+
code span.bn { color: var(--syntax-basen); } /* basen */
+
code span.bu { color: var(--syntax-builtin); } /* builtin */
+
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
+
code span.ch { color: var(--syntax-char); } /* char */
+
code span.cn { color: var(--syntax-constant); } /* constant */
+
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
+
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
+
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
+
code span.dt { color: var(--syntax-datatype); } /* datatype */
+
code span.dv { color: var(--syntax-decval); } /* decval */
+
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
+
code span.ex { color: var(--syntax-extension); } /* extension */
+
code span.fl { color: var(--syntax-float); } /* float */
+
code span.fu { color: var(--syntax-function); } /* function */
+
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
+
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
+
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
+
code span.op { color: var(--syntax-operator); } /* operator */
+
code span.ot { color: var(--syntax-other); } /* other */
+
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
+
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
+
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
+
code span.st { color: var(--syntax-string); } /* string */
+
code span.va { color: var(--syntax-variable); } /* variable */
+
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
+
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117
docs/template.html
···
+
<!DOCTYPE html>
+
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
+
<head>
+
<meta charset="utf-8" />
+
<meta name="generator" content="pandoc" />
+
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+
$for(author-meta)$
+
<meta name="author" content="$author-meta$" />
+
$endfor$
+
+
$if(date-meta)$
+
<meta name="dcterms.date" content="$date-meta$" />
+
$endif$
+
+
$if(keywords)$
+
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
+
$endif$
+
+
$if(description-meta)$
+
<meta name="description" content="$description-meta$" />
+
$endif$
+
+
<title>$pagetitle$</title>
+
+
<style>
+
$styles.css()$
+
</style>
+
+
$for(css)$
+
<link rel="stylesheet" href="$css$" />
+
$endfor$
+
+
$for(header-includes)$
+
$header-includes$
+
$endfor$
+
+
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
+
+
</head>
+
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
+
$for(include-before)$
+
$include-before$
+
$endfor$
+
+
$if(toc)$
+
<!-- mobile topbar toc -->
+
<details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
+
<summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
+
<span class="group-open:hidden inline">${ menu.svg() }</span>
+
<span class="hidden group-open:inline">${ x.svg() }</span>
+
</summary>
+
${ table-of-contents:toc.html() }
+
</details>
+
<!-- desktop sidebar toc -->
+
<nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
+
$if(toc-title)$
+
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
+
$endif$
+
${ table-of-contents:toc.html() }
+
</nav>
+
$endif$
+
+
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
+
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
+
$if(top)$
+
$-- only print title block if this is NOT the top page
+
$else$
+
$if(title)$
+
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
+
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
+
$if(subtitle)$
+
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
+
$endif$
+
$for(author)$
+
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
+
$endfor$
+
$if(date)$
+
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
+
$endif$
+
$if(abstract)$
+
<div class="mt-6 p-4 bg-gray-50 rounded-lg">
+
<div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
+
<div class="text-gray-700">$abstract$</div>
+
</div>
+
$endif$
+
</header>
+
$endif$
+
$endif$
+
<article class="prose dark:prose-invert max-w-none">
+
$body$
+
</article>
+
</main>
+
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
+
<div class="max-w-4xl mx-auto px-8 py-4">
+
<div class="flex justify-between gap-4">
+
<span class="flex-1">
+
$if(previous.url)$
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
+
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
+
$endif$
+
</span>
+
<span class="flex-1 text-right">
+
$if(next.url)$
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
+
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
+
$endif$
+
</span>
+
</div>
+
</div>
+
</nav>
+
</div>
+
$for(include-after)$
+
$include-after$
+
$endfor$
+
</body>
+
</html>
+4
docs/toc.html
···
+
<div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2">
+
$table-of-contents$
+
</div>
+
+9 -9
flake.lock
···
"systems": "systems"
},
"locked": {
-
"lastModified": 1694529238,
-
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+
"lastModified": 1731533236,
+
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
-
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1754078208,
-
"narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=",
+
"lastModified": 1763982521,
+
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
"owner": "nix-community",
"repo": "gomod2nix",
-
"rev": "7f963246a71626c7fc70b431a315c4388a0c95cf",
+
"rev": "02e63a239d6eabd595db56852535992c898eba72",
"type": "github"
},
"original": {
···
},
"nixpkgs": {
"locked": {
-
"lastModified": 1765186076,
-
"narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
+
"lastModified": 1766070988,
+
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
"owner": "nixos",
"repo": "nixpkgs",
-
"rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
+
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
"type": "github"
},
"original": {
+5 -2
flake.nix
···
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
};
appview = self.callPackage ./nix/pkgs/appview.nix {};
+
docs = self.callPackage ./nix/pkgs/docs.nix {
+
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
+
};
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
knot = self.callPackage ./nix/pkgs/knot.nix {};
});
in {
overlays.default = final: prev: {
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
};
packages = forAllSystems (system: let
···
staticPackages = mkPackageSet pkgs.pkgsStatic;
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
in {
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;
pkgsStatic-appview = staticPackages.appview;
pkgsStatic-knot = staticPackages.knot;
+2 -1
go.mod
···
github.com/urfave/cli/v3 v3.3.3
github.com/whyrusleeping/cbor-gen v0.3.1
github.com/yuin/goldmark v1.7.13
+
github.com/yuin/goldmark-emoji v1.0.6
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/image v0.31.0
golang.org/x/net v0.42.0
-
golang.org/x/sync v0.17.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
gopkg.in/yaml.v3 v3.0.1
)
···
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
+
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
+2
go.sum
···
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+
github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
+
github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
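The new goldmark-emoji dependency suggests that markdown rendering now resolves emoji shortcodes. A minimal sketch of enabling the extension in a goldmark converter — the setup below is illustrative, not the appview's actual configuration:

package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	emoji "github.com/yuin/goldmark-emoji"
)

func main() {
	// converter with the emoji extension enabled; this is a sketch,
	// not the appview's real goldmark setup
	md := goldmark.New(goldmark.WithExtensions(emoji.Emoji))

	var buf bytes.Buffer
	if err := md.Convert([]byte("shipped :rocket:"), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <p>shipped 🚀</p>
}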
+4 -4
hook/hook.go
···
},
Commands: []*cli.Command{
{
-
Name: "post-recieve",
-
Usage: "sends a post-recieve hook to the knot (waits for stdin)",
-
Action: postRecieve,
+
Name: "post-receive",
+
Usage: "sends a post-receive hook to the knot (waits for stdin)",
+
Action: postReceive,
},
},
}
}
-
func postRecieve(ctx context.Context, cmd *cli.Command) error {
+
func postReceive(ctx context.Context, cmd *cli.Command) error {
gitDir := cmd.String("git-dir")
userDid := cmd.String("user-did")
userHandle := cmd.String("user-handle")
+1 -1
hook/setup.go
···
option_var="GIT_PUSH_OPTION_$i"
push_options+=(-push-option "${!option_var}")
done
-
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve
+
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive
`, executablePath, config.internalApi)
return os.WriteFile(hookPath, []byte(hookContent), 0755)
+1 -1
input.css
···
}
.prose a.mention {
-
@apply no-underline hover:underline;
+
@apply no-underline hover:underline font-bold;
}
.prose li {
+11 -4
knotserver/db/db.go
···
return nil, err
}
-
// NOTE: If any other migration is added here, you MUST
-
// copy the pattern in appview: use a single sql.Conn
-
// for every migration.
+
conn, err := db.Conn(ctx)
+
if err != nil {
+
return nil, err
+
}
+
defer conn.Close()
-
_, err = db.Exec(`
+
_, err = conn.ExecContext(ctx, `
create table if not exists known_dids (
did text primary key
);
···
event text not null, -- json
created integer not null default (strftime('%s', 'now')),
primary key (rkey, nsid)
+
);
+
+
create table if not exists migrations (
+
id integer primary key autoincrement,
+
name text unique
);
`)
if err != nil {
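Running every migration statement through a single sql.Conn (rather than db.Exec) matters because database/sql pools connections: consecutive db.Exec calls may land on different connections, so connection-scoped state such as PRAGMAs would not carry over. A sketch of the pattern, with an illustrative migrations slice:

conn, err := db.Conn(ctx)
if err != nil {
	return nil, err
}
defer conn.Close()

for _, stmt := range migrations { // migrations is illustrative
	// every statement executes on the same pooled connection,
	// so connection-scoped state stays in effect throughout
	if _, err := conn.ExecContext(ctx, stmt); err != nil {
		return nil, err
	}
}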
+13 -1
knotserver/git/service/service.go
···
return c.RunService(cmd)
}
+
func (c *ServiceCommand) UploadArchive() error {
+
cmd := exec.Command("git", []string{
+
"upload-archive",
+
".",
+
}...)
+
+
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+
cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol))
+
cmd.Dir = c.Dir
+
+
return c.RunService(cmd)
+
}
+
func (c *ServiceCommand) UploadPack() error {
cmd := exec.Command("git", []string{
-
"-c", "uploadpack.allowFilter=true",
"upload-pack",
"--stateless-rpc",
".",
+47
knotserver/git.go
···
}
}
+
func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) {
+
did := chi.URLParam(r, "did")
+
name := chi.URLParam(r, "name")
+
repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name))
+
if err != nil {
+
gitError(w, err.Error(), http.StatusInternalServerError)
+
h.l.Error("git: failed to secure join repo path", "handler", "UploadPack", "error", err)
+
return
+
}
+
+
const expectedContentType = "application/x-git-upload-archive-request"
+
contentType := r.Header.Get("Content-Type")
+
if contentType != expectedContentType {
+
gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType)
+
return
+
}
+
+
var bodyReader io.ReadCloser = r.Body
+
if r.Header.Get("Content-Encoding") == "gzip" {
+
gzipReader, err := gzip.NewReader(r.Body)
+
if err != nil {
+
gitError(w, err.Error(), http.StatusInternalServerError)
+
h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err)
+
return
+
}
+
defer gzipReader.Close()
+
bodyReader = gzipReader
+
}
+
+
w.Header().Set("Content-Type", "application/x-git-upload-archive-result")
+
+
h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo)
+
+
cmd := service.ServiceCommand{
+
GitProtocol: r.Header.Get("Git-Protocol"),
+
Dir: repo,
+
Stdout: w,
+
Stdin: bodyReader,
+
}
+
+
w.WriteHeader(http.StatusOK)
+
+
if err := cmd.UploadArchive(); err != nil {
+
h.l.Error("git: failed to execute git-upload-pack", "handler", "UploadPack", "error", err)
+
return
+
}
+
}
+
func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) {
did := chi.URLParam(r, "did")
name := chi.URLParam(r, "name")
+1
knotserver/router.go
···
r.Route("/{name}", func(r chi.Router) {
// routes for git operations
r.Get("/info/refs", h.InfoRefs)
+
r.Post("/git-upload-archive", h.UploadArchive)
r.Post("/git-upload-pack", h.UploadPack)
r.Post("/git-receive-pack", h.ReceivePack)
})
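For reference, a hedged sketch of calling the new endpoint from Go; the host, DID, and repo name are placeholders, and the request body must carry a pkt-line encoded git-upload-archive request, which is elided here:

// body must be a pkt-line encoded upload-archive request (elided);
// the URL below uses a placeholder host, DID, and repo name
req, err := http.NewRequest(
	http.MethodPost,
	"https://knot.example.com/did:plc:example/myrepo/git-upload-archive",
	body,
)
if err != nil {
	return err
}
req.Header.Set("Content-Type", "application/x-git-upload-archive-request")

resp, err := http.DefaultClient.Do(req)
if err != nil {
	return err
}
defer resp.Body.Close()
// resp.Body streams the application/x-git-upload-archive-result payload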
+8 -2
lexicons/pulls/pull.json
···
"required": [
"target",
"title",
-
"patch",
+
"patchBlob",
"createdAt"
],
"properties": {
···
"type": "string"
},
"patch": {
-
"type": "string"
+
"type": "string",
+
"description": "(deprecated) use patchBlob instead"
+
},
+
"patchBlob": {
+
"type": "blob",
+
"accept": "text/x-patch",
+
"description": "patch content"
},
"source": {
"type": "ref",
+3
nix/gomod2nix.toml
···
[mod."github.com/yuin/goldmark"]
version = "v1.7.13"
hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE="
+
[mod."github.com/yuin/goldmark-emoji"]
+
version = "v1.0.6"
+
hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY="
[mod."github.com/yuin/goldmark-highlighting/v2"]
version = "v2.0.0-20230729083705-37449abec8cc"
hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+41
nix/pkgs/docs.nix
···
+
{
+
pandoc,
+
tailwindcss,
+
runCommandLocal,
+
inter-fonts-src,
+
ibm-plex-mono-src,
+
lucide-src,
+
src,
+
}:
+
runCommandLocal "docs" {} ''
+
mkdir -p working
+
+
# copy templates, themes, styles, filters to working directory
+
cp ${src}/docs/*.html working/
+
cp ${src}/docs/*.theme working/
+
cp ${src}/docs/*.css working/
+
+
# icons
+
cp -rf ${lucide-src}/*.svg working/
+
+
# content
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
+
-o $out/ \
+
-t chunkedhtml \
+
--variable toc \
+
--toc-depth=2 \
+
--css=stylesheet.css \
+
--chunk-template="%i.html" \
+
--highlight-style=working/highlight.theme \
+
--template=working/template.html
+
+
# fonts
+
mkdir -p $out/static/fonts
+
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
+
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
+
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
+
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
+
+
# styles
+
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
+
''
+1 -1
nix/vm.nix
···
var = builtins.getEnv name;
in
if var == ""
-
then throw "\$${name} must be defined, see docs/hacking.md for more details"
+
then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
else var;
envVarOr = name: default: let
var = builtins.getEnv name;
+3 -3
readme.md
···
## docs
-
* [knot hosting guide](/docs/knot-hosting.md)
-
* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
-
* [hacking on tangled](/docs/hacking.md)
+
- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
+
- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
+
- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
## security
+1
spindle/db/repos.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
var knots []string
for rows.Next() {
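The added Close matters because database/sql hands out pooled connections: a *sql.Rows that is never closed pins its connection until it is garbage collected. The general shape of the fixed query, with illustrative names:

rows, err := db.QueryContext(ctx, query) // query is illustrative
if err != nil {
	return nil, err
}
defer rows.Close() // releases the pooled connection on every return path

var knots []string
for rows.Next() {
	var k string
	if err := rows.Scan(&k); err != nil {
		return nil, err
	}
	knots = append(knots, k)
}
return knots, rows.Err() // surface any error that ended iteration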
+22 -21
spindle/engine/engine.go
···
import (
"context"
"errors"
-
"fmt"
"log/slog"
+
"sync"
securejoin "github.com/cyphar/filepath-securejoin"
-
"golang.org/x/sync/errgroup"
"tangled.org/core/notifier"
"tangled.org/core/spindle/config"
"tangled.org/core/spindle/db"
···
}
}
-
eg, ctx := errgroup.WithContext(ctx)
+
var wg sync.WaitGroup
for eng, wfs := range pipeline.Workflows {
workflowTimeout := eng.WorkflowTimeout()
l.Info("using workflow timeout", "timeout", workflowTimeout)
for _, w := range wfs {
-
eg.Go(func() error {
+
wg.Add(1)
+
go func() {
+
defer wg.Done()
+
wid := models.WorkflowId{
PipelineId: pipelineId,
Name: w.Name,
···
err := db.StatusRunning(wid, n)
if err != nil {
-
return err
+
l.Error("failed to set workflow status to running", "wid", wid, "err", err)
+
return
}
err = eng.SetupWorkflow(ctx, wid, &w)
···
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
}
-
return err
+
return
}
defer eng.DestroyWorkflow(ctx, wid)
-
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
+
secretValues := make([]string, len(allSecrets))
+
for i, s := range allSecrets {
+
secretValues[i] = s.Value
+
}
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues)
if err != nil {
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
wfLogger = nil
···
if errors.Is(err, ErrTimedOut) {
dbErr := db.StatusTimeout(wid, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr)
}
} else {
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
}
}
-
-
return fmt.Errorf("starting steps image: %w", err)
+
return
}
}
err = db.StatusSuccess(wid, n)
if err != nil {
-
return err
+
l.Error("failed to set workflow status to success", "wid", wid, "err", err)
}
-
-
return nil
-
})
+
}()
}
}
-
if err := eg.Wait(); err != nil {
-
l.Error("failed to run one or more workflows", "err", err)
-
} else {
-
l.Info("successfully ran full pipeline")
-
}
+
wg.Wait()
+
l.Info("all workflows completed")
}
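The move from errgroup to sync.WaitGroup changes failure semantics: errgroup.WithContext cancels the shared context on the first error, tearing down sibling workflows mid-run, while the WaitGroup version logs each failure and lets every workflow finish. The core pattern, reduced to a sketch with illustrative runWorkflow and workflows names:

var wg sync.WaitGroup
for _, w := range workflows {
	wg.Add(1)
	go func() {
		defer wg.Done()
		// a failure is recorded but does not cancel siblings,
		// unlike errgroup's first-error cancellation
		if err := runWorkflow(ctx, w); err != nil {
			l.Error("workflow failed", "name", w.Name, "err", err)
		}
	}()
}
wg.Wait() // block until all workflows complete, pass or fail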
+6 -1
spindle/models/logger.go
···
type WorkflowLogger struct {
file *os.File
encoder *json.Encoder
+
mask *SecretMask
}
-
func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
+
func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) {
path := LogFilePath(baseDir, wid)
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
···
return &WorkflowLogger{
file: file,
encoder: json.NewEncoder(file),
+
mask: NewSecretMask(secretValues),
}, nil
}
···
func (w *dataWriter) Write(p []byte) (int, error) {
line := strings.TrimRight(string(p), "\r\n")
+
if w.logger.mask != nil {
+
line = w.logger.mask.Mask(line)
+
}
entry := NewDataLogLine(w.idx, line, w.stream)
if err := w.logger.encoder.Encode(entry); err != nil {
return 0, err
+51
spindle/models/secret_mask.go
···
+
package models
+
+
import (
+
"encoding/base64"
+
"strings"
+
)
+
+
// SecretMask replaces secret values in strings with "***".
+
type SecretMask struct {
+
replacer *strings.Replacer
+
}
+
+
// NewSecretMask creates a mask for the given secret values.
+
// Also registers base64-encoded variants of each secret.
+
func NewSecretMask(values []string) *SecretMask {
+
var pairs []string
+
+
for _, value := range values {
+
if value == "" {
+
continue
+
}
+
+
pairs = append(pairs, value, "***")
+
+
b64 := base64.StdEncoding.EncodeToString([]byte(value))
+
if b64 != value {
+
pairs = append(pairs, b64, "***")
+
}
+
+
b64NoPad := strings.TrimRight(b64, "=")
+
if b64NoPad != b64 && b64NoPad != value {
+
pairs = append(pairs, b64NoPad, "***")
+
}
+
}
+
+
if len(pairs) == 0 {
+
return nil
+
}
+
+
return &SecretMask{
+
replacer: strings.NewReplacer(pairs...),
+
}
+
}
+
+
// Mask replaces all registered secret values with "***".
+
func (m *SecretMask) Mask(input string) string {
+
if m == nil || m.replacer == nil {
+
return input
+
}
+
return m.replacer.Replace(input)
+
}
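Registering the base64 variants catches secrets that a build step re-encodes before printing, e.g. when constructing basic-auth headers. A quick usage sketch (the secret value is illustrative):

package main

import (
	"fmt"

	"tangled.org/core/spindle/models"
)

func main() {
	mask := models.NewSecretMask([]string{"hunter2"})

	fmt.Println(mask.Mask("raw: hunter2"))      // raw: ***
	fmt.Println(mask.Mask("b64: aHVudGVyMg==")) // b64: ***
	fmt.Println(mask.Mask("nopad: aHVudGVyMg")) // nopad: ***
}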
+135
spindle/models/secret_mask_test.go
···
+
package models
+
+
import (
+
"encoding/base64"
+
"testing"
+
)
+
+
func TestSecretMask_BasicMasking(t *testing.T) {
+
mask := NewSecretMask([]string{"mysecret123"})
+
+
input := "The password is mysecret123 in this log"
+
expected := "The password is *** in this log"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_Base64Encoded(t *testing.T) {
+
secret := "mysecret123"
+
mask := NewSecretMask([]string{secret})
+
+
b64 := base64.StdEncoding.EncodeToString([]byte(secret))
+
input := "Encoded: " + b64
+
expected := "Encoded: ***"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_Base64NoPadding(t *testing.T) {
+
// "test" encodes to "dGVzdA==" with padding
+
secret := "test"
+
mask := NewSecretMask([]string{secret})
+
+
b64NoPad := "dGVzdA" // base64 without padding
+
input := "Token: " + b64NoPad
+
expected := "Token: ***"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_MultipleSecrets(t *testing.T) {
+
mask := NewSecretMask([]string{"password1", "apikey123"})
+
+
input := "Using password1 and apikey123 for auth"
+
expected := "Using *** and *** for auth"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_MultipleOccurrences(t *testing.T) {
+
mask := NewSecretMask([]string{"secret"})
+
+
input := "secret appears twice: secret"
+
expected := "*** appears twice: ***"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_ShortValues(t *testing.T) {
+
mask := NewSecretMask([]string{"abc", "xy", ""})
+
+
if mask == nil {
+
t.Fatal("expected non-nil mask")
+
}
+
+
input := "abc xy test"
+
expected := "*** *** test"
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+
+
func TestSecretMask_NilMask(t *testing.T) {
+
var mask *SecretMask
+
+
input := "some input text"
+
result := mask.Mask(input)
+
if result != input {
+
t.Errorf("expected %q, got %q", input, result)
+
}
+
}
+
+
func TestSecretMask_EmptyInput(t *testing.T) {
+
mask := NewSecretMask([]string{"secret"})
+
+
result := mask.Mask("")
+
if result != "" {
+
t.Errorf("expected empty string, got %q", result)
+
}
+
}
+
+
func TestSecretMask_NoMatch(t *testing.T) {
+
mask := NewSecretMask([]string{"secretvalue"})
+
+
input := "nothing to mask here"
+
result := mask.Mask(input)
+
if result != input {
+
t.Errorf("expected %q, got %q", input, result)
+
}
+
}
+
+
func TestSecretMask_EmptySecretsList(t *testing.T) {
+
mask := NewSecretMask([]string{})
+
+
if mask != nil {
+
t.Error("expected nil mask for empty secrets list")
+
}
+
}
+
+
func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
+
mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
+
+
input := "Using validpassword here"
+
expected := "Using *** here"
+
+
result := mask.Mask(input)
+
if result != expected {
+
t.Errorf("expected %q, got %q", expected, result)
+
}
+
}
+1 -1
spindle/motd
···
**
********
-
This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle
+
This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles
Most API routes are under /xrpc/
+1 -1
tailwind.config.js
···
const colors = require("tailwindcss/colors");
module.exports = {
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
darkMode: "media",
theme: {
container: {
+3
types/diff.go
···
// used by html elements as a unique ID for hrefs
func (d *Diff) Id() string {
+
if d.IsDelete {
+
return d.Name.Old
+
}
return d.Name.New
}
+112
types/diff_test.go
···
+
package types
+
+
import "testing"
+
+
func TestDiffId(t *testing.T) {
+
tests := []struct {
+
name string
+
diff Diff
+
expected string
+
}{
+
{
+
name: "regular file uses new name",
+
diff: Diff{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "", New: "src/main.go"},
+
},
+
expected: "src/main.go",
+
},
+
{
+
name: "new file uses new name",
+
diff: Diff{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "", New: "src/new.go"},
+
IsNew: true,
+
},
+
expected: "src/new.go",
+
},
+
{
+
name: "deleted file uses old name",
+
diff: Diff{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "src/deleted.go", New: ""},
+
IsDelete: true,
+
},
+
expected: "src/deleted.go",
+
},
+
{
+
name: "renamed file uses new name",
+
diff: Diff{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "src/old.go", New: "src/renamed.go"},
+
IsRename: true,
+
},
+
expected: "src/renamed.go",
+
},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.name, func(t *testing.T) {
+
if got := tt.diff.Id(); got != tt.expected {
+
t.Errorf("Diff.Id() = %q, want %q", got, tt.expected)
+
}
+
})
+
}
+
}
+
+
func TestChangedFilesMatchesDiffId(t *testing.T) {
+
// ChangedFiles() must return values matching each Diff's Id()
+
// so that sidebar links point to the correct anchors.
+
// Tests existing, deleted, new, and renamed files.
+
nd := NiceDiff{
+
Diff: []Diff{
+
{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "", New: "src/modified.go"},
+
},
+
{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "src/deleted.go", New: ""},
+
IsDelete: true,
+
},
+
{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "", New: "src/new.go"},
+
IsNew: true,
+
},
+
{
+
Name: struct {
+
Old string `json:"old"`
+
New string `json:"new"`
+
}{Old: "src/old.go", New: "src/renamed.go"},
+
IsRename: true,
+
},
+
},
+
}
+
+
changedFiles := nd.ChangedFiles()
+
+
if len(changedFiles) != len(nd.Diff) {
+
t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff))
+
}
+
+
for i, diff := range nd.Diff {
+
if changedFiles[i] != diff.Id() {
+
t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id())
+
}
+
}
+
}