forked from tangled.org/core
Monorepo for Tangled — https://tangled.org

Compare changes

Choose any two refs to compare.

Changed files
+7423 -2964
api
appview
commitverify
db
email
issues
knots
labels
mentions
middleware
models
notifications
notify
oauth
pages
pipelines
pulls
repo
reporesolver
serververify
settings
spindles
state
strings
validator
crypto
docs
hook
jetstream
knotserver
lexicons
nix
orm
patchutil
rbac
sets
spindle
types
+649 -8
api/tangled/cbor_gen.go
···
cw := cbg.NewCborWriter(w)
-
fieldCount := 5
+
fieldCount := 7
if t.Body == nil {
+
fieldCount--
+
}
+
+
if t.Mentions == nil {
+
fieldCount--
+
}
+
+
if t.References == nil {
fieldCount--
···
return err
+
// t.Mentions ([]string) (slice)
+
if t.Mentions != nil {
+
+
if len("mentions") > 1000000 {
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("mentions")); err != nil {
+
return err
+
}
+
+
if len(t.Mentions) > 8192 {
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
+
return err
+
}
+
for _, v := range t.Mentions {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
+
// t.CreatedAt (string) (string)
if len("createdAt") > 1000000 {
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
return err
+
+
// t.References ([]string) (slice)
+
if t.References != nil {
+
+
if len("references") > 1000000 {
+
return xerrors.Errorf("Value in field \"references\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("references")); err != nil {
+
return err
+
}
+
+
if len(t.References) > 8192 {
+
return xerrors.Errorf("Slice value in field t.References was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
+
return err
+
}
+
for _, v := range t.References {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
return nil
···
n := extra
-
nameBuf := make([]byte, 9)
+
nameBuf := make([]byte, 10)
for i := uint64(0); i < n; i++ {
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
if err != nil {
···
t.Title = string(sval)
+
}
+
// t.Mentions ([]string) (slice)
+
case "mentions":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.Mentions = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.Mentions[i] = string(sval)
+
}
+
+
}
// t.CreatedAt (string) (string)
case "createdAt":
···
t.CreatedAt = string(sval)
+
// t.References ([]string) (slice)
+
case "references":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.References: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.References = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.References[i] = string(sval)
+
}
+
+
}
+
}
default:
// Field doesn't exist on this type, so ignore it
···
cw := cbg.NewCborWriter(w)
-
fieldCount := 5
+
fieldCount := 7
+
+
if t.Mentions == nil {
+
fieldCount--
+
}
+
+
if t.References == nil {
+
fieldCount--
+
}
if t.ReplyTo == nil {
fieldCount--
···
+
// t.Mentions ([]string) (slice)
+
if t.Mentions != nil {
+
+
if len("mentions") > 1000000 {
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("mentions")); err != nil {
+
return err
+
}
+
+
if len(t.Mentions) > 8192 {
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
+
return err
+
}
+
for _, v := range t.Mentions {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
+
// t.CreatedAt (string) (string)
if len("createdAt") > 1000000 {
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
return err
+
+
// t.References ([]string) (slice)
+
if t.References != nil {
+
+
if len("references") > 1000000 {
+
return xerrors.Errorf("Value in field \"references\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("references")); err != nil {
+
return err
+
}
+
+
if len(t.References) > 8192 {
+
return xerrors.Errorf("Slice value in field t.References was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
+
return err
+
}
+
for _, v := range t.References {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
return nil
···
n := extra
-
nameBuf := make([]byte, 9)
+
nameBuf := make([]byte, 10)
for i := uint64(0); i < n; i++ {
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
if err != nil {
···
t.ReplyTo = (*string)(&sval)
+
}
+
}
+
// t.Mentions ([]string) (slice)
+
case "mentions":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.Mentions = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.Mentions[i] = string(sval)
+
}
+
// t.CreatedAt (string) (string)
···
t.CreatedAt = string(sval)
+
}
+
// t.References ([]string) (slice)
+
case "references":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.References: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.References = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.References[i] = string(sval)
+
}
+
+
}
default:
···
cw := cbg.NewCborWriter(w)
-
fieldCount := 7
+
fieldCount := 9
if t.Body == nil {
+
fieldCount--
+
}
+
+
if t.Mentions == nil {
+
fieldCount--
+
}
+
+
if t.References == nil {
fieldCount--
···
return err
+
// t.Mentions ([]string) (slice)
+
if t.Mentions != nil {
+
+
if len("mentions") > 1000000 {
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("mentions")); err != nil {
+
return err
+
}
+
+
if len(t.Mentions) > 8192 {
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
+
return err
+
}
+
for _, v := range t.Mentions {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
+
// t.CreatedAt (string) (string)
if len("createdAt") > 1000000 {
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
return err
+
+
// t.References ([]string) (slice)
+
if t.References != nil {
+
+
if len("references") > 1000000 {
+
return xerrors.Errorf("Value in field \"references\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("references")); err != nil {
+
return err
+
}
+
+
if len(t.References) > 8192 {
+
return xerrors.Errorf("Slice value in field t.References was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
+
return err
+
}
+
for _, v := range t.References {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
return nil
···
n := extra
-
nameBuf := make([]byte, 9)
+
nameBuf := make([]byte, 10)
for i := uint64(0); i < n; i++ {
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
if err != nil {
···
+
}
+
// t.Mentions ([]string) (slice)
+
case "mentions":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.Mentions = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.Mentions[i] = string(sval)
+
}
+
+
}
// t.CreatedAt (string) (string)
case "createdAt":
···
t.CreatedAt = string(sval)
+
// t.References ([]string) (slice)
+
case "references":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.References: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.References = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.References[i] = string(sval)
+
}
+
+
}
+
}
default:
// Field doesn't exist on this type, so ignore it
···
cw := cbg.NewCborWriter(w)
+
fieldCount := 6
-
if _, err := cw.Write([]byte{164}); err != nil {
+
if t.Mentions == nil {
+
fieldCount--
+
}
+
+
if t.References == nil {
+
fieldCount--
+
}
+
+
if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
return err
···
return err
+
// t.Mentions ([]string) (slice)
+
if t.Mentions != nil {
+
+
if len("mentions") > 1000000 {
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("mentions")); err != nil {
+
return err
+
}
+
+
if len(t.Mentions) > 8192 {
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
+
return err
+
}
+
for _, v := range t.Mentions {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
+
}
+
// t.CreatedAt (string) (string)
if len("createdAt") > 1000000 {
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
return err
+
}
+
+
// t.References ([]string) (slice)
+
if t.References != nil {
+
+
if len("references") > 1000000 {
+
return xerrors.Errorf("Value in field \"references\" was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string("references")); err != nil {
+
return err
+
}
+
+
if len(t.References) > 8192 {
+
return xerrors.Errorf("Slice value in field t.References was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
+
return err
+
}
+
for _, v := range t.References {
+
if len(v) > 1000000 {
+
return xerrors.Errorf("Value in field v was too long")
+
}
+
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+
return err
+
}
+
if _, err := cw.WriteString(string(v)); err != nil {
+
return err
+
}
+
+
}
return nil
···
n := extra
-
nameBuf := make([]byte, 9)
+
nameBuf := make([]byte, 10)
for i := uint64(0); i < n; i++ {
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
if err != nil {
···
t.LexiconTypeID = string(sval)
+
// t.Mentions ([]string) (slice)
+
case "mentions":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.Mentions = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.Mentions[i] = string(sval)
+
}
+
+
}
+
}
// t.CreatedAt (string) (string)
case "createdAt":
···
t.CreatedAt = string(sval)
+
}
+
// t.References ([]string) (slice)
+
case "references":
+
+
maj, extra, err = cr.ReadHeader()
+
if err != nil {
+
return err
+
}
+
+
if extra > 8192 {
+
return fmt.Errorf("t.References: array too large (%d)", extra)
+
}
+
+
if maj != cbg.MajArray {
+
return fmt.Errorf("expected cbor array")
+
}
+
+
if extra > 0 {
+
t.References = make([]string, extra)
+
}
+
+
for i := 0; i < int(extra); i++ {
+
{
+
var maj byte
+
var extra uint64
+
var err error
+
_ = maj
+
_ = extra
+
_ = err
+
+
{
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
if err != nil {
+
return err
+
}
+
+
t.References[i] = string(sval)
+
}
+
+
}
default:
+7 -5
api/tangled/issuecomment.go
···
} //
// RECORDTYPE: RepoIssueComment
type RepoIssueComment struct {
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
-
Body string `json:"body" cborgen:"body"`
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
-
Issue string `json:"issue" cborgen:"issue"`
-
ReplyTo *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
+
Body string `json:"body" cborgen:"body"`
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+
Issue string `json:"issue" cborgen:"issue"`
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
+
ReplyTo *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
}
+6 -4
api/tangled/pullcomment.go
···
} //
// RECORDTYPE: RepoPullComment
type RepoPullComment struct {
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
-
Body string `json:"body" cborgen:"body"`
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
-
Pull string `json:"pull" cborgen:"pull"`
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
+
Body string `json:"body" cborgen:"body"`
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+
Pull string `json:"pull" cborgen:"pull"`
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
}
+7 -5
api/tangled/repoissue.go
···
} //
// RECORDTYPE: RepoIssue
type RepoIssue struct {
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
-
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
-
Repo string `json:"repo" cborgen:"repo"`
-
Title string `json:"title" cborgen:"title"`
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
+
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
+
Repo string `json:"repo" cborgen:"repo"`
+
Title string `json:"title" cborgen:"title"`
}
+2
api/tangled/repopull.go
···
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
Patch string `json:"patch" cborgen:"patch"`
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
Target *RepoPull_Target `json:"target" cborgen:"target"`
Title string `json:"title" cborgen:"title"`
+6 -45
appview/commitverify/verify.go
···
import (
"log"
-
"github.com/go-git/go-git/v5/plumbing/object"
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/crypto"
···
return ""
}
-
func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
-
ndCommits := []types.NiceDiff{}
-
for _, commit := range commits {
-
ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
-
}
-
return GetVerifiedCommits(e, emailToDid, ndCommits)
-
}
-
-
func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
+
func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
vcs := VerifiedCommits{}
didPubkeyCache := make(map[string][]models.PublicKey)
for _, commit := range ndCommits {
-
c := commit.Commit
-
-
committerEmail := c.Committer.Email
+
committerEmail := commit.Committer.Email
if did, exists := emailToDid[committerEmail]; exists {
// check if we've already fetched public keys for this did
pubKeys, ok := didPubkeyCache[did]
···
}
// try to verify with any associated pubkeys
+
payload := commit.Payload()
+
signature := commit.PGPSignature
for _, pk := range pubKeys {
-
if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
+
if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
fp, err := crypto.SSHFingerprint(pk.Key)
if err != nil {
log.Println("error computing ssh fingerprint:", err)
}
-
vc := verifiedCommit{fingerprint: fp, hash: c.This}
+
vc := verifiedCommit{fingerprint: fp, hash: commit.This}
vcs[vc] = struct{}{}
break
}
···
return vcs, nil
}
-
-
// ObjectCommitToNiceDiff is a compatibility function to convert a
-
// commit object into a NiceDiff structure.
-
func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
-
var niceDiff types.NiceDiff
-
-
// set commit information
-
niceDiff.Commit.Message = c.Message
-
niceDiff.Commit.Author = c.Author
-
niceDiff.Commit.This = c.Hash.String()
-
niceDiff.Commit.Committer = c.Committer
-
niceDiff.Commit.Tree = c.TreeHash.String()
-
niceDiff.Commit.PGPSignature = c.PGPSignature
-
-
changeId, ok := c.ExtraHeaders["change-id"]
-
if ok {
-
niceDiff.Commit.ChangedId = string(changeId)
-
}
-
-
// set parent hash if available
-
if len(c.ParentHashes) > 0 {
-
niceDiff.Commit.Parent = c.ParentHashes[0].String()
-
}
-
-
// XXX: Stats and Diff fields are typically populated
-
// after fetching the actual diff information, which isn't
-
// directly available in the commit object itself.
-
-
return niceDiff
-
}
+3 -2
appview/db/artifact.go
···
"github.com/go-git/go-git/v5/plumbing"
"github.com/ipfs/go-cid"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func AddArtifact(e Execer, artifact models.Artifact) error {
···
return err
}
-
func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) {
+
func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) {
var artifacts []models.Artifact
var conditions []string
···
return artifacts, nil
}
-
func DeleteArtifact(e Execer, filters ...filter) error {
+
func DeleteArtifact(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
+4 -3
appview/db/collaborators.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func AddCollaborator(e Execer, c models.Collaborator) error {
···
return err
}
-
func DeleteCollaborator(e Execer, filters ...filter) error {
+
func DeleteCollaborator(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return nil, nil
}
-
return GetRepos(e, 0, FilterIn("at_uri", repoAts))
+
return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
}
-
func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) {
+
func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) {
var collaborators []models.Collaborator
var conditions []string
var args []any
+33 -137
appview/db/db.go
···
import (
"context"
"database/sql"
-
"fmt"
"log/slog"
-
"reflect"
"strings"
_ "github.com/mattn/go-sqlite3"
"tangled.org/core/log"
+
"tangled.org/core/orm"
)
type DB struct {
···
email_notifications integer not null default 0
);
+
create table if not exists reference_links (
+
id integer primary key autoincrement,
+
from_at text not null,
+
to_at text not null,
+
unique (from_at, to_at)
+
);
+
create table if not exists migrations (
id integer primary key autoincrement,
name text unique
···
-- indexes for better performance
create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
+
create index if not exists idx_references_from_at on reference_links(from_at);
+
create index if not exists idx_references_to_at on reference_links(to_at);
`)
if err != nil {
return nil, err
}
// run migrations
-
runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
tx.Exec(`
alter table repos add column description text check (length(description) <= 200);
`)
return nil
})
-
runMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
// add unconstrained column
_, err := tx.Exec(`
alter table public_keys
···
return nil
})
-
runMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table comments drop column comment_at;
alter table comments add column rkey text;
···
return err
})
-
runMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table comments add column deleted text; -- timestamp
alter table comments add column edited text; -- timestamp
···
return err
})
-
runMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table pulls add column source_branch text;
alter table pulls add column source_repo_at text;
···
return err
})
-
runMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table repos add column source text;
`)
···
//
// [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
conn.ExecContext(ctx, "pragma foreign_keys = off;")
-
runMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table pulls_new (
-- identifiers
···
})
conn.ExecContext(ctx, "pragma foreign_keys = on;")
-
runMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
tx.Exec(`
alter table repos add column spindle text;
`)
···
// drop all knot secrets, add unique constraint to knots
//
// knots will henceforth use service auth for signed requests
-
runMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table registrations_new (
id integer primary key autoincrement,
···
})
// recreate and add rkey + created columns with default constraint
-
runMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
// create new table
// - repo_at instead of repo integer
// - rkey field
···
return err
})
-
runMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table issues add column rkey text not null default '';
···
})
// repurpose the read-only column to "needs-upgrade"
-
runMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table registrations rename column read_only to needs_upgrade;
`)
···
})
// require all knots to upgrade after the release of total xrpc
-
runMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
_, err := tx.Exec(`
update registrations set needs_upgrade = 1;
`)
···
})
// require all knots to upgrade after the release of total xrpc
-
runMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table spindles add column needs_upgrade integer not null default 0;
`)
···
//
// disable foreign-keys for the next migration
conn.ExecContext(ctx, "pragma foreign_keys = off;")
-
runMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists issues_new (
-- identifiers
···
// - new columns
// * column "reply_to" which can be any other comment
// * column "at-uri" which is a generated column
-
runMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists issue_comments (
-- identifiers
···
//
// disable foreign-keys for the next migration
conn.ExecContext(ctx, "pragma foreign_keys = off;")
-
runMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists pulls_new (
-- identifiers
···
//
// disable foreign-keys for the next migration
conn.ExecContext(ctx, "pragma foreign_keys = off;")
-
runMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists pull_submissions_new (
-- identifiers
···
// knots may report the combined patch for a comparison, we can store that on the appview side
// (but not on the pds record), because calculating the combined patch requires a git index
-
runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table pull_submissions add column combined text;
`)
return err
})
-
runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table profile add column pronouns text;
`)
return err
})
-
runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table repos add column website text;
alter table repos add column topics text;
···
return err
})
-
runMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table notification_preferences add column user_mentioned integer not null default 1;
`)
···
})
// remove the foreign key constraints from stars.
-
runMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
+
orm.RunMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
_, err := tx.Exec(`
create table stars_new (
id integer primary key autoincrement,
···
}, nil
-
type migrationFn = func(*sql.Tx) error
-
-
func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
-
logger = logger.With("migration", name)
-
-
tx, err := c.BeginTx(context.Background(), nil)
-
if err != nil {
-
return err
-
}
-
defer tx.Rollback()
-
-
var exists bool
-
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
-
if err != nil {
-
return err
-
}
-
-
if !exists {
-
// run migration
-
err = migrationFn(tx)
-
if err != nil {
-
logger.Error("failed to run migration", "err", err)
-
return err
-
}
-
-
// mark migration as complete
-
_, err = tx.Exec("insert into migrations (name) values (?)", name)
-
if err != nil {
-
logger.Error("failed to mark migration as complete", "err", err)
-
return err
-
}
-
-
// commit the transaction
-
if err := tx.Commit(); err != nil {
-
return err
-
}
-
-
logger.Info("migration applied successfully")
-
} else {
-
logger.Warn("skipped migration, already applied")
-
}
-
-
return nil
-
}
-
func (d *DB) Close() error {
return d.DB.Close()
-
-
type filter struct {
-
key string
-
arg any
-
cmp string
-
}
-
-
func newFilter(key, cmp string, arg any) filter {
-
return filter{
-
key: key,
-
arg: arg,
-
cmp: cmp,
-
}
-
}
-
-
func FilterEq(key string, arg any) filter { return newFilter(key, "=", arg) }
-
func FilterNotEq(key string, arg any) filter { return newFilter(key, "<>", arg) }
-
func FilterGte(key string, arg any) filter { return newFilter(key, ">=", arg) }
-
func FilterLte(key string, arg any) filter { return newFilter(key, "<=", arg) }
-
func FilterIs(key string, arg any) filter { return newFilter(key, "is", arg) }
-
func FilterIsNot(key string, arg any) filter { return newFilter(key, "is not", arg) }
-
func FilterIn(key string, arg any) filter { return newFilter(key, "in", arg) }
-
func FilterLike(key string, arg any) filter { return newFilter(key, "like", arg) }
-
func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) }
-
func FilterContains(key string, arg any) filter {
-
return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
-
}
-
-
func (f filter) Condition() string {
-
rv := reflect.ValueOf(f.arg)
-
kind := rv.Kind()
-
-
// if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
-
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-
if rv.Len() == 0 {
-
// always false
-
return "1 = 0"
-
}
-
-
placeholders := make([]string, rv.Len())
-
for i := range placeholders {
-
placeholders[i] = "?"
-
}
-
-
return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", "))
-
}
-
-
return fmt.Sprintf("%s %s ?", f.key, f.cmp)
-
}
-
-
func (f filter) Arg() []any {
-
rv := reflect.ValueOf(f.arg)
-
kind := rv.Kind()
-
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-
if rv.Len() == 0 {
-
return nil
-
}
-
-
out := make([]any, rv.Len())
-
for i := range rv.Len() {
-
out[i] = rv.Index(i).Interface()
-
}
-
return out
-
}
-
-
return []any{f.arg}
-
}
+6 -3
appview/db/follow.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func AddFollow(e Execer, follow *models.Follow) error {
···
return result, nil
}
-
func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) {
+
func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) {
var follows []models.Follow
var conditions []string
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
for rows.Next() {
var follow models.Follow
var followedAt string
···
}
func GetFollowers(e Execer, did string) ([]models.Follow, error) {
-
return GetFollows(e, 0, FilterEq("subject_did", did))
+
return GetFollows(e, 0, orm.FilterEq("subject_did", did))
}
func GetFollowing(e Execer, did string) ([]models.Follow, error) {
-
return GetFollows(e, 0, FilterEq("user_did", did))
+
return GetFollows(e, 0, orm.FilterEq("user_did", did))
}
func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
+93 -36
appview/db/issues.go
···
"time"
"github.com/bluesky-social/indigo/atproto/syntax"
+
"tangled.org/core/api/tangled"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pagination"
+
"tangled.org/core/orm"
)
func PutIssue(tx *sql.Tx, issue *models.Issue) error {
···
issues, err := GetIssues(
tx,
-
FilterEq("did", issue.Did),
-
FilterEq("rkey", issue.Rkey),
+
orm.FilterEq("did", issue.Did),
+
orm.FilterEq("rkey", issue.Rkey),
)
switch {
case err != nil:
···
returning rowid, issue_id
`, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
-
return row.Scan(&issue.Id, &issue.IssueId)
+
err = row.Scan(&issue.Id, &issue.IssueId)
+
if err != nil {
+
return fmt.Errorf("scan row: %w", err)
+
}
+
+
if err := putReferences(tx, issue.AtUri(), issue.References); err != nil {
+
return fmt.Errorf("put reference_links: %w", err)
+
}
+
return nil
}
func updateIssue(tx *sql.Tx, issue *models.Issue) error {
···
set title = ?, body = ?, edited = ?
where did = ? and rkey = ?
`, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
-
return err
+
if err != nil {
+
return err
+
}
+
+
if err := putReferences(tx, issue.AtUri(), issue.References); err != nil {
+
return fmt.Errorf("put reference_links: %w", err)
+
}
+
return nil
}
-
func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) {
+
func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) {
issueMap := make(map[string]*models.Issue) // at-uri -> issue
var conditions []string
···
whereClause = " where " + strings.Join(conditions, " and ")
}
-
pLower := FilterGte("row_num", page.Offset+1)
-
pUpper := FilterLte("row_num", page.Offset+page.Limit)
+
pLower := orm.FilterGte("row_num", page.Offset+1)
+
pUpper := orm.FilterLte("row_num", page.Offset+page.Limit)
pageClause := ""
if page.Limit > 0 {
···
repoAts = append(repoAts, string(issue.RepoAt))
}
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
if err != nil {
return nil, fmt.Errorf("failed to build repo mappings: %w", err)
}
···
// collect comments
issueAts := slices.Collect(maps.Keys(issueMap))
-
comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
+
comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts))
if err != nil {
return nil, fmt.Errorf("failed to query comments: %w", err)
}
···
}
// collect allLabels for each issue
-
allLabels, err := GetLabels(e, FilterIn("subject", issueAts))
+
allLabels, err := GetLabels(e, orm.FilterIn("subject", issueAts))
if err != nil {
return nil, fmt.Errorf("failed to query labels: %w", err)
}
···
}
}
+
// collect references for each issue
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts))
+
if err != nil {
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
+
}
+
for issueAt, references := range allReferencs {
+
if issue, ok := issueMap[issueAt.String()]; ok {
+
issue.References = references
+
}
+
}
+
var issues []models.Issue
for _, i := range issueMap {
issues = append(issues, *i)
···
issues, err := GetIssuesPaginated(
e,
pagination.Page{},
-
FilterEq("repo_at", repoAt),
-
FilterEq("issue_id", issueId),
+
orm.FilterEq("repo_at", repoAt),
+
orm.FilterEq("issue_id", issueId),
)
if err != nil {
return nil, err
···
return &issues[0], nil
}
-
func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) {
+
func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) {
return GetIssuesPaginated(e, pagination.Page{}, filters...)
}
···
func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) {
var ids []int64
-
var filters []filter
+
var filters []orm.Filter
openValue := 0
if opts.IsOpen {
openValue = 1
}
-
filters = append(filters, FilterEq("open", openValue))
+
filters = append(filters, orm.FilterEq("open", openValue))
if opts.RepoAt != "" {
-
filters = append(filters, FilterEq("repo_at", opts.RepoAt))
+
filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
}
var conditions []string
···
return ids, nil
}
-
func AddIssueComment(e Execer, c models.IssueComment) (int64, error) {
-
result, err := e.Exec(
+
func AddIssueComment(tx *sql.Tx, c models.IssueComment) (int64, error) {
+
result, err := tx.Exec(
`insert into issue_comments (
did,
rkey,
···
return 0, err
}
+
if err := putReferences(tx, c.AtUri(), c.References); err != nil {
+
return 0, fmt.Errorf("put reference_links: %w", err)
+
}
+
return id, nil
}
-
func DeleteIssueComments(e Execer, filters ...filter) error {
+
func DeleteIssueComments(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func GetIssueComments(e Execer, filters ...filter) ([]models.IssueComment, error) {
-
var comments []models.IssueComment
+
func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) {
+
commentMap := make(map[string]*models.IssueComment)
var conditions []string
var args []any
···
if err != nil {
return nil, err
}
+
defer rows.Close()
for rows.Next() {
var comment models.IssueComment
···
comment.ReplyTo = &replyTo.V
}
-
comments = append(comments, comment)
+
atUri := comment.AtUri().String()
+
commentMap[atUri] = &comment
}
if err = rows.Err(); err != nil {
return nil, err
}
+
// collect references for each comment
+
commentAts := slices.Collect(maps.Keys(commentMap))
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
+
if err != nil {
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
+
}
+
for commentAt, references := range allReferencs {
+
if comment, ok := commentMap[commentAt.String()]; ok {
+
comment.References = references
+
}
+
}
+
+
var comments []models.IssueComment
+
for _, c := range commentMap {
+
comments = append(comments, *c)
+
}
+
+
sort.Slice(comments, func(i, j int) bool {
+
return comments[i].Created.After(comments[j].Created)
+
})
+
return comments, nil
}
-
func DeleteIssues(e Execer, filters ...filter) error {
-
var conditions []string
-
var args []any
-
for _, filter := range filters {
-
conditions = append(conditions, filter.Condition())
-
args = append(args, filter.Arg()...)
+
func DeleteIssues(tx *sql.Tx, did, rkey string) error {
+
_, err := tx.Exec(
+
`delete from issues
+
where did = ? and rkey = ?`,
+
did,
+
rkey,
+
)
+
if err != nil {
+
return fmt.Errorf("delete issue: %w", err)
}
-
whereClause := ""
-
if conditions != nil {
-
whereClause = " where " + strings.Join(conditions, " and ")
+
uri := syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, tangled.RepoIssueNSID, rkey))
+
err = deleteReferences(tx, uri)
+
if err != nil {
+
return fmt.Errorf("delete reference_links: %w", err)
}
-
query := fmt.Sprintf(`delete from issues %s`, whereClause)
-
_, err := e.Exec(query, args...)
-
return err
+
return nil
}
-
func CloseIssues(e Execer, filters ...filter) error {
+
func CloseIssues(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func ReopenIssues(e Execer, filters ...filter) error {
+
func ReopenIssues(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
+8 -7
appview/db/label.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
// no updating type for now
···
return id, nil
}
-
func DeleteLabelDefinition(e Execer, filters ...filter) error {
+
func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) {
+
func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) {
var labelDefinitions []models.LabelDefinition
var conditions []string
var args []any
···
}
// helper to get exactly one label def
-
func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) {
+
func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) {
labels, err := GetLabelDefinitions(e, filters...)
if err != nil {
return nil, err
···
return id, nil
}
-
func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) {
+
func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) {
var labelOps []models.LabelOp
var conditions []string
var args []any
···
}
// get labels for a given list of subject URIs
-
func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) {
+
func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) {
ops, err := GetLabelOps(e, filters...)
if err != nil {
return nil, err
···
}
labelAts := slices.Collect(maps.Keys(labelAtSet))
-
actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts))
+
actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts))
if err != nil {
return nil, err
}
···
return results, nil
}
-
func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) {
+
func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) {
labels, err := GetLabelDefinitions(e, filters...)
if err != nil {
return nil, err
+6 -5
appview/db/language.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
-
func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) {
+
func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
whereClause,
)
rows, err := e.Query(query, args...)
-
if err != nil {
return nil, fmt.Errorf("failed to execute query: %w ", err)
}
+
defer rows.Close()
var langs []models.RepoLanguage
for rows.Next() {
···
return nil
}
-
func DeleteRepoLanguages(e Execer, filters ...filter) error {
+
func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error {
err := DeleteRepoLanguages(
tx,
-
FilterEq("repo_at", repoAt),
-
FilterEq("ref", ref),
+
orm.FilterEq("repo_at", repoAt),
+
orm.FilterEq("ref", ref),
)
if err != nil {
return fmt.Errorf("failed to delete existing languages: %w", err)
+14 -13
appview/db/notifications.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pagination"
+
"tangled.org/core/orm"
)
func CreateNotification(e Execer, notification *models.Notification) error {
···
}
// GetNotificationsPaginated retrieves notifications with filters and pagination
-
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) {
+
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) {
var conditions []string
var args []any
···
}
// GetNotificationsWithEntities retrieves notifications with their related entities
-
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) {
+
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) {
var conditions []string
var args []any
···
}
// GetNotifications retrieves notifications with filters
-
func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) {
+
func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) {
return GetNotificationsPaginated(e, pagination.FirstPage(), filters...)
}
-
func CountNotifications(e Execer, filters ...filter) (int64, error) {
+
func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
}
func MarkNotificationRead(e Execer, notificationID int64, userDID string) error {
-
idFilter := FilterEq("id", notificationID)
-
recipientFilter := FilterEq("recipient_did", userDID)
+
idFilter := orm.FilterEq("id", notificationID)
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
query := fmt.Sprintf(`
UPDATE notifications
···
}
func MarkAllNotificationsRead(e Execer, userDID string) error {
-
recipientFilter := FilterEq("recipient_did", userDID)
-
readFilter := FilterEq("read", 0)
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
+
readFilter := orm.FilterEq("read", 0)
query := fmt.Sprintf(`
UPDATE notifications
···
}
func DeleteNotification(e Execer, notificationID int64, userDID string) error {
-
idFilter := FilterEq("id", notificationID)
-
recipientFilter := FilterEq("recipient_did", userDID)
+
idFilter := orm.FilterEq("id", notificationID)
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
query := fmt.Sprintf(`
DELETE FROM notifications
···
}
func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) {
-
prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid))
+
prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid))
if err != nil {
return nil, err
}
···
return p, nil
}
-
func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) {
+
func GetNotificationPreferences(e Execer, filters ...orm.Filter) (map[syntax.DID]*models.NotificationPreferences, error) {
prefsMap := make(map[syntax.DID]*models.NotificationPreferences)
var conditions []string
···
func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error {
cutoff := time.Now().Add(-olderThan)
-
createdFilter := FilterLte("created", cutoff)
+
createdFilter := orm.FilterLte("created", cutoff)
query := fmt.Sprintf(`
DELETE FROM notifications
+6 -5
appview/db/pipeline.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
-
func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) {
+
func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) {
var pipelines []models.Pipeline
var conditions []string
···
// this is a mega query, but the most useful one:
// get N pipelines, for each one get the latest status of its N workflows
-
func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
+
func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
var conditions []string
var args []any
for _, filter := range filters {
-
filter.key = "p." + filter.key // the table is aliased in the query to `p`
+
filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
conditions = append(conditions, filter.Condition())
args = append(args, filter.Arg()...)
}
···
conditions = nil
args = nil
for _, p := range pipelines {
-
knotFilter := FilterEq("pipeline_knot", p.Knot)
-
rkeyFilter := FilterEq("pipeline_rkey", p.Rkey)
+
knotFilter := orm.FilterEq("pipeline_knot", p.Knot)
+
rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey)
conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
args = append(args, p.Knot)
args = append(args, p.Rkey)
+11 -5
appview/db/profile.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
const TimeframeMonths = 7
···
issues, err := GetIssues(
e,
-
FilterEq("did", forDid),
-
FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
+
orm.FilterEq("did", forDid),
+
orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
)
if err != nil {
return nil, fmt.Errorf("error getting issues by owner did: %w", err)
···
*items = append(*items, &issue)
}
-
repos, err := GetRepos(e, 0, FilterEq("did", forDid))
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid))
if err != nil {
return nil, fmt.Errorf("error getting all repos by did: %w", err)
}
···
return tx.Commit()
}
-
func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) {
+
func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
if err != nil {
return nil, err
}
+
defer rows.Close()
profileMap := make(map[string]*models.Profile)
for rows.Next() {
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
idxs := make(map[string]int)
for did := range profileMap {
idxs[did] = 0
···
if err != nil {
return nil, err
}
+
defer rows.Close()
+
idxs = make(map[string]int)
for did := range profileMap {
idxs[did] = 0
···
}
// ensure all pinned repos are either own repos or collaborating repos
-
repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did))
if err != nil {
log.Printf("getting repos for %s: %s", profile.Did, err)
}
+69 -24
appview/db/pulls.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func NewPull(tx *sql.Tx, pull *models.Pull) error {
···
insert into pull_submissions (pull_at, round_number, patch, combined, source_rev)
values (?, ?, ?, ?, ?)
`, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev)
-
return err
+
if err != nil {
+
return err
+
}
+
+
if err := putReferences(tx, pull.AtUri(), pull.References); err != nil {
+
return fmt.Errorf("put reference_links: %w", err)
+
}
+
+
return nil
}
func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) {
···
return pullId - 1, err
}
-
func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) {
+
func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) {
pulls := make(map[syntax.ATURI]*models.Pull)
var conditions []string
···
for _, p := range pulls {
pullAts = append(pullAts, p.AtUri())
}
-
submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts))
+
submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts))
if err != nil {
return nil, fmt.Errorf("failed to get submissions: %w", err)
}
···
}
// collect allLabels for each issue
-
allLabels, err := GetLabels(e, FilterIn("subject", pullAts))
+
allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts))
if err != nil {
return nil, fmt.Errorf("failed to query labels: %w", err)
}
···
sourceAts = append(sourceAts, *p.PullSource.RepoAt)
}
}
-
sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts))
+
sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts))
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, fmt.Errorf("failed to get source repos: %w", err)
}
···
}
}
+
allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts))
+
if err != nil {
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
+
}
+
for pullAt, references := range allReferences {
+
if pull, ok := pulls[pullAt]; ok {
+
pull.References = references
+
}
+
}
+
orderedByPullId := []*models.Pull{}
for _, p := range pulls {
orderedByPullId = append(orderedByPullId, p)
···
return orderedByPullId, nil
}
-
func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) {
+
func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) {
return GetPullsWithLimit(e, 0, filters...)
}
func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
var ids []int64
-
var filters []filter
-
filters = append(filters, FilterEq("state", opts.State))
+
var filters []orm.Filter
+
filters = append(filters, orm.FilterEq("state", opts.State))
if opts.RepoAt != "" {
-
filters = append(filters, FilterEq("repo_at", opts.RepoAt))
+
filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
}
var conditions []string
···
}
func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
-
pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId))
+
pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId))
if err != nil {
return nil, err
}
···
}
// mapping from pull -> pull submissions
-
func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
+
func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
// Get comments for all submissions using GetPullComments
submissionIds := slices.Collect(maps.Keys(submissionMap))
-
comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds))
+
comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
if err != nil {
-
return nil, err
+
return nil, fmt.Errorf("failed to get pull comments: %w", err)
}
for _, comment := range comments {
if submission, ok := submissionMap[comment.SubmissionId]; ok {
···
return m, nil
}
-
func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) {
+
func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
}
defer rows.Close()
-
var comments []models.PullComment
+
commentMap := make(map[string]*models.PullComment)
for rows.Next() {
var comment models.PullComment
var createdAt string
···
comment.Created = t
}
-
comments = append(comments, comment)
+
atUri := comment.AtUri().String()
+
commentMap[atUri] = &comment
}
if err := rows.Err(); err != nil {
return nil, err
}
+
// collect references for each comment
+
commentAts := slices.Collect(maps.Keys(commentMap))
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
+
if err != nil {
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
+
}
+
for commentAt, references := range allReferencs {
+
if comment, ok := commentMap[commentAt.String()]; ok {
+
comment.References = references
+
}
+
}
+
+
var comments []models.PullComment
+
for _, c := range commentMap {
+
comments = append(comments, *c)
+
}
+
+
sort.Slice(comments, func(i, j int) bool {
+
return comments[i].Created.Before(comments[j].Created)
+
})
+
return comments, nil
}
···
return pulls, nil
}
-
func NewPullComment(e Execer, comment *models.PullComment) (int64, error) {
+
func NewPullComment(tx *sql.Tx, comment *models.PullComment) (int64, error) {
query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)`
-
res, err := e.Exec(
+
res, err := tx.Exec(
query,
comment.OwnerDid,
comment.RepoAt,
···
i, err := res.LastInsertId()
if err != nil {
return 0, err
+
}
+
+
if err := putReferences(tx, comment.AtUri(), comment.References); err != nil {
+
return 0, fmt.Errorf("put reference_links: %w", err)
}
return i, nil
···
return err
}
-
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error {
+
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error {
var conditions []string
var args []any
···
// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
// otherwise submissions are immutable
-
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error {
+
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error {
var conditions []string
var args []any
···
func GetStack(e Execer, stackId string) (models.Stack, error) {
unorderedPulls, err := GetPulls(
e,
-
FilterEq("stack_id", stackId),
-
FilterNotEq("state", models.PullDeleted),
+
orm.FilterEq("stack_id", stackId),
+
orm.FilterNotEq("state", models.PullDeleted),
)
if err != nil {
return nil, err
···
func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
pulls, err := GetPulls(
e,
-
FilterEq("stack_id", stackId),
-
FilterEq("state", models.PullDeleted),
+
orm.FilterEq("stack_id", stackId),
+
orm.FilterEq("state", models.PullDeleted),
)
if err != nil {
return nil, err
+2 -1
appview/db/punchcard.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
// this adds to the existing count
···
return err
}
-
func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) {
+
func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) {
punchcard := &models.Punchcard{}
now := time.Now()
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
+463
appview/db/reference.go
···
+
package db
+
+
import (
+
"database/sql"
+
"fmt"
+
"strings"
+
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
"tangled.org/core/api/tangled"
+
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
+
)
+
+
// ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs.
+
// It will ignore missing refLinks.
+
func ValidateReferenceLinks(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
+
var (
+
issueRefs []models.ReferenceLink
+
pullRefs []models.ReferenceLink
+
)
+
for _, ref := range refLinks {
+
switch ref.Kind {
+
case models.RefKindIssue:
+
issueRefs = append(issueRefs, ref)
+
case models.RefKindPull:
+
pullRefs = append(pullRefs, ref)
+
}
+
}
+
issueUris, err := findIssueReferences(e, issueRefs)
+
if err != nil {
+
return nil, fmt.Errorf("find issue references: %w", err)
+
}
+
pullUris, err := findPullReferences(e, pullRefs)
+
if err != nil {
+
return nil, fmt.Errorf("find pull references: %w", err)
+
}
+
+
return append(issueUris, pullUris...), nil
+
}
+
+
func findIssueReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
+
if len(refLinks) == 0 {
+
return nil, nil
+
}
+
vals := make([]string, len(refLinks))
+
args := make([]any, 0, len(refLinks)*4)
+
for i, ref := range refLinks {
+
vals[i] = "(?, ?, ?, ?)"
+
args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId)
+
}
+
query := fmt.Sprintf(
+
`with input(owner_did, name, issue_id, comment_id) as (
+
values %s
+
)
+
select
+
i.did, i.rkey,
+
c.did, c.rkey
+
from input inp
+
join repos r
+
on r.did = inp.owner_did
+
and r.name = inp.name
+
join issues i
+
on i.repo_at = r.at_uri
+
and i.issue_id = inp.issue_id
+
left join issue_comments c
+
on inp.comment_id is not null
+
and c.issue_at = i.at_uri
+
and c.id = inp.comment_id
+
`,
+
strings.Join(vals, ","),
+
)
+
rows, err := e.Query(query, args...)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
+
var uris []syntax.ATURI
+
+
for rows.Next() {
+
// Scan rows
+
var issueOwner, issueRkey string
+
var commentOwner, commentRkey sql.NullString
+
var uri syntax.ATURI
+
if err := rows.Scan(&issueOwner, &issueRkey, &commentOwner, &commentRkey); err != nil {
+
return nil, err
+
}
+
if commentOwner.Valid && commentRkey.Valid {
+
uri = syntax.ATURI(fmt.Sprintf(
+
"at://%s/%s/%s",
+
commentOwner.String,
+
tangled.RepoIssueCommentNSID,
+
commentRkey.String,
+
))
+
} else {
+
uri = syntax.ATURI(fmt.Sprintf(
+
"at://%s/%s/%s",
+
issueOwner,
+
tangled.RepoIssueNSID,
+
issueRkey,
+
))
+
}
+
uris = append(uris, uri)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
+
return uris, nil
+
}
+
+
func findPullReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
+
if len(refLinks) == 0 {
+
return nil, nil
+
}
+
vals := make([]string, len(refLinks))
+
args := make([]any, 0, len(refLinks)*4)
+
for i, ref := range refLinks {
+
vals[i] = "(?, ?, ?, ?)"
+
args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId)
+
}
+
query := fmt.Sprintf(
+
`with input(owner_did, name, pull_id, comment_id) as (
+
values %s
+
)
+
select
+
p.owner_did, p.rkey,
+
c.comment_at
+
from input inp
+
join repos r
+
on r.did = inp.owner_did
+
and r.name = inp.name
+
join pulls p
+
on p.repo_at = r.at_uri
+
and p.pull_id = inp.pull_id
+
left join pull_comments c
+
on inp.comment_id is not null
+
and c.repo_at = r.at_uri and c.pull_id = p.pull_id
+
and c.id = inp.comment_id
+
`,
+
strings.Join(vals, ","),
+
)
+
rows, err := e.Query(query, args...)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
+
var uris []syntax.ATURI
+
+
for rows.Next() {
+
// Scan rows
+
var pullOwner, pullRkey string
+
var commentUri sql.NullString
+
var uri syntax.ATURI
+
if err := rows.Scan(&pullOwner, &pullRkey, &commentUri); err != nil {
+
return nil, err
+
}
+
if commentUri.Valid {
+
// no-op
+
uri = syntax.ATURI(commentUri.String)
+
} else {
+
uri = syntax.ATURI(fmt.Sprintf(
+
"at://%s/%s/%s",
+
pullOwner,
+
tangled.RepoPullNSID,
+
pullRkey,
+
))
+
}
+
uris = append(uris, uri)
+
}
+
return uris, nil
+
}
+
+
func putReferences(tx *sql.Tx, fromAt syntax.ATURI, references []syntax.ATURI) error {
+
err := deleteReferences(tx, fromAt)
+
if err != nil {
+
return fmt.Errorf("delete old reference_links: %w", err)
+
}
+
if len(references) == 0 {
+
return nil
+
}
+
+
values := make([]string, 0, len(references))
+
args := make([]any, 0, len(references)*2)
+
for _, ref := range references {
+
values = append(values, "(?, ?)")
+
args = append(args, fromAt, ref)
+
}
+
_, err = tx.Exec(
+
fmt.Sprintf(
+
`insert into reference_links (from_at, to_at)
+
values %s`,
+
strings.Join(values, ","),
+
),
+
args...,
+
)
+
if err != nil {
+
return fmt.Errorf("insert new reference_links: %w", err)
+
}
+
return nil
+
}
+
+
func deleteReferences(tx *sql.Tx, fromAt syntax.ATURI) error {
+
_, err := tx.Exec(`delete from reference_links where from_at = ?`, fromAt)
+
return err
+
}
+
+
func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) {
+
var (
+
conditions []string
+
args []any
+
)
+
for _, filter := range filters {
+
conditions = append(conditions, filter.Condition())
+
args = append(args, filter.Arg()...)
+
}
+
+
whereClause := ""
+
if conditions != nil {
+
whereClause = " where " + strings.Join(conditions, " and ")
+
}
+
+
rows, err := e.Query(
+
fmt.Sprintf(
+
`select from_at, to_at from reference_links %s`,
+
whereClause,
+
),
+
args...,
+
)
+
if err != nil {
+
return nil, fmt.Errorf("query reference_links: %w", err)
+
}
+
defer rows.Close()
+
+
result := make(map[syntax.ATURI][]syntax.ATURI)
+
+
for rows.Next() {
+
var from, to syntax.ATURI
+
if err := rows.Scan(&from, &to); err != nil {
+
return nil, fmt.Errorf("scan row: %w", err)
+
}
+
+
result[from] = append(result[from], to)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
+
return result, nil
+
}
+
+
func GetBacklinks(e Execer, target syntax.ATURI) ([]models.RichReferenceLink, error) {
+
rows, err := e.Query(
+
`select from_at from reference_links
+
where to_at = ?`,
+
target,
+
)
+
if err != nil {
+
return nil, fmt.Errorf("query backlinks: %w", err)
+
}
+
defer rows.Close()
+
+
var (
+
backlinks []models.RichReferenceLink
+
backlinksMap = make(map[string][]syntax.ATURI)
+
)
+
for rows.Next() {
+
var from syntax.ATURI
+
if err := rows.Scan(&from); err != nil {
+
return nil, fmt.Errorf("scan row: %w", err)
+
}
+
nsid := from.Collection().String()
+
backlinksMap[nsid] = append(backlinksMap[nsid], from)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
+
var ls []models.RichReferenceLink
+
ls, err = getIssueBacklinks(e, backlinksMap[tangled.RepoIssueNSID])
+
if err != nil {
+
return nil, fmt.Errorf("get issue backlinks: %w", err)
+
}
+
backlinks = append(backlinks, ls...)
+
ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.RepoIssueCommentNSID])
+
if err != nil {
+
return nil, fmt.Errorf("get issue_comment backlinks: %w", err)
+
}
+
backlinks = append(backlinks, ls...)
+
ls, err = getPullBacklinks(e, backlinksMap[tangled.RepoPullNSID])
+
if err != nil {
+
return nil, fmt.Errorf("get pull backlinks: %w", err)
+
}
+
backlinks = append(backlinks, ls...)
+
ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.RepoPullCommentNSID])
+
if err != nil {
+
return nil, fmt.Errorf("get pull_comment backlinks: %w", err)
+
}
+
backlinks = append(backlinks, ls...)
+
+
return backlinks, nil
+
}
+
+
func getIssueBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
+
if len(aturis) == 0 {
+
return nil, nil
+
}
+
vals := make([]string, len(aturis))
+
args := make([]any, 0, len(aturis)*2)
+
for i, aturi := range aturis {
+
vals[i] = "(?, ?)"
+
did := aturi.Authority().String()
+
rkey := aturi.RecordKey().String()
+
args = append(args, did, rkey)
+
}
+
rows, err := e.Query(
+
fmt.Sprintf(
+
`select r.did, r.name, i.issue_id, i.title, i.open
+
from issues i
+
join repos r
+
on r.at_uri = i.repo_at
+
where (i.did, i.rkey) in (%s)`,
+
strings.Join(vals, ","),
+
),
+
args...,
+
)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
var refLinks []models.RichReferenceLink
+
for rows.Next() {
+
var l models.RichReferenceLink
+
l.Kind = models.RefKindIssue
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil {
+
return nil, err
+
}
+
refLinks = append(refLinks, l)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
return refLinks, nil
+
}
+
+
func getIssueCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
+
if len(aturis) == 0 {
+
return nil, nil
+
}
+
filter := orm.FilterIn("c.at_uri", aturis)
+
rows, err := e.Query(
+
fmt.Sprintf(
+
`select r.did, r.name, i.issue_id, c.id, i.title, i.open
+
from issue_comments c
+
join issues i
+
on i.at_uri = c.issue_at
+
join repos r
+
on r.at_uri = i.repo_at
+
where %s`,
+
filter.Condition(),
+
),
+
filter.Arg()...,
+
)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
var refLinks []models.RichReferenceLink
+
for rows.Next() {
+
var l models.RichReferenceLink
+
l.Kind = models.RefKindIssue
+
l.CommentId = new(int)
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil {
+
return nil, err
+
}
+
refLinks = append(refLinks, l)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
return refLinks, nil
+
}
+
+
func getPullBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
+
if len(aturis) == 0 {
+
return nil, nil
+
}
+
vals := make([]string, len(aturis))
+
args := make([]any, 0, len(aturis)*2)
+
for i, aturi := range aturis {
+
vals[i] = "(?, ?)"
+
did := aturi.Authority().String()
+
rkey := aturi.RecordKey().String()
+
args = append(args, did, rkey)
+
}
+
rows, err := e.Query(
+
fmt.Sprintf(
+
`select r.did, r.name, p.pull_id, p.title, p.state
+
from pulls p
+
join repos r
+
on r.at_uri = p.repo_at
+
where (p.owner_did, p.rkey) in (%s)`,
+
strings.Join(vals, ","),
+
),
+
args...,
+
)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
var refLinks []models.RichReferenceLink
+
for rows.Next() {
+
var l models.RichReferenceLink
+
l.Kind = models.RefKindPull
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil {
+
return nil, err
+
}
+
refLinks = append(refLinks, l)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
return refLinks, nil
+
}
+
+
func getPullCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
+
if len(aturis) == 0 {
+
return nil, nil
+
}
+
filter := orm.FilterIn("c.comment_at", aturis)
+
rows, err := e.Query(
+
fmt.Sprintf(
+
`select r.did, r.name, p.pull_id, c.id, p.title, p.state
+
from repos r
+
join pulls p
+
on r.at_uri = p.repo_at
+
join pull_comments c
+
on r.at_uri = c.repo_at and p.pull_id = c.pull_id
+
where %s`,
+
filter.Condition(),
+
),
+
filter.Arg()...,
+
)
+
if err != nil {
+
return nil, err
+
}
+
defer rows.Close()
+
var refLinks []models.RichReferenceLink
+
for rows.Next() {
+
var l models.RichReferenceLink
+
l.Kind = models.RefKindPull
+
l.CommentId = new(int)
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil {
+
return nil, err
+
}
+
refLinks = append(refLinks, l)
+
}
+
if err := rows.Err(); err != nil {
+
return nil, fmt.Errorf("iterate rows: %w", err)
+
}
+
return refLinks, nil
+
}
+5 -3
appview/db/registration.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
-
func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) {
+
func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) {
var registrations []models.Registration
var conditions []string
···
if err != nil {
return nil, err
}
+
defer rows.Close()
for rows.Next() {
var createdAt string
···
return registrations, nil
}
-
func MarkRegistered(e Execer, filters ...filter) error {
+
func MarkRegistered(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func DeleteKnot(e Execer, filters ...filter) error {
+
func DeleteKnot(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
+29 -34
appview/db/repos.go
···
"time"
"github.com/bluesky-social/indigo/atproto/syntax"
-
securejoin "github.com/cyphar/filepath-securejoin"
-
"tangled.org/core/api/tangled"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
-
type Repo struct {
-
Id int64
-
Did string
-
Name string
-
Knot string
-
Rkey string
-
Created time.Time
-
Description string
-
Spindle string
-
-
// optionally, populate this when querying for reverse mappings
-
RepoStats *models.RepoStats
-
-
// optional
-
Source string
-
}
-
-
func (r Repo) RepoAt() syntax.ATURI {
-
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey))
-
}
-
-
func (r Repo) DidSlashRepo() string {
-
p, _ := securejoin.SecureJoin(r.Did, r.Name)
-
return p
-
}
-
-
func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) {
+
func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) {
repoMap := make(map[syntax.ATURI]*models.Repo)
var conditions []string
···
limitClause,
)
rows, err := e.Query(repoQuery, args...)
-
if err != nil {
return nil, fmt.Errorf("failed to execute repo query: %w ", err)
}
+
defer rows.Close()
for rows.Next() {
var repo models.Repo
···
if err != nil {
return nil, fmt.Errorf("failed to execute labels query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat, labelat string
if err := rows.Scan(&repoat, &labelat); err != nil {
···
from repo_languages
where repo_at in (%s)
and is_default_ref = 1
+
and language <> ''
)
where rn = 1
`,
···
if err != nil {
return nil, fmt.Errorf("failed to execute lang query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat, lang string
if err := rows.Scan(&repoat, &lang); err != nil {
···
if err != nil {
return nil, fmt.Errorf("failed to execute star-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var count int
···
if err != nil {
return nil, fmt.Errorf("failed to execute issue-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var open, closed int
···
if err != nil {
return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err)
}
+
defer rows.Close()
+
for rows.Next() {
var repoat string
var open, merged, closed, deleted int
···
}
// helper to get exactly one repo
-
func GetRepo(e Execer, filters ...filter) (*models.Repo, error) {
+
func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) {
repos, err := GetRepos(e, 0, filters...)
if err != nil {
return nil, err
···
return &repos[0], nil
}
-
func CountRepos(e Execer, filters ...filter) (int64, error) {
+
func CountRepos(e Execer, filters ...orm.Filter) (int64, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
return nullableSource.String, nil
}
+
func GetRepoSourceRepo(e Execer, repoAt syntax.ATURI) (*models.Repo, error) {
+
source, err := GetRepoSource(e, repoAt)
+
if source == "" || errors.Is(err, sql.ErrNoRows) {
+
return nil, nil
+
}
+
if err != nil {
+
return nil, err
+
}
+
return GetRepoByAtUri(e, source)
+
}
+
func GetForksByDid(e Execer, did string) ([]models.Repo, error) {
var repos []models.Repo
···
return err
}
-
func UnsubscribeLabel(e Execer, filters ...filter) error {
+
func UnsubscribeLabel(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) {
+
func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) {
var conditions []string
var args []any
for _, filter := range filters {
+6 -5
appview/db/spindle.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
-
func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) {
+
func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) {
var spindles []models.Spindle
var conditions []string
···
return err
}
-
func VerifySpindle(e Execer, filters ...filter) (int64, error) {
+
func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
return res.RowsAffected()
}
-
func DeleteSpindle(e Execer, filters ...filter) error {
+
func DeleteSpindle(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func RemoveSpindleMember(e Execer, filters ...filter) error {
+
func RemoveSpindleMember(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
···
return err
}
-
func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) {
+
func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) {
var members []models.SpindleMember
var conditions []string
+6 -4
appview/db/star.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func AddStar(e Execer, star *models.Star) error {
···
// GetRepoStars return a list of stars each holding target repository.
// If there isn't known repo with starred at-uri, those stars will be ignored.
-
func GetRepoStars(e Execer, limit int, filters ...filter) ([]models.RepoStar, error) {
+
func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
if err != nil {
return nil, err
}
+
defer rows.Close()
starMap := make(map[string][]models.Star)
for rows.Next() {
···
return nil, nil
}
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", args))
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args))
if err != nil {
return nil, err
}
···
return repoStars, nil
}
-
func CountStars(e Execer, filters ...filter) (int64, error) {
+
func CountStars(e Execer, filters ...orm.Filter) (int64, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
}
// get full repo data
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris))
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris))
if err != nil {
return nil, err
}
+4 -3
appview/db/strings.go
···
"time"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func AddString(e Execer, s models.String) error {
···
return err
}
-
func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) {
+
func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) {
var all []models.String
var conditions []string
···
return all, nil
}
-
func CountStrings(e Execer, filters ...filter) (int64, error) {
+
func CountStrings(e Execer, filters ...orm.Filter) (int64, error) {
var conditions []string
var args []any
for _, filter := range filters {
···
return count, nil
}
-
func DeleteString(e Execer, filters ...filter) error {
+
func DeleteString(e Execer, filters ...orm.Filter) error {
var conditions []string
var args []any
for _, filter := range filters {
+9 -8
appview/db/timeline.go
···
"github.com/bluesky-social/indigo/atproto/syntax"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
// TODO: this gathers heterogenous events from different sources and aggregates
···
}
func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
-
filters := make([]filter, 0)
+
filters := make([]orm.Filter, 0)
if userIsFollowing != nil {
-
filters = append(filters, FilterIn("did", userIsFollowing))
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
}
repos, err := GetRepos(e, limit, filters...)
···
var origRepos []models.Repo
if args != nil {
-
origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args))
+
origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args))
}
if err != nil {
return nil, err
···
}
func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
-
filters := make([]filter, 0)
+
filters := make([]orm.Filter, 0)
if userIsFollowing != nil {
-
filters = append(filters, FilterIn("did", userIsFollowing))
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
}
stars, err := GetRepoStars(e, limit, filters...)
···
}
func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
-
filters := make([]filter, 0)
+
filters := make([]orm.Filter, 0)
if userIsFollowing != nil {
-
filters = append(filters, FilterIn("user_did", userIsFollowing))
+
filters = append(filters, orm.FilterIn("user_did", userIsFollowing))
}
follows, err := GetFollows(e, limit, filters...)
···
return nil, nil
}
-
profiles, err := GetProfiles(e, FilterIn("did", subjects))
+
profiles, err := GetProfiles(e, orm.FilterIn("did", subjects))
if err != nil {
return nil, err
}
+7 -12
appview/email/email.go
···
import (
"fmt"
"net"
-
"regexp"
+
"net/mail"
"strings"
"github.com/resend/resend-go/v2"
···
}
func IsValidEmail(email string) bool {
-
// Basic length check
-
if len(email) < 3 || len(email) > 254 {
+
// Reject whitespace (ParseAddress normalizes it away)
+
if strings.ContainsAny(email, " \t\n\r") {
return false
}
-
// Regular expression for email validation (RFC 5322 compliant)
-
pattern := `^[a-zA-Z0-9.!#$%&'*+/=?^_\x60{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$`
-
-
// Compile regex
-
regex := regexp.MustCompile(pattern)
-
-
// Check if email matches regex pattern
-
if !regex.MatchString(email) {
+
// Use stdlib RFC 5322 parser
+
addr, err := mail.ParseAddress(email)
+
if err != nil {
return false
}
// Split email into local and domain parts
-
parts := strings.Split(email, "@")
+
parts := strings.Split(addr.Address, "@")
domain := parts[1]
mx, err := net.LookupMX(domain)
+53
appview/email/email_test.go
···
+
package email
+
+
import (
+
"testing"
+
)
+
+
// TestIsValidEmail exercises IsValidEmail with a table of valid and
// invalid addresses, grouped by which validation stage rejects them.
//
// NOTE(review): IsValidEmail performs a live net.LookupMX, so this test
// depends on working DNS resolution — it may fail or stall in offline or
// sandboxed CI. Confirm, and consider a testing.Short() guard or injecting
// the MX lookup. Also confirm the "valid" example.com/.org/.net cases pass
// end-to-end: those RFC 2606 domains' MX records are not guaranteed, and
// the MX-fallback behavior is not visible here.
func TestIsValidEmail(t *testing.T) {
	tests := []struct {
		name  string
		email string
		want  bool
	}{
		// Valid emails using RFC 2606 reserved domains
		{"standard email", "user@example.com", true},
		{"single char local", "a@example.com", true},
		{"dot in middle", "first.last@example.com", true},
		{"multiple dots", "a.b.c@example.com", true},
		{"plus tag", "user+tag@example.com", true},
		{"numbers", "user123@example.com", true},
		{"example.org", "user@example.org", true},
		{"example.net", "user@example.net", true},

		// Invalid format - rejected by mail.ParseAddress
		{"empty string", "", false},
		{"no at sign", "userexample.com", false},
		{"no domain", "user@", false},
		{"no local part", "@example.com", false},
		{"double at", "user@@example.com", false},
		{"just at sign", "@", false},
		{"leading dot", ".user@example.com", false},
		{"trailing dot", "user.@example.com", false},
		{"consecutive dots", "user..name@example.com", false},

		// Whitespace - rejected before parsing
		{"space in local", "user @example.com", false},
		{"space in domain", "user@ example.com", false},
		{"tab", "user\t@example.com", false},
		{"newline", "user\n@example.com", false},

		// MX lookup - using RFC 2606 reserved TLDs (guaranteed no MX)
		{"invalid TLD", "user@example.invalid", false},
		{"test TLD", "user@mail.test", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := IsValidEmail(tt.email)
			if got != tt.want {
				t.Errorf("IsValidEmail(%q) = %v, want %v", tt.email, got, tt.want)
			}
		})
	}
}
+47 -29
appview/ingester.go
···
"tangled.org/core/appview/serververify"
"tangled.org/core/appview/validator"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
)
···
err = db.AddArtifact(i.Db, artifact)
case jmodels.CommitOperationDelete:
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
}
if err != nil {
···
err = db.UpsertProfile(tx, &profile)
case jmodels.CommitOperationDelete:
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
}
if err != nil {
···
// get record from db first
members, err := db.GetSpindleMembers(
ddb,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", did),
+
orm.FilterEq("rkey", rkey),
)
if err != nil || len(members) != 1 {
return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members))
···
// remove record by rkey && update enforcer
if err = db.RemoveSpindleMember(
tx,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", did),
+
orm.FilterEq("rkey", rkey),
); err != nil {
return fmt.Errorf("failed to remove from db: %w", err)
}
···
// get record from db first
spindles, err := db.GetSpindles(
ddb,
-
db.FilterEq("owner", did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", did),
+
orm.FilterEq("instance", instance),
)
if err != nil || len(spindles) != 1 {
return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles))
···
// remove spindle members first
err = db.RemoveSpindleMember(
tx,
-
db.FilterEq("owner", did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", did),
+
orm.FilterEq("instance", instance),
)
if err != nil {
return err
···
err = db.DeleteSpindle(
tx,
-
db.FilterEq("owner", did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", did),
+
orm.FilterEq("instance", instance),
)
if err != nil {
return err
···
case jmodels.CommitOperationDelete:
if err := db.DeleteString(
ddb,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", did),
+
orm.FilterEq("rkey", rkey),
); err != nil {
l.Error("failed to delete", "err", err)
return fmt.Errorf("failed to delete string record: %w", err)
···
// get record from db first
registrations, err := db.GetRegistrations(
ddb,
-
db.FilterEq("domain", domain),
-
db.FilterEq("did", did),
+
orm.FilterEq("domain", domain),
+
orm.FilterEq("did", did),
)
if err != nil {
return fmt.Errorf("failed to get registration: %w", err)
···
err = db.DeleteKnot(
tx,
-
db.FilterEq("did", did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
return err
···
return nil
case jmodels.CommitOperationDelete:
+
tx, err := ddb.BeginTx(ctx, nil)
+
if err != nil {
+
l.Error("failed to begin transaction", "err", err)
+
return err
+
}
+
defer tx.Rollback()
+
if err := db.DeleteIssues(
-
ddb,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
tx,
+
did,
+
rkey,
); err != nil {
l.Error("failed to delete", "err", err)
return fmt.Errorf("failed to delete issue record: %w", err)
+
}
+
if err := tx.Commit(); err != nil {
+
l.Error("failed to commit txn", "err", err)
+
return err
}
return nil
···
return fmt.Errorf("failed to validate comment: %w", err)
}
-
_, err = db.AddIssueComment(ddb, *comment)
+
tx, err := ddb.Begin()
+
if err != nil {
+
return fmt.Errorf("failed to start transaction: %w", err)
+
}
+
defer tx.Rollback()
+
+
_, err = db.AddIssueComment(tx, *comment)
if err != nil {
return fmt.Errorf("failed to create issue comment: %w", err)
}
-
return nil
+
return tx.Commit()
case jmodels.CommitOperationDelete:
if err := db.DeleteIssueComments(
ddb,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", did),
+
orm.FilterEq("rkey", rkey),
); err != nil {
return fmt.Errorf("failed to delete issue comment record: %w", err)
}
···
case jmodels.CommitOperationDelete:
if err := db.DeleteLabelDefinition(
ddb,
-
db.FilterEq("did", did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", did),
+
orm.FilterEq("rkey", rkey),
); err != nil {
return fmt.Errorf("failed to delete labeldef record: %w", err)
}
···
var repo *models.Repo
switch collection {
case tangled.RepoIssueNSID:
-
i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject))
+
i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject))
if err != nil || len(i) != 1 {
return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i))
}
···
return fmt.Errorf("unsupport label subject: %s", collection)
-
actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels))
+
actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels))
if err != nil {
return fmt.Errorf("failed to build label application ctx: %w", err)
+152 -135
appview/issues/issues.go
···
"fmt"
"log/slog"
"net/http"
-
"slices"
"time"
comatproto "github.com/bluesky-social/indigo/api/atproto"
···
"tangled.org/core/appview/config"
"tangled.org/core/appview/db"
issues_indexer "tangled.org/core/appview/indexer/issues"
+
"tangled.org/core/appview/mentions"
"tangled.org/core/appview/models"
"tangled.org/core/appview/notify"
"tangled.org/core/appview/oauth"
"tangled.org/core/appview/pages"
-
"tangled.org/core/appview/pages/markup"
+
"tangled.org/core/appview/pages/repoinfo"
"tangled.org/core/appview/pagination"
"tangled.org/core/appview/reporesolver"
"tangled.org/core/appview/validator"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
+
"tangled.org/core/rbac"
"tangled.org/core/tid"
)
type Issues struct {
-
oauth *oauth.OAuth
-
repoResolver *reporesolver.RepoResolver
-
pages *pages.Pages
-
idResolver *idresolver.Resolver
-
db *db.DB
-
config *config.Config
-
notifier notify.Notifier
-
logger *slog.Logger
-
validator *validator.Validator
-
indexer *issues_indexer.Indexer
+
oauth *oauth.OAuth
+
repoResolver *reporesolver.RepoResolver
+
enforcer *rbac.Enforcer
+
pages *pages.Pages
+
idResolver *idresolver.Resolver
+
mentionsResolver *mentions.Resolver
+
db *db.DB
+
config *config.Config
+
notifier notify.Notifier
+
logger *slog.Logger
+
validator *validator.Validator
+
indexer *issues_indexer.Indexer
}
func New(
oauth *oauth.OAuth,
repoResolver *reporesolver.RepoResolver,
+
enforcer *rbac.Enforcer,
pages *pages.Pages,
idResolver *idresolver.Resolver,
+
mentionsResolver *mentions.Resolver,
db *db.DB,
config *config.Config,
notifier notify.Notifier,
···
logger *slog.Logger,
) *Issues {
return &Issues{
-
oauth: oauth,
-
repoResolver: repoResolver,
-
pages: pages,
-
idResolver: idResolver,
-
db: db,
-
config: config,
-
notifier: notifier,
-
logger: logger,
-
validator: validator,
-
indexer: indexer,
+
oauth: oauth,
+
repoResolver: repoResolver,
+
enforcer: enforcer,
+
pages: pages,
+
idResolver: idResolver,
+
mentionsResolver: mentionsResolver,
+
db: db,
+
config: config,
+
notifier: notifier,
+
logger: logger,
+
validator: validator,
+
indexer: indexer,
}
}
···
userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri())
}
+
backlinks, err := db.GetBacklinks(rp.db, issue.AtUri())
+
if err != nil {
+
l.Error("failed to fetch backlinks", "err", err)
+
rp.pages.Error503(w)
+
return
+
}
+
labelDefs, err := db.GetLabelDefinitions(
rp.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", tangled.RepoIssueNSID),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
)
if err != nil {
l.Error("failed to fetch labels", "err", err)
···
rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
CommentList: issue.CommentList(),
+
Backlinks: backlinks,
OrderedReactionKinds: models.OrderedReactionKinds,
Reactions: reactionMap,
UserReacted: userReactions,
···
func (rp *Issues) EditIssue(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "EditIssue")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
case http.MethodGet:
rp.pages.EditIssueFragment(w, pages.EditIssueParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
})
case http.MethodPost:
···
newIssue := issue
newIssue.Title = r.FormValue("title")
newIssue.Body = r.FormValue("body")
+
newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body)
if err := rp.validator.ValidateIssue(newIssue); err != nil {
l.Error("validation error", "err", err)
···
l := rp.logger.With("handler", "DeleteIssue")
noticeId := "issue-actions-error"
-
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
if err != nil {
l.Error("failed to get repo and knot", "err", err)
···
}
l = l.With("did", issue.Did, "rkey", issue.Rkey)
+
tx, err := rp.db.Begin()
+
if err != nil {
+
l.Error("failed to start transaction", "err", err)
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
+
return
+
}
+
defer tx.Rollback()
+
// delete from PDS
client, err := rp.oauth.AuthorizedClient(r)
if err != nil {
···
}
// delete from db
-
if err := db.DeleteIssues(rp.db, db.FilterEq("id", issue.Id)); err != nil {
+
if err := db.DeleteIssues(tx, issue.Did, issue.Rkey); err != nil {
l.Error("failed to delete issue", "err", err)
rp.pages.Notice(w, noticeId, "Failed to delete issue.")
return
}
+
tx.Commit()
rp.notifier.DeleteIssue(r.Context(), issue)
// return to all issues page
-
rp.pages.HxRedirect(w, "/"+f.RepoInfo(user).FullName()+"/issues")
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
rp.pages.HxRedirect(w, "/"+ownerSlashRepo+"/issues")
}
func (rp *Issues) CloseIssue(w http.ResponseWriter, r *http.Request) {
···
return
}
-
collaborators, err := f.Collaborators(r.Context())
-
if err != nil {
-
l.Error("failed to fetch repo collaborators", "err", err)
-
}
-
isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool {
-
return user.Did == collab.Did
-
})
+
roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
+
isRepoOwner := roles.IsOwner()
+
isCollaborator := roles.IsCollaborator()
isIssueOwner := user.Did == issue.Did
// TODO: make this more granular
-
if isIssueOwner || isCollaborator {
+
if isIssueOwner || isRepoOwner || isCollaborator {
err = db.CloseIssues(
rp.db,
-
db.FilterEq("id", issue.Id),
+
orm.FilterEq("id", issue.Id),
)
if err != nil {
l.Error("failed to close issue", "err", err)
···
// notify about the issue closure
rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue)
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
return
} else {
l.Error("user is not permitted to close issue")
···
return
}
-
collaborators, err := f.Collaborators(r.Context())
-
if err != nil {
-
l.Error("failed to fetch repo collaborators", "err", err)
-
}
-
isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool {
-
return user.Did == collab.Did
-
})
+
roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
+
isRepoOwner := roles.IsOwner()
+
isCollaborator := roles.IsCollaborator()
isIssueOwner := user.Did == issue.Did
-
if isCollaborator || isIssueOwner {
+
if isCollaborator || isRepoOwner || isIssueOwner {
err := db.ReopenIssues(
rp.db,
-
db.FilterEq("id", issue.Id),
+
orm.FilterEq("id", issue.Id),
)
if err != nil {
l.Error("failed to reopen issue", "err", err)
···
// notify about the issue reopen
rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue)
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
return
} else {
l.Error("user is not the owner of the repo")
···
if replyToUri != "" {
replyTo = &replyToUri
}
+
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
comment := models.IssueComment{
-
Did: user.Did,
-
Rkey: tid.TID(),
-
IssueAt: issue.AtUri().String(),
-
ReplyTo: replyTo,
-
Body: body,
-
Created: time.Now(),
+
Did: user.Did,
+
Rkey: tid.TID(),
+
IssueAt: issue.AtUri().String(),
+
ReplyTo: replyTo,
+
Body: body,
+
Created: time.Now(),
+
Mentions: mentions,
+
References: references,
}
if err = rp.validator.ValidateIssueComment(&comment); err != nil {
l.Error("failed to validate comment", "err", err)
···
}
}()
-
commentId, err := db.AddIssueComment(rp.db, comment)
+
tx, err := rp.db.Begin()
+
if err != nil {
+
l.Error("failed to start transaction", "err", err)
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
+
return
+
}
+
defer tx.Rollback()
+
+
commentId, err := db.AddIssueComment(tx, comment)
if err != nil {
l.Error("failed to create comment", "err", err)
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
return
}
+
err = tx.Commit()
+
if err != nil {
+
l.Error("failed to commit transaction", "err", err)
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
+
return
+
}
// reset atUri to make rollback a no-op
atUri = ""
···
// notify about the new comment
comment.Id = commentId
-
rawMentions := markup.FindUserMentions(comment.Body)
-
idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions)
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
-
var mentions []syntax.DID
-
for _, ident := range idents {
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
-
mentions = append(mentions, ident.DID)
-
}
-
}
rp.notifier.NewIssueComment(r.Context(), &comment, mentions)
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", f.OwnerSlashRepo(), issue.IssueId, commentId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, commentId))
}
func (rp *Issues) IssueComment(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "IssueComment")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
commentId := chi.URLParam(r, "commentId")
comments, err := db.GetIssueComments(
rp.db,
-
db.FilterEq("id", commentId),
+
orm.FilterEq("id", commentId),
)
if err != nil {
l.Error("failed to fetch comment", "id", commentId)
···
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &comment,
})
···
func (rp *Issues) EditIssueComment(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "EditIssueComment")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
commentId := chi.URLParam(r, "commentId")
comments, err := db.GetIssueComments(
rp.db,
-
db.FilterEq("id", commentId),
+
orm.FilterEq("id", commentId),
)
if err != nil {
l.Error("failed to fetch comment", "id", commentId)
···
case http.MethodGet:
rp.pages.EditIssueCommentFragment(w, pages.EditIssueCommentParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &comment,
})
···
newComment := comment
newComment.Body = newBody
newComment.Edited = &now
+
newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody)
+
record := newComment.AsRecord()
-
_, err = db.AddIssueComment(rp.db, newComment)
+
tx, err := rp.db.Begin()
+
if err != nil {
+
l.Error("failed to start transaction", "err", err)
+
rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.")
+
return
+
}
+
defer tx.Rollback()
+
+
_, err = db.AddIssueComment(tx, newComment)
if err != nil {
l.Error("failed to perferom update-description query", "err", err)
rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.")
return
}
+
tx.Commit()
// rkey is optional, it was introduced later
if newComment.Rkey != "" {
···
// return new comment body with htmx
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &newComment,
})
···
func (rp *Issues) ReplyIssueCommentPlaceholder(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "ReplyIssueCommentPlaceholder")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
commentId := chi.URLParam(r, "commentId")
comments, err := db.GetIssueComments(
rp.db,
-
db.FilterEq("id", commentId),
+
orm.FilterEq("id", commentId),
)
if err != nil {
l.Error("failed to fetch comment", "id", commentId)
···
rp.pages.ReplyIssueCommentPlaceholderFragment(w, pages.ReplyIssueCommentPlaceholderParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &comment,
})
···
func (rp *Issues) ReplyIssueComment(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "ReplyIssueComment")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
commentId := chi.URLParam(r, "commentId")
comments, err := db.GetIssueComments(
rp.db,
-
db.FilterEq("id", commentId),
+
orm.FilterEq("id", commentId),
)
if err != nil {
l.Error("failed to fetch comment", "id", commentId)
···
rp.pages.ReplyIssueCommentFragment(w, pages.ReplyIssueCommentParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &comment,
})
···
func (rp *Issues) DeleteIssueComment(w http.ResponseWriter, r *http.Request) {
l := rp.logger.With("handler", "DeleteIssueComment")
user := rp.oauth.GetUser(r)
-
f, err := rp.repoResolver.Resolve(r)
-
if err != nil {
-
l.Error("failed to get repo and knot", "err", err)
-
return
-
}
issue, ok := r.Context().Value("issue").(*models.Issue)
if !ok {
···
commentId := chi.URLParam(r, "commentId")
comments, err := db.GetIssueComments(
rp.db,
-
db.FilterEq("id", commentId),
+
orm.FilterEq("id", commentId),
)
if err != nil {
l.Error("failed to fetch comment", "id", commentId)
···
// optimistic deletion
deleted := time.Now()
-
err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id))
+
err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id))
if err != nil {
l.Error("failed to delete comment", "err", err)
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
// htmx fragment of comment after deletion
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issue: issue,
Comment: &comment,
})
···
return
}
+
totalIssues := 0
+
if isOpen {
+
totalIssues = f.RepoStats.IssueCount.Open
+
} else {
+
totalIssues = f.RepoStats.IssueCount.Closed
+
}
+
keyword := params.Get("q")
var issues []models.Issue
···
return
}
l.Debug("searched issues with indexer", "count", len(res.Hits))
+
totalIssues = int(res.Total)
issues, err = db.GetIssues(
rp.db,
-
db.FilterIn("id", res.Hits),
+
orm.FilterIn("id", res.Hits),
)
if err != nil {
l.Error("failed to get issues", "err", err)
···
issues, err = db.GetIssuesPaginated(
rp.db,
page,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("open", openInt),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("open", openInt),
)
if err != nil {
l.Error("failed to get issues", "err", err)
···
labelDefs, err := db.GetLabelDefinitions(
rp.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", tangled.RepoIssueNSID),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
)
if err != nil {
l.Error("failed to fetch labels", "err", err)
···
rp.pages.RepoIssues(w, pages.RepoIssuesParams{
LoggedInUser: rp.oauth.GetUser(r),
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Issues: issues,
+
IssueCount: totalIssues,
LabelDefs: defs,
FilteringByOpen: isOpen,
FilterQuery: keyword,
···
case http.MethodGet:
rp.pages.RepoNewIssue(w, pages.RepoNewIssueParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
})
case http.MethodPost:
+
body := r.FormValue("body")
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
+
issue := &models.Issue{
-
RepoAt: f.RepoAt(),
-
Rkey: tid.TID(),
-
Title: r.FormValue("title"),
-
Body: r.FormValue("body"),
-
Open: true,
-
Did: user.Did,
-
Created: time.Now(),
-
Repo: &f.Repo,
+
RepoAt: f.RepoAt(),
+
Rkey: tid.TID(),
+
Title: r.FormValue("title"),
+
Body: body,
+
Open: true,
+
Did: user.Did,
+
Created: time.Now(),
+
Mentions: mentions,
+
References: references,
+
Repo: f,
}
if err := rp.validator.ValidateIssue(issue); err != nil {
···
// everything is successful, do not rollback the atproto record
atUri = ""
-
rawMentions := markup.FindUserMentions(issue.Body)
-
idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions)
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
-
var mentions []syntax.DID
-
for _, ident := range idents {
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
-
mentions = append(mentions, ident.DID)
-
}
-
}
rp.notifier.NewIssue(r.Context(), issue, mentions)
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
+
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
return
}
}
+3 -3
appview/issues/opengraph.go
···
// Get owner handle for avatar
var ownerHandle string
-
owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Repo.Did)
+
owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Did)
if err != nil {
-
ownerHandle = f.Repo.Did
+
ownerHandle = f.Did
} else {
ownerHandle = "@" + owner.Handle.String()
}
-
card, err := rp.drawIssueSummaryCard(issue, &f.Repo, commentCount, ownerHandle)
+
card, err := rp.drawIssueSummaryCard(issue, f, commentCount, ownerHandle)
if err != nil {
log.Println("failed to draw issue summary card", err)
http.Error(w, "failed to draw issue summary card", http.StatusInternalServerError)
+37 -19
appview/knots/knots.go
···
"tangled.org/core/appview/xrpcclient"
"tangled.org/core/eventconsumer"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/tid"
···
Knotstream *eventconsumer.Consumer
}
+
// tab is a loosely-typed tab descriptor consumed by the settings
// templates; each entry carries a "Name" and an "Icon" key.
type tab = map[string]any

// knotsTabs is the settings-page tab bar; handlers that render knot
// pages pass Tab: "knots" to mark this entry active.
// (The redundant `[]tab = []tab{…}` type was dropped; the literal
// already fixes the type.)
var knotsTabs = []tab{
	{"Name": "profile", "Icon": "user"},
	{"Name": "keys", "Icon": "key"},
	{"Name": "emails", "Icon": "mail"},
	{"Name": "notifications", "Icon": "bell"},
	{"Name": "knots", "Icon": "volleyball"},
	{"Name": "spindles", "Icon": "spool"},
}
+
func (k *Knots) Router() http.Handler {
r := chi.NewRouter()
···
user := k.OAuth.GetUser(r)
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
+
orm.FilterEq("did", user.Did),
)
if err != nil {
k.Logger.Error("failed to fetch knot registrations", "err", err)
···
k.Pages.Knots(w, pages.KnotsParams{
LoggedInUser: user,
Registrations: registrations,
+
Tabs: knotsTabs,
+
Tab: "knots",
})
}
···
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
l.Error("failed to get registrations", "err", err)
···
repos, err := db.GetRepos(
k.Db,
0,
-
db.FilterEq("knot", domain),
+
orm.FilterEq("knot", domain),
)
if err != nil {
l.Error("failed to get knot repos", "err", err)
···
Members: members,
Repos: repoMap,
IsOwner: true,
+
Tabs: knotsTabs,
+
Tab: "knots",
})
}
···
// get record from db first
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
l.Error("failed to get registration", "err", err)
···
err = db.DeleteKnot(
tx,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
l.Error("failed to delete registration", "err", err)
···
// get record from db first
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
l.Error("failed to get registration", "err", err)
···
// Get updated registration to show
registrations, err = db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
)
if err != nil {
l.Error("failed to get registration", "err", err)
···
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
-
db.FilterIsNot("registered", "null"),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
+
orm.FilterIsNot("registered", "null"),
)
if err != nil {
l.Error("failed to get registration", "err", err)
···
}
// success
-
k.Pages.HxRedirect(w, fmt.Sprintf("/knots/%s", domain))
+
k.Pages.HxRedirect(w, fmt.Sprintf("/settings/knots/%s", domain))
}
func (k *Knots) removeMember(w http.ResponseWriter, r *http.Request) {
···
registrations, err := db.GetRegistrations(
k.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("domain", domain),
-
db.FilterIsNot("registered", "null"),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("domain", domain),
+
orm.FilterIsNot("registered", "null"),
)
if err != nil {
l.Error("failed to get registration", "err", err)
+5 -4
appview/labels/labels.go
···
"tangled.org/core/appview/oauth"
"tangled.org/core/appview/pages"
"tangled.org/core/appview/validator"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/tid"
···
repoAt := r.Form.Get("repo")
subjectUri := r.Form.Get("subject")
-
repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt))
+
repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt))
if err != nil {
fail("Failed to get repository.", err)
return
}
// find all the labels that this repo subscribes to
-
repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt))
+
repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt))
if err != nil {
fail("Failed to get labels for this repository.", err)
return
···
labelAts = append(labelAts, rl.LabelAt.String())
}
-
actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts))
+
actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts))
if err != nil {
fail("Invalid form data.", err)
return
}
// calculate the start state by applying already known labels
-
existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri))
+
existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri))
if err != nil {
fail("Invalid form data.", err)
return
+67
appview/mentions/resolver.go
···
+
package mentions
+
+
import (
+
"context"
+
"log/slog"
+
+
"github.com/bluesky-social/indigo/atproto/syntax"
+
"tangled.org/core/appview/config"
+
"tangled.org/core/appview/db"
+
"tangled.org/core/appview/models"
+
"tangled.org/core/appview/pages/markup"
+
"tangled.org/core/idresolver"
+
)
+
+
type Resolver struct {
+
config *config.Config
+
idResolver *idresolver.Resolver
+
execer db.Execer
+
logger *slog.Logger
+
}
+
+
func New(
+
config *config.Config,
+
idResolver *idresolver.Resolver,
+
execer db.Execer,
+
logger *slog.Logger,
+
) *Resolver {
+
return &Resolver{
+
config,
+
idResolver,
+
execer,
+
logger,
+
}
+
}
+
+
// Resolve scans source text for user mentions and in-app reference
// links, resolves handles to DIDs, and validates the references
// against the database.
//
// It returns the resolved mention DIDs and the AT-URIs of the
// referenced records. Resolution is best-effort: handles that fail to
// resolve are silently skipped, and a validation failure is only
// logged — whatever ValidateReferenceLinks returned is passed through.
func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) {
	l := r.logger.With("method", "Resolve")

	// FindReferences needs the appview host to recognize links that
	// point back at this instance.
	rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source)
	l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs)

	// Batch-resolve mention handles; drop any that did not resolve or
	// resolved to an invalid handle.
	idents := r.idResolver.ResolveIdents(ctx, rawMentions)
	var mentions []syntax.DID
	for _, ident := range idents {
		if ident != nil && !ident.Handle.IsInvalidHandle() {
			mentions = append(mentions, ident.DID)
		}
	}
	l.Debug("found mentions", "mentions", mentions)

	// Resolve each reference's owner handle individually, rewriting the
	// handle to its canonical DID before validation; unresolvable refs
	// are dropped.
	var resolvedRefs []models.ReferenceLink
	for _, rawRef := range rawRefs {
		ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle)
		if err != nil || ident == nil || ident.Handle.IsInvalidHandle() {
			continue
		}
		rawRef.Handle = string(ident.DID)
		resolvedRefs = append(resolvedRefs, rawRef)
	}
	aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs)
	if err != nil {
		// best-effort: log and fall through with whatever was returned
		l.Error("failed running query", "err", err)
	}
	l.Debug("found references", "refs", aturiRefs)

	return mentions, aturiRefs
}
+5 -4
appview/middleware/middleware.go
···
"tangled.org/core/appview/pagination"
"tangled.org/core/appview/reporesolver"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
)
···
ok, err := mw.enforcer.E.Enforce(actor.Did, f.Knot, f.DidSlashRepo(), requiredPerm)
if err != nil || !ok {
// we need a logged in user
-
log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.OwnerSlashRepo())
+
log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.DidSlashRepo())
http.Error(w, "Forbiden", http.StatusUnauthorized)
return
}
···
repo, err := db.GetRepo(
mw.db,
-
db.FilterEq("did", id.DID.String()),
-
db.FilterEq("name", repoName),
+
orm.FilterEq("did", id.DID.String()),
+
orm.FilterEq("name", repoName),
)
if err != nil {
log.Println("failed to resolve repo", "err", err)
···
return
}
-
fullName := f.OwnerHandle() + "/" + f.Name
+
fullName := reporesolver.GetBaseRepoPath(r, f)
if r.Header.Get("User-Agent") == "Go-http-client/1.1" {
if r.URL.Query().Get("go-get") == "1" {
+70 -34
appview/models/issue.go
···
)
type Issue struct {
-
Id int64
-
Did string
-
Rkey string
-
RepoAt syntax.ATURI
-
IssueId int
-
Created time.Time
-
Edited *time.Time
-
Deleted *time.Time
-
Title string
-
Body string
-
Open bool
+
Id int64
+
Did string
+
Rkey string
+
RepoAt syntax.ATURI
+
IssueId int
+
Created time.Time
+
Edited *time.Time
+
Deleted *time.Time
+
Title string
+
Body string
+
Open bool
+
Mentions []syntax.DID
+
References []syntax.ATURI
// optionally, populate this when querying for reverse mappings
// like comment counts, parent repo etc.
···
}
func (i *Issue) AsRecord() tangled.RepoIssue {
+
mentions := make([]string, len(i.Mentions))
+
for i, did := range i.Mentions {
+
mentions[i] = string(did)
+
}
+
references := make([]string, len(i.References))
+
for i, uri := range i.References {
+
references[i] = string(uri)
+
}
return tangled.RepoIssue{
-
Repo: i.RepoAt.String(),
-
Title: i.Title,
-
Body: &i.Body,
-
CreatedAt: i.Created.Format(time.RFC3339),
+
Repo: i.RepoAt.String(),
+
Title: i.Title,
+
Body: &i.Body,
+
Mentions: mentions,
+
References: references,
+
CreatedAt: i.Created.Format(time.RFC3339),
}
}
···
}
type IssueComment struct {
-
Id int64
-
Did string
-
Rkey string
-
IssueAt string
-
ReplyTo *string
-
Body string
-
Created time.Time
-
Edited *time.Time
-
Deleted *time.Time
+
Id int64
+
Did string
+
Rkey string
+
IssueAt string
+
ReplyTo *string
+
Body string
+
Created time.Time
+
Edited *time.Time
+
Deleted *time.Time
+
Mentions []syntax.DID
+
References []syntax.ATURI
}
func (i *IssueComment) AtUri() syntax.ATURI {
···
}
func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
+
mentions := make([]string, len(i.Mentions))
+
for i, did := range i.Mentions {
+
mentions[i] = string(did)
+
}
+
references := make([]string, len(i.References))
+
for i, uri := range i.References {
+
references[i] = string(uri)
+
}
return tangled.RepoIssueComment{
-
Body: i.Body,
-
Issue: i.IssueAt,
-
CreatedAt: i.Created.Format(time.RFC3339),
-
ReplyTo: i.ReplyTo,
+
Body: i.Body,
+
Issue: i.IssueAt,
+
CreatedAt: i.Created.Format(time.RFC3339),
+
ReplyTo: i.ReplyTo,
+
Mentions: mentions,
+
References: references,
}
}
···
return nil, err
}
+
i := record
+
mentions := make([]syntax.DID, len(record.Mentions))
+
for i, did := range record.Mentions {
+
mentions[i] = syntax.DID(did)
+
}
+
references := make([]syntax.ATURI, len(record.References))
+
for i, uri := range i.References {
+
references[i] = syntax.ATURI(uri)
+
}
+
comment := IssueComment{
-
Did: ownerDid,
-
Rkey: rkey,
-
Body: record.Body,
-
IssueAt: record.Issue,
-
ReplyTo: record.ReplyTo,
-
Created: created,
+
Did: ownerDid,
+
Rkey: rkey,
+
Body: record.Body,
+
IssueAt: record.Issue,
+
ReplyTo: record.ReplyTo,
+
Created: created,
+
Mentions: mentions,
+
References: references,
}
return &comment, nil
+3 -1
appview/models/profile.go
···
}
type ByMonth struct {
+
Commits int
RepoEvents []RepoEvent
IssueEvents IssueEvents
PullEvents PullEvents
···
func (b ByMonth) IsEmpty() bool {
return len(b.RepoEvents) == 0 &&
len(b.IssueEvents.Items) == 0 &&
-
len(b.PullEvents.Items) == 0
+
len(b.PullEvents.Items) == 0 &&
+
b.Commits == 0
}
type IssueEvents struct {
+41 -3
appview/models/pull.go
···
TargetBranch string
State PullState
Submissions []*PullSubmission
+
Mentions []syntax.DID
+
References []syntax.ATURI
// stacking
StackId string // nullable string
···
source.Repo = &s
}
}
+
mentions := make([]string, len(p.Mentions))
+
for i, did := range p.Mentions {
+
mentions[i] = string(did)
+
}
+
references := make([]string, len(p.References))
+
for i, uri := range p.References {
+
references[i] = string(uri)
+
}
record := tangled.RepoPull{
-
Title: p.Title,
-
Body: &p.Body,
-
CreatedAt: p.Created.Format(time.RFC3339),
+
Title: p.Title,
+
Body: &p.Body,
+
Mentions: mentions,
+
References: references,
+
CreatedAt: p.Created.Format(time.RFC3339),
Target: &tangled.RepoPull_Target{
Repo: p.RepoAt.String(),
Branch: p.TargetBranch,
···
// content
Body string
+
+
// meta
+
Mentions []syntax.DID
+
References []syntax.ATURI
// meta
Created time.Time
}
+
+
// AtUri returns the comment's AT-URI, converting the stored CommentAt
// string verbatim (no validation is performed here).
func (p *PullComment) AtUri() syntax.ATURI {
	return syntax.ATURI(p.CommentAt)
}
+
+
// func (p *PullComment) AsRecord() tangled.RepoPullComment {
+
// mentions := make([]string, len(p.Mentions))
+
// for i, did := range p.Mentions {
+
// mentions[i] = string(did)
+
// }
+
// references := make([]string, len(p.References))
+
// for i, uri := range p.References {
+
// references[i] = string(uri)
+
// }
+
// return tangled.RepoPullComment{
+
// Pull: p.PullAt,
+
// Body: p.Body,
+
// Mentions: mentions,
+
// References: references,
+
// CreatedAt: p.Created.Format(time.RFC3339),
+
// }
+
// }
func (p *Pull) LastRoundNumber() int {
return len(p.Submissions) - 1
+49
appview/models/reference.go
···
+
package models
+
+
import "fmt"
+
+
// RefKind identifies the type of an in-app referenceable subject.
type RefKind int

const (
	RefKindIssue RefKind = iota
	RefKindPull
)

// String returns the URL path segment for the kind: "issues" for
// RefKindIssue and "pulls" otherwise (matching the original
// two-valued if/else behavior for any unknown kind).
func (k RefKind) String() string {
	switch k {
	case RefKindIssue:
		return "issues"
	default:
		return "pulls"
	}
}

// ReferenceLink is a parsed in-app link to an issue or pull, e.g.
//
//	/@alice.com/cool-proj/issues/123
//	/@alice.com/cool-proj/issues/123#comment-321
type ReferenceLink struct {
	Handle    string // handle or DID of the repo owner
	Repo      string // repository name
	Kind      RefKind
	SubjectId int  // issue or pull number
	CommentId *int // optional comment anchor; nil when absent
}

// String renders the link back into its path form, appending a
// "#comment-N" fragment when CommentId is set.
func (l ReferenceLink) String() string {
	comment := ""
	if l.CommentId != nil {
		comment = fmt.Sprintf("#comment-%d", *l.CommentId)
	}
	return fmt.Sprintf("/%s/%s/%s/%d%s",
		l.Handle,
		l.Repo,
		l.Kind.String(),
		l.SubjectId,
		comment,
	)
}
+
+
// RichReferenceLink is a ReferenceLink augmented with display data
// (subject title and open/closed state) for rendering previews.
type RichReferenceLink struct {
	ReferenceLink
	// Title of the referenced issue or pull.
	Title string
	// reusing PullState for both issue & PR
	State PullState
}
+5 -4
appview/notifications/notifications.go
···
"tangled.org/core/appview/oauth"
"tangled.org/core/appview/pages"
"tangled.org/core/appview/pagination"
+
"tangled.org/core/orm"
)
type Notifications struct {
···
total, err := db.CountNotifications(
n.db,
-
db.FilterEq("recipient_did", user.Did),
+
orm.FilterEq("recipient_did", user.Did),
)
if err != nil {
l.Error("failed to get total notifications", "err", err)
···
notifications, err := db.GetNotificationsWithEntities(
n.db,
page,
-
db.FilterEq("recipient_did", user.Did),
+
orm.FilterEq("recipient_did", user.Did),
)
if err != nil {
l.Error("failed to get notifications", "err", err)
···
count, err := db.CountNotifications(
n.db,
-
db.FilterEq("recipient_did", user.Did),
-
db.FilterEq("read", 0),
+
orm.FilterEq("recipient_did", user.Did),
+
orm.FilterEq("read", 0),
)
if err != nil {
http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+77 -66
appview/notify/db/db.go
···
import (
"context"
"log"
-
"maps"
"slices"
"github.com/bluesky-social/indigo/atproto/syntax"
···
"tangled.org/core/appview/models"
"tangled.org/core/appview/notify"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
+
"tangled.org/core/sets"
)
const (
-
maxMentions = 5
+
maxMentions = 8
)
type databaseNotifier struct {
···
return
}
var err error
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt)))
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt)))
if err != nil {
log.Printf("NewStar: failed to get repos: %v", err)
return
}
actorDid := syntax.DID(star.Did)
-
recipients := []syntax.DID{syntax.DID(repo.Did)}
+
recipients := sets.Singleton(syntax.DID(repo.Did))
eventType := models.NotificationTypeRepoStarred
entityType := "repo"
entityId := star.RepoAt.String()
···
}
func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
-
-
// build the recipients list
-
// - owner of the repo
-
// - collaborators in the repo
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
if err != nil {
log.Printf("failed to fetch collaborators: %v", err)
return
}
+
+
// build the recipients list
+
// - owner of the repo
+
// - collaborators in the repo
+
// - remove users already mentioned
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
for _, c := range collaborators {
-
recipients = append(recipients, c.SubjectDid)
+
recipients.Insert(c.SubjectDid)
+
}
+
for _, m := range mentions {
+
recipients.Remove(m)
}
actorDid := syntax.DID(issue.Did)
···
)
n.notifyEvent(
actorDid,
-
mentions,
+
sets.Collect(slices.Values(mentions)),
models.NotificationTypeUserMentioned,
entityType,
entityId,
···
}
func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
-
issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt))
+
issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt))
if err != nil {
log.Printf("NewIssueComment: failed to get issues: %v", err)
return
···
}
issue := issues[0]
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
+
// built the recipients list:
+
// - the owner of the repo
+
// - | if the comment is a reply -> everybody on that thread
+
// | if the comment is a top level -> just the issue owner
+
// - remove mentioned users from the recipients list
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
if comment.IsReply() {
// if this comment is a reply, then notify everybody in that thread
parentAtUri := *comment.ReplyTo
-
allThreads := issue.CommentList()
// find the parent thread, and add all DIDs from here to the recipient list
-
for _, t := range allThreads {
+
for _, t := range issue.CommentList() {
if t.Self.AtUri().String() == parentAtUri {
-
recipients = append(recipients, t.Participants()...)
+
for _, p := range t.Participants() {
+
recipients.Insert(p)
+
}
}
}
} else {
// not a reply, notify just the issue author
-
recipients = append(recipients, syntax.DID(issue.Did))
+
recipients.Insert(syntax.DID(issue.Did))
+
}
+
+
for _, m := range mentions {
+
recipients.Remove(m)
}
actorDid := syntax.DID(comment.Did)
···
)
n.notifyEvent(
actorDid,
-
mentions,
+
sets.Collect(slices.Values(mentions)),
models.NotificationTypeUserMentioned,
entityType,
entityId,
···
func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {
actorDid := syntax.DID(follow.UserDid)
-
recipients := []syntax.DID{syntax.DID(follow.SubjectDid)}
+
recipients := sets.Singleton(syntax.DID(follow.SubjectDid))
eventType := models.NotificationTypeFollowed
entityType := "follow"
entityId := follow.UserDid
···
}
func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
if err != nil {
log.Printf("NewPull: failed to get repos: %v", err)
return
}
-
-
// build the recipients list
-
// - owner of the repo
-
// - collaborators in the repo
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(repo.Did))
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
if err != nil {
log.Printf("failed to fetch collaborators: %v", err)
return
}
+
+
// build the recipients list
+
// - owner of the repo
+
// - collaborators in the repo
+
recipients := sets.Singleton(syntax.DID(repo.Did))
for _, c := range collaborators {
-
recipients = append(recipients, c.SubjectDid)
+
recipients.Insert(c.SubjectDid)
}
actorDid := syntax.DID(pull.OwnerDid)
···
return
}
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt))
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt))
if err != nil {
log.Printf("NewPullComment: failed to get repos: %v", err)
return
···
// build up the recipients list:
// - repo owner
// - all pull participants
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(repo.Did))
+
// - remove those already mentioned
+
recipients := sets.Singleton(syntax.DID(repo.Did))
for _, p := range pull.Participants() {
-
recipients = append(recipients, syntax.DID(p))
+
recipients.Insert(syntax.DID(p))
+
}
+
for _, m := range mentions {
+
recipients.Remove(m)
}
actorDid := syntax.DID(comment.OwnerDid)
···
)
n.notifyEvent(
actorDid,
-
mentions,
+
sets.Collect(slices.Values(mentions)),
models.NotificationTypeUserMentioned,
entityType,
entityId,
···
}
func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
-
// build up the recipients list:
-
// - repo owner
-
// - repo collaborators
-
// - all issue participants
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
if err != nil {
log.Printf("failed to fetch collaborators: %v", err)
return
}
+
+
// build up the recipients list:
+
// - repo owner
+
// - repo collaborators
+
// - all issue participants
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
for _, c := range collaborators {
-
recipients = append(recipients, c.SubjectDid)
+
recipients.Insert(c.SubjectDid)
}
for _, p := range issue.Participants() {
-
recipients = append(recipients, syntax.DID(p))
+
recipients.Insert(syntax.DID(p))
}
entityType := "pull"
···
func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
// Get repo details
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
if err != nil {
log.Printf("NewPullState: failed to get repos: %v", err)
return
}
-
// build up the recipients list:
-
// - repo owner
-
// - all pull participants
-
var recipients []syntax.DID
-
recipients = append(recipients, syntax.DID(repo.Did))
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
if err != nil {
log.Printf("failed to fetch collaborators: %v", err)
return
}
+
+
// build up the recipients list:
+
// - repo owner
+
// - all pull participants
+
recipients := sets.Singleton(syntax.DID(repo.Did))
for _, c := range collaborators {
-
recipients = append(recipients, c.SubjectDid)
+
recipients.Insert(c.SubjectDid)
}
for _, p := range pull.Participants() {
-
recipients = append(recipients, syntax.DID(p))
+
recipients.Insert(syntax.DID(p))
}
entityType := "pull"
···
func (n *databaseNotifier) notifyEvent(
actorDid syntax.DID,
-
recipients []syntax.DID,
+
recipients sets.Set[syntax.DID],
eventType models.NotificationType,
entityType string,
entityId string,
···
issueId *int64,
pullId *int64,
) {
-
if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions {
-
recipients = recipients[:maxMentions]
+
// if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody
+
if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions {
+
return
}
-
recipientSet := make(map[syntax.DID]struct{})
-
for _, did := range recipients {
-
// everybody except actor themselves
-
if did != actorDid {
-
recipientSet[did] = struct{}{}
-
}
-
}
+
+
recipients.Remove(actorDid)
prefMap, err := db.GetNotificationPreferences(
n.db,
-
db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))),
+
orm.FilterIn("user_did", slices.Collect(recipients.All())),
)
if err != nil {
// failed to get prefs for users
···
defer tx.Rollback()
// filter based on preferences
-
for recipientDid := range recipientSet {
+
for recipientDid := range recipients.All() {
prefs, ok := prefMap[recipientDid]
if !ok {
prefs = models.DefaultNotificationPreferences(recipientDid)
-1
appview/notify/merged_notifier.go
···
v.Call(in)
}(n)
}
-
wg.Wait()
}
func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) {
+3 -2
appview/oauth/handler.go
···
"tangled.org/core/api/tangled"
"tangled.org/core/appview/db"
"tangled.org/core/consts"
+
"tangled.org/core/orm"
"tangled.org/core/tid"
)
···
// and create an sh.tangled.spindle.member record with that
spindleMembers, err := db.GetSpindleMembers(
o.Db,
-
db.FilterEq("instance", "spindle.tangled.sh"),
-
db.FilterEq("subject", did),
+
orm.FilterEq("instance", "spindle.tangled.sh"),
+
orm.FilterEq("subject", did),
)
if err != nil {
l.Error("failed to get spindle members", "err", err)
+44 -10
appview/pages/funcmap.go
···
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
"github.com/alecthomas/chroma/v2/lexers"
"github.com/alecthomas/chroma/v2/styles"
-
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/dustin/go-humanize"
"github.com/go-enry/go-enry/v2"
"github.com/yuin/goldmark"
+
emoji "github.com/yuin/goldmark-emoji"
"tangled.org/core/appview/filetree"
+
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages/markup"
"tangled.org/core/crypto"
)
···
}
return identity.Handle.String()
+
},
+
"ownerSlashRepo": func(repo *models.Repo) string {
+
ownerId, err := p.resolver.ResolveIdent(context.Background(), repo.Did)
+
if err != nil {
+
return repo.DidSlashRepo()
+
}
+
handle := ownerId.Handle
+
if handle != "" && !handle.IsInvalidHandle() {
+
return string(handle) + "/" + repo.Name
+
}
+
return repo.DidSlashRepo()
},
"truncateAt30": func(s string) string {
if len(s) <= 30 {
···
"sub": func(a, b int) int {
return a - b
},
+
"mul": func(a, b int) int {
+
return a * b
+
},
+
"div": func(a, b int) int {
+
return a / b
+
},
+
"mod": func(a, b int) int {
+
return a % b
+
},
"f64": func(a int) float64 {
return float64(a)
},
···
return b
},
-
"didOrHandle": func(did, handle string) string {
-
if handle != "" && handle != syntax.HandleInvalid.String() {
-
return handle
-
} else {
-
return did
-
}
-
},
"assoc": func(values ...string) ([][]string, error) {
if len(values)%2 != 0 {
return nil, fmt.Errorf("invalid assoc call, must have an even number of arguments")
···
}
return pairs, nil
},
-
"append": func(s []string, values ...string) []string {
+
"append": func(s []any, values ...any) []any {
s = append(s, values...)
return s
},
···
},
"description": func(text string) template.HTML {
p.rctx.RendererType = markup.RendererTypeDefault
-
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New())
+
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New(
+
goldmark.WithExtensions(
+
emoji.Emoji,
+
),
+
))
sanitized := p.rctx.SanitizeDescription(htmlString)
return template.HTML(sanitized)
},
···
}
}
+
func (p *Pages) resolveDid(did string) string {
+
identity, err := p.resolver.ResolveIdent(context.Background(), did)
+
+
if err != nil {
+
return did
+
}
+
+
if identity.Handle.IsInvalidHandle() {
+
return "handle.invalid"
+
}
+
+
return identity.Handle.String()
+
}
+
func (p *Pages) AvatarUrl(handle, size string) string {
handle = strings.TrimPrefix(handle, "@")
+
+
handle = p.resolveDid(handle)
secret := p.avatar.SharedSecret
h := hmac.New(sha256.New, []byte(secret))
+13 -3
appview/pages/markup/extension/atlink.go
···
return KindAt
}
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
type atParser struct{}
···
if m == nil {
return nil
}
+
+
// Check for all links in the markdown to see if the handle found is inside one
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
+
for _, linkMatch := range linksIndexes {
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
+
return nil
+
}
+
}
+
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
block.Advance(m[1])
node := &AtNode{}
···
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
if entering {
-
w.WriteString(`<a href="/@`)
+
w.WriteString(`<a href="/`)
w.WriteString(n.(*AtNode).Handle)
-
w.WriteString(`" class="mention font-bold">`)
+
w.WriteString(`" class="mention">`)
} else {
w.WriteString("</a>")
}
+2 -26
appview/pages/markup/markdown.go
···
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
"github.com/alecthomas/chroma/v2/styles"
-
treeblood "github.com/wyatt915/goldmark-treeblood"
"github.com/yuin/goldmark"
+
"github.com/yuin/goldmark-emoji"
highlighting "github.com/yuin/goldmark-highlighting/v2"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/extension"
···
extension.NewFootnote(
extension.WithFootnoteIDPrefix([]byte("footnote")),
),
-
treeblood.MathML(),
callout.CalloutExtention,
textension.AtExt,
+
emoji.Emoji,
),
goldmark.WithParserOptions(
parser.WithAutoHeadingID(),
···
}
return path.Join(rctx.CurrentDir, dst)
-
}
-
-
// FindUserMentions returns Set of user handles from given markup soruce.
-
// It doesn't guarntee unique DIDs
-
func FindUserMentions(source string) []string {
-
var (
-
mentions []string
-
mentionsSet = make(map[string]struct{})
-
md = NewMarkdown()
-
sourceBytes = []byte(source)
-
root = md.Parser().Parse(text.NewReader(sourceBytes))
-
)
-
ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
-
if entering && n.Kind() == textension.KindAt {
-
handle := n.(*textension.AtNode).Handle
-
mentionsSet[handle] = struct{}{}
-
return ast.WalkSkipChildren, nil
-
}
-
return ast.WalkContinue, nil
-
})
-
for handle := range mentionsSet {
-
mentions = append(mentions, handle)
-
}
-
return mentions
}
func isAbsoluteUrl(link string) bool {
+121
appview/pages/markup/markdown_test.go
···
+
package markup
+
+
import (
+
"bytes"
+
"testing"
+
)
+
+
func TestAtExtension_Rendering(t *testing.T) {
+
tests := []struct {
+
name string
+
markdown string
+
expected string
+
}{
+
{
+
name: "renders simple at mention",
+
markdown: "Hello @user.tngl.sh!",
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
+
},
+
{
+
name: "renders multiple at mentions",
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
+
},
+
{
+
name: "renders at mention in parentheses",
+
markdown: "Check this out (@user.tngl.sh)",
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
+
},
+
{
+
name: "does not render email",
+
markdown: "Contact me at test@example.com",
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
+
},
+
{
+
name: "renders at mention with hyphen",
+
markdown: "Follow @user-name.tngl.sh",
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
+
},
+
{
+
name: "renders at mention with numbers",
+
markdown: "@user123.test456.social",
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
+
},
+
{
+
name: "at mention at start of line",
+
markdown: "@user.tngl.sh is cool",
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
+
},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.name, func(t *testing.T) {
+
md := NewMarkdown()
+
+
var buf bytes.Buffer
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
+
t.Fatalf("failed to convert markdown: %v", err)
+
}
+
+
result := buf.String()
+
if result != tt.expected+"\n" {
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
+
}
+
})
+
}
+
}
+
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
+
tests := []struct {
+
name string
+
markdown string
+
contains string
+
}{
+
{
+
name: "at mention with bold",
+
markdown: "**Hello @user.tngl.sh**",
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
+
},
+
{
+
name: "at mention with italic",
+
markdown: "*Check @user.tngl.sh*",
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
+
},
+
{
+
name: "at mention in list",
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
+
},
+
{
+
name: "at mention in link",
+
markdown: "[@regnault.dev](https://regnault.dev)",
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
+
},
+
{
+
name: "at mention in link again",
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
+
},
+
{
+
name: "at mention in link again, multiline",
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
+
},
+
}
+
+
for _, tt := range tests {
+
t.Run(tt.name, func(t *testing.T) {
+
md := NewMarkdown()
+
+
var buf bytes.Buffer
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
+
t.Fatalf("failed to convert markdown: %v", err)
+
}
+
+
result := buf.String()
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
+
}
+
})
+
}
+
}
+124
appview/pages/markup/reference_link.go
···
+
package markup
+
+
import (
+
"maps"
+
"net/url"
+
"path"
+
"slices"
+
"strconv"
+
"strings"
+
+
"github.com/yuin/goldmark/ast"
+
"github.com/yuin/goldmark/text"
+
"tangled.org/core/appview/models"
+
textension "tangled.org/core/appview/pages/markup/extension"
+
)
+
+
// FindReferences collects all links referencing tangled-related objects
+
// like issues, PRs, comments or even @-mentions
+
// This function doesn't actually check for the existence of records in the DB
+
// or the PDS; it merely returns a list of what are presumed to be references.
+
func FindReferences(baseUrl string, source string) ([]string, []models.ReferenceLink) {
+
var (
+
refLinkSet = make(map[models.ReferenceLink]struct{})
+
mentionsSet = make(map[string]struct{})
+
md = NewMarkdown()
+
sourceBytes = []byte(source)
+
root = md.Parser().Parse(text.NewReader(sourceBytes))
+
)
+
// trim url scheme; whether the link uses http or https shouldn't matter
+
baseUrl = strings.TrimPrefix(baseUrl, "https://")
+
baseUrl = strings.TrimPrefix(baseUrl, "http://")
+
+
ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
+
if !entering {
+
return ast.WalkContinue, nil
+
}
+
switch n.Kind() {
+
case textension.KindAt:
+
handle := n.(*textension.AtNode).Handle
+
mentionsSet[handle] = struct{}{}
+
return ast.WalkSkipChildren, nil
+
case ast.KindLink:
+
dest := string(n.(*ast.Link).Destination)
+
ref := parseTangledLink(baseUrl, dest)
+
if ref != nil {
+
refLinkSet[*ref] = struct{}{}
+
}
+
return ast.WalkSkipChildren, nil
+
case ast.KindAutoLink:
+
an := n.(*ast.AutoLink)
+
if an.AutoLinkType == ast.AutoLinkURL {
+
dest := string(an.URL(sourceBytes))
+
ref := parseTangledLink(baseUrl, dest)
+
if ref != nil {
+
refLinkSet[*ref] = struct{}{}
+
}
+
}
+
return ast.WalkSkipChildren, nil
+
}
+
return ast.WalkContinue, nil
+
})
+
mentions := slices.Collect(maps.Keys(mentionsSet))
+
references := slices.Collect(maps.Keys(refLinkSet))
+
return mentions, references
+
}
+
+
func parseTangledLink(baseHost string, urlStr string) *models.ReferenceLink {
+
u, err := url.Parse(urlStr)
+
if err != nil {
+
return nil
+
}
+
+
if u.Host != "" && !strings.EqualFold(u.Host, baseHost) {
+
return nil
+
}
+
+
p := path.Clean(u.Path)
+
parts := strings.FieldsFunc(p, func(r rune) bool { return r == '/' })
+
if len(parts) < 4 {
+
// need at least: handle / repo / kind / id
+
return nil
+
}
+
+
var (
+
handle = parts[0]
+
repo = parts[1]
+
kindSeg = parts[2]
+
subjectSeg = parts[3]
+
)
+
+
handle = strings.TrimPrefix(handle, "@")
+
+
var kind models.RefKind
+
switch kindSeg {
+
case "issues":
+
kind = models.RefKindIssue
+
case "pulls":
+
kind = models.RefKindPull
+
default:
+
return nil
+
}
+
+
subjectId, err := strconv.Atoi(subjectSeg)
+
if err != nil {
+
return nil
+
}
+
var commentId *int
+
if u.Fragment != "" {
+
if strings.HasPrefix(u.Fragment, "comment-") {
+
commentIdStr := u.Fragment[len("comment-"):]
+
if id, err := strconv.Atoi(commentIdStr); err == nil {
+
commentId = &id
+
}
+
}
+
}
+
+
return &models.ReferenceLink{
+
Handle: handle,
+
Repo: repo,
+
Kind: kind,
+
SubjectId: subjectId,
+
CommentId: commentId,
+
}
+
}
+22 -13
appview/pages/pages.go
···
"github.com/bluesky-social/indigo/atproto/identity"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/go-git/go-git/v5/plumbing"
-
"github.com/go-git/go-git/v5/plumbing/object"
)
//go:embed templates/* static legal
···
type KnotsParams struct {
LoggedInUser *oauth.User
Registrations []models.Registration
+
Tabs []map[string]any
+
Tab string
}
func (p *Pages) Knots(w io.Writer, params KnotsParams) error {
···
Members []string
Repos map[string][]models.Repo
IsOwner bool
+
Tabs []map[string]any
+
Tab string
}
func (p *Pages) Knot(w io.Writer, params KnotParams) error {
···
type SpindlesParams struct {
LoggedInUser *oauth.User
Spindles []models.Spindle
+
Tabs []map[string]any
+
Tab string
}
func (p *Pages) Spindles(w io.Writer, params SpindlesParams) error {
···
type SpindleListingParams struct {
models.Spindle
+
Tabs []map[string]any
+
Tab string
}
func (p *Pages) SpindleListing(w io.Writer, params SpindleListingParams) error {
···
Spindle models.Spindle
Members []string
Repos map[string][]models.Repo
+
Tabs []map[string]any
+
Tab string
}
func (p *Pages) SpindleDashboard(w io.Writer, params SpindleDashboardParams) error {
···
type ProfileCard struct {
UserDid string
-
UserHandle string
FollowStatus models.FollowStatus
Punchcard *models.Punchcard
Profile *models.Profile
···
}
func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error {
-
return p.executePlain("fragments/starBtn", w, params)
+
return p.executePlain("fragments/starBtn-oob", w, params)
}
type RepoIndexParams struct {
···
RepoInfo repoinfo.RepoInfo
Active string
TagMap map[string][]string
-
CommitsTrunc []*object.Commit
+
CommitsTrunc []types.Commit
TagsTrunc []*types.TagReference
BranchesTrunc []types.Branch
// ForkInfo *types.ForkInfo
···
}
type Collaborator struct {
-
Did string
-
Handle string
-
Role string
+
Did string
+
Role string
}
type RepoSettingsParams struct {
···
RepoInfo repoinfo.RepoInfo
Active string
Issues []models.Issue
+
IssueCount int
LabelDefs map[string]*models.LabelDefinition
Page pagination.Page
FilteringByOpen bool
···
Active string
Issue *models.Issue
CommentList []models.CommentListItem
+
Backlinks []models.RichReferenceLink
LabelDefs map[string]*models.LabelDefinition
OrderedReactionKinds []models.ReactionKind
···
Pull *models.Pull
Stack models.Stack
AbandonedPulls []*models.Pull
+
Backlinks []models.RichReferenceLink
BranchDeleteStatus *models.BranchDeleteStatus
MergeCheck types.MergeCheckResponse
ResubmitCheck ResubmitResult
···
return p.executePlain("repo/fragments/compareAllowPull", w, params)
-
type RepoCompareDiffParams struct {
-
LoggedInUser *oauth.User
-
RepoInfo repoinfo.RepoInfo
-
Diff types.NiceDiff
+
type RepoCompareDiffFragmentParams struct {
+
Diff types.NiceDiff
+
DiffOpts types.DiffOpts
-
func (p *Pages) RepoCompareDiff(w io.Writer, params RepoCompareDiffParams) error {
-
return p.executePlain("repo/fragments/diff", w, []any{params.RepoInfo.FullName, &params.Diff})
+
func (p *Pages) RepoCompareDiffFragment(w io.Writer, params RepoCompareDiffFragmentParams) error {
+
return p.executePlain("repo/fragments/diff", w, []any{&params.Diff, &params.DiffOpts})
type LabelPanelParams struct {
+25 -22
appview/pages/repoinfo/repoinfo.go
···
package repoinfo
import (
+
"fmt"
"path"
"slices"
"github.com/bluesky-social/indigo/atproto/syntax"
+
"tangled.org/core/api/tangled"
"tangled.org/core/appview/models"
"tangled.org/core/appview/state/userutil"
)
-
func (r RepoInfo) Owner() string {
+
func (r RepoInfo) owner() string {
if r.OwnerHandle != "" {
return r.OwnerHandle
} else {
···
}
func (r RepoInfo) FullName() string {
-
return path.Join(r.Owner(), r.Name)
+
return path.Join(r.owner(), r.Name)
}
-
func (r RepoInfo) OwnerWithoutAt() string {
+
func (r RepoInfo) ownerWithoutAt() string {
if r.OwnerHandle != "" {
return r.OwnerHandle
} else {
···
}
func (r RepoInfo) FullNameWithoutAt() string {
-
return path.Join(r.OwnerWithoutAt(), r.Name)
+
return path.Join(r.ownerWithoutAt(), r.Name)
}
func (r RepoInfo) GetTabs() [][]string {
···
return tabs
}
+
func (r RepoInfo) RepoAt() syntax.ATURI {
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.OwnerDid, tangled.RepoNSID, r.Rkey))
+
}
+
type RepoInfo struct {
-
Name string
-
Rkey string
-
OwnerDid string
-
OwnerHandle string
-
Description string
-
Website string
-
Topics []string
-
Knot string
-
Spindle string
-
RepoAt syntax.ATURI
-
IsStarred bool
-
Stats models.RepoStats
-
Roles RolesInRepo
-
Source *models.Repo
-
SourceHandle string
-
Ref string
-
DisableFork bool
-
CurrentDir string
+
Name string
+
Rkey string
+
OwnerDid string
+
OwnerHandle string
+
Description string
+
Website string
+
Topics []string
+
Knot string
+
Spindle string
+
IsStarred bool
+
Stats models.RepoStats
+
Roles RolesInRepo
+
Source *models.Repo
+
Ref string
+
CurrentDir string
}
// each tab on a repo could have some metadata:
+5
appview/pages/templates/fragments/starBtn-oob.html
···
+
{{ define "fragments/starBtn-oob" }}
+
<div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'>
+
{{ template "fragments/starBtn" . }}
+
</div>
+
{{ end }}
+1 -3
appview/pages/templates/fragments/starBtn.html
···
{{ define "fragments/starBtn" }}
+
{{/* NOTE: this fragment is always replaced with hx-swap-oob */}}
<button
id="starBtn"
class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group"
···
{{ end }}
hx-trigger="click"
-
hx-target="this"
-
hx-swap="outerHTML"
-
hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'
hx-disabled-elt="#starBtn"
>
{{ if .IsStarred }}
+8
appview/pages/templates/fragments/tabSelector.html
···
{{ $name := .Name }}
{{ $all := .Values }}
{{ $active := .Active }}
+
{{ $include := .Include }}
<div class="flex justify-between divide-x divide-gray-200 dark:divide-gray-700 rounded border border-gray-200 dark:border-gray-700 overflow-hidden">
{{ $activeTab := "bg-white dark:bg-gray-700 shadow-sm" }}
{{ $inactiveTab := "bg-gray-100 dark:bg-gray-800 shadow-inner" }}
{{ range $index, $value := $all }}
{{ $isActive := eq $value.Key $active }}
<a href="?{{ $name }}={{ $value.Key }}"
+
{{ if $include }}
+
hx-get="?{{ $name }}={{ $value.Key }}"
+
hx-include="{{ $include }}"
+
hx-push-url="true"
+
hx-target="body"
+
hx-on:htmx:config-request="if(!event.detail.parameters.q) delete event.detail.parameters.q"
+
{{ end }}
class="p-2 whitespace-nowrap flex justify-center items-center gap-2 text-sm w-full block hover:no-underline text-center {{ if $isActive }} {{$activeTab }} {{ else }} {{ $inactiveTab }} {{ end }}">
{{ if $value.Icon }}
{{ i $value.Icon "size-4" }}
+22
appview/pages/templates/fragments/tinyAvatarList.html
···
+
{{ define "fragments/tinyAvatarList" }}
+
{{ $all := .all }}
+
{{ $classes := .classes }}
+
{{ $ps := take $all 5 }}
+
<div class="inline-flex items-center -space-x-3">
+
{{ $c := "z-50 z-40 z-30 z-20 z-10" }}
+
{{ range $i, $p := $ps }}
+
<img
+
src="{{ tinyAvatar . }}"
+
alt=""
+
class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}"
+
/>
+
{{ end }}
+
+
{{ if gt (len $all) 5 }}
+
<span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
+
+{{ sub (len $all) 5 }}
+
</span>
+
{{ end }}
+
</div>
+
{{ end }}
+
+23 -7
appview/pages/templates/knots/dashboard.html
···
-
{{ define "title" }}{{ .Registration.Domain }} &middot; knots{{ end }}
+
{{ define "title" }}{{ .Registration.Domain }} &middot; {{ .Tab }} settings{{ end }}
{{ define "content" }}
-
<div class="px-6 py-4">
+
<div class="p-6">
+
<p class="text-xl font-bold dark:text-white">Settings</p>
+
</div>
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
+
<div class="col-span-1">
+
{{ template "user/settings/fragments/sidebar" . }}
+
</div>
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
+
{{ template "knotDash" . }}
+
</div>
+
</section>
+
</div>
+
{{ end }}
+
+
{{ define "knotDash" }}
+
<div>
<div class="flex justify-between items-center">
-
<h1 class="text-xl font-bold dark:text-white">{{ .Registration.Domain }}</h1>
+
<h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} &middot; {{ .Registration.Domain }}</h2>
<div id="right-side" class="flex gap-2">
{{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }}
{{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Registration.ByDid) }}
···
</div>
{{ if .Members }}
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section class="bg-white dark:bg-gray-800 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
<div class="flex flex-col gap-2">
{{ block "member" . }} {{ end }}
</div>
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Delete knot"
-
hx-delete="/knots/{{ .Domain }}"
+
hx-delete="/settings/knots/{{ .Domain }}"
hx-swap="outerHTML"
hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?"
hx-headers='{"shouldRedirect": "true"}'
···
<button
class="btn gap-2 group"
title="Retry knot verification"
-
hx-post="/knots/{{ .Domain }}/retry"
+
hx-post="/settings/knots/{{ .Domain }}/retry"
hx-swap="none"
hx-headers='{"shouldRefresh": "true"}'
>
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Remove member"
-
hx-post="/knots/{{ $root.Registration.Domain }}/remove"
+
hx-post="/settings/knots/{{ $root.Registration.Domain }}/remove"
hx-swap="none"
hx-vals='{"member": "{{$member}}" }'
hx-confirm="Are you sure you want to remove {{ $memberHandle }} from this knot?"
+1 -1
appview/pages/templates/knots/fragments/addMemberModal.html
···
{{ define "addKnotMemberPopover" }}
<form
-
hx-post="/knots/{{ .Domain }}/add"
+
hx-post="/settings/knots/{{ .Domain }}/add"
hx-indicator="#spinner"
hx-swap="none"
class="flex flex-col gap-2"
+3 -3
appview/pages/templates/knots/fragments/knotListing.html
···
{{ define "knotLeftSide" }}
{{ if .Registered }}
-
<a href="/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
+
<a href="/settings/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
{{ i "hard-drive" "w-4 h-4" }}
<span class="hover:underline">
{{ .Domain }}
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Delete knot"
-
hx-delete="/knots/{{ .Domain }}"
+
hx-delete="/settings/knots/{{ .Domain }}"
hx-swap="outerHTML"
hx-target="#knot-{{.Id}}"
hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?"
···
<button
class="btn gap-2 group"
title="Retry knot verification"
-
hx-post="/knots/{{ .Domain }}/retry"
+
hx-post="/settings/knots/{{ .Domain }}/retry"
hx-swap="none"
hx-target="#knot-{{.Id}}"
>
+42 -11
appview/pages/templates/knots/index.html
···
-
{{ define "title" }}knots{{ end }}
+
{{ define "title" }}{{ .Tab }} settings{{ end }}
{{ define "content" }}
-
<div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom">
-
<h1 class="text-xl font-bold dark:text-white">Knots</h1>
-
<span class="flex items-center gap-1">
-
{{ i "book" "w-3 h-3" }}
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">docs</a>
-
</span>
-
</div>
+
<div class="p-6">
+
<p class="text-xl font-bold dark:text-white">Settings</p>
+
</div>
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
+
<div class="col-span-1">
+
{{ template "user/settings/fragments/sidebar" . }}
+
</div>
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
+
{{ template "knotsList" . }}
+
</div>
+
</section>
+
</div>
+
{{ end }}
+
+
{{ define "knotsList" }}
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center">
+
<div class="col-span-1 md:col-span-2">
+
<h2 class="text-sm pb-2 uppercase font-bold">Knots</h2>
+
{{ block "about" . }} {{ end }}
+
</div>
+
<div class="col-span-1 md:col-span-1 md:justify-self-end">
+
{{ template "docsButton" . }}
+
</div>
+
</div>
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section>
<div class="flex flex-col gap-6">
-
{{ block "about" . }} {{ end }}
{{ block "list" . }} {{ end }}
{{ block "register" . }} {{ end }}
</div>
···
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a knot</h2>
<p class="mb-2 dark:text-gray-300">Enter the hostname of your knot to get started.</p>
<form
-
hx-post="/knots/register"
+
hx-post="/settings/knots/register"
class="max-w-2xl mb-2 space-y-4"
hx-indicator="#register-button"
hx-swap="none"
···
</section>
{{ end }}
+
+
{{ define "docsButton" }}
+
<a
+
class="btn flex items-center gap-2"
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">
+
{{ i "book" "size-4" }}
+
docs
+
</a>
+
<div
+
id="add-email-modal"
+
popover
+
class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50">
+
</div>
+
{{ end }}
-2
appview/pages/templates/layouts/fragments/topbar.html
···
<a href="/{{ $user }}">profile</a>
<a href="/{{ $user }}?tab=repos">repositories</a>
<a href="/{{ $user }}?tab=strings">strings</a>
-
<a href="/knots">knots</a>
-
<a href="/spindles">spindles</a>
<a href="/settings">settings</a>
<a href="#"
hx-post="/logout"
+8 -7
appview/pages/templates/layouts/profilebase.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }}{{ end }}
{{ define "extrameta" }}
-
{{ $avatarUrl := fullAvatar .Card.UserHandle }}
-
<meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
+
{{ $handle := resolve .Card.UserDid }}
+
{{ $avatarUrl := fullAvatar $handle }}
+
<meta property="og:title" content="{{ $handle }}" />
<meta property="og:type" content="profile" />
-
<meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}?tab={{ .Active }}" />
-
<meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
+
<meta property="og:url" content="https://tangled.org/{{ $handle }}?tab={{ .Active }}" />
+
<meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" />
<meta property="og:image" content="{{ $avatarUrl }}" />
<meta property="og:image:width" content="512" />
<meta property="og:image:height" content="512" />
<meta name="twitter:card" content="summary" />
-
<meta name="twitter:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
-
<meta name="twitter:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
+
<meta name="twitter:title" content="{{ $handle }}" />
+
<meta name="twitter:description" content="{{ or .Card.Profile.Description $handle }}" />
<meta name="twitter:image" content="{{ $avatarUrl }}" />
{{ end }}
+35 -10
appview/pages/templates/repo/commit.html
···
</div>
<div class="flex flex-wrap items-center space-x-2">
-
<p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300">
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
-
-
{{ if $did }}
-
{{ template "user/fragments/picHandleLink" $did }}
-
{{ else }}
-
<a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a>
-
{{ end }}
+
<p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
+
{{ template "attribution" . }}
<span class="px-1 select-none before:content-['\00B7']"></span>
-
{{ template "repo/fragments/time" $commit.Author.When }}
+
{{ template "repo/fragments/time" $commit.Committer.When }}
<span class="px-1 select-none before:content-['\00B7']"></span>
<a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a>
···
</section>
{{end}}
+
{{ define "attribution" }}
+
{{ $commit := .Diff.Commit }}
+
{{ $showCommitter := true }}
+
{{ if eq $commit.Author.Email $commit.Committer.Email }}
+
{{ $showCommitter = false }}
+
{{ end }}
+
+
{{ if $showCommitter }}
+
authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }}
+
{{ range $commit.CoAuthors }}
+
{{ template "attributedUser" (list .Email .Name $.EmailToDid) }}
+
{{ end }}
+
and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }}
+
{{ else }}
+
{{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}}
+
{{ end }}
+
{{ end }}
+
+
{{ define "attributedUser" }}
+
{{ $email := index . 0 }}
+
{{ $name := index . 1 }}
+
{{ $map := index . 2 }}
+
{{ $did := index $map $email }}
+
+
{{ if $did }}
+
{{ template "user/fragments/picHandleLink" $did }}
+
{{ else }}
+
<a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a>
+
{{ end }}
+
{{ end }}
+
{{ define "topbarLayout" }}
<header class="col-span-full" style="z-index: 20;">
{{ template "layouts/fragments/topbar" . }}
···
{{ end }}
{{ define "contentAfter" }}
-
{{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }}
+
{{ template "repo/fragments/diff" (list .Diff .DiffOpts) }}
{{end}}
{{ define "contentAfterLeft" }}
+1 -1
appview/pages/templates/repo/compare/compare.html
···
{{ end }}
{{ define "contentAfter" }}
-
{{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }}
+
{{ template "repo/fragments/diff" (list .Diff .DiffOpts) }}
{{end}}
{{ define "contentAfterLeft" }}
+2 -2
appview/pages/templates/repo/empty.html
···
{{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
{{ $knot := .RepoInfo.Knot }}
{{ if eq $knot "knot1.tangled.sh" }}
-
{{ $knot = "tangled.sh" }}
+
{{ $knot = "tangled.org" }}
{{ end }}
<div class="w-full flex place-content-center">
<div class="py-6 w-fit flex flex-col gap-4">
···
<p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
<p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
-
<p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
+
<p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ resolve .RepoInfo.OwnerDid }}/{{ .RepoInfo.Name }}</code></p>
<p><span class="{{$bullet}}">4</span>Push!</p>
</div>
</div>
+1 -1
appview/pages/templates/repo/fork.html
···
{{ end }}
</div>
</div>
-
<p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/knots" class="underline">Learn how to register your own knot.</a></p>
+
<p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/settings/knots" class="underline">Learn how to register your own knot.</a></p>
</fieldset>
<div class="space-y-2">
+49
appview/pages/templates/repo/fragments/backlinks.html
···
+
{{ define "repo/fragments/backlinks" }}
+
{{ if .Backlinks }}
+
<div id="at-uri-panel" class="px-2 md:px-0">
+
<div>
+
<span class="text-sm py-1 font-bold text-gray-500 dark:text-gray-400">Referenced by</span>
+
</div>
+
<ul>
+
{{ range .Backlinks }}
+
<li>
+
{{ $repoOwner := resolve .Handle }}
+
{{ $repoName := .Repo }}
+
{{ $repoUrl := printf "%s/%s" $repoOwner $repoName }}
+
<div class="flex flex-col">
+
<div class="flex gap-2 items-center">
+
{{ if .State.IsClosed }}
+
<span class="text-gray-500 dark:text-gray-400">
+
{{ i "ban" "size-3" }}
+
</span>
+
{{ else if eq .Kind.String "issues" }}
+
<span class="text-green-600 dark:text-green-500">
+
{{ i "circle-dot" "size-3" }}
+
</span>
+
{{ else if .State.IsOpen }}
+
<span class="text-green-600 dark:text-green-500">
+
{{ i "git-pull-request" "size-3" }}
+
</span>
+
{{ else if .State.IsMerged }}
+
<span class="text-purple-600 dark:text-purple-500">
+
{{ i "git-merge" "size-3" }}
+
</span>
+
{{ else }}
+
<span class="text-gray-600 dark:text-gray-300">
+
{{ i "git-pull-request-closed" "size-3" }}
+
</span>
+
{{ end }}
+
<a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
+
</div>
+
{{ if not (eq $.RepoInfo.FullName $repoUrl) }}
+
<div>
+
<span>on <a href="/{{ $repoUrl }}">{{ $repoUrl }}</a></span>
+
</div>
+
{{ end }}
+
</div>
+
</li>
+
{{ end }}
+
</ul>
+
</div>
+
{{ end }}
+
{{ end }}
+3 -2
appview/pages/templates/repo/fragments/cloneDropdown.html
···
<!-- SSH Clone -->
<div class="mb-3">
+
{{ $repoOwnerHandle := resolve .RepoInfo.OwnerDid }}
<label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">SSH</label>
<div class="flex items-center border border-gray-300 dark:border-gray-600 rounded">
<code
class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto"
onclick="window.getSelection().selectAllChildren(this)"
-
data-url="git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}"
-
>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code>
+
data-url="git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}"
+
>git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}</code>
<button
onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))"
class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
+2 -3
appview/pages/templates/repo/fragments/diff.html
···
{{ define "repo/fragments/diff" }}
-
{{ $repo := index . 0 }}
-
{{ $diff := index . 1 }}
-
{{ $opts := index . 2 }}
+
{{ $diff := index . 0 }}
+
{{ $opts := index . 1 }}
{{ $commit := $diff.Commit }}
{{ $diff := $diff.Diff }}
+15 -1
appview/pages/templates/repo/fragments/editLabelPanel.html
···
{{ $fieldName := $def.AtUri }}
{{ $valueType := $def.ValueType }}
{{ $value := .value }}
+
{{ if $valueType.IsDidFormat }}
{{ $value = trimPrefix (resolve .value) "@" }}
+
<actor-typeahead>
+
<input
+
autocapitalize="none"
+
autocorrect="off"
+
autocomplete="off"
+
placeholder="user.tngl.sh"
+
value="{{$value}}"
+
name="{{$fieldName}}"
+
type="text"
+
class="p-1 w-full text-sm"
+
/>
+
</actor-typeahead>
+
{{ else }}
+
<input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}">
{{ end }}
-
<input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}">
{{ end }}
{{ define "nullTypeInput" }}
+1 -16
appview/pages/templates/repo/fragments/participants.html
···
<span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span>
<span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span>
</div>
-
<div class="flex items-center -space-x-3 mt-2">
-
{{ $c := "z-50 z-40 z-30 z-20 z-10" }}
-
{{ range $i, $p := $ps }}
-
<img
-
src="{{ tinyAvatar . }}"
-
alt=""
-
class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0"
-
/>
-
{{ end }}
-
-
{{ if gt (len $all) 5 }}
-
<span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
-
+{{ sub (len $all) 5 }}
-
</span>
-
{{ end }}
-
</div>
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }}
</div>
{{ end }}
+31 -9
appview/pages/templates/repo/index.html
···
{{ end }}
<div class="flex items-center justify-between pb-5">
{{ block "branchSelector" . }}{{ end }}
-
<div class="flex md:hidden items-center gap-2">
+
<div class="flex md:hidden items-center gap-3">
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold">
{{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }}
</a>
···
<div class="px-4 py-2 border-b border-gray-200 dark:border-gray-600 flex items-center gap-4 flex-wrap">
{{ range $value := .Languages }}
<div
-
class="flex flex-grow items-center gap-2 text-xs align-items-center justify-center"
+
class="flex items-center gap-2 text-xs align-items-center justify-center"
>
{{ template "repo/fragments/colorBall" (dict "color" (langColor $value.Name)) }}
<div>{{ or $value.Name "Other" }}
···
{{ define "branchSelector" }}
<div class="flex gap-2 items-center justify-between w-full">
-
<div class="flex gap-2 items-center">
+
<div class="flex gap-2 items-stretch">
<select
onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)"
class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700"
···
<span
class="mx-1 before:content-['·'] before:select-none"
></span>
-
<span>
-
{{ $did := index $.EmailToDid .Author.Email }}
-
<a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}"
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline"
-
>{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a>
-
</span>
+
{{ template "attribution" (list . $.EmailToDid) }}
<div class="inline-block px-1 select-none after:content-['·']"></div>
{{ template "repo/fragments/time" .Committer.When }}
···
{{ end }}
</div>
</div>
+
{{ end }}
+
+
{{ define "attribution" }}
+
{{ $commit := index . 0 }}
+
{{ $map := index . 1 }}
+
<span class="flex items-center">
+
{{ $author := index $map $commit.Author.Email }}
+
{{ $coauthors := $commit.CoAuthors }}
+
{{ $all := list }}
+
+
{{ if $author }}
+
{{ $all = append $all $author }}
+
{{ end }}
+
{{ range $coauthors }}
+
{{ $co := index $map .Email }}
+
{{ if $co }}
+
{{ $all = append $all $co }}
+
{{ end }}
+
{{ end }}
+
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
+
class="no-underline hover:underline">
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
+
</a>
+
</span>
{{ end }}
{{ define "branchList" }}
+2 -2
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
···
{{ end }}
{{ define "timestamp" }}
-
<a href="#{{ .Comment.Id }}"
+
<a href="#comment-{{ .Comment.Id }}"
class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-400 hover:underline no-underline"
-
id="{{ .Comment.Id }}">
+
id="comment-{{ .Comment.Id }}">
{{ if .Comment.Deleted }}
{{ template "repo/fragments/shortTimeAgo" .Comment.Deleted }}
{{ else if .Comment.Edited }}
+3
appview/pages/templates/repo/issues/issue.html
···
"Subject" $.Issue.AtUri
"State" $.Issue.Labels) }}
{{ template "repo/fragments/participants" $.Issue.Participants }}
+
{{ template "repo/fragments/backlinks"
+
(dict "RepoInfo" $.RepoInfo
+
"Backlinks" $.Backlinks) }}
{{ template "repo/fragments/externalLinkPanel" $.Issue.AtUri }}
</div>
</div>
+97 -27
appview/pages/templates/repo/issues/issues.html
···
<input type="hidden" name="state" value="{{ if .FilteringByOpen }}open{{ else }}closed{{ end }}">
<div class="flex-1 flex relative">
<input
+
id="search-q"
class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer"
type="text"
name="q"
···
</button>
</form>
<div class="sm:row-start-1">
-
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }}
+
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }}
</div>
<a
href="/{{ .RepoInfo.FullName }}/issues/new"
···
<div class="mt-2">
{{ template "repo/issues/fragments/issueListing" (dict "Issues" .Issues "RepoPrefix" .RepoInfo.FullName "LabelDefs" .LabelDefs) }}
</div>
-
{{ block "pagination" . }} {{ end }}
+
{{if gt .IssueCount .Page.Limit }}
+
{{ block "pagination" . }} {{ end }}
+
{{ end }}
{{ end }}
{{ define "pagination" }}
-
<div class="flex justify-end mt-4 gap-2">
-
{{ $currentState := "closed" }}
-
{{ if .FilteringByOpen }}
-
{{ $currentState = "open" }}
-
{{ end }}
+
<div class="flex justify-center items-center mt-4 gap-2">
+
{{ $currentState := "closed" }}
+
{{ if .FilteringByOpen }}
+
{{ $currentState = "open" }}
+
{{ end }}
+
+
{{ $prev := .Page.Previous.Offset }}
+
{{ $next := .Page.Next.Offset }}
+
{{ $lastPage := sub .IssueCount (mod .IssueCount .Page.Limit) }}
+
<a
+
class="
+
btn flex items-center gap-2 no-underline hover:no-underline
+
dark:text-white dark:hover:bg-gray-700
+
{{ if le .Page.Offset 0 }}
+
cursor-not-allowed opacity-50
+
{{ end }}
+
"
{{ if gt .Page.Offset 0 }}
-
{{ $prev := .Page.Previous }}
-
<a
-
class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700"
-
hx-boost="true"
-
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev.Offset }}&limit={{ $prev.Limit }}"
-
>
-
{{ i "chevron-left" "w-4 h-4" }}
-
previous
-
</a>
-
{{ else }}
-
<div></div>
+
hx-boost="true"
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}"
{{ end }}
+
>
+
{{ i "chevron-left" "w-4 h-4" }}
+
previous
+
</a>
+
<!-- dont show first page if current page is first page -->
+
{{ if gt .Page.Offset 0 }}
+
<a
+
hx-boost="true"
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset=0&limit={{ .Page.Limit }}"
+
>
+
1
+
</a>
+
{{ end }}
+
+
<!-- if previous page is not first or second page (prev > limit) -->
+
{{ if gt $prev .Page.Limit }}
+
<span>...</span>
+
{{ end }}
+
+
<!-- if previous page is not the first page -->
+
{{ if gt $prev 0 }}
+
<a
+
hx-boost="true"
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}"
+
>
+
{{ add (div $prev .Page.Limit) 1 }}
+
</a>
+
{{ end }}
+
+
<!-- current page. this is always visible -->
+
<span class="font-bold">
+
{{ add (div .Page.Offset .Page.Limit) 1 }}
+
</span>
+
+
<!-- if next page is not last page -->
+
{{ if lt $next $lastPage }}
+
<a
+
hx-boost="true"
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}"
+
>
+
{{ add (div $next .Page.Limit) 1 }}
+
</a>
+
{{ end }}
+
+
<!-- if next page is not second last or last page (next < issues - 2 * limit) -->
+
{{ if lt ($next) (sub .IssueCount (mul (2) .Page.Limit)) }}
+
<span>...</span>
+
{{ end }}
+
+
<!-- if its not the last page -->
+
{{ if lt .Page.Offset $lastPage }}
+
<a
+
hx-boost="true"
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $lastPage }}&limit={{ .Page.Limit }}"
+
>
+
{{ add (div $lastPage .Page.Limit) 1 }}
+
</a>
+
{{ end }}
+
+
<a
+
class="
+
btn flex items-center gap-2 no-underline hover:no-underline
+
dark:text-white dark:hover:bg-gray-700
+
{{ if ne (len .Issues) .Page.Limit }}
+
cursor-not-allowed opacity-50
+
{{ end }}
+
"
{{ if eq (len .Issues) .Page.Limit }}
-
{{ $next := .Page.Next }}
-
<a
-
class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700"
-
hx-boost="true"
-
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next.Offset }}&limit={{ $next.Limit }}"
-
>
-
next
-
{{ i "chevron-right" "w-4 h-4" }}
-
</a>
+
hx-boost="true"
+
href="/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}"
{{ end }}
+
>
+
next
+
{{ i "chevron-right" "w-4 h-4" }}
+
</a>
</div>
{{ end }}
+40 -23
appview/pages/templates/repo/log.html
···
<div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700">
{{ $grid := "grid grid-cols-14 gap-4" }}
<div class="{{ $grid }}">
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div>
+
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div>
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div>
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div>
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div>
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div>
</div>
{{ range $index, $commit := .Commits }}
{{ $messageParts := splitN $commit.Message "\n\n" 2 }}
<div class="{{ $grid }} py-3">
-
<div class="align-top truncate col-span-2">
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
-
{{ if $did }}
-
{{ template "user/fragments/picHandleLink" $did }}
-
{{ else }}
-
<a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a>
-
{{ end }}
+
<div class="align-top col-span-3">
+
{{ template "attribution" (list $commit $.EmailToDid) }}
</div>
<div class="align-top font-mono flex items-start col-span-3">
{{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }}
···
<div class="align-top col-span-6">
<div>
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a>
+
{{ if gt (len $messageParts) 1 }}
<button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button>
{{ end }}
···
</span>
{{ end }}
{{ end }}
+
+
<!-- ci status -->
+
<span class="text-xs">
+
{{ $pipeline := index $.Pipelines .Hash.String }}
+
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
+
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
+
{{ end }}
+
</span>
</div>
{{ if gt (len $messageParts) 1 }}
<p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p>
{{ end }}
-
</div>
-
<div class="align-top col-span-1">
-
<!-- ci status -->
-
{{ $pipeline := index $.Pipelines .Hash.String }}
-
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
-
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
-
{{ end }}
</div>
<div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div>
</div>
···
</a>
</span>
<span class="mx-2 before:content-['·'] before:select-none"></span>
-
<span>
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
-
<a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline">
-
{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }}
-
</a>
-
</span>
+
{{ template "attribution" (list $commit $.EmailToDid) }}
<div class="inline-block px-1 select-none after:content-['·']"></div>
<span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span>
···
</div>
</section>
+
{{ end }}
+
+
{{ define "attribution" }}
+
{{ $commit := index . 0 }}
+
{{ $map := index . 1 }}
+
<span class="flex items-center gap-1">
+
{{ $author := index $map $commit.Author.Email }}
+
{{ $coauthors := $commit.CoAuthors }}
+
{{ $all := list }}
+
+
{{ if $author }}
+
{{ $all = append $all $author }}
+
{{ end }}
+
{{ range $coauthors }}
+
{{ $co := index $map .Email }}
+
{{ if $co }}
+
{{ $all = append $all $co }}
+
{{ end }}
+
{{ end }}
+
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
+
class="no-underline hover:underline">
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
+
</a>
+
</span>
{{ end }}
{{ define "repoAfter" }}
+1 -1
appview/pages/templates/repo/new.html
···
</div>
<p class="text-sm text-gray-500 dark:text-gray-400 mt-1">
A knot hosts repository data and handles Git operations.
-
You can also <a href="/knots" class="underline">register your own knot</a>.
+
You can also <a href="/settings/knots" class="underline">register your own knot</a>.
</p>
</div>
{{ end }}
+1 -1
appview/pages/templates/repo/pulls/patch.html
···
{{ end }}
{{ define "contentAfter" }}
-
{{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }}
+
{{ template "repo/fragments/diff" (list .Diff .DiffOpts) }}
{{end}}
{{ define "contentAfterLeft" }}
+3
appview/pages/templates/repo/pulls/pull.html
···
"Subject" $.Pull.AtUri
"State" $.Pull.Labels) }}
{{ template "repo/fragments/participants" $.Pull.Participants }}
+
{{ template "repo/fragments/backlinks"
+
(dict "RepoInfo" $.RepoInfo
+
"Backlinks" $.Backlinks) }}
{{ template "repo/fragments/externalLinkPanel" $.Pull.AtUri }}
</div>
</div>
+2 -1
appview/pages/templates/repo/pulls/pulls.html
···
<input type="hidden" name="state" value="{{ .FilteringBy.String }}">
<div class="flex-1 flex relative">
<input
+
id="search-q"
class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer"
type="text"
name="q"
···
</button>
</form>
<div class="sm:row-start-1">
-
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }}
+
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }}
</div>
<a
href="/{{ .RepoInfo.FullName }}/pulls/new"
+5 -4
appview/pages/templates/repo/settings/access.html
···
{{ template "addCollaboratorButton" . }}
{{ end }}
{{ range .Collaborators }}
+
{{ $handle := resolve .Did }}
<div class="border border-gray-200 dark:border-gray-700 rounded p-4">
<div class="flex items-center gap-3">
<img
-
src="{{ fullAvatar .Handle }}"
-
alt="{{ .Handle }}"
+
src="{{ fullAvatar $handle }}"
+
alt="{{ $handle }}"
class="rounded-full h-10 w-10 border border-gray-300 dark:border-gray-600 flex-shrink-0"/>
<div class="flex-1 min-w-0">
-
<a href="/{{ .Handle }}" class="block truncate">
-
{{ didOrHandle .Did .Handle }}
+
<a href="/{{ $handle }}" class="block truncate">
+
{{ $handle }}
</a>
<p class="text-sm text-gray-500 dark:text-gray-400">{{ .Role }}</p>
</div>
+22 -6
appview/pages/templates/spindles/dashboard.html
···
-
{{ define "title" }}{{.Spindle.Instance}} &middot; spindles{{ end }}
+
{{ define "title" }}{{.Spindle.Instance}} &middot; {{ .Tab }} settings{{ end }}
{{ define "content" }}
-
<div class="px-6 py-4">
+
<div class="p-6">
+
<p class="text-xl font-bold dark:text-white">Settings</p>
+
</div>
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
+
<div class="col-span-1">
+
{{ template "user/settings/fragments/sidebar" . }}
+
</div>
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
+
{{ template "spindleDash" . }}
+
</div>
+
</section>
+
</div>
+
{{ end }}
+
+
{{ define "spindleDash" }}
+
<div>
<div class="flex justify-between items-center">
-
<h1 class="text-xl font-bold dark:text-white">{{ .Spindle.Instance }}</h1>
+
<h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} &middot; {{ .Spindle.Instance }}</h2>
<div id="right-side" class="flex gap-2">
{{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }}
{{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Spindle.Owner) }}
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Delete spindle"
-
hx-delete="/spindles/{{ .Instance }}"
+
hx-delete="/settings/spindles/{{ .Instance }}"
hx-swap="outerHTML"
hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?"
hx-headers='{"shouldRedirect": "true"}'
···
<button
class="btn gap-2 group"
title="Retry spindle verification"
-
hx-post="/spindles/{{ .Instance }}/retry"
+
hx-post="/settings/spindles/{{ .Instance }}/retry"
hx-swap="none"
hx-headers='{"shouldRefresh": "true"}'
>
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Remove member"
-
hx-post="/spindles/{{ $root.Spindle.Instance }}/remove"
+
hx-post="/settings/spindles/{{ $root.Spindle.Instance }}/remove"
hx-swap="none"
hx-vals='{"member": "{{$member}}" }'
hx-confirm="Are you sure you want to remove {{ resolve $member }} from this instance?"
+1 -1
appview/pages/templates/spindles/fragments/addMemberModal.html
···
{{ define "addSpindleMemberPopover" }}
<form
-
hx-post="/spindles/{{ .Instance }}/add"
+
hx-post="/settings/spindles/{{ .Instance }}/add"
hx-indicator="#spinner"
hx-swap="none"
class="flex flex-col gap-2"
+3 -3
appview/pages/templates/spindles/fragments/spindleListing.html
···
{{ define "spindleLeftSide" }}
{{ if .Verified }}
-
<a href="/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
+
<a href="/settings/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
{{ i "hard-drive" "w-4 h-4" }}
<span class="hover:underline">
{{ .Instance }}
···
<button
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
title="Delete spindle"
-
hx-delete="/spindles/{{ .Instance }}"
+
hx-delete="/settings/spindles/{{ .Instance }}"
hx-swap="outerHTML"
hx-target="#spindle-{{.Id}}"
hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?"
···
<button
class="btn gap-2 group"
title="Retry spindle verification"
-
hx-post="/spindles/{{ .Instance }}/retry"
+
hx-post="/settings/spindles/{{ .Instance }}/retry"
hx-swap="none"
hx-target="#spindle-{{.Id}}"
>
+90 -59
appview/pages/templates/spindles/index.html
···
-
{{ define "title" }}spindles{{ end }}
+
{{ define "title" }}{{ .Tab }} settings{{ end }}
{{ define "content" }}
-
<div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom">
-
<h1 class="text-xl font-bold dark:text-white">Spindles</h1>
-
<span class="flex items-center gap-1">
-
{{ i "book" "w-3 h-3" }}
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">docs</a>
-
</span>
+
<div class="p-6">
+
<p class="text-xl font-bold dark:text-white">Settings</p>
+
</div>
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
+
<div class="col-span-1">
+
{{ template "user/settings/fragments/sidebar" . }}
+
</div>
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
+
{{ template "spindleList" . }}
+
</div>
+
</section>
+
</div>
+
{{ end }}
+
+
{{ define "spindleList" }}
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center">
+
<div class="col-span-1 md:col-span-2">
+
<h2 class="text-sm pb-2 uppercase font-bold">Spindle</h2>
+
{{ block "about" . }} {{ end }}
+
</div>
+
<div class="col-span-1 md:col-span-1 md:justify-self-end">
+
{{ template "docsButton" . }}
+
</div>
</div>
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
+
<section>
<div class="flex flex-col gap-6">
-
{{ block "about" . }} {{ end }}
{{ block "list" . }} {{ end }}
{{ block "register" . }} {{ end }}
</div>
···
{{ define "about" }}
<section class="rounded flex items-center gap-2">
-
<p class="text-gray-500 dark:text-gray-400">
-
Spindles are small CI runners.
-
</p>
+
<p class="text-gray-500 dark:text-gray-400">
+
Spindles are small CI runners.
+
</p>
</section>
{{ end }}
{{ define "list" }}
-
<section class="rounded w-full flex flex-col gap-2">
-
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2>
-
<div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full">
-
{{ range $spindle := .Spindles }}
-
{{ template "spindles/fragments/spindleListing" . }}
-
{{ else }}
-
<div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500">
-
no spindles registered yet
-
</div>
-
{{ end }}
+
<section class="rounded w-full flex flex-col gap-2">
+
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2>
+
<div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full">
+
{{ range $spindle := .Spindles }}
+
{{ template "spindles/fragments/spindleListing" . }}
+
{{ else }}
+
<div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500">
+
no spindles registered yet
</div>
-
<div id="operation-error" class="text-red-500 dark:text-red-400"></div>
-
</section>
+
{{ end }}
+
</div>
+
<div id="operation-error" class="text-red-500 dark:text-red-400"></div>
+
</section>
{{ end }}
{{ define "register" }}
-
<section class="rounded w-full lg:w-fit flex flex-col gap-2">
-
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2>
-
<p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p>
-
<form
-
hx-post="/spindles/register"
-
class="max-w-2xl mb-2 space-y-4"
-
hx-indicator="#register-button"
-
hx-swap="none"
-
>
-
<div class="flex gap-2">
-
<input
-
type="text"
-
id="instance"
-
name="instance"
-
placeholder="spindle.example.com"
-
required
-
class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
-
>
-
<button
-
type="submit"
-
id="register-button"
-
class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group"
-
>
-
<span class="inline-flex items-center gap-2">
-
{{ i "plus" "w-4 h-4" }}
-
register
-
</span>
-
<span class="pl-2 hidden group-[.htmx-request]:inline">
-
{{ i "loader-circle" "w-4 h-4 animate-spin" }}
-
</span>
-
</button>
-
</div>
+
<section class="rounded w-full lg:w-fit flex flex-col gap-2">
+
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2>
+
<p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p>
+
<form
+
hx-post="/settings/spindles/register"
+
class="max-w-2xl mb-2 space-y-4"
+
hx-indicator="#register-button"
+
hx-swap="none"
+
>
+
<div class="flex gap-2">
+
<input
+
type="text"
+
id="instance"
+
name="instance"
+
placeholder="spindle.example.com"
+
required
+
class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
+
>
+
<button
+
type="submit"
+
id="register-button"
+
class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group"
+
>
+
<span class="inline-flex items-center gap-2">
+
{{ i "plus" "w-4 h-4" }}
+
register
+
</span>
+
<span class="pl-2 hidden group-[.htmx-request]:inline">
+
{{ i "loader-circle" "w-4 h-4 animate-spin" }}
+
</span>
+
</button>
+
</div>
-
<div id="register-error" class="dark:text-red-400"></div>
-
</form>
+
<div id="register-error" class="dark:text-red-400"></div>
+
</form>
+
+
</section>
+
{{ end }}
-
</section>
+
{{ define "docsButton" }}
+
<a
+
class="btn flex items-center gap-2"
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+
{{ i "book" "size-4" }}
+
docs
+
</a>
+
<div
+
id="add-email-modal"
+
popover
+
class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50">
+
</div>
{{ end }}
+6 -5
appview/pages/templates/strings/dashboard.html
···
-
{{ define "title" }}strings by {{ or .Card.UserHandle .Card.UserDid }}{{ end }}
+
{{ define "title" }}strings by {{ resolve .Card.UserDid }}{{ end }}
{{ define "extrameta" }}
-
<meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
+
{{ $handle := resolve .Card.UserDid }}
+
<meta property="og:title" content="{{ $handle }}" />
<meta property="og:type" content="profile" />
-
<meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}" />
-
<meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
+
<meta property="og:url" content="https://tangled.org/{{ $handle }}" />
+
<meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" />
{{ end }}
···
{{ $s := index . 1 }}
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800">
<div class="font-medium dark:text-white flex gap-2 items-center">
-
<a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
+
<a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
</div>
{{ with $s.Description }}
<div class="text-gray-600 dark:text-gray-300 text-sm">
+4 -4
appview/pages/templates/strings/string.html
···
-
{{ define "title" }}{{ .String.Filename }} · by {{ didOrHandle .Owner.DID.String .Owner.Handle.String }}{{ end }}
+
{{ define "title" }}{{ .String.Filename }} · by {{ resolve .Owner.DID.String }}{{ end }}
{{ define "extrameta" }}
-
{{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }}
+
{{ $ownerId := resolve .Owner.DID.String }}
<meta property="og:title" content="{{ .String.Filename }} · by {{ $ownerId }}" />
<meta property="og:type" content="object" />
<meta property="og:url" content="https://tangled.org/strings/{{ $ownerId }}/{{ .String.Rkey }}" />
···
{{ end }}
{{ define "content" }}
-
{{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }}
+
{{ $ownerId := resolve .Owner.DID.String }}
<section id="string-header" class="mb-4 py-2 px-6 dark:text-white">
<div class="text-lg flex items-center justify-between">
<div>
···
<span class="select-none">/</span>
<a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a>
</div>
-
<div class="flex gap-2 text-base">
+
<div class="flex gap-2 items-stretch text-base">
{{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }}
<a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group"
hx-boost="true"
+4 -2
appview/pages/templates/user/followers.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · followers {{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }} · followers {{ end }}
{{ define "profileContent" }}
<div id="all-followers" class="md:col-span-8 order-2 md:order-2">
···
"FollowersCount" .FollowersCount
"FollowingCount" .FollowingCount) }}
{{ else }}
-
<p class="px-6 dark:text-white">This user does not have any followers yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span>This user does not have any followers yet.</span>
+
</div>
{{ end }}
</div>
{{ end }}
+4 -2
appview/pages/templates/user/following.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · following {{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }} · following {{ end }}
{{ define "profileContent" }}
<div id="all-following" class="md:col-span-8 order-2 md:order-2">
···
"FollowersCount" .FollowersCount
"FollowingCount" .FollowingCount) }}
{{ else }}
-
<p class="px-6 dark:text-white">This user does not follow anyone yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span>This user does not follow anyone yet.</span>
+
</div>
{{ end }}
</div>
{{ end }}
+2 -2
appview/pages/templates/user/fragments/followCard.html
···
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" />
</div>
-
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full">
+
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0">
<div class="flex-1 min-h-0 justify-around flex flex-col">
<a href="/{{ $userIdent }}">
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span>
</a>
{{ with .Profile }}
-
<p class="text-sm pb-2 md:pb-2">{{.Description}}</p>
+
<p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p>
{{ end }}
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full">
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+1 -1
appview/pages/templates/user/fragments/profileCard.html
···
{{ define "user/fragments/profileCard" }}
-
{{ $userIdent := didOrHandle .UserDid .UserHandle }}
+
{{ $userIdent := resolve .UserDid }}
<div class="grid grid-cols-3 md:grid-cols-1 gap-1 items-center">
<div id="avatar" class="col-span-1 flex justify-center items-center">
<div class="w-3/4 aspect-square relative">
+22 -4
appview/pages/templates/user/overview.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }}{{ end }}
{{ define "profileContent" }}
<div id="all-repos" class="md:col-span-4 order-2 md:order-2">
···
<p class="text-sm font-bold px-2 pb-4 dark:text-white">ACTIVITY</p>
<div class="flex flex-col gap-4 relative">
{{ if .ProfileTimeline.IsEmpty }}
-
<p class="dark:text-white">This user does not have any activity yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span class="flex items-center gap-2">
+
This user does not have any activity yet.
+
</span>
+
</div>
{{ end }}
{{ with .ProfileTimeline }}
···
</p>
<div class="flex flex-col gap-1">
+
{{ block "commits" .Commits }} {{ end }}
{{ block "repoEvents" .RepoEvents }} {{ end }}
{{ block "issueEvents" .IssueEvents }} {{ end }}
{{ block "pullEvents" .PullEvents }} {{ end }}
···
{{ end }}
{{ end }}
</div>
+
{{ end }}
+
+
{{ define "commits" }}
+
{{ if . }}
+
<div class="flex flex-wrap items-center gap-1">
+
{{ i "git-commit-horizontal" "size-5" }}
+
created {{ . }} commits
+
</div>
+
{{ end }}
{{ end }}
{{ define "repoEvents" }}
···
{{ define "ownRepos" }}
<div>
<div class="text-sm font-bold px-2 pb-4 dark:text-white flex items-center gap-2">
-
<a href="/@{{ or $.Card.UserHandle $.Card.UserDid }}?tab=repos"
+
<a href="/{{ resolve $.Card.UserDid }}?tab=repos"
class="flex text-black dark:text-white items-center gap-2 no-underline hover:no-underline group">
<span>PINNED REPOS</span>
</a>
···
{{ template "user/fragments/repoCard" (list $ . false) }}
</div>
{{ else }}
-
<p class="dark:text-white">This user does not have any pinned repos.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span class="flex items-center gap-2">
+
This user does not have any pinned repos.
+
</span>
+
</div>
{{ end }}
</div>
</div>
+4 -2
appview/pages/templates/user/repos.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · repos {{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }} · repos {{ end }}
{{ define "profileContent" }}
<div id="all-repos" class="md:col-span-8 order-2 md:order-2">
···
{{ template "user/fragments/repoCard" (list $ . false) }}
</div>
{{ else }}
-
<p class="px-6 dark:text-white">This user does not have any repos yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span>This user does not have any repos yet.</span>
+
</div>
{{ end }}
</div>
{{ end }}
+1 -1
appview/pages/templates/user/settings/notifications.html
···
</div>
</div>
<label class="flex items-center gap-2">
-
<input type="checkbox" name="mentioned" {{if .Preferences.UserMentioned}}checked{{end}}>
+
<input type="checkbox" name="user_mentioned" {{if .Preferences.UserMentioned}}checked{{end}}>
</label>
</div>
+9 -6
appview/pages/templates/user/signup.html
···
page to complete your registration.
</span>
<div class="w-full mt-4 text-center">
-
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div>
+
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div>
</div>
<button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" >
<span>join now</span>
</button>
+
<p class="text-sm text-gray-500">
+
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
+
</p>
+
+
<p id="signup-msg" class="error w-full"></p>
+
<p class="text-sm text-gray-500 pt-4">
+
By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>.
+
</p>
</form>
-
<p class="text-sm text-gray-500">
-
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
-
</p>
-
-
<p id="signup-msg" class="error w-full"></p>
</main>
</body>
</html>
+4 -2
appview/pages/templates/user/starred.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · repos {{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }} · repos {{ end }}
{{ define "profileContent" }}
<div id="all-repos" class="md:col-span-8 order-2 md:order-2">
···
{{ template "user/fragments/repoCard" (list $ . true) }}
</div>
{{ else }}
-
<p class="px-6 dark:text-white">This user does not have any starred repos yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span>This user does not have any starred repos yet.</span>
+
</div>
{{ end }}
</div>
{{ end }}
+5 -3
appview/pages/templates/user/strings.html
···
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · strings {{ end }}
+
{{ define "title" }}{{ resolve .Card.UserDid }} · strings {{ end }}
{{ define "profileContent" }}
<div id="all-strings" class="md:col-span-8 order-2 md:order-2">
···
{{ template "singleString" (list $ .) }}
</div>
{{ else }}
-
<p class="px-6 dark:text-white">This user does not have any strings yet.</p>
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+
<span>This user does not have any strings yet.</span>
+
</div>
{{ end }}
</div>
{{ end }}
···
{{ $s := index . 1 }}
<div class="py-4 px-6 rounded bg-white dark:bg-gray-800">
<div class="font-medium dark:text-white flex gap-2 items-center">
-
<a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
+
<a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
</div>
{{ with $s.Description }}
<div class="text-gray-600 dark:text-gray-300 text-sm">
+16 -22
appview/pipelines/pipelines.go
···
"tangled.org/core/appview/reporesolver"
"tangled.org/core/eventconsumer"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
spindlemodel "tangled.org/core/spindle/models"
···
return
}
-
repoInfo := f.RepoInfo(user)
-
ps, err := db.GetPipelineStatuses(
p.db,
30,
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
+
orm.FilterEq("repo_owner", f.Did),
+
orm.FilterEq("repo_name", f.Name),
+
orm.FilterEq("knot", f.Knot),
)
if err != nil {
l.Error("failed to query db", "err", err)
···
p.pages.Pipelines(w, pages.PipelinesParams{
LoggedInUser: user,
-
RepoInfo: repoInfo,
+
RepoInfo: p.repoResolver.GetRepoInfo(r, user),
Pipelines: ps,
})
}
···
l.Error("failed to get repo and knot", "err", err)
return
}
-
-
repoInfo := f.RepoInfo(user)
pipelineId := chi.URLParam(r, "pipeline")
if pipelineId == "" {
···
ps, err := db.GetPipelineStatuses(
p.db,
1,
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
-
db.FilterEq("id", pipelineId),
+
orm.FilterEq("repo_owner", f.Did),
+
orm.FilterEq("repo_name", f.Name),
+
orm.FilterEq("knot", f.Knot),
+
orm.FilterEq("id", pipelineId),
)
if err != nil {
l.Error("failed to query db", "err", err)
···
p.pages.Workflow(w, pages.WorkflowParams{
LoggedInUser: user,
-
RepoInfo: repoInfo,
+
RepoInfo: p.repoResolver.GetRepoInfo(r, user),
Pipeline: singlePipeline,
Workflow: workflow,
})
···
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
-
user := p.oauth.GetUser(r)
f, err := p.repoResolver.Resolve(r)
if err != nil {
l.Error("failed to get repo and knot", "err", err)
···
return
}
-
repoInfo := f.RepoInfo(user)
-
pipelineId := chi.URLParam(r, "pipeline")
workflow := chi.URLParam(r, "workflow")
if pipelineId == "" || workflow == "" {
···
ps, err := db.GetPipelineStatuses(
p.db,
1,
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
-
db.FilterEq("id", pipelineId),
+
orm.FilterEq("repo_owner", f.Did),
+
orm.FilterEq("repo_name", f.Name),
+
orm.FilterEq("knot", f.Knot),
+
orm.FilterEq("id", pipelineId),
)
if err != nil || len(ps) != 1 {
l.Error("pipeline query failed", "err", err, "count", len(ps))
···
}
singlePipeline := ps[0]
-
spindle := repoInfo.Spindle
-
knot := repoInfo.Knot
+
spindle := f.Spindle
+
knot := f.Knot
rkey := singlePipeline.Rkey
if spindle == "" || knot == "" || rkey == "" {
+3 -2
appview/pulls/opengraph.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/ogcard"
+
"tangled.org/core/orm"
"tangled.org/core/patchutil"
"tangled.org/core/types"
)
···
}
// Get comment count from database
-
comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID))
+
comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID))
if err != nil {
log.Printf("failed to get pull comments: %v", err)
}
···
filesChanged = niceDiff.Stat.FilesChanged
}
-
card, err := s.drawPullSummaryCard(pull, &f.Repo, commentCount, diffStats, filesChanged)
+
card, err := s.drawPullSummaryCard(pull, f, commentCount, diffStats, filesChanged)
if err != nil {
log.Println("failed to draw pull summary card", err)
http.Error(w, "failed to draw pull summary card", http.StatusInternalServerError)
+146 -142
appview/pulls/pulls.go
···
package pulls
import (
+
"context"
"database/sql"
"encoding/json"
"errors"
···
"tangled.org/core/appview/config"
"tangled.org/core/appview/db"
pulls_indexer "tangled.org/core/appview/indexer/pulls"
+
"tangled.org/core/appview/mentions"
"tangled.org/core/appview/models"
"tangled.org/core/appview/notify"
"tangled.org/core/appview/oauth"
"tangled.org/core/appview/pages"
"tangled.org/core/appview/pages/markup"
+
"tangled.org/core/appview/pages/repoinfo"
"tangled.org/core/appview/reporesolver"
"tangled.org/core/appview/validator"
"tangled.org/core/appview/xrpcclient"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/patchutil"
"tangled.org/core/rbac"
"tangled.org/core/tid"
···
)
type Pulls struct {
-
oauth *oauth.OAuth
-
repoResolver *reporesolver.RepoResolver
-
pages *pages.Pages
-
idResolver *idresolver.Resolver
-
db *db.DB
-
config *config.Config
-
notifier notify.Notifier
-
enforcer *rbac.Enforcer
-
logger *slog.Logger
-
validator *validator.Validator
-
indexer *pulls_indexer.Indexer
+
oauth *oauth.OAuth
+
repoResolver *reporesolver.RepoResolver
+
pages *pages.Pages
+
idResolver *idresolver.Resolver
+
mentionsResolver *mentions.Resolver
+
db *db.DB
+
config *config.Config
+
notifier notify.Notifier
+
enforcer *rbac.Enforcer
+
logger *slog.Logger
+
validator *validator.Validator
+
indexer *pulls_indexer.Indexer
}
func New(
···
repoResolver *reporesolver.RepoResolver,
pages *pages.Pages,
resolver *idresolver.Resolver,
+
mentionsResolver *mentions.Resolver,
db *db.DB,
config *config.Config,
notifier notify.Notifier,
···
logger *slog.Logger,
) *Pulls {
return &Pulls{
-
oauth: oauth,
-
repoResolver: repoResolver,
-
pages: pages,
-
idResolver: resolver,
-
db: db,
-
config: config,
-
notifier: notifier,
-
enforcer: enforcer,
-
logger: logger,
-
validator: validator,
-
indexer: indexer,
+
oauth: oauth,
+
repoResolver: repoResolver,
+
pages: pages,
+
idResolver: resolver,
+
mentionsResolver: mentionsResolver,
+
db: db,
+
config: config,
+
notifier: notifier,
+
enforcer: enforcer,
+
logger: logger,
+
validator: validator,
+
indexer: indexer,
}
}
···
s.pages.PullActionsFragment(w, pages.PullActionsParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
RoundNumber: roundNumber,
MergeCheck: mergeCheckResponse,
···
return
}
+
backlinks, err := db.GetBacklinks(s.db, pull.AtUri())
+
if err != nil {
+
log.Println("failed to get pull backlinks", err)
+
s.pages.Notice(w, "pull-error", "Failed to get pull. Try again later.")
+
return
+
}
+
// can be nil if this pull is not stacked
stack, _ := r.Context().Value("stack").(models.Stack)
abandonedPulls, _ := r.Context().Value("abandonedPulls").([]*models.Pull)
···
if user != nil && user.Did == pull.OwnerDid {
resubmitResult = s.resubmitCheck(r, f, pull, stack)
}
-
-
repoInfo := f.RepoInfo(user)
m := make(map[string]models.Pipeline)
···
ps, err := db.GetPipelineStatuses(
s.db,
len(shas),
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
-
db.FilterIn("sha", shas),
+
orm.FilterEq("repo_owner", f.Did),
+
orm.FilterEq("repo_name", f.Name),
+
orm.FilterEq("knot", f.Knot),
+
orm.FilterIn("sha", shas),
)
if err != nil {
log.Printf("failed to fetch pipeline statuses: %s", err)
···
labelDefs, err := db.GetLabelDefinitions(
s.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", tangled.RepoPullNSID),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", tangled.RepoPullNSID),
)
if err != nil {
log.Println("failed to fetch labels", err)
···
s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{
LoggedInUser: user,
-
RepoInfo: repoInfo,
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
Stack: stack,
AbandonedPulls: abandonedPulls,
+
Backlinks: backlinks,
BranchDeleteStatus: branchDeleteStatus,
MergeCheck: mergeCheckResponse,
ResubmitCheck: resubmitResult,
···
})
}
-
func (s *Pulls) mergeCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse {
+
func (s *Pulls) mergeCheck(r *http.Request, f *models.Repo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse {
if pull.State == models.PullMerged {
return types.MergeCheckResponse{}
}
···
r.Context(),
&xrpcc,
&tangled.RepoMergeCheck_Input{
-
Did: f.OwnerDid(),
+
Did: f.Did,
Name: f.Name,
Branch: pull.TargetBranch,
Patch: patch,
···
return result
}
-
func (s *Pulls) branchDeleteStatus(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull) *models.BranchDeleteStatus {
+
func (s *Pulls) branchDeleteStatus(r *http.Request, repo *models.Repo, pull *models.Pull) *models.BranchDeleteStatus {
if pull.State != models.PullMerged {
return nil
}
···
}
var branch string
-
var repo *models.Repo
// check if the branch exists
// NOTE: appview could cache branches/tags etc. for every repo by listening for gitRefUpdates
if pull.IsBranchBased() {
branch = pull.PullSource.Branch
-
repo = &f.Repo
} else if pull.IsForkBased() {
branch = pull.PullSource.Branch
repo = pull.PullSource.Repo
···
}
}
-
func (s *Pulls) resubmitCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) pages.ResubmitResult {
+
func (s *Pulls) resubmitCheck(r *http.Request, repo *models.Repo, pull *models.Pull, stack models.Stack) pages.ResubmitResult {
if pull.State == models.PullMerged || pull.State == models.PullDeleted || pull.PullSource == nil {
return pages.Unknown
}
···
repoName = sourceRepo.Name
} else {
// pulls within the same repo
-
knot = f.Knot
-
ownerDid = f.OwnerDid()
-
repoName = f.Name
+
knot = repo.Knot
+
ownerDid = repo.Did
+
repoName = repo.Name
}
scheme := "http"
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", ownerDid, repoName)
-
branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, repo)
+
didSlashName := fmt.Sprintf("%s/%s", ownerDid, repoName)
+
branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, didSlashName)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
log.Println("failed to call XRPC repo.branches", xrpcerr)
···
func (s *Pulls) RepoPullPatch(w http.ResponseWriter, r *http.Request) {
user := s.oauth.GetUser(r)
-
f, err := s.repoResolver.Resolve(r)
-
if err != nil {
-
log.Println("failed to get repo and knot", err)
-
return
-
}
var diffOpts types.DiffOpts
if d := r.URL.Query().Get("diff"); d == "split" {
···
s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
Stack: stack,
Round: roundIdInt,
···
func (s *Pulls) RepoPullInterdiff(w http.ResponseWriter, r *http.Request) {
user := s.oauth.GetUser(r)
-
f, err := s.repoResolver.Resolve(r)
-
if err != nil {
-
log.Println("failed to get repo and knot", err)
-
return
-
}
-
var diffOpts types.DiffOpts
if d := r.URL.Query().Get("diff"); d == "split" {
diffOpts.Split = true
···
s.pages.RepoPullInterdiffPage(w, pages.RepoPullInterdiffParams{
LoggedInUser: s.oauth.GetUser(r),
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
Round: roundIdInt,
Interdiff: interdiff,
···
pulls, err := db.GetPulls(
s.db,
-
db.FilterIn("id", ids),
+
orm.FilterIn("id", ids),
)
if err != nil {
log.Println("failed to get pulls", err)
···
}
pulls = pulls[:n]
-
repoInfo := f.RepoInfo(user)
ps, err := db.GetPipelineStatuses(
s.db,
len(shas),
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
-
db.FilterIn("sha", shas),
+
orm.FilterEq("repo_owner", f.Did),
+
orm.FilterEq("repo_name", f.Name),
+
orm.FilterEq("knot", f.Knot),
+
orm.FilterIn("sha", shas),
)
if err != nil {
log.Printf("failed to fetch pipeline statuses: %s", err)
···
labelDefs, err := db.GetLabelDefinitions(
s.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", tangled.RepoPullNSID),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", tangled.RepoPullNSID),
)
if err != nil {
log.Println("failed to fetch labels", err)
···
s.pages.RepoPulls(w, pages.RepoPullsParams{
LoggedInUser: s.oauth.GetUser(r),
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pulls: pulls,
LabelDefs: defs,
FilteringBy: state,
···
}
func (s *Pulls) PullComment(w http.ResponseWriter, r *http.Request) {
-
l := s.logger.With("handler", "PullComment")
user := s.oauth.GetUser(r)
f, err := s.repoResolver.Resolve(r)
if err != nil {
···
case http.MethodGet:
s.pages.PullNewCommentFragment(w, pages.PullNewCommentParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
RoundNumber: roundNumber,
})
···
s.pages.Notice(w, "pull", "Comment body is required")
return
}
+
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
// Start a transaction
tx, err := s.db.BeginTx(r.Context(), nil)
···
Body: body,
CommentAt: atResp.Uri,
SubmissionId: pull.Submissions[roundNumber].ID,
+
Mentions: mentions,
+
References: references,
}
// Create the pull comment in the database with the commentAt field
···
return
}
-
rawMentions := markup.FindUserMentions(comment.Body)
-
idents := s.idResolver.ResolveIdents(r.Context(), rawMentions)
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
-
var mentions []syntax.DID
-
for _, ident := range idents {
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
-
mentions = append(mentions, ident.DID)
-
}
-
}
s.notifier.NewPullComment(r.Context(), comment, mentions)
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", f.OwnerSlashRepo(), pull.PullId, commentId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, commentId))
return
}
}
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
s.pages.RepoNewPull(w, pages.RepoNewPullParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Branches: result.Branches,
Strategy: strategy,
SourceBranch: sourceBranch,
···
}
// Determine PR type based on input parameters
-
isPushAllowed := f.RepoInfo(user).Roles.IsPushAllowed()
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
+
isPushAllowed := roles.IsPushAllowed()
isBranchBased := isPushAllowed && sourceBranch != "" && fromFork == ""
isForkBased := fromFork != "" && sourceBranch != ""
isPatchBased := patch != "" && !isBranchBased && !isForkBased
···
func (s *Pulls) handleBranchBasedPull(
w http.ResponseWriter,
r *http.Request,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
user *oauth.User,
title,
body,
···
if !s.config.Core.Dev {
scheme = "https"
}
-
host := fmt.Sprintf("%s://%s", scheme, f.Knot)
+
host := fmt.Sprintf("%s://%s", scheme, repo.Knot)
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
-
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, targetBranch, sourceBranch)
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
+
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, didSlashRepo, targetBranch, sourceBranch)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
log.Println("failed to call XRPC repo.compare", xrpcerr)
···
Sha: comparison.Rev2,
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
-
func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) {
+
func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) {
if err := s.validator.ValidatePatch(&patch); err != nil {
s.logger.Error("patch validation failed", "err", err)
s.pages.Notice(w, "pull", "Invalid patch format. Please provide a valid diff.")
return
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked)
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked)
-
func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) {
+
func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) {
repoString := strings.SplitN(forkRepo, "/", 2)
forkOwnerDid := repoString[0]
repoName := repoString[1]
···
Sha: sourceRev,
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
func (s *Pulls) createPullRequest(
w http.ResponseWriter,
r *http.Request,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
user *oauth.User,
title, body, targetBranch string,
patch string,
···
s.createStackedPullRequest(
w,
r,
-
f,
+
repo,
user,
targetBranch,
patch,
···
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
+
rkey := tid.TID()
initialSubmission := models.PullSubmission{
Patch: patch,
···
Body: body,
TargetBranch: targetBranch,
OwnerDid: user.Did,
-
RepoAt: f.RepoAt(),
+
RepoAt: repo.RepoAt(),
Rkey: rkey,
+
Mentions: mentions,
+
References: references,
Submissions: []*models.PullSubmission{
&initialSubmission,
},
···
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
return
-
pullId, err := db.NextPullId(tx, f.RepoAt())
+
pullId, err := db.NextPullId(tx, repo.RepoAt())
if err != nil {
log.Println("failed to get pull id", err)
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
···
Val: &tangled.RepoPull{
Title: title,
Target: &tangled.RepoPull_Target{
-
Repo: string(f.RepoAt()),
+
Repo: string(repo.RepoAt()),
Branch: targetBranch,
},
Patch: patch,
···
s.notifier.NewPull(r.Context(), pull)
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pullId))
func (s *Pulls) createStackedPullRequest(
w http.ResponseWriter,
r *http.Request,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
user *oauth.User,
targetBranch string,
patch string,
···
// build a stack out of this patch
stackId := uuid.New()
-
stack, err := newStack(f, user, targetBranch, patch, pullSource, stackId.String())
+
stack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pullSource, stackId.String())
if err != nil {
log.Println("failed to create stack", err)
s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err))
···
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
return
+
if err = tx.Commit(); err != nil {
···
return
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", f.OwnerSlashRepo()))
+
// notify about each pull
+
//
+
// this is performed after tx.Commit, because it could result in a locked DB otherwise
+
for _, p := range stack {
+
s.notifier.NewPull(r.Context(), p)
+
}
+
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", ownerSlashRepo))
func (s *Pulls) ValidatePatch(w http.ResponseWriter, r *http.Request) {
···
func (s *Pulls) PatchUploadFragment(w http.ResponseWriter, r *http.Request) {
user := s.oauth.GetUser(r)
-
f, err := s.repoResolver.Resolve(r)
-
if err != nil {
-
log.Println("failed to get repo and knot", err)
-
return
-
}
s.pages.PullPatchUploadFragment(w, pages.PullPatchUploadParams{
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
})
···
Host: host,
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
s.pages.PullCompareBranchesFragment(w, pages.PullCompareBranchesParams{
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Branches: withoutDefault,
})
func (s *Pulls) CompareForksFragment(w http.ResponseWriter, r *http.Request) {
user := s.oauth.GetUser(r)
-
f, err := s.repoResolver.Resolve(r)
-
if err != nil {
-
log.Println("failed to get repo and knot", err)
-
return
-
}
forks, err := db.GetForksByDid(s.db, user.Did)
if err != nil {
···
s.pages.PullCompareForkFragment(w, pages.PullCompareForkParams{
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Forks: forks,
Selected: r.URL.Query().Get("fork"),
})
···
// fork repo
repo, err := db.GetRepo(
s.db,
-
db.FilterEq("did", forkOwnerDid),
-
db.FilterEq("name", forkName),
+
orm.FilterEq("did", forkOwnerDid),
+
orm.FilterEq("name", forkName),
if err != nil {
log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err)
···
Host: targetHost,
-
targetRepo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
targetRepo := fmt.Sprintf("%s/%s", f.Did, f.Name)
targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
})
s.pages.PullCompareForkBranchesFragment(w, pages.PullCompareForkBranchesParams{
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
SourceBranches: sourceBranches.Branches,
TargetBranches: targetBranches.Branches,
})
···
func (s *Pulls) ResubmitPull(w http.ResponseWriter, r *http.Request) {
user := s.oauth.GetUser(r)
-
f, err := s.repoResolver.Resolve(r)
-
if err != nil {
-
log.Println("failed to get repo and knot", err)
-
return
-
}
pull, ok := r.Context().Value("pull").(*models.Pull)
if !ok {
···
switch r.Method {
case http.MethodGet:
s.pages.PullResubmitFragment(w, pages.PullResubmitParams{
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
Pull: pull,
})
return
···
return
-
if !f.RepoInfo(user).Roles.IsPushAllowed() {
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
+
if !roles.IsPushAllowed() {
log.Println("unauthorized user")
w.WriteHeader(http.StatusUnauthorized)
return
···
Host: host,
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, pull.TargetBranch, pull.PullSource.Branch)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
func (s *Pulls) resubmitPullHelper(
w http.ResponseWriter,
r *http.Request,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
user *oauth.User,
pull *models.Pull,
patch string,
···
) {
if pull.IsStacked() {
log.Println("resubmitting stacked PR")
-
s.resubmitStackedPullHelper(w, r, f, user, pull, patch, pull.StackId)
+
s.resubmitStackedPullHelper(w, r, repo, user, pull, patch, pull.StackId)
return
···
Val: &tangled.RepoPull{
Title: pull.Title,
Target: &tangled.RepoPull_Target{
-
Repo: string(f.RepoAt()),
+
Repo: string(repo.RepoAt()),
Branch: pull.TargetBranch,
},
Patch: patch, // new patch
···
return
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
func (s *Pulls) resubmitStackedPullHelper(
w http.ResponseWriter,
r *http.Request,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
user *oauth.User,
pull *models.Pull,
patch string,
···
targetBranch := pull.TargetBranch
origStack, _ := r.Context().Value("stack").(models.Stack)
-
newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId)
+
newStack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pull.PullSource, stackId)
if err != nil {
log.Println("failed to create resubmitted stack", err)
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
tx,
p.ParentChangeId,
// these should be enough filters to be unique per-stack
-
db.FilterEq("repo_at", p.RepoAt.String()),
-
db.FilterEq("owner_did", p.OwnerDid),
-
db.FilterEq("change_id", p.ChangeId),
+
orm.FilterEq("repo_at", p.RepoAt.String()),
+
orm.FilterEq("owner_did", p.OwnerDid),
+
orm.FilterEq("change_id", p.ChangeId),
if err != nil {
···
return
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
func (s *Pulls) MergePull(w http.ResponseWriter, r *http.Request) {
···
authorName := ident.Handle.String()
mergeInput := &tangled.RepoMerge_Input{
-
Did: f.OwnerDid(),
+
Did: f.Did,
Name: f.Name,
Branch: pull.TargetBranch,
Patch: patch,
···
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
-
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) {
···
// auth filter: only owner or collaborators can close
-
roles := f.RolesInRepo(user)
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
isOwner := roles.IsOwner()
isCollaborator := roles.IsCollaborator()
isPullAuthor := user.Did == pull.OwnerDid
···
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
func (s *Pulls) ReopenPull(w http.ResponseWriter, r *http.Request) {
···
// auth filter: only owner or collaborators can close
-
roles := f.RolesInRepo(user)
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
isOwner := roles.IsOwner()
isCollaborator := roles.IsCollaborator()
isPullAuthor := user.Did == pull.OwnerDid
···
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
-
func newStack(f *reporesolver.ResolvedRepo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) {
+
func (s *Pulls) newStack(ctx context.Context, repo *models.Repo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) {
formatPatches, err := patchutil.ExtractPatches(patch)
if err != nil {
return nil, fmt.Errorf("Failed to extract patches: %v", err)
···
body := fp.Body
rkey := tid.TID()
+
mentions, references := s.mentionsResolver.Resolve(ctx, body)
+
initialSubmission := models.PullSubmission{
Patch: fp.Raw,
SourceRev: fp.SHA,
···
Body: body,
TargetBranch: targetBranch,
OwnerDid: user.Did,
-
RepoAt: f.RepoAt(),
+
RepoAt: repo.RepoAt(),
Rkey: rkey,
+
Mentions: mentions,
+
References: references,
Submissions: []*models.PullSubmission{
&initialSubmission,
},
+2 -2
appview/repo/archive.go
···
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
-
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, repo)
+
didSlashRepo := f.DidSlashRepo()
+
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, didSlashRepo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.archive", "err", xrpcerr)
rp.pages.Error503(w)
+21 -14
appview/repo/artifact.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages"
-
"tangled.org/core/appview/reporesolver"
"tangled.org/core/appview/xrpcclient"
+
"tangled.org/core/orm"
"tangled.org/core/tid"
"tangled.org/core/types"
···
rp.pages.RepoArtifactFragment(w, pages.RepoArtifactParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Artifact: artifact,
})
}
···
artifacts, err := db.GetArtifact(
rp.db,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("tag", tag.Tag.Hash[:]),
-
db.FilterEq("name", filename),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("tag", tag.Tag.Hash[:]),
+
orm.FilterEq("name", filename),
)
if err != nil {
log.Println("failed to get artifacts", err)
···
artifact := artifacts[0]
-
ownerPds := f.OwnerId.PDSEndpoint()
+
ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did)
+
if err != nil {
+
log.Println("failed to resolve repo owner did", f.Did, err)
+
http.Error(w, "repository owner not found", http.StatusNotFound)
+
return
+
}
+
+
ownerPds := ownerId.PDSEndpoint()
url, _ := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerPds))
q := url.Query()
q.Set("cid", artifact.BlobCid.String())
···
artifacts, err := db.GetArtifact(
rp.db,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("tag", tag[:]),
-
db.FilterEq("name", filename),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("tag", tag[:]),
+
orm.FilterEq("name", filename),
)
if err != nil {
log.Println("failed to get artifacts", err)
···
defer tx.Rollback()
err = db.DeleteArtifact(tx,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("tag", artifact.Tag[:]),
-
db.FilterEq("name", filename),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("tag", artifact.Tag[:]),
+
orm.FilterEq("name", filename),
)
if err != nil {
log.Println("failed to remove artifact record from db", err)
···
w.Write([]byte{})
}
-
func (rp *Repo) resolveTag(ctx context.Context, f *reporesolver.ResolvedRepo, tagParam string) (*types.TagReference, error) {
+
func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) {
tagParam, err := url.QueryUnescape(tagParam)
if err != nil {
return nil, err
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
+11 -9
appview/repo/blob.go
···
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
resp, err := tangled.RepoBlob(r.Context(), xrpcc, filePath, false, ref, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.blob", "err", xrpcerr)
···
return
}
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
+
// Use XRPC response directly instead of converting to internal types
var breadcrumbs [][]string
-
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))})
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))})
if filePath != "" {
for idx, elem := range strings.Split(filePath, "/") {
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))})
···
rp.pages.RepoBlob(w, pages.RepoBlobParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
BreadCrumbs: breadcrumbs,
BlobView: blobView,
RepoBlob_Output: resp,
···
if !rp.config.Core.Dev {
scheme = "https"
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name)
+
repo := f.DidSlashRepo()
baseURL := &url.URL{
Scheme: scheme,
Host: f.Knot,
···
}
// NewBlobView creates a BlobView from the XRPC response
-
func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string, queryParams url.Values) models.BlobView {
+
func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, repo *models.Repo, ref, filePath string, queryParams url.Values) models.BlobView {
view := models.BlobView{
Contents: "",
Lines: 0,
···
// Determine if binary
if resp.IsBinary != nil && *resp.IsBinary {
-
view.ContentSrc = generateBlobURL(config, f, ref, filePath)
+
view.ContentSrc = generateBlobURL(config, repo, ref, filePath)
ext := strings.ToLower(filepath.Ext(resp.Path))
switch ext {
···
return view
}
-
func generateBlobURL(config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string) string {
+
func generateBlobURL(config *config.Config, repo *models.Repo, ref, filePath string) string {
scheme := "http"
if !config.Core.Dev {
scheme = "https"
}
-
repoName := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repoName := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
baseURL := &url.URL{
Scheme: scheme,
-
Host: f.Knot,
+
Host: repo.Knot,
Path: "/xrpc/sh.tangled.repo.blob",
}
query := baseURL.Query()
+2 -2
appview/repo/branches.go
···
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
user := rp.oauth.GetUser(r)
rp.pages.RepoBranches(w, pages.RepoBranchesParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
RepoBranchesResponse: result,
})
}
+4 -8
appview/repo/compare.go
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
return
}
-
repoinfo := f.RepoInfo(user)
-
rp.pages.RepoCompareNew(w, pages.RepoCompareNewParams{
LoggedInUser: user,
-
RepoInfo: repoinfo,
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Branches: branches,
Tags: tags.Tags,
Base: base,
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
diff = patchutil.AsNiceDiff(formatPatch.FormatPatchRaw, base)
}
-
repoinfo := f.RepoInfo(user)
-
rp.pages.RepoCompare(w, pages.RepoCompareParams{
LoggedInUser: user,
-
RepoInfo: repoinfo,
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Branches: branches.Branches,
Tags: tags.Tags,
Base: base,
+24 -17
appview/repo/feed.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pagination"
-
"tangled.org/core/appview/reporesolver"
+
"tangled.org/core/orm"
+
"github.com/bluesky-social/indigo/atproto/identity"
"github.com/bluesky-social/indigo/atproto/syntax"
"github.com/gorilla/feeds"
)
-
func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) {
+
func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) {
const feedLimitPerType = 100
-
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
+
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt()))
if err != nil {
return nil, err
}
···
issues, err := db.GetIssuesPaginated(
rp.db,
pagination.Page{Limit: feedLimitPerType},
-
db.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("repo_at", repo.RepoAt()),
)
if err != nil {
return nil, err
}
feed := &feeds.Feed{
-
Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()),
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"},
+
Title: fmt.Sprintf("activity feed for @%s", ownerSlashRepo),
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, ownerSlashRepo), Type: "text/html", Rel: "alternate"},
Items: make([]*feeds.Item, 0),
Updated: time.UnixMilli(0),
}
for _, pull := range pulls {
-
items, err := rp.createPullItems(ctx, pull, f)
+
items, err := rp.createPullItems(ctx, pull, repo, ownerSlashRepo)
if err != nil {
return nil, err
}
···
}
for _, issue := range issues {
-
item, err := rp.createIssueItem(ctx, issue, f)
+
item, err := rp.createIssueItem(ctx, issue, repo, ownerSlashRepo)
if err != nil {
return nil, err
}
···
return feed, nil
}
-
func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) {
+
func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, repo *models.Repo, ownerSlashRepo string) ([]*feeds.Item, error) {
owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid)
if err != nil {
return nil, err
···
var items []*feeds.Item
state := rp.getPullState(pull)
-
description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo())
+
description := rp.buildPullDescription(owner.Handle, state, pull, ownerSlashRepo)
mainItem := &feeds.Item{
Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title),
Description: description,
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)},
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId)},
Created: pull.Created,
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
}
···
roundItem := &feeds.Item{
Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber),
-
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, f.OwnerSlashRepo()),
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)},
+
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in @%s", owner.Handle, round.RoundNumber, pull.PullId, ownerSlashRepo),
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId, round.RoundNumber)},
Created: round.Created,
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
}
···
return items, nil
}
-
func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) {
+
func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, repo *models.Repo, ownerSlashRepo string) (*feeds.Item, error) {
owner, err := rp.idResolver.ResolveIdent(ctx, issue.Did)
if err != nil {
return nil, err
···
return &feeds.Item{
Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title),
-
Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()),
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)},
+
Description: fmt.Sprintf("@%s %s issue #%d in @%s", owner.Handle, state, issue.IssueId, ownerSlashRepo),
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, ownerSlashRepo, issue.IssueId)},
Created: issue.Created,
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
}, nil
···
log.Println("failed to fully resolve repo:", err)
return
}
+
repoOwnerId, ok := r.Context().Value("resolvedId").(identity.Identity)
+
if !ok || repoOwnerId.Handle.IsInvalidHandle() {
+
log.Println("failed to get resolved repo owner id")
+
return
+
}
+
ownerSlashRepo := repoOwnerId.Handle.String() + "/" + f.Name
-
feed, err := rp.getRepoFeed(r.Context(), f)
+
feed, err := rp.getRepoFeed(r.Context(), f, ownerSlashRepo)
if err != nil {
log.Println("failed to get repo feed:", err)
rp.pages.Error500(w)
+18 -19
appview/repo/index.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages"
-
"tangled.org/core/appview/reporesolver"
"tangled.org/core/appview/xrpcclient"
+
"tangled.org/core/orm"
"tangled.org/core/types"
"github.com/go-chi/chi/v5"
···
}
user := rp.oauth.GetUser(r)
-
repoInfo := f.RepoInfo(user)
// Build index response from multiple XRPC calls
result, err := rp.buildIndexResponse(r.Context(), xrpcc, f, ref)
···
rp.pages.RepoIndexPage(w, pages.RepoIndexParams{
LoggedInUser: user,
NeedsKnotUpgrade: true,
-
RepoInfo: repoInfo,
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
})
return
}
···
l.Error("failed to get email to did map", "err", err)
}
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc)
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc)
if err != nil {
l.Error("failed to GetVerifiedObjectCommits", "err", err)
}
···
for _, c := range commitsTrunc {
shas = append(shas, c.Hash.String())
}
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas)
+
pipelines, err := getPipelineStatuses(rp.db, f, shas)
if err != nil {
l.Error("failed to fetch pipeline statuses", "err", err)
// non-fatal
···
rp.pages.RepoIndexPage(w, pages.RepoIndexParams{
LoggedInUser: user,
-
RepoInfo: repoInfo,
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
TagMap: tagMap,
RepoIndexResponse: *result,
CommitsTrunc: commitsTrunc,
···
func (rp *Repo) getLanguageInfo(
ctx context.Context,
l *slog.Logger,
-
f *reporesolver.ResolvedRepo,
+
repo *models.Repo,
xrpcc *indigoxrpc.Client,
currentRef string,
isDefaultRef bool,
···
// first attempt to fetch from db
langs, err := db.GetRepoLanguages(
rp.db,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("ref", currentRef),
+
orm.FilterEq("repo_at", repo.RepoAt()),
+
orm.FilterEq("ref", currentRef),
)
if err != nil || langs == nil {
// non-fatal, fetch langs from ks via XRPC
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
-
ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, repo)
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
+
ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, didSlashRepo)
if err != nil {
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.languages", "err", xrpcerr)
···
for _, lang := range ls.Languages {
langs = append(langs, models.RepoLanguage{
-
RepoAt: f.RepoAt(),
+
RepoAt: repo.RepoAt(),
Ref: currentRef,
IsDefaultRef: isDefaultRef,
Language: lang.Name,
···
defer tx.Rollback()
// update appview's cache
-
err = db.UpdateRepoLanguages(tx, f.RepoAt(), currentRef, langs)
+
err = db.UpdateRepoLanguages(tx, repo.RepoAt(), currentRef, langs)
if err != nil {
// non-fatal
l.Error("failed to cache lang results", "err", err)
···
}
// buildIndexResponse creates a RepoIndexResponse by combining multiple xrpc calls in parallel
-
func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, f *reporesolver.ResolvedRepo, ref string) (*types.RepoIndexResponse, error) {
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) {
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
// first get branches to determine the ref if not specified
-
branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, repo)
+
branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, didSlashRepo)
if err != nil {
return nil, fmt.Errorf("failed to call repoBranches: %w", err)
}
···
wg.Add(1)
go func() {
defer wg.Done()
-
tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo)
+
tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, didSlashRepo)
if err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to call repoTags: %w", err))
return
···
wg.Add(1)
go func() {
defer wg.Done()
-
resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, repo)
+
resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, didSlashRepo)
if err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to call repoTree: %w", err))
return
···
wg.Add(1)
go func() {
defer wg.Done()
-
logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, repo)
+
logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, didSlashRepo)
if err != nil {
errs = errors.Join(errs, fmt.Errorf("failed to call repoLog: %w", err))
return
+8 -11
appview/repo/log.go
···
cursor = strconv.Itoa(offset)
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoLog(r.Context(), xrpcc, cursor, limit, "", ref, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.log", "err", xrpcerr)
···
l.Error("failed to fetch email to did mapping", "err", err)
}
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits)
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits)
if err != nil {
l.Error("failed to GetVerifiedObjectCommits", "err", err)
}
-
-
repoInfo := f.RepoInfo(user)
var shas []string
for _, c := range xrpcResp.Commits {
shas = append(shas, c.Hash.String())
}
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas)
+
pipelines, err := getPipelineStatuses(rp.db, f, shas)
if err != nil {
l.Error("failed to getPipelineStatuses", "err", err)
// non-fatal
···
rp.pages.RepoLog(w, pages.RepoLogParams{
LoggedInUser: user,
TagMap: tagMap,
-
RepoInfo: repoInfo,
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
RepoLogResponse: xrpcResp,
EmailToDid: emailToDidMap,
VerifiedCommits: vc,
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoDiff(r.Context(), xrpcc, ref, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.diff", "err", xrpcerr)
···
l.Error("failed to get email to did mapping", "err", err)
}
-
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff})
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit})
if err != nil {
l.Error("failed to GetVerifiedCommits", "err", err)
}
user := rp.oauth.GetUser(r)
-
repoInfo := f.RepoInfo(user)
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, []string{result.Diff.Commit.This})
+
pipelines, err := getPipelineStatuses(rp.db, f, []string{result.Diff.Commit.This})
if err != nil {
l.Error("failed to getPipelineStatuses", "err", err)
// non-fatal
···
rp.pages.RepoCommit(w, pages.RepoCommitParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
RepoCommitResponse: result,
EmailToDid: emailToDidMap,
VerifiedCommit: vc,
+4 -3
appview/repo/opengraph.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/ogcard"
+
"tangled.org/core/orm"
"tangled.org/core/types"
)
···
var languageStats []types.RepoLanguageDetails
langs, err := db.GetRepoLanguages(
rp.db,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("is_default_ref", 1),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("is_default_ref", 1),
)
if err != nil {
log.Printf("failed to get language stats from db: %v", err)
···
})
}
-
card, err := rp.drawRepoSummaryCard(&f.Repo, languageStats)
+
card, err := rp.drawRepoSummaryCard(f, languageStats)
if err != nil {
log.Println("failed to draw repo summary card", err)
http.Error(w, "failed to draw repo summary card", http.StatusInternalServerError)
+35 -35
appview/repo/repo.go
···
xrpcclient "tangled.org/core/appview/xrpcclient"
"tangled.org/core/eventconsumer"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/tid"
"tangled.org/core/xrpc/serviceauth"
···
}
}
-
newRepo := f.Repo
+
newRepo := *f
newRepo.Spindle = newSpindle
record := newRepo.AsRecord()
···
l.Info("wrote label record to PDS")
// update the repo to subscribe to this label
-
newRepo := f.Repo
+
newRepo := *f
newRepo.Labels = append(newRepo.Labels, aturi)
repoRecord := newRepo.AsRecord()
···
// get form values
labelId := r.FormValue("label-id")
-
label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId))
+
label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId))
if err != nil {
fail("Failed to find label definition.", err)
return
···
}
// update repo record to remove the label reference
-
newRepo := f.Repo
+
newRepo := *f
var updated []string
removedAt := label.AtUri().String()
for _, l := range newRepo.Labels {
···
err = db.UnsubscribeLabel(
tx,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterEq("label_at", removedAt),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterEq("label_at", removedAt),
)
if err != nil {
fail("Failed to unsubscribe label.", err)
return
}
-
err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id))
+
err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id))
if err != nil {
fail("Failed to delete label definition.", err)
return
···
}
labelAts := r.Form["label"]
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
if err != nil {
fail("Failed to subscribe to label.", err)
return
}
-
newRepo := f.Repo
+
newRepo := *f
newRepo.Labels = append(newRepo.Labels, labelAts...)
// dedup
···
return
}
-
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey)
+
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey)
if err != nil {
fail("Failed to update labels, no record found on PDS.", err)
return
···
}
labelAts := r.Form["label"]
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
if err != nil {
fail("Failed to unsubscribe to label.", err)
return
}
// update repo record to remove the label reference
-
newRepo := f.Repo
+
newRepo := *f
var updated []string
for _, l := range newRepo.Labels {
if !slices.Contains(labelAts, l) {
···
return
}
-
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey)
+
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey)
if err != nil {
fail("Failed to update labels, no record found on PDS.", err)
return
···
err = db.UnsubscribeLabel(
rp.db,
-
db.FilterEq("repo_at", f.RepoAt()),
-
db.FilterIn("label_at", labelAts),
+
orm.FilterEq("repo_at", f.RepoAt()),
+
orm.FilterIn("label_at", labelAts),
)
if err != nil {
fail("Failed to unsubscribe label.", err)
···
labelDefs, err := db.GetLabelDefinitions(
rp.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", subject.Collection().String()),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", subject.Collection().String()),
)
if err != nil {
l.Error("failed to fetch label defs", "err", err)
···
defs[l.AtUri().String()] = &l
}
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
if err != nil {
l.Error("failed to build label state", "err", err)
return
···
user := rp.oauth.GetUser(r)
rp.pages.LabelPanel(w, pages.LabelPanelParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Defs: defs,
Subject: subject.String(),
State: state,
···
labelDefs, err := db.GetLabelDefinitions(
rp.db,
-
db.FilterIn("at_uri", f.Repo.Labels),
-
db.FilterContains("scope", subject.Collection().String()),
+
orm.FilterIn("at_uri", f.Labels),
+
orm.FilterContains("scope", subject.Collection().String()),
)
if err != nil {
l.Error("failed to fetch labels", "err", err)
···
defs[l.AtUri().String()] = &l
}
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
if err != nil {
l.Error("failed to build label state", "err", err)
return
···
user := rp.oauth.GetUser(r)
rp.pages.EditLabelPanel(w, pages.EditLabelPanelParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Defs: defs,
Subject: subject.String(),
State: state,
···
r.Context(),
client,
&tangled.RepoDelete_Input{
-
Did: f.OwnerDid(),
+
Did: f.Did,
Name: f.Name,
Rkey: f.Rkey,
},
···
l.Info("removed collaborators")
// remove repo RBAC
-
err = rp.enforcer.RemoveRepo(f.OwnerDid(), f.Knot, f.DidSlashRepo())
+
err = rp.enforcer.RemoveRepo(f.Did, f.Knot, f.DidSlashRepo())
if err != nil {
rp.pages.Notice(w, noticeId, "Failed to update RBAC rules")
return
}
// remove repo from db
-
err = db.RemoveRepo(tx, f.OwnerDid(), f.Name)
+
err = db.RemoveRepo(tx, f.Did, f.Name)
if err != nil {
rp.pages.Notice(w, noticeId, "Failed to update appview")
return
···
return
}
-
rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.OwnerDid()))
+
rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.Did))
}
func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) {
···
return
}
-
repoInfo := f.RepoInfo(user)
-
if repoInfo.Source == nil {
+
if f.Source == "" {
rp.pages.Notice(w, "repo", "This repository is not a fork.")
return
}
···
&tangled.RepoForkSync_Input{
Did: user.Did,
Name: f.Name,
-
Source: repoInfo.Source.RepoAt().String(),
+
Source: f.Source,
Branch: ref,
},
)
···
rp.pages.ForkRepo(w, pages.ForkRepoParams{
LoggedInUser: user,
Knots: knots,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
})
case http.MethodPost:
···
// in the user's account.
existingRepo, err := db.GetRepo(
rp.db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("name", forkName),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("name", forkName),
if err != nil {
if !errors.Is(err, sql.ErrNoRows) {
···
uri = "http"
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.Did, f.Name)
l = l.With("cloneUrl", forkSourceUrl)
sourceAt := f.RepoAt().String()
···
Knot: targetKnot,
Rkey: rkey,
Source: sourceAt,
-
Description: f.Repo.Description,
+
Description: f.Description,
Created: time.Now(),
Labels: rp.config.Label.DefaultLabelDefs,
+17 -19
appview/repo/repo_util.go
···
package repo
import (
+
"maps"
"slices"
"sort"
"strings"
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
-
"tangled.org/core/appview/pages/repoinfo"
+
"tangled.org/core/orm"
"tangled.org/core/types"
-
-
"github.com/go-git/go-git/v5/plumbing/object"
)
func sortFiles(files []types.NiceTree) {
···
})
}
-
func uniqueEmails(commits []*object.Commit) []string {
+
func uniqueEmails(commits []types.Commit) []string {
emails := make(map[string]struct{})
for _, commit := range commits {
-
if commit.Author.Email != "" {
-
emails[commit.Author.Email] = struct{}{}
-
}
-
if commit.Committer.Email != "" {
-
emails[commit.Committer.Email] = struct{}{}
+
emails[commit.Author.Email] = struct{}{}
+
emails[commit.Committer.Email] = struct{}{}
+
for _, c := range commit.CoAuthors() {
+
emails[c.Email] = struct{}{}
}
}
-
var uniqueEmails []string
-
for email := range emails {
-
uniqueEmails = append(uniqueEmails, email)
-
}
-
return uniqueEmails
+
+
// delete empty emails if any, from the set
+
delete(emails, "")
+
+
return slices.Collect(maps.Keys(emails))
}
func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) {
···
// golang is so blessed that it requires 35 lines of imperative code for this
func getPipelineStatuses(
d *db.DB,
-
repoInfo repoinfo.RepoInfo,
+
repo *models.Repo,
shas []string,
) (map[string]models.Pipeline, error) {
m := make(map[string]models.Pipeline)
···
ps, err := db.GetPipelineStatuses(
d,
len(shas),
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
-
db.FilterEq("repo_name", repoInfo.Name),
-
db.FilterEq("knot", repoInfo.Knot),
-
db.FilterIn("sha", shas),
+
orm.FilterEq("repo_owner", repo.Did),
+
orm.FilterEq("repo_name", repo.Name),
+
orm.FilterEq("knot", repo.Knot),
+
orm.FilterIn("sha", shas),
)
if err != nil {
return nil, err
+40 -11
appview/repo/settings.go
···
"tangled.org/core/api/tangled"
"tangled.org/core/appview/db"
+
"tangled.org/core/appview/models"
"tangled.org/core/appview/oauth"
"tangled.org/core/appview/pages"
xrpcclient "tangled.org/core/appview/xrpcclient"
+
"tangled.org/core/orm"
"tangled.org/core/types"
comatproto "github.com/bluesky-social/indigo/api/atproto"
···
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
return
}
-
defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
+
defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
if err != nil {
l.Error("failed to fetch labels", "err", err)
rp.pages.Error503(w)
return
}
-
labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Repo.Labels))
+
labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels))
if err != nil {
l.Error("failed to fetch labels", "err", err)
rp.pages.Error503(w)
···
labels = labels[:n]
subscribedLabels := make(map[string]struct{})
-
for _, l := range f.Repo.Labels {
+
for _, l := range f.Labels {
subscribedLabels[l] = struct{}{}
}
···
rp.pages.RepoGeneralSettings(w, pages.RepoGeneralSettingsParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Branches: result.Branches,
Labels: labels,
DefaultLabels: defaultLabels,
···
f, err := rp.repoResolver.Resolve(r)
user := rp.oauth.GetUser(r)
-
repoCollaborators, err := f.Collaborators(r.Context())
+
collaborators, err := func(repo *models.Repo) ([]pages.Collaborator, error) {
+
repoCollaborators, err := rp.enforcer.E.GetImplicitUsersForResourceByDomain(repo.DidSlashRepo(), repo.Knot)
+
if err != nil {
+
return nil, err
+
}
+
var collaborators []pages.Collaborator
+
for _, item := range repoCollaborators {
+
// currently only two roles: owner and member
+
var role string
+
switch item[3] {
+
case "repo:owner":
+
role = "owner"
+
case "repo:collaborator":
+
role = "collaborator"
+
default:
+
continue
+
}
+
+
did := item[0]
+
+
c := pages.Collaborator{
+
Did: did,
+
Role: role,
+
}
+
collaborators = append(collaborators, c)
+
}
+
return collaborators, nil
+
}(f)
if err != nil {
l.Error("failed to get collaborators", "err", err)
}
rp.pages.RepoAccessSettings(w, pages.RepoAccessSettingsParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Tabs: settingsTabs,
Tab: "access",
-
Collaborators: repoCollaborators,
+
Collaborators: collaborators,
})
}
···
user := rp.oauth.GetUser(r)
// all spindles that the repo owner is a member of
-
spindles, err := rp.enforcer.GetSpindlesForUser(f.OwnerDid())
+
spindles, err := rp.enforcer.GetSpindlesForUser(f.Did)
if err != nil {
l.Error("failed to fetch spindles", "err", err)
return
···
rp.pages.RepoPipelineSettings(w, pages.RepoPipelineSettingsParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
Tabs: settingsTabs,
Tab: "pipelines",
Spindles: spindles,
···
}
l.Debug("got", "topicsStr", topicStr, "topics", topics)
-
newRepo := f.Repo
+
newRepo := *f
newRepo.Description = description
newRepo.Website = website
newRepo.Topics = topics
+4 -3
appview/repo/tags.go
···
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages"
xrpcclient "tangled.org/core/appview/xrpcclient"
+
"tangled.org/core/orm"
"tangled.org/core/types"
indigoxrpc "github.com/bluesky-social/indigo/xrpc"
···
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.tags", "err", xrpcerr)
···
rp.pages.Error503(w)
return
}
-
artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt()))
+
artifacts, err := db.GetArtifact(rp.db, orm.FilterEq("repo_at", f.RepoAt()))
if err != nil {
l.Error("failed grab artifacts", "err", err)
return
···
user := rp.oauth.GetUser(r)
rp.pages.RepoTags(w, pages.RepoTagsParams{
LoggedInUser: user,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
RepoTagsResponse: result,
ArtifactMap: artifactMap,
DanglingArtifacts: danglingArtifacts,
+6 -4
appview/repo/tree.go
···
"tangled.org/core/api/tangled"
"tangled.org/core/appview/pages"
+
"tangled.org/core/appview/reporesolver"
xrpcclient "tangled.org/core/appview/xrpcclient"
"tangled.org/core/types"
···
xrpcc := &indigoxrpc.Client{
Host: host,
}
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
xrpcResp, err := tangled.RepoTree(r.Context(), xrpcc, treePath, ref, repo)
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
l.Error("failed to call XRPC repo.tree", "err", xrpcerr)
···
result.ReadmeFileName = xrpcResp.Readme.Filename
result.Readme = xrpcResp.Readme.Contents
}
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
// redirects tree paths trying to access a blob; in this case the result.Files is unpopulated,
// so we can safely redirect to the "parent" (which is the same file).
if len(result.Files) == 0 && result.Parent == treePath {
-
redirectTo := fmt.Sprintf("/%s/blob/%s/%s", f.OwnerSlashRepo(), url.PathEscape(ref), result.Parent)
+
redirectTo := fmt.Sprintf("/%s/blob/%s/%s", ownerSlashRepo, url.PathEscape(ref), result.Parent)
http.Redirect(w, r, redirectTo, http.StatusFound)
return
}
user := rp.oauth.GetUser(r)
var breadcrumbs [][]string
-
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))})
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))})
if treePath != "" {
for idx, elem := range strings.Split(treePath, "/") {
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))})
···
LoggedInUser: user,
BreadCrumbs: breadcrumbs,
TreePath: treePath,
-
RepoInfo: f.RepoInfo(user),
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
RepoTreeResponse: result,
})
}
+76 -164
appview/reporesolver/resolver.go
···
package reporesolver
import (
-
"context"
-
"database/sql"
-
"errors"
"fmt"
"log"
"net/http"
···
"strings"
"github.com/bluesky-social/indigo/atproto/identity"
-
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/go-chi/chi/v5"
"tangled.org/core/appview/config"
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/oauth"
-
"tangled.org/core/appview/pages"
"tangled.org/core/appview/pages/repoinfo"
-
"tangled.org/core/idresolver"
"tangled.org/core/rbac"
)
-
type ResolvedRepo struct {
-
models.Repo
-
OwnerId identity.Identity
-
CurrentDir string
-
Ref string
-
-
rr *RepoResolver
+
type RepoResolver struct {
+
config *config.Config
+
enforcer *rbac.Enforcer
+
execer db.Execer
}
-
type RepoResolver struct {
-
config *config.Config
-
enforcer *rbac.Enforcer
-
idResolver *idresolver.Resolver
-
execer db.Execer
+
func New(config *config.Config, enforcer *rbac.Enforcer, execer db.Execer) *RepoResolver {
+
return &RepoResolver{config: config, enforcer: enforcer, execer: execer}
}
-
func New(config *config.Config, enforcer *rbac.Enforcer, resolver *idresolver.Resolver, execer db.Execer) *RepoResolver {
-
return &RepoResolver{config: config, enforcer: enforcer, idResolver: resolver, execer: execer}
+
// NOTE: this... should not even be here. the entire package will be removed in future refactor
+
func GetBaseRepoPath(r *http.Request, repo *models.Repo) string {
+
var (
+
user = chi.URLParam(r, "user")
+
name = chi.URLParam(r, "repo")
+
)
+
if user == "" || name == "" {
+
return repo.DidSlashRepo()
+
}
+
return path.Join(user, name)
}
-
func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) {
+
// TODO: move this out of `RepoResolver` struct
+
func (rr *RepoResolver) Resolve(r *http.Request) (*models.Repo, error) {
repo, ok := r.Context().Value("repo").(*models.Repo)
if !ok {
log.Println("malformed middleware: `repo` not exist in context")
return nil, fmt.Errorf("malformed middleware")
}
-
id, ok := r.Context().Value("resolvedId").(identity.Identity)
-
if !ok {
-
log.Println("malformed middleware")
-
return nil, fmt.Errorf("malformed middleware")
-
}
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
-
ref := chi.URLParam(r, "ref")
-
-
return &ResolvedRepo{
-
Repo: *repo,
-
OwnerId: id,
-
CurrentDir: currentDir,
-
Ref: ref,
-
-
rr: rr,
-
}, nil
-
}
-
-
func (f *ResolvedRepo) OwnerDid() string {
-
return f.OwnerId.DID.String()
-
}
-
-
func (f *ResolvedRepo) OwnerHandle() string {
-
return f.OwnerId.Handle.String()
+
return repo, nil
}
-
func (f *ResolvedRepo) OwnerSlashRepo() string {
-
handle := f.OwnerId.Handle
-
-
var p string
-
if handle != "" && !handle.IsInvalidHandle() {
-
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name)
-
} else {
-
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name)
+
// 1. [x] replace `RepoInfo` to `reporesolver.GetRepoInfo(r *http.Request, repo, user)`
+
// 2. [x] remove `rr`, `CurrentDir`, `Ref` fields from `ResolvedRepo`
+
// 3. [x] remove `ResolvedRepo`
+
// 4. [ ] replace reporesolver to reposervice
+
func (rr *RepoResolver) GetRepoInfo(r *http.Request, user *oauth.User) repoinfo.RepoInfo {
+
ownerId, ook := r.Context().Value("resolvedId").(identity.Identity)
+
repo, rok := r.Context().Value("repo").(*models.Repo)
+
if !ook || !rok {
+
log.Println("malformed request, failed to get repo from context")
}
-
return p
-
}
+
// get dir/ref
+
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
+
ref := chi.URLParam(r, "ref")
-
func (f *ResolvedRepo) Collaborators(ctx context.Context) ([]pages.Collaborator, error) {
-
repoCollaborators, err := f.rr.enforcer.E.GetImplicitUsersForResourceByDomain(f.DidSlashRepo(), f.Knot)
-
if err != nil {
-
return nil, err
+
repoAt := repo.RepoAt()
+
isStarred := false
+
roles := repoinfo.RolesInRepo{}
+
if user != nil {
+
isStarred = db.GetStarStatus(rr.execer, user.Did, repoAt)
+
roles.Roles = rr.enforcer.GetPermissionsInRepo(user.Did, repo.Knot, repo.DidSlashRepo())
}
-
var collaborators []pages.Collaborator
-
for _, item := range repoCollaborators {
-
// currently only two roles: owner and member
-
var role string
-
switch item[3] {
-
case "repo:owner":
-
role = "owner"
-
case "repo:collaborator":
-
role = "collaborator"
-
default:
-
continue
+
stats := repo.RepoStats
+
if stats == nil {
+
starCount, err := db.GetStarCount(rr.execer, repoAt)
+
if err != nil {
+
log.Println("failed to get star count for ", repoAt)
}
-
-
did := item[0]
-
-
c := pages.Collaborator{
-
Did: did,
-
Handle: "",
-
Role: role,
+
issueCount, err := db.GetIssueCount(rr.execer, repoAt)
+
if err != nil {
+
log.Println("failed to get issue count for ", repoAt)
}
-
collaborators = append(collaborators, c)
-
}
-
-
// populate all collborators with handles
-
identsToResolve := make([]string, len(collaborators))
-
for i, collab := range collaborators {
-
identsToResolve[i] = collab.Did
-
}
-
-
resolvedIdents := f.rr.idResolver.ResolveIdents(ctx, identsToResolve)
-
for i, resolved := range resolvedIdents {
-
if resolved != nil {
-
collaborators[i].Handle = resolved.Handle.String()
+
pullCount, err := db.GetPullCount(rr.execer, repoAt)
+
if err != nil {
+
log.Println("failed to get pull count for ", repoAt)
}
-
}
-
-
return collaborators, nil
-
}
-
-
// this function is a bit weird since it now returns RepoInfo from an entirely different
-
// package. we should refactor this or get rid of RepoInfo entirely.
-
func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo {
-
repoAt := f.RepoAt()
-
isStarred := false
-
if user != nil {
-
isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt)
-
}
-
-
starCount, err := db.GetStarCount(f.rr.execer, repoAt)
-
if err != nil {
-
log.Println("failed to get star count for ", repoAt)
-
}
-
issueCount, err := db.GetIssueCount(f.rr.execer, repoAt)
-
if err != nil {
-
log.Println("failed to get issue count for ", repoAt)
-
}
-
pullCount, err := db.GetPullCount(f.rr.execer, repoAt)
-
if err != nil {
-
log.Println("failed to get issue count for ", repoAt)
-
}
-
source, err := db.GetRepoSource(f.rr.execer, repoAt)
-
if errors.Is(err, sql.ErrNoRows) {
-
source = ""
-
} else if err != nil {
-
log.Println("failed to get repo source for ", repoAt, err)
+
stats = &models.RepoStats{
+
StarCount: starCount,
+
IssueCount: issueCount,
+
PullCount: pullCount,
+
}
}
var sourceRepo *models.Repo
-
if source != "" {
-
sourceRepo, err = db.GetRepoByAtUri(f.rr.execer, source)
+
var err error
+
if repo.Source != "" {
+
sourceRepo, err = db.GetRepoByAtUri(rr.execer, repo.Source)
if err != nil {
log.Println("failed to get repo by at uri", err)
}
}
-
var sourceHandle *identity.Identity
-
if sourceRepo != nil {
-
sourceHandle, err = f.rr.idResolver.ResolveIdent(context.Background(), sourceRepo.Did)
-
if err != nil {
-
log.Println("failed to resolve source repo", err)
-
}
-
}
+
repoInfo := repoinfo.RepoInfo{
+
// this is basically a models.Repo
+
OwnerDid: ownerId.DID.String(),
+
OwnerHandle: ownerId.Handle.String(),
+
Name: repo.Name,
+
Rkey: repo.Rkey,
+
Description: repo.Description,
+
Website: repo.Website,
+
Topics: repo.Topics,
+
Knot: repo.Knot,
+
Spindle: repo.Spindle,
+
Stats: *stats,
-
knot := f.Knot
+
// fork repo upstream
+
Source: sourceRepo,
-
repoInfo := repoinfo.RepoInfo{
-
OwnerDid: f.OwnerDid(),
-
OwnerHandle: f.OwnerHandle(),
-
Name: f.Name,
-
Rkey: f.Repo.Rkey,
-
RepoAt: repoAt,
-
Description: f.Description,
-
Website: f.Website,
-
Topics: f.Topics,
-
IsStarred: isStarred,
-
Knot: knot,
-
Spindle: f.Spindle,
-
Roles: f.RolesInRepo(user),
-
Stats: models.RepoStats{
-
StarCount: starCount,
-
IssueCount: issueCount,
-
PullCount: pullCount,
-
},
-
CurrentDir: f.CurrentDir,
-
Ref: f.Ref,
-
}
+
// page context
+
CurrentDir: currentDir,
+
Ref: ref,
-
if sourceRepo != nil {
-
repoInfo.Source = sourceRepo
-
repoInfo.SourceHandle = sourceHandle.Handle.String()
+
// info related to the session
+
IsStarred: isStarred,
+
Roles: roles,
}
return repoInfo
-
}
-
-
func (f *ResolvedRepo) RolesInRepo(u *oauth.User) repoinfo.RolesInRepo {
-
if u != nil {
-
r := f.rr.enforcer.GetPermissionsInRepo(u.Did, f.Knot, f.DidSlashRepo())
-
return repoinfo.RolesInRepo{Roles: r}
-
} else {
-
return repoinfo.RolesInRepo{}
-
}
}
// extractPathAfterRef gets the actual repository path
+5 -4
appview/serververify/verify.go
···
"tangled.org/core/api/tangled"
"tangled.org/core/appview/db"
"tangled.org/core/appview/xrpcclient"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
)
···
// mark this spindle as verified in the db
rowId, err := db.VerifySpindle(
tx,
-
db.FilterEq("owner", owner),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", owner),
+
orm.FilterEq("instance", instance),
)
if err != nil {
return 0, fmt.Errorf("failed to write to DB: %w", err)
···
// mark as registered
err = db.MarkRegistered(
tx,
-
db.FilterEq("did", owner),
-
db.FilterEq("domain", domain),
+
orm.FilterEq("did", owner),
+
orm.FilterEq("domain", domain),
)
if err != nil {
return fmt.Errorf("failed to register domain: %w", err)
+2
appview/settings/settings.go
···
{"Name": "keys", "Icon": "key"},
{"Name": "emails", "Icon": "mail"},
{"Name": "notifications", "Icon": "bell"},
+
{"Name": "knots", "Icon": "volleyball"},
+
{"Name": "spindles", "Icon": "spool"},
}
)
+44 -26
appview/spindles/spindles.go
···
"tangled.org/core/appview/serververify"
"tangled.org/core/appview/xrpcclient"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/tid"
···
Logger *slog.Logger
}
+
type tab = map[string]any
+
+
var (
+
spindlesTabs []tab = []tab{
+
{"Name": "profile", "Icon": "user"},
+
{"Name": "keys", "Icon": "key"},
+
{"Name": "emails", "Icon": "mail"},
+
{"Name": "notifications", "Icon": "bell"},
+
{"Name": "knots", "Icon": "volleyball"},
+
{"Name": "spindles", "Icon": "spool"},
+
}
+
)
+
func (s *Spindles) Router() http.Handler {
r := chi.NewRouter()
···
user := s.OAuth.GetUser(r)
all, err := db.GetSpindles(
s.Db,
-
db.FilterEq("owner", user.Did),
+
orm.FilterEq("owner", user.Did),
)
if err != nil {
s.Logger.Error("failed to fetch spindles", "err", err)
···
s.Pages.Spindles(w, pages.SpindlesParams{
LoggedInUser: user,
Spindles: all,
+
Tabs: spindlesTabs,
+
Tab: "spindles",
})
}
···
spindles, err := db.GetSpindles(
s.Db,
-
db.FilterEq("instance", instance),
-
db.FilterEq("owner", user.Did),
-
db.FilterIsNot("verified", "null"),
+
orm.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterIsNot("verified", "null"),
)
if err != nil || len(spindles) != 1 {
l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles))
···
repos, err := db.GetRepos(
s.Db,
0,
-
db.FilterEq("spindle", instance),
+
orm.FilterEq("spindle", instance),
)
if err != nil {
l.Error("failed to get spindle repos", "err", err)
···
Spindle: spindle,
Members: members,
Repos: repoMap,
+
Tabs: spindlesTabs,
+
Tab: "spindles",
})
}
···
spindles, err := db.GetSpindles(
s.Db,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil || len(spindles) != 1 {
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
// remove spindle members first
err = db.RemoveSpindleMember(
tx,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil {
l.Error("failed to remove spindle members", "err", err)
···
err = db.DeleteSpindle(
tx,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil {
l.Error("failed to delete spindle", "err", err)
···
shouldRedirect := r.Header.Get("shouldRedirect")
if shouldRedirect == "true" {
-
s.Pages.HxRedirect(w, "/spindles")
+
s.Pages.HxRedirect(w, "/settings/spindles")
return
}
···
spindles, err := db.GetSpindles(
s.Db,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil || len(spindles) != 1 {
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
verifiedSpindle, err := db.GetSpindles(
s.Db,
-
db.FilterEq("id", rowId),
+
orm.FilterEq("id", rowId),
)
if err != nil || len(verifiedSpindle) != 1 {
l.Error("failed get new spindle", "err", err)
···
spindles, err := db.GetSpindles(
s.Db,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil || len(spindles) != 1 {
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
}
// success
-
s.Pages.HxRedirect(w, fmt.Sprintf("/spindles/%s", instance))
+
s.Pages.HxRedirect(w, fmt.Sprintf("/settings/spindles/%s", instance))
}
func (s *Spindles) removeMember(w http.ResponseWriter, r *http.Request) {
···
spindles, err := db.GetSpindles(
s.Db,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("instance", instance),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("instance", instance),
)
if err != nil || len(spindles) != 1 {
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
// get the record from the DB first:
members, err := db.GetSpindleMembers(
s.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("instance", instance),
-
db.FilterEq("subject", memberId.DID),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("instance", instance),
+
orm.FilterEq("subject", memberId.DID),
)
if err != nil || len(members) != 1 {
l.Error("failed to get member", "err", err)
···
// remove from db
if err = db.RemoveSpindleMember(
tx,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("instance", instance),
-
db.FilterEq("subject", memberId.DID),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("instance", instance),
+
orm.FilterEq("subject", memberId.DID),
); err != nil {
l.Error("failed to remove spindle member", "err", err)
fail()
+6 -5
appview/state/gfi.go
···
"tangled.org/core/appview/pages"
"tangled.org/core/appview/pagination"
"tangled.org/core/consts"
+
"tangled.org/core/orm"
)
func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···
goodFirstIssueLabel := s.config.Label.GoodFirstIssue
-
gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel))
+
gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel))
if err != nil {
log.Println("failed to get gfi label def", err)
s.pages.Error500(w)
return
}
-
repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel))
+
repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel))
if err != nil {
log.Println("failed to get repo labels", err)
s.pages.Error503(w)
···
pagination.Page{
Limit: 500,
},
-
db.FilterIn("repo_at", repoUris),
-
db.FilterEq("open", 1),
+
orm.FilterIn("repo_at", repoUris),
+
orm.FilterEq("open", 1),
)
if err != nil {
log.Println("failed to get issues", err)
···
}
if len(uriList) > 0 {
-
allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList))
+
allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList))
if err != nil {
log.Println("failed to fetch labels", err)
}
+17
appview/state/git_http.go
···
}
+
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
+
if !ok {
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
+
return
+
}
+
repo := r.Context().Value("repo").(*models.Repo)
+
+
scheme := "https"
+
if s.config.Core.Dev {
+
scheme = "http"
+
}
+
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
+
s.proxyRequest(w, r, targetURL)
+
}
+
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
user, ok := r.Context().Value("resolvedId").(identity.Identity)
if !ok {
+6 -5
appview/state/knotstream.go
···
ec "tangled.org/core/eventconsumer"
"tangled.org/core/eventconsumer/cursor"
"tangled.org/core/log"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/workflow"
···
knots, err := db.GetRegistrations(
d,
-
db.FilterIsNot("registered", "null"),
+
orm.FilterIsNot("registered", "null"),
)
if err != nil {
return nil, err
···
repos, err := db.GetRepos(
d,
0,
-
db.FilterEq("did", record.RepoDid),
-
db.FilterEq("name", record.RepoName),
+
orm.FilterEq("did", record.RepoDid),
+
orm.FilterEq("name", record.RepoName),
)
if err != nil {
return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err)
···
repos, err := db.GetRepos(
d,
0,
-
db.FilterEq("did", record.TriggerMetadata.Repo.Did),
-
db.FilterEq("name", record.TriggerMetadata.Repo.Repo),
+
orm.FilterEq("did", record.TriggerMetadata.Repo.Did),
+
orm.FilterEq("name", record.TriggerMetadata.Repo.Repo),
)
if err != nil {
return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+27 -18
appview/state/profile.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
"tangled.org/core/appview/pages"
+
"tangled.org/core/orm"
)
func (s *State) Profile(w http.ResponseWriter, r *http.Request) {
···
return nil, fmt.Errorf("failed to get profile: %w", err)
}
-
repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did))
+
repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did))
if err != nil {
return nil, fmt.Errorf("failed to get repo count: %w", err)
}
-
stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did))
+
stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did))
if err != nil {
return nil, fmt.Errorf("failed to get string count: %w", err)
}
-
starredCount, err := db.CountStars(s.db, db.FilterEq("did", did))
+
starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did))
if err != nil {
return nil, fmt.Errorf("failed to get starred repo count: %w", err)
}
···
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
punchcard, err := db.MakePunchcard(
s.db,
-
db.FilterEq("did", did),
-
db.FilterGte("date", startOfYear.Format(time.DateOnly)),
-
db.FilterLte("date", now.Format(time.DateOnly)),
+
orm.FilterEq("did", did),
+
orm.FilterGte("date", startOfYear.Format(time.DateOnly)),
+
orm.FilterLte("date", now.Format(time.DateOnly)),
)
if err != nil {
return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err)
···
return &pages.ProfileCard{
UserDid: did,
-
UserHandle: ident.Handle.String(),
Profile: profile,
FollowStatus: followStatus,
Stats: pages.ProfileStats{
···
s.pages.Error500(w)
return
}
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
+
l = l.With("profileDid", profile.UserDid)
repos, err := db.GetRepos(
s.db,
0,
-
db.FilterEq("did", profile.UserDid),
+
orm.FilterEq("did", profile.UserDid),
)
if err != nil {
l.Error("failed to fetch repos", "err", err)
···
l.Error("failed to create timeline", "err", err)
}
+
// populate commit counts in the timeline, using the punchcard
+
currentMonth := time.Now().Month()
+
for _, p := range profile.Punchcard.Punches {
+
idx := currentMonth - p.Date.Month()
+
if int(idx) < len(timeline.ByMonth) {
+
timeline.ByMonth[idx].Commits += p.Count
+
}
+
}
+
s.pages.ProfileOverview(w, pages.ProfileOverviewParams{
LoggedInUser: s.oauth.GetUser(r),
Card: profile,
···
s.pages.Error500(w)
return
}
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
+
l = l.With("profileDid", profile.UserDid)
repos, err := db.GetRepos(
s.db,
0,
-
db.FilterEq("did", profile.UserDid),
+
orm.FilterEq("did", profile.UserDid),
)
if err != nil {
l.Error("failed to get repos", "err", err)
···
s.pages.Error500(w)
return
}
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
+
l = l.With("profileDid", profile.UserDid)
-
stars, err := db.GetRepoStars(s.db, 0, db.FilterEq("did", profile.UserDid))
+
stars, err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid))
if err != nil {
l.Error("failed to get stars", "err", err)
s.pages.Error500(w)
···
s.pages.Error500(w)
return
}
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
+
l = l.With("profileDid", profile.UserDid)
-
strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid))
+
strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid))
if err != nil {
l.Error("failed to get strings", "err", err)
s.pages.Error500(w)
···
if err != nil {
return nil, err
}
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
+
l = l.With("profileDid", profile.UserDid)
loggedInUser := s.oauth.GetUser(r)
params := FollowsPageParams{
···
followDids = append(followDids, extractDid(follow))
}
-
profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids))
+
profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids))
if err != nil {
l.Error("failed to get profiles", "followDids", followDids, "err", err)
return &params, err
···
log.Printf("getting profile data for %s: %s", user.Did, err)
}
-
repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did))
+
repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did))
if err != nil {
log.Printf("getting repos for %s: %s", user.Did, err)
}
+8 -2
appview/state/router.go
···
// These routes get proxied to the knot
r.Get("/info/refs", s.InfoRefs)
+
r.Post("/git-upload-archive", s.UploadArchive)
r.Post("/git-upload-pack", s.UploadPack)
r.Post("/git-receive-pack", s.ReceivePack)
···
r.Mount("/settings", s.SettingsRouter())
r.Mount("/strings", s.StringsRouter(mw))
-
r.Mount("/knots", s.KnotsRouter())
-
r.Mount("/spindles", s.SpindlesRouter())
+
+
r.Mount("/settings/knots", s.KnotsRouter())
+
r.Mount("/settings/spindles", s.SpindlesRouter())
+
r.Mount("/notifications", s.NotificationsRouter(mw))
r.Mount("/signup", s.SignupRouter())
···
issues := issues.New(
s.oauth,
s.repoResolver,
+
s.enforcer,
s.pages,
s.idResolver,
+
s.mentionsResolver,
s.db,
s.config,
s.notifier,
···
s.repoResolver,
s.pages,
s.idResolver,
+
s.mentionsResolver,
s.db,
s.config,
s.notifier,
+2 -1
appview/state/spindlestream.go
···
ec "tangled.org/core/eventconsumer"
"tangled.org/core/eventconsumer/cursor"
"tangled.org/core/log"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
spindle "tangled.org/core/spindle/models"
)
···
spindles, err := db.GetSpindles(
d,
-
db.FilterIsNot("verified", "null"),
+
orm.FilterIsNot("verified", "null"),
)
if err != nil {
return nil, err
+30 -24
appview/state/state.go
···
"tangled.org/core/appview/config"
"tangled.org/core/appview/db"
"tangled.org/core/appview/indexer"
+
"tangled.org/core/appview/mentions"
"tangled.org/core/appview/models"
"tangled.org/core/appview/notify"
dbnotify "tangled.org/core/appview/notify/db"
···
"tangled.org/core/jetstream"
"tangled.org/core/log"
tlog "tangled.org/core/log"
+
"tangled.org/core/orm"
"tangled.org/core/rbac"
"tangled.org/core/tid"
···
)
type State struct {
-
db *db.DB
-
notifier notify.Notifier
-
indexer *indexer.Indexer
-
oauth *oauth.OAuth
-
enforcer *rbac.Enforcer
-
pages *pages.Pages
-
idResolver *idresolver.Resolver
-
posthog posthog.Client
-
jc *jetstream.JetstreamClient
-
config *config.Config
-
repoResolver *reporesolver.RepoResolver
-
knotstream *eventconsumer.Consumer
-
spindlestream *eventconsumer.Consumer
-
logger *slog.Logger
-
validator *validator.Validator
+
db *db.DB
+
notifier notify.Notifier
+
indexer *indexer.Indexer
+
oauth *oauth.OAuth
+
enforcer *rbac.Enforcer
+
pages *pages.Pages
+
idResolver *idresolver.Resolver
+
mentionsResolver *mentions.Resolver
+
posthog posthog.Client
+
jc *jetstream.JetstreamClient
+
config *config.Config
+
repoResolver *reporesolver.RepoResolver
+
knotstream *eventconsumer.Consumer
+
spindlestream *eventconsumer.Consumer
+
logger *slog.Logger
+
validator *validator.Validator
}
func Make(ctx context.Context, config *config.Config) (*State, error) {
···
}
validator := validator.New(d, res, enforcer)
-
repoResolver := reporesolver.New(config, enforcer, res, d)
+
repoResolver := reporesolver.New(config, enforcer, d)
+
+
mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver"))
wrapper := db.DbWrapper{Execer: d}
jc, err := jetstream.NewJetstreamClient(
···
enforcer,
pages,
res,
+
mentionsResolver,
posthog,
jc,
config,
···
return
}
-
gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
+
gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
if err != nil {
// non-fatal
}
···
regs, err := db.GetRegistrations(
s.db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("needs_upgrade", 1),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("needs_upgrade", 1),
)
if err != nil {
l.Error("non-fatal: failed to get registrations", "err", err)
···
spindles, err := db.GetSpindles(
s.db,
-
db.FilterEq("owner", user.Did),
-
db.FilterEq("needs_upgrade", 1),
+
orm.FilterEq("owner", user.Did),
+
orm.FilterEq("needs_upgrade", 1),
)
if err != nil {
l.Error("non-fatal: failed to get spindles", "err", err)
···
// Check for existing repos
existingRepo, err := db.GetRepo(
s.db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("name", repoName),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("name", repoName),
)
if err == nil && existingRepo != nil {
l.Info("repo exists")
···
}
func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error {
-
defaultLabels, err := db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults))
+
defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults))
if err != nil {
return err
}
+7 -6
appview/strings/strings.go
···
"tangled.org/core/appview/pages"
"tangled.org/core/appview/pages/markup"
"tangled.org/core/idresolver"
+
"tangled.org/core/orm"
"tangled.org/core/tid"
"github.com/bluesky-social/indigo/api/atproto"
···
strings, err := db.GetStrings(
s.Db,
0,
-
db.FilterEq("did", id.DID),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", id.DID),
+
orm.FilterEq("rkey", rkey),
)
if err != nil {
l.Error("failed to fetch string", "err", err)
···
all, err := db.GetStrings(
s.Db,
0,
-
db.FilterEq("did", id.DID),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", id.DID),
+
orm.FilterEq("rkey", rkey),
)
if err != nil {
l.Error("failed to fetch string", "err", err)
···
if err := db.DeleteString(
s.Db,
-
db.FilterEq("did", user.Did),
-
db.FilterEq("rkey", rkey),
+
orm.FilterEq("did", user.Did),
+
orm.FilterEq("rkey", rkey),
); err != nil {
fail("Failed to delete string.", err)
return
+2 -1
appview/validator/issue.go
···
"tangled.org/core/appview/db"
"tangled.org/core/appview/models"
+
"tangled.org/core/orm"
)
func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
// if comments have parents, only ingest ones that are 1 level deep
if comment.ReplyTo != nil {
-
parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo))
+
parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
if err != nil {
return fmt.Errorf("failed to fetch parent comment: %w", err)
}
+1 -34
crypto/verify.go
···
"crypto/sha256"
"encoding/base64"
"fmt"
-
"strings"
"github.com/hiddeco/sshsig"
"golang.org/x/crypto/ssh"
-
"tangled.org/core/types"
)
func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···
// multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
// to sha-512 for all key types anyway.
err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
-
return err, err == nil
-
}
-
// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
-
// essentially the git cat-file output but without the gpgsig header.
-
//
-
// Caveats: signature verification will fail on commits with more than one parent,
-
// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
-
// and we are unable to reconstruct the payload correctly.
-
//
-
// Ideally this should directly operate on an *object.Commit.
-
func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
-
signature := commit.Commit.PGPSignature
-
-
author := bytes.NewBuffer([]byte{})
-
committer := bytes.NewBuffer([]byte{})
-
commit.Commit.Author.Encode(author)
-
commit.Commit.Committer.Encode(committer)
-
-
payload := strings.Builder{}
-
-
fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
-
if commit.Commit.Parent != "" {
-
fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
-
}
-
fmt.Fprintf(&payload, "author %s\n", author.String())
-
fmt.Fprintf(&payload, "committer %s\n", committer.String())
-
if commit.Commit.ChangedId != "" {
-
fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
-
}
-
fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
-
-
return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
+
return err, err == nil
}
// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1530
docs/DOCS.md
···
+
---
+
title: Tangled Documentation
+
author: The Tangled Contributors
+
date: Sun, 21 Dec 2025
+
---
+
+
# Introduction
+
+
Tangled is a decentralized code hosting and collaboration
+
platform. Every component of Tangled is open-source and
+
selfhostable. [tangled.org](https://tangled.org) also
+
provides hosting and CI services that are free to use.
+
+
There are several models for decentralized code
+
collaboration platforms, ranging from ActivityPub’s
+
(Forgejo) federated model, to Radicle’s entirely P2P model.
+
Our approach attempts to be the best of both worlds by
+
adopting atproto—a protocol for building decentralized
+
social applications with a central identity
+
+
Our approach to this is the idea of “knots”. Knots are
+
lightweight, headless servers that enable users to host Git
+
repositories with ease. Knots are designed for either single
+
or multi-tenant use which is perfect for self-hosting on a
+
Raspberry Pi at home, or larger “community” servers. By
+
default, Tangled provides managed knots where you can host
+
your repositories for free.
+
+
The "appview" at tangled.org acts as a consolidated “view”
+
into the whole network, allowing users to access, clone and
+
contribute to repositories hosted across different knots
+
seamlessly.
+
+
# Quick Start Guide
+
+
## Login or Sign up
+
+
You can [login](https://tangled.org) by using your AT
+
account. If you are unclear on what that means, simply head
+
to the [signup](https://tangled.org/signup) page and create
+
an account. By doing so, you will be choosing Tangled as
+
your account provider (you will be granted a handle of the
+
form `user.tngl.sh`).
+
+
In the AT network, users are free to choose their account
+
provider (known as a "Personal Data Service", or PDS), and
+
login to applications that support AT accounts.
+
+
You can think of it as "one account for all of the
+
atmosphere"!
+
+
If you already have an AT account (you may have one if you
+
signed up to Bluesky, for example), you can login with the
+
same handle on Tangled (so just use `user.bsky.social` on
+
the login page).
+
+
## Add an SSH Key
+
+
Once you are logged in, you can start creating repositories
+
and pushing code. Tangled supports pushing git repositories
+
over SSH.
+
+
First, you'll need to generate an SSH key if you don't
+
already have one:
+
+
```bash
+
ssh-keygen -t ed25519 -C "foo@bar.com"
+
```
+
+
When prompted, save the key to the default location
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
+
+
Copy your public key to your clipboard:
+
+
```bash
+
# on X11
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
+
+
# on wayland
+
cat ~/.ssh/id_ed25519.pub | wl-copy
+
+
# on macos
+
cat ~/.ssh/id_ed25519.pub | pbcopy
+
```
+
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
+
paste your public key, give it a descriptive name, and hit
+
save.
+
+
## Create a Repository
+
+
Once your SSH key is added, create your first repository:
+
+
1. Hit the green `+` icon on the topbar, and select
+
repository
+
2. Enter a repository name
+
3. Add a description
+
4. Choose a knotserver to host this repository on
+
5. Hit create
+
+
"Knots" are selfhostable, lightweight git servers that can
+
host your repository. Unlike traditional code forges, your
+
code can live on any server. Read the [Knots](TODO) section
+
for more.
+
+
## Configure SSH
+
+
To ensure Git uses the correct SSH key and connects smoothly
+
to Tangled, add this configuration to your `~/.ssh/config`
+
file:
+
+
```
+
Host tangled.org
+
Hostname tangled.org
+
User git
+
IdentityFile ~/.ssh/id_ed25519
+
AddressFamily inet
+
```
+
+
This tells SSH to use your specific key when connecting to
+
Tangled and prevents authentication issues if you have
+
multiple SSH keys.
+
+
Note that this configuration only works for knotservers that
+
are hosted by tangled.org. If you use a custom knot, refer
+
to the [Knots](TODO) section.
+
+
## Push Your First Repository
+
+
Initialize a new git repository:
+
+
```bash
+
mkdir my-project
+
cd my-project
+
+
git init
+
echo "# My Project" > README.md
+
```
+
+
Add some content and push!
+
+
```bash
+
git add README.md
+
git commit -m "Initial commit"
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
+
git push -u origin main
+
```
+
+
That's it! Your code is now hosted on Tangled.
+
+
## Migrating an existing repository
+
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
+
any other Git forge to Tangled is straightforward. You'll
+
simply change your repository's remote URL. At the moment,
+
Tangled does not have any tooling to migrate data such as
+
GitHub issues or pull requests.
+
+
First, create a new repository on tangled.org as described
+
in the [Quick Start Guide](#create-a-repository).
+
+
Navigate to your existing local repository:
+
+
```bash
+
cd /path/to/your/existing/repo
+
```
+
+
You can inspect your existing git remote like so:
+
+
```bash
+
git remote -v
+
```
+
+
You'll see something like:
+
+
```
+
origin git@github.com:username/my-project (fetch)
+
origin git@github.com:username/my-project (push)
+
```
+
+
Update the remote URL to point to tangled:
+
+
```bash
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
+
```
+
+
Verify the change:
+
+
```bash
+
git remote -v
+
```
+
+
You should now see:
+
+
```
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
+
origin git@tangled.org:user.tngl.sh/my-project (push)
+
```
+
+
Push all your branches and tags to tangled:
+
+
```bash
+
git push -u origin --all
+
git push -u origin --tags
+
```
+
+
Your repository is now migrated to Tangled! All commit
+
history, branches, and tags have been preserved.
+
+
## Mirroring a repository to Tangled
+
+
If you want to maintain your repository on multiple forges
+
simultaneously, for example, keeping your primary repository
+
on GitHub while mirroring to Tangled for backup or
+
redundancy, you can do so by adding multiple remotes.
+
+
You can configure your local repository to push to both
+
Tangled and, say, GitHub. You may already have the following
+
setup:
+
+
```
+
$ git remote -v
+
origin git@github.com:username/my-project (fetch)
+
origin git@github.com:username/my-project (push)
+
```
+
+
Now add Tangled as an additional push URL to the same
+
remote:
+
+
```bash
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
+
```
+
+
You also need to re-add the original URL as a push
+
destination (git replaces the push URL when you use `--add`
+
the first time):
+
+
```bash
+
git remote set-url --add --push origin git@github.com:username/my-project
+
```
+
+
Verify your configuration:
+
+
```
+
$ git remote -v
+
origin  git@github.com:username/my-project (fetch)
+
origin  git@tangled.org:user.tngl.sh/my-project (push)
+
origin  git@github.com:username/my-project (push)
+
```
+
+
Notice that there's one fetch URL (the primary remote) and
+
two push URLs. Now, whenever you push, git will
+
automatically push to both remotes:
+
+
```bash
+
git push origin main
+
```
+
+
This single command pushes your `main` branch to both GitHub
+
and Tangled simultaneously.
+
+
To push all branches and tags:
+
+
```bash
+
git push origin --all
+
git push origin --tags
+
```
+
+
If you prefer more control over which remote you push to,
+
you can maintain separate remotes:
+
+
```bash
+
git remote add github git@github.com:username/my-project
+
git remote add tangled git@tangled.org:user.tngl.sh/my-project
+
```
+
+
Then push to each explicitly:
+
+
```bash
+
git push github main
+
git push tangled main
+
```
+
+
# Knot self-hosting guide
+
+
So you want to run your own knot server? Great! Here are a few prerequisites:
+
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
+
2. A (sub)domain name. People generally use `knot.example.com`.
+
3. A valid SSL certificate for your domain.
+
+
## NixOS
+
+
Refer to the [knot
+
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
+
for a full list of options. Sample configurations:
+
+
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
+
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
+
+
## Docker
+
+
Refer to
+
[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
+
Note that this is community maintained.
+
+
## Manual setup
+
+
First, clone this repository:
+
+
```
+
git clone https://tangled.org/@tangled.org/core
+
```
+
+
Then, build the `knot` CLI. This is the knot administration
+
and operation tool. For the purpose of this guide, we're
+
only concerned with these subcommands:
+
+
* `knot server`: the main knot server process, typically
+
run as a supervised service
+
* `knot guard`: handles role-based access control for git
+
over SSH (you'll never have to run this yourself)
+
* `knot keys`: fetches SSH keys associated with your knot;
+
we'll use this to generate the SSH
+
`AuthorizedKeysCommand`
+
+
```
+
cd core
+
export CGO_ENABLED=1
+
go build -o knot ./cmd/knot
+
```
+
+
Next, move the `knot` binary to a location owned by `root` --
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
+
+
```
+
sudo mv knot /usr/local/bin/knot
+
sudo chown root:root /usr/local/bin/knot
+
```
+
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
+
specific permissions](https://stackoverflow.com/a/27638306). The
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
+
retrieve a user's public SSH keys dynamically for authentication. Let's
+
set that up.
+
+
```
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
+
Match User git
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
+
AuthorizedKeysCommandUser nobody
+
EOF
+
```
+
+
Then, reload `sshd`:
+
+
```
+
sudo systemctl reload ssh
+
```
+
+
Next, create the `git` user. We'll use the `git` user's home directory
+
to store repositories:
+
+
```
+
sudo adduser git
+
```
+
+
Create `/home/git/.knot.env` with the following, updating the values as
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
+
DID, you can find your DID in the [Settings](https://tangled.org/settings) page.
+
+
```
+
KNOT_REPO_SCAN_PATH=/home/git
+
KNOT_SERVER_HOSTNAME=knot.example.com
+
APPVIEW_ENDPOINT=https://tangled.org
+
KNOT_SERVER_OWNER=did:plc:foobar
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
+
```
+
+
If you run a Linux distribution that uses systemd, you can use the provided
+
service file to run the server. Copy
+
[`knotserver.service`](/systemd/knotserver.service)
+
to `/etc/systemd/system/`. Then, run:
+
+
```
+
systemctl enable knotserver
+
systemctl start knotserver
+
```
+
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
+
knot. Here's an example configuration for Nginx:
+
+
```
+
server {
+
listen 80;
+
listen [::]:80;
+
server_name knot.example.com;
+
+
location / {
+
proxy_pass http://localhost:5555;
+
proxy_set_header Host $host;
+
proxy_set_header X-Real-IP $remote_addr;
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
proxy_set_header X-Forwarded-Proto $scheme;
+
}
+
+
# wss endpoint for git events
+
location /events {
+
proxy_set_header X-Forwarded-For $remote_addr;
+
proxy_set_header Host $http_host;
+
proxy_set_header Upgrade websocket;
+
proxy_set_header Connection Upgrade;
+
proxy_pass http://localhost:5555;
+
}
+
# additional config for SSL/TLS go here.
+
}
+
+
```
+
+
Remember to use Let's Encrypt or similar to procure a certificate for your
+
knot domain.
+
+
You should now have a running knot server! You can finalize
+
your registration by hitting the `verify` button on the
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
+
a record on your PDS to announce the existence of the knot.
+
+
### Custom paths
+
+
(This section applies to manual setup only. Docker users should edit the mounts
+
in `docker-compose.yml` instead.)
+
+
Right now, the database and repositories of your knot lives in `/home/git`. You
+
can move these paths if you'd like to store them in another folder. Be careful
+
when adjusting these paths:
+
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
+
any possible side effects. Remember to restart it once you're done.
+
* Make backups before moving in case something goes wrong.
+
* Make sure the `git` user can read and write from the new paths.
+
+
#### Database
+
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
+
and we want to move it to `/home/git/database/knotserver.db`.
+
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
+
and `.db-wal` files if they exist.
+
+
```
+
mkdir /home/git/database
+
cp /home/git/knotserver.db* /home/git/database
+
```
+
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
+
the new file path (_not_ the directory):
+
+
```
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
+
```
+
+
#### Repositories
+
+
As an example, let's say the repositories are currently in `/home/git`, and we
+
want to move them into `/home/git/repositories`.
+
+
Create the new folder, then move the existing repositories (if there are any):
+
+
```
+
mkdir /home/git/repositories
+
# move all DIDs into the new folder; these will vary for you!
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
+
```
+
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
+
to the new directory:
+
+
```
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
+
```
+
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
+
repository path:
+
+
```
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
+
Match User git
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
+
AuthorizedKeysCommandUser nobody
+
EOF
+
```
+
+
Make sure to restart your SSH server!
+
+
#### MOTD (message of the day)
+
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
+
`/home/git/motd` file:
+
+
```
+
printf "Hi from this knot!\n" > /home/git/motd
+
```
+
+
Note that you should add a newline at the end if setting a non-empty message
+
since the knot won't do this for you.
+
+
# Spindles
+
+
## Pipelines
+
+
Spindle workflows allow you to write CI/CD pipelines in a
+
simple format. They're located in the `.tangled/workflows`
+
directory at the root of your repository, and are defined
+
using YAML.
+
+
The fields are:
+
+
- [Trigger](#trigger): A **required** field that defines
+
when a workflow should be triggered.
+
- [Engine](#engine): A **required** field that defines which
+
engine a workflow should run on.
+
- [Clone options](#clone-options): An **optional** field
+
that defines how the repository should be cloned.
+
- [Dependencies](#dependencies): An **optional** field that
+
allows you to list dependencies you may need.
+
- [Environment](#environment): An **optional** field that
+
allows you to define environment variables.
+
- [Steps](#steps): An **optional** field that allows you to
+
define what steps should run in the workflow.
+
+
### Trigger
+
+
The first thing to add to a workflow is the trigger, which
+
defines when a workflow runs. This is defined using a `when`
+
field, which takes in a list of conditions. Each condition
+
has the following fields:
+
+
- `event`: This is a **required** field that defines when
+
your workflow should run. It's a list that can take one or
+
more of the following values:
+
- `push`: The workflow should run every time a commit is
+
pushed to the repository.
+
- `pull_request`: The workflow should run every time a
+
pull request is made or updated.
+
- `manual`: The workflow can be triggered manually.
+
- `branch`: Defines which branches the workflow should run
+
for. If used with the `push` event, commits to the
+
branch(es) listed here will trigger the workflow. If used
+
with the `pull_request` event, updates to pull requests
+
targeting the branch(es) listed here will trigger the
+
workflow. This field has no effect with the `manual`
+
event. Supports glob patterns using `*` and `**` (e.g.,
+
`main`, `develop`, `release-*`). Either `branch` or `tag`
+
(or both) must be specified for `push` events.
+
- `tag`: Defines which tags the workflow should run for.
+
Only used with the `push` event - when tags matching the
+
pattern(s) listed here are pushed, the workflow will
+
trigger. This field has no effect with `pull_request` or
+
`manual` events. Supports glob patterns using `*` and `**`
+
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
+
`tag` (or both) must be specified for `push` events.
+
+
For example, if you'd like to define a workflow that runs
+
when commits are pushed to the `main` and `develop`
+
branches, or when pull requests that target the `main`
+
branch are updated, or manually, you can do so with:
+
+
```yaml
+
when:
+
- event: ["push", "manual"]
+
branch: ["main", "develop"]
+
- event: ["pull_request"]
+
branch: ["main"]
+
```
+
+
You can also trigger workflows on tag pushes. For instance,
+
to run a deployment workflow when tags matching `v*` are
+
pushed:
+
+
```yaml
+
when:
+
- event: ["push"]
+
tag: ["v*"]
+
```
+
+
You can even combine branch and tag patterns in a single
+
constraint (the workflow triggers if either matches):
+
+
```yaml
+
when:
+
- event: ["push"]
+
branch: ["main", "release-*"]
+
tag: ["v*", "stable"]
+
```
+
+
### Engine
+
+
Next is the engine on which the workflow should run, defined
+
using the **required** `engine` field. The currently
+
supported engines are:
+
+
- `nixery`: This uses an instance of
+
[Nixery](https://nixery.dev) to run steps, which allows
+
you to add [dependencies](#dependencies) from
+
[Nixpkgs](https://github.com/NixOS/nixpkgs). You can
+
search for packages on https://search.nixos.org, and
+
there's a pretty good chance the package(s) you're looking
+
for will be there.
+
+
Example:
+
+
```yaml
+
engine: "nixery"
+
```
+
+
### Clone options
+
+
When a workflow starts, the first step is to clone the
+
repository. You can customize this behavior using the
+
**optional** `clone` field. It has the following fields:
+
+
- `skip`: Setting this to `true` will skip cloning the
+
repository. This can be useful if your workflow is doing
+
something that doesn't require anything from the
+
repository itself. This is `false` by default.
+
- `depth`: This sets the number of commits, or the "clone
+
depth", to fetch from the repository. For example, if you
+
set this to 2, the last 2 commits will be fetched. By
+
default, the depth is set to 1, meaning only the most
+
recent commit will be fetched, which is the commit that
+
triggered the workflow.
+
- `submodules`: If you use [git
+
submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules)
+
in your repository, setting this field to `true` will
+
recursively fetch all submodules. This is `false` by
+
default.
+
+
The default settings are:
+
+
```yaml
+
clone:
+
skip: false
+
depth: 1
+
submodules: false
+
```
+
+
### Dependencies
+
+
Usually when you're running a workflow, you'll need
+
additional dependencies. The `dependencies` field lets you
+
define which dependencies to get, and from where. It's a
+
key-value map, with the key being the registry to fetch
+
dependencies from, and the value being the list of
+
dependencies to fetch.
+
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a
+
package called `my_pkg` you've made from your own registry
+
at your repository at
+
`https://tangled.org/@example.com/my_pkg`. You can define
+
those dependencies like so:
+
+
```yaml
+
dependencies:
+
# nixpkgs
+
nixpkgs:
+
- nodejs
+
- go
+
# custom registry
+
git+https://tangled.org/@example.com/my_pkg:
+
- my_pkg
+
```
+
+
Now these dependencies are available to use in your
+
workflow!
+
+
### Environment
+
+
The `environment` field allows you to define environment
+
variables that will be available throughout the entire
+
workflow. **Do not put secrets here, these environment
+
variables are visible to anyone viewing the repository. You
+
can add secrets for pipelines in your repository's
+
settings.**
+
+
Example:
+
+
```yaml
+
environment:
+
GOOS: "linux"
+
GOARCH: "arm64"
+
NODE_ENV: "production"
+
MY_ENV_VAR: "MY_ENV_VALUE"
+
```
+
+
### Steps
+
+
The `steps` field allows you to define what steps should run
+
in the workflow. It's a list of step objects, each with the
+
following fields:
+
+
- `name`: This field allows you to give your step a name.
+
This name is visible in your workflow runs, and is used to
+
describe what the step is doing.
+
- `command`: This field allows you to define a command to
+
run in that step. The step is run in a Bash shell, and the
+
logs from the command will be visible in the pipelines
+
page on the Tangled website. The
+
[dependencies](#dependencies) you added will be available
+
to use here.
+
- `environment`: Similar to the global
+
[environment](#environment) config, this **optional**
+
field is a key-value map that allows you to set
+
environment variables for the step. **Do not put secrets
+
here, these environment variables are visible to anyone
+
viewing the repository. You can add secrets for pipelines
+
in your repository's settings.**
+
+
Example:
+
+
```yaml
+
steps:
+
- name: "Build backend"
+
command: "go build"
+
environment:
+
GOOS: "darwin"
+
GOARCH: "arm64"
+
- name: "Build frontend"
+
command: "npm run build"
+
environment:
+
NODE_ENV: "production"
+
```
+
+
### Complete workflow
+
+
```yaml
+
# .tangled/workflows/build.yml
+
+
when:
+
- event: ["push", "manual"]
+
branch: ["main", "develop"]
+
- event: ["pull_request"]
+
branch: ["main"]
+
+
engine: "nixery"
+
+
# using the default values
+
clone:
+
skip: false
+
depth: 1
+
submodules: false
+
+
dependencies:
+
# nixpkgs
+
nixpkgs:
+
- nodejs
+
- go
+
# custom registry
+
git+https://tangled.org/@example.com/my_pkg:
+
- my_pkg
+
+
environment:
+
GOOS: "linux"
+
GOARCH: "arm64"
+
NODE_ENV: "production"
+
MY_ENV_VAR: "MY_ENV_VALUE"
+
+
steps:
+
- name: "Build backend"
+
command: "go build"
+
environment:
+
GOOS: "darwin"
+
GOARCH: "arm64"
+
- name: "Build frontend"
+
command: "npm run build"
+
environment:
+
NODE_ENV: "production"
+
```
+
+
If you want another example of a workflow, you can look at
+
the one [Tangled uses to build the
+
project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
+
+
## Self-hosting guide
+
+
### Prerequisites
+
+
* Go
+
* Docker (the only supported backend currently)
+
+
### Configuration
+
+
Spindle is configured using environment variables. The following environment variables are available:
+
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
+
+
### Running spindle
+
+
1. **Set the environment variables.** For example:
+
+
```shell
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
+
export SPINDLE_SERVER_OWNER="your-did"
+
```
+
+
2. **Build the Spindle binary.**
+
+
```shell
+
cd core
+
go mod download
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
+
```
+
+
3. **Create the log directory.**
+
+
```shell
+
sudo mkdir -p /var/log/spindle
+
sudo chown $USER:$USER -R /var/log/spindle
+
```
+
+
4. **Run the Spindle binary.**
+
+
```shell
+
./cmd/spindle/spindle
+
```
+
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
+
+
## Architecture
+
+
Spindle is a small CI runner service. Here's a high level overview of how it operates:
+
+
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
+
* when a new repo record comes through (typically when you add a spindle to a
+
repo from the settings), spindle then resolves the underlying knot and
+
subscribes to repo events (see:
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
+
* the spindle engine then handles execution of the pipeline, with results and
+
logs beamed on the spindle event stream over wss
+
+
### The engine
+
+
At present, the only supported backend is Docker (and Podman, if Docker
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
+
executes each step in the pipeline in a fresh container, with state persisted
+
across steps within the `/tangled/workspace` directory.
+
+
The base image for the container is constructed on the fly using
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
+
used packages.
+
+
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
+
+
## Secrets with openbao
+
+
This document covers setting up Spindle to use OpenBao for secrets
+
management via OpenBao Proxy instead of the default SQLite backend.
+
+
### Overview
+
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
+
authentication automatically using AppRole credentials, while Spindle
+
connects to the local proxy instead of directly to the OpenBao server.
+
+
This approach provides better security, automatic token renewal, and
+
simplified application code.
+
+
### Installation
+
+
Install OpenBao from nixpkgs:
+
+
```bash
+
nix shell nixpkgs#openbao # for a local server
+
```
+
+
### Setup
+
+
The setup process is documented for both local development and production.
+
+
#### Local development
+
+
Start OpenBao in dev mode:
+
+
```bash
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
+
```
+
+
This starts OpenBao on `http://localhost:8201` with a root token.
+
+
Set up environment for bao CLI:
+
+
```bash
+
export BAO_ADDR=http://localhost:8201
+
export BAO_TOKEN=root
+
```
+
+
#### Production
+
+
You would typically use a systemd service with a
+
configuration file. Refer to
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
+
for how this can be achieved using Nix.
+
+
Then, initialize the bao server:
+
+
```bash
+
bao operator init -key-shares=1 -key-threshold=1
+
```
+
+
This will print out an unseal key and a root key. Save them
+
somewhere (like a password manager). Then unseal the vault
+
to begin setting it up:
+
+
```bash
+
bao operator unseal <unseal_key>
+
```
+
+
All steps below remain the same across both dev and
+
production setups.
+
+
#### Configure openbao server
+
+
Create the spindle KV mount:
+
+
```bash
+
bao secrets enable -path=spindle -version=2 kv
+
```
+
+
Set up AppRole authentication and policy:
+
+
Create a policy file `spindle-policy.hcl`:
+
+
```hcl
+
# Full access to spindle KV v2 data
+
path "spindle/data/*" {
+
capabilities = ["create", "read", "update", "delete"]
+
}
+
+
# Access to metadata for listing and management
+
path "spindle/metadata/*" {
+
capabilities = ["list", "read", "delete", "update"]
+
}
+
+
# Allow listing at root level
+
path "spindle/" {
+
capabilities = ["list"]
+
}
+
+
# Required for connection testing and health checks
+
path "auth/token/lookup-self" {
+
capabilities = ["read"]
+
}
+
```
+
+
Apply the policy and create an AppRole:
+
+
```bash
+
bao policy write spindle-policy spindle-policy.hcl
+
bao auth enable approle
+
bao write auth/approle/role/spindle \
+
token_policies="spindle-policy" \
+
token_ttl=1h \
+
token_max_ttl=4h \
+
bind_secret_id=true \
+
secret_id_ttl=0 \
+
secret_id_num_uses=0
+
```
+
+
Get the credentials:
+
+
```bash
+
# Get role ID (static)
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
+
+
# Generate secret ID
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
+
+
echo "Role ID: $ROLE_ID"
+
echo "Secret ID: $SECRET_ID"
+
```
+
+
#### Create proxy configuration
+
+
Create the credential files:
+
+
```bash
+
# Create directory for OpenBao files
+
mkdir -p /tmp/openbao
+
+
# Save credentials
+
echo "$ROLE_ID" > /tmp/openbao/role-id
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
+
```
+
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
+
+
```hcl
+
# OpenBao server connection
+
vault {
+
address = "http://localhost:8200"
+
}
+
+
# Auto-Auth using AppRole
+
auto_auth {
+
method "approle" {
+
mount_path = "auth/approle"
+
config = {
+
role_id_file_path = "/tmp/openbao/role-id"
+
secret_id_file_path = "/tmp/openbao/secret-id"
+
}
+
}
+
+
# Optional: write token to file for debugging
+
sink "file" {
+
config = {
+
path = "/tmp/openbao/token"
+
mode = 0640
+
}
+
}
+
}
+
+
# Proxy listener for Spindle
+
listener "tcp" {
+
address = "127.0.0.1:8201"
+
tls_disable = true
+
}
+
+
# Enable API proxy with auto-auth token
+
api_proxy {
+
use_auto_auth_token = true
+
}
+
+
# Enable response caching
+
cache {
+
use_auto_auth_token = true
+
}
+
+
# Logging
+
log_level = "info"
+
```
+
+
#### Start the proxy
+
+
Start OpenBao Proxy:
+
+
```bash
+
bao proxy -config=/tmp/openbao/proxy.hcl
+
```
+
+
The proxy will authenticate with OpenBao and start listening on
+
`127.0.0.1:8201`.
+
+
#### Configure spindle
+
+
Set these environment variables for Spindle:
+
+
```bash
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
+
```
+
+
On startup, the spindle will now connect to the local proxy,
+
which handles all authentication automatically.
+
+
### Production setup for proxy
+
+
For production, you'll want to run the proxy as a service:
+
+
Place your production configuration in
+
`/etc/openbao/proxy.hcl` with proper TLS settings for the
+
vault connection.
+
+
### Verifying setup
+
+
Test the proxy directly:
+
+
```bash
+
# Check proxy health
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
+
+
# Test token lookup through proxy
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
+
```
+
+
Test OpenBao operations through the server:
+
+
```bash
+
# List all secrets
+
bao kv list spindle/
+
+
# Add a test secret via Spindle API, then check it exists
+
bao kv list spindle/repos/
+
+
# Get a specific secret
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
+
```
+
+
### How it works
+
+
- Spindle connects to OpenBao Proxy on localhost (typically
+
port 8200 or 8201)
+
- The proxy authenticates with OpenBao using AppRole
+
credentials
+
- All Spindle requests go through the proxy, which injects
+
authentication tokens
+
- Secrets are stored at
+
`spindle/repos/{sanitized_repo_path}/{secret_key}`
+
- Repository paths like `did:plc:alice/myrepo` become
+
`did_plc_alice_myrepo`
+
- The proxy handles all token renewal automatically
+
- Spindle no longer manages tokens or authentication
+
directly
+
+
### Troubleshooting
+
+
**Connection refused**: Check that the OpenBao Proxy is
+
running and listening on the configured address.
+
+
**403 errors**: Verify the AppRole credentials are correct
+
and the policy has the necessary permissions.
+
+
**404 route errors**: The spindle KV mount probably doesn't
+
exist - run the mount creation step again.
+
+
**Proxy authentication failures**: Check the proxy logs and
+
verify the role-id and secret-id files are readable and
+
contain valid credentials.
+
+
**Secret not found after writing**: This can indicate policy
+
permission issues. Verify the policy includes both
+
`spindle/data/*` and `spindle/metadata/*` paths with
+
appropriate capabilities.
+
+
Check proxy logs:
+
+
```bash
+
# If running as systemd service
+
journalctl -u openbao-proxy -f
+
+
# If running directly, check the console output
+
```
+
+
Test AppRole authentication manually:
+
+
```bash
+
bao write auth/approle/login \
+
role_id="$(cat /tmp/openbao/role-id)" \
+
secret_id="$(cat /tmp/openbao/secret-id)"
+
```
+
+
# Migrating knots & spindles
+
+
Sometimes, non-backwards compatible changes are made to the
+
knot/spindle XRPC APIs. If you host a knot or a spindle, you
+
will need to follow this guide to upgrade. Typically, this
+
only requires you to deploy the newest version.
+
+
This document is laid out in reverse-chronological order.
+
Newer migration guides are listed first, and older guides
+
are further down the page.
+
+
## Upgrading from v1.8.x
+
+
After v1.8.2, the HTTP API for knots and spindles has been
+
deprecated and replaced with XRPC. Repositories on outdated
+
knots will not be viewable from the appview. Upgrading is
+
straightforward however.
+
+
For knots:
+
+
- Upgrade to latest tag (v1.9.0 or above)
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
+
hit the "retry" button to verify your knot
+
+
For spindles:
+
+
- Upgrade to latest tag (v1.9.0 or above)
+
- Head to the [spindle
+
dashboard](https://tangled.org/settings/spindles) and hit the
+
"retry" button to verify your spindle
+
+
## Upgrading from v1.7.x
+
+
After v1.7.0, knot secrets have been deprecated. You no
+
longer need a secret from the appview to run a knot. All
+
authorized commands to knots are managed via [Inter-Service
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
+
Knots will be read-only until upgraded.
+
+
Upgrading is quite easy, in essence:
+
+
- `KNOT_SERVER_SECRET` is no more, you can remove this
+
environment variable entirely
+
- `KNOT_SERVER_OWNER` is now required on boot, set this to
+
your DID. You can find your DID in the
+
[settings](https://tangled.org/settings) page.
+
- Restart your knot once you have replaced the environment
+
variable
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
+
hit the "retry" button to verify your knot. This simply
+
writes a `sh.tangled.knot` record to your PDS.
+
+
If you use the nix module, simply bump the flake to the
+
latest revision, and change your config block like so:
+
+
```diff
+
services.tangled.knot = {
+
enable = true;
+
server = {
+
- secretFile = /path/to/secret;
+
+ owner = "did:plc:foo";
+
};
+
};
+
```
+
+
# Hacking on Tangled
+
+
We highly recommend [installing
+
nix](https://nixos.org/download/) (the package manager)
+
before working on the codebase. The nix flake provides a lot
+
of helpers to get started and most importantly, builds and
+
dev shells are entirely deterministic.
+
+
To set up your dev environment:
+
+
```bash
+
nix develop
+
```
+
+
Non-nix users can look at the `devShell` attribute in the
+
`flake.nix` file to determine necessary dependencies.
+
+
## Running the appview
+
+
The nix flake also exposes a few `app` attributes (run `nix
+
flake show` to see a full list of what the flake provides),
+
one of the apps runs the appview with the `air`
+
live-reloader:
+
+
```bash
+
TANGLED_DEV=true nix run .#watch-appview
+
+
# TANGLED_DB_PATH might be of interest to point to
+
# different sqlite DBs
+
+
# in a separate shell, you can live-reload tailwind
+
nix run .#watch-tailwind
+
```
+
+
To authenticate with the appview, you will need redis and
+
OAUTH JWKs to be setup:
+
+
```
+
# oauth jwks should already be setup by the nix devshell:
+
echo $TANGLED_OAUTH_CLIENT_SECRET
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
+
+
echo $TANGLED_OAUTH_CLIENT_KID
+
1761667908
+
+
# if not, you can set it up yourself:
+
goat key generate -t P-256
+
Key Type: P-256 / secp256r1 / ES256 private key
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
+
+
# the secret key from above
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
+
+
# run redis in a new shell to store oauth sessions
+
redis-server
+
```
+
+
## Running knots and spindles
+
+
An end-to-end knot setup requires setting up a machine with
+
`sshd`, `AuthorizedKeysCommand`, and git user, which is
+
quite cumbersome. So the nix flake provides a
+
`nixosConfiguration` to do so.
+
+
<details>
+
<summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
+
+
In order to build Tangled's dev VM on macOS, you will
+
first need to set up a Linux Nix builder. The recommended
+
way to do so is to run a [`darwin.linux-builder`
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
+
and to register it in `nix.conf` as a builder for Linux
+
with the same architecture as your Mac (`linux-aarch64` if
+
you are using Apple Silicon).
+
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
+
> the tangled repo so that it doesn't conflict with the other VM. For example,
+
> you can do
+
>
+
> ```shell
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
+
> ```
+
>
+
> to store the builder VM in a temporary dir.
+
>
+
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
+
> avoid subtle problems.
+
+
Alternatively, you can use any other method to set up a
+
Linux machine with `nix` installed that you can `sudo ssh`
+
into (in other words, root user on your Mac has to be able
+
to ssh into the Linux machine without entering a password)
+
and that has the same architecture as your Mac. See
+
[remote builder
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
+
for how to register such a builder in `nix.conf`.
+
+
> WARNING: If you'd like to use
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
+
> ssh` works can be tricky. It seems to be [possible with
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
+
+
</details>
+
+
To begin, grab your DID from http://localhost:3000/settings.
+
Then, set `TANGLED_VM_KNOT_OWNER` and
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
+
lightweight NixOS VM like so:
+
+
```bash
+
nix run --impure .#vm
+
+
# type `poweroff` at the shell to exit the VM
+
```
+
+
This starts a knot on port 6444, a spindle on port 6555
+
with `ssh` exposed on port 2222.
+
+
Once the services are running, head to
+
http://localhost:3000/settings/knots and hit verify. It should
+
verify the ownership of the services instantly if everything
+
went smoothly.
+
+
You can push repositories to this VM with this ssh config
+
block on your main machine:
+
+
```bash
+
Host nixos-shell
+
Hostname localhost
+
Port 2222
+
User git
+
IdentityFile ~/.ssh/my_tangled_key
+
```
+
+
Set up a remote called `local-dev` on a git repo:
+
+
```bash
+
git remote add local-dev git@nixos-shell:user/repo
+
git push local-dev main
+
```
+
+
The above VM should already be running a spindle on
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
+
hit verify. You can then configure each repository to use
+
this spindle and run CI jobs.
+
+
Of interest when debugging spindles:
+
+
```
+
# service logs from journald:
+
journalctl -xeu spindle
+
+
# CI job logs from disk:
+
ls /var/log/spindle
+
+
# debugging spindle db:
+
sqlite3 /var/lib/spindle/spindle.db
+
+
# litecli has a nicer REPL interface:
+
litecli /var/lib/spindle/spindle.db
+
```
+
+
If for any reason you wish to disable either one of the
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
+
`services.tangled.spindle.enable` (or
+
`services.tangled.knot.enable`) to `false`.
+
+
# Contribution guide
+
+
## Commit guidelines
+
+
We follow a commit style similar to the Go project. Please keep commits:
+
+
* **atomic**: each commit should represent one logical change
+
* **descriptive**: the commit message should clearly describe what the
+
change does and why it's needed
+
+
### Message format
+
+
```
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
+
+
Optional longer description can go here, if necessary. Explain what the
+
change does and why, especially if not obvious. Reference relevant
+
issues or PRs when applicable. These can be links for now since we don't
+
auto-link issues/PRs yet.
+
```
+
+
Here are some examples:
+
+
```
+
appview/state: fix token expiry check in middleware
+
+
The previous check did not account for clock drift, leading to premature
+
token invalidation.
+
```
+
+
```
+
knotserver/git/service: improve error checking in upload-pack
+
```
+
+
+
### General notes
+
+
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
+
using `git am`. At present, there is no squashing -- so please author
+
your commits as they would appear on `master`, following the above
+
guidelines.
+
- If there is a lot of nesting, for example "appview:
+
pages/templates/repo/fragments: ...", these can be truncated down to
+
just "appview: repo/fragments: ...". If the change affects a lot of
+
subdirectories, you may abbreviate to just the top-level names, e.g.
+
"appview: ..." or "knotserver: ...".
+
- Keep commits lowercased with no trailing period.
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
+
"fixed bug" or "fixes bug").
+
- Try to keep the summary line under 72 characters, but we aren't too
+
fussed about this.
+
- Follow the same formatting for PR titles if filled manually.
+
- Don't include unrelated changes in the same commit.
+
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
+
before submitting if necessary.
+
+
## Code formatting
+
+
We use a variety of tools to format our code, and multiplex them with
+
[`treefmt`](https://treefmt.com): all you need to do to format your changes
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
+
+
## Proposals for bigger changes
+
+
Small fixes like typos, minor bugs, or trivial refactors can be
+
submitted directly as PRs.
+
+
For larger changes—especially those introducing new features, significant
+
refactoring, or altering system behavior—please open a proposal first. This
+
helps us evaluate the scope, design, and potential impact before implementation.
+
+
Create a new issue titled:
+
+
```
+
proposal: <affected scope>: <summary of change>
+
```
+
+
In the description, explain:
+
+
- What the change is
+
- Why it's needed
+
- How you plan to implement it (roughly)
+
- Any open questions or tradeoffs
+
+
We'll use the issue thread to discuss and refine the idea before moving
+
forward.
+
+
## Developer certificate of origin (DCO)
+
+
We require all contributors to certify that they have the right to
+
submit the code they're contributing. To do this, we follow the
+
[Developer Certificate of Origin
+
(DCO)](https://developercertificate.org/).
+
+
By signing your commits, you're stating that the contribution is your
+
own work, or that you have the right to submit it under the project's
+
license. This helps us keep things clean and legally sound.
+
+
To sign your commit, just add the `-s` flag when committing:
+
+
```sh
+
git commit -s -m "your commit message"
+
```
+
+
This appends a line like:
+
+
```
+
Signed-off-by: Your Name <your.email@example.com>
+
```
+
+
We won't merge commits if they aren't signed off. If you forget, you can
+
amend the last commit like this:
+
+
```sh
+
git commit --amend -s
+
```
+
+
If you're submitting a PR with multiple commits, make sure each one is
+
signed.
+
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
+
to make it sign off commits in the tangled repo:
+
+
```shell
+
# Safety check, should say "No matching config key..."
+
jj config list templates.commit_trailers
+
# The command below may need to be adjusted if the command above returned something.
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
+
```
+
+
Refer to the [jujutsu
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
+
for more information.
-136
docs/contributing.md
···
-
# tangled contributing guide
-
-
## commit guidelines
-
-
We follow a commit style similar to the Go project. Please keep commits:
-
-
* **atomic**: each commit should represent one logical change
-
* **descriptive**: the commit message should clearly describe what the
-
change does and why it's needed
-
-
### message format
-
-
```
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
-
-
-
Optional longer description can go here, if necessary. Explain what the
-
change does and why, especially if not obvious. Reference relevant
-
issues or PRs when applicable. These can be links for now since we don't
-
auto-link issues/PRs yet.
-
```
-
-
Here are some examples:
-
-
```
-
appview/state: fix token expiry check in middleware
-
-
The previous check did not account for clock drift, leading to premature
-
token invalidation.
-
```
-
-
```
-
knotserver/git/service: improve error checking in upload-pack
-
```
-
-
-
### general notes
-
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
-
using `git am`. At present, there is no squashing -- so please author
-
your commits as they would appear on `master`, following the above
-
guidelines.
-
- If there is a lot of nesting, for example "appview:
-
pages/templates/repo/fragments: ...", these can be truncated down to
-
just "appview: repo/fragments: ...". If the change affects a lot of
-
subdirectories, you may abbreviate to just the top-level names, e.g.
-
"appview: ..." or "knotserver: ...".
-
- Keep commits lowercased with no trailing period.
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
-
"fixed bug" or "fixes bug").
-
- Try to keep the summary line under 72 characters, but we aren't too
-
fussed about this.
-
- Follow the same formatting for PR titles if filled manually.
-
- Don't include unrelated changes in the same commit.
-
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
-
before submitting if necessary.
-
-
## code formatting
-
-
We use a variety of tools to format our code, and multiplex them with
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
-
-
## proposals for bigger changes
-
-
Small fixes like typos, minor bugs, or trivial refactors can be
-
submitted directly as PRs.
-
-
For larger changes—especially those introducing new features, significant
-
refactoring, or altering system behavior—please open a proposal first. This
-
helps us evaluate the scope, design, and potential impact before implementation.
-
-
### proposal format
-
-
Create a new issue titled:
-
-
```
-
proposal: <affected scope>: <summary of change>
-
```
-
-
In the description, explain:
-
-
- What the change is
-
- Why it's needed
-
- How you plan to implement it (roughly)
-
- Any open questions or tradeoffs
-
-
We'll use the issue thread to discuss and refine the idea before moving
-
forward.
-
-
## developer certificate of origin (DCO)
-
-
We require all contributors to certify that they have the right to
-
submit the code they're contributing. To do this, we follow the
-
[Developer Certificate of Origin
-
(DCO)](https://developercertificate.org/).
-
-
By signing your commits, you're stating that the contribution is your
-
own work, or that you have the right to submit it under the project's
-
license. This helps us keep things clean and legally sound.
-
-
To sign your commit, just add the `-s` flag when committing:
-
-
```sh
-
git commit -s -m "your commit message"
-
```
-
-
This appends a line like:
-
-
```
-
Signed-off-by: Your Name <your.email@example.com>
-
```
-
-
We won't merge commits if they aren't signed off. If you forget, you can
-
amend the last commit like this:
-
-
```sh
-
git commit --amend -s
-
```
-
-
If you're submitting a PR with multiple commits, make sure each one is
-
signed.
-
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
-
to make it sign off commits in the tangled repo:
-
-
```shell
-
# Safety check, should say "No matching config key..."
-
jj config list templates.commit_trailers
-
# The command below may need to be adjusted if the command above returned something.
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
-
```
-
-
Refer to the [jj
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
-
for more information.
-172
docs/hacking.md
···
-
# hacking on tangled
-
-
We highly recommend [installing
-
nix](https://nixos.org/download/) (the package manager)
-
before working on the codebase. The nix flake provides a lot
-
of helpers to get started and most importantly, builds and
-
dev shells are entirely deterministic.
-
-
To set up your dev environment:
-
-
```bash
-
nix develop
-
```
-
-
Non-nix users can look at the `devShell` attribute in the
-
`flake.nix` file to determine necessary dependencies.
-
-
## running the appview
-
-
The nix flake also exposes a few `app` attributes (run `nix
-
flake show` to see a full list of what the flake provides),
-
one of the apps runs the appview with the `air`
-
live-reloader:
-
-
```bash
-
TANGLED_DEV=true nix run .#watch-appview
-
-
# TANGLED_DB_PATH might be of interest to point to
-
# different sqlite DBs
-
-
# in a separate shell, you can live-reload tailwind
-
nix run .#watch-tailwind
-
```
-
-
To authenticate with the appview, you will need redis and
-
OAUTH JWKs to be setup:
-
-
```
-
# oauth jwks should already be setup by the nix devshell:
-
echo $TANGLED_OAUTH_CLIENT_SECRET
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
-
-
echo $TANGLED_OAUTH_CLIENT_KID
-
1761667908
-
-
# if not, you can set it up yourself:
-
goat key generate -t P-256
-
Key Type: P-256 / secp256r1 / ES256 private key
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
-
-
# the secret key from above
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
-
-
# run redis in at a new shell to store oauth sessions
-
redis-server
-
```
-
-
## running knots and spindles
-
-
An end-to-end knot setup requires setting up a machine with
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
-
quite cumbersome. So the nix flake provides a
-
`nixosConfiguration` to do so.
-
-
<details>
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
-
-
In order to build Tangled's dev VM on macOS, you will
-
first need to set up a Linux Nix builder. The recommended
-
way to do so is to run a [`darwin.linux-builder`
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
-
and to register it in `nix.conf` as a builder for Linux
-
with the same architecture as your Mac (`linux-aarch64` if
-
you are using Apple Silicon).
-
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
-
> you can do
-
>
-
> ```shell
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
-
> ```
-
>
-
> to store the builder VM in a temporary dir.
-
>
-
> You should read and follow [all the other intructions][darwin builder vm] to
-
> avoid subtle problems.
-
-
Alternatively, you can use any other method to set up a
-
Linux machine with `nix` installed that you can `sudo ssh`
-
into (in other words, root user on your Mac has to be able
-
to ssh into the Linux machine without entering a password)
-
and that has the same architecture as your Mac. See
-
[remote builder
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
-
for how to register such a builder in `nix.conf`.
-
-
> WARNING: If you'd like to use
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
-
> ssh` works can be tricky. It seems to be [possible with
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
-
-
</details>
-
-
To begin, grab your DID from http://localhost:3000/settings.
-
Then, set `TANGLED_VM_KNOT_OWNER` and
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
-
lightweight NixOS VM like so:
-
-
```bash
-
nix run --impure .#vm
-
-
# type `poweroff` at the shell to exit the VM
-
```
-
-
This starts a knot on port 6000, a spindle on port 6555
-
with `ssh` exposed on port 2222.
-
-
Once the services are running, head to
-
http://localhost:3000/knots and hit verify. It should
-
verify the ownership of the services instantly if everything
-
went smoothly.
-
-
You can push repositories to this VM with this ssh config
-
block on your main machine:
-
-
```bash
-
Host nixos-shell
-
Hostname localhost
-
Port 2222
-
User git
-
IdentityFile ~/.ssh/my_tangled_key
-
```
-
-
Set up a remote called `local-dev` on a git repo:
-
-
```bash
-
git remote add local-dev git@nixos-shell:user/repo
-
git push local-dev main
-
```
-
-
### running a spindle
-
-
The above VM should already be running a spindle on
-
`localhost:6555`. Head to http://localhost:3000/spindles and
-
hit verify. You can then configure each repository to use
-
this spindle and run CI jobs.
-
-
Of interest when debugging spindles:
-
-
```
-
# service logs from journald:
-
journalctl -xeu spindle
-
-
# CI job logs from disk:
-
ls /var/log/spindle
-
-
# debugging spindle db:
-
sqlite3 /var/lib/spindle/spindle.db
-
-
# litecli has a nicer REPL interface:
-
litecli /var/lib/spindle/spindle.db
-
```
-
-
If for any reason you wish to disable either one of the
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
-
`services.tangled.spindle.enable` (or
-
`services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
···
+
{
+
"text-color": null,
+
"background-color": null,
+
"line-number-color": null,
+
"line-number-background-color": null,
+
"text-styles": {
+
"Annotation": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"ControlFlow": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Error": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Alert": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Preprocessor": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
},
+
"Information": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Warning": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Documentation": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"DataType": {
+
"text-color": "#8f4e8b",
+
"background-color": null,
+
"bold": false,
+
"italic": false,
+
"underline": false
+
},
+
"Comment": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"CommentVar": {
+
"text-color": null,
+
"background-color": null,
+
"bold": false,
+
"italic": true,
+
"underline": false
+
},
+
"Keyword": {
+
"text-color": null,
+
"background-color": null,
+
"bold": true,
+
"italic": false,
+
"underline": false
+
}
+
}
+
}
+
-214
docs/knot-hosting.md
···
-
# knot self-hosting guide
-
-
So you want to run your own knot server? Great! Here are a few prerequisites:
-
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
-
2. A (sub)domain name. People generally use `knot.example.com`.
-
3. A valid SSL certificate for your domain.
-
-
There's a couple of ways to get started:
-
* NixOS: refer to
-
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
-
* Docker: Documented at
-
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
-
(community maintained: support is not guaranteed!)
-
* Manual: Documented below.
-
-
## manual setup
-
-
First, clone this repository:
-
-
```
-
git clone https://tangled.org/@tangled.org/core
-
```
-
-
Then, build the `knot` CLI. This is the knot administration and operation tool.
-
For the purpose of this guide, we're only concerned with these subcommands:
-
-
* `knot server`: the main knot server process, typically run as a
-
supervised service
-
* `knot guard`: handles role-based access control for git over SSH
-
(you'll never have to run this yourself)
-
* `knot keys`: fetches SSH keys associated with your knot; we'll use
-
this to generate the SSH `AuthorizedKeysCommand`
-
-
```
-
cd core
-
export CGO_ENABLED=1
-
go build -o knot ./cmd/knot
-
```
-
-
Next, move the `knot` binary to a location owned by `root` --
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
-
-
```
-
sudo mv knot /usr/local/bin/knot
-
sudo chown root:root /usr/local/bin/knot
-
```
-
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
-
specific permissions](https://stackoverflow.com/a/27638306). The
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
-
retrieve a user's public SSH keys dynamically for authentication. Let's
-
set that up.
-
-
```
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
-
Match User git
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
-
AuthorizedKeysCommandUser nobody
-
EOF
-
```
-
-
Then, reload `sshd`:
-
-
```
-
sudo systemctl reload ssh
-
```
-
-
Next, create the `git` user. We'll use the `git` user's home directory
-
to store repositories:
-
-
```
-
sudo adduser git
-
```
-
-
Create `/home/git/.knot.env` with the following, updating the values as
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
-
-
```
-
KNOT_REPO_SCAN_PATH=/home/git
-
KNOT_SERVER_HOSTNAME=knot.example.com
-
APPVIEW_ENDPOINT=https://tangled.sh
-
KNOT_SERVER_OWNER=did:plc:foobar
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
-
```
-
-
If you run a Linux distribution that uses systemd, you can use the provided
-
service file to run the server. Copy
-
[`knotserver.service`](/systemd/knotserver.service)
-
to `/etc/systemd/system/`. Then, run:
-
-
```
-
systemctl enable knotserver
-
systemctl start knotserver
-
```
-
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
-
knot. Here's an example configuration for Nginx:
-
-
```
-
server {
-
listen 80;
-
listen [::]:80;
-
server_name knot.example.com;
-
-
location / {
-
proxy_pass http://localhost:5555;
-
proxy_set_header Host $host;
-
proxy_set_header X-Real-IP $remote_addr;
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-
proxy_set_header X-Forwarded-Proto $scheme;
-
}
-
-
# wss endpoint for git events
-
location /events {
-
proxy_set_header X-Forwarded-For $remote_addr;
-
proxy_set_header Host $http_host;
-
proxy_set_header Upgrade websocket;
-
proxy_set_header Connection Upgrade;
-
proxy_pass http://localhost:5555;
-
}
-
# additional config for SSL/TLS go here.
-
}
-
-
```
-
-
Remember to use Let's Encrypt or similar to procure a certificate for your
-
knot domain.
-
-
You should now have a running knot server! You can finalize
-
your registration by hitting the `verify` button on the
-
[/knots](https://tangled.org/knots) page. This simply creates
-
a record on your PDS to announce the existence of the knot.
-
-
### custom paths
-
-
(This section applies to manual setup only. Docker users should edit the mounts
-
in `docker-compose.yml` instead.)
-
-
Right now, the database and repositories of your knot lives in `/home/git`. You
-
can move these paths if you'd like to store them in another folder. Be careful
-
when adjusting these paths:
-
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
-
any possible side effects. Remember to restart it once you're done.
-
* Make backups before moving in case something goes wrong.
-
* Make sure the `git` user can read and write from the new paths.
-
-
#### database
-
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
-
and we want to move it to `/home/git/database/knotserver.db`.
-
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
-
and `.db-wal` files if they exist.
-
-
```
-
mkdir /home/git/database
-
cp /home/git/knotserver.db* /home/git/database
-
```
-
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
-
the new file path (_not_ the directory):
-
-
```
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
-
```
-
-
#### repositories
-
-
As an example, let's say the repositories are currently in `/home/git`, and we
-
want to move them into `/home/git/repositories`.
-
-
Create the new folder, then move the existing repositories (if there are any):
-
-
```
-
mkdir /home/git/repositories
-
# move all DIDs into the new folder; these will vary for you!
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
-
```
-
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
-
to the new directory:
-
-
```
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
-
```
-
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
-
repository path:
-
-
```
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
-
Match User git
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
-
AuthorizedKeysCommandUser nobody
-
EOF
-
```
-
-
Make sure to restart your SSH server!
-
-
#### MOTD (message of the day)
-
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
-
`/home/git/motd` file:
-
-
```
-
printf "Hi from this knot!\n" > /home/git/motd
-
```
-
-
Note that you should add a newline at the end if setting a non-empty message
-
since the knot won't do this for you.
-59
docs/migrations.md
···
-
# Migrations
-
-
This document is laid out in reverse-chronological order.
-
Newer migration guides are listed first, and older guides
-
are further down the page.
-
-
## Upgrading from v1.8.x
-
-
After v1.8.2, the HTTP API for knot and spindles have been
-
deprecated and replaced with XRPC. Repositories on outdated
-
knots will not be viewable from the appview. Upgrading is
-
straightforward however.
-
-
For knots:
-
-
- Upgrade to latest tag (v1.9.0 or above)
-
- Head to the [knot dashboard](https://tangled.org/knots) and
-
hit the "retry" button to verify your knot
-
-
For spindles:
-
-
- Upgrade to latest tag (v1.9.0 or above)
-
- Head to the [spindle
-
dashboard](https://tangled.org/spindles) and hit the
-
"retry" button to verify your spindle
-
-
## Upgrading from v1.7.x
-
-
After v1.7.0, knot secrets have been deprecated. You no
-
longer need a secret from the appview to run a knot. All
-
authorized commands to knots are managed via [Inter-Service
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
-
Knots will be read-only until upgraded.
-
-
Upgrading is quite easy, in essence:
-
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
-
environment variable entirely
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
-
your DID. You can find your DID in the
-
[settings](https://tangled.org/settings) page.
-
- Restart your knot once you have replaced the environment
-
variable
-
- Head to the [knot dashboard](https://tangled.org/knots) and
-
hit the "retry" button to verify your knot. This simply
-
writes a `sh.tangled.knot` record to your PDS.
-
-
If you use the nix module, simply bump the flake to the
-
latest revision, and change your config block like so:
-
-
```diff
-
services.tangled.knot = {
-
enable = true;
-
server = {
-
- secretFile = /path/to/secret;
-
+ owner = "did:plc:foo";
-
};
-
};
-
```
-25
docs/spindle/architecture.md
···
-
# spindle architecture
-
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
-
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
-
* when a new repo record comes through (typically when you add a spindle to a
-
repo from the settings), spindle then resolves the underlying knot and
-
subscribes to repo events (see:
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
-
* the spindle engine then handles execution of the pipeline, with results and
-
logs beamed on the spindle event stream over wss
-
-
### the engine
-
-
At present, the only supported backend is Docker (and Podman, if Docker
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
-
executes each step in the pipeline in a fresh container, with state persisted
-
across steps within the `/tangled/workspace` directory.
-
-
The base image for the container is constructed on the fly using
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
-
used packages.
-
-
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
···
-
# spindle self-hosting guide
-
-
## prerequisites
-
-
* Go
-
* Docker (the only supported backend currently)
-
-
## configuration
-
-
Spindle is configured using environment variables. The following environment variables are available:
-
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
-
-
## running spindle
-
-
1. **Set the environment variables.** For example:
-
-
```shell
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
-
export SPINDLE_SERVER_OWNER="your-did"
-
```
-
-
2. **Build the Spindle binary.**
-
-
```shell
-
cd core
-
go mod download
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
-
```
-
-
3. **Create the log directory.**
-
-
```shell
-
sudo mkdir -p /var/log/spindle
-
sudo chown $USER:$USER -R /var/log/spindle
-
```
-
-
4. **Run the Spindle binary.**
-
-
```shell
-
./cmd/spindle/spindle
-
```
-
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
···
-
# spindle secrets with openbao
-
-
This document covers setting up Spindle to use OpenBao for secrets
-
management via OpenBao Proxy instead of the default SQLite backend.
-
-
## overview
-
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
-
authentication automatically using AppRole credentials, while Spindle
-
connects to the local proxy instead of directly to the OpenBao server.
-
-
This approach provides better security, automatic token renewal, and
-
simplified application code.
-
-
## installation
-
-
Install OpenBao from nixpkgs:
-
-
```bash
-
nix shell nixpkgs#openbao # for a local server
-
```
-
-
## setup
-
-
The setup process is documented for both local development and production.
-
-
### local development
-
-
Start OpenBao in dev mode:
-
-
```bash
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
-
```
-
-
This starts OpenBao on `http://localhost:8201` with a root token.
-
-
Set up environment for bao CLI:
-
-
```bash
-
export BAO_ADDR=http://localhost:8201
-
export BAO_TOKEN=root
-
```
-
-
### production
-
-
You would typically use a systemd service with a configuration file. Refer to
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
-
achieved using Nix.
-
-
Then, initialize the bao server:
-
```bash
-
bao operator init -key-shares=1 -key-threshold=1
-
```
-
-
This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
-
```bash
-
bao operator unseal <unseal_key>
-
```
-
-
All steps below remain the same across both dev and production setups.
-
-
### configure openbao server
-
-
Create the spindle KV mount:
-
-
```bash
-
bao secrets enable -path=spindle -version=2 kv
-
```
-
-
Set up AppRole authentication and policy:
-
-
Create a policy file `spindle-policy.hcl`:
-
-
```hcl
-
# Full access to spindle KV v2 data
-
path "spindle/data/*" {
-
capabilities = ["create", "read", "update", "delete"]
-
}
-
-
# Access to metadata for listing and management
-
path "spindle/metadata/*" {
-
capabilities = ["list", "read", "delete", "update"]
-
}
-
-
# Allow listing at root level
-
path "spindle/" {
-
capabilities = ["list"]
-
}
-
-
# Required for connection testing and health checks
-
path "auth/token/lookup-self" {
-
capabilities = ["read"]
-
}
-
```
-
-
Apply the policy and create an AppRole:
-
-
```bash
-
bao policy write spindle-policy spindle-policy.hcl
-
bao auth enable approle
-
bao write auth/approle/role/spindle \
-
token_policies="spindle-policy" \
-
token_ttl=1h \
-
token_max_ttl=4h \
-
bind_secret_id=true \
-
secret_id_ttl=0 \
-
secret_id_num_uses=0
-
```
-
-
Get the credentials:
-
-
```bash
-
# Get role ID (static)
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
-
-
# Generate secret ID
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
-
-
echo "Role ID: $ROLE_ID"
-
echo "Secret ID: $SECRET_ID"
-
```
-
-
### create proxy configuration
-
-
Create the credential files:
-
-
```bash
-
# Create directory for OpenBao files
-
mkdir -p /tmp/openbao
-
-
# Save credentials
-
echo "$ROLE_ID" > /tmp/openbao/role-id
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
-
```
-
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
-
-
```hcl
-
# OpenBao server connection
-
vault {
-
address = "http://localhost:8200"
-
}
-
-
# Auto-Auth using AppRole
-
auto_auth {
-
method "approle" {
-
mount_path = "auth/approle"
-
config = {
-
role_id_file_path = "/tmp/openbao/role-id"
-
secret_id_file_path = "/tmp/openbao/secret-id"
-
}
-
}
-
-
# Optional: write token to file for debugging
-
sink "file" {
-
config = {
-
path = "/tmp/openbao/token"
-
mode = 0640
-
}
-
}
-
}
-
-
# Proxy listener for Spindle
-
listener "tcp" {
-
address = "127.0.0.1:8201"
-
tls_disable = true
-
}
-
-
# Enable API proxy with auto-auth token
-
api_proxy {
-
use_auto_auth_token = true
-
}
-
-
# Enable response caching
-
cache {
-
use_auto_auth_token = true
-
}
-
-
# Logging
-
log_level = "info"
-
```
-
-
### start the proxy
-
-
Start OpenBao Proxy:
-
-
```bash
-
bao proxy -config=/tmp/openbao/proxy.hcl
-
```
-
-
The proxy will authenticate with OpenBao and start listening on
-
`127.0.0.1:8201`.
-
-
### configure spindle
-
-
Set these environment variables for Spindle:
-
-
```bash
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
-
```
-
-
Start Spindle:
-
-
Spindle will now connect to the local proxy, which handles all
-
authentication automatically.
-
-
## production setup for proxy
-
-
For production, you'll want to run the proxy as a service:
-
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
-
proper TLS settings for the vault connection.
-
-
## verifying setup
-
-
Test the proxy directly:
-
-
```bash
-
# Check proxy health
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
-
-
# Test token lookup through proxy
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
-
```
-
-
Test OpenBao operations through the server:
-
-
```bash
-
# List all secrets
-
bao kv list spindle/
-
-
# Add a test secret via Spindle API, then check it exists
-
bao kv list spindle/repos/
-
-
# Get a specific secret
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
-
```
-
-
## how it works
-
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
-
- The proxy authenticates with OpenBao using AppRole credentials
-
- All Spindle requests go through the proxy, which injects authentication tokens
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
-
- The proxy handles all token renewal automatically
-
- Spindle no longer manages tokens or authentication directly
-
-
## troubleshooting
-
-
**Connection refused**: Check that the OpenBao Proxy is running and
-
listening on the configured address.
-
-
**403 errors**: Verify the AppRole credentials are correct and the policy
-
has the necessary permissions.
-
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
-
the mount creation step again.
-
-
**Proxy authentication failures**: Check the proxy logs and verify the
-
role-id and secret-id files are readable and contain valid credentials.
-
-
**Secret not found after writing**: This can indicate policy permission
-
issues. Verify the policy includes both `spindle/data/*` and
-
`spindle/metadata/*` paths with appropriate capabilities.
-
-
Check proxy logs:
-
-
```bash
-
# If running as systemd service
-
journalctl -u openbao-proxy -f
-
-
# If running directly, check the console output
-
```
-
-
Test AppRole authentication manually:
-
-
```bash
-
bao write auth/approle/login \
-
role_id="$(cat /tmp/openbao/role-id)" \
-
secret_id="$(cat /tmp/openbao/secret-id)"
-
```
-183
docs/spindle/pipeline.md
···
-
# spindle pipelines
-
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
-
-
The fields are:
-
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
-
-
## Trigger
-
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
-
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
-
- `push`: The workflow should run every time a commit is pushed to the repository.
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
-
- `manual`: The workflow can be triggered manually.
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
-
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
-
-
```yaml
-
when:
-
- event: ["push", "manual"]
-
branch: ["main", "develop"]
-
- event: ["pull_request"]
-
branch: ["main"]
-
```
-
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
-
-
```yaml
-
when:
-
- event: ["push"]
-
tag: ["v*"]
-
```
-
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
-
-
```yaml
-
when:
-
- event: ["push"]
-
branch: ["main", "release-*"]
-
tag: ["v*", "stable"]
-
```
-
-
## Engine
-
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
-
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
-
-
Example:
-
-
```yaml
-
engine: "nixery"
-
```
-
-
## Clone options
-
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
-
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
-
-
The default settings are:
-
-
```yaml
-
clone:
-
skip: false
-
depth: 1
-
submodules: false
-
```
-
-
## Dependencies
-
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
-
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
-
-
```yaml
-
dependencies:
-
# nixpkgs
-
nixpkgs:
-
- nodejs
-
- go
-
# custom registry
-
git+https://tangled.org/@example.com/my_pkg:
-
- my_pkg
-
```
-
-
Now these dependencies are available to use in your workflow!
-
-
## Environment
-
-
The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
-
-
Example:
-
-
```yaml
-
environment:
-
GOOS: "linux"
-
GOARCH: "arm64"
-
NODE_ENV: "production"
-
MY_ENV_VAR: "MY_ENV_VALUE"
-
```
-
-
## Steps
-
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
-
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
-
-
Example:
-
-
```yaml
-
steps:
-
- name: "Build backend"
-
command: "go build"
-
environment:
-
GOOS: "darwin"
-
GOARCH: "arm64"
-
- name: "Build frontend"
-
command: "npm run build"
-
environment:
-
NODE_ENV: "production"
-
```
-
-
## Complete workflow
-
-
```yaml
-
# .tangled/workflows/build.yml
-
-
when:
-
- event: ["push", "manual"]
-
branch: ["main", "develop"]
-
- event: ["pull_request"]
-
branch: ["main"]
-
-
engine: "nixery"
-
-
# using the default values
-
clone:
-
skip: false
-
depth: 1
-
submodules: false
-
-
dependencies:
-
# nixpkgs
-
nixpkgs:
-
- nodejs
-
- go
-
# custom registry
-
git+https://tangled.org/@example.com/my_pkg:
-
- my_pkg
-
-
environment:
-
GOOS: "linux"
-
GOARCH: "arm64"
-
NODE_ENV: "production"
-
MY_ENV_VAR: "MY_ENV_VALUE"
-
-
steps:
-
- name: "Build backend"
-
command: "go build"
-
environment:
-
GOOS: "darwin"
-
GOARCH: "arm64"
-
- name: "Build frontend"
-
command: "npm run build"
-
environment:
-
NODE_ENV: "production"
-
```
-
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
···
+
svg {
+
width: 16px;
+
height: 16px;
+
}
+
+
:root {
+
--syntax-alert: #d20f39;
+
--syntax-annotation: #fe640b;
+
--syntax-attribute: #df8e1d;
+
--syntax-basen: #40a02b;
+
--syntax-builtin: #1e66f5;
+
--syntax-controlflow: #8839ef;
+
--syntax-char: #04a5e5;
+
--syntax-constant: #fe640b;
+
--syntax-comment: #9ca0b0;
+
--syntax-commentvar: #7c7f93;
+
--syntax-documentation: #9ca0b0;
+
--syntax-datatype: #df8e1d;
+
--syntax-decval: #40a02b;
+
--syntax-error: #d20f39;
+
--syntax-extension: #4c4f69;
+
--syntax-float: #40a02b;
+
--syntax-function: #1e66f5;
+
--syntax-import: #40a02b;
+
--syntax-information: #04a5e5;
+
--syntax-keyword: #8839ef;
+
--syntax-operator: #179299;
+
--syntax-other: #8839ef;
+
--syntax-preprocessor: #ea76cb;
+
--syntax-specialchar: #04a5e5;
+
--syntax-specialstring: #ea76cb;
+
--syntax-string: #40a02b;
+
--syntax-variable: #8839ef;
+
--syntax-verbatimstring: #40a02b;
+
--syntax-warning: #df8e1d;
+
}
+
+
@media (prefers-color-scheme: dark) {
+
:root {
+
--syntax-alert: #f38ba8;
+
--syntax-annotation: #fab387;
+
--syntax-attribute: #f9e2af;
+
--syntax-basen: #a6e3a1;
+
--syntax-builtin: #89b4fa;
+
--syntax-controlflow: #cba6f7;
+
--syntax-char: #89dceb;
+
--syntax-constant: #fab387;
+
--syntax-comment: #6c7086;
+
--syntax-commentvar: #585b70;
+
--syntax-documentation: #6c7086;
+
--syntax-datatype: #f9e2af;
+
--syntax-decval: #a6e3a1;
+
--syntax-error: #f38ba8;
+
--syntax-extension: #cdd6f4;
+
--syntax-float: #a6e3a1;
+
--syntax-function: #89b4fa;
+
--syntax-import: #a6e3a1;
+
--syntax-information: #89dceb;
+
--syntax-keyword: #cba6f7;
+
--syntax-operator: #94e2d5;
+
--syntax-other: #cba6f7;
+
--syntax-preprocessor: #f5c2e7;
+
--syntax-specialchar: #89dceb;
+
--syntax-specialstring: #f5c2e7;
+
--syntax-string: #a6e3a1;
+
--syntax-variable: #cba6f7;
+
--syntax-verbatimstring: #a6e3a1;
+
--syntax-warning: #f9e2af;
+
}
+
}
+
+
/* pandoc syntax highlighting classes */
+
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
+
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
+
code span.at { color: var(--syntax-attribute); } /* attribute */
+
code span.bn { color: var(--syntax-basen); } /* basen */
+
code span.bu { color: var(--syntax-builtin); } /* builtin */
+
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
+
code span.ch { color: var(--syntax-char); } /* char */
+
code span.cn { color: var(--syntax-constant); } /* constant */
+
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
+
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
+
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
+
code span.dt { color: var(--syntax-datatype); } /* datatype */
+
code span.dv { color: var(--syntax-decval); } /* decval */
+
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
+
code span.ex { color: var(--syntax-extension); } /* extension */
+
code span.fl { color: var(--syntax-float); } /* float */
+
code span.fu { color: var(--syntax-function); } /* function */
+
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
+
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
+
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
+
code span.op { color: var(--syntax-operator); } /* operator */
+
code span.ot { color: var(--syntax-other); } /* other */
+
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
+
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
+
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
+
code span.st { color: var(--syntax-string); } /* string */
+
code span.va { color: var(--syntax-variable); } /* variable */
+
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
+
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117
docs/template.html
···
+
<!DOCTYPE html>
+
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
+
<head>
+
<meta charset="utf-8" />
+
<meta name="generator" content="pandoc" />
+
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+
$for(author-meta)$
+
<meta name="author" content="$author-meta$" />
+
$endfor$
+
+
$if(date-meta)$
+
<meta name="dcterms.date" content="$date-meta$" />
+
$endif$
+
+
$if(keywords)$
+
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
+
$endif$
+
+
$if(description-meta)$
+
<meta name="description" content="$description-meta$" />
+
$endif$
+
+
<title>$pagetitle$ - Tangled docs</title>
+
+
<style>
+
$styles.css()$
+
</style>
+
+
$for(css)$
+
<link rel="stylesheet" href="$css$" />
+
$endfor$
+
+
$for(header-includes)$
+
$header-includes$
+
$endfor$
+
+
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
+
+
</head>
+
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
+
$for(include-before)$
+
$include-before$
+
$endfor$
+
+
$if(toc)$
+
<!-- mobile topbar toc -->
+
<details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
+
<summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
+
<span class="group-open:hidden inline">${ menu.svg() }</span>
+
<span class="hidden group-open:inline">${ x.svg() }</span>
+
</summary>
+
${ table-of-contents:toc.html() }
+
</details>
+
<!-- desktop sidebar toc -->
+
<nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
+
$if(toc-title)$
+
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
+
$endif$
+
${ table-of-contents:toc.html() }
+
</nav>
+
$endif$
+
+
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
+
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
+
$if(top)$
+
$-- only print title block if this is NOT the top page
+
$else$
+
$if(title)$
+
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
+
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
+
$if(subtitle)$
+
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
+
$endif$
+
$for(author)$
+
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
+
$endfor$
+
$if(date)$
+
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
+
$endif$
+
$if(abstract)$
+
<div class="mt-6 p-4 bg-gray-50 rounded-lg">
+
<div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
+
<div class="text-gray-700">$abstract$</div>
+
</div>
+
$endif$
+
$endif$
+
</header>
+
$endif$
+
<article class="prose dark:prose-invert max-w-none">
+
$body$
+
</article>
+
</main>
+
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
+
<div class="max-w-4xl mx-auto px-8 py-4">
+
<div class="flex justify-between gap-4">
+
<span class="flex-1">
+
$if(previous.url)$
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
+
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
+
$endif$
+
</span>
+
<span class="flex-1 text-right">
+
$if(next.url)$
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
+
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
+
$endif$
+
</span>
+
</div>
+
</div>
+
</nav>
+
</div>
+
$for(include-after)$
+
$include-after$
+
$endfor$
+
</body>
+
</html>
+4
docs/toc.html
···
+
<div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2">
+
$table-of-contents$
+
</div>
+
+9 -9
flake.lock
···
"systems": "systems"
},
"locked": {
-
"lastModified": 1694529238,
-
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+
"lastModified": 1731533236,
+
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
-
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
···
]
},
"locked": {
-
"lastModified": 1754078208,
-
"narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=",
+
"lastModified": 1763982521,
+
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
"owner": "nix-community",
"repo": "gomod2nix",
-
"rev": "7f963246a71626c7fc70b431a315c4388a0c95cf",
+
"rev": "02e63a239d6eabd595db56852535992c898eba72",
"type": "github"
},
"original": {
···
},
"nixpkgs": {
"locked": {
-
"lastModified": 1751984180,
-
"narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=",
+
"lastModified": 1766070988,
+
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
"owner": "nixos",
"repo": "nixpkgs",
-
"rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0",
+
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
"type": "github"
},
"original": {
+5 -4
flake.nix
···
}).buildGoApplication;
modules = ./nix/gomod2nix.toml;
sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix {
-
inherit (pkgs) gcc;
inherit sqlite-lib-src;
};
lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;};
···
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
};
appview = self.callPackage ./nix/pkgs/appview.nix {};
+
docs = self.callPackage ./nix/pkgs/docs.nix {
+
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
+
};
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
knot = self.callPackage ./nix/pkgs/knot.nix {};
});
in {
overlays.default = final: prev: {
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
};
packages = forAllSystems (system: let
···
staticPackages = mkPackageSet pkgs.pkgsStatic;
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
in {
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;
pkgsStatic-appview = staticPackages.appview;
pkgsStatic-knot = staticPackages.knot;
···
nativeBuildInputs = [
pkgs.go
pkgs.air
-
pkgs.tilt
pkgs.gopls
pkgs.httpie
pkgs.litecli
+3 -4
go.mod
···
module tangled.org/core
-
go 1.24.4
+
go 1.25.0
require (
github.com/Blank-Xu/sql-adapter v1.1.1
···
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v3 v3.3.3
github.com/whyrusleeping/cbor-gen v0.3.1
-
github.com/wyatt915/goldmark-treeblood v0.0.1
github.com/yuin/goldmark v1.7.13
+
github.com/yuin/goldmark-emoji v1.0.6
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/image v0.31.0
golang.org/x/net v0.42.0
-
golang.org/x/sync v0.17.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
gopkg.in/yaml.v3 v3.0.1
)
···
github.com/vmihailenco/go-tinylfu v0.2.2 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-
github.com/wyatt915/treeblood v0.1.16 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
···
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
+
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/time v0.12.0 // indirect
+2 -4
go.sum
···
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0=
github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
-
github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs=
-
github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208=
-
github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y=
-
github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
···
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+
github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
+
github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4 -4
hook/hook.go
···
},
Commands: []*cli.Command{
{
-
Name: "post-recieve",
-
Usage: "sends a post-recieve hook to the knot (waits for stdin)",
-
Action: postRecieve,
+
Name: "post-receive",
+
Usage: "sends a post-receive hook to the knot (waits for stdin)",
+
Action: postReceive,
},
},
}
}
-
func postRecieve(ctx context.Context, cmd *cli.Command) error {
+
func postReceive(ctx context.Context, cmd *cli.Command) error {
gitDir := cmd.String("git-dir")
userDid := cmd.String("user-did")
userHandle := cmd.String("user-handle")
+1 -1
hook/setup.go
···
option_var="GIT_PUSH_OPTION_$i"
push_options+=(-push-option "${!option_var}")
done
-
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve
+
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive
`, executablePath, config.internalApi)
return os.WriteFile(hookPath, []byte(hookContent), 0755)
+1 -1
input.css
···
}
.prose a.mention {
-
@apply no-underline hover:underline;
+
@apply no-underline hover:underline font-bold;
}
.prose li {
+15 -4
jetstream/jetstream.go
···
// existing instances of the closure when j.WantedDids is mutated
return func(ctx context.Context, evt *models.Event) error {
+
j.mu.RLock()
// empty filter => all dids allowed
-
if len(j.wantedDids) == 0 {
-
return processFunc(ctx, evt)
+
matches := len(j.wantedDids) == 0
+
if !matches {
+
if _, ok := j.wantedDids[evt.Did]; ok {
+
matches = true
+
}
}
+
j.mu.RUnlock()
-
if _, ok := j.wantedDids[evt.Did]; ok {
+
if matches {
return processFunc(ctx, evt)
} else {
return nil
···
go func() {
if j.waitForDid {
-
for len(j.wantedDids) == 0 {
+
for {
+
j.mu.RLock()
+
hasDid := len(j.wantedDids) != 0
+
j.mu.RUnlock()
+
if hasDid {
+
break
+
}
time.Sleep(time.Second)
}
}
+81
knotserver/db/db.go
···
+
package db
+
+
import (
+
"context"
+
"database/sql"
+
"log/slog"
+
"strings"
+
+
_ "github.com/mattn/go-sqlite3"
+
"tangled.org/core/log"
+
)
+
+
type DB struct {
+
db *sql.DB
+
logger *slog.Logger
+
}
+
+
func Setup(ctx context.Context, dbPath string) (*DB, error) {
+
// https://github.com/mattn/go-sqlite3#connection-string
+
opts := []string{
+
"_foreign_keys=1",
+
"_journal_mode=WAL",
+
"_synchronous=NORMAL",
+
"_auto_vacuum=incremental",
+
}
+
+
logger := log.FromContext(ctx)
+
logger = log.SubLogger(logger, "db")
+
+
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
+
if err != nil {
+
return nil, err
+
}
+
+
conn, err := db.Conn(ctx)
+
if err != nil {
+
return nil, err
+
}
+
defer conn.Close()
+
+
_, err = conn.ExecContext(ctx, `
+
create table if not exists known_dids (
+
did text primary key
+
);
+
+
create table if not exists public_keys (
+
id integer primary key autoincrement,
+
did text not null,
+
key text not null,
+
created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
+
unique(did, key),
+
foreign key (did) references known_dids(did) on delete cascade
+
);
+
+
create table if not exists _jetstream (
+
id integer primary key autoincrement,
+
last_time_us integer not null
+
);
+
+
create table if not exists events (
+
rkey text not null,
+
nsid text not null,
+
event text not null, -- json
+
created integer not null default (strftime('%s', 'now')),
+
primary key (rkey, nsid)
+
);
+
+
create table if not exists migrations (
+
id integer primary key autoincrement,
+
name text unique
+
);
+
`)
+
if err != nil {
+
return nil, err
+
}
+
+
return &DB{
+
db: db,
+
logger: logger,
+
}, nil
+
}
-64
knotserver/db/init.go
···
-
package db
-
-
import (
-
"database/sql"
-
"strings"
-
-
_ "github.com/mattn/go-sqlite3"
-
)
-
-
type DB struct {
-
db *sql.DB
-
}
-
-
func Setup(dbPath string) (*DB, error) {
-
// https://github.com/mattn/go-sqlite3#connection-string
-
opts := []string{
-
"_foreign_keys=1",
-
"_journal_mode=WAL",
-
"_synchronous=NORMAL",
-
"_auto_vacuum=incremental",
-
}
-
-
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
-
if err != nil {
-
return nil, err
-
}
-
-
// NOTE: If any other migration is added here, you MUST
-
// copy the pattern in appview: use a single sql.Conn
-
// for every migration.
-
-
_, err = db.Exec(`
-
create table if not exists known_dids (
-
did text primary key
-
);
-
-
create table if not exists public_keys (
-
id integer primary key autoincrement,
-
did text not null,
-
key text not null,
-
created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
-
unique(did, key),
-
foreign key (did) references known_dids(did) on delete cascade
-
);
-
-
create table if not exists _jetstream (
-
id integer primary key autoincrement,
-
last_time_us integer not null
-
);
-
-
create table if not exists events (
-
rkey text not null,
-
nsid text not null,
-
event text not null, -- json
-
created integer not null default (strftime('%s', 'now')),
-
primary key (rkey, nsid)
-
);
-
`)
-
if err != nil {
-
return nil, err
-
}
-
-
return &DB{db: db}, nil
-
}
+1 -17
knotserver/git/diff.go
···
nd.Diff = append(nd.Diff, ndiff)
}
-
nd.Stat.FilesChanged = len(diffs)
-
nd.Commit.This = c.Hash.String()
-
nd.Commit.PGPSignature = c.PGPSignature
-
nd.Commit.Committer = c.Committer
-
nd.Commit.Tree = c.TreeHash.String()
-
-
if parent.Hash.IsZero() {
-
nd.Commit.Parent = ""
-
} else {
-
nd.Commit.Parent = parent.Hash.String()
-
}
-
nd.Commit.Author = c.Author
-
nd.Commit.Message = c.Message
-
-
if v, ok := c.ExtraHeaders["change-id"]; ok {
-
nd.Commit.ChangedId = string(v)
-
}
+
nd.Commit.FromGoGitCommit(c)
return &nd, nil
}
+38 -2
knotserver/git/fork.go
···
import (
"errors"
"fmt"
+
"log/slog"
+
"net/url"
"os/exec"
+
"path/filepath"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
+
knotconfig "tangled.org/core/knotserver/config"
)
-
func Fork(repoPath, source string) error {
-
cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath)
+
func Fork(repoPath, source string, cfg *knotconfig.Config) error {
+
u, err := url.Parse(source)
+
if err != nil {
+
return fmt.Errorf("failed to parse source URL: %w", err)
+
}
+
+
if o := optimizeClone(u, cfg); o != nil {
+
u = o
+
}
+
+
cloneCmd := exec.Command("git", "clone", "--bare", u.String(), repoPath)
if err := cloneCmd.Run(); err != nil {
return fmt.Errorf("failed to bare clone repository: %w", err)
}
···
}
return nil
+
}
+
+
func optimizeClone(u *url.URL, cfg *knotconfig.Config) *url.URL {
+
// only optimize if it's the same host
+
if u.Host != cfg.Server.Hostname {
+
return nil
+
}
+
+
local := filepath.Join(cfg.Repo.ScanPath, u.Path)
+
+
// sanity check: is there a git repo there?
+
if _, err := PlainOpen(local); err != nil {
+
return nil
+
}
+
+
// create optimized file:// URL
+
optimized := &url.URL{
+
Scheme: "file",
+
Path: local,
+
}
+
+
slog.Debug("performing local clone", "url", optimized.String())
+
return optimized
}
func (g *GitRepo) Sync() error {
+13 -1
knotserver/git/service/service.go
···
return c.RunService(cmd)
}
+
func (c *ServiceCommand) UploadArchive() error {
+
cmd := exec.Command("git", []string{
+
"upload-archive",
+
".",
+
}...)
+
+
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+
cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol))
+
cmd.Dir = c.Dir
+
+
return c.RunService(cmd)
+
}
+
func (c *ServiceCommand) UploadPack() error {
cmd := exec.Command("git", []string{
-
"-c", "uploadpack.allowFilter=true",
"upload-pack",
"--stateless-rpc",
".",
+47
knotserver/git.go
···
}
}
+
func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) {
+
did := chi.URLParam(r, "did")
+
name := chi.URLParam(r, "name")
+
repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name))
+
if err != nil {
+
gitError(w, err.Error(), http.StatusInternalServerError)
+
h.l.Error("git: failed to secure join repo path", "handler", "UploadPack", "error", err)
+
return
+
}
+
+
const expectedContentType = "application/x-git-upload-archive-request"
+
contentType := r.Header.Get("Content-Type")
+
if contentType != expectedContentType {
+
gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType)
+
}
+
+
var bodyReader io.ReadCloser = r.Body
+
if r.Header.Get("Content-Encoding") == "gzip" {
+
gzipReader, err := gzip.NewReader(r.Body)
+
if err != nil {
+
gitError(w, err.Error(), http.StatusInternalServerError)
+
h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err)
+
return
+
}
+
defer gzipReader.Close()
+
bodyReader = gzipReader
+
}
+
+
w.Header().Set("Content-Type", "application/x-git-upload-archive-result")
+
+
h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo)
+
+
cmd := service.ServiceCommand{
+
GitProtocol: r.Header.Get("Git-Protocol"),
+
Dir: repo,
+
Stdout: w,
+
Stdin: bodyReader,
+
}
+
+
w.WriteHeader(http.StatusOK)
+
+
if err := cmd.UploadArchive(); err != nil {
+
h.l.Error("git: failed to execute git-upload-pack", "handler", "UploadPack", "error", err)
+
return
+
}
+
}
+
func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) {
did := chi.URLParam(r, "did")
name := chi.URLParam(r, "name")
+1
knotserver/router.go
···
r.Route("/{name}", func(r chi.Router) {
// routes for git operations
r.Get("/info/refs", h.InfoRefs)
+
r.Post("/git-upload-archive", h.UploadArchive)
r.Post("/git-upload-pack", h.UploadPack)
r.Post("/git-receive-pack", h.ReceivePack)
})
+1 -1
knotserver/server.go
···
logger.Info("running in dev mode, signature verification is disabled")
}
-
db, err := db.Setup(c.Server.DBPath)
+
db, err := db.Setup(ctx, c.Server.DBPath)
if err != nil {
return fmt.Errorf("failed to load db: %w", err)
}
+1 -1
knotserver/xrpc/create_repo.go
···
repoPath, _ := securejoin.SecureJoin(h.Config.Repo.ScanPath, relativeRepoPath)
if data.Source != nil && *data.Source != "" {
-
err = git.Fork(repoPath, *data.Source)
+
err = git.Fork(repoPath, *data.Source, h.Config)
if err != nil {
l.Error("forking repo", "error", err.Error())
writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
+6 -1
knotserver/xrpc/repo_log.go
···
return
}
+
tcommits := make([]types.Commit, len(commits))
+
for i, c := range commits {
+
tcommits[i].FromGoGitCommit(c)
+
}
+
// Create response using existing types.RepoLogResponse
response := types.RepoLogResponse{
-
Commits: commits,
+
Commits: tcommits,
Ref: ref,
Page: (offset / limit) + 1,
PerPage: limit,
+14
lexicons/issue/comment.json
···
"replyTo": {
"type": "string",
"format": "at-uri"
+
},
+
"mentions": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "did"
+
}
+
},
+
"references": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "at-uri"
+
}
}
}
}
+14
lexicons/issue/issue.json
···
"createdAt": {
"type": "string",
"format": "datetime"
+
},
+
"mentions": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "did"
+
}
+
},
+
"references": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "at-uri"
+
}
}
}
}
+14
lexicons/pulls/comment.json
···
"createdAt": {
"type": "string",
"format": "datetime"
+
},
+
"mentions": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "did"
+
}
+
},
+
"references": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "at-uri"
+
}
}
}
}
+14
lexicons/pulls/pull.json
···
"createdAt": {
"type": "string",
"format": "datetime"
+
},
+
"mentions": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "did"
+
}
+
},
+
"references": {
+
"type": "array",
+
"items": {
+
"type": "string",
+
"format": "at-uri"
+
}
}
}
}
+3 -30
nix/gomod2nix.toml
···
[mod."github.com/davecgh/go-spew"]
version = "v1.1.2-0.20180830191138-d8f796af33cc"
hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc="
-
[mod."github.com/decred/dcrd/dcrec/secp256k1/v4"]
-
version = "v4.4.0"
-
hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg="
[mod."github.com/dgraph-io/ristretto"]
version = "v0.2.0"
hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw="
···
[mod."github.com/klauspost/cpuid/v2"]
version = "v2.3.0"
hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
-
[mod."github.com/lestrrat-go/blackmagic"]
-
version = "v1.0.4"
-
hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8="
-
[mod."github.com/lestrrat-go/httpcc"]
-
version = "v1.0.1"
-
hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos="
-
[mod."github.com/lestrrat-go/httprc"]
-
version = "v1.0.6"
-
hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM="
-
[mod."github.com/lestrrat-go/iter"]
-
version = "v1.0.2"
-
hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw="
-
[mod."github.com/lestrrat-go/jwx/v2"]
-
version = "v2.1.6"
-
hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc="
-
[mod."github.com/lestrrat-go/option"]
-
version = "v1.0.1"
-
hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI="
[mod."github.com/lucasb-eyer/go-colorful"]
version = "v1.2.0"
hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE="
···
[mod."github.com/ryanuber/go-glob"]
version = "v1.0.0"
hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY="
-
[mod."github.com/segmentio/asm"]
-
version = "v1.2.0"
-
hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
[mod."github.com/sergi/go-diff"]
version = "v1.1.0"
hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY="
···
[mod."github.com/whyrusleeping/cbor-gen"]
version = "v0.3.1"
hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
-
[mod."github.com/wyatt915/goldmark-treeblood"]
-
version = "v0.0.1"
-
hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g="
-
[mod."github.com/wyatt915/treeblood"]
-
version = "v0.1.16"
-
hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw="
[mod."github.com/xo/terminfo"]
version = "v0.0.0-20220910002029-abceb7e1c41e"
hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU="
[mod."github.com/yuin/goldmark"]
version = "v1.7.13"
hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE="
+
[mod."github.com/yuin/goldmark-emoji"]
+
version = "v1.0.6"
+
hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY="
[mod."github.com/yuin/goldmark-highlighting/v2"]
version = "v2.0.0-20230729083705-37449abec8cc"
hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+2
nix/modules/knot.nix
···
Match User ${cfg.gitUser}
AuthorizedKeysCommand /etc/ssh/keyfetch_wrapper
AuthorizedKeysCommandUser nobody
+
ChallengeResponseAuthentication no
+
PasswordAuthentication no
'';
};
+41
nix/pkgs/docs.nix
···
+
{
+
pandoc,
+
tailwindcss,
+
runCommandLocal,
+
inter-fonts-src,
+
ibm-plex-mono-src,
+
lucide-src,
+
src,
+
}:
+
runCommandLocal "docs" {} ''
+
mkdir -p working
+
+
# copy templates, themes, styles, filters to working directory
+
cp ${src}/docs/*.html working/
+
cp ${src}/docs/*.theme working/
+
cp ${src}/docs/*.css working/
+
+
# icons
+
cp -rf ${lucide-src}/*.svg working/
+
+
# content
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
+
-o $out/ \
+
-t chunkedhtml \
+
--variable toc \
+
--toc-depth=2 \
+
--css=stylesheet.css \
+
--chunk-template="%i.html" \
+
--highlight-style=working/highlight.theme \
+
--template=working/template.html
+
+
# fonts
+
mkdir -p $out/static/fonts
+
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
+
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
+
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
+
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
+
+
# styles
+
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
+
''
+7 -5
nix/pkgs/sqlite-lib.nix
···
{
-
gcc,
stdenv,
sqlite-lib-src,
}:
stdenv.mkDerivation {
name = "sqlite-lib";
src = sqlite-lib-src;
-
nativeBuildInputs = [gcc];
+
buildPhase = ''
-
gcc -c sqlite3.c
-
ar rcs libsqlite3.a sqlite3.o
-
ranlib libsqlite3.a
+
$CC -c sqlite3.c
+
$AR rcs libsqlite3.a sqlite3.o
+
$RANLIB libsqlite3.a
+
'';
+
+
installPhase = ''
mkdir -p $out/include $out/lib
cp *.h $out/include
cp libsqlite3.a $out/lib
+4 -4
nix/vm.nix
···
# knot
{
from = "host";
-
host.port = 6000;
-
guest.port = 6000;
+
host.port = 6444;
+
guest.port = 6444;
}
# spindle
{
···
motd = "Welcome to the development knot!\n";
server = {
owner = envVar "TANGLED_VM_KNOT_OWNER";
-
hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000";
+
hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6444";
plcUrl = plcUrl;
jetstreamEndpoint = jetstream;
-
listenAddr = "0.0.0.0:6000";
+
listenAddr = "0.0.0.0:6444";
};
};
services.tangled.spindle = {
+122
orm/orm.go
···
+
package orm
+
+
import (
+
"context"
+
"database/sql"
+
"fmt"
+
"log/slog"
+
"reflect"
+
"strings"
+
)
+
+
type migrationFn = func(*sql.Tx) error
+
+
func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
+
logger = logger.With("migration", name)
+
+
tx, err := c.BeginTx(context.Background(), nil)
+
if err != nil {
+
return err
+
}
+
defer tx.Rollback()
+
+
var exists bool
+
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
+
if err != nil {
+
return err
+
}
+
+
if !exists {
+
// run migration
+
err = migrationFn(tx)
+
if err != nil {
+
logger.Error("failed to run migration", "err", err)
+
return err
+
}
+
+
// mark migration as complete
+
_, err = tx.Exec("insert into migrations (name) values (?)", name)
+
if err != nil {
+
logger.Error("failed to mark migration as complete", "err", err)
+
return err
+
}
+
+
// commit the transaction
+
if err := tx.Commit(); err != nil {
+
return err
+
}
+
+
logger.Info("migration applied successfully")
+
} else {
+
logger.Warn("skipped migration, already applied")
+
}
+
+
return nil
+
}
+
+
type Filter struct {
+
Key string
+
arg any
+
Cmp string
+
}
+
+
func newFilter(key, cmp string, arg any) Filter {
+
return Filter{
+
Key: key,
+
arg: arg,
+
Cmp: cmp,
+
}
+
}
+
+
func FilterEq(key string, arg any) Filter { return newFilter(key, "=", arg) }
+
func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) }
+
func FilterGte(key string, arg any) Filter { return newFilter(key, ">=", arg) }
+
func FilterLte(key string, arg any) Filter { return newFilter(key, "<=", arg) }
+
func FilterIs(key string, arg any) Filter { return newFilter(key, "is", arg) }
+
func FilterIsNot(key string, arg any) Filter { return newFilter(key, "is not", arg) }
+
func FilterIn(key string, arg any) Filter { return newFilter(key, "in", arg) }
+
func FilterLike(key string, arg any) Filter { return newFilter(key, "like", arg) }
+
func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) }
+
func FilterContains(key string, arg any) Filter {
+
return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
+
}
+
+
func (f Filter) Condition() string {
+
rv := reflect.ValueOf(f.arg)
+
kind := rv.Kind()
+
+
// if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
+
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
+
if rv.Len() == 0 {
+
// always false
+
return "1 = 0"
+
}
+
+
placeholders := make([]string, rv.Len())
+
for i := range placeholders {
+
placeholders[i] = "?"
+
}
+
+
return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", "))
+
}
+
+
return fmt.Sprintf("%s %s ?", f.Key, f.Cmp)
+
}
+
+
func (f Filter) Arg() []any {
+
rv := reflect.ValueOf(f.arg)
+
kind := rv.Kind()
+
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
+
if rv.Len() == 0 {
+
return nil
+
}
+
+
out := make([]any, rv.Len())
+
for i := range rv.Len() {
+
out[i] = rv.Index(i).Interface()
+
}
+
return out
+
}
+
+
return []any{f.arg}
+
}
-1
patchutil/patchutil.go
···
}
nd := types.NiceDiff{}
-
nd.Commit.Parent = targetBranch
for _, d := range diffs {
ndiff := types.Diff{}
+8
rbac/rbac.go
···
return e.E.Enforce(user, domain, repo, "repo:delete")
}
+
func (e *Enforcer) IsRepoOwner(user, domain, repo string) (bool, error) {
+
return e.E.Enforce(user, domain, repo, "repo:owner")
+
}
+
+
func (e *Enforcer) IsRepoCollaborator(user, domain, repo string) (bool, error) {
+
return e.E.Enforce(user, domain, repo, "repo:collaborator")
+
}
+
func (e *Enforcer) IsPushAllowed(user, domain, repo string) (bool, error) {
return e.E.Enforce(user, domain, repo, "repo:push")
}
+31
sets/gen.go
···
+
package sets
+
+
import (
+
"math/rand"
+
"reflect"
+
"testing/quick"
+
)
+
+
func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value {
+
s := New[T]()
+
+
var zero T
+
itemType := reflect.TypeOf(zero)
+
+
for {
+
if s.Len() >= size {
+
break
+
}
+
+
item, ok := quick.Value(itemType, rand)
+
if !ok {
+
continue
+
}
+
+
if val, ok := item.Interface().(T); ok {
+
s.Insert(val)
+
}
+
}
+
+
return reflect.ValueOf(s)
+
}
+35
sets/readme.txt
···
+
sets
+
----
+
set datastructure for go with generics and iterators. the
+
api is supposed to mimic rust's std::collections::HashSet api.
+
+
s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4}))
+
s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6}))
+
+
union := sets.Collect(s1.Union(s2))
+
intersect := sets.Collect(s1.Intersection(s2))
+
diff := sets.Collect(s1.Difference(s2))
+
symdiff := sets.Collect(s1.SymmetricDifference(s2))
+
+
s1.Len() // 4
+
s1.Contains(1) // true
+
s1.IsEmpty() // false
+
s1.IsSubset(s2) // true
+
s1.IsSuperset(s2) // false
+
s1.IsDisjoint(s2) // false
+
+
if exists := s1.Insert(1); exists {
+
// already existed in set
+
}
+
+
if existed := s1.Remove(1); existed {
+
// existed in set, now removed
+
}
+
+
+
testing
+
-------
+
includes property-based tests using the wonderful
+
testing/quick module!
+
+
go test -v
+174
sets/set.go
···
+
package sets
+
+
import (
+
"iter"
+
"maps"
+
)
+
+
type Set[T comparable] struct {
+
data map[T]struct{}
+
}
+
+
func New[T comparable]() Set[T] {
+
return Set[T]{
+
data: make(map[T]struct{}),
+
}
+
}
+
+
func (s *Set[T]) Insert(item T) bool {
+
_, exists := s.data[item]
+
s.data[item] = struct{}{}
+
return !exists
+
}
+
+
func Singleton[T comparable](item T) Set[T] {
+
n := New[T]()
+
_ = n.Insert(item)
+
return n
+
}
+
+
func (s *Set[T]) Remove(item T) bool {
+
_, exists := s.data[item]
+
if exists {
+
delete(s.data, item)
+
}
+
return exists
+
}
+
+
func (s Set[T]) Contains(item T) bool {
+
_, exists := s.data[item]
+
return exists
+
}
+
+
func (s Set[T]) Len() int {
+
return len(s.data)
+
}
+
+
func (s Set[T]) IsEmpty() bool {
+
return len(s.data) == 0
+
}
+
+
func (s *Set[T]) Clear() {
+
s.data = make(map[T]struct{})
+
}
+
+
func (s Set[T]) All() iter.Seq[T] {
+
return func(yield func(T) bool) {
+
for item := range s.data {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
}
+
+
func (s Set[T]) Clone() Set[T] {
+
return Set[T]{
+
data: maps.Clone(s.data),
+
}
+
}
+
+
func (s Set[T]) Union(other Set[T]) iter.Seq[T] {
+
if s.Len() >= other.Len() {
+
return chain(s.All(), other.Difference(s))
+
} else {
+
return chain(other.All(), s.Difference(other))
+
}
+
}
+
+
func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] {
+
return func(yield func(T) bool) {
+
for _, seq := range seqs {
+
for item := range seq {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
}
+
}
+
+
func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] {
+
return func(yield func(T) bool) {
+
for item := range s.data {
+
if other.Contains(item) {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
}
+
}
+
+
func (s Set[T]) Difference(other Set[T]) iter.Seq[T] {
+
return func(yield func(T) bool) {
+
for item := range s.data {
+
if !other.Contains(item) {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
}
+
}
+
+
func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] {
+
return func(yield func(T) bool) {
+
for item := range s.data {
+
if !other.Contains(item) {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
for item := range other.data {
+
if !s.Contains(item) {
+
if !yield(item) {
+
return
+
}
+
}
+
}
+
}
+
}
+
+
func (s Set[T]) IsSubset(other Set[T]) bool {
+
for item := range s.data {
+
if !other.Contains(item) {
+
return false
+
}
+
}
+
return true
+
}
+
+
func (s Set[T]) IsSuperset(other Set[T]) bool {
+
return other.IsSubset(s)
+
}
+
+
func (s Set[T]) IsDisjoint(other Set[T]) bool {
+
for item := range s.data {
+
if other.Contains(item) {
+
return false
+
}
+
}
+
return true
+
}
+
+
func (s Set[T]) Equal(other Set[T]) bool {
+
if s.Len() != other.Len() {
+
return false
+
}
+
for item := range s.data {
+
if !other.Contains(item) {
+
return false
+
}
+
}
+
return true
+
}
+
+
func Collect[T comparable](seq iter.Seq[T]) Set[T] {
+
result := New[T]()
+
for item := range seq {
+
result.Insert(item)
+
}
+
return result
+
}
+411
sets/set_test.go
···
+
package sets
+
+
import (
+
"slices"
+
"testing"
+
"testing/quick"
+
)
+
+
// TestNew verifies that a freshly constructed set is empty.
func TestNew(t *testing.T) {
	s := New[int]()
	if s.Len() != 0 {
		t.Errorf("New set should be empty, got length %d", s.Len())
	}
	if !s.IsEmpty() {
		t.Error("New set should be empty")
	}
}

// TestFromSlice verifies that collecting a slice deduplicates elements.
func TestFromSlice(t *testing.T) {
	s := Collect(slices.Values([]int{1, 2, 3, 2, 1}))
	if s.Len() != 3 {
		t.Errorf("Expected length 3, got %d", s.Len())
	}
	if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) {
		t.Error("Set should contain all unique elements from slice")
	}
}

// TestInsert verifies that Insert reports whether the element was newly
// added, and that duplicates do not grow the set.
func TestInsert(t *testing.T) {
	s := New[string]()

	if !s.Insert("hello") {
		t.Error("First insert should return true")
	}
	if s.Insert("hello") {
		t.Error("Duplicate insert should return false")
	}
	if s.Len() != 1 {
		t.Errorf("Expected length 1, got %d", s.Len())
	}
}

// TestRemove verifies that Remove reports whether the element was
// present, and that removal actually shrinks the set.
func TestRemove(t *testing.T) {
	s := Collect(slices.Values([]int{1, 2, 3}))

	if !s.Remove(2) {
		t.Error("Remove existing element should return true")
	}
	if s.Remove(2) {
		t.Error("Remove non-existing element should return false")
	}
	if s.Contains(2) {
		t.Error("Element should be removed")
	}
	if s.Len() != 2 {
		t.Errorf("Expected length 2, got %d", s.Len())
	}
}

// TestContains checks membership queries for present and absent values.
func TestContains(t *testing.T) {
	s := Collect(slices.Values([]int{1, 2, 3}))

	if !s.Contains(1) {
		t.Error("Should contain 1")
	}
	if s.Contains(4) {
		t.Error("Should not contain 4")
	}
}

// TestClear verifies that Clear empties the set.
func TestClear(t *testing.T) {
	s := Collect(slices.Values([]int{1, 2, 3}))
	s.Clear()

	if !s.IsEmpty() {
		t.Error("Set should be empty after clear")
	}
	if s.Len() != 0 {
		t.Errorf("Expected length 0, got %d", s.Len())
	}
}

// TestIterator verifies that All yields every element exactly once
// (order is unspecified, so results are sorted before comparison).
func TestIterator(t *testing.T) {
	s := Collect(slices.Values([]int{1, 2, 3}))
	var items []int

	for item := range s.All() {
		items = append(items, item)
	}

	slices.Sort(items)
	expected := []int{1, 2, 3}
	if !slices.Equal(items, expected) {
		t.Errorf("Expected %v, got %v", expected, items)
	}
}

// TestClone verifies that a clone is equal to the original and that the
// two do not share storage afterwards.
func TestClone(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := s1.Clone()

	if !s1.Equal(s2) {
		t.Error("Cloned set should be equal to original")
	}

	s2.Insert(4)
	if s1.Contains(4) {
		t.Error("Modifying clone should not affect original")
	}
}

// TestUnion checks the union of two overlapping sets.
func TestUnion(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2}))
	s2 := Collect(slices.Values([]int{2, 3}))

	result := Collect(s1.Union(s2))
	expected := Collect(slices.Values([]int{1, 2, 3}))

	if !result.Equal(expected) {
		t.Errorf("Expected %v, got %v", expected, result)
	}
}

// TestIntersection checks the intersection of two overlapping sets.
func TestIntersection(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{2, 3, 4}))

	expected := Collect(slices.Values([]int{2, 3}))
	result := Collect(s1.Intersection(s2))

	if !result.Equal(expected) {
		t.Errorf("Expected %v, got %v", expected, result)
	}
}

// TestDifference checks the asymmetric difference s1 \ s2.
func TestDifference(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{2, 3, 4}))

	expected := Collect(slices.Values([]int{1}))
	result := Collect(s1.Difference(s2))

	if !result.Equal(expected) {
		t.Errorf("Expected %v, got %v", expected, result)
	}
}

// TestSymmetricDifference checks the elements unique to either set.
func TestSymmetricDifference(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{2, 3, 4}))

	expected := Collect(slices.Values([]int{1, 4}))
	result := Collect(s1.SymmetricDifference(s2))

	if !result.Equal(expected) {
		t.Errorf("Expected %v, got %v", expected, result)
	}
}

// TestSymmetricDifferenceCommutativeProperty checks that symmetric
// difference gives the same result regardless of operand order.
func TestSymmetricDifferenceCommutativeProperty(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{2, 3, 4}))

	result1 := Collect(s1.SymmetricDifference(s2))
	result2 := Collect(s2.SymmetricDifference(s1))

	if !result1.Equal(result2) {
		t.Errorf("Expected %v, got %v", result1, result2)
	}
}

// TestIsSubset checks subset detection in both directions.
func TestIsSubset(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2}))
	s2 := Collect(slices.Values([]int{1, 2, 3}))

	if !s1.IsSubset(s2) {
		t.Error("s1 should be subset of s2")
	}
	if s2.IsSubset(s1) {
		t.Error("s2 should not be subset of s1")
	}
}

// TestIsSuperset checks superset detection in both directions.
func TestIsSuperset(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{1, 2}))

	if !s1.IsSuperset(s2) {
		t.Error("s1 should be superset of s2")
	}
	if s2.IsSuperset(s1) {
		t.Error("s2 should not be superset of s1")
	}
}

// TestIsDisjoint checks both a disjoint pair and an overlapping pair.
func TestIsDisjoint(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2}))
	s2 := Collect(slices.Values([]int{3, 4}))
	s3 := Collect(slices.Values([]int{2, 3}))

	if !s1.IsDisjoint(s2) {
		t.Error("s1 and s2 should be disjoint")
	}
	if s1.IsDisjoint(s3) {
		t.Error("s1 and s3 should not be disjoint")
	}
}

// TestEqual verifies that equality ignores insertion order and that
// sets of different size compare unequal.
func TestEqual(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2, 3}))
	s2 := Collect(slices.Values([]int{3, 2, 1}))
	s3 := Collect(slices.Values([]int{1, 2}))

	if !s1.Equal(s2) {
		t.Error("s1 and s2 should be equal")
	}
	if s1.Equal(s3) {
		t.Error("s1 and s3 should not be equal")
	}
}

// TestCollect verifies that Collect materializes iterator results
// (here: Union and Difference) back into sets.
func TestCollect(t *testing.T) {
	s1 := Collect(slices.Values([]int{1, 2}))
	s2 := Collect(slices.Values([]int{2, 3}))

	unionSet := Collect(s1.Union(s2))
	if unionSet.Len() != 3 {
		t.Errorf("Expected union set length 3, got %d", unionSet.Len())
	}
	if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) {
		t.Error("Union set should contain 1, 2, and 3")
	}

	diffSet := Collect(s1.Difference(s2))
	if diffSet.Len() != 1 {
		t.Errorf("Expected difference set length 1, got %d", diffSet.Len())
	}
	if !diffSet.Contains(1) {
		t.Error("Difference set should contain 1")
	}
}
+
+
// TestPropertySingleonLen checks that Singleton always produces a
// one-element set.
// NOTE(review): function name has a typo — "Singleon" should read
// "Singleton".
func TestPropertySingleonLen(t *testing.T) {
	f := func(item int) bool {
		single := Singleton(item)
		return single.Len() == 1
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyInsertIdempotent checks that inserting the same element
// twice leaves the length unchanged.
// NOTE(review): quick.Check must synthesize Set[int] arguments here;
// since Set's data field is unexported, this presumably relies on Set
// implementing quick.Generator elsewhere in the package — confirm.
func TestPropertyInsertIdempotent(t *testing.T) {
	f := func(s Set[int], item int) bool {
		clone := s.Clone()

		clone.Insert(item)
		firstLen := clone.Len()

		clone.Insert(item)
		secondLen := clone.Len()

		return firstLen == secondLen
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyUnionCommutative: A ∪ B == B ∪ A.
func TestPropertyUnionCommutative(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		union1 := Collect(s1.Union(s2))
		union2 := Collect(s2.Union(s1))
		return union1.Equal(union2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyIntersectionCommutative: A ∩ B == B ∩ A.
func TestPropertyIntersectionCommutative(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		inter1 := Collect(s1.Intersection(s2))
		inter2 := Collect(s2.Intersection(s1))
		return inter1.Equal(inter2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyCloneEquals: a clone always equals its source.
func TestPropertyCloneEquals(t *testing.T) {
	f := func(s Set[int]) bool {
		clone := s.Clone()
		return s.Equal(clone)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyIntersectionIsSubset: A ∩ B is a subset of both A and B.
func TestPropertyIntersectionIsSubset(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		inter := Collect(s1.Intersection(s2))
		return inter.IsSubset(s1) && inter.IsSubset(s2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyUnionIsSuperset: A ∪ B is a superset of both A and B.
func TestPropertyUnionIsSuperset(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		union := Collect(s1.Union(s2))
		return union.IsSuperset(s1) && union.IsSuperset(s2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyDifferenceDisjoint: A \ B shares nothing with B.
func TestPropertyDifferenceDisjoint(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		diff := Collect(s1.Difference(s2))
		return diff.IsDisjoint(s2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertySymmetricDifferenceCommutative: A △ B == B △ A.
func TestPropertySymmetricDifferenceCommutative(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int]) bool {
		symDiff1 := Collect(s1.SymmetricDifference(s2))
		symDiff2 := Collect(s2.SymmetricDifference(s1))
		return symDiff1.Equal(symDiff2)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyRemoveWorks: Remove after Insert leaves the element absent.
func TestPropertyRemoveWorks(t *testing.T) {
	f := func(s Set[int], item int) bool {
		clone := s.Clone()
		clone.Insert(item)
		clone.Remove(item)
		return !clone.Contains(item)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyClearEmpty: Clear always yields an empty set.
func TestPropertyClearEmpty(t *testing.T) {
	f := func(s Set[int]) bool {
		s.Clear()
		return s.IsEmpty() && s.Len() == 0
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyIsSubsetReflexive: every set is a subset of itself.
func TestPropertyIsSubsetReflexive(t *testing.T) {
	f := func(s Set[int]) bool {
		return s.IsSubset(s)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

// TestPropertyDeMorganUnion verifies De Morgan's law for the union,
// taking complements relative to a universe guaranteed to contain both
// operand sets.
func TestPropertyDeMorganUnion(t *testing.T) {
	f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool {
		// create a universe that contains both sets
		u := universe.Clone()
		for item := range s1.All() {
			u.Insert(item)
		}
		for item := range s2.All() {
			u.Insert(item)
		}

		// (A u B)' = A' n B'
		union := Collect(s1.Union(s2))
		complementUnion := Collect(u.Difference(union))

		complementS1 := Collect(u.Difference(s1))
		complementS2 := Collect(u.Difference(s2))
		intersectionComplements := Collect(complementS1.Intersection(complementS2))

		return complementUnion.Equal(intersectionComplements)
	}

	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
+1
spindle/db/repos.go
···
if err != nil {
return nil, err
}
+
defer rows.Close()
var knots []string
for rows.Next() {
+22 -21
spindle/engine/engine.go
···
import (
"context"
"errors"
-
"fmt"
"log/slog"
+
"sync"
securejoin "github.com/cyphar/filepath-securejoin"
-
"golang.org/x/sync/errgroup"
"tangled.org/core/notifier"
"tangled.org/core/spindle/config"
"tangled.org/core/spindle/db"
···
}
}
-
eg, ctx := errgroup.WithContext(ctx)
+
var wg sync.WaitGroup
for eng, wfs := range pipeline.Workflows {
workflowTimeout := eng.WorkflowTimeout()
l.Info("using workflow timeout", "timeout", workflowTimeout)
for _, w := range wfs {
-
eg.Go(func() error {
+
wg.Add(1)
+
go func() {
+
defer wg.Done()
+
wid := models.WorkflowId{
PipelineId: pipelineId,
Name: w.Name,
···
err := db.StatusRunning(wid, n)
if err != nil {
-
return err
+
l.Error("failed to set workflow status to running", "wid", wid, "err", err)
+
return
}
err = eng.SetupWorkflow(ctx, wid, &w)
···
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
}
-
return err
+
return
}
defer eng.DestroyWorkflow(ctx, wid)
-
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
+
secretValues := make([]string, len(allSecrets))
+
for i, s := range allSecrets {
+
secretValues[i] = s.Value
+
}
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues)
if err != nil {
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
wfLogger = nil
···
if errors.Is(err, ErrTimedOut) {
dbErr := db.StatusTimeout(wid, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr)
}
} else {
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
if dbErr != nil {
-
return dbErr
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
}
}
-
-
return fmt.Errorf("starting steps image: %w", err)
+
return
}
}
err = db.StatusSuccess(wid, n)
if err != nil {
-
return err
+
l.Error("failed to set workflow status to success", "wid", wid, "err", err)
}
-
-
return nil
-
})
+
}()
}
}
-
if err := eg.Wait(); err != nil {
-
l.Error("failed to run one or more workflows", "err", err)
-
} else {
-
l.Info("successfully ran full pipeline")
-
}
+
wg.Wait()
+
l.Info("all workflows completed")
}
+9 -8
spindle/engines/nixery/engine.go
···
type addlFields struct {
image string
container string
-
env map[string]string
}
func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
···
swf.Steps = append(swf.Steps, sstep)
}
swf.Name = twf.Name
-
addl.env = dwf.Environment
+
swf.Environment = dwf.Environment
addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery)
setup := &setupSteps{}
···
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
addl := w.Data.(addlFields)
-
workflowEnvs := ConstructEnvs(addl.env)
+
workflowEnvs := ConstructEnvs(w.Environment)
// TODO(winter): should SetupWorkflow also have secret access?
// IMO yes, but probably worth thinking on.
for _, s := range secrets {
workflowEnvs.AddEnv(s.Key, s.Value)
}
-
step := w.Steps[idx].(Step)
+
step := w.Steps[idx]
select {
case <-ctx.Done():
···
}
envs := append(EnvVars(nil), workflowEnvs...)
-
for k, v := range step.environment {
-
envs.AddEnv(k, v)
+
if nixStep, ok := step.(Step); ok {
+
for k, v := range nixStep.environment {
+
envs.AddEnv(k, v)
+
}
}
envs.AddEnv("HOME", homeDir)
mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{
-
Cmd: []string{"bash", "-c", step.command},
+
Cmd: []string{"bash", "-c", step.Command()},
AttachStdout: true,
AttachStderr: true,
Env: envs,
···
// Docker doesn't provide an API to kill an exec run
// (sure, we could grab the PID and kill it ourselves,
// but that's wasted effort)
-
e.l.Warn("step timed out", "step", step.Name)
+
e.l.Warn("step timed out", "step", step.Name())
<-tailDone
+6 -7
spindle/models/clone.go
···
}
}
-
repoURL := buildRepoURL(tr, dev)
+
repoURL := BuildRepoURL(tr.Repo, dev)
var cloneOpts tangled.Pipeline_CloneOpts
if twf.Clone != nil {
···
}
}
-
// buildRepoURL constructs the repository URL from trigger metadata
-
func buildRepoURL(tr tangled.Pipeline_TriggerMetadata, devMode bool) string {
-
if tr.Repo == nil {
+
// BuildRepoURL constructs the repository URL from repo metadata.
+
func BuildRepoURL(repo *tangled.Pipeline_TriggerRepo, devMode bool) string {
+
if repo == nil {
return ""
}
-
// Determine protocol
scheme := "https://"
if devMode {
scheme = "http://"
}
// Get host from knot
-
host := tr.Repo.Knot
+
host := repo.Knot
// In dev mode, replace localhost with host.docker.internal for Docker networking
if devMode && strings.Contains(host, "localhost") {
···
}
// Build URL: {scheme}{knot}/{did}/{repo}
-
return fmt.Sprintf("%s%s/%s/%s", scheme, host, tr.Repo.Did, tr.Repo.Repo)
+
return fmt.Sprintf("%s%s/%s/%s", scheme, host, repo.Did, repo.Repo)
}
// buildFetchArgs constructs the arguments for git fetch based on clone options
+6 -1
spindle/models/logger.go
···
type WorkflowLogger struct {
file *os.File
encoder *json.Encoder
+
mask *SecretMask
}
-
func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
+
func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) {
path := LogFilePath(baseDir, wid)
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
···
return &WorkflowLogger{
file: file,
encoder: json.NewEncoder(file),
+
mask: NewSecretMask(secretValues),
}, nil
}
···
func (w *dataWriter) Write(p []byte) (int, error) {
line := strings.TrimRight(string(p), "\r\n")
+
if w.logger.mask != nil {
+
line = w.logger.mask.Mask(line)
+
}
entry := NewDataLogLine(w.idx, line, w.stream)
if err := w.logger.encoder.Encode(entry); err != nil {
return 0, err
+4 -3
spindle/models/pipeline.go
···
)
type Workflow struct {
-
Steps []Step
-
Name string
-
Data any
+
Steps []Step
+
Name string
+
Data any
+
Environment map[string]string
}
+77
spindle/models/pipeline_env.go
···
+
package models
+
+
import (
+
"strings"
+
+
"github.com/go-git/go-git/v5/plumbing"
+
"tangled.org/core/api/tangled"
+
"tangled.org/core/workflow"
+
)
+
+
// PipelineEnvVars extracts environment variables from pipeline trigger metadata.
+
// These are framework-provided variables that are injected into workflow steps.
+
func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string {
+
if tr == nil {
+
return nil
+
}
+
+
env := make(map[string]string)
+
+
// Standard CI environment variable
+
env["CI"] = "true"
+
+
env["TANGLED_PIPELINE_ID"] = pipelineId.Rkey
+
+
// Repo info
+
if tr.Repo != nil {
+
env["TANGLED_REPO_KNOT"] = tr.Repo.Knot
+
env["TANGLED_REPO_DID"] = tr.Repo.Did
+
env["TANGLED_REPO_NAME"] = tr.Repo.Repo
+
env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch
+
env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode)
+
}
+
+
switch workflow.TriggerKind(tr.Kind) {
+
case workflow.TriggerKindPush:
+
if tr.Push != nil {
+
refName := plumbing.ReferenceName(tr.Push.Ref)
+
refType := "branch"
+
if refName.IsTag() {
+
refType = "tag"
+
}
+
+
env["TANGLED_REF"] = tr.Push.Ref
+
env["TANGLED_REF_NAME"] = refName.Short()
+
env["TANGLED_REF_TYPE"] = refType
+
env["TANGLED_SHA"] = tr.Push.NewSha
+
env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha
+
}
+
+
case workflow.TriggerKindPullRequest:
+
if tr.PullRequest != nil {
+
// For PRs, the "ref" is the source branch
+
env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch
+
env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch
+
env["TANGLED_REF_TYPE"] = "branch"
+
env["TANGLED_SHA"] = tr.PullRequest.SourceSha
+
env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha
+
+
// PR-specific variables
+
env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch
+
env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch
+
env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha
+
env["TANGLED_PR_ACTION"] = tr.PullRequest.Action
+
}
+
+
case workflow.TriggerKindManual:
+
// Manual triggers may not have ref/sha info
+
// Include any manual inputs if present
+
if tr.Manual != nil {
+
for _, pair := range tr.Manual.Inputs {
+
env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value
+
}
+
}
+
}
+
+
return env
+
}
+260
spindle/models/pipeline_env_test.go
···
+
package models
+
+
import (
+
"testing"
+
+
"tangled.org/core/api/tangled"
+
"tangled.org/core/workflow"
+
)
+
+
// TestPipelineEnvVars_PushBranch covers the push-to-branch trigger:
// CI, ref, SHA, and repo variables must all be populated.
func TestPipelineEnvVars_PushBranch(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindPush),
		Push: &tangled.Pipeline_PushTriggerData{
			NewSha: "abc123def456",
			OldSha: "000000000000",
			Ref:    "refs/heads/main",
		},
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot:          "example.com",
			Did:           "did:plc:user123",
			Repo:          "my-repo",
			DefaultBranch: "main",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, false)

	// Check standard CI variable
	if env["CI"] != "true" {
		t.Errorf("Expected CI='true', got '%s'", env["CI"])
	}

	// Check ref variables
	if env["TANGLED_REF"] != "refs/heads/main" {
		t.Errorf("Expected TANGLED_REF='refs/heads/main', got '%s'", env["TANGLED_REF"])
	}
	if env["TANGLED_REF_NAME"] != "main" {
		t.Errorf("Expected TANGLED_REF_NAME='main', got '%s'", env["TANGLED_REF_NAME"])
	}
	if env["TANGLED_REF_TYPE"] != "branch" {
		t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"])
	}

	// Check SHA variables
	if env["TANGLED_SHA"] != "abc123def456" {
		t.Errorf("Expected TANGLED_SHA='abc123def456', got '%s'", env["TANGLED_SHA"])
	}
	if env["TANGLED_COMMIT_SHA"] != "abc123def456" {
		t.Errorf("Expected TANGLED_COMMIT_SHA='abc123def456', got '%s'", env["TANGLED_COMMIT_SHA"])
	}

	// Check repo variables
	if env["TANGLED_REPO_KNOT"] != "example.com" {
		t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"])
	}
	if env["TANGLED_REPO_DID"] != "did:plc:user123" {
		t.Errorf("Expected TANGLED_REPO_DID='did:plc:user123', got '%s'", env["TANGLED_REPO_DID"])
	}
	if env["TANGLED_REPO_NAME"] != "my-repo" {
		t.Errorf("Expected TANGLED_REPO_NAME='my-repo', got '%s'", env["TANGLED_REPO_NAME"])
	}
	if env["TANGLED_REPO_DEFAULT_BRANCH"] != "main" {
		t.Errorf("Expected TANGLED_REPO_DEFAULT_BRANCH='main', got '%s'", env["TANGLED_REPO_DEFAULT_BRANCH"])
	}
	if env["TANGLED_REPO_URL"] != "https://example.com/did:plc:user123/my-repo" {
		t.Errorf("Expected TANGLED_REPO_URL='https://example.com/did:plc:user123/my-repo', got '%s'", env["TANGLED_REPO_URL"])
	}
}

// TestPipelineEnvVars_PushTag covers a push whose ref is a tag: the
// ref type must flip to "tag" and the short name must drop the prefix.
func TestPipelineEnvVars_PushTag(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindPush),
		Push: &tangled.Pipeline_PushTriggerData{
			NewSha: "abc123def456",
			OldSha: "000000000000",
			Ref:    "refs/tags/v1.2.3",
		},
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot: "example.com",
			Did:  "did:plc:user123",
			Repo: "my-repo",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, false)

	if env["TANGLED_REF"] != "refs/tags/v1.2.3" {
		t.Errorf("Expected TANGLED_REF='refs/tags/v1.2.3', got '%s'", env["TANGLED_REF"])
	}
	if env["TANGLED_REF_NAME"] != "v1.2.3" {
		t.Errorf("Expected TANGLED_REF_NAME='v1.2.3', got '%s'", env["TANGLED_REF_NAME"])
	}
	if env["TANGLED_REF_TYPE"] != "tag" {
		t.Errorf("Expected TANGLED_REF_TYPE='tag', got '%s'", env["TANGLED_REF_TYPE"])
	}
}

// TestPipelineEnvVars_PullRequest covers the PR trigger: the ref maps
// to the source branch and the PR-specific variables are populated.
func TestPipelineEnvVars_PullRequest(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindPullRequest),
		PullRequest: &tangled.Pipeline_PullRequestTriggerData{
			SourceBranch: "feature-branch",
			TargetBranch: "main",
			SourceSha:    "pr-sha-789",
			Action:       "opened",
		},
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot: "example.com",
			Did:  "did:plc:user123",
			Repo: "my-repo",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, false)

	// Check ref variables for PR
	if env["TANGLED_REF"] != "refs/heads/feature-branch" {
		t.Errorf("Expected TANGLED_REF='refs/heads/feature-branch', got '%s'", env["TANGLED_REF"])
	}
	if env["TANGLED_REF_NAME"] != "feature-branch" {
		t.Errorf("Expected TANGLED_REF_NAME='feature-branch', got '%s'", env["TANGLED_REF_NAME"])
	}
	if env["TANGLED_REF_TYPE"] != "branch" {
		t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"])
	}

	// Check SHA variables
	if env["TANGLED_SHA"] != "pr-sha-789" {
		t.Errorf("Expected TANGLED_SHA='pr-sha-789', got '%s'", env["TANGLED_SHA"])
	}
	if env["TANGLED_COMMIT_SHA"] != "pr-sha-789" {
		t.Errorf("Expected TANGLED_COMMIT_SHA='pr-sha-789', got '%s'", env["TANGLED_COMMIT_SHA"])
	}

	// Check PR-specific variables
	if env["TANGLED_PR_SOURCE_BRANCH"] != "feature-branch" {
		t.Errorf("Expected TANGLED_PR_SOURCE_BRANCH='feature-branch', got '%s'", env["TANGLED_PR_SOURCE_BRANCH"])
	}
	if env["TANGLED_PR_TARGET_BRANCH"] != "main" {
		t.Errorf("Expected TANGLED_PR_TARGET_BRANCH='main', got '%s'", env["TANGLED_PR_TARGET_BRANCH"])
	}
	if env["TANGLED_PR_SOURCE_SHA"] != "pr-sha-789" {
		t.Errorf("Expected TANGLED_PR_SOURCE_SHA='pr-sha-789', got '%s'", env["TANGLED_PR_SOURCE_SHA"])
	}
	if env["TANGLED_PR_ACTION"] != "opened" {
		t.Errorf("Expected TANGLED_PR_ACTION='opened', got '%s'", env["TANGLED_PR_ACTION"])
	}
}

// TestPipelineEnvVars_ManualWithInputs covers the manual trigger:
// inputs become TANGLED_INPUT_* (keys upper-cased) and no ref/sha
// variables are emitted.
func TestPipelineEnvVars_ManualWithInputs(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindManual),
		Manual: &tangled.Pipeline_ManualTriggerData{
			Inputs: []*tangled.Pipeline_Pair{
				{Key: "version", Value: "1.0.0"},
				{Key: "environment", Value: "production"},
			},
		},
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot: "example.com",
			Did:  "did:plc:user123",
			Repo: "my-repo",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, false)

	// Check manual input variables
	if env["TANGLED_INPUT_VERSION"] != "1.0.0" {
		t.Errorf("Expected TANGLED_INPUT_VERSION='1.0.0', got '%s'", env["TANGLED_INPUT_VERSION"])
	}
	if env["TANGLED_INPUT_ENVIRONMENT"] != "production" {
		t.Errorf("Expected TANGLED_INPUT_ENVIRONMENT='production', got '%s'", env["TANGLED_INPUT_ENVIRONMENT"])
	}

	// Manual triggers shouldn't have ref/sha variables
	if _, ok := env["TANGLED_REF"]; ok {
		t.Error("Manual trigger should not have TANGLED_REF")
	}
	if _, ok := env["TANGLED_SHA"]; ok {
		t.Error("Manual trigger should not have TANGLED_SHA")
	}
}

// TestPipelineEnvVars_DevMode covers dev mode: http scheme and the
// localhost -> host.docker.internal substitution in the repo URL.
func TestPipelineEnvVars_DevMode(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindPush),
		Push: &tangled.Pipeline_PushTriggerData{
			NewSha: "abc123",
			Ref:    "refs/heads/main",
		},
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot: "localhost:3000",
			Did:  "did:plc:user123",
			Repo: "my-repo",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, true)

	// Dev mode should use http:// and replace localhost with host.docker.internal
	expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo"
	if env["TANGLED_REPO_URL"] != expectedURL {
		t.Errorf("Expected TANGLED_REPO_URL='%s', got '%s'", expectedURL, env["TANGLED_REPO_URL"])
	}
}

// TestPipelineEnvVars_NilTrigger: a nil trigger yields a nil map.
func TestPipelineEnvVars_NilTrigger(t *testing.T) {
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(nil, id, false)

	if env != nil {
		t.Error("Expected nil env for nil trigger")
	}
}

// TestPipelineEnvVars_NilPushData: missing push payload still produces
// repo variables, but no ref/sha variables.
func TestPipelineEnvVars_NilPushData(t *testing.T) {
	tr := &tangled.Pipeline_TriggerMetadata{
		Kind: string(workflow.TriggerKindPush),
		Push: nil,
		Repo: &tangled.Pipeline_TriggerRepo{
			Knot: "example.com",
			Did:  "did:plc:user123",
			Repo: "my-repo",
		},
	}
	id := PipelineId{
		Knot: "example.com",
		Rkey: "123123",
	}
	env := PipelineEnvVars(tr, id, false)

	// Should still have repo variables
	if env["TANGLED_REPO_KNOT"] != "example.com" {
		t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"])
	}

	// Should not have ref/sha variables
	if _, ok := env["TANGLED_REF"]; ok {
		t.Error("Should not have TANGLED_REF when push data is nil")
	}
}
+51
spindle/models/secret_mask.go
···
+
package models
+
+
import (
+
"encoding/base64"
+
"strings"
+
)
+
+
// SecretMask replaces secret values in strings with "***".
type SecretMask struct {
	replacer *strings.Replacer
}

// NewSecretMask creates a mask for the given secret values. Besides the
// raw value, it also registers the base64-encoded variant (with and,
// when applicable, without trailing '=' padding), since secrets often
// leak in encoded form. It returns nil when there is nothing to mask;
// a nil *SecretMask is safe to use and passes input through unchanged.
func NewSecretMask(values []string) *SecretMask {
	var oldnew []string
	add := func(needle string) {
		oldnew = append(oldnew, needle, "***")
	}

	for _, secret := range values {
		if secret == "" {
			continue
		}
		add(secret)

		encoded := base64.StdEncoding.EncodeToString([]byte(secret))
		if encoded != secret {
			add(encoded)
		}

		unpadded := strings.TrimRight(encoded, "=")
		if unpadded != encoded && unpadded != secret {
			add(unpadded)
		}
	}

	if len(oldnew) == 0 {
		return nil
	}
	return &SecretMask{replacer: strings.NewReplacer(oldnew...)}
}

// Mask replaces all registered secret values with "***".
func (m *SecretMask) Mask(input string) string {
	if m == nil || m.replacer == nil {
		return input
	}
	return m.replacer.Replace(input)
}
+135
spindle/models/secret_mask_test.go
···
+
package models
+
+
import (
+
"encoding/base64"
+
"testing"
+
)
+
+
// TestSecretMask_BasicMasking: a raw secret occurrence is replaced.
func TestSecretMask_BasicMasking(t *testing.T) {
	mask := NewSecretMask([]string{"mysecret123"})

	input := "The password is mysecret123 in this log"
	expected := "The password is *** in this log"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_Base64Encoded: the base64 form of a secret is masked too.
func TestSecretMask_Base64Encoded(t *testing.T) {
	secret := "mysecret123"
	mask := NewSecretMask([]string{secret})

	b64 := base64.StdEncoding.EncodeToString([]byte(secret))
	input := "Encoded: " + b64
	expected := "Encoded: ***"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_Base64NoPadding: the padding-stripped base64 variant
// is also registered and masked.
func TestSecretMask_Base64NoPadding(t *testing.T) {
	// "test" encodes to "dGVzdA==" with padding
	secret := "test"
	mask := NewSecretMask([]string{secret})

	b64NoPad := "dGVzdA" // base64 without padding
	input := "Token: " + b64NoPad
	expected := "Token: ***"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_MultipleSecrets: all registered secrets are masked.
func TestSecretMask_MultipleSecrets(t *testing.T) {
	mask := NewSecretMask([]string{"password1", "apikey123"})

	input := "Using password1 and apikey123 for auth"
	expected := "Using *** and *** for auth"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_MultipleOccurrences: every occurrence is replaced.
func TestSecretMask_MultipleOccurrences(t *testing.T) {
	mask := NewSecretMask([]string{"secret"})

	input := "secret appears twice: secret"
	expected := "*** appears twice: ***"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_ShortValues: short (but non-empty) values are still
// masked; empty strings are ignored.
func TestSecretMask_ShortValues(t *testing.T) {
	mask := NewSecretMask([]string{"abc", "xy", ""})

	if mask == nil {
		t.Fatal("expected non-nil mask")
	}

	input := "abc xy test"
	expected := "*** *** test"
	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}

// TestSecretMask_NilMask: a nil *SecretMask passes input through.
func TestSecretMask_NilMask(t *testing.T) {
	var mask *SecretMask

	input := "some input text"
	result := mask.Mask(input)
	if result != input {
		t.Errorf("expected %q, got %q", input, result)
	}
}

// TestSecretMask_EmptyInput: masking the empty string is a no-op.
func TestSecretMask_EmptyInput(t *testing.T) {
	mask := NewSecretMask([]string{"secret"})

	result := mask.Mask("")
	if result != "" {
		t.Errorf("expected empty string, got %q", result)
	}
}

// TestSecretMask_NoMatch: input without secrets is returned unchanged.
func TestSecretMask_NoMatch(t *testing.T) {
	mask := NewSecretMask([]string{"secretvalue"})

	input := "nothing to mask here"
	result := mask.Mask(input)
	if result != input {
		t.Errorf("expected %q, got %q", input, result)
	}
}

// TestSecretMask_EmptySecretsList: no values means no mask (nil).
func TestSecretMask_EmptySecretsList(t *testing.T) {
	mask := NewSecretMask([]string{})

	if mask != nil {
		t.Error("expected nil mask for empty secrets list")
	}
}

// TestSecretMask_EmptySecretsFiltered: empty entries in the list are
// skipped while the remaining values are still masked.
func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
	mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})

	input := "Using validpassword here"
	expected := "Using *** here"

	result := mask.Mask(input)
	if result != expected {
		t.Errorf("expected %q, got %q", expected, result)
	}
}
+11
spindle/server.go
···
"encoding/json"
"fmt"
"log/slog"
+
"maps"
"net/http"
"github.com/go-chi/chi/v5"
···
workflows := make(map[models.Engine][]models.Workflow)
+
// Build pipeline environment variables once for all workflows
+
pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
+
for _, w := range tpl.Workflows {
if w != nil {
if _, ok := s.engs[w.Engine]; !ok {
···
if err != nil {
return err
}
+
+
// inject TANGLED_* env vars after InitWorkflow
+
// This prevents user-defined env vars from overriding them
+
if ewf.Environment == nil {
+
ewf.Environment = make(map[string]string)
+
}
+
maps.Copy(ewf.Environment, pipelineEnv)
workflows[eng] = append(workflows[eng], *ewf)
+1 -1
tailwind.config.js
···
const colors = require("tailwindcss/colors");
module.exports = {
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
darkMode: "media",
theme: {
container: {
+199
types/commit.go
···
+
package types
+
+
import (
+
"bytes"
+
"encoding/json"
+
"fmt"
+
"maps"
+
"regexp"
+
"strings"
+
+
"github.com/go-git/go-git/v5/plumbing"
+
"github.com/go-git/go-git/v5/plumbing/object"
+
)
+
+
// Commit is a unified commit representation that can carry both the
// fields of go-git's object.Commit and the legacy NiceDiff commit JSON
// shape (see UnmarshalJSON).
type Commit struct {
	// Hash of the commit object.
	Hash plumbing.Hash `json:"hash,omitempty"`

	// Author is the original author of the commit.
	Author object.Signature `json:"author"`

	// Committer is the one performing the commit; might be different from Author.
	Committer object.Signature `json:"committer"`

	// Message is the commit message; contains arbitrary text.
	Message string `json:"message"`

	// Tree is the hash of the root tree of the commit.
	Tree string `json:"tree"`

	// ParentHashes are the hashes of the parent commits of the commit.
	ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`

	// PGPSignature is the PGP signature of the commit.
	PGPSignature string `json:"pgp_signature,omitempty"`

	// MergeTag is the embedded tag object when a merge commit is created by
	// merging a signed tag.
	MergeTag string `json:"merge_tag,omitempty"`

	// ChangeId is a unique identifier for the change (e.g., gerrit change-id).
	ChangeId string `json:"change_id,omitempty"`

	// ExtraHeaders contains additional headers not captured by other fields.
	ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`

	// Deprecated: kept for backwards compatibility with the old JSON format.
	This string `json:"this,omitempty"`

	// Deprecated: kept for backwards compatibility with the old JSON format.
	Parent string `json:"parent,omitempty"`
}
+
+
// types.Commit unifies two commit structs:
//   - object.Commit from go-git
//   - types.NiceDiff.Commit
//
// to do this in backwards compatible fashion, we define the base struct
// to use the same fields as NiceDiff.Commit, and then we also unmarshal
// the struct fields from go-git structs; this custom unmarshal makes sense
// of both representations and unifies them to have maximal data in either
// form.
func (c *Commit) UnmarshalJSON(data []byte) error {
	// Alias strips Commit's method set so the json.Unmarshal below does
	// not recurse back into this UnmarshalJSON.
	type Alias Commit

	// Decode into both shapes at once. Per encoding/json conflict rules,
	// the tagged Alias fields win for keys both structs share, while keys
	// unique to go-git's untagged object.Commit (e.g. "TreeHash",
	// "ParentHashes") land in aux.Commit.
	aux := &struct {
		*object.Commit
		*Alias
	}{
		Alias: (*Alias)(c),
	}

	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}

	// Backfill fields the tagged form left empty from the go-git
	// representation. aux.Commit may still be nil when no go-git-only
	// keys were present; FromGoGitCommit tolerates that.
	c.FromGoGitCommit(aux.Commit)

	return nil
}
+
+
// FromGoGitCommit fills in as much of Commit as possible from the given
// go-git commit. Each field is copied only if it is still at its zero
// value, so data already present on c (e.g. decoded from the tagged json
// form) takes precedence.
func (c *Commit) FromGoGitCommit(gc *object.Commit) {
	// tolerate a missing go-git representation (see UnmarshalJSON)
	if gc == nil {
		return
	}

	if c.Hash.IsZero() {
		c.Hash = gc.Hash
	}
	// deprecated string mirror of Hash, kept for the old json format
	if c.This == "" {
		c.This = gc.Hash.String()
	}
	if isEmptySignature(c.Author) {
		c.Author = gc.Author
	}
	if isEmptySignature(c.Committer) {
		c.Committer = gc.Committer
	}
	if c.Message == "" {
		c.Message = gc.Message
	}
	if c.Tree == "" {
		c.Tree = gc.TreeHash.String()
	}
	if c.PGPSignature == "" {
		c.PGPSignature = gc.PGPSignature
	}
	if c.MergeTag == "" {
		c.MergeTag = gc.MergeTag
	}

	if len(c.ParentHashes) == 0 {
		c.ParentHashes = gc.ParentHashes
	}
	// deprecated single-parent mirror: records the first parent only
	if c.Parent == "" && len(gc.ParentHashes) > 0 {
		c.Parent = gc.ParentHashes[0].String()
	}

	// copy the map so later mutation of gc.ExtraHeaders does not show up
	// here (note: the []byte values still alias the originals)
	if len(c.ExtraHeaders) == 0 {
		c.ExtraHeaders = make(map[string][]byte)
		maps.Copy(c.ExtraHeaders, gc.ExtraHeaders)
	}

	// a gerrit-style change-id travels as an extra header in go-git commits
	if c.ChangeId == "" {
		if v, ok := gc.ExtraHeaders["change-id"]; ok {
			c.ChangeId = string(v)
		}
	}
}
+
+
func isEmptySignature(s object.Signature) bool {
+
return s.Email == "" && s.Name == "" && s.When.IsZero()
+
}
+
+
// produce a verifiable payload from this commit's metadata
+
func (c *Commit) Payload() string {
+
author := bytes.NewBuffer([]byte{})
+
c.Author.Encode(author)
+
+
committer := bytes.NewBuffer([]byte{})
+
c.Committer.Encode(committer)
+
+
payload := strings.Builder{}
+
+
fmt.Fprintf(&payload, "tree %s\n", c.Tree)
+
+
if len(c.ParentHashes) > 0 {
+
for _, p := range c.ParentHashes {
+
fmt.Fprintf(&payload, "parent %s\n", p.String())
+
}
+
} else {
+
// present for backwards compatibility
+
fmt.Fprintf(&payload, "parent %s\n", c.Parent)
+
}
+
+
fmt.Fprintf(&payload, "author %s\n", author.String())
+
fmt.Fprintf(&payload, "committer %s\n", committer.String())
+
+
if c.ChangeId != "" {
+
fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId)
+
} else if v, ok := c.ExtraHeaders["change-id"]; ok {
+
fmt.Fprintf(&payload, "change-id %s\n", string(v))
+
}
+
+
fmt.Fprintf(&payload, "\n%s", c.Message)
+
+
return payload.String()
+
}
+
+
var (
+
coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`)
+
)
+
+
func (commit Commit) CoAuthors() []object.Signature {
+
var coAuthors []object.Signature
+
seen := make(map[string]bool)
+
matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
+
+
for _, match := range matches {
+
if len(match) >= 3 {
+
name := strings.TrimSpace(match[1])
+
email := strings.TrimSpace(match[2])
+
+
if seen[email] {
+
continue
+
}
+
seen[email] = true
+
+
coAuthors = append(coAuthors, object.Signature{
+
Name: name,
+
Email: email,
+
When: commit.Committer.When,
+
})
+
}
+
}
+
+
return coAuthors
+
}
+2 -12
types/diff.go
···
import (
"github.com/bluekeyes/go-gitdiff/gitdiff"
-
"github.com/go-git/go-git/v5/plumbing/object"
)
type DiffOpts struct {
···
// A nicer git diff representation.
type NiceDiff struct {
-
Commit struct {
-
Message string `json:"message"`
-
Author object.Signature `json:"author"`
-
This string `json:"this"`
-
Parent string `json:"parent"`
-
PGPSignature string `json:"pgp_signature"`
-
Committer object.Signature `json:"committer"`
-
Tree string `json:"tree"`
-
ChangedId string `json:"change_id"`
-
} `json:"commit"`
-
Stat struct {
+
Commit Commit `json:"commit"`
+
Stat struct {
FilesChanged int `json:"files_changed"`
Insertions int `json:"insertions"`
Deletions int `json:"deletions"`
+17 -17
types/repo.go
···
)
type RepoIndexResponse struct {
-
IsEmpty bool `json:"is_empty"`
-
Ref string `json:"ref,omitempty"`
-
Readme string `json:"readme,omitempty"`
-
ReadmeFileName string `json:"readme_file_name,omitempty"`
-
Commits []*object.Commit `json:"commits,omitempty"`
-
Description string `json:"description,omitempty"`
-
Files []NiceTree `json:"files,omitempty"`
-
Branches []Branch `json:"branches,omitempty"`
-
Tags []*TagReference `json:"tags,omitempty"`
-
TotalCommits int `json:"total_commits,omitempty"`
+
IsEmpty bool `json:"is_empty"`
+
Ref string `json:"ref,omitempty"`
+
Readme string `json:"readme,omitempty"`
+
ReadmeFileName string `json:"readme_file_name,omitempty"`
+
Commits []Commit `json:"commits,omitempty"`
+
Description string `json:"description,omitempty"`
+
Files []NiceTree `json:"files,omitempty"`
+
Branches []Branch `json:"branches,omitempty"`
+
Tags []*TagReference `json:"tags,omitempty"`
+
TotalCommits int `json:"total_commits,omitempty"`
}
type RepoLogResponse struct {
-
Commits []*object.Commit `json:"commits,omitempty"`
-
Ref string `json:"ref,omitempty"`
-
Description string `json:"description,omitempty"`
-
Log bool `json:"log,omitempty"`
-
Total int `json:"total,omitempty"`
-
Page int `json:"page,omitempty"`
-
PerPage int `json:"per_page,omitempty"`
+
Commits []Commit `json:"commits,omitempty"`
+
Ref string `json:"ref,omitempty"`
+
Description string `json:"description,omitempty"`
+
Log bool `json:"log,omitempty"`
+
Total int `json:"total,omitempty"`
+
Page int `json:"page,omitempty"`
+
PerPage int `json:"per_page,omitempty"`
}
type RepoCommitResponse struct {