
spindle: rework db schema

Signed-off-by: oppiliappan <me@oppi.li>

oppi.li 82d62fcf a8a3163d (verified)

Changed files: +653 -408
api · cmd/spindle · lexicons/pipeline · spindle · tid
+47 -70
api/tangled/cbor_gen.go
···
		fieldCount--
	}

-	if t.FinishedAt == nil {
-		fieldCount--
-	}
-
	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
		return err
	}
···
	}

-	// t.StartedAt (string) (string)
-	if len("startedAt") > 1000000 {
-		return xerrors.Errorf("Value in field \"startedAt\" was too long")
	}
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("startedAt"))); err != nil {
		return err
	}
-	if _, err := cw.WriteString(string("startedAt")); err != nil {
		return err
	}
-	if len(t.StartedAt) > 1000000 {
-		return xerrors.Errorf("Value in field t.StartedAt was too long")
	}
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.StartedAt))); err != nil {
		return err
	}
-	if _, err := cw.WriteString(string(t.StartedAt)); err != nil {
		return err
	}
-	// t.UpdatedAt (string) (string)
-	if len("updatedAt") > 1000000 {
-		return xerrors.Errorf("Value in field \"updatedAt\" was too long")
	}
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("updatedAt"))); err != nil {
		return err
	}
-	if _, err := cw.WriteString(string("updatedAt")); err != nil {
		return err
	}
-	if len(t.UpdatedAt) > 1000000 {
-		return xerrors.Errorf("Value in field t.UpdatedAt was too long")
	}
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UpdatedAt))); err != nil {
		return err
	}
-	if _, err := cw.WriteString(string(t.UpdatedAt)); err != nil {
		return err
	}
-	// t.FinishedAt (string) (string)
-	if t.FinishedAt != nil {
-		if len("finishedAt") > 1000000 {
-			return xerrors.Errorf("Value in field \"finishedAt\" was too long")
-		}
-		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("finishedAt"))); err != nil {
-			return err
-		}
-		if _, err := cw.WriteString(string("finishedAt")); err != nil {
-			return err
-		}
-		if t.FinishedAt == nil {
-			if _, err := cw.Write(cbg.CborNull); err != nil {
-				return err
-			}
-		} else {
-			if len(*t.FinishedAt) > 1000000 {
-				return xerrors.Errorf("Value in field t.FinishedAt was too long")
-			}
-
-			if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.FinishedAt))); err != nil {
-				return err
-			}
-			if _, err := cw.WriteString(string(*t.FinishedAt)); err != nil {
-				return err
-			}
-		}
	}
	return nil
}
···
	n := extra
-	nameBuf := make([]byte, 10)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
···
			t.ExitCode = (*int64)(&extraI)
		}
	}
-	// t.StartedAt (string) (string)
-	case "startedAt":
		{
			sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
				return err
			}
-			t.StartedAt = string(sval)
		}
-	// t.UpdatedAt (string) (string)
-	case "updatedAt":
		{
			sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
				return err
			}
-			t.UpdatedAt = string(sval)
		}
-	// t.FinishedAt (string) (string)
-	case "finishedAt":
		{
-			b, err := cr.ReadByte()
			if err != nil {
				return err
			}
-			if b != cbg.CborNull[0] {
-				if err := cr.UnreadByte(); err != nil {
-					return err
-				}
-				sval, err := cbg.ReadStringWithMax(cr, 1000000)
-				if err != nil {
-					return err
-				}
-
-				t.FinishedAt = (*string)(&sval)
-			}
		}
default:
···
		fieldCount--
	}
	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
		return err
	}
···
	}
+	// t.Pipeline (string) (string)
+	if len("pipeline") > 1000000 {
+		return xerrors.Errorf("Value in field \"pipeline\" was too long")
	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pipeline"))); err != nil {
		return err
	}
+	if _, err := cw.WriteString(string("pipeline")); err != nil {
		return err
	}
+	if len(t.Pipeline) > 1000000 {
+		return xerrors.Errorf("Value in field t.Pipeline was too long")
	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Pipeline))); err != nil {
		return err
	}
+	if _, err := cw.WriteString(string(t.Pipeline)); err != nil {
		return err
	}
+	// t.Workflow (string) (string)
+	if len("workflow") > 1000000 {
+		return xerrors.Errorf("Value in field \"workflow\" was too long")
	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("workflow"))); err != nil {
		return err
	}
+	if _, err := cw.WriteString(string("workflow")); err != nil {
		return err
	}
+	if len(t.Workflow) > 1000000 {
+		return xerrors.Errorf("Value in field t.Workflow was too long")
	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Workflow))); err != nil {
		return err
	}
+	if _, err := cw.WriteString(string(t.Workflow)); err != nil {
		return err
	}
+	// t.CreatedAt (string) (string)
+	if len("createdAt") > 1000000 {
+		return xerrors.Errorf("Value in field \"createdAt\" was too long")
+	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("createdAt")); err != nil {
+		return err
+	}
+	if len(t.CreatedAt) > 1000000 {
+		return xerrors.Errorf("Value in field t.CreatedAt was too long")
+	}
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
+		return err
	}
	return nil
}
···
	n := extra
+	nameBuf := make([]byte, 9)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
···
			t.ExitCode = (*int64)(&extraI)
		}
	}
+	// t.Pipeline (string) (string)
+	case "pipeline":
		{
			sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
				return err
			}
+			t.Pipeline = string(sval)
		}
+	// t.Workflow (string) (string)
+	case "workflow":
		{
			sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
				return err
			}
+			t.Workflow = string(sval)
		}
+	// t.CreatedAt (string) (string)
+	case "createdAt":
		{
+			sval, err := cbg.ReadStringWithMax(cr, 1000000)
			if err != nil {
				return err
			}
+			t.CreatedAt = string(sval)
		}
default:
+6 -8
api/tangled/pipelinestatus.go
···
// RECORDTYPE: PipelineStatus
type PipelineStatus struct {
	LexiconTypeID string `json:"$type,const=sh.tangled.pipeline.status" cborgen:"$type,const=sh.tangled.pipeline.status"`
	// error: error message if failed
	Error *string `json:"error,omitempty" cborgen:"error,omitempty"`
	// exitCode: exit code if failed
	ExitCode *int64 `json:"exitCode,omitempty" cborgen:"exitCode,omitempty"`
-	// finishedAt: pipeline finish time, if finished
-	FinishedAt *string `json:"finishedAt,omitempty" cborgen:"finishedAt,omitempty"`
-	// pipeline: pipeline at ref
	Pipeline string `json:"pipeline" cborgen:"pipeline"`
-	// startedAt: pipeline start time
-	StartedAt string `json:"startedAt" cborgen:"startedAt"`
-	// status: Pipeline status
	Status string `json:"status" cborgen:"status"`
-	// updatedAt: pipeline last updated time
-	UpdatedAt string `json:"updatedAt" cborgen:"updatedAt"`
}
···
// RECORDTYPE: PipelineStatus
type PipelineStatus struct {
	LexiconTypeID string `json:"$type,const=sh.tangled.pipeline.status" cborgen:"$type,const=sh.tangled.pipeline.status"`
+	// createdAt: time of creation of this status update
+	CreatedAt string `json:"createdAt" cborgen:"createdAt"`
	// error: error message if failed
	Error *string `json:"error,omitempty" cborgen:"error,omitempty"`
	// exitCode: exit code if failed
	ExitCode *int64 `json:"exitCode,omitempty" cborgen:"exitCode,omitempty"`
+	// pipeline: ATURI of the pipeline
	Pipeline string `json:"pipeline" cborgen:"pipeline"`
+	// status: status of the workflow
	Status string `json:"status" cborgen:"status"`
+	// workflow: name of the workflow within this pipeline
+	Workflow string `json:"workflow" cborgen:"workflow"`
}
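For reference, a record conforming to the reworked schema would be built and serialized roughly like this. This is an illustrative sketch, not code from this commit; the knot hostname, rkey, and field values are made up.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	s := tangled.PipelineStatus{
		LexiconTypeID: "sh.tangled.pipeline.status",
		Pipeline:      "at://did:web:knot.example.com/sh.tangled.pipeline/3kabc123xyz", // ATURI of the pipeline
		Workflow:      "build",                                                         // workflow name within that pipeline
		Status:        "running",
		CreatedAt:     time.Now().Format(time.RFC3339),
	}
	b, _ := json.Marshal(s) // Error and ExitCode are omitted while nil
	fmt.Println(string(b))
}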
+1 -1
api/tangled/tangledpipeline.go
···
// Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
type Pipeline_Workflow struct {
	Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
-	Dependencies []Pipeline_Dependencies_Elem `json:"dependencies" cborgen:"dependencies"`
	Environment []*Pipeline_Workflow_Environment_Elem `json:"environment" cborgen:"environment"`
	Name string `json:"name" cborgen:"name"`
	Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
···
// Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
type Pipeline_Workflow struct {
	Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
+	Dependencies []Pipeline_Dependencies_Elem `json:"dependencies" cborgen:"dependencies"`
	Environment []*Pipeline_Workflow_Environment_Elem `json:"environment" cborgen:"environment"`
	Name string `json:"name" cborgen:"name"`
	Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
+1
cmd/spindle/main.go
···
"tangled.sh/tangled.sh/core/log"
"tangled.sh/tangled.sh/core/spindle"
)
func main() {
···
"tangled.sh/tangled.sh/core/log"
"tangled.sh/tangled.sh/core/spindle"
+	_ "tangled.sh/tangled.sh/core/tid"
)
func main() {
+13 -18
lexicons/pipeline/status.json
···
"key": "tid",
"record": {
"type": "object",
-
"required": ["pipeline", "status", "startedAt", "updatedAt"],
"properties": {
"pipeline": {
"type": "string",
"format": "at-uri",
-
"description": "pipeline at ref"
},
"status": {
"type": "string",
-
"description": "Pipeline status",
"enum": [
"pending",
"running",
···
"cancelled",
"success"
]
},
"error": {
"type": "string",
···
"exitCode": {
"type": "integer",
"description": "exit code if failed"
-
},
-
"startedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "pipeline start time"
-
},
-
"updatedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "pipeline last updated time"
-
},
-
"finishedAt": {
-
"type": "string",
-
"format": "datetime",
-
"description": "pipeline finish time, if finished"
}
}
}
···
"key": "tid",
"record": {
"type": "object",
+
"required": ["pipeline", "workflow", "status", "createdAt"],
"properties": {
"pipeline": {
"type": "string",
"format": "at-uri",
+
"description": "ATURI of the pipeline"
+
},
+
"workflow": {
+
"type": "string",
+
"format": "at-uri",
+
"description": "name of the workflow within this pipeline"
},
"status": {
"type": "string",
+
"description": "status of the workflow",
"enum": [
"pending",
"running",
···
"cancelled",
"success"
]
+
},
+
"createdAt": {
+
"type": "string",
+
"format": "datetime",
+
"description": "time of creation of this status update"
},
"error": {
"type": "string",
···
"exitCode": {
"type": "integer",
"description": "exit code if failed"
}
}
}
+5 -13
spindle/db/db.go
···
	did text primary key
);
-create table if not exists pipeline_status (
	rkey text not null,
-	pipeline text not null,
-	status text not null,
-
-	-- only set if status is 'failed'
-	error text,
-	exit_code integer,
-
-	started_at timestamp not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
-	updated_at timestamp not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
-	finished_at timestamp,
-
-	primary key (rkey)
);
`)
if err != nil {
···
	did text primary key
);
+-- status event for a single workflow
+create table if not exists events (
	rkey text not null,
+	nsid text not null,
+	event text not null, -- json
+	created integer not null -- unix nanos
);
`)
if err != nil {
+148
spindle/db/events.go
···
···
+package db
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"tangled.sh/tangled.sh/core/api/tangled"
+	"tangled.sh/tangled.sh/core/notifier"
+	"tangled.sh/tangled.sh/core/spindle/models"
+	"tangled.sh/tangled.sh/core/tid"
+)
+
+type Event struct {
+	Rkey      string `json:"rkey"`
+	Nsid      string `json:"nsid"`
+	Created   int64  `json:"created"`
+	EventJson string `json:"event"`
+}
+
+func (d *DB) InsertEvent(event Event, notifier *notifier.Notifier) error {
+	_, err := d.Exec(
+		`insert into events (rkey, nsid, event, created) values (?, ?, ?, ?)`,
+		event.Rkey,
+		event.Nsid,
+		event.EventJson,
+		time.Now().UnixNano(),
+	)
+
+	notifier.NotifyAll()
+
+	return err
+}
+
+func (d *DB) GetEvents(cursor int64) ([]Event, error) {
+	whereClause := ""
+	args := []any{}
+	if cursor > 0 {
+		whereClause = "where created > ?"
+		args = append(args, cursor)
+	}
+
+	query := fmt.Sprintf(`
+		select rkey, nsid, event, created
+		from events
+		%s
+		order by created asc
+		limit 100
+	`, whereClause)
+
+	rows, err := d.Query(query, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var evts []Event
+	for rows.Next() {
+		var ev Event
+		if err := rows.Scan(&ev.Rkey, &ev.Nsid, &ev.EventJson, &ev.Created); err != nil {
+			return nil, err
+		}
+		evts = append(evts, ev)
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return evts, nil
+}
+
+func (d *DB) CreateStatusEvent(rkey string, s tangled.PipelineStatus, n *notifier.Notifier) error {
+	eventJson, err := json.Marshal(s)
+	if err != nil {
+		return err
+	}
+
+	event := Event{
+		Rkey:      rkey,
+		Nsid:      tangled.PipelineStatusNSID,
+		Created:   time.Now().UnixNano(),
+		EventJson: string(eventJson),
+	}
+
+	return d.InsertEvent(event, n)
+}
+
+type StatusKind string
+
+var (
+	StatusKindPending   StatusKind = "pending"
+	StatusKindRunning   StatusKind = "running"
+	StatusKindFailed    StatusKind = "failed"
+	StatusKindTimeout   StatusKind = "timeout"
+	StatusKindCancelled StatusKind = "cancelled"
+	StatusKindSuccess   StatusKind = "success"
+)
+
+func (d *DB) createStatusEvent(
+	workflowId models.WorkflowId,
+	statusKind StatusKind,
+	workflowError *string,
+	exitCode *int64,
+	n *notifier.Notifier,
+) error {
+	now := time.Now()
+	pipelineAtUri := workflowId.PipelineId.AtUri()
+	s := tangled.PipelineStatus{
+		CreatedAt: now.Format(time.RFC3339),
+		Error:     workflowError,
+		ExitCode:  exitCode,
+		Pipeline:  string(pipelineAtUri),
+		Workflow:  workflowId.Name,
+		Status:    string(statusKind),
+	}
+
+	eventJson, err := json.Marshal(s)
+	if err != nil {
+		return err
+	}
+
+	event := Event{
+		Rkey:      tid.TID(),
+		Nsid:      tangled.PipelineStatusNSID,
+		Created:   now.UnixNano(),
+		EventJson: string(eventJson),
+	}
+
+	return d.InsertEvent(event, n)
+}
+
+func (d *DB) StatusPending(workflowId models.WorkflowId, n *notifier.Notifier) error {
+	return d.createStatusEvent(workflowId, StatusKindPending, nil, nil, n)
+}
+
+func (d *DB) StatusRunning(workflowId models.WorkflowId, n *notifier.Notifier) error {
+	return d.createStatusEvent(workflowId, StatusKindRunning, nil, nil, n)
+}
+
+func (d *DB) StatusFailed(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
+	return d.createStatusEvent(workflowId, StatusKindFailed, &workflowError, &exitCode, n)
+}
+
+func (d *DB) StatusSuccess(workflowId models.WorkflowId, n *notifier.Notifier) error {
+	return d.createStatusEvent(workflowId, StatusKindSuccess, nil, nil, n)
+}
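Taken together, these helpers turn each status transition into an immutable row in events rather than an update to a mutable pipeline_status row. A minimal sketch of the intended call sequence, assuming the db, models, and notifier packages above; the runWorkflow wrapper is hypothetical glue, not part of this commit:

// runWorkflow is a hypothetical illustration of the lifecycle.
func runWorkflow(d *db.DB, n *notifier.Notifier, wid models.WorkflowId, run func() error) error {
	// "pending" is recorded by the server before the job is enqueued
	if err := d.StatusRunning(wid, n); err != nil {
		return err
	}
	if err := run(); err != nil {
		// exit code -1 mirrors what the engine reports for non-container failures
		return d.StatusFailed(wid, err.Error(), -1, n)
	}
	return d.StatusSuccess(wid, n)
}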
+205 -177
spindle/db/pipelines.go
···
package db

-import (
-	"fmt"
-	"time"
-
-	"tangled.sh/tangled.sh/core/api/tangled"
-	"tangled.sh/tangled.sh/core/notifier"
-)
-
-type PipelineRunStatus string
-
-var (
-	PipelinePending   PipelineRunStatus = "pending"
-	PipelineRunning   PipelineRunStatus = "running"
-	PipelineFailed    PipelineRunStatus = "failed"
-	PipelineTimeout   PipelineRunStatus = "timeout"
-	PipelineCancelled PipelineRunStatus = "cancelled"
-	PipelineSuccess   PipelineRunStatus = "success"
-)
-
-type PipelineStatus struct {
-	Rkey     string            `json:"rkey"`
-	Pipeline string            `json:"pipeline"`
-	Status   PipelineRunStatus `json:"status"`
-
-	// only if Failed
-	Error    string `json:"error"`
-	ExitCode int    `json:"exit_code"`
-
-	StartedAt  time.Time `json:"started_at"`
-	UpdatedAt  time.Time `json:"updated_at"`
-	FinishedAt time.Time `json:"finished_at"`
-}
-
-func (p PipelineStatus) AsRecord() *tangled.PipelineStatus {
-	exitCode64 := int64(p.ExitCode)
-	finishedAt := p.FinishedAt.String()
-
-	return &tangled.PipelineStatus{
-		LexiconTypeID: tangled.PipelineStatusNSID,
-		Pipeline:      p.Pipeline,
-		Status:        string(p.Status),
-
-		ExitCode: &exitCode64,
-		Error:    &p.Error,
-
-		StartedAt:  p.StartedAt.String(),
-		UpdatedAt:  p.UpdatedAt.String(),
-		FinishedAt: &finishedAt,
-	}
-}
-
-func pipelineAtUri(rkey, knot string) string {
-	return fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineStatusNSID, knot, rkey)
-}
-
-func (db *DB) CreatePipeline(rkey, pipeline string, n *notifier.Notifier) error {
-	_, err := db.Exec(`
-		insert into pipeline_status (rkey, status, pipeline)
-		values (?, ?, ?)
-	`, rkey, PipelinePending, pipeline)
-
-	if err != nil {
-		return err
-	}
-	n.NotifyAll()
-	return nil
-}
-
-func (db *DB) MarkPipelineRunning(rkey string, n *notifier.Notifier) error {
-	_, err := db.Exec(`
-		update pipeline_status
-		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-		where rkey = ?
-	`, PipelineRunning, rkey)
-
-	if err != nil {
-		return err
-	}
-	n.NotifyAll()
-	return nil
-}
-
-func (db *DB) MarkPipelineFailed(rkey string, exitCode int, errorMsg string, n *notifier.Notifier) error {
-	_, err := db.Exec(`
-		update pipeline_status
-		set status = ?,
-			exit_code = ?,
-			error = ?,
-			updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
-			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-		where rkey = ?
-	`, PipelineFailed, exitCode, errorMsg, rkey)
-	if err != nil {
-		return err
-	}
-	n.NotifyAll()
-	return nil
-}
-
-func (db *DB) MarkPipelineTimeout(rkey string, n *notifier.Notifier) error {
-	_, err := db.Exec(`
-		update pipeline_status
-		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-		where rkey = ?
-	`, PipelineTimeout, rkey)
-	if err != nil {
-		return err
-	}
-	n.NotifyAll()
-	return nil
-}
-
-func (db *DB) MarkPipelineSuccess(rkey string, n *notifier.Notifier) error {
-	_, err := db.Exec(`
-		update pipeline_status
-		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
-			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-		where rkey = ?
-	`, PipelineSuccess, rkey)
-
-	if err != nil {
-		return err
-	}
-	n.NotifyAll()
-	return nil
-}
-
-func (db *DB) GetPipelineStatus(rkey string) (PipelineStatus, error) {
-	var p PipelineStatus
-	err := db.QueryRow(`
-		select rkey, status, error, exit_code, started_at, updated_at, finished_at
-		from pipelines
-		where rkey = ?
-	`, rkey).Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
-	return p, err
-}
-
-func (db *DB) GetPipelineStatusAsRecords(cursor string) ([]PipelineStatus, error) {
-	whereClause := ""
-	args := []any{}
-	if cursor != "" {
-		whereClause = "where rkey > ?"
-		args = append(args, cursor)
-	}
-
-	query := fmt.Sprintf(`
-		select rkey, status, error, exit_code, started_at, updated_at, finished_at
-		from pipeline_status
-		%s
-		order by rkey asc
-		limit 100
-	`, whereClause)
-
-	rows, err := db.Query(query, args...)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-
-	var pipelines []PipelineStatus
-	for rows.Next() {
-		var p PipelineStatus
-		rows.Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
-		pipelines = append(pipelines, p)
-	}
-
-	if err := rows.Err(); err != nil {
-		return nil, err
-	}
-
-	records := []*tangled.PipelineStatus{}
-	for _, p := range pipelines {
-		records = append(records, p.AsRecord())
-	}
-
-	return pipelines, nil
-}
···
package db
+//
+// import (
+// 	"database/sql"
+// 	"fmt"
+// 	"time"
+//
+// 	"tangled.sh/tangled.sh/core/api/tangled"
+// 	"tangled.sh/tangled.sh/core/notifier"
+// )
+//
+// type PipelineRunStatus string
+//
+// var (
+// 	PipelinePending   PipelineRunStatus = "pending"
+// 	PipelineRunning   PipelineRunStatus = "running"
+// 	PipelineFailed    PipelineRunStatus = "failed"
+// 	PipelineTimeout   PipelineRunStatus = "timeout"
+// 	PipelineCancelled PipelineRunStatus = "cancelled"
+// 	PipelineSuccess   PipelineRunStatus = "success"
+// )
+//
+// type PipelineStatus struct {
+// 	Rkey     string            `json:"rkey"`
+// 	Pipeline string            `json:"pipeline"`
+// 	Status   PipelineRunStatus `json:"status"`
+//
+// 	// only if Failed
+// 	Error    string `json:"error"`
+// 	ExitCode int    `json:"exit_code"`
+//
+// 	LastUpdate int64     `json:"last_update"`
+// 	StartedAt  time.Time `json:"started_at"`
+// 	UpdatedAt  time.Time `json:"updated_at"`
+// 	FinishedAt time.Time `json:"finished_at"`
+// }
+//
+// func (p PipelineStatus) AsRecord() *tangled.PipelineStatus {
+// 	exitCode64 := int64(p.ExitCode)
+// 	finishedAt := p.FinishedAt.String()
+//
+// 	return &tangled.PipelineStatus{
+// 		LexiconTypeID: tangled.PipelineStatusNSID,
+// 		Pipeline:      p.Pipeline,
+// 		Status:        string(p.Status),
+//
+// 		ExitCode: &exitCode64,
+// 		Error:    &p.Error,
+//
+// 		StartedAt:  p.StartedAt.String(),
+// 		UpdatedAt:  p.UpdatedAt.String(),
+// 		FinishedAt: &finishedAt,
+// 	}
+// }
+//
+// func pipelineAtUri(rkey, knot string) string {
+// 	return fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineStatusNSID, knot, rkey)
+// }
+//
+// func (db *DB) CreatePipeline(rkey, pipeline string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		insert into pipeline_status (rkey, status, pipeline, last_update)
+// 		values (?, ?, ?, ?)
+// 	`, rkey, PipelinePending, pipeline, time.Now().UnixNano())
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineRunning(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), last_update = ?
+// 		where rkey = ?
+// 	`, PipelineRunning, rkey, time.Now().UnixNano())
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineFailed(rkey string, exitCode int, errorMsg string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?,
+// 			exit_code = ?,
+// 			error = ?,
+// 			updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			last_update = ?
+// 		where rkey = ?
+// 	`, PipelineFailed, exitCode, errorMsg, rkey, time.Now().UnixNano())
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineTimeout(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
+// 		where rkey = ?
+// 	`, PipelineTimeout, rkey)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineSuccess(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
+// 		where rkey = ?
+// 	`, PipelineSuccess, rkey)
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) GetPipelineStatus(rkey string) (PipelineStatus, error) {
+// 	var p PipelineStatus
+// 	err := db.QueryRow(`
+// 		select rkey, status, error, exit_code, started_at, updated_at, finished_at
+// 		from pipelines
+// 		where rkey = ?
+// 	`, rkey).Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
+// 	return p, err
+// }
+//
+// func (db *DB) GetPipelineStatusAsRecords(cursor int64) ([]PipelineStatus, error) {
+// 	whereClause := ""
+// 	args := []any{}
+// 	if cursor != 0 {
+// 		whereClause = "where created_at > ?"
+// 		args = append(args, cursor)
+// 	}
+//
+// 	query := fmt.Sprintf(`
+// 		select rkey, status, error, exit_code, created_at, started_at, updated_at, finished_at
+// 		from pipeline_status
+// 		%s
+// 		order by created_at asc
+// 		limit 100
+// 	`, whereClause)
+//
+// 	rows, err := db.Query(query, args...)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	defer rows.Close()
+//
+// 	var pipelines []PipelineStatus
+// 	for rows.Next() {
+// 		var p PipelineStatus
+// 		var pipelineError sql.NullString
+// 		var exitCode sql.NullInt64
+// 		var startedAt, updatedAt string
+// 		var finishedAt sql.NullTime
+//
+// 		err := rows.Scan(&p.Rkey, &p.Status, &pipelineError, &exitCode, &p.LastUpdate, &startedAt, &updatedAt, &finishedAt)
+// 		if err != nil {
+// 			return nil, err
+// 		}
+//
+// 		if pipelineError.Valid {
+// 			p.Error = pipelineError.String
+// 		}
+//
+// 		if exitCode.Valid {
+// 			p.ExitCode = int(exitCode.Int64)
+// 		}
+//
+// 		if v, err := time.Parse(time.RFC3339, startedAt); err == nil {
+// 			p.StartedAt = v
+// 		}
+//
+// 		if v, err := time.Parse(time.RFC3339, updatedAt); err == nil {
+// 			p.UpdatedAt = v
+// 		}
+//
+// 		if finishedAt.Valid {
+// 			p.FinishedAt = finishedAt.Time
+// 		}
+//
+// 		pipelines = append(pipelines, p)
+// 	}
+//
+// 	if err := rows.Err(); err != nil {
+// 		return nil, err
+// 	}
+//
+// 	return pipelines, nil
+// }
+83 -73
spindle/engine/engine.go
···
"path"
"strings"
"sync"
-
"syscall"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
···
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
-
"golang.org/x/sync/errgroup"
"tangled.sh/tangled.sh/core/api/tangled"
"tangled.sh/tangled.sh/core/log"
"tangled.sh/tangled.sh/core/notifier"
"tangled.sh/tangled.sh/core/spindle/db"
)
const (
···
return e, nil
}
-
func (e *Engine) StartWorkflows(ctx context.Context, pipeline *tangled.Pipeline, id string) error {
-
e.l.Info("starting all workflows in parallel", "pipeline", id)
-
err := e.db.MarkPipelineRunning(id, e.n)
-
if err != nil {
-
return err
-
}
-
g := errgroup.Group{}
-
for _, w := range pipeline.Workflows {
-
g.Go(func() error {
-
err := e.SetupWorkflow(ctx, id, w.Name)
if err != nil {
return err
}
-
defer e.DestroyWorkflow(ctx, id, w.Name)
// TODO: actual checks for image/registry etc.
var deps string
···
cimg := path.Join("nixery.dev", deps)
reader, err := e.docker.ImagePull(ctx, cimg, image.PullOptions{})
if err != nil {
-
e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-
err := e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
if err != nil {
return err
}
return fmt.Errorf("pulling image: %w", err)
}
defer reader.Close()
io.Copy(os.Stdout, reader)
-
err = e.StartSteps(ctx, w.Steps, w.Name, id, cimg)
if err != nil {
-
e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-
return e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
}
return nil
-
})
}
-
err = g.Wait()
-
if err != nil {
-
e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-
return e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
-
}
-
-
e.l.Info("pipeline success!", "id", id)
-
return e.db.MarkPipelineSuccess(id, e.n)
}
// SetupWorkflow sets up a new network for the workflow and volumes for
// the workspace and Nix store. These are persisted across steps and are
// destroyed at the end of the workflow.
-
func (e *Engine) SetupWorkflow(ctx context.Context, id, workflowName string) error {
-
e.l.Info("setting up workflow", "pipeline", id, "workflow", workflowName)
_, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
-
Name: workspaceVolume(id, workflowName),
Driver: "local",
})
if err != nil {
return err
}
-
e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-
return e.docker.VolumeRemove(ctx, workspaceVolume(id, workflowName), true)
})
_, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
-
Name: nixVolume(id, workflowName),
Driver: "local",
})
if err != nil {
return err
}
-
e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-
return e.docker.VolumeRemove(ctx, nixVolume(id, workflowName), true)
})
-
_, err = e.docker.NetworkCreate(ctx, networkName(id, workflowName), network.CreateOptions{
Driver: "bridge",
})
if err != nil {
return err
}
-
e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-
return e.docker.NetworkRemove(ctx, networkName(id, workflowName))
})
return nil
···
// StartSteps starts all steps sequentially with the same base image.
// ONLY marks pipeline as failed if container's exit code is non-zero.
// All other errors are bubbled up.
-
func (e *Engine) StartSteps(ctx context.Context, steps []*tangled.Pipeline_Step, workflowName, id, image string) error {
// set up logging channels
e.chanMu.Lock()
-
if _, exists := e.stdoutChans[id]; !exists {
-
e.stdoutChans[id] = make(chan string, 100)
}
-
if _, exists := e.stderrChans[id]; !exists {
-
e.stderrChans[id] = make(chan string, 100)
}
e.chanMu.Unlock()
// close channels after all steps are complete
defer func() {
-
close(e.stdoutChans[id])
-
close(e.stderrChans[id])
}()
for _, step := range steps {
-
hostConfig := hostConfig(id, workflowName)
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"bash", "-c", step.Command},
···
return fmt.Errorf("creating container: %w", err)
}
-
err = e.docker.NetworkConnect(ctx, networkName(id, workflowName), resp.ID, nil)
if err != nil {
return fmt.Errorf("connecting network: %w", err)
}
···
wg.Add(1)
go func() {
defer wg.Done()
-
err := e.TailStep(ctx, resp.ID, id)
if err != nil {
e.l.Error("failed to tail container", "container", resp.ID)
return
···
return err
}
-
err = e.DestroyStep(ctx, resp.ID, id)
if err != nil {
return err
}
if state.ExitCode != 0 {
-
e.l.Error("pipeline failed!", "id", id, "error", state.Error, "exit_code", state.ExitCode)
-
return e.db.MarkPipelineFailed(id, state.ExitCode, state.Error, e.n)
}
}
···
return info.State, nil
}
-
func (e *Engine) TailStep(ctx context.Context, containerID, pipelineID string) error {
logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
Follow: true,
ShowStdout: true,
···
// once all steps are done.
go func() {
e.chanMu.RLock()
-
stdoutCh := e.stdoutChans[pipelineID]
e.chanMu.RUnlock()
scanner := bufio.NewScanner(rpipeOut)
···
// once all steps are done.
go func() {
e.chanMu.RLock()
-
stderrCh := e.stderrChans[pipelineID]
e.chanMu.RUnlock()
scanner := bufio.NewScanner(rpipeErr)
···
return nil
}
-
func (e *Engine) DestroyStep(ctx context.Context, containerID, pipelineID string) error {
-
err := e.docker.ContainerKill(ctx, containerID, syscall.SIGKILL.String())
if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
return err
}
···
return nil
}
-
func (e *Engine) DestroyWorkflow(ctx context.Context, pipelineID, workflowName string) error {
e.cleanupMu.Lock()
-
key := fmt.Sprintf("%s-%s", pipelineID, workflowName)
fns := e.cleanup[key]
delete(e.cleanup, key)
···
for _, fn := range fns {
if err := fn(ctx); err != nil {
-
e.l.Error("failed to cleanup workflow resource", "pipeline", pipelineID, "workflow", workflowName, "err", err)
}
}
return nil
}
-
func (e *Engine) LogChannels(pipelineID string) (stdout <-chan string, stderr <-chan string, ok bool) {
e.chanMu.RLock()
defer e.chanMu.RUnlock()
-
stdoutCh, ok1 := e.stdoutChans[pipelineID]
-
stderrCh, ok2 := e.stderrChans[pipelineID]
if !ok1 || !ok2 {
return nil, nil, false
···
return stdoutCh, stderrCh, true
}
-
func (e *Engine) registerCleanup(pipelineID, workflowName string, fn cleanupFunc) {
e.cleanupMu.Lock()
defer e.cleanupMu.Unlock()
-
key := fmt.Sprintf("%s-%s", pipelineID, workflowName)
e.cleanup[key] = append(e.cleanup[key], fn)
}
-
func workspaceVolume(id, name string) string {
-
return fmt.Sprintf("workspace-%s-%s", id, name)
}
-
func nixVolume(id, name string) string {
-
return fmt.Sprintf("nix-%s-%s", id, name)
}
-
func networkName(id, name string) string {
-
return fmt.Sprintf("workflow-network-%s-%s", id, name)
}
-
func hostConfig(id, name string) *container.HostConfig {
hostConfig := &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
-
Source: workspaceVolume(id, name),
Target: workspaceDir,
},
{
Type: mount.TypeVolume,
-
Source: nixVolume(id, name),
Target: "/nix",
},
},
···
"path"
"strings"
"sync"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
···
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"tangled.sh/tangled.sh/core/api/tangled"
"tangled.sh/tangled.sh/core/log"
"tangled.sh/tangled.sh/core/notifier"
"tangled.sh/tangled.sh/core/spindle/db"
+
"tangled.sh/tangled.sh/core/spindle/models"
)
const (
···
return e, nil
}
+
func (e *Engine) StartWorkflows(ctx context.Context, pipeline *tangled.Pipeline, pipelineId models.PipelineId) {
+
e.l.Info("starting all workflows in parallel", "pipeline", pipelineId)
+
wg := sync.WaitGroup{}
+
for _, w := range pipeline.Workflows {
+
wg.Add(1)
+
go func() error {
+
defer wg.Done()
+
wid := models.WorkflowId{
+
PipelineId: pipelineId,
+
Name: w.Name,
+
}
+
err := e.db.StatusRunning(wid, e.n)
if err != nil {
return err
}
+
err = e.SetupWorkflow(ctx, wid)
+
if err != nil {
+
e.l.Error("setting up worklow", "wid", wid, "err", err)
+
return err
+
}
+
defer e.DestroyWorkflow(ctx, wid)
// TODO: actual checks for image/registry etc.
var deps string
···
cimg := path.Join("nixery.dev", deps)
reader, err := e.docker.ImagePull(ctx, cimg, image.PullOptions{})
if err != nil {
+
e.l.Error("pipeline failed!", "workflowId", wid, "error", err.Error())
+
+
err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
if err != nil {
return err
}
+
return fmt.Errorf("pulling image: %w", err)
}
defer reader.Close()
io.Copy(os.Stdout, reader)
+
err = e.StartSteps(ctx, w.Steps, wid, cimg)
if err != nil {
+
e.l.Error("workflow failed!", "wid", wid.String(), "error", err.Error())
+
+
err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
+
if err != nil {
+
return err
+
}
+
}
+
+
err = e.db.StatusSuccess(wid, e.n)
+
if err != nil {
+
return err
}
return nil
+
}()
}
+
wg.Wait()
}
// SetupWorkflow sets up a new network for the workflow and volumes for
// the workspace and Nix store. These are persisted across steps and are
// destroyed at the end of the workflow.
+
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId) error {
+
e.l.Info("setting up workflow", "workflow", wid)
_, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
+
Name: workspaceVolume(wid),
Driver: "local",
})
if err != nil {
return err
}
+
e.registerCleanup(wid, func(ctx context.Context) error {
+
return e.docker.VolumeRemove(ctx, workspaceVolume(wid), true)
})
_, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
+
Name: nixVolume(wid),
Driver: "local",
})
if err != nil {
return err
}
+
e.registerCleanup(wid, func(ctx context.Context) error {
+
return e.docker.VolumeRemove(ctx, nixVolume(wid), true)
})
+
_, err = e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
Driver: "bridge",
})
if err != nil {
return err
}
+
e.registerCleanup(wid, func(ctx context.Context) error {
+
return e.docker.NetworkRemove(ctx, networkName(wid))
})
return nil
···
// StartSteps starts all steps sequentially with the same base image.
// ONLY marks pipeline as failed if container's exit code is non-zero.
// All other errors are bubbled up.
+
func (e *Engine) StartSteps(ctx context.Context, steps []*tangled.Pipeline_Step, wid models.WorkflowId, image string) error {
// set up logging channels
e.chanMu.Lock()
+
if _, exists := e.stdoutChans[wid.String()]; !exists {
+
e.stdoutChans[wid.String()] = make(chan string, 100)
}
+
if _, exists := e.stderrChans[wid.String()]; !exists {
+
e.stderrChans[wid.String()] = make(chan string, 100)
}
e.chanMu.Unlock()
// close channels after all steps are complete
defer func() {
+
close(e.stdoutChans[wid.String()])
+
close(e.stderrChans[wid.String()])
}()
for _, step := range steps {
+
hostConfig := hostConfig(wid)
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
Image: image,
Cmd: []string{"bash", "-c", step.Command},
···
return fmt.Errorf("creating container: %w", err)
}
+
err = e.docker.NetworkConnect(ctx, networkName(wid), resp.ID, nil)
if err != nil {
return fmt.Errorf("connecting network: %w", err)
}
···
wg.Add(1)
go func() {
defer wg.Done()
+
err := e.TailStep(ctx, resp.ID, wid)
if err != nil {
e.l.Error("failed to tail container", "container", resp.ID)
return
···
return err
}
+
err = e.DestroyStep(ctx, resp.ID)
if err != nil {
return err
}
if state.ExitCode != 0 {
+
e.l.Error("workflow failed!", "workflow_id", wid.String(), "error", state.Error, "exit_code", state.ExitCode)
+
// return e.db.MarkPipelineFailed(id, state.ExitCode, state.Error, e.n)
}
}
···
return info.State, nil
}
+
func (e *Engine) TailStep(ctx context.Context, containerID string, wid models.WorkflowId) error {
logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
Follow: true,
ShowStdout: true,
···
// once all steps are done.
go func() {
e.chanMu.RLock()
+
stdoutCh := e.stdoutChans[wid.String()]
e.chanMu.RUnlock()
scanner := bufio.NewScanner(rpipeOut)
···
// once all steps are done.
go func() {
e.chanMu.RLock()
+
stderrCh := e.stderrChans[wid.String()]
e.chanMu.RUnlock()
scanner := bufio.NewScanner(rpipeErr)
···
return nil
}
+
func (e *Engine) DestroyStep(ctx context.Context, containerID string) error {
+
err := e.docker.ContainerKill(ctx, containerID, "9") // SIGKILL
if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
return err
}
···
return nil
}
+
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
e.cleanupMu.Lock()
+
key := wid.String()
fns := e.cleanup[key]
delete(e.cleanup, key)
···
for _, fn := range fns {
if err := fn(ctx); err != nil {
+
e.l.Error("failed to cleanup workflow resource", "workflowId", wid)
}
}
return nil
}
+
func (e *Engine) LogChannels(wid models.WorkflowId) (stdout <-chan string, stderr <-chan string, ok bool) {
e.chanMu.RLock()
defer e.chanMu.RUnlock()
+
stdoutCh, ok1 := e.stdoutChans[wid.String()]
+
stderrCh, ok2 := e.stderrChans[wid.String()]
if !ok1 || !ok2 {
return nil, nil, false
···
return stdoutCh, stderrCh, true
}
+
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
e.cleanupMu.Lock()
defer e.cleanupMu.Unlock()
+
key := wid.String()
e.cleanup[key] = append(e.cleanup[key], fn)
}
+
func workspaceVolume(wid models.WorkflowId) string {
+
return fmt.Sprintf("workspace-%s", wid)
}
+
func nixVolume(wid models.WorkflowId) string {
+
return fmt.Sprintf("nix-%s", wid)
}
+
func networkName(wid models.WorkflowId) string {
+
return fmt.Sprintf("workflow-network-%s", wid)
}
+
func hostConfig(wid models.WorkflowId) *container.HostConfig {
hostConfig := &container.HostConfig{
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
+
Source: workspaceVolume(wid),
Target: workspaceDir,
},
{
Type: mount.TypeVolume,
+
Source: nixVolume(wid),
Target: "/nix",
},
},
+37
spindle/models/models.go
···
···
+package models
+
+import (
+	"fmt"
+	"regexp"
+
+	"tangled.sh/tangled.sh/core/api/tangled"
+
+	"github.com/bluesky-social/indigo/atproto/syntax"
+)
+
+var (
+	re = regexp.MustCompile(`[^a-zA-Z0-9_.-]`)
+)
+
+type PipelineId struct {
+	Knot string
+	Rkey string
+}
+
+func (p *PipelineId) AtUri() syntax.ATURI {
+	return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", p.Knot, tangled.PipelineNSID, p.Rkey))
+}
+
+type WorkflowId struct {
+	PipelineId
+	Name string
+}
+
+func (wid WorkflowId) String() string {
+	return fmt.Sprintf("%s-%s-%s", normalize(wid.Knot), wid.Rkey, normalize(wid.Name))
+}
+
+func normalize(name string) string {
+	normalized := re.ReplaceAllString(name, "-")
+	return normalized
+}
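A quick illustration of the two identifier types, using hypothetical values and assuming tangled.PipelineNSID is "sh.tangled.pipeline":

package main

import (
	"fmt"

	"tangled.sh/tangled.sh/core/spindle/models"
)

func main() {
	pid := models.PipelineId{Knot: "knot.example.com", Rkey: "3kabc123xyz"}
	wid := models.WorkflowId{PipelineId: pid, Name: "build & test"}

	fmt.Println(pid.AtUri())  // at://did:web:knot.example.com/sh.tangled.pipeline/3kabc123xyz
	fmt.Println(wid.String()) // knot.example.com-3kabc123xyz-build---test
}

The normalized String() form keeps Docker volume and network names valid, which is why the engine keys everything on WorkflowId.String().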
+29 -11
spindle/queue/queue.go
···
package queue

type Job struct {
	Run    func() error
	OnFail func(error)
}

type Queue struct {
-	jobs chan Job
}

-func NewQueue(size int) *Queue {
	return &Queue{
-		jobs: make(chan Job, size),
	}
}
···
	}
}

-func (q *Queue) StartRunner() {
-	go func() {
-		for job := range q.jobs {
-			if err := job.Run(); err != nil {
-				if job.OnFail != nil {
-					job.OnFail(err)
-				}
			}
		}
-	}()
}
···
package queue

+import (
+	"sync"
+)
+
type Job struct {
	Run    func() error
	OnFail func(error)
}

type Queue struct {
+	jobs    chan Job
+	workers int
+	wg      sync.WaitGroup
}

+func NewQueue(queueSize, numWorkers int) *Queue {
	return &Queue{
+		jobs:    make(chan Job, queueSize),
+		workers: numWorkers,
	}
}
···
	}
}

+func (q *Queue) Start() {
+	for range q.workers {
+		q.wg.Add(1)
+		go q.worker()
+	}
+}
+
+func (q *Queue) worker() {
+	defer q.wg.Done()
+	for job := range q.jobs {
+		if err := job.Run(); err != nil {
+			if job.OnFail != nil {
+				job.OnFail(err)
			}
		}
+	}
+}
+
+func (q *Queue) Stop() {
+	close(q.jobs)
+	q.wg.Wait()
}
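A usage sketch of the reworked queue, assuming the Enqueue method shown in spindle/server.go below; the job body is a placeholder:

package main

import (
	"fmt"

	"tangled.sh/tangled.sh/core/spindle/queue"
)

func main() {
	q := queue.NewQueue(100, 2) // buffer up to 100 jobs, run 2 workers
	q.Start()

	q.Enqueue(queue.Job{
		Run:    func() error { fmt.Println("work"); return nil },
		OnFail: func(err error) { fmt.Println("job failed:", err) },
	})

	q.Stop() // closes the job channel, then waits for workers to drain it
}

The single background runner becomes a small worker pool, and Stop gives the server a way to drain in-flight pipelines on shutdown.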
+23 -13
spindle/server.go
···
"tangled.sh/tangled.sh/core/spindle/config"
"tangled.sh/tangled.sh/core/spindle/db"
"tangled.sh/tangled.sh/core/spindle/engine"
"tangled.sh/tangled.sh/core/spindle/queue"
)
···
return err
}
-
jq := queue.NewQueue(100)
// starts a job queue runner in the background
-
jq.StartRunner()
spindle := Spindle{
jc: jc,
···
mux := chi.NewRouter()
mux.HandleFunc("/events", s.Events)
-
mux.HandleFunc("/logs/{pipelineID}", s.Logs)
return mux
}
···
return err
}
-
ok := s.jq.Enqueue(queue.Job{
-
Run: func() error {
-
// this is a "fake" at uri for now
-
pipelineAtUri := fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineNSID, pipeline.TriggerMetadata.Repo.Knot, msg.Rkey)
-
-
rkey := TID()
-
err = s.db.CreatePipeline(rkey, pipelineAtUri, s.n)
if err != nil {
return err
}
-
return s.eng.StartWorkflows(ctx, &pipeline, rkey)
},
-
OnFail: func(error) {
-
s.l.Error("pipeline run failed", "error", err)
},
})
if ok {
···
"tangled.sh/tangled.sh/core/spindle/config"
"tangled.sh/tangled.sh/core/spindle/db"
"tangled.sh/tangled.sh/core/spindle/engine"
+
"tangled.sh/tangled.sh/core/spindle/models"
"tangled.sh/tangled.sh/core/spindle/queue"
)
···
return err
}
+
jq := queue.NewQueue(100, 2)
// starts a job queue runner in the background
+
jq.Start()
+
defer jq.Stop()
spindle := Spindle{
jc: jc,
···
mux := chi.NewRouter()
mux.HandleFunc("/events", s.Events)
+
mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
return mux
}
···
return err
}
+
pipelineId := models.PipelineId{
+
Knot: src.Knot,
+
Rkey: msg.Rkey,
+
}
+
for _, w := range pipeline.Workflows {
+
if w != nil {
+
err := s.db.StatusPending(models.WorkflowId{
+
PipelineId: pipelineId,
+
Name: w.Name,
+
}, s.n)
if err != nil {
return err
}
+
}
+
}
+
ok := s.jq.Enqueue(queue.Job{
+
Run: func() error {
+
s.eng.StartWorkflows(ctx, &pipeline, pipelineId)
+
return nil
},
+
OnFail: func(jobError error) {
+
s.l.Error("pipeline run failed", "error", jobError)
},
})
if ok {
+54 -23
spindle/stream.go
···
package spindle

import (
	"fmt"
	"net/http"
	"time"
-	"context"

	"github.com/go-chi/chi/v5"
	"github.com/gorilla/websocket"
···
func (s *Spindle) Events(w http.ResponseWriter, r *http.Request) {
	l := s.l.With("handler", "Events")
-	l.Info("received new connection")

	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
···
		return
	}
	defer conn.Close()
-	l.Info("upgraded http to wss")

	ch := s.n.Subscribe()
	defer s.n.Unsubscribe(ch)
···
		}
	}()

-	cursor := ""
	// complete backfill first before going to live data
-	l.Info("going through backfill", "cursor", cursor)
	if err := s.streamPipelines(conn, &cursor); err != nil {
		l.Error("failed to backfill", "err", err)
		return
···
		// wait for new data or timeout
		select {
		case <-ctx.Done():
-			l.Info("stopping stream: client closed connection")
			return
		case <-ch:
			// we have been notified of new data
-			l.Info("going through live data", "cursor", cursor)
			if err := s.streamPipelines(conn, &cursor); err != nil {
				l.Error("failed to stream", "err", err)
				return
			}
		case <-time.After(30 * time.Second):
			// send a keep-alive
-			l.Info("sent keepalive")
			if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
				l.Error("failed to write control", "err", err)
			}
···
func (s *Spindle) Logs(w http.ResponseWriter, r *http.Request) {
	l := s.l.With("handler", "Logs")
-	pipelineID := chi.URLParam(r, "pipelineID")
-	if pipelineID == "" {
-		http.Error(w, "pipelineID required", http.StatusBadRequest)
		return
	}
-	l = l.With("pipelineID", pipelineID)

	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
···
		return
	}
	defer conn.Close()
-	l.Info("upgraded http to wss")

	ctx, cancel := context.WithCancel(r.Context())
	defer cancel()
···
	go func() {
		for {
			if _, _, err := conn.NextReader(); err != nil {
-				l.Info("client disconnected", "err", err)
				cancel()
				return
			}
		}
	}()

-	if err := s.streamLogs(ctx, conn, pipelineID); err != nil {
		l.Error("streamLogs failed", "err", err)
	}
-	l.Info("logs connection closed")
}

-func (s *Spindle) streamLogs(ctx context.Context, conn *websocket.Conn, pipelineID string) error {
-	l := s.l.With("pipelineID", pipelineID)
-	stdoutCh, stderrCh, ok := s.eng.LogChannels(pipelineID)
	if !ok {
-		return fmt.Errorf("pipelineID %q not found", pipelineID)
	}

	done := make(chan struct{})
···
	return nil
}

-func (s *Spindle) streamPipelines(conn *websocket.Conn, cursor *string) error {
-	ops, err := s.db.GetPipelineStatusAsRecords(*cursor)
	if err != nil {
		s.l.Debug("err", "err", err)
		return err
···
			s.l.Debug("err", "err", err)
			return err
		}
-		*cursor = op.Rkey
	}

	return nil
···
package spindle

import (
+	"context"
	"fmt"
	"net/http"
+	"strconv"
	"time"

+	"tangled.sh/tangled.sh/core/spindle/models"

	"github.com/go-chi/chi/v5"
	"github.com/gorilla/websocket"
···
func (s *Spindle) Events(w http.ResponseWriter, r *http.Request) {
	l := s.l.With("handler", "Events")
+	l.Debug("received new connection")

	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
···
		return
	}
	defer conn.Close()
+	l.Debug("upgraded http to wss")

	ch := s.n.Subscribe()
	defer s.n.Unsubscribe(ch)
···
		}
	}()

+	defaultCursor := time.Now().UnixNano()
+	cursorStr := r.URL.Query().Get("cursor")
+	cursor, err := strconv.ParseInt(cursorStr, 10, 64)
+	if err != nil {
+		l.Error("empty or invalid cursor", "invalidCursor", cursorStr, "default", defaultCursor)
+	}
+	if cursor == 0 {
+		cursor = defaultCursor
+	}
	// complete backfill first before going to live data
+	l.Debug("going through backfill", "cursor", cursor)
	if err := s.streamPipelines(conn, &cursor); err != nil {
		l.Error("failed to backfill", "err", err)
		return
···
		// wait for new data or timeout
		select {
		case <-ctx.Done():
+			l.Debug("stopping stream: client closed connection")
			return
		case <-ch:
			// we have been notified of new data
+			l.Debug("going through live data", "cursor", cursor)
			if err := s.streamPipelines(conn, &cursor); err != nil {
				l.Error("failed to stream", "err", err)
				return
			}
		case <-time.After(30 * time.Second):
			// send a keep-alive
+			l.Debug("sent keepalive")
			if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
				l.Error("failed to write control", "err", err)
			}
···
func (s *Spindle) Logs(w http.ResponseWriter, r *http.Request) {
	l := s.l.With("handler", "Logs")
+	knot := chi.URLParam(r, "knot")
+	if knot == "" {
+		http.Error(w, "knot required", http.StatusBadRequest)
+		return
+	}
+
+	rkey := chi.URLParam(r, "rkey")
+	if rkey == "" {
+		http.Error(w, "rkey required", http.StatusBadRequest)
+		return
+	}
+
+	name := chi.URLParam(r, "name")
+	if name == "" {
+		http.Error(w, "name required", http.StatusBadRequest)
		return
	}
+
+	wid := models.WorkflowId{
+		PipelineId: models.PipelineId{
+			Knot: knot,
+			Rkey: rkey,
+		},
+		Name: name,
+	}
+
+	l = l.With("knot", knot, "rkey", rkey, "name", name)

	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
···
		return
	}
	defer conn.Close()
+	l.Debug("upgraded http to wss")

	ctx, cancel := context.WithCancel(r.Context())
	defer cancel()
···
	go func() {
		for {
			if _, _, err := conn.NextReader(); err != nil {
+				l.Debug("client disconnected", "err", err)
				cancel()
				return
			}
		}
	}()

+	if err := s.streamLogs(ctx, conn, wid); err != nil {
		l.Error("streamLogs failed", "err", err)
	}
+	l.Debug("logs connection closed")
}

+func (s *Spindle) streamLogs(ctx context.Context, conn *websocket.Conn, wid models.WorkflowId) error {
+	l := s.l.With("workflow_id", wid.String())
+	stdoutCh, stderrCh, ok := s.eng.LogChannels(wid)
	if !ok {
+		return fmt.Errorf("workflow_id %q not found", wid.String())
	}

	done := make(chan struct{})
···
	return nil
}

+func (s *Spindle) streamPipelines(conn *websocket.Conn, cursor *int64) error {
+	ops, err := s.db.GetEvents(*cursor)
	if err != nil {
		s.l.Debug("err", "err", err)
		return err
···
			s.l.Debug("err", "err", err)
			return err
		}
+		*cursor = op.Created
	}

	return nil
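The /events cursor contract changes accordingly: a client passes the last created timestamp (unix nanos) it has seen, the server backfills forward from there, then follows live notifications; an absent or invalid cursor defaults to "now". A hypothetical consumer, assuming the server writes each db.Event as a JSON message (the actual write call is elided in this diff):

package main

import (
	"fmt"
	"strconv"

	"github.com/gorilla/websocket"
)

func consume(lastCursor int64) error {
	url := "wss://spindle.example.com/events?cursor=" + strconv.FormatInt(lastCursor, 10)
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		return err
	}
	defer conn.Close()

	for {
		var ev struct {
			Rkey    string `json:"rkey"`
			Nsid    string `json:"nsid"`
			Event   string `json:"event"` // raw sh.tangled.pipeline.status JSON
			Created int64  `json:"created"`
		}
		if err := conn.ReadJSON(&ev); err != nil {
			return err
		}
		fmt.Println(ev.Nsid, ev.Event)
		lastCursor = ev.Created // persist this to resume after a disconnect
	}
}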
+1 -1
spindle/tid.go → tid/tid.go
···
-package spindle
import "github.com/bluesky-social/indigo/atproto/syntax"
···
+package tid
import "github.com/bluesky-social/indigo/atproto/syntax"