Compare changes

Changed files (+544 -77): api, appview, cmd, docs, spindle, knotserver, config, lexicons, pulls, nix
+29 -1
appview/models/notifications.go
···
 package models
-import "time"
+import (
+    "time"
+)
 type NotificationType string
···
     PullId *int64
 }
+// lucide icon that represents this notification
+func (n *Notification) Icon() string {
+    switch n.Type {
+    case NotificationTypeRepoStarred:
+        return "star"
+    case NotificationTypeIssueCreated:
+        return "circle-dot"
+    case NotificationTypeIssueCommented:
+        return "message-square"
+    case NotificationTypeIssueClosed:
+        return "ban"
+    case NotificationTypePullCreated:
+        return "git-pull-request-create"
+    case NotificationTypePullCommented:
+        return "message-square"
+    case NotificationTypePullMerged:
+        return "git-merge"
+    case NotificationTypePullClosed:
+        return "git-pull-request-closed"
+    case NotificationTypeFollowed:
+        return "user-plus"
+    default:
+        return ""
+    }
+}
+
 type NotificationWithEntity struct {
     *Notification
     Repo *Repo
+18 -13
appview/db/notifications.go
···
 import (
     "context"
     "database/sql"
+    "errors"
     "fmt"
+    "strings"
     "time"
     "tangled.org/core/appview/models"
···
     return GetNotificationsPaginated(e, pagination.FirstPage(), filters...)
 }
-func (d *DB) GetUnreadNotificationCount(ctx context.Context, userDID string) (int, error) {
-    recipientFilter := FilterEq("recipient_did", userDID)
-    readFilter := FilterEq("read", 0)
+func CountNotifications(e Execer, filters ...filter) (int64, error) {
+    var conditions []string
+    var args []any
+    for _, filter := range filters {
+        conditions = append(conditions, filter.Condition())
+        args = append(args, filter.Arg()...)
+    }
-    query := fmt.Sprintf(`
-        SELECT COUNT(*)
-        FROM notifications
-        WHERE %s AND %s
-    `, recipientFilter.Condition(), readFilter.Condition())
+    whereClause := ""
+    if conditions != nil {
+        whereClause = " where " + strings.Join(conditions, " and ")
+    }
-    args := append(recipientFilter.Arg(), readFilter.Arg()...)
+    query := fmt.Sprintf(`select count(1) from notifications %s`, whereClause)
+    var count int64
+    err := e.QueryRow(query, args...).Scan(&count)
-    var count int
-    err := d.DB.QueryRowContext(ctx, query, args...).Scan(&count)
-    if err != nil {
-        return 0, fmt.Errorf("failed to get unread count: %w", err)
+    if !errors.Is(err, sql.ErrNoRows) && err != nil {
+        return 0, err
     }
     return count, nil
+29 -36
appview/notifications/notifications.go
···
 package notifications
 import (
+    "fmt"
     "log"
     "net/http"
     "strconv"
···
     r.Use(middleware.AuthMiddleware(n.oauth))
-    r.Get("/", n.notificationsPage)
+    r.With(middleware.Paginate).Get("/", n.notificationsPage)
     r.Get("/count", n.getUnreadCount)
     r.Post("/{id}/read", n.markRead)
···
 func (n *Notifications) notificationsPage(w http.ResponseWriter, r *http.Request) {
     userDid := n.oauth.GetDid(r)
-    limitStr := r.URL.Query().Get("limit")
-    offsetStr := r.URL.Query().Get("offset")
-
-    limit := 20 // default
-    if limitStr != "" {
-        if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 {
-            limit = l
-        }
+    page, ok := r.Context().Value("page").(pagination.Page)
+    if !ok {
+        log.Println("failed to get page")
+        page = pagination.FirstPage()
     }
-    offset := 0 // default
-    if offsetStr != "" {
-        if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 {
-            offset = o
-        }
+    total, err := db.CountNotifications(
+        n.db,
+        db.FilterEq("recipient_did", userDid),
+    )
+    if err != nil {
+        log.Println("failed to get total notifications:", err)
+        n.pages.Error500(w)
+        return
     }
-    page := pagination.Page{Limit: limit + 1, Offset: offset}
-    notifications, err := db.GetNotificationsWithEntities(n.db, page, db.FilterEq("recipient_did", userDid))
+    notifications, err := db.GetNotificationsWithEntities(
+        n.db,
+        page,
+        db.FilterEq("recipient_did", userDid),
+    )
     if err != nil {
         log.Println("failed to get notifications:", err)
         n.pages.Error500(w)
         return
     }
-    hasMore := len(notifications) > limit
-    if hasMore {
-        notifications = notifications[:limit]
-    }
-
     err = n.db.MarkAllNotificationsRead(r.Context(), userDid)
     if err != nil {
         log.Println("failed to mark notifications as read:", err)
···
         return
     }
-    params := pages.NotificationsParams{
+    fmt.Println(n.pages.Notifications(w, pages.NotificationsParams{
         LoggedInUser:  user,
         Notifications: notifications,
         UnreadCount:   unreadCount,
-        HasMore:       hasMore,
-        NextOffset:    offset + limit,
-        Limit:         limit,
-    }
-
-    err = n.pages.Notifications(w, params)
-    if err != nil {
-        log.Println("failed to load notifs:", err)
-        n.pages.Error500(w)
-        return
-    }
+        Page:          page,
+        Total:         total,
+    }))
 }
 func (n *Notifications) getUnreadCount(w http.ResponseWriter, r *http.Request) {
-    userDid := n.oauth.GetDid(r)
-
-    count, err := n.db.GetUnreadNotificationCount(r.Context(), userDid)
+    user := n.oauth.GetUser(r)
+    count, err := db.CountNotifications(
+        n.db,
+        db.FilterEq("recipient_did", user.Did),
+        db.FilterEq("read", 0),
+    )
     if err != nil {
         http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
         return
+3 -4
appview/pages/pages.go
···
     LoggedInUser  *oauth.User
     Notifications []*models.NotificationWithEntity
     UnreadCount   int
-    HasMore       bool
-    NextOffset    int
-    Limit         int
+    Page          pagination.Page
+    Total         int64
 }
 func (p *Pages) Notifications(w io.Writer, params NotificationsParams) error {
···
 }
 type NotificationCountParams struct {
-    Count int
+    Count int64
 }
 func (p *Pages) NotificationCount(w io.Writer, params NotificationCountParams) error {
+1 -1
appview/pagination/page.go
···
 func FirstPage() Page {
     return Page{
         Offset: 0,
-        Limit:  10,
+        Limit:  30,
     }
 }
+1 -1
nix/pkgs/knot-unwrapped.nix
···
   sqlite-lib,
   src,
 }: let
-  version = "1.9.0-alpha";
+  version = "1.9.1-alpha";
 in
   buildGoApplication {
     pname = "knot";
+1 -1
appview/pages/templates/layouts/fragments/footer.html
···
     <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a>
     <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a>
     <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a>
-    <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a>
+    <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a>
 </div>
 <div class="flex flex-col gap-1">
+1 -1
appview/signup/signup.go
···
     noticeId := "signup-msg"
     if err := s.validateCaptcha(cfToken, r); err != nil {
-        s.l.Warn("turnstile validation failed", "error", err)
+        s.l.Warn("turnstile validation failed", "error", err, "email", emailId)
         s.pages.Notice(w, noticeId, "Captcha validation failed.")
         return
     }
+1 -1
appview/pages/templates/repo/fragments/cloneDropdown.html
···
 {{ define "repo/fragments/cloneDropdown" }}
     {{ $knot := .RepoInfo.Knot }}
     {{ if eq $knot "knot1.tangled.sh" }}
-        {{ $knot = "tangled.sh" }}
+        {{ $knot = "tangled.org" }}
     {{ end }}
     <details id="clone-dropdown" class="relative inline-block text-left group">
+1 -1
docs/spindle/pipeline.md
···
 - `manual`: The workflow can be triggered manually.
 - `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
-For example, if you'd like define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
+For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
 ```yaml
 when:
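An aside, not part of the diff: to make that sentence concrete, here is a minimal sketch of the kind of `when` block it describes. The file's own example is cut off by the diff context, so the exact entry shape (the `event`/`branch` keys and list syntax) is an assumption rather than something taken from this change:

```yaml
# hypothetical trigger block: run on pushes to main/develop,
# on pull requests targeting main, or when started manually
when:
  - event: ["push"]
    branch: ["main", "develop"]
  - event: ["pull_request"]
    branch: ["main"]
  - event: ["manual"]
```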
+1 -1
knotserver/config/config.go
···
     Repo   Repo   `env:",prefix=KNOT_REPO_"`
     Server Server `env:",prefix=KNOT_SERVER_"`
     Git    Git    `env:",prefix=KNOT_GIT_"`
-    AppViewEndpoint string `env:"APPVIEW_ENDPOINT, default=https://tangled.sh"`
+    AppViewEndpoint string `env:"APPVIEW_ENDPOINT, default=https://tangled.org"`
 }
 func Load(ctx context.Context) (*Config, error) {
+3
appview/pages/templates/layouts/base.html
···
     <link rel="preconnect" href="https://avatar.tangled.sh" />
     <link rel="preconnect" href="https://camo.tangled.sh" />
+    <!-- pwa manifest -->
+    <link rel="manifest" href="/pwa-manifest.json" />
+    <!-- preload main font -->
     <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
+1
appview/pages/templates/user/completeSignup.html
···
     content="complete your signup for tangled"
   />
   <script src="/static/htmx.min.js"></script>
+  <link rel="manifest" href="/pwa-manifest.json" />
   <link
     rel="stylesheet"
     href="/static/tw.css?{{ cssContentHash }}"
+1
appview/pages/templates/user/login.html
···
   <meta property="og:url" content="https://tangled.org/login" />
   <meta property="og:description" content="login to for tangled" />
   <script src="/static/htmx.min.js"></script>
+  <link rel="manifest" href="/pwa-manifest.json" />
   <link rel="stylesheet" href="/static/tw.css?{{ cssContentHash }}" type="text/css" />
   <title>login &middot; tangled</title>
 </head>
+1
appview/pages/templates/user/signup.html
···
   <meta property="og:url" content="https://tangled.org/signup" />
   <meta property="og:description" content="sign up for tangled" />
   <script src="/static/htmx.min.js"></script>
+  <link rel="manifest" href="/pwa-manifest.json" />
   <link rel="stylesheet" href="/static/tw.css?{{ cssContentHash }}" type="text/css" />
   <title>sign up &middot; tangled</title>
+1
appview/state/router.go
···
     router.Use(middleware.TryRefreshSession())
     router.Get("/favicon.svg", s.Favicon)
     router.Get("/favicon.ico", s.Favicon)
+    router.Get("/pwa-manifest.json", s.PWAManifest)
     userRouter := s.UserRouter(&middleware)
     standardRouter := s.StandardRouter(&middleware)
+23
appview/state/state.go
···
     s.pages.Favicon(w)
 }
+// https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Manifest
+const manifestJson = `{
+    "name": "tangled",
+    "description": "tightly-knit social coding.",
+    "icons": [
+        {
+            "src": "/favicon.svg",
+            "sizes": "144x144"
+        }
+    ],
+    "start_url": "/",
+    "id": "org.tangled",
+
+    "display": "standalone",
+    "background_color": "#111827",
+    "theme_color": "#111827"
+}`
+
+func (p *State) PWAManifest(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set("Content-Type", "application/json")
+    w.Write([]byte(manifestJson))
+}
+
 func (s *State) TermsOfService(w http.ResponseWriter, r *http.Request) {
     user := s.oauth.GetUser(r)
     s.pages.TermsOfService(w, pages.TermsOfServiceParams{
+34
appview/db/language.go
···
 package db
 import (
+    "database/sql"
     "fmt"
     "strings"
+    "github.com/bluesky-social/indigo/atproto/syntax"
     "tangled.org/core/appview/models"
 )
···
     return nil
 }
+
+func DeleteRepoLanguages(e Execer, filters ...filter) error {
+    var conditions []string
+    var args []any
+    for _, filter := range filters {
+        conditions = append(conditions, filter.Condition())
+        args = append(args, filter.Arg()...)
+    }
+
+    whereClause := ""
+    if conditions != nil {
+        whereClause = " where " + strings.Join(conditions, " and ")
+    }
+
+    query := fmt.Sprintf(`delete from repo_languages %s`, whereClause)
+
+    _, err := e.Exec(query, args...)
+    return err
+}
+
+func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error {
+    err := DeleteRepoLanguages(
+        tx,
+        FilterEq("repo_at", repoAt),
+        FilterEq("ref", ref),
+    )
+    if err != nil {
+        return fmt.Errorf("failed to delete existing languages: %w", err)
+    }
+
+    return InsertRepoLanguages(tx, langs)
+}
+12 -1
appview/repo/index.go
···
         })
     }
+    tx, err := rp.db.Begin()
+    if err != nil {
+        return nil, err
+    }
+    defer tx.Rollback()
+    // update appview's cache
-    err = db.InsertRepoLanguages(rp.db, langs)
+    err = db.UpdateRepoLanguages(tx, f.RepoAt(), currentRef, langs)
     if err != nil {
         // non-fatal
         log.Println("failed to cache lang results", err)
     }
+
+    err = tx.Commit()
+    if err != nil {
+        return nil, err
+    }
 }
 var total int64
+14 -1
appview/state/knotstream.go
···
     })
 }
-    return db.InsertRepoLanguages(d, langs)
+    tx, err := d.Begin()
+    if err != nil {
+        return err
+    }
+    defer tx.Rollback()
+
+    // update appview's cache
+    err = db.UpdateRepoLanguages(tx, repo.RepoAt(), ref.Short(), langs)
+    if err != nil {
+        fmt.Printf("failed; %s\n", err)
+        // non-fatal
+    }
+
+    return tx.Commit()
 }
 func ingestPipeline(d *db.DB, source ec.Source, msg ec.Message) error {
+202 -1
api/tangled/cbor_gen.go
···
     cw := cbg.NewCborWriter(w)
-    fieldCount := 7
+    fieldCount := 8
     if t.Body == nil {
         fieldCount--
···
         fieldCount--
+    if t.StackInfo == nil {
+        fieldCount--
+    }
+
     if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
         return err
···
     if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
         return err
+
+    // t.StackInfo (tangled.RepoPull_StackInfo) (struct)
+    if t.StackInfo != nil {
+
+        if len("stackInfo") > 1000000 {
+            return xerrors.Errorf("Value in field \"stackInfo\" was too long")
+        }
+
+        if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("stackInfo"))); err != nil {
+            return err
+        }
+        if _, err := cw.WriteString(string("stackInfo")); err != nil {
+            return err
+        }
+
+        if err := t.StackInfo.MarshalCBOR(cw); err != nil {
+            return err
+        }
+    }
     return nil
···
         t.CreatedAt = string(sval)
+        // t.StackInfo (tangled.RepoPull_StackInfo) (struct)
+    case "stackInfo":
+
+        {
+
+            b, err := cr.ReadByte()
+            if err != nil {
+                return err
+            }
+            if b != cbg.CborNull[0] {
+                if err := cr.UnreadByte(); err != nil {
+                    return err
+                }
+                t.StackInfo = new(RepoPull_StackInfo)
+                if err := t.StackInfo.UnmarshalCBOR(cr); err != nil {
+                    return xerrors.Errorf("unmarshaling t.StackInfo pointer: %w", err)
+                }
+            }
+
+        }
     default:
         // Field doesn't exist on this type, so ignore it
···
     return nil
+func (t *RepoPull_StackInfo) MarshalCBOR(w io.Writer) error {
+    if t == nil {
+        _, err := w.Write(cbg.CborNull)
+        return err
+    }
+
+    cw := cbg.NewCborWriter(w)
+    fieldCount := 2
+
+    if t.Parent == nil {
+        fieldCount--
+    }
+
+    if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
+        return err
+    }
+
+    // t.Parent (string) (string)
+    if t.Parent != nil {
+
+        if len("parent") > 1000000 {
+            return xerrors.Errorf("Value in field \"parent\" was too long")
+        }
+
+        if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("parent"))); err != nil {
+            return err
+        }
+        if _, err := cw.WriteString(string("parent")); err != nil {
+            return err
+        }
+
+        if t.Parent == nil {
+            if _, err := cw.Write(cbg.CborNull); err != nil {
+                return err
+            }
+        } else {
+            if len(*t.Parent) > 1000000 {
+                return xerrors.Errorf("Value in field t.Parent was too long")
+            }
+
+            if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Parent))); err != nil {
+                return err
+            }
+            if _, err := cw.WriteString(string(*t.Parent)); err != nil {
+                return err
+            }
+        }
+    }
+
+    // t.ChangeId (string) (string)
+    if len("changeId") > 1000000 {
+        return xerrors.Errorf("Value in field \"changeId\" was too long")
+    }
+
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("changeId"))); err != nil {
+        return err
+    }
+    if _, err := cw.WriteString(string("changeId")); err != nil {
+        return err
+    }
+
+    if len(t.ChangeId) > 1000000 {
+        return xerrors.Errorf("Value in field t.ChangeId was too long")
+    }
+
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChangeId))); err != nil {
+        return err
+    }
+    if _, err := cw.WriteString(string(t.ChangeId)); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (t *RepoPull_StackInfo) UnmarshalCBOR(r io.Reader) (err error) {
+    *t = RepoPull_StackInfo{}
+
+    cr := cbg.NewCborReader(r)
+
+    maj, extra, err := cr.ReadHeader()
+    if err != nil {
+        return err
+    }
+    defer func() {
+        if err == io.EOF {
+            err = io.ErrUnexpectedEOF
+        }
+    }()
+
+    if maj != cbg.MajMap {
+        return fmt.Errorf("cbor input should be of type map")
+    }
+
+    if extra > cbg.MaxLength {
+        return fmt.Errorf("RepoPull_StackInfo: map struct too large (%d)", extra)
+    }
+
+    n := extra
+
+    nameBuf := make([]byte, 8)
+    for i := uint64(0); i < n; i++ {
+        nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+        if err != nil {
+            return err
+        }
+
+        if !ok {
+            // Field doesn't exist on this type, so ignore it
+            if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+                return err
+            }
+            continue
+        }
+
+        switch string(nameBuf[:nameLen]) {
+        // t.Parent (string) (string)
+        case "parent":
+
+            {
+                b, err := cr.ReadByte()
+                if err != nil {
+                    return err
+                }
+                if b != cbg.CborNull[0] {
+                    if err := cr.UnreadByte(); err != nil {
+                        return err
+                    }
+
+                    sval, err := cbg.ReadStringWithMax(cr, 1000000)
+                    if err != nil {
+                        return err
+                    }
+
+                    t.Parent = (*string)(&sval)
+                }
+            }
+        // t.ChangeId (string) (string)
+        case "changeId":
+
+            {
+                sval, err := cbg.ReadStringWithMax(cr, 1000000)
+                if err != nil {
+                    return err
+                }
+
+                t.ChangeId = string(sval)
+            }
+
+        default:
+            // Field doesn't exist on this type, so ignore it
+            if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+                return err
+            }
+        }
+    }
+
+    return nil
+}
 func (t *RepoPull_Source) MarshalCBOR(w io.Writer) error {
     if t == nil {
         _, err := w.Write(cbg.CborNull)
+16 -7
api/tangled/repopull.go
···
 } //
 // RECORDTYPE: RepoPull
 type RepoPull struct {
-    LexiconTypeID string           `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
-    Body          *string          `json:"body,omitempty" cborgen:"body,omitempty"`
-    CreatedAt     string           `json:"createdAt" cborgen:"createdAt"`
-    Patch         string           `json:"patch" cborgen:"patch"`
-    Source        *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
-    Target        *RepoPull_Target `json:"target" cborgen:"target"`
-    Title         string           `json:"title" cborgen:"title"`
+    LexiconTypeID string              `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
+    Body          *string             `json:"body,omitempty" cborgen:"body,omitempty"`
+    CreatedAt     string              `json:"createdAt" cborgen:"createdAt"`
+    Patch         string              `json:"patch" cborgen:"patch"`
+    Source        *RepoPull_Source    `json:"source,omitempty" cborgen:"source,omitempty"`
+    StackInfo     *RepoPull_StackInfo `json:"stackInfo,omitempty" cborgen:"stackInfo,omitempty"`
+    Target        *RepoPull_Target    `json:"target" cborgen:"target"`
+    Title         string              `json:"title" cborgen:"title"`
 }
 // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
···
     Sha string `json:"sha" cborgen:"sha"`
 }
+// RepoPull_StackInfo is a "stackInfo" in the sh.tangled.repo.pull schema.
+type RepoPull_StackInfo struct {
+    // changeId: Change ID of this commit/change.
+    ChangeId string `json:"changeId" cborgen:"changeId"`
+    // parent: AT-URI of the PR for the parent commit/change in the change stack.
+    Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"`
+}
+
 // RepoPull_Target is a "target" in the sh.tangled.repo.pull schema.
 type RepoPull_Target struct {
     Branch string `json:"branch" cborgen:"branch"`
+7
appview/db/db.go
···
     })
     conn.ExecContext(ctx, "pragma foreign_keys = on;")
+    runMigration(conn, "add-parent-at-for-stacks-to-pulls", func(tx *sql.Tx) error {
+        _, err := tx.Exec(`
+            alter table pulls add column parent_at text;
+        `)
+        return err
+    })
+
     return &DB{db}, nil
+17 -4
appview/db/pulls.go
···
         }
     }
-    var stackId, changeId, parentChangeId *string
+    var stackId, changeId, parentAt, parentChangeId *string
     if pull.StackId != "" {
         stackId = &pull.StackId
     }
     if pull.ChangeId != "" {
         changeId = &pull.ChangeId
     }
+    if pull.ParentAt != nil {
+        parentAt = (*string)(pull.ParentAt)
+    }
     if pull.ParentChangeId != "" {
         parentChangeId = &pull.ParentChangeId
     }
···
     result, err := tx.Exec(
         `
         insert into pulls (
-            repo_at, owner_did, pull_id, title, target_branch, body, rkey, state, source_branch, source_repo_at, stack_id, change_id, parent_change_id
+            repo_at, owner_did, pull_id, title, target_branch, body, rkey, state, source_branch, source_repo_at, stack_id, change_id, parent_at, parent_change_id
         )
-        values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+        values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
         pull.RepoAt,
         pull.OwnerDid,
         pull.PullId,
···
         sourceRepoAt,
         stackId,
         changeId,
+        parentAt,
         parentChangeId,
     )
     if err != nil {
···
         source_repo_at,
         stack_id,
         change_id,
+        parent_at,
         parent_change_id
     from
         pulls
···
     for rows.Next() {
         var pull models.Pull
         var createdAt string
-        var sourceBranch, sourceRepoAt, stackId, changeId, parentChangeId sql.NullString
+        var sourceBranch, sourceRepoAt, stackId, changeId, parentAt, parentChangeId sql.NullString
         err := rows.Scan(
             &pull.ID,
             &pull.OwnerDid,
···
             &sourceRepoAt,
             &stackId,
             &changeId,
+            &parentAt,
             &parentChangeId,
         )
         if err != nil {
···
         if changeId.Valid {
             pull.ChangeId = changeId.String
         }
+        if parentAt.Valid {
+            parentAtParsed, err := syntax.ParseATURI(parentAt.String)
+            if err != nil {
+                return nil, err
+            }
+            pull.ParentAt = &parentAtParsed
+        }
         if parentChangeId.Valid {
             pull.ParentChangeId = parentChangeId.String
         }
+5
appview/models/pull.go
···
     // stacking
     StackId        string // nullable string
     ChangeId       string // nullable string
+    ParentAt       *syntax.ATURI
     ParentChangeId string // nullable string
     // meta
···
         },
         Patch:  p.LatestPatch(),
         Source: source,
+        StackInfo: &tangled.RepoPull_StackInfo{
+            ChangeId: p.ChangeId,
+            Parent:   (*string)(p.ParentAt),
+        },
     }
     return record
 }
+24 -2
appview/pulls/pulls.go
···
"github.com/bluekeyes/go-gitdiff/gitdiff"
     comatproto "github.com/bluesky-social/indigo/api/atproto"
+    "github.com/bluesky-social/indigo/atproto/syntax"
     lexutil "github.com/bluesky-social/indigo/lex/util"
     indigoxrpc "github.com/bluesky-social/indigo/xrpc"
     "github.com/go-chi/chi/v5"
···
     newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId)
     if err != nil {
         log.Println("failed to create resubmitted stack", err)
-        s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
+        s.pages.Notice(w, "pull-resubmit-error", "Failed to merge pull request. Try again later.")
         return
     // find the diff between the stacks, first, map them by changeId
+
     origById := make(map[string]*models.Pull)
     newById := make(map[string]*models.Pull)
+    chIdToAtUri := make(map[string]*syntax.ATURI)
+
     for _, p := range origStack {
         origById[p.ChangeId] = p
+
+        // build map from change id to existing at-uris (ignore error as it shouldn't be possible here)
+        pAtUri, _ := syntax.ParseATURI(fmt.Sprintf("at://%s/%s/%s", user.Did, tangled.RepoPullNSID, p.Rkey))
+        chIdToAtUri[p.ChangeId] = &pAtUri
     for _, p := range newStack {
+        // if the change id has already been given a PR, use its at-uri instead of the newly created
+        // (and thus incorrect) one made by newStack
+        if ppAt, ok := chIdToAtUri[p.ParentChangeId]; ok {
+            p.ParentAt = ppAt
+        }
+
         newById[p.ChangeId] = p
···
     // we still need to update the hash in submission.Patch and submission.SourceRev
     if patchutil.Equal(newFiles, origFiles) &&
         origHeader.Title == newHeader.Title &&
-        origHeader.Body == newHeader.Body {
+        origHeader.Body == newHeader.Body &&
+        op.ParentChangeId == np.ParentChangeId {
         unchanged[op.ChangeId] = struct{}{}
     } else {
         updated[op.ChangeId] = struct{}{}
···
     record := op.AsRecord()
     record.Patch = submission.Patch
+    record.StackInfo.Parent = (*string)(np.ParentAt)
     writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
         RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{
···
     // the stack is identified by a UUID
     var stack models.Stack
     parentChangeId := ""
+    var parentAt *syntax.ATURI = nil
     for _, fp := range formatPatches {
         // all patches must have a jj change-id
         changeId, err := fp.ChangeId()
···
         StackId:        stackId,
         ChangeId:       changeId,
+        ParentAt:       parentAt,
         ParentChangeId: parentChangeId,
     stack = append(stack, &pull)
     parentChangeId = changeId
+    // this is a bit of an ugly way to create the AT-URI, but it's the best we can do with the data flow here
+    // (ignore error as it shouldn't be possible here)
+    parsedParentAt, _ := syntax.ParseATURI(fmt.Sprintf("at://%s/%s/%s", user.Did, tangled.RepoPullNSID, pull.Rkey))
+    parentAt = &parsedParentAt
     return stack, nil
+1
cmd/gen.go
···
     tangled.RepoIssueState{},
     tangled.RepoPull{},
     tangled.RepoPullComment{},
+    tangled.RepoPull_StackInfo{},
     tangled.RepoPull_Source{},
     tangled.RepoPullStatus{},
     tangled.RepoPull_Target{},
+96
lexicons/pulls/round.json
···
+{
+  "lexicon": 1,
+  "id": "sh.tangled.repo.pull.round",
+  "needsCbor": true,
+  "needsType": true,
+  "defs": {
+    "main": {
+      "type": "record",
+      "key": "tid",
+      "record": {
+        "type": "object",
+        "required": [
+          "pull",
+          "patch",
+          "sourceInfo",
+          "createdAt"
+        ],
+        "properties": {
+          "pull": {
+            "type": "string",
+            "format": "at-uri"
+          },
+          "prevRound": {
+            "type": "ref",
+            "ref": "com.atproto.repo.strongRef"
+          },
+          "patch": {
+            "type": "string",
+            "description": "A patch describing this change. Either gotten directly from the user (patch-based PR) or from the knot based on a commit from another repo. The source of the patch and it's potential details are described by sourceInfo"
+          },
+          "sourceInfo": {
+            "type": "union",
+            "refs": [
+              "#patchSourceInfo",
+              "#commitSourceInfo"
+            ]
+          },
+          "stackInfo": {
+            "type": "ref",
+            "ref": "#stackInfo"
+          },
+          "comment": {
+            "type": "string"
+          },
+          "createdAt": {
+            "type": "string",
+            "format": "datetime"
+          }
+        }
+      }
+    },
+    "patchSourceInfo": {
+      "type": "object",
+      "properties": {}
+    },
+    "commitSourceInfo": {
+      "type": "object",
+      "required": [
+        "repo",
+        "branch",
+        "sha"
+      ],
+      "properties": {
+        "repo": {
+          "type": "string",
+          "format": "uri"
+        },
+        "branch": {
+          "type": "string"
+        },
+        "sha": {
+          "type": "string",
+          "minLength": 40,
+          "maxLength": 40
+        }
+      }
+    },
+    "stackInfo": {
+      "type": "object",
+      "required": [
+        "changeId"
+      ],
+      "properties": {
+        "changeId": {
+          "type": "string",
+          "description": "Change ID of this commit/change."
+        },
+        "parent": {
+          "type": "string",
+          "description": "AT-URI of the PR for the parent commit/change in the change stack.",
+          "format": "at-uri"
+        }
+      }
+    }
+  }
+}