API: implement ticket { subscription }

Drew DeVault 2021-02-16 09:26:40 -05:00
parent f518bf2904
commit 78b187bb75
5 changed files with 292 additions and 2 deletions

View File

@@ -340,7 +340,9 @@ func (r *ticketResolver) Events(ctx context.Context, obj *model.Ticket, cursor *
}
func (r *ticketResolver) Subscription(ctx context.Context, obj *model.Ticket) (*model.TicketSubscription, error) {
panic(fmt.Errorf("not implemented"))
// Regarding unsafe: if they have access to this ticket resource, they were
// already authenticated for it.
return loaders.ForContext(ctx).SubsByTicketIDUnsafe.Load(obj.PKID)
}
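The resolver delegates to the per-request dataloader added below rather than querying the database itself; the Unsafe suffix marks a loader that does no authorization of its own, which is safe here because this resolver can only be reached through a ticket the caller was already allowed to read. A minimal sketch of the calling convention, using a hypothetical helper that is not part of the commit:

// Hypothetical sketch: any resolver that has already authorized access to
// a ticket can reuse the unsafe loader directly.
func subscriptionFor(ctx context.Context, ticket *model.Ticket) (*model.TicketSubscription, error) {
	sub, err := loaders.ForContext(ctx).SubsByTicketIDUnsafe.Load(ticket.PKID)
	if err != nil {
		return nil, err
	}
	// sub is nil when the authenticated user is not subscribed to this
	// ticket; the GraphQL field then resolves to null.
	return sub, nil
}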
func (r *ticketResolver) ACL(ctx context.Context, obj *model.Ticket) (model.ACL, error) {

View File

@@ -9,6 +9,7 @@ package loaders
//go:generate ./gen CommentsByIDLoader int api/graph/model.Comment
//go:generate go run github.com/vektah/dataloaden ParticipantsByIDLoader int git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model.Entity
//go:generate ./gen LabelsByIDLoader int api/graph/model.Label
//go:generate ./gen SubsByTicketIDLoader int api/graph/model.TicketSubscription
import (
"context"
@@ -42,7 +43,8 @@ type Loaders struct {
ParticipantsByID ParticipantsByIDLoader
LabelsByID LabelsByIDLoader
CommentsByIDUnsafe CommentsByIDLoader
SubsByTicketIDUnsafe SubsByTicketIDLoader
}
func fetchUsersByID(ctx context.Context) func(ids []int) ([]*model.User, []error) {
@@ -598,6 +600,61 @@ func fetchLabelsByID(ctx context.Context) func(ids []int) ([]*model.Label, []err
}
}
func fetchSubsByTicketIDUnsafe(ctx context.Context) func(ids []int) ([]*model.TicketSubscription, []error) {
return func(ids []int) ([]*model.TicketSubscription, []error) {
subs := make([]*model.TicketSubscription, len(ids))
if err := database.WithTx(ctx, &sql.TxOptions{
Isolation: 0,
ReadOnly: true,
}, func(tx *sql.Tx) error {
var (
err error
rows *sql.Rows
)
query := database.
Select(ctx, (&model.SubscriptionInfo{}).As(`sub`)).
Column(`sub.ticket_id`).
From(`ticket_subscription sub`).
Join(`participant p ON p.id = sub.participant_id`).
Where(`p.user_id = ? AND sub.ticket_id = ANY(?)`,
auth.ForContext(ctx).UserID, pq.Array(ids))
if rows, err = query.RunWith(tx).QueryContext(ctx); err != nil {
return err
}
defer rows.Close()
subsByTicketID := map[int]*model.TicketSubscription{}
for rows.Next() {
var ticketID int
si := model.SubscriptionInfo{}
if err := rows.Scan(append(database.Scan(
ctx, &si), &ticketID)...); err != nil {
return err
}
subsByTicketID[ticketID] = &model.TicketSubscription{
ID: si.ID,
Created: si.Created,
TicketID: ticketID,
}
}
if err = rows.Err(); err != nil {
return err
}
for i, id := range ids {
subs[i] = subsByTicketID[id]
}
return nil
}); err != nil {
panic(err)
}
return subs, nil
}
}
func Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), loadersCtxKey, &Loaders{
@@ -646,6 +703,11 @@ func Middleware(next http.Handler) http.Handler {
wait: 1 * time.Millisecond,
fetch: fetchLabelsByID(r.Context()),
},
SubsByTicketIDUnsafe: SubsByTicketIDLoader{
maxBatch: 100,
wait: 1 * time.Millisecond,
fetch: fetchSubsByTicketIDUnsafe(r.Context()),
},
})
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
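The fetch callback above follows the dataloaden contract: ticket IDs accumulate for up to 1ms or 100 keys (per the Middleware configuration), one user-scoped ANY(...) query serves the whole batch, and the result slice is aligned index-for-index with the input ids, with nil for tickets the user is not subscribed to. A rough sketch of the effect, with a hypothetical helper and made-up ticket IDs:

// Sketch: Load/LoadAll calls made while resolving one GraphQL request
// coalesce into a single batched query instead of one query per ticket.
func logSubscriptions(ctx context.Context, ticketIDs []int) {
	loader := loaders.ForContext(ctx).SubsByTicketIDUnsafe
	subs, errs := loader.LoadAll(ticketIDs)
	for i, sub := range subs {
		if errs[i] != nil || sub == nil {
			continue // failed, or no subscription for this ticket
		}
		log.Printf("ticket %d: subscription %d", sub.TicketID, sub.ID)
	}
}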

View File

@@ -0,0 +1,224 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// SubsByTicketIDLoaderConfig captures the config to create a new SubsByTicketIDLoader
type SubsByTicketIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.TicketSubscription, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewSubsByTicketIDLoader creates a new SubsByTicketIDLoader given a fetch, wait, and maxBatch
func NewSubsByTicketIDLoader(config SubsByTicketIDLoaderConfig) *SubsByTicketIDLoader {
return &SubsByTicketIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// SubsByTicketIDLoader batches and caches requests
type SubsByTicketIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.TicketSubscription, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.TicketSubscription
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *subsByTicketIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type subsByTicketIDLoaderBatch struct {
keys []int
data []*model.TicketSubscription
error []error
closing bool
done chan struct{}
}
// Load a TicketSubscription by key, batching and caching will be applied automatically
func (l *SubsByTicketIDLoader) Load(key int) (*model.TicketSubscription, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a TicketSubscription.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTicketIDLoader) LoadThunk(key int) func() (*model.TicketSubscription, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.TicketSubscription, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &subsByTicketIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.TicketSubscription, error) {
<-batch.done
var data *model.TicketSubscription
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *SubsByTicketIDLoader) LoadAll(keys []int) ([]*model.TicketSubscription, []error) {
results := make([]func() (*model.TicketSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
ticketSubscriptions := make([]*model.TicketSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
ticketSubscriptions[i], errors[i] = thunk()
}
return ticketSubscriptions, errors
}
// LoadAllThunk returns a function that when called will block waiting for TicketSubscriptions.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTicketIDLoader) LoadAllThunk(keys []int) func() ([]*model.TicketSubscription, []error) {
results := make([]func() (*model.TicketSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.TicketSubscription, []error) {
ticketSubscriptions := make([]*model.TicketSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
ticketSubscriptions[i], errors[i] = thunk()
}
return ticketSubscriptions, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *SubsByTicketIDLoader) Prime(key int, value *model.TicketSubscription) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *SubsByTicketIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *SubsByTicketIDLoader) unsafeSet(key int, value *model.TicketSubscription) {
if l.cache == nil {
l.cache = map[int]*model.TicketSubscription{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *subsByTicketIDLoaderBatch) keyIndex(l *SubsByTicketIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *subsByTicketIDLoaderBatch) startTimer(l *SubsByTicketIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *subsByTicketIDLoaderBatch) end(l *SubsByTicketIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
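In short, the generated API offers Load (blocking), LoadThunk (queue a key now, block later so several keys share one batch), LoadAll/LoadAllThunk for slices, and Prime/Clear for the per-request cache. A minimal usage sketch, not part of the commit, with hypothetical ticket IDs:

// Sketch: queue two keys before blocking so they share one fetch batch.
func exampleUsage(ctx context.Context) {
	l := NewSubsByTicketIDLoader(SubsByTicketIDLoaderConfig{
		Fetch:    fetchSubsByTicketIDUnsafe(ctx), // assumes a request-scoped ctx
		Wait:     1 * time.Millisecond,
		MaxBatch: 100,
	})
	thunkA := l.LoadThunk(1) // queued, nothing fetched yet
	thunkB := l.LoadThunk(2) // joins the same pending batch
	subA, errA := thunkA()   // blocks until the batch is dispatched
	subB, errB := thunkB()   // already resolved by the same fetch
	_, _, _, _ = subA, errA, subB, errB
}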

View File

@@ -5,6 +5,7 @@
&mdash;
{{ cfg("sr.ht", "site-name") }} todo
</title>
<!-- Ticket ID: {{ticket.id}} -->
{% endblock %}
{% block body %}
<div class="container">

View File

@@ -5,6 +5,7 @@
&mdash;
{{ cfg("sr.ht", "site-name") }} todo
</title>
<!-- Tracker ID: {{tracker.id}} -->
{% endblock %}
{% block body %}
<div class="header-tabbed">