API: overhaul build

Drew DeVault 2022-03-24 14:19:29 +01:00
parent df756bee7c
commit b30051b461
18 changed files with 60 additions and 16179 deletions

.gitignore (vendored, 2 additions)

@@ -15,3 +15,5 @@ overrides/
 .pgp
 build
 api/api
+api/graph/api/generated.go
+api/loaders/*_gen.go

Makefile

@@ -1,3 +1,12 @@
-SRHT_PATH?=/usr/lib/python3.8/site-packages/srht
+SRHT_PATH?=/usr/lib/python3.9/site-packages/srht
 MODULE=listssrht/
 include ${SRHT_PATH}/Makefile
+
+all: api
+
+api:
+	cd api && go generate ./loaders
+	cd api && go generate ./graph
+	cd api && go build
+
+.PHONY: all api

api/go.mod

@@ -6,7 +6,7 @@ require (
 	git.sr.ht/~sircmpwn/core-go v0.0.0-20220113153027-e7ae287d2fec
 	git.sr.ht/~sircmpwn/getopt v1.0.0 // indirect
 	git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9 // indirect
-	github.com/99designs/gqlgen v0.14.0
+	github.com/99designs/gqlgen v0.17.2
 	github.com/Masterminds/squirrel v1.5.2
 	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f // indirect
 	github.com/agnivade/levenshtein v1.1.1 // indirect
@@ -19,13 +19,12 @@
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
 	github.com/lib/pq v1.10.4
-	github.com/matryer/moq v0.2.5 // indirect
+	github.com/matryer/moq v0.2.6 // indirect
 	github.com/mitchellh/mapstructure v1.4.3 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
-	github.com/urfave/cli/v2 v2.3.0 // indirect
-	github.com/vektah/dataloaden v0.3.0 // indirect
-	github.com/vektah/gqlparser/v2 v2.2.0
+	github.com/urfave/cli/v2 v2.4.0 // indirect
+	github.com/vektah/dataloaden v0.3.0
+	github.com/vektah/gqlparser/v2 v2.4.1
 	golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce // indirect
-	golang.org/x/sys v0.0.0-20220111092808-5a964db01320 // indirect
-	golang.org/x/tools v0.1.8 // indirect
+	golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
 )

api/go.sum

@@ -43,6 +43,8 @@ git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9 h1:Ahny8Ud1LjVMMA
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA=
github.com/99designs/gqlgen v0.14.0 h1:Wg8aNYQUjMR/4v+W3xD+7SizOy6lSvVeQ06AobNQAXI=
github.com/99designs/gqlgen v0.14.0/go.mod h1:S7z4boV+Nx4VvzMUpVrY/YuHjFX4n7rDyuTqvAkuoRE=
github.com/99designs/gqlgen v0.17.2 h1:yczvlwMsfcVu/JtejqfrLwXuSP0yZFhmcss3caEvHw8=
github.com/99designs/gqlgen v0.17.2/go.mod h1:K5fzLKwtph+FFgh9j7nFbRUdBKvTcGnsta51fsMTn3o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
@@ -206,6 +208,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk=
github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU=
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -224,9 +227,13 @@ github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/matryer/moq v0.2.3/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE=
github.com/matryer/moq v0.2.5 h1:BGQISyhl7Gc9W/gMYmAJONh9mT6AYeyeTjNupNPknMs=
github.com/matryer/moq v0.2.5/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE=
github.com/matryer/moq v0.2.6 h1:X4+LF09udTsi2P+Z+1UhSb4p3K8IyiF7KSNFDR9M3M0=
github.com/matryer/moq v0.2.6/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -234,6 +241,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -308,6 +316,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I=
github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U=
@@ -317,6 +327,9 @@ github.com/vektah/gqlparser v1.3.1 h1:8b0IcD3qZKWJQHSzynbDlrtP3IxVydZ2DZepCGofqf
github.com/vektah/gqlparser v1.3.1/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74=
github.com/vektah/gqlparser/v2 v2.2.0 h1:bAc3slekAAJW6sZTi07aGq0OrfaCjj4jxARAaC7g2EM=
github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4=
github.com/vektah/gqlparser/v2 v2.4.0/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/vektah/gqlparser/v2 v2.4.1 h1:QOyEn8DAPMUMARGMeshKDkDgNmVoEaEGiDB0uWxcSlQ=
github.com/vektah/gqlparser/v2 v2.4.1/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -334,6 +347,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -369,6 +383,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -471,6 +487,8 @@ golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320 h1:0jf+tOCoZ3LyutmCOWpVni1chK4VfFLhRsDK7MhqGRY=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -531,6 +549,9 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -627,6 +648,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=

File diff suppressed because it is too large


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// ACLsByIDLoaderConfig captures the config to create a new ACLsByIDLoader
type ACLsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.MailingListACL, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewACLsByIDLoader creates a new ACLsByIDLoader given a fetch, wait, and maxBatch
func NewACLsByIDLoader(config ACLsByIDLoaderConfig) *ACLsByIDLoader {
return &ACLsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// ACLsByIDLoader batches and caches requests
type ACLsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.MailingListACL, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.MailingListACL
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *aCLsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type aCLsByIDLoaderBatch struct {
keys []int
data []*model.MailingListACL
error []error
closing bool
done chan struct{}
}
// Load a MailingListACL by key, batching and caching will be applied automatically
func (l *ACLsByIDLoader) Load(key int) (*model.MailingListACL, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a MailingListACL.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ACLsByIDLoader) LoadThunk(key int) func() (*model.MailingListACL, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.MailingListACL, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &aCLsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.MailingListACL, error) {
<-batch.done
var data *model.MailingListACL
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ACLsByIDLoader) LoadAll(keys []int) ([]*model.MailingListACL, []error) {
results := make([]func() (*model.MailingListACL, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
mailingListACLs := make([]*model.MailingListACL, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingListACLs[i], errors[i] = thunk()
}
return mailingListACLs, errors
}
// LoadAllThunk returns a function that when called will block waiting for a MailingListACLs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ACLsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.MailingListACL, []error) {
results := make([]func() (*model.MailingListACL, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.MailingListACL, []error) {
mailingListACLs := make([]*model.MailingListACL, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingListACLs[i], errors[i] = thunk()
}
return mailingListACLs, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ACLsByIDLoader) Prime(key int, value *model.MailingListACL) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ACLsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ACLsByIDLoader) unsafeSet(key int, value *model.MailingListACL) {
if l.cache == nil {
l.cache = map[int]*model.MailingListACL{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *aCLsByIDLoaderBatch) keyIndex(l *ACLsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *aCLsByIDLoaderBatch) startTimer(l *ACLsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *aCLsByIDLoaderBatch) end(l *ACLsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
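
Aside: every generated loader deleted in this commit follows the same batch-and-cache pattern, differing only in key and value types. A minimal sketch of how such a loader is wired up, assuming a hypothetical fetch callback; a real implementation must return one result (or error) per key, in key order:

func newACLsByIDLoader(fetch func(keys []int) ([]*model.MailingListACL, []error)) *ACLsByIDLoader {
	return NewACLsByIDLoader(ACLsByIDLoaderConfig{
		Fetch:    fetch,
		Wait:     1 * time.Millisecond, // collect keys briefly so one query serves many resolvers
		MaxBatch: 100,                  // flush early once 100 keys are pending
	})
}

Concurrent Load(id) calls landing within the Wait window are coalesced into a single fetch, and repeat loads of a key are served from the cache.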


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// EmailsByIDLoaderConfig captures the config to create a new EmailsByIDLoader
type EmailsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Email, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewEmailsByIDLoader creates a new EmailsByIDLoader given a fetch, wait, and maxBatch
func NewEmailsByIDLoader(config EmailsByIDLoaderConfig) *EmailsByIDLoader {
return &EmailsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// EmailsByIDLoader batches and caches requests
type EmailsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Email, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Email
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *emailsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type emailsByIDLoaderBatch struct {
keys []int
data []*model.Email
error []error
closing bool
done chan struct{}
}
// Load a Email by key, batching and caching will be applied automatically
func (l *EmailsByIDLoader) Load(key int) (*model.Email, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Email.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EmailsByIDLoader) LoadThunk(key int) func() (*model.Email, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Email, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &emailsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Email, error) {
<-batch.done
var data *model.Email
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *EmailsByIDLoader) LoadAll(keys []int) ([]*model.Email, []error) {
results := make([]func() (*model.Email, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
emails := make([]*model.Email, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
emails[i], errors[i] = thunk()
}
return emails, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Emails.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EmailsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Email, []error) {
results := make([]func() (*model.Email, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Email, []error) {
emails := make([]*model.Email, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
emails[i], errors[i] = thunk()
}
return emails, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *EmailsByIDLoader) Prime(key int, value *model.Email) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *EmailsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *EmailsByIDLoader) unsafeSet(key int, value *model.Email) {
if l.cache == nil {
l.cache = map[int]*model.Email{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *emailsByIDLoaderBatch) keyIndex(l *EmailsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *emailsByIDLoaderBatch) startTimer(l *EmailsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *emailsByIDLoaderBatch) end(l *EmailsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// EmailsByMessageIDLoaderConfig captures the config to create a new EmailsByMessageIDLoader
type EmailsByMessageIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Email, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewEmailsByMessageIDLoader creates a new EmailsByMessageIDLoader given a fetch, wait, and maxBatch
func NewEmailsByMessageIDLoader(config EmailsByMessageIDLoaderConfig) *EmailsByMessageIDLoader {
return &EmailsByMessageIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// EmailsByMessageIDLoader batches and caches requests
type EmailsByMessageIDLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.Email, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.Email
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *emailsByMessageIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type emailsByMessageIDLoaderBatch struct {
keys []string
data []*model.Email
error []error
closing bool
done chan struct{}
}
// Load a Email by key, batching and caching will be applied automatically
func (l *EmailsByMessageIDLoader) Load(key string) (*model.Email, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Email.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EmailsByMessageIDLoader) LoadThunk(key string) func() (*model.Email, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Email, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &emailsByMessageIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Email, error) {
<-batch.done
var data *model.Email
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *EmailsByMessageIDLoader) LoadAll(keys []string) ([]*model.Email, []error) {
results := make([]func() (*model.Email, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
emails := make([]*model.Email, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
emails[i], errors[i] = thunk()
}
return emails, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Emails.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EmailsByMessageIDLoader) LoadAllThunk(keys []string) func() ([]*model.Email, []error) {
results := make([]func() (*model.Email, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Email, []error) {
emails := make([]*model.Email, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
emails[i], errors[i] = thunk()
}
return emails, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *EmailsByMessageIDLoader) Prime(key string, value *model.Email) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *EmailsByMessageIDLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *EmailsByMessageIDLoader) unsafeSet(key string, value *model.Email) {
if l.cache == nil {
l.cache = map[string]*model.Email{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *emailsByMessageIDLoaderBatch) keyIndex(l *EmailsByMessageIDLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *emailsByMessageIDLoaderBatch) startTimer(l *EmailsByMessageIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *emailsByMessageIDLoaderBatch) end(l *EmailsByMessageIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

api/loaders/generate.go (new file, 20 additions)

@@ -0,0 +1,20 @@
+//go:build generate
+// +build generate
+
+package loaders
+
+import (
+	_ "github.com/vektah/dataloaden"
+)
+
+//go:generate ./gen ACLsByIDLoader int api/graph/model.MailingListACL
+//go:generate ./gen EmailsByIDLoader int api/graph/model.Email
+//go:generate ./gen EmailsByMessageIDLoader string api/graph/model.Email
+//go:generate ./gen MailingListsByIDLoader int api/graph/model.MailingList
+//go:generate ./gen MailingListsByNameLoader string api/graph/model.MailingList
+//go:generate ./gen MailingListsByOwnerNameLoader [2]string api/graph/model.MailingList
+//go:generate ./gen PatchsetsByIDLoader int api/graph/model.Patchset
+//go:generate go run github.com/vektah/dataloaden SubscriptionsByIDLoader int git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model.ActivitySubscription
+//go:generate ./gen ThreadsByIDLoader int api/graph/model.Thread
+//go:generate ./gen UsersByIDLoader int api/graph/model.User
+//go:generate ./gen UsersByNameLoader string api/graph/model.User
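
Aside: the generate build tag keeps this file out of normal builds, while the blank import of dataloaden records it as a direct dependency, which is why go.mod above drops its // indirect marker. The ./gen script itself is not part of this diff; judging from the full-form SubscriptionsByIDLoader directive, each short invocation presumably expands to something like:

//go:generate go run github.com/vektah/dataloaden ACLsByIDLoader int git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model.MailingListACL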


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// MailingListsByIDLoaderConfig captures the config to create a new MailingListsByIDLoader
type MailingListsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.MailingList, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewMailingListsByIDLoader creates a new MailingListsByIDLoader given a fetch, wait, and maxBatch
func NewMailingListsByIDLoader(config MailingListsByIDLoaderConfig) *MailingListsByIDLoader {
return &MailingListsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// MailingListsByIDLoader batches and caches requests
type MailingListsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.MailingList, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.MailingList
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *mailingListsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type mailingListsByIDLoaderBatch struct {
keys []int
data []*model.MailingList
error []error
closing bool
done chan struct{}
}
// Load a MailingList by key, batching and caching will be applied automatically
func (l *MailingListsByIDLoader) Load(key int) (*model.MailingList, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a MailingList.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByIDLoader) LoadThunk(key int) func() (*model.MailingList, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.MailingList, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &mailingListsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.MailingList, error) {
<-batch.done
var data *model.MailingList
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *MailingListsByIDLoader) LoadAll(keys []int) ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
// LoadAllThunk returns a function that when called will block waiting for a MailingLists.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.MailingList, []error) {
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *MailingListsByIDLoader) Prime(key int, value *model.MailingList) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *MailingListsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *MailingListsByIDLoader) unsafeSet(key int, value *model.MailingList) {
if l.cache == nil {
l.cache = map[int]*model.MailingList{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *mailingListsByIDLoaderBatch) keyIndex(l *MailingListsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *mailingListsByIDLoaderBatch) startTimer(l *MailingListsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *mailingListsByIDLoaderBatch) end(l *MailingListsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// MailingListsByNameLoaderConfig captures the config to create a new MailingListsByNameLoader
type MailingListsByNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.MailingList, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewMailingListsByNameLoader creates a new MailingListsByNameLoader given a fetch, wait, and maxBatch
func NewMailingListsByNameLoader(config MailingListsByNameLoaderConfig) *MailingListsByNameLoader {
return &MailingListsByNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// MailingListsByNameLoader batches and caches requests
type MailingListsByNameLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.MailingList, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.MailingList
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *mailingListsByNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type mailingListsByNameLoaderBatch struct {
keys []string
data []*model.MailingList
error []error
closing bool
done chan struct{}
}
// Load a MailingList by key, batching and caching will be applied automatically
func (l *MailingListsByNameLoader) Load(key string) (*model.MailingList, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a MailingList.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByNameLoader) LoadThunk(key string) func() (*model.MailingList, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.MailingList, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &mailingListsByNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.MailingList, error) {
<-batch.done
var data *model.MailingList
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *MailingListsByNameLoader) LoadAll(keys []string) ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
// LoadAllThunk returns a function that when called will block waiting for a MailingLists.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByNameLoader) LoadAllThunk(keys []string) func() ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.MailingList, []error) {
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *MailingListsByNameLoader) Prime(key string, value *model.MailingList) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *MailingListsByNameLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *MailingListsByNameLoader) unsafeSet(key string, value *model.MailingList) {
if l.cache == nil {
l.cache = map[string]*model.MailingList{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *mailingListsByNameLoaderBatch) keyIndex(l *MailingListsByNameLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *mailingListsByNameLoaderBatch) startTimer(l *MailingListsByNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *mailingListsByNameLoaderBatch) end(l *MailingListsByNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// MailingListsByOwnerNameLoaderConfig captures the config to create a new MailingListsByOwnerNameLoader
type MailingListsByOwnerNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys [][2]string) ([]*model.MailingList, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewMailingListsByOwnerNameLoader creates a new MailingListsByOwnerNameLoader given a fetch, wait, and maxBatch
func NewMailingListsByOwnerNameLoader(config MailingListsByOwnerNameLoaderConfig) *MailingListsByOwnerNameLoader {
return &MailingListsByOwnerNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// MailingListsByOwnerNameLoader batches and caches requests
type MailingListsByOwnerNameLoader struct {
// this method provides the data for the loader
fetch func(keys [][2]string) ([]*model.MailingList, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[[2]string]*model.MailingList
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *mailingListsByOwnerNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type mailingListsByOwnerNameLoaderBatch struct {
keys [][2]string
data []*model.MailingList
error []error
closing bool
done chan struct{}
}
// Load a MailingList by key, batching and caching will be applied automatically
func (l *MailingListsByOwnerNameLoader) Load(key [2]string) (*model.MailingList, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a MailingList.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByOwnerNameLoader) LoadThunk(key [2]string) func() (*model.MailingList, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.MailingList, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &mailingListsByOwnerNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.MailingList, error) {
<-batch.done
var data *model.MailingList
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *MailingListsByOwnerNameLoader) LoadAll(keys [][2]string) ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
// LoadAllThunk returns a function that when called will block waiting for a MailingLists.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *MailingListsByOwnerNameLoader) LoadAllThunk(keys [][2]string) func() ([]*model.MailingList, []error) {
results := make([]func() (*model.MailingList, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.MailingList, []error) {
mailingLists := make([]*model.MailingList, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
mailingLists[i], errors[i] = thunk()
}
return mailingLists, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *MailingListsByOwnerNameLoader) Prime(key [2]string, value *model.MailingList) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *MailingListsByOwnerNameLoader) Clear(key [2]string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *MailingListsByOwnerNameLoader) unsafeSet(key [2]string, value *model.MailingList) {
if l.cache == nil {
l.cache = map[[2]string]*model.MailingList{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *mailingListsByOwnerNameLoaderBatch) keyIndex(l *MailingListsByOwnerNameLoader, key [2]string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *mailingListsByOwnerNameLoaderBatch) startTimer(l *MailingListsByOwnerNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *mailingListsByOwnerNameLoaderBatch) end(l *MailingListsByOwnerNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,17 +1,5 @@
 package loaders
-
-//go:generate ./gen ACLsByIDLoader int api/graph/model.MailingListACL
-//go:generate ./gen EmailsByIDLoader int api/graph/model.Email
-//go:generate ./gen EmailsByMessageIDLoader string api/graph/model.Email
-//go:generate ./gen MailingListsByIDLoader int api/graph/model.MailingList
-//go:generate ./gen MailingListsByNameLoader string api/graph/model.MailingList
-//go:generate ./gen MailingListsByOwnerNameLoader [2]string api/graph/model.MailingList
-//go:generate ./gen PatchsetsByIDLoader int api/graph/model.Patchset
-//go:generate go run github.com/vektah/dataloaden SubscriptionsByIDLoader int git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model.ActivitySubscription
-//go:generate ./gen ThreadsByIDLoader int api/graph/model.Thread
-//go:generate ./gen UsersByIDLoader int api/graph/model.User
-//go:generate ./gen UsersByNameLoader string api/graph/model.User
 
 import (
 	"context"
 	"database/sql"


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// PatchsetsByIDLoaderConfig captures the config to create a new PatchsetsByIDLoader
type PatchsetsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Patchset, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewPatchsetsByIDLoader creates a new PatchsetsByIDLoader given a fetch, wait, and maxBatch
func NewPatchsetsByIDLoader(config PatchsetsByIDLoaderConfig) *PatchsetsByIDLoader {
return &PatchsetsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// PatchsetsByIDLoader batches and caches requests
type PatchsetsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Patchset, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Patchset
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *patchsetsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type patchsetsByIDLoaderBatch struct {
keys []int
data []*model.Patchset
error []error
closing bool
done chan struct{}
}
// Load a Patchset by key, batching and caching will be applied automatically
func (l *PatchsetsByIDLoader) Load(key int) (*model.Patchset, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Patchset.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *PatchsetsByIDLoader) LoadThunk(key int) func() (*model.Patchset, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Patchset, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &patchsetsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Patchset, error) {
<-batch.done
var data *model.Patchset
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *PatchsetsByIDLoader) LoadAll(keys []int) ([]*model.Patchset, []error) {
results := make([]func() (*model.Patchset, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
patchsets := make([]*model.Patchset, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
patchsets[i], errors[i] = thunk()
}
return patchsets, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Patchsets.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *PatchsetsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Patchset, []error) {
results := make([]func() (*model.Patchset, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Patchset, []error) {
patchsets := make([]*model.Patchset, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
patchsets[i], errors[i] = thunk()
}
return patchsets, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *PatchsetsByIDLoader) Prime(key int, value *model.Patchset) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *PatchsetsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *PatchsetsByIDLoader) unsafeSet(key int, value *model.Patchset) {
if l.cache == nil {
l.cache = map[int]*model.Patchset{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *patchsetsByIDLoaderBatch) keyIndex(l *PatchsetsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *patchsetsByIDLoaderBatch) startTimer(l *PatchsetsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *patchsetsByIDLoaderBatch) end(l *PatchsetsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,221 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// SubscriptionsByIDLoaderConfig captures the config to create a new SubscriptionsByIDLoader
type SubscriptionsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]model.ActivitySubscription, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewSubscriptionsByIDLoader creates a new SubscriptionsByIDLoader given a fetch, wait, and maxBatch
func NewSubscriptionsByIDLoader(config SubscriptionsByIDLoaderConfig) *SubscriptionsByIDLoader {
return &SubscriptionsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// SubscriptionsByIDLoader batches and caches requests
type SubscriptionsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]model.ActivitySubscription, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]model.ActivitySubscription
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *subscriptionsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type subscriptionsByIDLoaderBatch struct {
keys []int
data []model.ActivitySubscription
error []error
closing bool
done chan struct{}
}
// Load a ActivitySubscription by key, batching and caching will be applied automatically
func (l *SubscriptionsByIDLoader) Load(key int) (model.ActivitySubscription, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a ActivitySubscription.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubscriptionsByIDLoader) LoadThunk(key int) func() (model.ActivitySubscription, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (model.ActivitySubscription, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &subscriptionsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (model.ActivitySubscription, error) {
<-batch.done
var data model.ActivitySubscription
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *SubscriptionsByIDLoader) LoadAll(keys []int) ([]model.ActivitySubscription, []error) {
results := make([]func() (model.ActivitySubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
activitySubscriptions := make([]model.ActivitySubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
activitySubscriptions[i], errors[i] = thunk()
}
return activitySubscriptions, errors
}
// LoadAllThunk returns a function that when called will block waiting for ActivitySubscriptions.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubscriptionsByIDLoader) LoadAllThunk(keys []int) func() ([]model.ActivitySubscription, []error) {
results := make([]func() (model.ActivitySubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]model.ActivitySubscription, []error) {
activitySubscriptions := make([]model.ActivitySubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
activitySubscriptions[i], errors[i] = thunk()
}
return activitySubscriptions, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then loader.Prime(key, value).)
func (l *SubscriptionsByIDLoader) Prime(key int, value model.ActivitySubscription) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
l.unsafeSet(key, value)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *SubscriptionsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *SubscriptionsByIDLoader) unsafeSet(key int, value model.ActivitySubscription) {
if l.cache == nil {
l.cache = map[int]model.ActivitySubscription{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *subscriptionsByIDLoaderBatch) keyIndex(l *SubscriptionsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *subscriptionsByIDLoaderBatch) startTimer(l *SubscriptionsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *subscriptionsByIDLoaderBatch) end(l *SubscriptionsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
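
As a rough usage sketch (hypothetical wiring; a real Fetch would resolve subscriptions from the database), note that a second Load of the same key is served from the cache without another fetch:

package main

import (
	"fmt"
	"time"

	"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
	"git.sr.ht/~sircmpwn/lists.sr.ht/api/loaders"
)

func main() {
	fetches := 0
	subs := loaders.NewSubscriptionsByIDLoader(loaders.SubscriptionsByIDLoaderConfig{
		Wait: time.Millisecond,
		Fetch: func(keys []int) ([]model.ActivitySubscription, []error) {
			fetches++
			return make([]model.ActivitySubscription, len(keys)), nil
		},
	})

	_, _ = subs.Load(42)             // blocks until the batch flushes, then caches the result
	_, _ = subs.Load(42)             // cache hit: returned immediately, no second fetch
	fmt.Println("fetches:", fetches) // 1
}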

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// ThreadsByIDLoaderConfig captures the config to create a new ThreadsByIDLoader
type ThreadsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Thread, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewThreadsByIDLoader creates a new ThreadsByIDLoader given a fetch, wait, and maxBatch
func NewThreadsByIDLoader(config ThreadsByIDLoaderConfig) *ThreadsByIDLoader {
return &ThreadsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// ThreadsByIDLoader batches and caches requests
type ThreadsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Thread, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Thread
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *threadsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type threadsByIDLoaderBatch struct {
keys []int
data []*model.Thread
error []error
closing bool
done chan struct{}
}
// Load a Thread by key; batching and caching will be applied automatically
func (l *ThreadsByIDLoader) Load(key int) (*model.Thread, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Thread.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ThreadsByIDLoader) LoadThunk(key int) func() (*model.Thread, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Thread, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &threadsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Thread, error) {
<-batch.done
var data *model.Thread
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *ThreadsByIDLoader) LoadAll(keys []int) ([]*model.Thread, []error) {
results := make([]func() (*model.Thread, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
threads := make([]*model.Thread, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
threads[i], errors[i] = thunk()
}
return threads, errors
}
// LoadAllThunk returns a function that when called will block waiting for Threads.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ThreadsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Thread, []error) {
results := make([]func() (*model.Thread, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Thread, []error) {
threads := make([]*model.Thread, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
threads[i], errors[i] = thunk()
}
return threads, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then loader.Prime(key, value).)
func (l *ThreadsByIDLoader) Prime(key int, value *model.Thread) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ThreadsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ThreadsByIDLoader) unsafeSet(key int, value *model.Thread) {
if l.cache == nil {
l.cache = map[int]*model.Thread{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *threadsByIDLoaderBatch) keyIndex(l *ThreadsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *threadsByIDLoaderBatch) startTimer(l *ThreadsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *threadsByIDLoaderBatch) end(l *ThreadsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
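
A small sketch of the error fan-out described above, with a hypothetical failing Fetch: returning a single error marks every key in the batch as failed, so each thunk observes the same error:

package main

import (
	"fmt"
	"time"

	"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
	"git.sr.ht/~sircmpwn/lists.sr.ht/api/loaders"
)

func main() {
	threads := loaders.NewThreadsByIDLoader(loaders.ThreadsByIDLoaderConfig{
		Wait: time.Millisecond,
		Fetch: func(keys []int) ([]*model.Thread, []error) {
			// One error for the whole batch, rather than one per key.
			return nil, []error{fmt.Errorf("database unavailable")}
		},
	})

	t1 := threads.LoadThunk(1)
	t2 := threads.LoadThunk(2)
	_, err1 := t1()
	_, err2 := t2()
	fmt.Println(err1, err2) // both report "database unavailable"
}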

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// UsersByIDLoaderConfig captures the config to create a new UsersByIDLoader
type UsersByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.User, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewUsersByIDLoader creates a new UsersByIDLoader given a fetch, wait, and maxBatch
func NewUsersByIDLoader(config UsersByIDLoaderConfig) *UsersByIDLoader {
return &UsersByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// UsersByIDLoader batches and caches requests
type UsersByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.User, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.User
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *usersByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type usersByIDLoaderBatch struct {
keys []int
data []*model.User
error []error
closing bool
done chan struct{}
}
// Load a User by key; batching and caching will be applied automatically
func (l *UsersByIDLoader) Load(key int) (*model.User, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadThunk(key int) func() (*model.User, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.User, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &usersByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.User, error) {
<-batch.done
var data *model.User
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *UsersByIDLoader) LoadAll(keys []int) ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
// LoadAllThunk returns a function that when called will block waiting for Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadAllThunk(keys []int) func() ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.User, []error) {
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then loader.Prime(key, value).)
func (l *UsersByIDLoader) Prime(key int, value *model.User) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *UsersByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *UsersByIDLoader) unsafeSet(key int, value *model.User) {
if l.cache == nil {
l.cache = map[int]*model.User{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *usersByIDLoaderBatch) keyIndex(l *UsersByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *usersByIDLoaderBatch) startTimer(l *UsersByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *usersByIDLoaderBatch) end(l *UsersByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
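
A sketch of the Prime/Clear semantics (hypothetical values; only the calls shown are taken from the code above): Prime only writes on a cache miss and reports whether it did, and it stores a copy of the pointed-to value, so later mutations of the caller's struct do not leak into the cache:

package main

import (
	"fmt"
	"time"

	"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
	"git.sr.ht/~sircmpwn/lists.sr.ht/api/loaders"
)

func main() {
	users := loaders.NewUsersByIDLoader(loaders.UsersByIDLoaderConfig{
		Wait: time.Millisecond,
		Fetch: func(keys []int) ([]*model.User, []error) {
			return make([]*model.User, len(keys)), nil
		},
	})

	u := &model.User{}
	fmt.Println(users.Prime(1, u)) // true: key 1 was not cached, a copy is stored
	fmt.Println(users.Prime(1, u)) // false: key 1 already cached, no change
	users.Clear(1)                 // evict, so the next Load(1) goes through Fetch again
	fmt.Println(users.Prime(1, u)) // true again
}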

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
)
// UsersByNameLoaderConfig captures the config to create a new UsersByNameLoader
type UsersByNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.User, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewUsersByNameLoader creates a new UsersByNameLoader given a fetch, wait, and maxBatch
func NewUsersByNameLoader(config UsersByNameLoaderConfig) *UsersByNameLoader {
return &UsersByNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// UsersByNameLoader batches and caches requests
type UsersByNameLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.User, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.User
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *usersByNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type usersByNameLoaderBatch struct {
keys []string
data []*model.User
error []error
closing bool
done chan struct{}
}
// Load a User by key; batching and caching will be applied automatically
func (l *UsersByNameLoader) Load(key string) (*model.User, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByNameLoader) LoadThunk(key string) func() (*model.User, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.User, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &usersByNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.User, error) {
<-batch.done
var data *model.User
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *UsersByNameLoader) LoadAll(keys []string) ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
// LoadAllThunk returns a function that when called will block waiting for Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByNameLoader) LoadAllThunk(keys []string) func() ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.User, []error) {
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key), then loader.Prime(key, value).)
func (l *UsersByNameLoader) Prime(key string, value *model.User) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *UsersByNameLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *UsersByNameLoader) unsafeSet(key string, value *model.User) {
if l.cache == nil {
l.cache = map[string]*model.User{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *usersByNameLoaderBatch) keyIndex(l *UsersByNameLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *usersByNameLoaderBatch) startTimer(l *UsersByNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *usersByNameLoaderBatch) end(l *UsersByNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
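
Finally, a sketch of LoadAll with duplicate keys (the names are made up): because keyIndex returns the existing position for a key already in the batch, a repeated key is fetched only once, while the result slice still lines up index-for-index with the input:

package main

import (
	"fmt"
	"time"

	"git.sr.ht/~sircmpwn/lists.sr.ht/api/graph/model"
	"git.sr.ht/~sircmpwn/lists.sr.ht/api/loaders"
)

func main() {
	byName := loaders.NewUsersByNameLoader(loaders.UsersByNameLoaderConfig{
		Wait: time.Millisecond,
		Fetch: func(keys []string) ([]*model.User, []error) {
			fmt.Println("fetch keys:", keys) // [alice bob]: the duplicate is collapsed
			return make([]*model.User, len(keys)), nil
		},
	})

	users, errs := byName.LoadAll([]string{"alice", "bob", "alice"})
	fmt.Println(len(users), len(errs)) // 3 3: one entry per requested name
}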