api: overhaul build

Drew DeVault 2022-03-24 14:09:55 +01:00
parent e03387006e
commit a00fbfc7a0
21 changed files with 79 additions and 18815 deletions

.gitignore

@@ -14,3 +14,6 @@ pip-selfcheck.json
 overrides/
 .pgp
 build
+api/api
+api/graph/api/generated.go
+api/loaders/*_gen.go

Makefile

@@ -1,3 +1,12 @@
-SRHT_PATH?=/usr/lib/python3.8/site-packages/srht
+SRHT_PATH?=/usr/lib/python3.9/site-packages/srht
 MODULE=todosrht/
 include ${SRHT_PATH}/Makefile
+
+all: api
+
+api:
+	cd api && go generate ./loaders
+	cd api && go generate ./graph
+	cd api && go build
+
+.PHONY: all api
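
The new api target chains code generation and compilation, so make api (or make all) regenerates the loaders and the GraphQL server before building. A sketch of the equivalent manual steps, assuming a checkout with Go on PATH:

    cd api
    go generate ./loaders   # regenerate the dataloaden *_gen.go loaders
    go generate ./graph     # regenerate graph/api/generated.go (presumably via gqlgen, per go.mod)
    go build                # produce the api binary, now ignored as api/api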

go.mod

@@ -4,15 +4,20 @@ go 1.15
require (
git.sr.ht/~sircmpwn/core-go v0.0.0-20220314110514-33bc768cc765
github.com/99designs/gqlgen v0.14.0
github.com/99designs/gqlgen v0.17.2
github.com/Masterminds/squirrel v1.4.0
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/emersion/go-message v0.15.0
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.8.0
github.com/matryer/moq v0.2.6 // indirect
github.com/mitchellh/mapstructure v1.3.2 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/vektah/gqlparser/v2 v2.2.0
github.com/urfave/cli/v2 v2.4.0 // indirect
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e
github.com/vektah/gqlparser/v2 v2.4.1
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
)
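
Dependency-wise, gqlgen moves from v0.14.0 to v0.17.2 (with gqlparser/v2 at v2.4.1), and github.com/vektah/dataloaden becomes a direct requirement so the generate step can resolve it. A hedged sketch of how such bumps are typically applied; the commands are illustrative, not taken from this commit:

    go get github.com/99designs/gqlgen@v0.17.2
    go get github.com/vektah/dataloaden@v0.2.1-0.20190515034641-a19b9a6e7c9e
    go mod tidy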

go.sum

@@ -31,8 +31,6 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.sr.ht/~sircmpwn/core-go v0.0.0-20211218082756-f762ad220360 h1:KZwWE8xwafnRCldGTHeeuYH3QKhlrgsCqukIgXHzUgs=
git.sr.ht/~sircmpwn/core-go v0.0.0-20211218082756-f762ad220360/go.mod h1:uUqzeO5OLl/nRZfPk0igIAweRZiVwUmu/OGYfjS9fWc=
git.sr.ht/~sircmpwn/core-go v0.0.0-20220314110514-33bc768cc765 h1:QE7Jv8FFOct82B/voDbTQ5UWfrMOhuyj4LxQx2UU/28=
git.sr.ht/~sircmpwn/core-go v0.0.0-20220314110514-33bc768cc765/go.mod h1:uUqzeO5OLl/nRZfPk0igIAweRZiVwUmu/OGYfjS9fWc=
git.sr.ht/~sircmpwn/dowork v0.0.0-20210820133136-d3970e97def3 h1:9WCv5cK67s2SiY/R4DWT/OchEsFnfYDz3lbevKxZ4QI=
@@ -41,8 +39,9 @@ git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3 h1:4wDp4BKF7NQqoh7
git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw=
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210227202403-5dae5c48f917 h1:/pfEvB399XDXksu4vyjfNTytWn/nbbKiNhvjtpgc4pY=
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210227202403-5dae5c48f917/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA=
github.com/99designs/gqlgen v0.14.0 h1:Wg8aNYQUjMR/4v+W3xD+7SizOy6lSvVeQ06AobNQAXI=
github.com/99designs/gqlgen v0.14.0/go.mod h1:S7z4boV+Nx4VvzMUpVrY/YuHjFX4n7rDyuTqvAkuoRE=
github.com/99designs/gqlgen v0.17.2 h1:yczvlwMsfcVu/JtejqfrLwXuSP0yZFhmcss3caEvHw8=
github.com/99designs/gqlgen v0.17.2/go.mod h1:K5fzLKwtph+FFgh9j7nFbRUdBKvTcGnsta51fsMTn3o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
@@ -52,8 +51,9 @@ github.com/Masterminds/squirrel v1.4.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZl
github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3 h1:XcF0cTDJeiuZ5NU8w7WUDge0HRwwNRmxj/GGk6KSA6g=
github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/agnivade/levenshtein v1.1.0 h1:n6qGwyHG61v3ABce1rPVZklEYRT8NFpCMrpZdBUbYGM=
github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -76,8 +76,9 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -198,6 +199,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk=
github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU=
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -215,8 +217,11 @@ github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6Fm
github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007 h1:reVOUXwnhsYv/8UqjvhrMOu5CNT9UapHFLbQ2JcXsmg=
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/matryer/moq v0.2.3/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE=
github.com/matryer/moq v0.2.6 h1:X4+LF09udTsi2P+Z+1UhSb4p3K8IyiF7KSNFDR9M3M0=
github.com/matryer/moq v0.2.6/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -224,6 +229,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -276,13 +282,13 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -297,20 +303,25 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k=
github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I=
github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e h1:+w0Zm/9gaWpEAyDlU1eKOuk5twTjAjuevXqcJJw8hrg=
github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U=
github.com/vektah/gqlparser v1.3.1 h1:8b0IcD3qZKWJQHSzynbDlrtP3IxVydZ2DZepCGofqfU=
github.com/vektah/gqlparser v1.3.1/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74=
github.com/vektah/gqlparser/v2 v2.2.0 h1:bAc3slekAAJW6sZTi07aGq0OrfaCjj4jxARAaC7g2EM=
github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4=
github.com/vektah/gqlparser/v2 v2.4.0/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/vektah/gqlparser/v2 v2.4.1 h1:QOyEn8DAPMUMARGMeshKDkDgNmVoEaEGiDB0uWxcSlQ=
github.com/vektah/gqlparser/v2 v2.4.1/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -323,6 +334,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -354,8 +366,10 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -390,6 +404,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -408,6 +423,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -452,8 +468,10 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 h1:rw6UNGRMfarCepjI8qOepea/SXwIBVfTKjztZ5gBbq4=
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -508,10 +526,13 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -606,8 +627,10 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=

File diff suppressed because it is too large.


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// CommentsByIDLoaderConfig captures the config to create a new CommentsByIDLoader
type CommentsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Comment, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewCommentsByIDLoader creates a new CommentsByIDLoader given a fetch, wait, and maxBatch
func NewCommentsByIDLoader(config CommentsByIDLoaderConfig) *CommentsByIDLoader {
return &CommentsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// CommentsByIDLoader batches and caches requests
type CommentsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Comment, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Comment
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *commentsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type commentsByIDLoaderBatch struct {
keys []int
data []*model.Comment
error []error
closing bool
done chan struct{}
}
// Load a Comment by key, batching and caching will be applied automatically
func (l *CommentsByIDLoader) Load(key int) (*model.Comment, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Comment.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *CommentsByIDLoader) LoadThunk(key int) func() (*model.Comment, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Comment, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &commentsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Comment, error) {
<-batch.done
var data *model.Comment
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *CommentsByIDLoader) LoadAll(keys []int) ([]*model.Comment, []error) {
results := make([]func() (*model.Comment, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
comments := make([]*model.Comment, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
comments[i], errors[i] = thunk()
}
return comments, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Comments.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *CommentsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Comment, []error) {
results := make([]func() (*model.Comment, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Comment, []error) {
comments := make([]*model.Comment, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
comments[i], errors[i] = thunk()
}
return comments, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *CommentsByIDLoader) Prime(key int, value *model.Comment) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *CommentsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *CommentsByIDLoader) unsafeSet(key int, value *model.Comment) {
if l.cache == nil {
l.cache = map[int]*model.Comment{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *commentsByIDLoaderBatch) keyIndex(l *CommentsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *commentsByIDLoaderBatch) startTimer(l *CommentsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *commentsByIDLoaderBatch) end(l *CommentsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
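
For reference, the deleted generated API was consumed roughly like this. A minimal sketch, assuming it lives inside the old package loaders; the Fetch body is illustrative, standing in for a real bulk database query:

    package loaders

    import (
        "time"

        "git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
    )

    // newCommentsLoader wires up the generated loader. Concurrent Load calls
    // within the Wait window are coalesced into a single Fetch of up to
    // MaxBatch keys, and results are cached per key.
    func newCommentsLoader() *CommentsByIDLoader {
        return NewCommentsByIDLoader(CommentsByIDLoaderConfig{
            Fetch: func(keys []int) ([]*model.Comment, []error) {
                // illustrative: return one comment (or error) per key, in key order
                comments := make([]*model.Comment, len(keys))
                errors := make([]error, len(keys))
                return comments, errors
            },
            Wait:     1 * time.Millisecond,
            MaxBatch: 100,
        })
    }

A caller then uses loader.Load(id), or LoadThunk for deferred resolution; both go through the batching and caching machinery shown above.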


@@ -1,221 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// EntitiesByParticipantIDLoaderConfig captures the config to create a new EntitiesByParticipantIDLoader
type EntitiesByParticipantIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]model.Entity, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewEntitiesByParticipantIDLoader creates a new EntitiesByParticipantIDLoader given a fetch, wait, and maxBatch
func NewEntitiesByParticipantIDLoader(config EntitiesByParticipantIDLoaderConfig) *EntitiesByParticipantIDLoader {
return &EntitiesByParticipantIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// EntitiesByParticipantIDLoader batches and caches requests
type EntitiesByParticipantIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]model.Entity, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]model.Entity
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *entitiesByParticipantIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type entitiesByParticipantIDLoaderBatch struct {
keys []int
data []model.Entity
error []error
closing bool
done chan struct{}
}
// Load a Entity by key, batching and caching will be applied automatically
func (l *EntitiesByParticipantIDLoader) Load(key int) (model.Entity, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Entity.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EntitiesByParticipantIDLoader) LoadThunk(key int) func() (model.Entity, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (model.Entity, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &entitiesByParticipantIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (model.Entity, error) {
<-batch.done
var data model.Entity
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *EntitiesByParticipantIDLoader) LoadAll(keys []int) ([]model.Entity, []error) {
results := make([]func() (model.Entity, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
entitys := make([]model.Entity, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
entitys[i], errors[i] = thunk()
}
return entitys, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Entitys.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EntitiesByParticipantIDLoader) LoadAllThunk(keys []int) func() ([]model.Entity, []error) {
results := make([]func() (model.Entity, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]model.Entity, []error) {
entitys := make([]model.Entity, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
entitys[i], errors[i] = thunk()
}
return entitys, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *EntitiesByParticipantIDLoader) Prime(key int, value model.Entity) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
l.unsafeSet(key, value)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *EntitiesByParticipantIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *EntitiesByParticipantIDLoader) unsafeSet(key int, value model.Entity) {
if l.cache == nil {
l.cache = map[int]model.Entity{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *entitiesByParticipantIDLoaderBatch) keyIndex(l *EntitiesByParticipantIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *entitiesByParticipantIDLoaderBatch) startTimer(l *EntitiesByParticipantIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *entitiesByParticipantIDLoaderBatch) end(l *EntitiesByParticipantIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
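
Note one difference from the pointer-valued loaders: model.Entity appears to be an interface type here, so Prime stores the value as-is, while loaders of pointer types (such as CommentsByIDLoader above) copy the pointed-to value before caching to avoid aliasing a reused loop variable. Excerpted from the two generated Prime implementations:

    // pointer-valued loader (CommentsByIDLoader):
    cpy := *value
    l.unsafeSet(key, &cpy)

    // interface-valued loader (EntitiesByParticipantIDLoader):
    l.unsafeSet(key, value)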

api/loaders/generate.go (new file)

@@ -0,0 +1,23 @@
//go:build generate
// +build generate

package loaders

import (
	_ "github.com/vektah/dataloaden"
)

//go:generate go run github.com/vektah/dataloaden EntitiesByParticipantIDLoader int git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model.Entity
//go:generate ./gen UsersByIDLoader int api/graph/model.User
//go:generate ./gen UsersByNameLoader string api/graph/model.User
//go:generate ./gen TrackersByIDLoader int api/graph/model.Tracker
//go:generate ./gen TrackersByNameLoader string api/graph/model.Tracker
//go:generate ./gen TrackersByOwnerNameLoader [2]string api/graph/model.Tracker
//go:generate ./gen TicketsByIDLoader int api/graph/model.Ticket
//go:generate ./gen TicketsByTrackerIDLoader [2]int api/graph/model.Ticket
//go:generate ./gen CommentsByIDLoader int api/graph/model.Comment
//go:generate ./gen LabelsByIDLoader int api/graph/model.Label
//go:generate ./gen SubsByTicketIDLoader int api/graph/model.TicketSubscription
//go:generate ./gen SubsByTrackerIDLoader int api/graph/model.TrackerSubscription
//go:generate ./gen ParticipantsByUserIDLoader int api/graph/model.Participant
//go:generate ./gen ParticipantsByUsernameLoader string api/graph/model.Participant
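
This is the conventional build-tagged tool-dependency pattern: the blank import pins github.com/vektah/dataloaden in go.mod so that go run can resolve it, while the generate build tag keeps the file out of ordinary builds. The directives are executed by the new Makefile target, or by hand:

    cd api && go generate ./loaders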


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// LabelsByIDLoaderConfig captures the config to create a new LabelsByIDLoader
type LabelsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Label, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewLabelsByIDLoader creates a new LabelsByIDLoader given a fetch, wait, and maxBatch
func NewLabelsByIDLoader(config LabelsByIDLoaderConfig) *LabelsByIDLoader {
return &LabelsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// LabelsByIDLoader batches and caches requests
type LabelsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Label, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Label
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *labelsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type labelsByIDLoaderBatch struct {
keys []int
data []*model.Label
error []error
closing bool
done chan struct{}
}
// Load a Label by key, batching and caching will be applied automatically
func (l *LabelsByIDLoader) Load(key int) (*model.Label, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Label.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LabelsByIDLoader) LoadThunk(key int) func() (*model.Label, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Label, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &labelsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Label, error) {
<-batch.done
var data *model.Label
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *LabelsByIDLoader) LoadAll(keys []int) ([]*model.Label, []error) {
results := make([]func() (*model.Label, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
labels := make([]*model.Label, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
labels[i], errors[i] = thunk()
}
return labels, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Labels.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *LabelsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Label, []error) {
results := make([]func() (*model.Label, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Label, []error) {
labels := make([]*model.Label, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
labels[i], errors[i] = thunk()
}
return labels, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *LabelsByIDLoader) Prime(key int, value *model.Label) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *LabelsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *LabelsByIDLoader) unsafeSet(key int, value *model.Label) {
if l.cache == nil {
l.cache = map[int]*model.Label{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *labelsByIDLoaderBatch) keyIndex(l *LabelsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *labelsByIDLoaderBatch) startTimer(l *LabelsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *labelsByIDLoaderBatch) end(l *LabelsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,20 +1,5 @@
 package loaders

-//go:generate go run github.com/vektah/dataloaden EntitiesByParticipantIDLoader int git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model.Entity
-//go:generate ./gen UsersByIDLoader int api/graph/model.User
-//go:generate ./gen UsersByNameLoader string api/graph/model.User
-//go:generate ./gen TrackersByIDLoader int api/graph/model.Tracker
-//go:generate ./gen TrackersByNameLoader string api/graph/model.Tracker
-//go:generate ./gen TrackersByOwnerNameLoader [2]string api/graph/model.Tracker
-//go:generate ./gen TicketsByIDLoader int api/graph/model.Ticket
-//go:generate ./gen TicketsByTrackerIDLoader [2]int api/graph/model.Ticket
-//go:generate ./gen CommentsByIDLoader int api/graph/model.Comment
-//go:generate ./gen LabelsByIDLoader int api/graph/model.Label
-//go:generate ./gen SubsByTicketIDLoader int api/graph/model.TicketSubscription
-//go:generate ./gen SubsByTrackerIDLoader int api/graph/model.TrackerSubscription
-//go:generate ./gen ParticipantsByUserIDLoader int api/graph/model.Participant
-//go:generate ./gen ParticipantsByUsernameLoader string api/graph/model.Participant
-
 import (
 	"bytes"
 	"context"

@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// ParticipantsByUserIDLoaderConfig captures the config to create a new ParticipantsByUserIDLoader
type ParticipantsByUserIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Participant, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewParticipantsByUserIDLoader creates a new ParticipantsByUserIDLoader given a fetch, wait, and maxBatch
func NewParticipantsByUserIDLoader(config ParticipantsByUserIDLoaderConfig) *ParticipantsByUserIDLoader {
return &ParticipantsByUserIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// ParticipantsByUserIDLoader batches and caches requests
type ParticipantsByUserIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Participant, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Participant
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *participantsByUserIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type participantsByUserIDLoaderBatch struct {
keys []int
data []*model.Participant
error []error
closing bool
done chan struct{}
}
// Load a Participant by key, batching and caching will be applied automatically
func (l *ParticipantsByUserIDLoader) Load(key int) (*model.Participant, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Participant.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ParticipantsByUserIDLoader) LoadThunk(key int) func() (*model.Participant, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Participant, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &participantsByUserIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Participant, error) {
<-batch.done
var data *model.Participant
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ParticipantsByUserIDLoader) LoadAll(keys []int) ([]*model.Participant, []error) {
results := make([]func() (*model.Participant, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
participants := make([]*model.Participant, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
participants[i], errors[i] = thunk()
}
return participants, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Participants.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ParticipantsByUserIDLoader) LoadAllThunk(keys []int) func() ([]*model.Participant, []error) {
results := make([]func() (*model.Participant, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Participant, []error) {
participants := make([]*model.Participant, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
participants[i], errors[i] = thunk()
}
return participants, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ParticipantsByUserIDLoader) Prime(key int, value *model.Participant) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ParticipantsByUserIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ParticipantsByUserIDLoader) unsafeSet(key int, value *model.Participant) {
if l.cache == nil {
l.cache = map[int]*model.Participant{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *participantsByUserIDLoaderBatch) keyIndex(l *ParticipantsByUserIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *participantsByUserIDLoaderBatch) startTimer(l *ParticipantsByUserIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *participantsByUserIDLoaderBatch) end(l *ParticipantsByUserIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// ParticipantsByUsernameLoaderConfig captures the config to create a new ParticipantsByUsernameLoader
type ParticipantsByUsernameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Participant, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewParticipantsByUsernameLoader creates a new ParticipantsByUsernameLoader given a fetch, wait, and maxBatch
func NewParticipantsByUsernameLoader(config ParticipantsByUsernameLoaderConfig) *ParticipantsByUsernameLoader {
return &ParticipantsByUsernameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// ParticipantsByUsernameLoader batches and caches requests
type ParticipantsByUsernameLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.Participant, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.Participant
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *participantsByUsernameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type participantsByUsernameLoaderBatch struct {
keys []string
data []*model.Participant
error []error
closing bool
done chan struct{}
}
// Load a Participant by key, batching and caching will be applied automatically
func (l *ParticipantsByUsernameLoader) Load(key string) (*model.Participant, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Participant.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ParticipantsByUsernameLoader) LoadThunk(key string) func() (*model.Participant, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Participant, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &participantsByUsernameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Participant, error) {
<-batch.done
var data *model.Participant
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ParticipantsByUsernameLoader) LoadAll(keys []string) ([]*model.Participant, []error) {
results := make([]func() (*model.Participant, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
participants := make([]*model.Participant, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
participants[i], errors[i] = thunk()
}
return participants, errors
}
// LoadAllThunk returns a function that when called will block waiting for a Participants.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ParticipantsByUsernameLoader) LoadAllThunk(keys []string) func() ([]*model.Participant, []error) {
results := make([]func() (*model.Participant, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Participant, []error) {
participants := make([]*model.Participant, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
participants[i], errors[i] = thunk()
}
return participants, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ParticipantsByUsernameLoader) Prime(key string, value *model.Participant) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ParticipantsByUsernameLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ParticipantsByUsernameLoader) unsafeSet(key string, value *model.Participant) {
if l.cache == nil {
l.cache = map[string]*model.Participant{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *participantsByUsernameLoaderBatch) keyIndex(l *ParticipantsByUsernameLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *participantsByUsernameLoaderBatch) startTimer(l *ParticipantsByUsernameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *participantsByUsernameLoaderBatch) end(l *ParticipantsByUsernameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}


@@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// SubsByTicketIDLoaderConfig captures the config to create a new SubsByTicketIDLoader
type SubsByTicketIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.TicketSubscription, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// NewSubsByTicketIDLoader creates a new SubsByTicketIDLoader given a fetch, wait, and maxBatch
func NewSubsByTicketIDLoader(config SubsByTicketIDLoaderConfig) *SubsByTicketIDLoader {
return &SubsByTicketIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// SubsByTicketIDLoader batches and caches requests
type SubsByTicketIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.TicketSubscription, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.TicketSubscription
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *subsByTicketIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type subsByTicketIDLoaderBatch struct {
keys []int
data []*model.TicketSubscription
error []error
closing bool
done chan struct{}
}
// Load a TicketSubscription by key, batching and caching will be applied automatically
func (l *SubsByTicketIDLoader) Load(key int) (*model.TicketSubscription, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a TicketSubscription.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTicketIDLoader) LoadThunk(key int) func() (*model.TicketSubscription, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.TicketSubscription, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &subsByTicketIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.TicketSubscription, error) {
<-batch.done
var data *model.TicketSubscription
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *SubsByTicketIDLoader) LoadAll(keys []int) ([]*model.TicketSubscription, []error) {
results := make([]func() (*model.TicketSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
ticketSubscriptions := make([]*model.TicketSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
ticketSubscriptions[i], errors[i] = thunk()
}
return ticketSubscriptions, errors
}
// LoadAllThunk returns a function that when called will block waiting for a TicketSubscriptions.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTicketIDLoader) LoadAllThunk(keys []int) func() ([]*model.TicketSubscription, []error) {
results := make([]func() (*model.TicketSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.TicketSubscription, []error) {
ticketSubscriptions := make([]*model.TicketSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
ticketSubscriptions[i], errors[i] = thunk()
}
return ticketSubscriptions, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *SubsByTicketIDLoader) Prime(key int, value *model.TicketSubscription) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *SubsByTicketIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *SubsByTicketIDLoader) unsafeSet(key int, value *model.TicketSubscription) {
if l.cache == nil {
l.cache = map[int]*model.TicketSubscription{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *subsByTicketIDLoaderBatch) keyIndex(l *SubsByTicketIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *subsByTicketIDLoaderBatch) startTimer(l *SubsByTicketIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *subsByTicketIDLoaderBatch) end(l *SubsByTicketIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
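For orientation, this is roughly how a loader like the one above is constructed; a minimal sketch mirroring the Config/New pattern of the loaders below, with querySubsByTicketIDs as a hypothetical helper that returns one result per key, in key order:

loader := NewSubsByTicketIDLoader(SubsByTicketIDLoaderConfig{
	Wait:     1 * time.Millisecond, // collect keys briefly before fetching
	MaxBatch: 100,                  // flush early once 100 keys are queued
	Fetch: func(keys []int) ([]*model.TicketSubscription, []error) {
		// querySubsByTicketIDs is assumed; its results must be
		// positionally aligned with keys.
		return querySubsByTicketIDs(keys)
	},
})
sub, err := loader.Load(ticketID) // batched and cached automatically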

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// SubsByTrackerIDLoaderConfig captures the config to create a new SubsByTrackerIDLoader
type SubsByTrackerIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.TrackerSubscription, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewSubsByTrackerIDLoader creates a new SubsByTrackerIDLoader given a fetch, wait, and maxBatch
func NewSubsByTrackerIDLoader(config SubsByTrackerIDLoaderConfig) *SubsByTrackerIDLoader {
return &SubsByTrackerIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// SubsByTrackerIDLoader batches and caches requests
type SubsByTrackerIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.TrackerSubscription, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.TrackerSubscription
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *subsByTrackerIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type subsByTrackerIDLoaderBatch struct {
keys []int
data []*model.TrackerSubscription
error []error
closing bool
done chan struct{}
}
// Load a TrackerSubscription by key; batching and caching will be applied automatically
func (l *SubsByTrackerIDLoader) Load(key int) (*model.TrackerSubscription, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a TrackerSubscription.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTrackerIDLoader) LoadThunk(key int) func() (*model.TrackerSubscription, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.TrackerSubscription, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &subsByTrackerIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.TrackerSubscription, error) {
<-batch.done
var data *model.TrackerSubscription
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *SubsByTrackerIDLoader) LoadAll(keys []int) ([]*model.TrackerSubscription, []error) {
results := make([]func() (*model.TrackerSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
trackerSubscriptions := make([]*model.TrackerSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackerSubscriptions[i], errors[i] = thunk()
}
return trackerSubscriptions, errors
}
// LoadAllThunk returns a function that when called will block waiting for the TrackerSubscriptions.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SubsByTrackerIDLoader) LoadAllThunk(keys []int) func() ([]*model.TrackerSubscription, []error) {
results := make([]func() (*model.TrackerSubscription, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.TrackerSubscription, []error) {
trackerSubscriptions := make([]*model.TrackerSubscription, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackerSubscriptions[i], errors[i] = thunk()
}
return trackerSubscriptions, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *SubsByTrackerIDLoader) Prime(key int, value *model.TrackerSubscription) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *SubsByTrackerIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *SubsByTrackerIDLoader) unsafeSet(key int, value *model.TrackerSubscription) {
if l.cache == nil {
l.cache = map[int]*model.TrackerSubscription{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *subsByTrackerIDLoaderBatch) keyIndex(l *SubsByTrackerIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *subsByTrackerIDLoaderBatch) startTimer(l *SubsByTrackerIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *subsByTrackerIDLoaderBatch) end(l *SubsByTrackerIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
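A sketch of the thunk pattern the LoadThunk comment describes: enqueue several keys without blocking, then force the thunks once everything is in flight. The loader and trackerIDs variables are assumed:

thunks := make([]func() (*model.TrackerSubscription, error), len(trackerIDs))
for i, id := range trackerIDs {
	thunks[i] = loader.LoadThunk(id) // enqueues the key; does not block
}
// Requests against other loaders can be enqueued here before anything blocks.
for _, thunk := range thunks {
	sub, err := thunk() // blocks until this key's batch has been fetched
	_, _ = sub, err
}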

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// TicketsByIDLoaderConfig captures the config to create a new TicketsByIDLoader
type TicketsByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Ticket, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewTicketsByIDLoader creates a new TicketsByIDLoader given a fetch, wait, and maxBatch
func NewTicketsByIDLoader(config TicketsByIDLoaderConfig) *TicketsByIDLoader {
return &TicketsByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// TicketsByIDLoader batches and caches requests
type TicketsByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Ticket, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Ticket
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *ticketsByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type ticketsByIDLoaderBatch struct {
keys []int
data []*model.Ticket
error []error
closing bool
done chan struct{}
}
// Load a Ticket by key; batching and caching will be applied automatically
func (l *TicketsByIDLoader) Load(key int) (*model.Ticket, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Ticket.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TicketsByIDLoader) LoadThunk(key int) func() (*model.Ticket, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Ticket, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &ticketsByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Ticket, error) {
<-batch.done
var data *model.Ticket
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *TicketsByIDLoader) LoadAll(keys []int) ([]*model.Ticket, []error) {
results := make([]func() (*model.Ticket, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
tickets := make([]*model.Ticket, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
tickets[i], errors[i] = thunk()
}
return tickets, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Tickets.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TicketsByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Ticket, []error) {
results := make([]func() (*model.Ticket, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Ticket, []error) {
tickets := make([]*model.Ticket, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
tickets[i], errors[i] = thunk()
}
return tickets, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *TicketsByIDLoader) Prime(key int, value *model.Ticket) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *TicketsByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *TicketsByIDLoader) unsafeSet(key int, value *model.Ticket) {
if l.cache == nil {
l.cache = map[int]*model.Ticket{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *ticketsByIDLoaderBatch) keyIndex(l *TicketsByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *ticketsByIDLoaderBatch) startTimer(l *TicketsByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *ticketsByIDLoaderBatch) end(l *TicketsByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
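LoadAll reports errors positionally, one per key, which keeps per-ticket error handling simple; a sketch, with ids and handle as hypothetical:

tickets, errs := loader.LoadAll(ids)
for i := range tickets {
	if errs[i] != nil {
		log.Printf("ticket %d: %v", ids[i], errs[i])
		continue
	}
	handle(tickets[i])
}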

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// TicketsByTrackerIDLoaderConfig captures the config to create a new TicketsByTrackerIDLoader
type TicketsByTrackerIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys [][2]int) ([]*model.Ticket, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewTicketsByTrackerIDLoader creates a new TicketsByTrackerIDLoader given a fetch, wait, and maxBatch
func NewTicketsByTrackerIDLoader(config TicketsByTrackerIDLoaderConfig) *TicketsByTrackerIDLoader {
return &TicketsByTrackerIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// TicketsByTrackerIDLoader batches and caches requests
type TicketsByTrackerIDLoader struct {
// this method provides the data for the loader
fetch func(keys [][2]int) ([]*model.Ticket, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[[2]int]*model.Ticket
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *ticketsByTrackerIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type ticketsByTrackerIDLoaderBatch struct {
keys [][2]int
data []*model.Ticket
error []error
closing bool
done chan struct{}
}
// Load a Ticket by key; batching and caching will be applied automatically
func (l *TicketsByTrackerIDLoader) Load(key [2]int) (*model.Ticket, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Ticket.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TicketsByTrackerIDLoader) LoadThunk(key [2]int) func() (*model.Ticket, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Ticket, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &ticketsByTrackerIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Ticket, error) {
<-batch.done
var data *model.Ticket
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *TicketsByTrackerIDLoader) LoadAll(keys [][2]int) ([]*model.Ticket, []error) {
results := make([]func() (*model.Ticket, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
tickets := make([]*model.Ticket, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
tickets[i], errors[i] = thunk()
}
return tickets, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Tickets.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TicketsByTrackerIDLoader) LoadAllThunk(keys [][2]int) func() ([]*model.Ticket, []error) {
results := make([]func() (*model.Ticket, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Ticket, []error) {
tickets := make([]*model.Ticket, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
tickets[i], errors[i] = thunk()
}
return tickets, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *TicketsByTrackerIDLoader) Prime(key [2]int, value *model.Ticket) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *TicketsByTrackerIDLoader) Clear(key [2]int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *TicketsByTrackerIDLoader) unsafeSet(key [2]int, value *model.Ticket) {
if l.cache == nil {
l.cache = map[[2]int]*model.Ticket{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *ticketsByTrackerIDLoaderBatch) keyIndex(l *TicketsByTrackerIDLoader, key [2]int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *ticketsByTrackerIDLoaderBatch) startTimer(l *TicketsByTrackerIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *ticketsByTrackerIDLoaderBatch) end(l *TicketsByTrackerIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
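This variant keys on a [2]int pair rather than a plain ID, since a fixed-size array can serve as a map key; the layout is not visible in these lines, but by the loader's name it is presumably (tracker ID, ticket ID within the tracker). A hedged usage sketch:

// Key layout is an assumption: {trackerID, ticketID}.
ticket, err := loader.Load([2]int{trackerID, ticketID})
if err != nil {
	return nil, err
}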

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// TrackersByIDLoaderConfig captures the config to create a new TrackersByIDLoader
type TrackersByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Tracker, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewTrackersByIDLoader creates a new TrackersByIDLoader given a fetch, wait, and maxBatch
func NewTrackersByIDLoader(config TrackersByIDLoaderConfig) *TrackersByIDLoader {
return &TrackersByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// TrackersByIDLoader batches and caches requests
type TrackersByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.Tracker, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.Tracker
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *trackersByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type trackersByIDLoaderBatch struct {
keys []int
data []*model.Tracker
error []error
closing bool
done chan struct{}
}
// Load a Tracker by key; batching and caching will be applied automatically
func (l *TrackersByIDLoader) Load(key int) (*model.Tracker, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Tracker.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByIDLoader) LoadThunk(key int) func() (*model.Tracker, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Tracker, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &trackersByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Tracker, error) {
<-batch.done
var data *model.Tracker
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *TrackersByIDLoader) LoadAll(keys []int) ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Trackers.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Tracker, []error) {
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *TrackersByIDLoader) Prime(key int, value *model.Tracker) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *TrackersByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *TrackersByIDLoader) unsafeSet(key int, value *model.Tracker) {
if l.cache == nil {
l.cache = map[int]*model.Tracker{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *trackersByIDLoaderBatch) keyIndex(l *TrackersByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *trackersByIDLoaderBatch) startTimer(l *TrackersByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *trackersByIDLoaderBatch) end(l *TrackersByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
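Prime and Clear give resolvers manual control over the cache; after a write, the usual pattern looks something like this sketch, with updateTracker as a hypothetical mutation helper:

tracker, err := updateTracker(id, input) // hypothetical write path
if err == nil {
	loader.Clear(id)          // drop any stale cached value
	loader.Prime(id, tracker) // cache the fresh one (Prime stores a copy)
}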

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// TrackersByNameLoaderConfig captures the config to create a new TrackersByNameLoader
type TrackersByNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Tracker, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewTrackersByNameLoader creates a new TrackersByNameLoader given a fetch, wait, and maxBatch
func NewTrackersByNameLoader(config TrackersByNameLoaderConfig) *TrackersByNameLoader {
return &TrackersByNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// TrackersByNameLoader batches and caches requests
type TrackersByNameLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.Tracker, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.Tracker
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *trackersByNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type trackersByNameLoaderBatch struct {
keys []string
data []*model.Tracker
error []error
closing bool
done chan struct{}
}
// Load a Tracker by key; batching and caching will be applied automatically
func (l *TrackersByNameLoader) Load(key string) (*model.Tracker, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Tracker.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByNameLoader) LoadThunk(key string) func() (*model.Tracker, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Tracker, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &trackersByNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Tracker, error) {
<-batch.done
var data *model.Tracker
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *TrackersByNameLoader) LoadAll(keys []string) ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Trackers.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByNameLoader) LoadAllThunk(keys []string) func() ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Tracker, []error) {
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *TrackersByNameLoader) Prime(key string, value *model.Tracker) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *TrackersByNameLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *TrackersByNameLoader) unsafeSet(key string, value *model.Tracker) {
if l.cache == nil {
l.cache = map[string]*model.Tracker{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *trackersByNameLoaderBatch) keyIndex(l *TrackersByNameLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *trackersByNameLoaderBatch) startTimer(l *TrackersByNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *trackersByNameLoaderBatch) end(l *TrackersByNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
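The per-position error handling in LoadThunk relies on Fetch returning exactly one entry per key, in key order, with nil holes for misses. A sketch of a conforming Fetch, assuming a hypothetical selectTrackersByName bulk query and a Name field on model.Tracker:

Fetch: func(names []string) ([]*model.Tracker, []error) {
	rows, err := selectTrackersByName(names) // assumed bulk query
	if err != nil {
		return nil, []error{err} // a single error is applied to every key
	}
	byName := make(map[string]*model.Tracker, len(rows))
	for _, t := range rows {
		byName[t.Name] = t
	}
	out := make([]*model.Tracker, len(names))
	for i, name := range names {
		out[i] = byName[name] // nil where no tracker matched
	}
	return out, nil
},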

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// TrackersByOwnerNameLoaderConfig captures the config to create a new TrackersByOwnerNameLoader
type TrackersByOwnerNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys [][2]string) ([]*model.Tracker, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewTrackersByOwnerNameLoader creates a new TrackersByOwnerNameLoader given a fetch, wait, and maxBatch
func NewTrackersByOwnerNameLoader(config TrackersByOwnerNameLoaderConfig) *TrackersByOwnerNameLoader {
return &TrackersByOwnerNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// TrackersByOwnerNameLoader batches and caches requests
type TrackersByOwnerNameLoader struct {
// this method provides the data for the loader
fetch func(keys [][2]string) ([]*model.Tracker, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[[2]string]*model.Tracker
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *trackersByOwnerNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type trackersByOwnerNameLoaderBatch struct {
keys [][2]string
data []*model.Tracker
error []error
closing bool
done chan struct{}
}
// Load a Tracker by key; batching and caching will be applied automatically
func (l *TrackersByOwnerNameLoader) Load(key [2]string) (*model.Tracker, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a Tracker.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByOwnerNameLoader) LoadThunk(key [2]string) func() (*model.Tracker, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.Tracker, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &trackersByOwnerNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.Tracker, error) {
<-batch.done
var data *model.Tracker
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *TrackersByOwnerNameLoader) LoadAll(keys [][2]string) ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Trackers.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *TrackersByOwnerNameLoader) LoadAllThunk(keys [][2]string) func() ([]*model.Tracker, []error) {
results := make([]func() (*model.Tracker, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.Tracker, []error) {
trackers := make([]*model.Tracker, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
trackers[i], errors[i] = thunk()
}
return trackers, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *TrackersByOwnerNameLoader) Prime(key [2]string, value *model.Tracker) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *TrackersByOwnerNameLoader) Clear(key [2]string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *TrackersByOwnerNameLoader) unsafeSet(key [2]string, value *model.Tracker) {
if l.cache == nil {
l.cache = map[[2]string]*model.Tracker{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *trackersByOwnerNameLoaderBatch) keyIndex(l *TrackersByOwnerNameLoader, key [2]string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *trackersByOwnerNameLoaderBatch) startTimer(l *TrackersByOwnerNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *trackersByOwnerNameLoaderBatch) end(l *TrackersByOwnerNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
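Same shape again with a [2]string key; presumably (owner username, tracker name), though the fetch side is not shown in these lines. Sketch:

// Key layout is an assumption: {ownerName, trackerName}.
tracker, err := loader.Load([2]string{username, trackerName})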

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// UsersByIDLoaderConfig captures the config to create a new UsersByIDLoader
type UsersByIDLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.User, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewUsersByIDLoader creates a new UsersByIDLoader given a fetch, wait, and maxBatch
func NewUsersByIDLoader(config UsersByIDLoaderConfig) *UsersByIDLoader {
return &UsersByIDLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// UsersByIDLoader batches and caches requests
type UsersByIDLoader struct {
// this method provides the data for the loader
fetch func(keys []int) ([]*model.User, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[int]*model.User
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *usersByIDLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type usersByIDLoaderBatch struct {
keys []int
data []*model.User
error []error
closing bool
done chan struct{}
}
// Load a User by key; batching and caching will be applied automatically
func (l *UsersByIDLoader) Load(key int) (*model.User, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadThunk(key int) func() (*model.User, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.User, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &usersByIDLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.User, error) {
<-batch.done
var data *model.User
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *UsersByIDLoader) LoadAll(keys []int) ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadAllThunk(keys []int) func() ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.User, []error) {
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *UsersByIDLoader) Prime(key int, value *model.User) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *UsersByIDLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *UsersByIDLoader) unsafeSet(key int, value *model.User) {
if l.cache == nil {
l.cache = map[int]*model.User{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *usersByIDLoaderBatch) keyIndex(l *UsersByIDLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *usersByIDLoaderBatch) startTimer(l *UsersByIDLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *usersByIDLoaderBatch) end(l *UsersByIDLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
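A batch flushes on whichever trigger fires first: the Wait timer started by the first key (startTimer) or the MaxBatch limit checked in keyIndex. A toy demonstration with assumed values, just to make the boundaries visible:

loader := NewUsersByIDLoader(UsersByIDLoaderConfig{
	Wait:     2 * time.Millisecond,
	MaxBatch: 3,
	Fetch: func(keys []int) ([]*model.User, []error) {
		fmt.Println("fetched batch:", keys) // observe the batch boundaries
		return make([]*model.User, len(keys)), nil
	},
})
for i := 0; i < 7; i++ {
	go loader.Load(i) // typically flushes as {0,1,2} and {3,4,5} via MaxBatch, then {6} via the timer
}
time.Sleep(10 * time.Millisecond)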

View File

@ -1,224 +0,0 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)
// UsersByNameLoaderConfig captures the config to create a new UsersByNameLoader
type UsersByNameLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.User, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
// NewUsersByNameLoader creates a new UsersByNameLoader given a fetch, wait, and maxBatch
func NewUsersByNameLoader(config UsersByNameLoaderConfig) *UsersByNameLoader {
return &UsersByNameLoader{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// UsersByNameLoader batches and caches requests
type UsersByNameLoader struct {
// this method provides the data for the loader
fetch func(keys []string) ([]*model.User, []error)
// how long to wait before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[string]*model.User
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *usersByNameLoaderBatch
// mutex to prevent races
mu sync.Mutex
}
type usersByNameLoaderBatch struct {
keys []string
data []*model.User
error []error
closing bool
done chan struct{}
}
// Load a User by key; batching and caching will be applied automatically
func (l *UsersByNameLoader) Load(key string) (*model.User, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByNameLoader) LoadThunk(key string) func() (*model.User, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() (*model.User, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &usersByNameLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() (*model.User, error) {
<-batch.done
var data *model.User
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// it's convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriately
// sized sub-batches depending on how the loader is configured
func (l *UsersByNameLoader) LoadAll(keys []string) ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
// LoadAllThunk returns a function that when called will block waiting for the Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByNameLoader) LoadAllThunk(keys []string) func() ([]*model.User, []error) {
results := make([]func() (*model.User, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]*model.User, []error) {
users := make([]*model.User, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
users[i], errors[i] = thunk()
}
return users, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *UsersByNameLoader) Prime(key string, value *model.User) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache; it's easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *UsersByNameLoader) Clear(key string) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *UsersByNameLoader) unsafeSet(key string, value *model.User) {
if l.cache == nil {
l.cache = map[string]*model.User{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch; if it's not found,
// it will add the key to the batch
func (b *usersByNameLoaderBatch) keyIndex(l *UsersByNameLoader, key string) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *usersByNameLoaderBatch) startTimer(l *UsersByNameLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *usersByNameLoaderBatch) end(l *UsersByNameLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}