// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.sr.ht/~sircmpwn/todo.sr.ht/api/graph/model"
)

// EntitiesByParticipantIDLoaderConfig captures the config to create a new EntitiesByParticipantIDLoader
type EntitiesByParticipantIDLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([]model.Entity, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewEntitiesByParticipantIDLoader creates a new EntitiesByParticipantIDLoader given a fetch, wait, and maxBatch
func NewEntitiesByParticipantIDLoader(config EntitiesByParticipantIDLoaderConfig) *EntitiesByParticipantIDLoader {
	return &EntitiesByParticipantIDLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// EntitiesByParticipantIDLoader batches and caches requests
type EntitiesByParticipantIDLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]model.Entity, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]model.Entity

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *entitiesByParticipantIDLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type entitiesByParticipantIDLoaderBatch struct {
	keys    []int
	data    []model.Entity
	error   []error
	closing bool
	done    chan struct{}
}

// Load an Entity by key; batching and caching will be applied automatically
func (l *EntitiesByParticipantIDLoader) Load(key int) (model.Entity, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for an Entity.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EntitiesByParticipantIDLoader) LoadThunk(key int) func() (model.Entity, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (model.Entity, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &entitiesByParticipantIDLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (model.Entity, error) {
		<-batch.done

		var data model.Entity
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// it's convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriately sized
// sub-batches depending on how the loader is configured
func (l *EntitiesByParticipantIDLoader) LoadAll(keys []int) ([]model.Entity, []error) {
	results := make([]func() (model.Entity, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	entitys := make([]model.Entity, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		entitys[i], errors[i] = thunk()
	}
	return entitys, errors
}
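// newParticipantEntityLoaderExample is an illustrative sketch, not part of the
// generated loader: it shows one plausible way to construct the loader from
// EntitiesByParticipantIDLoaderConfig. The fetch closure is a hypothetical
// stand-in for a real lookup that would resolve every participant ID in a
// single query; the Wait and MaxBatch values are arbitrary choices.
func newParticipantEntityLoaderExample() *EntitiesByParticipantIDLoader {
	return NewEntitiesByParticipantIDLoader(EntitiesByParticipantIDLoaderConfig{
		Fetch: func(ids []int) ([]model.Entity, []error) {
			// A real implementation would fetch all ids at once and return the
			// entities in the same order as ids, with one error slot per key.
			entities := make([]model.Entity, len(ids))
			errors := make([]error, len(ids))
			return entities, errors
		},
		Wait:     1 * time.Millisecond,
		MaxBatch: 100,
	})
}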
// LoadAllThunk returns a function that when called will block waiting for the Entitys.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *EntitiesByParticipantIDLoader) LoadAllThunk(keys []int) func() ([]model.Entity, []error) {
	results := make([]func() (model.Entity, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]model.Entity, []error) {
		entitys := make([]model.Entity, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			entitys[i], errors[i] = thunk()
		}
		return entitys, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.Clear(key) and then call
// loader.Prime(key, value).)
func (l *EntitiesByParticipantIDLoader) Prime(key int, value model.Entity) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		l.unsafeSet(key, value)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *EntitiesByParticipantIDLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *EntitiesByParticipantIDLoader) unsafeSet(key int, value model.Entity) {
	if l.cache == nil {
		l.cache = map[int]model.Entity{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch; if it's not found
// it will add the key to the batch
func (b *entitiesByParticipantIDLoaderBatch) keyIndex(l *EntitiesByParticipantIDLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *entitiesByParticipantIDLoaderBatch) startTimer(l *EntitiesByParticipantIDLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *entitiesByParticipantIDLoaderBatch) end(l *EntitiesByParticipantIDLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
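// loadEntitiesConcurrentlyExample is likewise an illustrative sketch, not part
// of the generated code: it shows how concurrent callers (for example GraphQL
// resolvers) invoking Load for different keys within the Wait window are
// batched together and served by a shared Fetch call (up to MaxBatch keys),
// with successful results cached per key. The function name and the ids
// parameter are hypothetical.
func loadEntitiesConcurrentlyExample(loader *EntitiesByParticipantIDLoader, ids []int) {
	var wg sync.WaitGroup
	for _, id := range ids {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Load blocks until this key's batch has been fetched, then
			// returns the entity (or the per-key error) for id.
			if _, err := loader.Load(id); err != nil {
				// a real caller would surface err to its resolver here
				return
			}
		}(id)
	}
	wg.Wait()
}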