~edwargix/git.sr.ht

c369d2c199168d8bc341185c1ca44b57fddd459f — Drew DeVault 5 years ago 5be87e6
api/loaders: generate modules on build
4 files changed, 258 insertions(+), 27 deletions(-)

A api/loaders/gen
M api/loaders/middleware.go
A api/loaders/usersbyidloader_gen.go
R api/loaders/{userloader_gen.go => usersbynameloader_gen.go}
A api/loaders/gen => api/loaders/gen +3 -0
@@ -0,0 +1,3 @@
#!/bin/sh
exec go run github.com/vektah/dataloaden \
	"$1" "$2" '*git.sr.ht/~sircmpwn/git.sr.ht/'"$3"

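The wrapper passes the loader name, key type, and model type straight through to dataloaden, prefixing the model type with the module path. With the go:generate directives added to middleware.go below, the third directive, for example, expands to roughly

    go run github.com/vektah/dataloaden UsersByIDLoader int '*git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model.User'

which is the invocation that produces the usersbyidloader_gen.go file added in this commit.
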
M api/loaders/middleware.go => api/loaders/middleware.go +6 -2
@@ -1,5 +1,9 @@
package loaders

+//go:generate ./gen RepositoriesByIDLoader int api/graph/model.Repository
+//go:generate ./gen UsersByNameLoader string api/graph/model.User
+//go:generate ./gen UsersByIDLoader int api/graph/model.User

import (
	"context"
	"database/sql"


@@ -19,7 +23,7 @@ type contextKey struct {
}

type Loaders struct {
-	UsersByID        UserLoader
+	UsersByID        UsersByIDLoader
	RepositoriesByID RepositoriesByIDLoader
}



@@ -108,7 +112,7 @@ func Middleware(db *sql.DB) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := context.WithValue(r.Context(), loadersCtxKey, &Loaders{
-				UsersByID: UserLoader{
+				UsersByID: UsersByIDLoader{
					maxBatch: 100,
					wait: 1 * time.Millisecond,
					fetch: fetchUsersByID(r.Context(), db),

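For context, a minimal sketch (not part of this commit; the function name and keys are illustrative, and the code is assumed to live inside the loaders package so it can reach loadersCtxKey) of how request-handling code could pull the Loaders value that Middleware stores on the context and batch lookups through the renamed UsersByIDLoader:

package loaders

import (
	"context"

	"git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model"
)

// lookupTwoUsers is illustrative only. Both keys join the same batch, so
// after the 1ms wait configured in Middleware, the fetch function returned
// by fetchUsersByID receives both keys in a single call, and the results
// are cached per key for the rest of the request.
func lookupTwoUsers(ctx context.Context) ([]*model.User, []error) {
	l, ok := ctx.Value(loadersCtxKey).(*Loaders)
	if !ok {
		panic("no loaders on this context; was the middleware installed?")
	}
	return l.UsersByID.LoadAll([]int{1, 2})
}
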
A api/loaders/usersbyidloader_gen.go => api/loaders/usersbyidloader_gen.go +224 -0
@@ -0,0 +1,224 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
	"sync"
	"time"

	"git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model"
)

// UsersByIDLoaderConfig captures the config to create a new UsersByIDLoader
type UsersByIDLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([]*model.User, []error)

	// Wait is how long wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
	MaxBatch int
}

// NewUsersByIDLoader creates a new UsersByIDLoader given a fetch, wait, and maxBatch
func NewUsersByIDLoader(config UsersByIDLoaderConfig) *UsersByIDLoader {
	return &UsersByIDLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// UsersByIDLoader batches and caches requests
type UsersByIDLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([]*model.User, []error)

	// how long to done before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int]*model.User

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *usersByIDLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

type usersByIDLoaderBatch struct {
	keys    []int
	data    []*model.User
	error   []error
	closing bool
	done    chan struct{}
}

// Load a User by key, batching and caching will be applied automatically
func (l *UsersByIDLoader) Load(key int) (*model.User, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadThunk(key int) func() (*model.User, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (*model.User, error) {
			return it, nil
		}
	}
	if l.batch == nil {
		l.batch = &usersByIDLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (*model.User, error) {
		<-batch.done

		var data *model.User
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *UsersByIDLoader) LoadAll(keys []int) ([]*model.User, []error) {
	results := make([]func() (*model.User, error), len(keys))

	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	users := make([]*model.User, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		users[i], errors[i] = thunk()
	}
	return users, errors
}

// LoadAllThunk returns a function that when called will block waiting for a Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *UsersByIDLoader) LoadAllThunk(keys []int) func() ([]*model.User, []error) {
	results := make([]func() (*model.User, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]*model.User, []error) {
		users := make([]*model.User, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			users[i], errors[i] = thunk()
		}
		return users, errors
	}
}

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *UsersByIDLoader) Prime(key int, value *model.User) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
		// and end up with the whole cache pointing to the same value.
		cpy := *value
		l.unsafeSet(key, &cpy)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *UsersByIDLoader) Clear(key int) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

func (l *UsersByIDLoader) unsafeSet(key int, value *model.User) {
	if l.cache == nil {
		l.cache = map[int]*model.User{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *usersByIDLoaderBatch) keyIndex(l *UsersByIDLoader, key int) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	if pos == 0 {
		go b.startTimer(l)
	}

	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}

func (b *usersByIDLoaderBatch) startTimer(l *UsersByIDLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

func (b *usersByIDLoaderBatch) end(l *UsersByIDLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

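A minimal sketch of the generated API in isolation (the stub Fetch and the import path for the loaders package are assumptions for this example, not part of the commit): Load calls issued within the Wait window are coalesced into one Fetch call of up to MaxBatch keys, and each result is cached by key afterwards.

package loaders_test

import (
	"fmt"
	"time"

	"git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model"
	"git.sr.ht/~sircmpwn/git.sr.ht/api/loaders"
)

func ExampleNewUsersByIDLoader() {
	loader := loaders.NewUsersByIDLoader(loaders.UsersByIDLoaderConfig{
		MaxBatch: 100,
		Wait:     1 * time.Millisecond,
		// Stub fetch for illustration; in this commit, Middleware wires
		// Fetch to fetchUsersByID(r.Context(), db) instead.
		Fetch: func(keys []int) ([]*model.User, []error) {
			users := make([]*model.User, len(keys))
			for i := range keys {
				users[i] = &model.User{}
			}
			return users, nil
		},
	})

	user, err := loader.Load(42)
	fmt.Println(user, err)
}
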
R api/loaders/userloader_gen.go => api/loaders/usersbynameloader_gen.go +25 -25
@@ -9,10 +9,10 @@ import (
	"git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model"
)

-// UserLoaderConfig captures the config to create a new UserLoader
-type UserLoaderConfig struct {
+// UsersByNameLoaderConfig captures the config to create a new UsersByNameLoader
+type UsersByNameLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
-	Fetch func(keys []int) ([]*model.User, []error)
+	Fetch func(keys []string) ([]*model.User, []error)

	// Wait is how long wait before sending a batch
	Wait time.Duration


@@ -21,19 +21,19 @@ type UserLoaderConfig struct {
	MaxBatch int
}

-// NewUserLoader creates a new UserLoader given a fetch, wait, and maxBatch
-func NewUserLoader(config UserLoaderConfig) *UserLoader {
-	return &UserLoader{
+// NewUsersByNameLoader creates a new UsersByNameLoader given a fetch, wait, and maxBatch
+func NewUsersByNameLoader(config UsersByNameLoaderConfig) *UsersByNameLoader {
+	return &UsersByNameLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

-// UserLoader batches and caches requests
-type UserLoader struct {
+// UsersByNameLoader batches and caches requests
+type UsersByNameLoader struct {
	// this method provides the data for the loader
-	fetch func(keys []int) ([]*model.User, []error)
+	fetch func(keys []string) ([]*model.User, []error)

	// how long to done before sending a batch
	wait time.Duration


@@ -44,18 +44,18 @@ type UserLoader struct {
	// INTERNAL

	// lazily created cache
-	cache map[int]*model.User
+	cache map[string]*model.User

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
-	batch *userLoaderBatch
+	batch *usersByNameLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}

-type userLoaderBatch struct {
-	keys    []int
+type usersByNameLoaderBatch struct {
+	keys    []string
	data    []*model.User
	error   []error
	closing bool


@@ -63,14 +63,14 @@ type userLoaderBatch struct {
}

// Load a User by key, batching and caching will be applied automatically
-func (l *UserLoader) Load(key int) (*model.User, error) {
+func (l *UsersByNameLoader) Load(key string) (*model.User, error) {
	return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a User.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
-func (l *UserLoader) LoadThunk(key int) func() (*model.User, error) {
+func (l *UsersByNameLoader) LoadThunk(key string) func() (*model.User, error) {
	l.mu.Lock()
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()


@@ -79,7 +79,7 @@ func (l *UserLoader) LoadThunk(key int) func() (*model.User, error) {
		}
	}
	if l.batch == nil {
-		l.batch = &userLoaderBatch{done: make(chan struct{})}
+		l.batch = &usersByNameLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)


@@ -113,7 +113,7 @@ func (l *UserLoader) LoadThunk(key int) func() (*model.User, error) {

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
-func (l *UserLoader) LoadAll(keys []int) ([]*model.User, []error) {
+func (l *UsersByNameLoader) LoadAll(keys []string) ([]*model.User, []error) {
	results := make([]func() (*model.User, error), len(keys))

	for i, key := range keys {


@@ -131,7 +131,7 @@ func (l *UserLoader) LoadAll(keys []int) ([]*model.User, []error) {
// LoadAllThunk returns a function that when called will block waiting for a Users.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
-func (l *UserLoader) LoadAllThunk(keys []int) func() ([]*model.User, []error) {
+func (l *UsersByNameLoader) LoadAllThunk(keys []string) func() ([]*model.User, []error) {
	results := make([]func() (*model.User, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)


@@ -149,7 +149,7 @@ func (l *UserLoader) LoadAllThunk(keys []int) func() ([]*model.User, []error) {
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
-func (l *UserLoader) Prime(key int, value *model.User) bool {
+func (l *UsersByNameLoader) Prime(key string, value *model.User) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {


@@ -163,22 +163,22 @@ func (l *UserLoader) Prime(key int, value *model.User) bool {
}

// Clear the value at key from the cache, if it exists
-func (l *UserLoader) Clear(key int) {
+func (l *UsersByNameLoader) Clear(key string) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

-func (l *UserLoader) unsafeSet(key int, value *model.User) {
+func (l *UsersByNameLoader) unsafeSet(key string, value *model.User) {
	if l.cache == nil {
-		l.cache = map[int]*model.User{}
+		l.cache = map[string]*model.User{}
	}
	l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
-func (b *userLoaderBatch) keyIndex(l *UserLoader, key int) int {
+func (b *usersByNameLoaderBatch) keyIndex(l *UsersByNameLoader, key string) int {
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i


@@ -202,7 +202,7 @@ func (b *userLoaderBatch) keyIndex(l *UserLoader, key int) int {
	return pos
}

-func (b *userLoaderBatch) startTimer(l *UserLoader) {
+func (b *usersByNameLoaderBatch) startTimer(l *UsersByNameLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()



@@ -218,7 +218,7 @@ func (b *userLoaderBatch) startTimer(l *UserLoader) {
	b.end(l)
}

-func (b *userLoaderBatch) end(l *UserLoader) {
+func (b *usersByNameLoaderBatch) end(l *UsersByNameLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}
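
The LoadThunk comments above describe deferring the blocking step; a minimal sketch of that pattern (function and variable names are illustrative, not part of this commit), queuing a key on each of the two renamed loaders so each can batch, then collecting the results:

package loaders

import "git.sr.ht/~sircmpwn/git.sr.ht/api/graph/model"

// loadTwoUsers is illustrative only: both thunks are requested before either
// result is awaited, so each loader can fold its key into the current batch
// rather than fetching one key at a time.
func loadTwoUsers(byID *UsersByIDLoader, byName *UsersByNameLoader,
	id int, name string) (*model.User, *model.User, error) {
	byIDThunk := byID.LoadThunk(id)
	byNameThunk := byName.LoadThunk(name)

	first, err := byIDThunk()
	if err != nil {
		return nil, nil, err
	}
	second, err := byNameThunk()
	if err != nil {
		return nil, nil, err
	}
	return first, second, nil
}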