M graphql/graph/generated/generated.go => graphql/graph/generated/generated.go +174 -5
@@ 97,11 97,13 @@ type ComplexityRoot struct {
}
Query struct {
- Me func(childComplexity int) int
- Repositories func(childComplexity int, next *int, filter *model.FilterBy) int
- Repository func(childComplexity int, id int) int
- User func(childComplexity int, username string) int
- Version func(childComplexity int) int
+ Me func(childComplexity int) int
+ Repositories func(childComplexity int, next *int, filter *model.FilterBy) int
+ Repository func(childComplexity int, id int) int
+ RepositoryByName func(childComplexity int, name string) int
+ RepositoryByOwner func(childComplexity int, owner string, repo string) int
+ User func(childComplexity int, username string) int
+ Version func(childComplexity int) int
}
Reference struct {
@@ 196,6 198,8 @@ type QueryResolver interface {
User(ctx context.Context, username string) (*model.User, error)
Repositories(ctx context.Context, next *int, filter *model.FilterBy) ([]*model.Repository, error)
Repository(ctx context.Context, id int) (*model.Repository, error)
+ RepositoryByName(ctx context.Context, name string) (*model.Repository, error)
+ RepositoryByOwner(ctx context.Context, owner string, repo string) (*model.Repository, error)
}
type RepositoryResolver interface {
Owner(ctx context.Context, obj *model.Repository) (model.Entity, error)
@@ 525,6 529,30 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
return e.complexity.Query.Repository(childComplexity, args["id"].(int)), true
+ case "Query.repositoryByName":
+ if e.complexity.Query.RepositoryByName == nil {
+ break
+ }
+
+ args, err := ec.field_Query_repositoryByName_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.RepositoryByName(childComplexity, args["name"].(string)), true
+
+ case "Query.repositoryByOwner":
+ if e.complexity.Query.RepositoryByOwner == nil {
+ break
+ }
+
+ args, err := ec.field_Query_repositoryByOwner_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.RepositoryByOwner(childComplexity, args["owner"].(string), args["repo"].(string)), true
+
case "Query.user":
if e.complexity.Query.User == nil {
break
@@ 1254,6 1282,13 @@ type Query {
# Returns a specific repository
repository(id: Int!): Repository
+
+ # Returns a specific repository, owned by the authenticated user.
+ repositoryByName(name: String!): Repository
+
+ # Returns a specific repository, owned by the entity with the given
+ # canonical name (e.g. "~sircmpwn").
+ repositoryByOwner(owner: String!, repo: String!): Repository
}
# Details for repository creation or updates
@@ 1475,6 1510,42 @@ func (ec *executionContext) field_Query_repositories_args(ctx context.Context, r
return args, nil
}
+func (ec *executionContext) field_Query_repositoryByName_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["name"]; ok {
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["name"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_repositoryByOwner_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["owner"]; ok {
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["owner"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["repo"]; ok {
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["repo"] = arg1
+ return args, nil
+}
+
func (ec *executionContext) field_Query_repository_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
@@ 3099,6 3170,82 @@ func (ec *executionContext) _Query_repository(ctx context.Context, field graphql
return ec.marshalORepository2ᚖgitᚗsrᚗhtᚋאsircmpwnᚋgitᚗsrᚗhtᚋgraphqlᚋgraphᚋmodelᚐRepository(ctx, field.Selections, res)
}
+func (ec *executionContext) _Query_repositoryByName(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ fc := &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ Args: nil,
+ IsMethod: true,
+ }
+
+ ctx = graphql.WithFieldContext(ctx, fc)
+ rawArgs := field.ArgumentMap(ec.Variables)
+ args, err := ec.field_Query_repositoryByName_args(ctx, rawArgs)
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ fc.Args = args
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().RepositoryByName(rctx, args["name"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*model.Repository)
+ fc.Result = res
+ return ec.marshalORepository2ᚖgitᚗsrᚗhtᚋאsircmpwnᚋgitᚗsrᚗhtᚋgraphqlᚋgraphᚋmodelᚐRepository(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) _Query_repositoryByOwner(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ fc := &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ Args: nil,
+ IsMethod: true,
+ }
+
+ ctx = graphql.WithFieldContext(ctx, fc)
+ rawArgs := field.ArgumentMap(ec.Variables)
+ args, err := ec.field_Query_repositoryByOwner_args(ctx, rawArgs)
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ fc.Args = args
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().RepositoryByOwner(rctx, args["owner"].(string), args["repo"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*model.Repository)
+ fc.Result = res
+ return ec.marshalORepository2ᚖgitᚗsrᚗhtᚋאsircmpwnᚋgitᚗsrᚗhtᚋgraphqlᚋgraphᚋmodelᚐRepository(ctx, field.Selections, res)
+}
+
func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
defer func() {
if r := recover(); r != nil {
@@ 6477,6 6624,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr
res = ec._Query_repository(ctx, field)
return res
})
+ case "repositoryByName":
+ field := field
+ out.Concurrently(i, func() (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_repositoryByName(ctx, field)
+ return res
+ })
+ case "repositoryByOwner":
+ field := field
+ out.Concurrently(i, func() (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_repositoryByOwner(ctx, field)
+ return res
+ })
case "__type":
out.Values[i] = ec._Query___type(ctx, field)
case "__schema":
M graphql/graph/model/repository.go => graphql/graph/model/repository.go +24 -0
@@ 27,6 27,30 @@ type Repository struct {
repo *git.Repository
}
+func (r *Repository) Rows() string {
+ return `
+ repo.id,
+ repo.created, repo.updated,
+ repo.name, repo.description,
+ repo.visibility,
+ repo.upstream_uri,
+ repo.path,
+ repo.owner_id
+ `
+}
+
+func (r *Repository) Fields() []interface{} {
+ return []interface{}{
+ &r.ID,
+ &r.Created, &r.Updated,
+ &r.Name, &r.Description,
+ &r.Visibility,
+ &r.UpstreamURL,
+ &r.Path,
+ &r.OwnerID,
+ }
+}
+
func (r *Repository) Repo() *git.Repository {
if r.repo != nil {
return r.repo
M graphql/graph/schema.graphqls => graphql/graph/schema.graphqls +7 -0
@@ 240,6 240,13 @@ type Query {
# Returns a specific repository
repository(id: Int!): Repository
+
+ # Returns a specific repository, owned by the authenticated user.
+ repositoryByName(name: String!): Repository
+
+ # Returns a specific repository, owned by the entity with the given
+ # canonical name (e.g. "~sircmpwn").
+ repositoryByOwner(owner: String!, repo: String!): Repository
}
# Details for repository creation or updates
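
For reference, a client could exercise the two new fields with a query along these lines; the owner, repository name, and field selection are illustrative placeholders, not values from this change:

    # Illustrative query only; names are placeholders.
    query {
      repositoryByName(name: "example") {
        id
        name
      }
      repositoryByOwner(owner: "~sircmpwn", repo: "example") {
        id
        name
      }
    }
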
M graphql/graph/schema.resolvers.go => graphql/graph/schema.resolvers.go +9 -1
@@ 77,11 77,19 @@ func (r *queryResolver) Repositories(ctx context.Context, next *int, filter *mod
}
func (r *queryResolver) Repository(ctx context.Context, id int) (*model.Repository, error) {
+ return loaders.ForContext(ctx).RepositoriesByID.Load(id)
+}
+
+func (r *queryResolver) RepositoryByName(ctx context.Context, name string) (*model.Repository, error) {
+ panic(fmt.Errorf("not implemented"))
+}
+
+func (r *queryResolver) RepositoryByOwner(ctx context.Context, owner string, repo string) (*model.Repository, error) {
panic(fmt.Errorf("not implemented"))
}
func (r *repositoryResolver) Owner(ctx context.Context, obj *model.Repository) (model.Entity, error) {
- return loaders.ForContext(ctx).UsersById.Load(obj.OwnerID)
+ return loaders.ForContext(ctx).UsersByID.Load(obj.OwnerID)
}
func (r *repositoryResolver) References(ctx context.Context, obj *model.Repository, count *int, next *string, glob *string) ([]*model.Reference, error) {
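
Both new resolvers are stubbed out with panics for now. As a rough sketch only (not code from this change), RepositoryByName could later be filled in as a single-row lookup that reuses the Rows()/Fields() helpers from model/repository.go; authUserID is a placeholder for whatever ID the auth middleware ends up exposing, and context, database/sql, and the model package are assumed to be imported:

    // Sketch only; authUserID stands in for the authenticated user's ID.
    func repositoryByName(ctx context.Context, db *sql.DB, authUserID int, name string) (*model.Repository, error) {
        repo := model.Repository{}
        row := db.QueryRowContext(ctx, `
            SELECT `+repo.Rows()+`
            FROM repository repo
            WHERE repo.owner_id = $1 AND repo.name = $2
        `, authUserID, name)
        if err := row.Scan(repo.Fields()...); err != nil {
            if err == sql.ErrNoRows {
                return nil, nil // the schema field is nullable
            }
            return nil, err
        }
        return &repo, nil
    }

RepositoryByOwner would additionally need to resolve the canonical name (e.g. "~sircmpwn") to an owner ID before the same lookup applies.
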
M graphql/loaders/middleware.go => graphql/loaders/middleware.go +55 -4
@@ 18,10 18,11 @@ type contextKey struct {
}
type Loaders struct {
- UsersById UserLoader
+ UsersByID UserLoader
+ RepositoriesByID RepositoriesByIDLoader
}
-func fetchUsersById(ctx context.Context,
+func fetchUsersByID(ctx context.Context,
db *sql.DB) func (ids []int) ([]*model.User, []error) {
return func (ids []int) ([]*model.User, []error) {
@@ 59,14 60,64 @@ func fetchUsersById(ctx context.Context,
}
}
+func fetchRepositoriesByID(ctx context.Context,
+ db *sql.DB) func (ids []int) ([]*model.Repository, []error) {
+
+ return func (ids []int) ([]*model.Repository, []error) {
+ var (
+ err error
+ rows *sql.Rows
+ repo model.Repository
+ )
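+ // Restrict results to repositories the requesting user owns, has an
+ // access grant for, or that are not private; the user ID is hard-coded
+ // to 1 here for now.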
+ if rows, err = db.QueryContext(ctx, `
+ SELECT DISTINCT `+repo.Rows()+`
+ FROM repository repo
+ FULL OUTER JOIN
+ access ON repo.id = access.repo_id
+ WHERE
+ repo.id = ANY($1)
+ AND (access.user_id = 1
+ OR repo.owner_id = 1
+ OR repo.visibility != 'private')
+ `, pq.Array(ids)); err != nil {
+ panic(err)
+ }
+ defer rows.Close()
+
+ reposById := map[int]*model.Repository{}
+ for rows.Next() {
+ repo := model.Repository{}
+ if err := rows.Scan(repo.Fields()...); err != nil {
+ panic(err)
+ }
+ reposById[repo.ID] = &repo
+ }
+ if err = rows.Err(); err != nil {
+ panic(err)
+ }
+
+ repos := make([]*model.Repository, len(ids))
+ for i, id := range ids {
+ repos[i] = reposById[id]
+ }
+
+ return repos, nil
+ }
+}
+
func Middleware(db *sql.DB) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), loadersCtxKey, &Loaders{
- UsersById: UserLoader{
+ UsersByID: UserLoader{
+ maxBatch: 100,
+ wait: 1 * time.Millisecond,
+ fetch: fetchUsersByID(r.Context(), db),
+ },
+ RepositoriesByID: RepositoriesByIDLoader{
maxBatch: 100,
wait: 1 * time.Millisecond,
- fetch: fetchUsersById(r.Context(), db),
+ fetch: fetchRepositoriesByID(r.Context(), db),
},
})
r = r.WithContext(ctx)
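
Both loaders batch up to 100 keys per 1ms window, so repository lookups issued concurrently while resolving a single request collapse into one id = ANY($1) query. Resolvers can also fetch several repositories at once through the generated LoadAll; a small illustrative call from a resolver (the IDs are placeholders):

    // Illustrative only: batched lookup through the request-scoped loader.
    repos, errs := loaders.ForContext(ctx).RepositoriesByID.LoadAll([]int{1, 2, 3})
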
A graphql/loaders/repositoriesbyidloader_gen.go => graphql/loaders/repositoriesbyidloader_gen.go +224 -0
@@ 0,0 1,224 @@
+// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
+
+package loaders
+
+import (
+ "sync"
+ "time"
+
+ "git.sr.ht/~sircmpwn/git.sr.ht/graphql/graph/model"
+)
+
+// RepositoriesByIDLoaderConfig captures the config to create a new RepositoriesByIDLoader
+type RepositoriesByIDLoaderConfig struct {
+ // Fetch is a method that provides the data for the loader
+ Fetch func(keys []int) ([]*model.Repository, []error)
+
+ // Wait is how long to wait before sending a batch
+ Wait time.Duration
+
+ // MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
+ MaxBatch int
+}
+
+// NewRepositoriesByIDLoader creates a new RepositoriesByIDLoader given a fetch, wait, and maxBatch
+func NewRepositoriesByIDLoader(config RepositoriesByIDLoaderConfig) *RepositoriesByIDLoader {
+ return &RepositoriesByIDLoader{
+ fetch: config.Fetch,
+ wait: config.Wait,
+ maxBatch: config.MaxBatch,
+ }
+}
+
+// RepositoriesByIDLoader batches and caches requests
+type RepositoriesByIDLoader struct {
+ // this method provides the data for the loader
+ fetch func(keys []int) ([]*model.Repository, []error)
+
+ // how long to wait before sending a batch
+ wait time.Duration
+
+ // this will limit the maximum number of keys to send in one batch, 0 = no limit
+ maxBatch int
+
+ // INTERNAL
+
+ // lazily created cache
+ cache map[int]*model.Repository
+
+ // the current batch. keys will continue to be collected until timeout is hit,
+ // then everything will be sent to the fetch method and out to the listeners
+ batch *repositoriesByIDLoaderBatch
+
+ // mutex to prevent races
+ mu sync.Mutex
+}
+
+type repositoriesByIDLoaderBatch struct {
+ keys []int
+ data []*model.Repository
+ error []error
+ closing bool
+ done chan struct{}
+}
+
+// Load a Repository by key, batching and caching will be applied automatically
+func (l *RepositoriesByIDLoader) Load(key int) (*model.Repository, error) {
+ return l.LoadThunk(key)()
+}
+
+// LoadThunk returns a function that when called will block waiting for a Repository.
+// This method should be used if you want one goroutine to make requests to many
+// different data loaders without blocking until the thunk is called.
+func (l *RepositoriesByIDLoader) LoadThunk(key int) func() (*model.Repository, error) {
+ l.mu.Lock()
+ if it, ok := l.cache[key]; ok {
+ l.mu.Unlock()
+ return func() (*model.Repository, error) {
+ return it, nil
+ }
+ }
+ if l.batch == nil {
+ l.batch = &repositoriesByIDLoaderBatch{done: make(chan struct{})}
+ }
+ batch := l.batch
+ pos := batch.keyIndex(l, key)
+ l.mu.Unlock()
+
+ return func() (*model.Repository, error) {
+ <-batch.done
+
+ var data *model.Repository
+ if pos < len(batch.data) {
+ data = batch.data[pos]
+ }
+
+ var err error
+ // it's convenient to be able to return a single error for everything
+ if len(batch.error) == 1 {
+ err = batch.error[0]
+ } else if batch.error != nil {
+ err = batch.error[pos]
+ }
+
+ if err == nil {
+ l.mu.Lock()
+ l.unsafeSet(key, data)
+ l.mu.Unlock()
+ }
+
+ return data, err
+ }
+}
+
+// LoadAll fetches many keys at once. It will be broken into appropriate sized
+// sub batches depending on how the loader is configured
+func (l *RepositoriesByIDLoader) LoadAll(keys []int) ([]*model.Repository, []error) {
+ results := make([]func() (*model.Repository, error), len(keys))
+
+ for i, key := range keys {
+ results[i] = l.LoadThunk(key)
+ }
+
+ repositorys := make([]*model.Repository, len(keys))
+ errors := make([]error, len(keys))
+ for i, thunk := range results {
+ repositorys[i], errors[i] = thunk()
+ }
+ return repositorys, errors
+}
+
+// LoadAllThunk returns a function that when called will block waiting for the Repositorys.
+// This method should be used if you want one goroutine to make requests to many
+// different data loaders without blocking until the thunk is called.
+func (l *RepositoriesByIDLoader) LoadAllThunk(keys []int) func() ([]*model.Repository, []error) {
+ results := make([]func() (*model.Repository, error), len(keys))
+ for i, key := range keys {
+ results[i] = l.LoadThunk(key)
+ }
+ return func() ([]*model.Repository, []error) {
+ repositorys := make([]*model.Repository, len(keys))
+ errors := make([]error, len(keys))
+ for i, thunk := range results {
+ repositorys[i], errors[i] = thunk()
+ }
+ return repositorys, errors
+ }
+}
+
+// Prime the cache with the provided key and value. If the key already exists, no change is made
+// and false is returned.
+// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
+func (l *RepositoriesByIDLoader) Prime(key int, value *model.Repository) bool {
+ l.mu.Lock()
+ var found bool
+ if _, found = l.cache[key]; !found {
+ // make a copy when writing to the cache, it's easy to pass a pointer in from a loop var
+ // and end up with the whole cache pointing to the same value.
+ cpy := *value
+ l.unsafeSet(key, &cpy)
+ }
+ l.mu.Unlock()
+ return !found
+}
+
+// Clear the value at key from the cache, if it exists
+func (l *RepositoriesByIDLoader) Clear(key int) {
+ l.mu.Lock()
+ delete(l.cache, key)
+ l.mu.Unlock()
+}
+
+func (l *RepositoriesByIDLoader) unsafeSet(key int, value *model.Repository) {
+ if l.cache == nil {
+ l.cache = map[int]*model.Repository{}
+ }
+ l.cache[key] = value
+}
+
+// keyIndex will return the location of the key in the batch, if it's not found
+// it will add the key to the batch
+func (b *repositoriesByIDLoaderBatch) keyIndex(l *RepositoriesByIDLoader, key int) int {
+ for i, existingKey := range b.keys {
+ if key == existingKey {
+ return i
+ }
+ }
+
+ pos := len(b.keys)
+ b.keys = append(b.keys, key)
+ if pos == 0 {
+ go b.startTimer(l)
+ }
+
+ if l.maxBatch != 0 && pos >= l.maxBatch-1 {
+ if !b.closing {
+ b.closing = true
+ l.batch = nil
+ go b.end(l)
+ }
+ }
+
+ return pos
+}
+
+func (b *repositoriesByIDLoaderBatch) startTimer(l *RepositoriesByIDLoader) {
+ time.Sleep(l.wait)
+ l.mu.Lock()
+
+ // we must have hit a batch limit and are already finalizing this batch
+ if b.closing {
+ l.mu.Unlock()
+ return
+ }
+
+ l.batch = nil
+ l.mu.Unlock()
+
+ b.end(l)
+}
+
+func (b *repositoriesByIDLoaderBatch) end(l *RepositoriesByIDLoader) {
+ b.data, b.error = l.fetch(b.keys)
+ close(b.done)
+}
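
The generated loader also exposes Prime, which warms the request-scoped cache; as a sketch, a resolver that has already fetched a slice of repositories (repos here is assumed) could prime the loader so that later Load(id) calls for the same rows skip the database:

    // Illustrative only; repos is an already-fetched []*model.Repository.
    l := loaders.ForContext(ctx)
    for _, repo := range repos {
        l.RepositoriesByID.Prime(repo.ID, repo)
    }

Prime copies the value before caching it, so handing it the loop variable's pointer is safe.
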
M graphql/server.go => graphql/server.go +3 -0
@@ 51,6 51,9 @@ func main() {
}
router := chi.NewRouter()
+ // TODO: Add middleware to:
+ // - Gracefully handle panics
+ // - Log queries in debug mode
router.Use(auth.Middleware(db))
router.Use(loaders.Middleware(db))