diff --git a/chunker/chunk.go b/chunker/chunk.go index 3051df68ead..704ad51c38a 100644 --- a/chunker/chunk.go +++ b/chunker/chunk.go @@ -84,7 +84,8 @@ func NewChunker(inputFormat InputFormat, batchSize int) Chunker { nqs: NewNQuadBuffer(batchSize), } default: - panic("unknown input format") + x.Panic(errors.New("unknown input format")) + return nil } } diff --git a/chunker/json_parser_test.go b/chunker/json_parser_test.go index 0ab10f88ee1..3d653b163e2 100644 --- a/chunker/json_parser_test.go +++ b/chunker/json_parser_test.go @@ -87,6 +87,7 @@ func (exp *Experiment) verify() { require.NoError(exp.t, dg.Alter(ctx, &api.Operation{DropAll: true}), "drop all failed") require.NoError(exp.t, dg.Alter(ctx, &api.Operation{Schema: exp.schema}), "schema change failed") + require.NoError(exp.t, testutil.WaitForAlter(ctx, dg, exp.schema)) _, err = dg.NewTxn().Mutate(ctx, &api.Mutation{Set: exp.nqs, CommitNow: true}) @@ -134,14 +135,14 @@ func TestNquadsFromJson1(t *testing.T) { name age married -address +address }}`, expected: `{"alice": [ {"name": "Alice", "age": 26, "married": true, "address": {"coordinates": [2,1.1], "type": "Point"}} -]} +]} `} exp.verify() } diff --git a/codec/benchmark/benchmark.go b/codec/benchmark/benchmark.go index 1365207f90a..9a52edf5562 100644 --- a/codec/benchmark/benchmark.go +++ b/codec/benchmark/benchmark.go @@ -25,6 +25,7 @@ package main import ( "compress/gzip" "encoding/binary" + "errors" "fmt" "io" "os" @@ -59,20 +60,20 @@ const ( func read(filename string) []int { f, err := os.Open(filename) if err != nil { - panic(err) + x.Panic(err) } defer f.Close() fgzip, err := gzip.NewReader(f) if err != nil { - panic(err) + x.Panic(err) } defer fgzip.Close() buf := make([]byte, 4) _, err = fgzip.Read(buf) if err != nil && err != io.EOF { - panic(err) + x.Panic(err) } ndata := binary.LittleEndian.Uint32(buf) @@ -80,7 +81,7 @@ func read(filename string) []int { for i := range data { _, err = fgzip.Read(buf) if err != nil && err != io.EOF { - panic(err) + x.Panic(err) } data[i] = int(binary.LittleEndian.Uint32(buf)) @@ -176,7 +177,7 @@ func fmtBenchmark(name string, speed int) { func main() { data := read("clustered1M.bin.gz") if !sort.IsSorted(sort.IntSlice(data)) { - panic("test data must be sorted") + x.Panic(errors.New("test data must be sorted")) } chunks64 := chunkify64(data) diff --git a/contrib/integration/swap/main.go b/contrib/integration/swap/main.go index b30e77668f1..ecfdd18d17b 100644 --- a/contrib/integration/swap/main.go +++ b/contrib/integration/swap/main.go @@ -32,6 +32,7 @@ import ( "github.com/dgraph-io/dgo/v2/protos/api" "github.com/dgraph-io/dgraph/testutil" "github.com/dgraph-io/dgraph/x" + "github.com/pkg/errors" ) var ( @@ -367,7 +368,7 @@ func checkInvariants(c *dgo.Dgraph, uids []string, sentences []string) error { sort.Strings(gotUids) sort.Strings(uids) if !reflect.DeepEqual(gotUids, uids) { - panic(fmt.Sprintf(`query: %s\n + x.Panic(errors.Errorf(`query: %s\n Uids in index for %q didn't match calculated: %v. 
Len: %d got: %v diff --git a/contrib/integration/testtxn/main_test.go b/contrib/integration/testtxn/main_test.go index 18e8349dc60..d6040d29c73 100644 --- a/contrib/integration/testtxn/main_test.go +++ b/contrib/integration/testtxn/main_test.go @@ -371,6 +371,9 @@ func TestIgnoreIndexConflict(t *testing.T) { if err := s.dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } + if err := testutil.WaitForAlter(context.Background(), s.dg, op.Schema); err != nil { + log.Fatal(err) + } txn := s.dg.NewTxn() mu := &api.Mutation{} @@ -424,9 +427,11 @@ func TestReadIndexKeySameTxn(t *testing.T) { if err := s.dg.Alter(context.Background(), op); err != nil { log.Fatal(err) } + if err := testutil.WaitForAlter(context.Background(), s.dg, op.Schema); err != nil { + log.Fatal(err) + } txn := s.dg.NewTxn() - mu := &api.Mutation{ CommitNow: true, SetJson: []byte(`{"name": "Manish"}`), @@ -933,8 +938,7 @@ func TestTxnDiscardBeforeCommit(t *testing.T) { } func alterSchema(dg *dgo.Dgraph, schema string) { - op := api.Operation{} - op.Schema = schema - err := dg.Alter(ctxb, &op) - x.Check(err) + op := api.Operation{Schema: schema} + x.Check(dg.Alter(ctxb, &op)) + x.Check(testutil.WaitForAlter(ctxb, dg, schema)) } diff --git a/dgraph/cmd/alpha/http_test.go b/dgraph/cmd/alpha/http_test.go index f3b5e9ce852..48c768361e1 100644 --- a/dgraph/cmd/alpha/http_test.go +++ b/dgraph/cmd/alpha/http_test.go @@ -57,7 +57,7 @@ func runGzipWithRetry(contentType, url string, buf io.Reader, gzReq, gzResp bool *http.Response, error) { client := &http.Client{} - numRetries := 2 + numRetries := 3 var resp *http.Response var err error diff --git a/dgraph/cmd/alpha/reindex_test.go b/dgraph/cmd/alpha/reindex_test.go index 1043e2ff3fa..62dcc8c8c19 100644 --- a/dgraph/cmd/alpha/reindex_test.go +++ b/dgraph/cmd/alpha/reindex_test.go @@ -17,7 +17,9 @@ package alpha import ( + "strings" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -185,3 +187,54 @@ func TestReindexReverseCount(t *testing.T) { } }`, res) } + +func checkSchema(t *testing.T, query, key string) { + for i := 0; i < 10; i++ { + res, _, err := queryWithTs(query, "application/graphql+-", "", 0) + require.NoError(t, err) + if strings.Contains(res, key) { + return + } + time.Sleep(100 * time.Millisecond) + + if i == 9 { + t.Fatalf("expected %v, got schema: %v", key, res) + } + } +} + +func TestBgIndexSchemaReverse(t *testing.T) { + require.NoError(t, dropAll()) + q1 := `schema(pred: [value]) {}` + require.NoError(t, alterSchema(`value: [uid] .`)) + checkSchema(t, q1, "list") + require.NoError(t, alterSchema(`value: [uid] @count @reverse .`)) + checkSchema(t, q1, "reverse") +} + +func TestBgIndexSchemaTokenizers(t *testing.T) { + require.NoError(t, dropAll()) + q1 := `schema(pred: [value]) {}` + require.NoError(t, alterSchema(`value: string @index(fulltext, hash) .`)) + checkSchema(t, q1, "fulltext") + require.NoError(t, alterSchema(`value: string @index(term, hash) @upsert .`)) + checkSchema(t, q1, "term") +} + +func TestBgIndexSchemaCount(t *testing.T) { + require.NoError(t, dropAll()) + q1 := `schema(pred: [value]) {}` + require.NoError(t, alterSchema(`value: [uid] @count .`)) + checkSchema(t, q1, "count") + require.NoError(t, alterSchema(`value: [uid] @reverse .`)) + checkSchema(t, q1, "reverse") +} + +func TestBgIndexSchemaReverseAndCount(t *testing.T) { + require.NoError(t, dropAll()) + q1 := `schema(pred: [value]) {}` + require.NoError(t, alterSchema(`value: [uid] @reverse .`)) + checkSchema(t, q1, "reverse") + require.NoError(t, alterSchema(`value: 
[uid] @count .`)) + checkSchema(t, q1, "count") +} diff --git a/dgraph/cmd/alpha/run.go b/dgraph/cmd/alpha/run.go index 5d1be91ce16..889c1ce7ef6 100644 --- a/dgraph/cmd/alpha/run.go +++ b/dgraph/cmd/alpha/run.go @@ -592,8 +592,16 @@ func run() { x.Config.QueryEdgeLimit = cast.ToUint64(Alpha.Conf.GetString("query_edge_limit")) x.Config.NormalizeNodeLimit = cast.ToInt(Alpha.Conf.GetString("normalize_node_limit")) - x.PrintVersion() + x.InitSentry(enc.EeBuild) + defer x.FlushSentry() + x.ConfigureSentryScope("alpha") + x.WrapPanics() + + // Simulate a Sentry exception or panic event as shown below. + // x.CaptureSentryException(errors.New("alpha exception")) + // x.Panic(errors.New("alpha manual panic will send 2 events")) + x.PrintVersion() glog.Infof("x.Config: %+v", x.Config) glog.Infof("x.WorkerConfig: %+v", x.WorkerConfig) glog.Infof("worker.Config: %+v", worker.Config) diff --git a/dgraph/cmd/alpha/run_test.go b/dgraph/cmd/alpha/run_test.go index 7000ce22f40..3a724dc6fed 100644 --- a/dgraph/cmd/alpha/run_test.go +++ b/dgraph/cmd/alpha/run_test.go @@ -107,10 +107,22 @@ func runJSONMutation(m string) error { } func alterSchema(s string) error { - _, _, err := runWithRetries("PUT", "", addr+"/alter", s) - if err != nil { - return errors.Wrapf(err, "while running request with retries") + for { + _, _, err := runWithRetries("PUT", "", addr+"/alter", s) + if err != nil && strings.Contains(err.Error(), "is already being modified") { + time.Sleep(time.Second) + continue + } else if err != nil { + return errors.Wrapf(err, "while running request with retries") + } else { + break + } } + + if err := waitForAlter(s); err != nil { + return errors.Wrapf(err, "while waiting for alter to complete") + } + return nil } @@ -124,6 +136,48 @@ func alterSchemaWithRetry(s string) error { return err } +// waitForAlter waits for the alter operation to complete. +func waitForAlter(s string) error { + ps, err := schema.Parse(s) + if err != nil { + return err + } + + for { + resp, _, err := queryWithTs("schema{}", "application/graphql+-", "false", 0) + if err != nil { + return err + } + + var result struct { + Data struct { + Schema []*pb.SchemaNode + } + } + if err := json.Unmarshal([]byte(resp), &result); err != nil { + return err + } + + actual := make(map[string]*pb.SchemaNode) + for _, rs := range result.Data.Schema { + actual[rs.Predicate] = rs + } + + done := true + for _, su := range ps.Preds { + if n, ok := actual[su.Predicate]; !ok || !testutil.SameIndexes(su, n) { + done = false + break + } + } + if done { + return nil + } + + time.Sleep(time.Second) + } +} + func dropAll() error { op := `{"drop_all": true}` _, _, err := runWithRetries("PUT", "", addr+"/alter", op) @@ -405,7 +459,7 @@ func TestSchemaMutationUidError1(t *testing.T) { var s2 = ` friend: uid . ` - require.Error(t, alterSchemaWithRetry(s2)) + require.Error(t, alterSchema(s2)) } // add index diff --git a/dgraph/cmd/alpha/upsert_test.go b/dgraph/cmd/alpha/upsert_test.go index b76c44e286b..7151aa95d4b 100644 --- a/dgraph/cmd/alpha/upsert_test.go +++ b/dgraph/cmd/alpha/upsert_test.go @@ -1613,8 +1613,6 @@ func TestUpsertWithValueVar(t *testing.T) { require.NoError(t, alterSchema(`amount: int .`)) res, err := mutationWithTs(`{ set { _:p "0" . 
} }`, "application/rdf", false, true, 0) require.NoError(t, err) - b, _ := json.MarshalIndent(res, "", " ") - fmt.Printf("%s\n", b) const ( // this upsert block increments the value of the counter by one diff --git a/dgraph/cmd/counter/increment.go b/dgraph/cmd/counter/increment.go index 9849a9fafbd..affb54257b9 100644 --- a/dgraph/cmd/counter/increment.go +++ b/dgraph/cmd/counter/increment.go @@ -98,7 +98,7 @@ func queryCounter(ctx context.Context, txn *dgo.Txn, pred string) (Counter, erro case 1: counter = m["q"][0] default: - panic(fmt.Sprintf("Invalid response: %q", resp.Json)) + x.Panic(errors.Errorf("Invalid response: %q", resp.Json)) } span.Annotatef(nil, "Found counter: %+v", counter) counter.startTs = resp.GetTxn().GetStartTs() diff --git a/dgraph/cmd/live/load-json/load_test.go b/dgraph/cmd/live/load-json/load_test.go index f0bfe849e2c..43b04062a3a 100644 --- a/dgraph/cmd/live/load-json/load_test.go +++ b/dgraph/cmd/live/load-json/load_test.go @@ -26,9 +26,9 @@ import ( "strings" "testing" - "github.com/dgraph-io/dgo/v2" "github.com/stretchr/testify/require" + "github.com/dgraph-io/dgo/v2" "github.com/dgraph-io/dgraph/testutil" "github.com/dgraph-io/dgraph/x" ) diff --git a/dgraph/cmd/live/run.go b/dgraph/cmd/live/run.go index 4f5f998f84b..b147d71192d 100644 --- a/dgraph/cmd/live/run.go +++ b/dgraph/cmd/live/run.go @@ -43,6 +43,7 @@ import ( "github.com/dgraph-io/dgo/v2/protos/api" "github.com/dgraph-io/dgraph/chunker" + "github.com/dgraph-io/dgraph/testutil" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" "github.com/dgraph-io/dgraph/xidmap" @@ -194,7 +195,11 @@ func processSchemaFile(ctx context.Context, file string, dgraphClient *dgo.Dgrap op := &api.Operation{} op.Schema = string(b) - return dgraphClient.Alter(ctx, op) + if err := dgraphClient.Alter(ctx, op); err != nil { + return err + } + // TODO(Aman): avoid using functions from testutil. + return testutil.WaitForAlter(ctx, dgraphClient, op.Schema) } func (l *loader) uid(val string) string { diff --git a/dgraph/cmd/migrate/utils.go b/dgraph/cmd/migrate/utils.go index 4d03ae77ac5..adc433a1853 100644 --- a/dgraph/cmd/migrate/utils.go +++ b/dgraph/cmd/migrate/utils.go @@ -24,6 +24,7 @@ import ( "reflect" "strings" + "github.com/dgraph-io/dgraph/x" "github.com/go-sql-driver/mysql" "github.com/pkg/errors" ) @@ -119,7 +120,7 @@ func getColumnValues(columns []string, dataTypes []dataType, case datetimeType: valuePtrs = append(valuePtrs, new(mysql.NullTime)) default: - panic(fmt.Sprintf("detected unsupported type %s on column %s", + x.Panic(errors.Errorf("detected unsupported type %s on column %s", dataTypes[i], columns[i])) } } diff --git a/dgraph/cmd/zero/run.go b/dgraph/cmd/zero/run.go index 9f8bb534c5a..b6cf6a49c43 100644 --- a/dgraph/cmd/zero/run.go +++ b/dgraph/cmd/zero/run.go @@ -18,6 +18,7 @@ package zero import ( "context" + // "errors" "fmt" "log" "net" @@ -36,6 +37,7 @@ import ( "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/badger/v2/y" "github.com/dgraph-io/dgraph/conn" + "github.com/dgraph-io/dgraph/ee/enc" "github.com/dgraph-io/dgraph/protos/pb" "github.com/dgraph-io/dgraph/raftwal" "github.com/dgraph-io/dgraph/x" @@ -197,14 +199,20 @@ func run() { if len(opts.myAddr) == 0 { opts.myAddr = fmt.Sprintf("localhost:%d", x.PortZeroGrpc+opts.portOffset) } + + x.InitSentry(enc.EeBuild) + defer x.FlushSentry() + x.ConfigureSentryScope("zero") + x.WrapPanics() + + // Simulate a Sentry exception or panic event as shown below. 
+ // x.CaptureSentryException(errors.New("zero exception")) + // x.Panic(errors.New("zero manual panic will send 2 events")) + grpcListener, err := setupListener(addr, x.PortZeroGrpc+opts.portOffset, "grpc") - if err != nil { - log.Fatal(err) - } + x.Check(err) httpListener, err := setupListener(addr, x.PortZeroHTTP+opts.portOffset, "http") - if err != nil { - log.Fatal(err) - } + x.Check(err) // Open raft write-ahead log and initialize raft node. x.Checkf(os.MkdirAll(opts.w, 0700), "Error while creating WAL dir.") diff --git a/edgraph/server.go b/edgraph/server.go index 1c1c40e652d..9fe91ca89e5 100644 --- a/edgraph/server.go +++ b/edgraph/server.go @@ -28,9 +28,20 @@ import ( "time" "unicode" + "github.com/gogo/protobuf/jsonpb" + "github.com/golang/glog" + "github.com/pkg/errors" + ostats "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + otrace "go.opencensus.io/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + "github.com/dgraph-io/dgo/v2" "github.com/dgraph-io/dgo/v2/protos/api" - "github.com/dgraph-io/dgraph/chunker" "github.com/dgraph-io/dgraph/conn" "github.com/dgraph-io/dgraph/dgraph/cmd/zero" @@ -44,18 +55,6 @@ import ( "github.com/dgraph-io/dgraph/types/facets" "github.com/dgraph-io/dgraph/worker" "github.com/dgraph-io/dgraph/x" - "github.com/gogo/protobuf/jsonpb" - "github.com/golang/glog" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" - - ostats "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - otrace "go.opencensus.io/trace" ) const ( @@ -88,6 +87,10 @@ var ( numGraphQL uint64 ) +var ( + errIndexingInProgress = errors.New("schema is already being modified. Please retry") +) + // Server implements protos.DgraphServer type Server struct{} @@ -241,6 +244,11 @@ func (s *Server) Alter(ctx context.Context, op *api.Operation) (*api.Payload, er return empty, err } + // If a background task is already running, we should reject all the new alter requests. + if schema.State().IndexingInProgress() { + return nil, errIndexingInProgress + } + for _, update := range result.Preds { // Reserved predicates cannot be altered but let the update go through // if the update is equal to the existing one. 
diff --git a/go.mod b/go.mod index 3bda990bbda..6b7df9e1414 100644 --- a/go.mod +++ b/go.mod @@ -17,12 +17,13 @@ require ( github.com/blevesearch/snowballstem v0.0.0-20180110192139-26b06a2c243d // indirect github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd github.com/dgraph-io/badger/v2 v2.0.1-0.20191220102048-ab4352b00a17 - github.com/dgraph-io/dgo/v2 v2.1.1-0.20191127085444-c7a02678e8a6 + github.com/dgraph-io/dgo/v2 v2.2.0 github.com/dgraph-io/ristretto v0.0.1 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgryski/go-farm v0.0.0-20191112170834-c2139c5d712b github.com/dgryski/go-groupvarint v0.0.0-20190318181831-5ce5df8ca4e1 github.com/dustin/go-humanize v1.0.0 + github.com/getsentry/sentry-go v0.5.1 github.com/go-ini/ini v1.39.0 // indirect github.com/go-sql-driver/mysql v0.0.0-20190330032241-c0f6b444ad8f github.com/gogo/protobuf v1.3.1 @@ -31,9 +32,10 @@ require ( github.com/golang/protobuf v1.3.3 github.com/golang/snappy v0.0.1 github.com/google/codesearch v1.0.0 - github.com/google/go-cmp v0.3.1 + github.com/google/go-cmp v0.4.0 github.com/google/uuid v1.0.0 github.com/minio/minio-go v0.0.0-20181109183348-774475480ffe + github.com/mitchellh/panicwrap v1.0.0 github.com/paulmach/go.geojson v0.0.0-20170327170536-40612a87147b github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 @@ -42,7 +44,6 @@ require ( github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect github.com/prometheus/common v0.4.1 // indirect github.com/prometheus/procfs v0.0.0-20190517135640-51af30a78b0e // indirect - github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect github.com/spf13/cast v1.3.0 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 diff --git a/go.sum b/go.sum index 5ea709738b1..f2b57d05c54 100644 --- a/go.sum +++ b/go.sum @@ -17,15 +17,19 @@ github.com/DataDog/opencensus-go-exporter-datadog v0.0.0-20190503082300-0f32ad59 github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.4 h1:+IawcoXhCBylN7ccwdwf8LOH2jKq7NavGpEPanrlTzE= github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.0.3 h1:M5ZnqLOoZR8ygVq0FfkXsNOKzMCk0xRiow0R5+5VkQ0= github.com/agnivade/levenshtein v1.0.3/go.mod h1:4SFRZbbXWLF4MU1T9Qg0pGgH3Pjs+t6ie5efyrwRJXs= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= @@ -35,6 +39,7 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -53,6 +58,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -65,10 +71,14 @@ github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkE github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger/v2 v2.0.1-0.20191220102048-ab4352b00a17 h1:BxXd8isFVcup+jlLnnEN22xhB+UwLMgAgk56kDxy9AY= github.com/dgraph-io/badger/v2 v2.0.1-0.20191220102048-ab4352b00a17/go.mod h1:YoRSIp1LmAJ7zH7tZwRvjNMUYLxB4wl3ebYkaIruZ04= github.com/dgraph-io/dgo/v2 v2.1.1-0.20191127085444-c7a02678e8a6 h1:5leDFqGys055YO3TbghBhk/QdRPEwyLPdgsSJfiR20I= github.com/dgraph-io/dgo/v2 v2.1.1-0.20191127085444-c7a02678e8a6/go.mod h1:LJCkLxm5fUMcU+yb8gHFjHt7ChgNuz3YnQQ6MQkmscI= +github.com/dgraph-io/dgo/v2 v2.2.0 h1:qYbm6mEF3wuKiRpgNOldk6PmPbBJFwj6vL7I7dTSdyc= +github.com/dgraph-io/dgo/v2 v2.2.0/go.mod h1:LJCkLxm5fUMcU+yb8gHFjHt7ChgNuz3YnQQ6MQkmscI= github.com/dgraph-io/ristretto v0.0.0-20191025175511-c1f00be0418e/go.mod h1:edzKIzGvqUCMzhTVWbiTSe75zD9Xxq0GtSBtFmaUTZs= github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs= github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= @@ -89,18 +99,34 @@ github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getsentry/sentry-go v0.5.1 h1:MIPe7ScHADsrK2vznqmhksIUFxq7m0JfTh+ZIMkI+VQ= +github.com/getsentry/sentry-go v0.5.1/go.mod h1:B8H7x8TYDPkeWPRzGpIiFO97LZP6rL8A3hEt8lUItMw= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-ini/ini v1.39.0 h1:/CyW/jTlZLjuzy52jc1XnhJm6IUKEuunpJFpecywNeI= github.com/go-ini/ini v1.39.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-sql-driver/mysql v0.0.0-20190330032241-c0f6b444ad8f h1:yooNaEJy76Nvbcy/J0moVJfoNK4fDmSAO31V5iBM47c= github.com/go-sql-driver/mysql v0.0.0-20190330032241-c0f6b444ad8f/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -121,12 +147,15 @@ github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaW github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/codesearch v1.0.0 h1:z4h5JoHkUS+GqxqPDrldC3Y0Qq0vHAGgaDEW5pWU/ys= github.com/google/codesearch v1.0.0/go.mod h1:qCnXDFnak/trCmLaE50kgPte3AX9jSeruZexWEOivi0= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -137,9 +166,11 @@ github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -147,12 +178,27 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -161,20 +207,29 @@ github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/matryer/moq v0.0.0-20200106131100-75d0ddfc0007/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/minio/minio-go v0.0.0-20181109183348-774475480ffe h1:cirnDgxoPBUK1M/wcd1a0KNY4FwPZTOoZ2iOVAmNS6M= github.com/minio/minio-go v0.0.0-20181109183348-774475480ffe/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -182,12 +237,22 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/panicwrap v1.0.0 h1:67zIyVakCIvcs69A0FGfZjBdPleaonSgGlXRSRlb6fE= +github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -202,6 +267,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/philhofer/fwd v1.0.0 
h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -236,6 +302,7 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -244,6 +311,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -277,16 +345,31 @@ github.com/twpayne/go-geom v1.0.5/go.mod h1:gO3i8BeAvZuihwwXcw8dIOWXebCzTmy3uvXj github.com/twpayne/go-kml v1.0.0/go.mod h1:LlvLIQSfMqYk2O7Nx8vYAbSLv4K9rjMvLlEdUKWdjq0= github.com/twpayne/go-polyline v1.0.0/go.mod h1:ICh24bcLYBX8CknfvNPKqoTbe+eg+MX1NPyJmSBo7pU= github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/tcplisten 
v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/dataloaden v0.2.1-0.20190515034641-a19b9a6e7c9e/go.mod h1:/HUdMve7rvxZma+2ZELQeNh88+003LL7Pf/CZ089j8U= github.com/vektah/gqlparser/v2 v2.0.1 h1:xgl5abVnsd4hkN9rk65OJID9bfcLSMuTaTcZj777q1o= github.com/vektah/gqlparser/v2 v2.0.1/go.mod h1:SyUiHgLATUR8BiYURfTirrTcGpcE+4XkV2se04Px1Ms= github.com/willf/bitset v0.0.0-20181014161241-71fa2377963f h1:gpNz6yJT2E7nm4WlhFendQ32tHE3uGE6P6lARnQgBnQ= github.com/willf/bitset v0.0.0-20181014161241-71fa2377963f/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20190228193606-a943ad0ee4c9 h1:3QcOf2A2G8CYue5DY60PR20dsJlfTT/vdnXEdU3ba7c= go.etcd.io/etcd v0.0.0-20190228193606-a943ad0ee4c9/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= @@ -300,6 +383,7 @@ golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -313,12 +397,16 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -341,6 +429,7 @@ golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -350,16 +439,19 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190515012406-7d7faa4812bd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20200114235610-7ae403b6b589/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -390,6 +482,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/ini.v1 v1.48.0 h1:URjZc+8ugRY5mL5uUeQH/a63JcHwdX9xZaWvmNWD7z8= gopkg.in/ini.v1 v1.48.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/graphql/admin/admin.go b/graphql/admin/admin.go index b927e2fceb1..87baa08245d 100644 --- a/graphql/admin/admin.go +++ b/graphql/admin/admin.go @@ -212,7 +212,7 @@ type adminServer struct { func NewServers(withIntrospection bool, closer *y.Closer) (web.IServeGraphQL, web.IServeGraphQL) { gqlSchema, err := schema.FromString("") if err != nil { - panic(err) + x.Panic(err) } resolvers := resolve.New(gqlSchema, resolverFactoryWithErrorMsg(errNoGraphQLSchema)) @@ -239,7 +239,7 @@ func newAdminResolver( adminSchema, err := schema.FromString(graphqlAdminSchema) if err != nil { - panic(err) + x.Panic(err) } rf := newAdminResolverFactory() diff --git a/graphql/e2e/common/common.go b/graphql/e2e/common/common.go index 8afb919ad60..ae0d28aa4b3 100644 --- a/graphql/e2e/common/common.go +++ b/graphql/e2e/common/common.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "context" "encoding/json" - "fmt" "io/ioutil" "net/http" "strconv" @@ -158,36 +157,37 @@ type director struct { func BootstrapServer(schema, data []byte) { err := checkGraphQLStarted(graphqlAdminURL) if err != nil { - panic(fmt.Sprintf("Waited for GraphQL test server to become available, but it never did.\n"+ - "Got last error %+v", err.Error())) + x.Panic(errors.Errorf( + "Waited for GraphQL test server to become available, but it never did.\n"+ + "Got last error %+v", err.Error())) } err = checkGraphQLStarted(graphqlAdminTestAdminURL) if err != nil { - panic(fmt.Sprintf("Waited for GraphQL AdminTest server to become available, "+ - "but it never did.\n Got last error: %+v", err.Error())) + x.Panic(errors.Errorf( + "Waited for GraphQL AdminTest server to become available, "+ + "but it never did.\n Got last error: %+v", err.Error())) } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() d, err := grpc.DialContext(ctx, alphagRPC, grpc.WithInsecure()) if err != nil { - panic(err) + x.Panic(err) } client := dgo.NewDgraphClient(api.NewDgraphClient(d)) err = addSchema(graphqlAdminURL, string(schema)) if err != nil { - panic(err) + x.Panic(err) } err = populateGraphQLData(client, data) if err != nil { - panic(err) + x.Panic(err) } - if err = d.Close(); err != nil { - panic(err) + x.Panic(err) } } diff --git a/graphql/e2e/common/error.go b/graphql/e2e/common/error.go index 43d480a56c8..df1f462ed4f 100644 --- a/graphql/e2e/common/error.go +++ 
b/graphql/e2e/common/error.go @@ -19,6 +19,7 @@ package common import ( "context" "encoding/json" + "errors" "fmt" "io/ioutil" "net/http/httptest" @@ -42,8 +43,8 @@ import ( ) const ( - panicMsg = "\n****\nThis test should trap this panic.\n" + - "It's working as expected if this message is logged with a stack trace.\n****\n" + panicMsg = "\n****\nthis test should trap this panic.\n" + + "It's working as expected if this message is logged with a stack trace\n****" ) type ErrorCase struct { @@ -265,14 +266,16 @@ func panicCatcher(t *testing.T) { type panicClient struct{} func (dg *panicClient) Query(ctx context.Context, query *gql.GraphQuery) ([]byte, error) { - panic(panicMsg) + x.Panic(errors.New(panicMsg)) + return nil, nil } func (dg *panicClient) Mutate( ctx context.Context, query *gql.GraphQuery, mutations []*dgoapi.Mutation) (map[string]string, map[string]interface{}, error) { - panic(panicMsg) + x.Panic(errors.New(panicMsg)) + return nil, nil, nil } // clientInfoLogin check whether the client info(IP address) is propagated in the request. diff --git a/graphql/schema/gqlschema.go b/graphql/schema/gqlschema.go index 64c73269d24..1c43d65c54b 100644 --- a/graphql/schema/gqlschema.go +++ b/graphql/schema/gqlschema.go @@ -268,7 +268,7 @@ func copyAstFieldDef(src *ast.FieldDefinition) *ast.FieldDefinition { func expandSchema(doc *ast.SchemaDocument) { docExtras, gqlErr := parser.ParseSchema(&ast.Source{Input: schemaExtras}) if gqlErr != nil { - panic(gqlErr) + x.Panic(gqlErr) } // Cache the interface definitions in a map. They could also be defined after types which @@ -1375,7 +1375,7 @@ func Stringify(schema *ast.Schema, originalTypes []string) string { // the generated definitions. docExtras, gqlErr := parser.ParseSchema(&ast.Source{Input: schemaExtras}) if gqlErr != nil { - panic(gqlErr) + x.Panic(gqlErr) } for _, defn := range docExtras.Definitions { printed[defn.Name] = true diff --git a/graphql/web/http.go b/graphql/web/http.go index fd830e9a8d2..bb94144e2b4 100644 --- a/graphql/web/http.go +++ b/graphql/web/http.go @@ -103,7 +103,7 @@ func (gh *graphqlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer span.End() if !gh.isValid() { - panic("graphqlHandler not initialised") + x.Panic(errors.New("graphqlHandler not initialised")) } ctx = x.AttachAccessJwt(ctx, r) diff --git a/posting/index.go b/posting/index.go index cb8b85e41f4..47a8e63e949 100644 --- a/posting/index.go +++ b/posting/index.go @@ -24,11 +24,11 @@ import ( "io/ioutil" "math" "os" - "path/filepath" "sync/atomic" "time" "github.com/golang/glog" + "github.com/pkg/errors" ostats "go.opencensus.io/stats" otrace "go.opencensus.io/trace" @@ -40,7 +40,6 @@ import ( "github.com/dgraph-io/dgraph/tok" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" - "github.com/pkg/errors" ) var emptyCountParams countParams @@ -52,9 +51,9 @@ type indexMutationInfo struct { op pb.DirectedEdge_Op } -// indexTokensforTokenizers return tokens, without the predicate prefix and +// indexTokens return tokens, without the predicate prefix and // index rune, for specific tokenizers. 
-func indexTokens(info *indexMutationInfo) ([]string, error) { +func indexTokens(ctx context.Context, info *indexMutationInfo) ([]string, error) { attr := info.edge.Attr lang := info.edge.GetLang() @@ -63,7 +62,7 @@ func indexTokens(info *indexMutationInfo) ([]string, error) { return nil, errors.Errorf("Cannot index attribute %s of type object.", attr) } - if !schema.State().IsIndexed(attr) { + if !schema.State().IsIndexed(ctx, attr) { return nil, errors.Errorf("Attribute %s is not indexed.", attr) } sv, err := types.Convert(info.val, schemaType) @@ -87,14 +86,13 @@ func indexTokens(info *indexMutationInfo) ([]string, error) { // TODO - See if we need to pass op as argument as t should already have Op. func (txn *Txn) addIndexMutations(ctx context.Context, info *indexMutationInfo) error { if info.tokenizers == nil { - info.tokenizers = schema.State().Tokenizer(info.edge.Attr) + info.tokenizers = schema.State().Tokenizer(ctx, info.edge.Attr) } attr := info.edge.Attr uid := info.edge.Entity x.AssertTrue(uid != 0) - tokens, err := indexTokens(info) - + tokens, err := indexTokens(ctx, info) if err != nil { // This data is not indexable return err @@ -115,10 +113,8 @@ func (txn *Txn) addIndexMutations(ctx context.Context, info *indexMutationInfo) return nil } -func (txn *Txn) addIndexMutation(ctx context.Context, edge *pb.DirectedEdge, - token string) error { +func (txn *Txn) addIndexMutation(ctx context.Context, edge *pb.DirectedEdge, token string) error { key := x.IndexKey(edge.Attr, token) - plist, err := txn.cache.GetFromDelta(key) if err != nil { return err @@ -198,7 +194,7 @@ func (txn *Txn) addReverseMutation(ctx context.Context, t *pb.DirectedEdge) erro func (txn *Txn) addReverseAndCountMutation(ctx context.Context, t *pb.DirectedEdge) error { key := x.ReverseKey(t.Attr, t.ValueId) - hasCountIndex := schema.State().HasCount(t.Attr) + hasCountIndex := schema.State().HasCount(ctx, t.Attr) var getFn func(key []byte) (*List, error) if hasCountIndex { @@ -238,11 +234,10 @@ func (txn *Txn) addReverseAndCountMutation(ctx context.Context, t *pb.DirectedEd return nil } -func (l *List) handleDeleteAll(ctx context.Context, edge *pb.DirectedEdge, - txn *Txn) error { - isReversed := schema.State().IsReversed(edge.Attr) - isIndexed := schema.State().IsIndexed(edge.Attr) - hasCount := schema.State().HasCount(edge.Attr) +func (l *List) handleDeleteAll(ctx context.Context, edge *pb.DirectedEdge, txn *Txn) error { + isReversed := schema.State().IsReversed(ctx, edge.Attr) + isIndexed := schema.State().IsIndexed(ctx, edge.Attr) + hasCount := schema.State().HasCount(ctx, edge.Attr) delEdge := &pb.DirectedEdge{ Attr: edge.Attr, Op: edge.Op, @@ -264,7 +259,7 @@ func (l *List) handleDeleteAll(ctx context.Context, edge *pb.DirectedEdge, Value: p.Value, } return txn.addIndexMutations(ctx, &indexMutationInfo{ - tokenizers: schema.State().Tokenizer(edge.Attr), + tokenizers: schema.State().Tokenizer(ctx, edge.Attr), edge: edge, val: val, op: pb.DirectedEdge_DEL, @@ -404,8 +399,7 @@ func (txn *Txn) addMutationHelper(ctx context.Context, l *List, doUpdateIndex bo // AddMutationWithIndex is addMutation with support for indexing. It also // supports reverse edges. 
-func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, - txn *Txn) error { +func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, txn *Txn) error { if len(edge.Attr) == 0 { return errors.Errorf("Predicate cannot be empty for edge with subject: [%v], object: [%v]"+ " and value: [%v]", edge.Entity, edge.ValueId, edge.Value) @@ -415,8 +409,8 @@ func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, return l.handleDeleteAll(ctx, edge, txn) } - doUpdateIndex := pstore != nil && schema.State().IsIndexed(edge.Attr) - hasCountIndex := schema.State().HasCount(edge.Attr) + doUpdateIndex := pstore != nil && schema.State().IsIndexed(ctx, edge.Attr) + hasCountIndex := schema.State().HasCount(ctx, edge.Attr) val, found, cp, err := txn.addMutationHelper(ctx, l, doUpdateIndex, hasCountIndex, edge) if err != nil { return err @@ -431,7 +425,7 @@ func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, // Exact matches. if found && val.Value != nil { if err := txn.addIndexMutations(ctx, &indexMutationInfo{ - tokenizers: schema.State().Tokenizer(edge.Attr), + tokenizers: schema.State().Tokenizer(ctx, edge.Attr), edge: edge, val: val, op: pb.DirectedEdge_DEL, @@ -445,7 +439,7 @@ func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, Value: edge.Value, } if err := txn.addIndexMutations(ctx, &indexMutationInfo{ - tokenizers: schema.State().Tokenizer(edge.Attr), + tokenizers: schema.State().Tokenizer(ctx, edge.Attr), edge: edge, val: val, op: pb.DirectedEdge_SET, @@ -456,7 +450,7 @@ func (l *List) AddMutationWithIndex(ctx context.Context, edge *pb.DirectedEdge, } // Add reverse mutation irrespective of hasMutated, server crash can happen after // mutation is synced and before reverse edge is synced - if (pstore != nil) && (edge.ValueId != 0) && schema.State().IsReversed(edge.Attr) { + if (pstore != nil) && (edge.ValueId != 0) && schema.State().IsReversed(ctx, edge.Attr) { if err := txn.addReverseAndCountMutation(ctx, edge); err != nil { return err } @@ -539,22 +533,14 @@ type rebuilder struct { } func (r *rebuilder) Run(ctx context.Context) error { - // All the temp indexes go into the following directory. We delete the whole - // directory after the indexing step is complete. This deletes any other temp - // indexes that may have been left around in case defer wasn't executed. - // TODO(Aman): If users are not happy, we could add a flag to choose this dir. - tmpParentDir := filepath.Join(os.TempDir(), "dgraph_index") - // We write the index in a temporary badger first and then, // merge entries before writing them to p directory. - if err := os.MkdirAll(tmpParentDir, os.ModePerm); err != nil { - return errors.Wrap(err, "error creating in temp dir for reindexing") - } - tmpIndexDir, err := ioutil.TempDir(tmpParentDir, "") + // TODO(Aman): If users are not happy, we could add a flag to choose this dir. + tmpIndexDir, err := ioutil.TempDir("", "dgraph_index_") if err != nil { return errors.Wrap(err, "error creating temp dir for reindexing") } - defer os.RemoveAll(tmpParentDir) + defer os.RemoveAll(tmpIndexDir) glog.V(1).Infof("Rebuilding indexes using the temp folder %s\n", tmpIndexDir) dbOpts := badger.DefaultOptions(tmpIndexDir). @@ -576,11 +562,11 @@ func (r *rebuilder) Run(ctx context.Context) error { "Rebuilding index for predicate %s: Starting process. StartTs=%d. 
Prefix=\n%s\n", r.attr, r.startTs, hex.Dump(r.prefix)) - // Counter is used here to ensure that all keys are commited at different timestamp. + // Counter is used here to ensure that all keys are committed at different timestamp. // We set it to 1 in case there are no keys found and NewStreamAt is called with ts=0. var counter uint64 = 1 - // Todo(Aman): Replace TxnWriter with WriteBatch. While we do that we should ensure that + // TODO(Aman): Replace TxnWriter with WriteBatch. While we do that we should ensure that // WriteBatch has a mechanism for throttling. Also, find other places where TxnWriter // could be replaced with WriteBatch in the code tmpWriter := NewTxnWriter(tmpDB) @@ -714,12 +700,65 @@ const ( indexRebuild = iota // Index should be deleted and rebuilt. ) -// Run rebuilds all indices that need it. -func (rb *IndexRebuild) Run(ctx context.Context) error { - if err := rebuildListType(ctx, rb); err != nil { +// GetQuerySchema returns the schema that can be served while indexes are getting built. +// Query schema is defined as current schema minus tokens to delete from current schema. +func (rb *IndexRebuild) GetQuerySchema() *pb.SchemaUpdate { + // Copy the current schema. + querySchema := *rb.CurrentSchema + info := rb.needsTokIndexRebuild() + + // Compute old.Tokenizer minus info.tokenizersToDelete. + interimTokenizers := make([]string, 0) + for _, t1 := range rb.OldSchema.Tokenizer { + found := false + for _, t2 := range info.tokenizersToDelete { + if t1 == t2 { + found = true + break + } + } + if !found { + interimTokenizers = append(interimTokenizers, t1) + } + } + querySchema.Tokenizer = interimTokenizers + + if rb.needsCountIndexRebuild() == indexRebuild { + querySchema.Count = false + } + if rb.needsReverseEdgesRebuild() == indexRebuild { + querySchema.Directive = pb.SchemaUpdate_NONE + } + return &querySchema +} + +// DropIndexes drops the indexes that need to be rebuilt. +func (rb *IndexRebuild) DropIndexes(ctx context.Context) error { + if err := dropTokIndexes(ctx, rb); err != nil { return err } - if err := rebuildIndex(ctx, rb); err != nil { + if err := dropReverseEdges(ctx, rb); err != nil { + return err + } + return dropCountIndex(ctx, rb) +} + +// BuildData updates data. +func (rb *IndexRebuild) BuildData(ctx context.Context) error { + return rebuildListType(ctx, rb) +} + +// NeedIndexRebuild returns true if any of the tokenizer, reverse +// or count indexes need to be rebuilt. +func (rb *IndexRebuild) NeedIndexRebuild() bool { + return rb.needsTokIndexRebuild().op == indexRebuild || + rb.needsReverseEdgesRebuild() == indexRebuild || + rb.needsCountIndexRebuild() == indexRebuild +} + +// BuildIndexes builds indexes. +func (rb *IndexRebuild) BuildIndexes(ctx context.Context) error { + if err := rebuildTokIndex(ctx, rb); err != nil { return err } if err := rebuildReverseEdges(ctx, rb); err != nil { @@ -734,7 +773,7 @@ type indexRebuildInfo struct { tokenizersToRebuild []string } -func (rb *IndexRebuild) needsIndexRebuild() indexRebuildInfo { +func (rb *IndexRebuild) needsTokIndexRebuild() indexRebuildInfo { x.AssertTruef(rb.CurrentSchema != nil, "Current schema cannot be nil.") // If the old schema is nil, we can treat it as an empty schema. Copy it @@ -802,12 +841,8 @@ func (rb *IndexRebuild) needsIndexRebuild() indexRebuildInfo { } } -// rebuildIndex rebuilds index for a given attribute. -// We commit mutations with startTs and ignore the errors. 
-func rebuildIndex(ctx context.Context, rb *IndexRebuild) error { - // Exit early if indices do not need to be rebuilt. - rebuildInfo := rb.needsIndexRebuild() - +func dropTokIndexes(ctx context.Context, rb *IndexRebuild) error { + rebuildInfo := rb.needsTokIndexRebuild() if rebuildInfo.op == indexNoop { return nil } @@ -826,17 +861,7 @@ func rebuildIndex(ctx context.Context, rb *IndexRebuild) error { } } - // Exit early if the index only need to be deleted and not rebuilt. - if rebuildInfo.op == indexDelete { - return nil - } - - // Exit early if there are no tokenizers to rebuild. - if len(rebuildInfo.tokenizersToRebuild) == 0 { - return nil - } - - glog.Infof("Rebuilding index for attr %s and tokenizers %s", rb.Attr, + glog.Infof("Deleting index for attr %s and tokenizers %s", rb.Attr, rebuildInfo.tokenizersToRebuild) // Before rebuilding, the existing index needs to be deleted. for _, tokenizer := range rebuildInfo.tokenizersToRebuild { @@ -851,6 +876,24 @@ func rebuildIndex(ctx context.Context, rb *IndexRebuild) error { } } + return nil +} + +// rebuildTokIndex rebuilds index for a given attribute. +// We commit mutations with startTs and ignore the errors. +func rebuildTokIndex(ctx context.Context, rb *IndexRebuild) error { + rebuildInfo := rb.needsTokIndexRebuild() + if rebuildInfo.op != indexRebuild { + return nil + } + + // Exit early if there are no tokenizers to rebuild. + if len(rebuildInfo.tokenizersToRebuild) == 0 { + return nil + } + + glog.Infof("Rebuilding index for attr %s and tokenizers %s", rb.Attr, + rebuildInfo.tokenizersToRebuild) tokenizers, err := tok.GetTokenizers(rebuildInfo.tokenizersToRebuild) if err != nil { return err @@ -912,8 +955,8 @@ func (rb *IndexRebuild) needsCountIndexRebuild() indexOp { return indexRebuild } -// rebuildCountIndex rebuilds the count index for a given attribute. -func rebuildCountIndex(ctx context.Context, rb *IndexRebuild) error { +func dropCountIndex(ctx context.Context, rb *IndexRebuild) error { + // Exit early if indices do not need to be rebuilt. op := rb.needsCountIndexRebuild() if op == indexNoop { return nil @@ -924,8 +967,13 @@ func rebuildCountIndex(ctx context.Context, rb *IndexRebuild) error { return err } - // Exit early if attribute is index only needed to be deleted. - if op == indexDelete { + return nil +} + +// rebuildCountIndex rebuilds the count index for a given attribute. +func rebuildCountIndex(ctx context.Context, rb *IndexRebuild) error { + op := rb.needsCountIndexRebuild() + if op != indexRebuild { return nil } @@ -996,20 +1044,20 @@ func (rb *IndexRebuild) needsReverseEdgesRebuild() indexOp { return indexDelete } -// rebuildReverseEdges rebuilds the reverse edges for a given attribute. -func rebuildReverseEdges(ctx context.Context, rb *IndexRebuild) error { +func dropReverseEdges(ctx context.Context, rb *IndexRebuild) error { op := rb.needsReverseEdgesRebuild() if op == indexNoop { return nil } glog.Infof("Deleting reverse index for %s", rb.Attr) - if err := deleteReverseEdges(rb.Attr); err != nil { - return err - } + return deleteReverseEdges(rb.Attr) +} - // Exit early if index only needed to be deleted. - if op == indexDelete { +// rebuildReverseEdges rebuilds the reverse edges for a given attribute. 
+func rebuildReverseEdges(ctx context.Context, rb *IndexRebuild) error { + op := rb.needsReverseEdgesRebuild() + if op != indexRebuild { return nil } diff --git a/posting/index_test.go b/posting/index_test.go index dad66494484..3ee12949a78 100644 --- a/posting/index_test.go +++ b/posting/index_test.go @@ -40,8 +40,8 @@ func uids(l *List, readTs uint64) []uint64 { // indexTokensForTest is just a wrapper around indexTokens used for convenience. func indexTokensForTest(attr, lang string, val types.Val) ([]string, error) { - return indexTokens(&indexMutationInfo{ - tokenizers: schema.State().Tokenizer(attr), + return indexTokens(context.Background(), &indexMutationInfo{ + tokenizers: schema.State().Tokenizer(context.Background(), attr), edge: &pb.DirectedEdge{ Attr: attr, Lang: lang, @@ -258,19 +258,20 @@ func addEdgeToUID(t *testing.T, attr string, src uint64, addMutation(t, l, edge, Set, startTs, commitTs, false) } -func TestRebuildIndex(t *testing.T) { +func TestRebuildTokIndex(t *testing.T) { addEdgeToValue(t, "name2", 91, "Michonne", uint64(1), uint64(2)) addEdgeToValue(t, "name2", 92, "David", uint64(3), uint64(4)) require.NoError(t, schema.ParseBytes([]byte(schemaVal), 1)) - currentSchema, _ := schema.State().Get("name2") + currentSchema, _ := schema.State().Get(context.Background(), "name2") rb := IndexRebuild{ Attr: "name2", StartTs: 5, OldSchema: nil, CurrentSchema: &currentSchema, } - require.NoError(t, rebuildIndex(context.Background(), &rb)) + require.NoError(t, dropTokIndexes(context.Background(), &rb)) + require.NoError(t, rebuildTokIndex(context.Background(), &rb)) // Check index entries in data store. txn := ps.NewTransactionAt(6, false) @@ -308,30 +309,32 @@ func TestRebuildIndex(t *testing.T) { require.EqualValues(t, 91, uids2[0]) } -func TestRebuildIndexWithDeletion(t *testing.T) { +func TestRebuildTokIndexWithDeletion(t *testing.T) { addEdgeToValue(t, "name2", 91, "Michonne", uint64(1), uint64(2)) addEdgeToValue(t, "name2", 92, "David", uint64(3), uint64(4)) require.NoError(t, schema.ParseBytes([]byte(schemaVal), 1)) - currentSchema, _ := schema.State().Get("name2") + currentSchema, _ := schema.State().Get(context.Background(), "name2") rb := IndexRebuild{ Attr: "name2", StartTs: 5, OldSchema: nil, CurrentSchema: &currentSchema, } - require.NoError(t, rebuildIndex(context.Background(), &rb)) + require.NoError(t, dropTokIndexes(context.Background(), &rb)) + require.NoError(t, rebuildTokIndex(context.Background(), &rb)) // Mutate the schema (the index in name2 is deleted) and rebuild the index. require.NoError(t, schema.ParseBytes([]byte(mutatedSchemaVal), 1)) - newSchema, _ := schema.State().Get("name2") + newSchema, _ := schema.State().Get(context.Background(), "name2") rb = IndexRebuild{ Attr: "name2", StartTs: 6, OldSchema: &currentSchema, CurrentSchema: &newSchema, } - require.NoError(t, rebuildIndex(context.Background(), &rb)) + require.NoError(t, dropTokIndexes(context.Background(), &rb)) + require.NoError(t, rebuildTokIndex(context.Background(), &rb)) // Check index entries in data store.
txn := ps.NewTransactionAt(7, false) @@ -368,7 +371,7 @@ func TestRebuildReverseEdges(t *testing.T) { addEdgeToUID(t, "friend", 2, 23, uint64(14), uint64(15)) require.NoError(t, schema.ParseBytes([]byte(schemaVal), 1)) - currentSchema, _ := schema.State().Get("friend") + currentSchema, _ := schema.State().Get(context.Background(), "friend") rb := IndexRebuild{ Attr: "friend", StartTs: 16, @@ -416,17 +419,17 @@ func TestRebuildReverseEdges(t *testing.T) { require.EqualValues(t, 1, uids1[0]) } -func TestNeedsIndexRebuild(t *testing.T) { +func TestNeedsTokIndexRebuild(t *testing.T) { rb := IndexRebuild{} rb.OldSchema = &pb.SchemaUpdate{ValueType: pb.Posting_UID} rb.CurrentSchema = &pb.SchemaUpdate{ValueType: pb.Posting_UID} - rebuildInfo := rb.needsIndexRebuild() + rebuildInfo := rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexNoop), rebuildInfo.op) require.Equal(t, []string(nil), rebuildInfo.tokenizersToDelete) require.Equal(t, []string(nil), rebuildInfo.tokenizersToRebuild) rb.OldSchema = nil - rebuildInfo = rb.needsIndexRebuild() + rebuildInfo = rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexNoop), rebuildInfo.op) require.Equal(t, []string(nil), rebuildInfo.tokenizersToDelete) require.Equal(t, []string(nil), rebuildInfo.tokenizersToRebuild) @@ -436,7 +439,7 @@ func TestNeedsIndexRebuild(t *testing.T) { rb.CurrentSchema = &pb.SchemaUpdate{ValueType: pb.Posting_STRING, Directive: pb.SchemaUpdate_INDEX, Tokenizer: []string{"exact"}} - rebuildInfo = rb.needsIndexRebuild() + rebuildInfo = rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexNoop), rebuildInfo.op) require.Equal(t, []string(nil), rebuildInfo.tokenizersToDelete) require.Equal(t, []string(nil), rebuildInfo.tokenizersToRebuild) @@ -445,7 +448,7 @@ func TestNeedsIndexRebuild(t *testing.T) { Tokenizer: []string{"term"}} rb.CurrentSchema = &pb.SchemaUpdate{ValueType: pb.Posting_STRING, Directive: pb.SchemaUpdate_INDEX} - rebuildInfo = rb.needsIndexRebuild() + rebuildInfo = rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexRebuild), rebuildInfo.op) require.Equal(t, []string{"term"}, rebuildInfo.tokenizersToDelete) require.Equal(t, []string(nil), rebuildInfo.tokenizersToRebuild) @@ -455,7 +458,7 @@ func TestNeedsIndexRebuild(t *testing.T) { rb.CurrentSchema = &pb.SchemaUpdate{ValueType: pb.Posting_FLOAT, Directive: pb.SchemaUpdate_INDEX, Tokenizer: []string{"exact"}} - rebuildInfo = rb.needsIndexRebuild() + rebuildInfo = rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexRebuild), rebuildInfo.op) require.Equal(t, []string{"exact"}, rebuildInfo.tokenizersToDelete) require.Equal(t, []string{"exact"}, rebuildInfo.tokenizersToRebuild) @@ -464,7 +467,7 @@ func TestNeedsIndexRebuild(t *testing.T) { Tokenizer: []string{"exact"}} rb.CurrentSchema = &pb.SchemaUpdate{ValueType: pb.Posting_FLOAT, Directive: pb.SchemaUpdate_NONE} - rebuildInfo = rb.needsIndexRebuild() + rebuildInfo = rb.needsTokIndexRebuild() require.Equal(t, indexOp(indexDelete), rebuildInfo.op) require.Equal(t, []string{"exact"}, rebuildInfo.tokenizersToDelete) require.Equal(t, []string(nil), rebuildInfo.tokenizersToRebuild) diff --git a/posting/list.go b/posting/list.go index b29e2f0311b..720f405e9fd 100644 --- a/posting/list.go +++ b/posting/list.go @@ -25,6 +25,7 @@ import ( "sort" "github.com/dgryski/go-farm" + "github.com/pkg/errors" bpb "github.com/dgraph-io/badger/v2/pb" "github.com/dgraph-io/dgraph/algo" @@ -35,7 +36,6 @@ import ( "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/types/facets" "github.com/dgraph-io/dgraph/x" - 
"github.com/pkg/errors" ) var ( @@ -365,7 +365,7 @@ func TypeID(edge *pb.DirectedEdge) types.TypeID { func fingerprintEdge(t *pb.DirectedEdge) uint64 { // There could be a collision if the user gives us a value with Lang = "en" and later gives - // us a value = "en" for the same predicate. We would end up overwritting his older lang + // us a value = "en" for the same predicate. We would end up overwriting his older lang // value. // All edges with a value without LANGTAG, have the same UID. In other words, diff --git a/query/common_test.go b/query/common_test.go index a3892a5c9ed..e7b30f30c5a 100644 --- a/query/common_test.go +++ b/query/common_test.go @@ -35,6 +35,10 @@ func setSchema(schema string) { if err != nil { panic(fmt.Sprintf("Could not alter schema. Got error %v", err.Error())) } + + if err := testutil.WaitForAlter(context.Background(), client, schema); err != nil { + panic(err) + } } func dropPredicate(pred string) { diff --git a/query/query0_test.go b/query/query0_test.go index 2b3144f316b..7dcfb7389b7 100644 --- a/query/query0_test.go +++ b/query/query0_test.go @@ -2246,7 +2246,6 @@ func TestFilterUsingLenFunction(t *testing.T) { } for _, tc := range tests { - t.Log("Running: ", tc.name) js := processQueryNoErr(t, tc.in) require.JSONEq(t, tc.out, js) } diff --git a/schema/parse.go b/schema/parse.go index d41c3cbda73..4e2d39865d9 100644 --- a/schema/parse.go +++ b/schema/parse.go @@ -24,6 +24,7 @@ import ( "github.com/dgraph-io/dgraph/tok" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" + "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/schema/parse_test.go b/schema/parse_test.go index fdb651c80de..33847ab9b4f 100644 --- a/schema/parse_test.go +++ b/schema/parse_test.go @@ -17,13 +17,14 @@ package schema import ( + "context" "io/ioutil" "os" "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" + "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/dgraph/protos/pb" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" @@ -115,11 +116,6 @@ age:int @index(int) . name: string . address: string @index(term) .` -func TestSchemaIndex(t *testing.T) { - require.NoError(t, ParseBytes([]byte(schemaIndexVal1), 1)) - require.Equal(t, 2, len(State().IndexedFields())) -} - var schemaIndexVal2 = ` name: string @index(exact, exact) . address: string @index(term) . 
@@ -196,10 +192,9 @@ func TestSchemaIndexCustom(t *testing.T) { List: true, }}, }) - require.True(t, State().IsIndexed("name")) - require.False(t, State().IsReversed("name")) - require.Equal(t, "int", State().Tokenizer("age")[0].Name()) - require.Equal(t, 3, len(State().IndexedFields())) + require.True(t, State().IsIndexed(context.Background(), "name")) + require.False(t, State().IsReversed(context.Background(), "name")) + require.Equal(t, "int", State().Tokenizer(context.Background(), "age")[0].Name()) } func TestParse(t *testing.T) { diff --git a/schema/schema.go b/schema/schema.go index 5d8241bed38..05b306361ea 100644 --- a/schema/schema.go +++ b/schema/schema.go @@ -18,20 +18,21 @@ package schema import ( "bytes" + "context" "encoding/hex" "fmt" "sync" - "github.com/dgraph-io/badger/v2" "github.com/golang/glog" "github.com/golang/protobuf/proto" + "github.com/pkg/errors" "golang.org/x/net/trace" + "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/dgraph/protos/pb" "github.com/dgraph-io/dgraph/tok" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" - "github.com/pkg/errors" ) var ( @@ -39,10 +40,27 @@ var ( pstore *badger.DB ) +// We maintain two schemas for a predicate if a background task is building indexes +// for that predicate. Now, we need to use the new schema for mutations whereas +// a query schema for queries. While calling functions in this package, we need +// to set the context correctly as to which schema should be returned. +// Query schema is defined as (old schema - tokenizers to drop based on new schema). +type contextKey int + +const ( + isWrite contextKey = iota +) + +// GetWriteContext returns a context that sets the schema context for writing. +func GetWriteContext(ctx context.Context) context.Context { + return context.WithValue(ctx, isWrite, true) +} + func (s *state) init() { s.predicate = make(map[string]*pb.SchemaUpdate) s.types = make(map[string]*pb.TypeUpdate) s.elog = trace.NewEventLog("Dgraph", "Schema") + s.mutSchema = make(map[string]*pb.SchemaUpdate) } type state struct { @@ -51,6 +69,8 @@ type state struct { predicate map[string]*pb.SchemaUpdate types map[string]*pb.TypeUpdate elog trace.EventLog + // mutSchema holds the schema update that is being applied in the background. + mutSchema map[string]*pb.SchemaUpdate } // State returns the struct holding the current schema. @@ -69,6 +89,10 @@ func (s *state) DeleteAll() { for typ := range s.types { delete(s.types, typ) } + + for pred := range s.mutSchema { + delete(s.mutSchema, pred) + } } // Delete updates the schema in memory and disk @@ -87,6 +111,7 @@ func (s *state) Delete(attr string) error { } delete(s.predicate, attr) + delete(s.mutSchema, attr) return nil } @@ -139,6 +164,20 @@ func (s *state) Set(pred string, schema *pb.SchemaUpdate) { s.elog.Printf(logUpdate(schema, pred)) } +// SetMutSchema sets the mutation schema for the given predicate. +func (s *state) SetMutSchema(pred string, schema *pb.SchemaUpdate) { + s.Lock() + defer s.Unlock() + s.mutSchema[pred] = schema +} + +// DeleteMutSchema deletes the schema for given predicate from mutSchema. +func (s *state) DeleteMutSchema(pred string) { + s.Lock() + defer s.Unlock() + delete(s.mutSchema, pred) +} + // SetType sets the type for the given predicate in memory. // schema mutations must flow through the update function, which are synced to the db. 
func (s *state) SetType(typeName string, typ pb.TypeUpdate) { @@ -149,11 +188,20 @@ func (s *state) SetType(typeName string, typ pb.TypeUpdate) { } // Get gets the schema for the given predicate. -func (s *state) Get(pred string) (pb.SchemaUpdate, bool) { +func (s *state) Get(ctx context.Context, pred string) (pb.SchemaUpdate, bool) { + isWrite, _ := ctx.Value(isWrite).(bool) s.RLock() defer s.RUnlock() - schema, has := s.predicate[pred] - if !has { + // If this is write context, mutSchema will have the updated schema. + // If mutSchema doesn't have the predicate key, we use the schema from s.predicate. + if isWrite { + if schema, ok := s.mutSchema[pred]; ok { + return *schema, true + } + } + + schema, ok := s.predicate[pred] + if !ok { return pb.SchemaUpdate{}, false } return *schema, true @@ -181,26 +229,22 @@ func (s *state) TypeOf(pred string) (types.TypeID, error) { } // IsIndexed returns whether the predicate is indexed or not -func (s *state) IsIndexed(pred string) bool { +func (s *state) IsIndexed(ctx context.Context, pred string) bool { + isWrite, _ := ctx.Value(isWrite).(bool) s.RLock() defer s.RUnlock() + if isWrite { + // TODO(Aman): we could return the query schema if it is a delete. + if schema, ok := s.mutSchema[pred]; ok && len(schema.Tokenizer) > 0 { + return true + } + } + if schema, ok := s.predicate[pred]; ok { return len(schema.Tokenizer) > 0 } - return false -} -// IndexedFields returns the list of indexed fields -func (s *state) IndexedFields() []string { - s.RLock() - defer s.RUnlock() - var out []string - for k, v := range s.predicate { - if len(v.Tokenizer) > 0 { - out = append(out, k) - } - } - return out + return false } // Predicates returns the list of predicates for given group @@ -226,13 +270,24 @@ func (s *state) Types() []string { } // Tokenizer returns the tokenizer for given predicate -func (s *state) Tokenizer(pred string) []tok.Tokenizer { +func (s *state) Tokenizer(ctx context.Context, pred string) []tok.Tokenizer { + isWrite, _ := ctx.Value(isWrite).(bool) s.RLock() defer s.RUnlock() - schema, ok := s.predicate[pred] - x.AssertTruef(ok, "schema state not found for %s", pred) - var tokenizers []tok.Tokenizer - for _, it := range schema.Tokenizer { + var su *pb.SchemaUpdate + if isWrite { + if schema, ok := s.mutSchema[pred]; ok { + su = schema + } + } + if su == nil { + if schema, ok := s.predicate[pred]; ok { + su = schema + } + } + x.AssertTruef(su != nil, "schema state not found for %s", pred) + tokenizers := make([]tok.Tokenizer, 0, len(su.Tokenizer)) + for _, it := range su.Tokenizer { t, found := tok.GetTokenizer(it) x.AssertTruef(found, "Invalid tokenizer %s", it) tokenizers = append(tokenizers, t) @@ -241,9 +296,9 @@ func (s *state) Tokenizer(pred string) []tok.Tokenizer { } // TokenizerNames returns the tokenizer names for given predicate -func (s *state) TokenizerNames(pred string) []string { +func (s *state) TokenizerNames(ctx context.Context, pred string) []string { var names []string - tokenizers := s.Tokenizer(pred) + tokenizers := s.Tokenizer(ctx, pred) for _, t := range tokenizers { names = append(names, t.Name()) } @@ -252,8 +307,8 @@ func (s *state) TokenizerNames(pred string) []string { // HasTokenizer is a convenience func that checks if a given tokenizer is found in pred. // Returns true if found, else false. 
-func (s *state) HasTokenizer(id byte, pred string) bool { - for _, t := range s.Tokenizer(pred) { +func (s *state) HasTokenizer(ctx context.Context, id byte, pred string) bool { + for _, t := range s.Tokenizer(ctx, pred) { if t.Identifier() == id { return true } @@ -262,9 +317,15 @@ func (s *state) HasTokenizer(id byte, pred string) bool { } // IsReversed returns whether the predicate has reverse edge or not -func (s *state) IsReversed(pred string) bool { +func (s *state) IsReversed(ctx context.Context, pred string) bool { + isWrite, _ := ctx.Value(isWrite).(bool) s.RLock() defer s.RUnlock() + if isWrite { + if schema, ok := s.mutSchema[pred]; ok && schema.Directive == pb.SchemaUpdate_REVERSE { + return true + } + } if schema, ok := s.predicate[pred]; ok { return schema.Directive == pb.SchemaUpdate_REVERSE } @@ -272,9 +333,15 @@ func (s *state) IsReversed(pred string) bool { } // HasCount returns whether we want to mantain a count index for the given predicate or not. -func (s *state) HasCount(pred string) bool { +func (s *state) HasCount(ctx context.Context, pred string) bool { + isWrite, _ := ctx.Value(isWrite).(bool) s.RLock() defer s.RUnlock() + if isWrite { + if schema, ok := s.mutSchema[pred]; ok && schema.Count { + return true + } + } if schema, ok := s.predicate[pred]; ok { return schema.Count } @@ -315,6 +382,13 @@ func (s *state) HasNoConflict(pred string) bool { return s.predicate[pred].GetNoConflict() } +// IndexingInProgress checks whether indexing is going on for a given predicate. +func (s *state) IndexingInProgress() bool { + s.RLock() + defer s.RUnlock() + return len(s.mutSchema) > 0 +} + // Init resets the schema state, setting the underlying DB to the given pointer. func Init(ps *badger.DB) { pstore = ps @@ -346,6 +420,7 @@ func Load(predicate string) error { } State().Set(predicate, &s) State().elog.Printf(logUpdate(&s, predicate)) + delete(State().mutSchema, predicate) glog.Infoln(logUpdate(&s, predicate)) return nil } diff --git a/systest/1million/1million_test.go b/systest/1million/1million_test.go index b1942107268..bab0f003915 100644 --- a/systest/1million/1million_test.go +++ b/systest/1million/1million_test.go @@ -20,7 +20,8 @@ package main import ( "context" - "log" + "io/ioutil" + "os" "testing" "time" @@ -9265,7 +9266,16 @@ var tc = []struct { func Test1Million(t *testing.T) { dg, err := testutil.DgraphClient(testutil.SockAddr) if err != nil { - log.Fatalf("Error while getting a dgraph client: %v", err) + t.Fatalf("Error while getting a dgraph client: %v", err) + } + + schemaFile := os.Getenv("SCHEMA_FILE") + data, err := ioutil.ReadFile(schemaFile) + if err != nil { + t.Fatalf("Error in reading the schema: %v", err) + } + if err := testutil.WaitForAlter(context.Background(), dg, string(data)); err != nil { + t.Fatalf("Error in waiting for alter to complete: %v", err) } for _, tt := range tc { diff --git a/systest/1million/test-reindex.sh b/systest/1million/test-reindex.sh index 775ea75cfef..6bcda81dfd9 100755 --- a/systest/1million/test-reindex.sh +++ b/systest/1million/test-reindex.sh @@ -68,6 +68,7 @@ if [[ ! -z "$TEAMCITY_VERSION" ]]; then fi Info "running regression queries" +export SCHEMA_FILE go test -v -tags systest || FOUND_DIFFS=1 Info "bringing down zero and alpha and data volumes" diff --git a/systest/bgindex/common_test.go b/systest/bgindex/common_test.go new file mode 100644 index 00000000000..eb678e8179f --- /dev/null +++ b/systest/bgindex/common_test.go @@ -0,0 +1,55 @@ +// +build systest + +/* + * Copyright 2020 Dgraph Labs, Inc. 
and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/dgraph-io/dgo/v2" +) + +func printStats(counter *uint64, quit <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-quit: + return + case <-time.After(2 * time.Second): + } + + fmt.Println("mutations:", atomic.LoadUint64(counter)) + } +} + +// blocks until query returns no error. +func checkSchemaUpdate(query string, dg *dgo.Dgraph) { + for { + time.Sleep(2 * time.Second) + _, err := dg.NewReadOnlyTxn().Query(context.Background(), query) + if err != nil { + continue + } + + return + } +} diff --git a/systest/bgindex/count_test.go b/systest/bgindex/count_test.go new file mode 100644 index 00000000000..dc7a318b26d --- /dev/null +++ b/systest/bgindex/count_test.go @@ -0,0 +1,284 @@ +// +build systest + +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "sort" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/dgraph-io/badger/v2/y" + "github.com/dgraph-io/dgo/v2" + "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/testutil" +) + +func TestCountIndex(t *testing.T) { + total := 10000 + numUIDs := uint64(total) + edgeCount := make([]int, total+100000) + uidLocks := make([]sync.Mutex, total+100000) + + dg, err := testutil.DgraphClientWithGroot(testutil.SockAddr) + if err != nil { + t.Fatalf("Error while getting a dgraph client: %v", err) + } + + testutil.DropAll(t, dg) + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "value: [string] .", + }); err != nil { + t.Fatalf("error in setting up schema :: %v\n", err) + } + + if err := testutil.AssignUids(uint64(total * 10)); err != nil { + t.Fatalf("error in assigning UIDs :: %v", err) + } + + fmt.Println("inserting values") + th := y.NewThrottle(10000) + for i := 1; i <= int(numUIDs); i++ { + th.Do() + go func(uid int) { + defer th.Done(nil) + bb := &bytes.Buffer{} + edgeCount[uid] = rand.Intn(1000) + for j := 0; j < edgeCount[uid]; j++ { + _, err := bb.WriteString(fmt.Sprintf("<%v> \"%v\" .\n", uid, j)) + if err != nil { + panic(err) + } + } + if err := testutil.RetryMutation(dg, &api.Mutation{ + CommitNow: true, + SetNquads: bb.Bytes(), + }); err != nil { + t.Fatalf("error in mutation :: %v", err) + } + }(i) + } + th.Finish() + + fmt.Println("building indexes in background") + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "value: [string] @count .", + }); err != nil { + t.Fatalf("error in adding indexes :: %v\n", err) + } + + // perform mutations until ctrl+c + mutateUID := func(uid int) { + uidLocks[uid].Lock() + defer uidLocks[uid].Unlock() + ec := edgeCount[uid] + switch rand.Intn(1000) % 3 { + case 0: + // add new edge + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> "%v" .`, uid, ec)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in mutation :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + ec++ + case 1: + if ec <= 0 { + return + } + // delete an edge + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + DelNquads: []byte(fmt.Sprintf(`<%v> "%v" .`, uid, ec-1)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in deletion :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + ec-- + case 2: + // new uid with one edge + uid = int(atomic.AddUint64(&numUIDs, 1)) + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> "0" .`, uid)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in insertion :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + ec = 1 + } + + edgeCount[uid] = ec + } + + // perform mutations until ctrl+c + var swg sync.WaitGroup + var counter uint64 + quit := make(chan struct{}) + runLoop := func() { + defer swg.Done() + for { + select { + case <-quit: + return + default: + n := int(atomic.LoadUint64(&numUIDs)) + mutateUID(rand.Intn(n) + 1) + atomic.AddUint64(&counter, 1) + } + } + } + + swg.Add(101) + for i := 0; i < 100; i++ { + go runLoop() + } + go printStats(&counter, quit, &swg) + checkSchemaUpdate(`{ q(func: eq(count(value), "3")) {uid}}`, dg) + close(quit) + 
swg.Wait() + fmt.Println("mutations done") + + // compute count index + countIndex := make(map[int][]int) + for uid := 1; uid <= int(numUIDs); uid++ { + val := edgeCount[uid] + countIndex[val] = append(countIndex[val], uid) + } + for _, aa := range countIndex { + sort.Ints(aa) + } + + checkDelete := func(uid int) error { + q := fmt.Sprintf(`{ q(func: uid(%v)) {value:count(value)}}`, uid) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query: %v :: %w", q, err) + } + var data struct { + Q []struct { + Count int + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + if len(data.Q) != 1 && data.Q[0].Count != 0 { + return fmt.Errorf("found a deleted UID, %v", uid) + } + return nil + } + + checkValue := func(b int, uids []int) error { + q := fmt.Sprintf(`{ q(func: eq(count(value), "%v")) {uid}}`, b) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query: %v :: %w", q, err) + } + var data struct { + Q []struct { + UID string + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + actual := make([]int, len(data.Q)) + for i, ui := range data.Q { + v, err := strconv.ParseInt(ui.UID, 0, 64) + if err != nil { + return err + } + actual[i] = int(v) + } + sort.Ints(actual) + + if len(actual) != len(uids) { + return fmt.Errorf("length not equal :: exp: %v, actual %v", uids, actual) + } + for i := range uids { + if uids[i] != actual[i] { + return fmt.Errorf("value not equal :: exp: %v, actual %v", uids, actual) + } + } + + return nil + } + + type pair struct { + key int + err string + } + ch := make(chan pair, numUIDs) + + fmt.Println("starting to query") + var count uint64 + th = y.NewThrottle(50000) + th.Do() + go func() { + defer th.Done(nil) + for { + time.Sleep(2 * time.Second) + cur := atomic.LoadUint64(&count) + fmt.Printf("%v/%v done\n", cur, len(countIndex)) + if int(cur) == len(countIndex) { + break + } + } + }() + + for value, uids := range countIndex { + th.Do() + go func(val int, uidList []int) { + defer th.Done(nil) + if val <= 0 { + for _, uid := range uidList { + if err := checkDelete(uid); err != nil { + ch <- pair{uid, err.Error()} + } + } + } else { + if err := checkValue(val, uidList); err != nil { + ch <- pair{val, err.Error()} + } + } + atomic.AddUint64(&count, 1) + }(value, uids) + } + th.Finish() + + close(ch) + for p := range ch { + t.Errorf("failed for %v, :: %v\n", p.key, p.err) + } +} diff --git a/systest/bgindex/docker-compose.yml b/systest/bgindex/docker-compose.yml new file mode 100644 index 00000000000..dad68cdba79 --- /dev/null +++ b/systest/bgindex/docker-compose.yml @@ -0,0 +1,193 @@ +# Auto-generated with: [/tmp/go-build352939306/b001/exe/compose -l -a 6 -r 3 -z 3 -o 100 --acl_secret ../../ee/acl/hmac-secret] +# +version: "3.5" +services: + alpha1: + image: dgraph/dgraph:latest + container_name: alpha1 + working_dir: /data/alpha1 + labels: + cluster: test + ports: + - 8180:8180 + - 9180:9180 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 100 --my=alpha1:7180 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=1 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + alpha2: + image: dgraph/dgraph:latest 
+ container_name: alpha2 + working_dir: /data/alpha2 + depends_on: + - alpha1 + labels: + cluster: test + ports: + - 8182:8182 + - 9182:9182 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 102 --my=alpha2:7182 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=2 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + alpha3: + image: dgraph/dgraph:latest + container_name: alpha3 + working_dir: /data/alpha3 + depends_on: + - alpha2 + labels: + cluster: test + ports: + - 8183:8183 + - 9183:9183 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 103 --my=alpha3:7183 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=3 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + alpha4: + image: dgraph/dgraph:latest + container_name: alpha4 + working_dir: /data/alpha4 + depends_on: + - alpha3 + labels: + cluster: test + ports: + - 8184:8184 + - 9184:9184 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 104 --my=alpha4:7184 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=4 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + alpha5: + image: dgraph/dgraph:latest + container_name: alpha5 + working_dir: /data/alpha5 + depends_on: + - alpha4 + labels: + cluster: test + ports: + - 8185:8185 + - 9185:9185 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 105 --my=alpha5:7185 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=5 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + alpha6: + image: dgraph/dgraph:latest + container_name: alpha6 + working_dir: /data/alpha6 + depends_on: + - alpha5 + labels: + cluster: test + ports: + - 8186:8186 + - 9186:9186 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../ee/acl/hmac-secret + target: /secret/hmac + read_only: true + command: /gobin/dgraph alpha -o 106 --my=alpha6:7186 --lru_mb=1024 --zero=zero1:5180 + --logtostderr -v=2 --idx=6 --acl_secret_file=/secret/hmac --acl_access_ttl 300s + --acl_cache_ttl 500s + zero1: + image: dgraph/dgraph:latest + container_name: zero1 + working_dir: /data/zero1 + labels: + cluster: test + ports: + - 5180:5180 + - 6180:6180 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + command: /gobin/dgraph zero -o 100 --idx=1 --my=zero1:5180 --replicas=3 --logtostderr + -v=2 --bindall + zero2: + image: dgraph/dgraph:latest + container_name: zero2 + working_dir: /data/zero2 + depends_on: + - zero1 + labels: + cluster: test + ports: + - 5182:5182 + - 6182:6182 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + command: /gobin/dgraph zero -o 102 --idx=2 --my=zero2:5182 --replicas=3 --logtostderr + -v=2 --peer=zero1:5180 + zero3: + image: dgraph/dgraph:latest + container_name: zero3 + working_dir: /data/zero3 + depends_on: + - zero2 + labels: + cluster: test + 
ports: + - 5183:5183 + - 6183:6183 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + command: /gobin/dgraph zero -o 103 --idx=3 --my=zero3:5183 --replicas=3 --logtostderr + -v=2 --peer=zero1:5180 +volumes: {} diff --git a/systest/bgindex/parallel_test.go b/systest/bgindex/parallel_test.go new file mode 100644 index 00000000000..ec8e09b56a6 --- /dev/null +++ b/systest/bgindex/parallel_test.go @@ -0,0 +1,203 @@ +// +build systest + +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/dgraph-io/badger/v2/y" + "github.com/dgraph-io/dgo/v2" + "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/testutil" +) + +var ( + total = 100000 +) + +func addBankData(dg *dgo.Dgraph, pred string) error { + for i := 1; i <= total; { + bb := &bytes.Buffer{} + for j := 0; j < 10000; j++ { + _, err := bb.WriteString(fmt.Sprintf("<%v> <%v> \"%v\" .\n", i, pred, i)) + if err != nil { + return fmt.Errorf("error in mutation :: %w", err) + } + i++ + } + if err := testutil.RetryMutation(dg, &api.Mutation{ + CommitNow: true, + SetNquads: bb.Bytes(), + }); err != nil { + return fmt.Errorf("error in mutation :: %w", err) + } + } + + return nil +} + +func TestParallelIndexing(t *testing.T) { + if err := testutil.AssignUids(uint64(total * 10)); err != nil { + t.Fatalf("error in assignig UIDs :: %v", err) + } + + dg, err := testutil.DgraphClientWithGroot(testutil.SockAddr) + if err != nil { + t.Fatalf("Error while getting a dgraph client: %v", err) + } + + testutil.DropAll(t, dg) + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: ` + balance_int: int . + balance_str: string . + balance_float: float . + `, + }); err != nil { + t.Fatalf("error in setting up schema :: %v\n", err) + } + + fmt.Println("adding integer dataset") + if err := addBankData(dg, "balance_int"); err != nil { + t.Fatalf("error in adding integer predicate :: %v\n", err) + } + + fmt.Println("adding string dataset") + if err := addBankData(dg, "balance_str"); err != nil { + t.Fatalf("error in adding string predicate :: %v\n", err) + } + + fmt.Println("adding float dataset") + if err := addBankData(dg, "balance_float"); err != nil { + t.Fatalf("error in adding float predicate :: %v\n", err) + } + + fmt.Println("building indexes in background for int and string data") + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: ` + balance_int: int @index(int) . + balance_str: string @index(fulltext, term, exact) . + `, + }); err != nil { + t.Fatalf("error in adding indexes :: %v\n", err) + } + + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: ` + balance_int: int @index(int) . + balance_str: string @index(fulltext, term, exact) . 
+ `, + }); err != nil && !strings.Contains(err.Error(), "is already being modified") { + t.Fatalf("error in adding indexes :: %v\n", err) + } + + // Wait until previous indexing is complete. + for { + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: `balance_float: float @index(float) .`, + }); err != nil && !strings.Contains(err.Error(), "is already being modified") { + t.Fatalf("error in adding indexes :: %v\n", err) + } else if err == nil { + break + } + time.Sleep(time.Second) + } + + fmt.Println("waiting for float indexing to complete") + s := `balance_float: float @index(float) .` + testutil.WaitForAlter(context.Background(), dg, s) + + // balance should be same as uid. + checkBalance := func(b int, pred string) error { + q := fmt.Sprintf(`{ q(func: eq(%v, "%v")) {uid}}`, pred, b) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query: %v ::%w", q, err) + } + var data struct { + Q []struct { + UID string + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + if len(data.Q) != 1 { + return fmt.Errorf("length not equal :: exp: %v, actual %v", b, data.Q[0]) + } + v, err := strconv.ParseInt(data.Q[0].UID, 0, 64) + if err != nil { + return err + } + if b != int(v) { + return fmt.Errorf("value not equal :: exp: %v, actual %v", b, data.Q[0]) + } + + return nil + } + + fmt.Println("starting to query") + var count uint64 + th := y.NewThrottle(50000) + th.Do() + go func() { + defer th.Done(nil) + for { + time.Sleep(2 * time.Second) + cur := atomic.LoadUint64(&count) + fmt.Printf("%v/%v done\n", cur, total*3) + if int(cur) == total*3 { + break + } + } + }() + + type pair struct { + key int + err string + } + ch := make(chan pair, total*3) + for _, predicate := range []string{"balance_str", "balance_int", "balance_float"} { + for i := 1; i <= total; i++ { + th.Do() + go func(bal int, pred string) { + defer th.Done(nil) + if err := checkBalance(bal, pred); err != nil { + ch <- pair{bal, err.Error()} + } + atomic.AddUint64(&count, 1) + }(i, predicate) + } + } + th.Finish() + + close(ch) + for p := range ch { + t.Errorf("failed for %v, :: %v\n", p.key, p.err) + } +} diff --git a/systest/bgindex/reverse_test.go b/systest/bgindex/reverse_test.go new file mode 100644 index 00000000000..6772db2bf63 --- /dev/null +++ b/systest/bgindex/reverse_test.go @@ -0,0 +1,283 @@ +// +build systest + +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "sort" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/dgraph-io/dgo/v2" + "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/testutil" +) + +func TestReverseIndex(t *testing.T) { + total := 100000 + dg, err := testutil.DgraphClientWithGroot(testutil.SockAddr) + if err != nil { + t.Fatalf("Error while getting a dgraph client: %v", err) + } + + testutil.DropAll(t, dg) + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "balance: [uid] .", + }); err != nil { + t.Fatalf("error in setting up schema :: %v\n", err) + } + + if err := testutil.AssignUids(uint64(total * 10)); err != nil { + t.Fatalf("error in assigning UIDs :: %v", err) + } + + // Insert edges from uid to (uid+1) + fmt.Println("inserting edges") + for i := 1; i < total; { + bb := &bytes.Buffer{} + for j := 0; j < 10000; j++ { + _, err := bb.WriteString(fmt.Sprintf("<%v> <%v> .\n", i, i+1)) + if err != nil { + t.Fatalf("error in mutation %v\n", err) + } + i++ + } + if err := testutil.RetryMutation(dg, &api.Mutation{ + CommitNow: true, + SetNquads: bb.Bytes(), + }); err != nil { + t.Fatalf("error in mutation :: %v", err) + } + } + + fmt.Println("building indexes in background") + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "balance: [uid] @reverse .", + }); err != nil { + t.Fatalf("error in adding indexes :: %v\n", err) + } + + numEdges := int64(total) + updated := sync.Map{} + mutateUID := func(uid int) { + switch uid % 4 { + case 0: + // insert an edge from (uid-2) to uid + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> <%v> .`, uid-2, uid)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in mutation :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + updated.Store(uid, nil) + case 1: + // add new uid and edge from (uid-1) to uid + v := atomic.AddInt64(&numEdges, 1) + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> <%v> .`, v-1, v)), + }); err != nil { + t.Fatalf("error in insertion :: %v\n", err) + } + case 2: + // delete an existing edge from uid-1 to uid + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + DelNquads: []byte(fmt.Sprintf(`<%v> <%v> .`, uid-1, uid)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in mutation :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + updated.Store(uid, nil) + case 3: + // add two new edges, uid+1 to uid AND uid-2 to uid, already have uid to uid-1 + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf("<%v> <%v> .\n<%v> <%v> .", + uid+1, uid, uid-2, uid)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in mutation :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + updated.Store(uid, nil) + } + } + + // perform mutations until ctrl+c + var swg sync.WaitGroup + var counter uint64 + quit := make(chan struct{}) + runLoop := func() { + defer swg.Done() + for { + select { + case <-quit: + return + default: + mutateUID(rand.Intn(total) + 1) + atomic.AddUint64(&counter, 1) + } + } + } + + swg.Add(101) + for i := 0; i < 100; i++ { + go runLoop() + } + go printStats(&counter, quit, &swg) + 
checkSchemaUpdate(`{ q(func: uid(0x01)) { ~balance { uid }}}`, dg) + close(quit) + swg.Wait() + fmt.Println("mutations done") + + // check values now + checkUID := func(i int) error { + q := fmt.Sprintf(`{ q(func: uid(%v)) { ~balance { uid }}}`, i) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query :: %w", err) + } + var data struct { + Q []struct { + Balance []struct { + UID string + } `json:"~balance"` + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + _, ok := updated.Load(i) + switch { + case !ok || i > total || i%4 == 1: + // Expect exactly one edge, uid-1 to uid + if len(data.Q) != 1 || len(data.Q[0].Balance) != 1 { + return fmt.Errorf("length not equal, no mod, got: %+v", data) + } + v1, err := strconv.ParseInt(data.Q[0].Balance[0].UID, 0, 64) + if err != nil { + return err + } + if int(v1) != i-1 { + return fmt.Errorf("value not equal, got: %+v", data) + } + case i%4 == 0: + // Expect two edges, uid-2 to uid AND uid-1 to uid + if len(data.Q) != 1 || len(data.Q[0].Balance) != 2 { + return fmt.Errorf("length not equal, got: %+v", data) + } + v1, err := strconv.ParseInt(data.Q[0].Balance[0].UID, 0, 64) + if err != nil { + return err + } + v2, err := strconv.ParseInt(data.Q[0].Balance[1].UID, 0, 64) + if err != nil { + return err + } + l := []int{int(v1), int(v2)} + sort.Ints(l) + if l[0] != i-2 || l[1] != i-1 { + return fmt.Errorf("value not equal, got: %+v", data) + } + case i%4 == 2: + // This was deleted, so no edges expected + if len(data.Q) != 0 { + return fmt.Errorf("length not equal, del, got: %+v", data) + } + case i%4 == 3: + // Expect 3 edges from uid-2, uid-1 and uid+1 + if len(data.Q) != 1 || len(data.Q[0].Balance) != 3 { + return fmt.Errorf("length not equal, got: %+v", data) + } + v1, err := strconv.ParseInt(data.Q[0].Balance[0].UID, 0, 64) + if err != nil { + return err + } + v2, err := strconv.ParseInt(data.Q[0].Balance[1].UID, 0, 64) + if err != nil { + return err + } + v3, err := strconv.ParseInt(data.Q[0].Balance[2].UID, 0, 64) + if err != nil { + return err + } + l := []int{int(v1), int(v2), int(v3)} + sort.Ints(l) + if l[0] != i-2 || l[1] != i-1 || l[2] != i+1 { + return fmt.Errorf("value not equal, got: %+v", data) + } + } + + return nil + } + + type pair struct { + uid int + err string + } + ch := make(chan pair, numEdges) + + fmt.Println("starting to query") + var wg sync.WaitGroup + var count uint64 + for i := 2; i <= int(numEdges); i += 100 { + wg.Add(1) + go func(j int) { + defer wg.Done() + for k := j; k < j+100 && k <= int(numEdges); k++ { + if err := checkUID(k); err != nil { + ch <- pair{k, err.Error()} + } + atomic.AddUint64(&count, 1) + } + }(i) + } + + wg.Add(1) + go func() { + defer wg.Done() + for { + time.Sleep(2 * time.Second) + cur := atomic.LoadUint64(&count) + fmt.Printf("%v/%v done\n", cur, numEdges-1) + if cur+1 == uint64(numEdges) { + break + } + } + }() + wg.Wait() + + close(ch) + for p := range ch { + t.Errorf("failed for %v, :: %v\n", p.uid, p.err) + } +} diff --git a/systest/bgindex/string_test.go b/systest/bgindex/string_test.go new file mode 100644 index 00000000000..128aa0353cf --- /dev/null +++ b/systest/bgindex/string_test.go @@ -0,0 +1,274 @@ +// +build systest + +/* + * Copyright 2020 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" + "sort" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/dgraph-io/badger/v2/y" + "github.com/dgraph-io/dgo/v2" + "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/testutil" +) + +func TestStringIndex(t *testing.T) { + total := 100000 + numAccts := uint64(total) + acctsBal := make(map[int]int, numAccts) + var lock sync.Mutex + + dg, err := testutil.DgraphClientWithGroot(testutil.SockAddr) + if err != nil { + t.Fatalf("Error while getting a dgraph client: %v", err) + } + + testutil.DropAll(t, dg) + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "balance: string .", + }); err != nil { + t.Fatalf("error in setting up schema :: %v\n", err) + } + + if err := testutil.AssignUids(uint64(total * 10)); err != nil { + t.Fatalf("error in assignig UIDs :: %v", err) + } + + // first insert bank accounts + fmt.Println("inserting accounts") + for i := 1; i <= int(numAccts); { + bb := &bytes.Buffer{} + for j := 0; j < 10000; j++ { + acctsBal[i] = rand.Intn(total * 100) + _, err := bb.WriteString(fmt.Sprintf("<%v> \"%v\" .\n", i, acctsBal[i])) + if err != nil { + t.Fatalf("error in mutation %v\n", err) + } + i++ + } + if err := testutil.RetryMutation(dg, &api.Mutation{ + CommitNow: true, + SetNquads: bb.Bytes(), + }); err != nil { + t.Fatalf("error in mutation :: %v", err) + } + } + + fmt.Println("building indexes in background") + if err := dg.Alter(context.Background(), &api.Operation{ + Schema: "balance: string @index(fulltext, term, exact) .", + }); err != nil { + t.Fatalf("error in adding indexes :: %v\n", err) + } + + // perform mutations until ctrl+c + mutateUID := func(uid int) { + nb := rand.Intn(total * 100) + switch uid % 3 { + case 0: + // change the balance to new random value. + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> "%v" .`, uid, nb)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in mutation :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + case 1: + // delete this uid. + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + DelNquads: []byte(fmt.Sprintf(`<%v> * .`, uid)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in deletion :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + nb = -1 + case 2: + // add new uid. 
+ uid = int(atomic.AddUint64(&numAccts, 1)) + if _, err := dg.NewTxn().Mutate(context.Background(), &api.Mutation{ + CommitNow: true, + SetNquads: []byte(fmt.Sprintf(`<%v> "%v" .`, uid, nb)), + }); err != nil && !errors.Is(err, dgo.ErrAborted) { + t.Fatalf("error in insertion :: %v\n", err) + } else if errors.Is(err, dgo.ErrAborted) { + return + } + } + + lock.Lock() + acctsBal[uid] = nb + lock.Unlock() + } + + // perform mutations until ctrl+c + var swg sync.WaitGroup + var counter uint64 + quit := make(chan struct{}) + runLoop := func() { + defer swg.Done() + for { + select { + case <-quit: + return + default: + n := int(atomic.LoadUint64(&numAccts)) + mutateUID(rand.Intn(n) + 1) + atomic.AddUint64(&counter, 1) + } + } + } + + swg.Add(101) + for i := 0; i < 100; i++ { + go runLoop() + } + go printStats(&counter, quit, &swg) + checkSchemaUpdate(`{ q(func: anyoftext(balance, "example")) {uid}}`, dg) + close(quit) + swg.Wait() + fmt.Println("mutations done") + + // compute index + balIndex := make(map[int][]int) + for uid, bal := range acctsBal { + balIndex[bal] = append(balIndex[bal], uid) + } + for _, aa := range balIndex { + sort.Ints(aa) + } + + checkDelete := func(uid int) error { + q := fmt.Sprintf(`{ q(func: uid(%v)) {balance}}`, uid) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query: %v :: %w", q, err) + } + var data struct { + Q []struct { + Balance string + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + if len(data.Q) != 0 { + return fmt.Errorf("found a deleted UID, %v", uid) + } + return nil + } + + checkBalance := func(b int, uids []int) error { + q := fmt.Sprintf(`{ q(func: anyoftext(balance, "%v")) {uid}}`, b) + resp, err := dg.NewReadOnlyTxn().Query(context.Background(), q) + if err != nil { + return fmt.Errorf("error in query: %v :: %w", q, err) + } + var data struct { + Q []struct { + UID string + } + } + if err := json.Unmarshal(resp.Json, &data); err != nil { + return fmt.Errorf("error in json.Unmarshal :: %w", err) + } + + actual := make([]int, len(data.Q)) + for i, ui := range data.Q { + v, err := strconv.ParseInt(ui.UID, 0, 64) + if err != nil { + return err + } + actual[i] = int(v) + } + sort.Ints(actual) + + if len(actual) != len(uids) { + return fmt.Errorf("length not equal :: exp: %v, actual %v", uids, actual) + } + for i := range uids { + if uids[i] != actual[i] { + return fmt.Errorf("value not equal :: exp: %v, actual %v", uids, actual) + } + } + + return nil + } + + type pair struct { + key int + err string + } + ch := make(chan pair, numAccts) + + fmt.Println("starting to query") + var count uint64 + th := y.NewThrottle(50000) + th.Do() + go func() { + defer th.Done(nil) + for { + time.Sleep(2 * time.Second) + cur := atomic.LoadUint64(&count) + fmt.Printf("%v/%v done\n", cur, len(balIndex)) + if int(cur) == len(balIndex) { + break + } + } + }() + + for balance, uids := range balIndex { + th.Do() + go func(bal int, uidList []int) { + defer th.Done(nil) + if bal == -1 { + for _, uid := range uidList { + if err := checkDelete(uid); err != nil { + ch <- pair{uid, err.Error()} + } + } + } else { + if err := checkBalance(bal, uidList); err != nil { + ch <- pair{bal, err.Error()} + } + } + atomic.AddUint64(&count, 1) + }(balance, uids) + } + th.Finish() + + close(ch) + for p := range ch { + t.Fatalf("failed for %v, :: %v\n", p.key, p.err) + } +} diff --git a/systest/bgindex/test-bgindex.sh 
b/systest/bgindex/test-bgindex.sh new file mode 100755 index 00000000000..85647e2a1af --- /dev/null +++ b/systest/bgindex/test-bgindex.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +set -e +readonly SRCDIR=$(dirname $0) + +function Info { + echo -e "INFO: $*" +} + +function DockerCompose { + docker-compose -p dgraph "$@" +} + +Info "entering directory $SRCDIR" +cd $SRCDIR + +Info "bringing down dgraph cluster and data volumes" +DockerCompose down -v + +Info "bringing up dgraph cluster" +DockerCompose up -d + +Info "waiting for zero to become leader" +DockerCompose logs -f alpha1 | grep -q -m1 "Successfully upserted groot account" + +if [[ ! -z "$TEAMCITY_VERSION" ]]; then + # Make TeamCity aware of Go tests + export GOFLAGS="-json" +fi + +Info "running background indexing test" +go test -v -tags systest || FOUND_DIFFS=1 + +Info "bringing down dgraph cluster and data volumes" +DockerCompose down -v + +if [[ $FOUND_DIFFS -eq 0 ]]; then + Info "test passed" +else + Info "test failed" +fi + +exit $FOUND_DIFFS diff --git a/systest/mutations_test.go b/systest/mutations_test.go index d9b17b81f14..02491348ea4 100644 --- a/systest/mutations_test.go +++ b/systest/mutations_test.go @@ -172,9 +172,9 @@ func ListWithLanguagesTest(t *testing.T, c *dgo.Dgraph) { func NQuadMutationTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: `xid: string @index(exact) .`, - })) + op := &api.Operation{Schema: `xid: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() assigned, err := txn.Mutate(ctx, &api.Mutation{ @@ -249,7 +249,9 @@ func NQuadMutationTest(t *testing.T, c *dgo.Dgraph) { func DeleteAllReverseIndex(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: "link: [uid] @reverse ."})) + op := &api.Operation{Schema: "link: [uid] @reverse ."} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) assignedIds, err := c.NewTxn().Mutate(ctx, &api.Mutation{ CommitNow: true, SetNquads: []byte("_:a _:b ."), @@ -296,7 +298,9 @@ func DeleteAllReverseIndex(t *testing.T, c *dgo.Dgraph) { func NormalizeEdgeCasesTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: "xid: string @index(exact) ."})) + op := &api.Operation{Schema: "xid: string @index(exact) ."} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) _, err := c.NewTxn().Mutate(ctx, &api.Mutation{ CommitNow: true, @@ -373,9 +377,9 @@ func NormalizeEdgeCasesTest(t *testing.T, c *dgo.Dgraph) { func FacetOrderTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: `name: string @index(exact) .`, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -447,7 +451,9 @@ func FacetOrderTest(t *testing.T, c *dgo.Dgraph) { // Shows fix for issue #1918. 
func LangAndSortBugTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: "name: string @index(exact) @lang ."})) + op := &api.Operation{Schema: "name: string @index(exact) @lang ."} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -541,8 +547,9 @@ func SortFacetsReturnNil(t *testing.T, c *dgo.Dgraph) { func SchemaAfterDeleteNode(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: "married: bool ."})) + op := &api.Operation{Schema: "married: bool ."} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() assigned, err := txn.Mutate(ctx, &api.Mutation{ @@ -595,8 +602,9 @@ func asJson(schema string) string { func FullTextEqual(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: "text: string @index(fulltext) ."})) + op := &api.Operation{Schema: "text: string @index(fulltext) ."} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) texts := []string{"bat man", "aqua man", "bat cave", "bat", "man", "aqua", "cave"} var rdfs bytes.Buffer @@ -664,7 +672,9 @@ func JSONBlankNode(t *testing.T, c *dgo.Dgraph) { func ScalarToList(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: `pred: string @index(exact) .`})) + op := &api.Operation{Schema: `pred: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) uids, err := c.NewTxn().Mutate(ctx, &api.Mutation{ SetNquads: []byte(`_:blank "first" .`), @@ -684,7 +694,9 @@ func ScalarToList(t *testing.T, c *dgo.Dgraph) { require.NoError(t, err) require.Equal(t, `{"me":[{"pred":"first"}]}`, string(resp.Json)) - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: `pred: [string] @index(exact) .`})) + op = &api.Operation{Schema: `pred: [string] @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) resp, err = c.NewTxn().Query(ctx, q) require.NoError(t, err) require.Equal(t, `{"me":[{"pred":["first"]}]}`, string(resp.Json)) @@ -750,14 +762,18 @@ func ScalarToList(t *testing.T, c *dgo.Dgraph) { func ListToScalar(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{Schema: `pred: [string] @index(exact) .`})) + op := &api.Operation{Schema: `pred: [string] @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) err := c.Alter(ctx, &api.Operation{Schema: `pred: string @index(exact) .`}) require.Error(t, err) require.Contains(t, err.Error(), `Type can't be changed from list to scalar for attr: [pred] without dropping it first.`) require.NoError(t, c.Alter(ctx, &api.Operation{DropAttr: `pred`})) - err = c.Alter(ctx, &api.Operation{Schema: `pred: string @index(exact) .`}) + op = &api.Operation{Schema: `pred: string @index(exact) .`} + err = c.Alter(ctx, op) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) require.NoError(t, err) } @@ -812,7 +828,9 @@ func SetAfterDeletionListType(t *testing.T, c *dgo.Dgraph) { func EmptyNamesWithExact(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - err := c.Alter(ctx, 
&api.Operation{Schema: `name: string @index(exact) @lang .`}) + op := &api.Operation{Schema: `name: string @index(exact) @lang .`} + err := c.Alter(ctx, op) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) require.NoError(t, err) _, err = c.NewTxn().Mutate(ctx, &api.Mutation{ @@ -847,6 +865,7 @@ func EmptyRoomsWithTermIndex(t *testing.T, c *dgo.Dgraph) { ` ctx := context.Background() err := c.Alter(ctx, op) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) require.NoError(t, err) _, err = c.NewTxn().Mutate(ctx, &api.Mutation{ @@ -1028,9 +1047,11 @@ func SkipEmptyPLForHas(t *testing.T, c *dgo.Dgraph) { func HasWithDash(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, (c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: `name: string @index(hash) .`, - }))) + } + check(t, (c.Alter(ctx, op))) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -1063,12 +1084,14 @@ func HasWithDash(t *testing.T, c *dgo.Dgraph) { func ListGeoFilterTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(term) . loc: [geo] @index(geo) . `, - })) + } + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1110,12 +1133,14 @@ func ListGeoFilterTest(t *testing.T, c *dgo.Dgraph) { func ListRegexFilterTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(term) . per: [string] @index(trigram) . `, - })) + } + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1157,12 +1182,14 @@ func ListRegexFilterTest(t *testing.T, c *dgo.Dgraph) { func RegexQueryWithVars(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(term) . per: [string] @index(trigram) . `, - })) + } + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1205,11 +1232,9 @@ func RegexQueryWithVars(t *testing.T, c *dgo.Dgraph) { func GraphQLVarChild(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ - Schema: ` - name: string @index(exact) . - `, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1309,11 +1334,9 @@ func GraphQLVarChild(t *testing.T, c *dgo.Dgraph) { func MathGe(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ - Schema: ` - name: string @index(exact) . - `, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1451,11 +1474,9 @@ func HasDeletedEdge(t *testing.T, c *dgo.Dgraph) { func HasReverseEdge(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - check(t, c.Alter(ctx, &api.Operation{ - Schema: ` - follow: [uid] @reverse . 
- `, - })) + op := &api.Operation{Schema: `follow: [uid] @reverse .`} + check(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() defer txn.Discard(ctx) @@ -1554,12 +1575,14 @@ func RestoreReservedPreds(t *testing.T, c *dgo.Dgraph) { func DropData(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(term) . follow: [uid] @reverse . `, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -1655,6 +1678,7 @@ func ReverseCountIndex(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() err := c.Alter(ctx, op) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) require.NoError(t, err) mu := &api.Mutation{ @@ -1754,6 +1778,7 @@ func TypePredicateCheck(t *testing.T, c *dgo.Dgraph) { }` ctx = context.Background() err = c.Alter(ctx, op) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) require.NoError(t, err) } diff --git a/systest/plugin_test.go b/systest/plugin_test.go index 62821388dbd..5acffbaea71 100644 --- a/systest/plugin_test.go +++ b/systest/plugin_test.go @@ -31,6 +31,7 @@ import ( "github.com/dgraph-io/dgo/v2/protos/api" "github.com/dgraph-io/dgraph/testutil" + "github.com/dgraph-io/dgraph/tok" ) func TestPlugins(t *testing.T) { @@ -80,6 +81,7 @@ func TestPlugins(t *testing.T) { check(t, cluster.client.Alter(ctx, &api.Operation{ Schema: initialSchema, })) + check(t, testutil.WaitForAlter(ctx, cluster.client, initialSchema)) txn := cluster.client.NewTxn() _, err = txn.Mutate(ctx, &api.Mutation{SetJson: []byte(setJSON)}) @@ -94,6 +96,11 @@ func TestPlugins(t *testing.T) { } } + // Need to do this so that schema.Parse in testutil.WaitForAlter doesn't complain. + for _, soFile := range soFiles { + tok.LoadCustomTokenizer(soFile) + } + suite( "word: string @index(anagram) .", `[ diff --git a/systest/queries_test.go b/systest/queries_test.go index df7cef19eac..950e86b4411 100644 --- a/systest/queries_test.go +++ b/systest/queries_test.go @@ -65,12 +65,14 @@ func SchemaQueryCleanup(t *testing.T, c *dgo.Dgraph) { func MultipleBlockEval(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` entity: string @index(exact) . stock: [uid] @reverse . `, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -226,14 +228,16 @@ func MultipleBlockEval(t *testing.T, c *dgo.Dgraph) { func UnmatchedVarEval(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` item: string @index(hash) . style.type: string . style.name: string . style.cool: bool . 
`, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -318,9 +322,9 @@ func UnmatchedVarEval(t *testing.T, c *dgo.Dgraph) { func SchemaQueryTest(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: `name: string @index(exact) .`, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -360,12 +364,14 @@ func SchemaQueryTest(t *testing.T, c *dgo.Dgraph) { func SchemaQueryTestPredicate1(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(exact) . age: int . `, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -427,9 +433,9 @@ func SchemaQueryTestPredicate1(t *testing.T, c *dgo.Dgraph) { func SchemaQueryTestPredicate2(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: `name: string @index(exact) .`, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -460,12 +466,14 @@ func SchemaQueryTestPredicate2(t *testing.T, c *dgo.Dgraph) { func SchemaQueryTestPredicate3(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` name: string @index(exact) . age: int . `, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -503,9 +511,9 @@ func SchemaQueryTestPredicate3(t *testing.T, c *dgo.Dgraph) { func SchemaQueryTestHTTP(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: `name: string @index(exact) .`, - })) + op := &api.Operation{Schema: `name: string @index(exact) .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -570,12 +578,14 @@ func SchemaQueryTestHTTP(t *testing.T, c *dgo.Dgraph) { func FuzzyMatch(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ + op := &api.Operation{ Schema: ` term: string @index(trigram) . name: string . `, - })) + } + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -714,11 +724,9 @@ func FuzzyMatch(t *testing.T, c *dgo.Dgraph) { func QueryHashIndex(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: ` - name: string @index(hash) @lang . 
- `, - })) + op := &api.Operation{Schema: `name: string @index(hash) @lang .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -827,11 +835,9 @@ func QueryHashIndex(t *testing.T, c *dgo.Dgraph) { func RegexpToggleTrigramIndex(t *testing.T, c *dgo.Dgraph) { ctx := context.Background() - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: ` - name: string @index(term) @lang . - `, - })) + op := &api.Operation{Schema: `name: string @index(term) @lang .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) txn := c.NewTxn() _, err := txn.Mutate(ctx, &api.Mutation{ @@ -872,11 +878,9 @@ func RegexpToggleTrigramIndex(t *testing.T, c *dgo.Dgraph) { testutil.CompareJSON(t, tc.out, string(resp.Json)) } - require.NoError(t, c.Alter(ctx, &api.Operation{ - Schema: ` - name: string @index(trigram) @lang . - `, - })) + op = &api.Operation{Schema: `name: string @index(trigram) @lang .`} + require.NoError(t, c.Alter(ctx, op)) + require.NoError(t, testutil.WaitForAlter(ctx, c, op.Schema)) t.Log("testing with trigram index") for _, tc := range tests { diff --git a/test.sh b/test.sh index 29732bcc883..70a299f0a42 100755 --- a/test.sh +++ b/test.sh @@ -256,6 +256,9 @@ if [[ :${TEST_SET}: == *:systest:* ]]; then Info "Running rebuilding index test" RunCmd ./systest/1million/test-reindex.sh || TestFailed + + Info "Running background index test" + RunCmd ./systest/bgindex/test-bgindex.sh || TestFailed fi Info "Stopping cluster" diff --git a/testutil/client.go b/testutil/client.go index 1570ce536ed..a9ea035ff68 100644 --- a/testutil/client.go +++ b/testutil/client.go @@ -25,6 +25,7 @@ import ( "net/http" "os" "os/exec" + "reflect" "strconv" "strings" "testing" @@ -32,6 +33,8 @@ import ( "github.com/dgraph-io/dgo/v2" "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/protos/pb" + "github.com/dgraph-io/dgraph/schema" "github.com/dgraph-io/dgraph/x" "github.com/pkg/errors" "github.com/spf13/viper" @@ -106,7 +109,9 @@ func DgraphClientWithGroot(serviceAddr string) (*dgo.Dgraph, error) { for { // keep retrying until we succeed or receive a non-retriable error err = dg.Login(ctx, x.GrootId, "password") - if err == nil || !strings.Contains(err.Error(), "Please retry") { + if err == nil || !(strings.Contains(err.Error(), "Please retry") || + strings.Contains(err.Error(), "user not found")) { + break } time.Sleep(time.Second) @@ -158,6 +163,60 @@ func DropAll(t *testing.T, dg *dgo.Dgraph) { require.NoError(t, err) } +// SameIndexes checks whether SchemaUpdate and SchemaNode have same indexes. +func SameIndexes(su *pb.SchemaUpdate, n *pb.SchemaNode) bool { + if (su.Directive == pb.SchemaUpdate_REVERSE) != n.Reverse { + return false + } + if !reflect.DeepEqual(su.Tokenizer, n.Tokenizer) { + return false + } + if su.Count != n.Count { + return false + } + return true +} + +// WaitForAlter waits for schema to have the same indexes as the given schema. 
+func WaitForAlter(ctx context.Context, dg *dgo.Dgraph, s string) error { + ps, err := schema.Parse(s) + if err != nil { + return err + } + + for { + resp, err := dg.NewReadOnlyTxn().Query(ctx, "schema{}") + if err != nil { + return err + } + + var result struct { + Schema []*pb.SchemaNode + } + if err := json.Unmarshal(resp.Json, &result); err != nil { + return err + } + + actual := make(map[string]*pb.SchemaNode) + for _, rs := range result.Schema { + actual[rs.Predicate] = rs + } + + done := true + for _, su := range ps.Preds { + if n, ok := actual[su.Predicate]; !ok || !SameIndexes(su, n) { + done = false + break + } + } + if done { + return nil + } + + time.Sleep(time.Second) + } +} + // RetryQuery will retry a query until it succeeds or a non-retryable error is received. func RetryQuery(dg *dgo.Dgraph, q string) (*api.Response, error) { for { @@ -166,6 +225,7 @@ func RetryQuery(dg *dgo.Dgraph, q string) (*api.Response, error) { time.Sleep(10 * time.Millisecond) continue } + return resp, err } } @@ -306,11 +366,7 @@ type curlOutput struct { Errors []curlErrorEntry `json:"errors"` } -func verifyOutput(t *testing.T, bytes []byte, failureConfig *CurlFailureConfig) { - output := curlOutput{} - require.NoError(t, json.Unmarshal(bytes, &output), - "unable to unmarshal the curl output") - +func verifyOutput(t *testing.T, output curlOutput, failureConfig *CurlFailureConfig) { if failureConfig.ShouldFail { require.True(t, len(output.Errors) > 0, "no error entry found") if len(failureConfig.DgraphErrMsg) > 0 { @@ -327,21 +383,32 @@ func verifyOutput(t *testing.T, bytes []byte, failureConfig *CurlFailureConfig) // VerifyCurlCmd executes the curl command with the given arguments and verifies // the result against the expected output. -func VerifyCurlCmd(t *testing.T, args []string, - failureConfig *CurlFailureConfig) { - queryCmd := exec.Command("curl", args...) - - output, err := queryCmd.Output() - if len(failureConfig.CurlErrMsg) > 0 { - // the curl command should have returned an non-zero code - require.Error(t, err, "the curl command should have failed") - if ee, ok := err.(*exec.ExitError); ok { - require.True(t, strings.Contains(string(ee.Stderr), failureConfig.CurlErrMsg), - "the curl output does not contain the expected output") +func VerifyCurlCmd(t *testing.T, args []string, failureConfig *CurlFailureConfig) { + for { + queryCmd := exec.Command("curl", args...) 
+ output, err := queryCmd.Output() + if len(failureConfig.CurlErrMsg) > 0 { + // the curl command should have returned an non-zero code + require.Error(t, err, "the curl command should have failed") + if ee, ok := err.(*exec.ExitError); ok { + require.True(t, strings.Contains(string(ee.Stderr), failureConfig.CurlErrMsg), + "the curl output does not contain the expected output") + } + return } - } else { + require.NoError(t, err, "the curl command should have succeeded") - verifyOutput(t, output, failureConfig) + co := curlOutput{} + require.NoError(t, json.Unmarshal(output, &co), + "unable to unmarshal the curl output") + if len(co.Errors) > 0 { + if strings.Contains(co.Errors[0].Message, "schema is already being modified") { + time.Sleep(time.Second) + continue + } + } + verifyOutput(t, co, failureConfig) + return } } diff --git a/tlstest/acl/acl_over_tls_test.go b/tlstest/acl/acl_over_tls_test.go index 13295913cf5..788757dfe0f 100644 --- a/tlstest/acl/acl_over_tls_test.go +++ b/tlstest/acl/acl_over_tls_test.go @@ -5,7 +5,9 @@ import ( "crypto/tls" "crypto/x509" "io/ioutil" + "strings" "testing" + "time" "github.com/dgraph-io/dgo/v2" "github.com/dgraph-io/dgo/v2/protos/api" @@ -103,8 +105,15 @@ func TestLoginOverTLS(t *testing.T) { if err != nil { t.Fatalf("Unable to get dgraph client: %s", err.Error()) } - if err := dg.Login(context.Background(), "groot", "password"); err != nil { - t.Fatalf("Unable to login using the groot account: %v", err.Error()) + for { + err := dg.Login(context.Background(), "groot", "password") + if err == nil { + break + } else if err != nil && !strings.Contains(err.Error(), "user not found") { + t.Fatalf("Unable to login using the groot account: %v", err.Error()) + } + + time.Sleep(time.Second) } // Output: diff --git a/types/facets/facet_types.go b/types/facets/facet_types.go index a118f825678..2731511b5c5 100644 --- a/types/facets/facet_types.go +++ b/types/facets/facet_types.go @@ -16,7 +16,12 @@ package facets -import "github.com/dgraph-io/dgo/v2/protos/api" +import ( + "errors" + + "github.com/dgraph-io/dgo/v2/protos/api" + "github.com/dgraph-io/dgraph/x" +) const ( // IntID represents the integer type. @@ -48,5 +53,6 @@ func ValTypeForTypeID(typId TypeID) api.Facet_ValType { case StringID: return api.Facet_STRING } - panic("Unhandled case in ValTypeForTypeID.") + x.Panic(errors.New("unhandled case in ValTypeForTypeID")) + return api.Facet_ValType(0) } diff --git a/wiki/content/clients/index.md b/wiki/content/clients/index.md index a683f0d1db5..76a5811cc96 100644 --- a/wiki/content/clients/index.md +++ b/wiki/content/clients/index.md @@ -572,6 +572,11 @@ type Person { If all goes well, the response should be `{"code":"Success","message":"Done"}`. +We build indexes in the background so that mutations and queries are not blocked. +In such a case, the new schema may not be reflected right away. You could poll the +schema to check whether indexing has been completed. New alter requests will be +rejected until the background indexing task is finished. + Other operations can be performed via the `/alter` endpoint as well. A specific predicate or the entire database can be dropped. diff --git a/wiki/content/deploy/index.md b/wiki/content/deploy/index.md index 5778d93a996..c933e281dc6 100644 --- a/wiki/content/deploy/index.md +++ b/wiki/content/deploy/index.md @@ -1660,6 +1660,36 @@ enabled the browser will prompt you for a client certificate to use. Select the certificate you've just installed in the step above and queries/mutations will succeed. 
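The note added to `wiki/content/clients/index.md` earlier in this change says that indexes are now built in the background and that clients can poll the schema until indexing completes. As a rough illustration of that polling (not part of the patch; the predicate `name`, the `term` tokenizer, and the one-second interval are assumptions), a dgo-based loop could look like this:

```
package example

import (
	"context"
	"strings"
	"time"

	"github.com/dgraph-io/dgo/v2"
)

// waitForTermIndex polls the schema until the background index build for the
// illustrative predicate "name" is visible, i.e. its tokenizer list contains "term".
func waitForTermIndex(ctx context.Context, dg *dgo.Dgraph) error {
	for {
		resp, err := dg.NewReadOnlyTxn().Query(ctx, `schema(pred: [name]) {}`)
		if err != nil {
			return err
		}
		if strings.Contains(string(resp.Json), "term") {
			return nil // index is reflected in the schema; the alter has finished
		}
		time.Sleep(time.Second) // still building; wait and poll again
	}
}
```

The `testutil.WaitForAlter` helper introduced above does a stricter version of the same check by parsing the requested schema and comparing tokenizers, reverse and count settings per predicate.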
+### Using Curl with Client authentication + +When TLS is enabled, `curl` requests to Dgraph will need some specific options to work. + +If the `--tls_client_auth` option is set to `REQUEST`or `VERIFYIFGIVEN` (default), +use the option `--cacert`. For instance (for an export request): + +``` +curl --cacert ./tls/ca.crt https://localhost:8080/admin/export +``` + +If the `--tls_client_auth` option is set to `REQUIREANY` or `REQUIREANDVERIFY`, +in addition to the `--cacert` option, also use the `--cert` and `--key` options. +For instance (for an export request): + +``` +curl --cacert ./tls/ca.crt --cert ./tls/node.crt --key ./tls/node.key https://localhost:8080/admin/export +``` + +Refer to the `curl` documentation for further information on its TLS options. + +### Access Data Using a Client + +Some examples of connecting via a [Client](/clients) when TLS is in use can be found below: + +- [dgraph4j](https://github.com/dgraph-io/dgraph4j#creating-a-secure-client-using-tls) +- [dgraph-js](https://github.com/dgraph-io/dgraph-js/tree/master/examples/tls) +- [dgo](https://github.com/dgraph-io/dgraph/blob/master/tlstest/acl/acl_over_tls_test.go) +- [pydgraph](https://github.com/dgraph-io/pydgraph/tree/master/examples/tls) + ### Troubleshooting Ratel's Client authentication If you are getting errors in Ratel when server's TLS is enabled try opening diff --git a/worker/compare.go b/worker/compare.go index 3c5ff432505..b6f99db20e7 100644 --- a/worker/compare.go +++ b/worker/compare.go @@ -16,6 +16,12 @@ package worker +import ( + "errors" + + "github.com/dgraph-io/dgraph/x" +) + func evalCompare(cmp string, lv, rv int64) bool { switch cmp { case "le": @@ -29,5 +35,6 @@ func evalCompare(cmp string, lv, rv int64) bool { case "eq": return lv == rv } - panic("EvalCompare: unreachable") + x.Panic(errors.New("EvalCompare: unreachable")) + return false } diff --git a/worker/draft.go b/worker/draft.go index a42605f883a..ef90cfa2ecc 100644 --- a/worker/draft.go +++ b/worker/draft.go @@ -199,18 +199,22 @@ func (n *node) applyMutations(ctx context.Context, proposal *pb.Proposal) (rerr if proposal.Mutations.StartTs == 0 { return errors.New("StartTs must be provided") } - startTs := proposal.Mutations.StartTs if len(proposal.Mutations.Schema) > 0 || len(proposal.Mutations.Types) > 0 { + // MaxAssigned would ensure that everything that's committed up until this point + // would be picked up in building indexes. Any uncommitted txns would be cancelled + // by detectPendingTxns below. + startTs := posting.Oracle().MaxAssigned() + span.Annotatef(nil, "Applying schema and types") for _, supdate := range proposal.Mutations.Schema { // We should not need to check for predicate move here. if err := detectPendingTxns(supdate.Predicate); err != nil { return err } - if err := runSchemaMutation(ctx, supdate, startTs); err != nil { - return err - } + } + if err := runSchemaMutation(ctx, proposal.Mutations.Schema, startTs); err != nil { + return err } for _, tupdate := range proposal.Mutations.Types { diff --git a/worker/export.go b/worker/export.go index d9d4b97142e..ceea91764fb 100644 --- a/worker/export.go +++ b/worker/export.go @@ -126,7 +126,7 @@ func escapedString(str string) string { // All valid stings should be able to be escaped to a JSON string so // it's safe to panic here. Marshal has to return an error because it // accepts an interface. 
- panic("Could not marshal string to JSON string") + x.Panic(errors.New("Could not marshal string to JSON string")) } return string(byt) } diff --git a/worker/groups.go b/worker/groups.go index fe56644f8db..33f156586f2 100644 --- a/worker/groups.go +++ b/worker/groups.go @@ -198,6 +198,7 @@ func (g *groupi) proposeInitialTypes() { func (g *groupi) proposeInitialSchema() { initialSchema := schema.InitialSchema() + ctx := context.Background() for _, s := range initialSchema { if gid, err := g.BelongsToReadOnly(s.Predicate, 0); err != nil { glog.Errorf("Error getting tablet for predicate %s. Will force schema proposal.", @@ -205,7 +206,7 @@ func (g *groupi) proposeInitialSchema() { g.upsertSchema(s, nil) } else if gid == 0 { g.upsertSchema(s, nil) - } else if curr, _ := schema.State().Get(s.Predicate); gid == g.groupId() && + } else if curr, _ := schema.State().Get(ctx, s.Predicate); gid == g.groupId() && !proto.Equal(s, &curr) { // If this tablet is served to the group, do not upsert the schema unless the // stored schema and the proposed one are different. diff --git a/worker/mutation.go b/worker/mutation.go index 7ce50912bc0..af931218c00 100644 --- a/worker/mutation.go +++ b/worker/mutation.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "math" + "sync" "time" "github.com/dgraph-io/badger/v2" @@ -55,10 +56,11 @@ func isDeletePredicateEdge(edge *pb.DirectedEdge) bool { // runMutation goes through all the edges and applies them. func runMutation(ctx context.Context, edge *pb.DirectedEdge, txn *posting.Txn) error { + ctx = schema.GetWriteContext(ctx) + // We shouldn't check whether this Alpha serves this predicate or not. Membership information // isn't consistent across the entire cluster. We should just apply whatever is given to us. - - su, ok := schema.State().Get(edge.Attr) + su, ok := schema.State().Get(ctx, edge.Attr) if edge.Op == pb.DirectedEdge_SET { if !ok { return errors.Errorf("runMutation: Unable to find schema for %s", edge.Attr) @@ -115,65 +117,115 @@ func runMutation(ctx context.Context, edge *pb.DirectedEdge, txn *posting.Txn) e return plist.AddMutationWithIndex(ctx, edge, txn) } -// This is serialized with mutations, called after applied watermarks catch up -// and further mutations are blocked until this is done. -func runSchemaMutation(ctx context.Context, update *pb.SchemaUpdate, startTs uint64) error { - if err := runSchemaMutationHelper(ctx, update, startTs); err != nil { - // on error, we restore the memory state to be the same as the disk - maxRetries := 10 - loadErr := x.RetryUntilSuccess(maxRetries, 10*time.Millisecond, func() error { - return schema.Load(update.Predicate) - }) - - if loadErr != nil { - glog.Fatalf("failed to load schema after %d retries: %v", maxRetries, loadErr) +func runSchemaMutation(ctx context.Context, updates []*pb.SchemaUpdate, startTs uint64) error { + // Wait until schema modification for all predicates is complete. There cannot be two + // background tasks running as this is a race condition. We typically won't propose an + // index update if one is already going on. If that's not the case, then the receiver + // of the update had probably finished the previous index update but some follower + // (or perhaps leader) had not finished it. + // In other words, the proposer checks whether there is another indexing in progress. + // If that's the case, the alter request is rejected. Otherwise, the request is accepted. 
+ // Before reaching here, the proposer P would have checked that no indexing is in progress + // (could also be because proposer was done earlier than others). If P was still indexing + // when the req was received, it would have rejected the Alter request. Only if P is + // not indexing, it would accept and propose the request. + // It is possible that a receiver R of the proposal is still indexing. In that case, R would + // block here and wait for indexing to be finished. + for { + if !schema.State().IndexingInProgress() { + break } - return err + glog.Infoln("waiting for indexing to complete") + time.Sleep(time.Second * 2) } - return updateSchema(update) -} + buildIndexesHelper := func(update *pb.SchemaUpdate, rebuild posting.IndexRebuild) error { + wrtCtx := schema.GetWriteContext(context.Background()) + if err := rebuild.BuildIndexes(wrtCtx); err != nil { + return err + } + if err := updateSchema(update); err != nil { + return err + } -func runSchemaMutationHelper(ctx context.Context, update *pb.SchemaUpdate, startTs uint64) error { - if tablet, err := groups().Tablet(update.Predicate); err != nil { - return err - } else if tablet.GetGroupId() != groups().groupId() { - return errors.Errorf("Tablet isn't being served by this group. Tablet: %+v", tablet) + glog.Infof("Done schema update %+v\n", update) + return nil } - if err := checkSchema(update); err != nil { - return err + // This wg allows waiting until setup for all the predicates is complete + // befor running buildIndexes for any of those predicates. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + buildIndexes := func(update *pb.SchemaUpdate, rebuild posting.IndexRebuild) { + // We should only start building indexes once this function has returned. + // This is in order to ensure that we do not call DropPrefix for one predicate + // and write indexes for another predicate simultaneously. because that could + // cause writes to badger to fail leading to undesired indexing failures. + wg.Wait() + + // undo schema changes in case re-indexing fails. + if err := buildIndexesHelper(update, rebuild); err != nil { + glog.Errorf("error in building indexes, aborting :: %v\n", err) + + maxRetries := 10 + loadErr := x.RetryUntilSuccess(maxRetries, 10*time.Millisecond, func() error { + return schema.Load(update.Predicate) + }) + + if loadErr != nil { + glog.Fatalf("failed to load schema after %d retries: %v", maxRetries, loadErr) + } + } } - old, _ := schema.State().Get(update.Predicate) - // Sets only in memory, we will update it on disk only after schema mutations - // are successful and written to disk. - schema.State().Set(update.Predicate, update) - - // Once we remove index or reverse edges from schema, even though the values - // are present in db, they won't be used due to validation in work/task.go - - // We don't want to use sync watermarks for background removal, because it would block - // linearizable read requests. Only downside would be on system crash, stale edges - // might remain, which is ok. - - // Indexing can't be done in background as it can cause race conditons with new - // index mutations (old set and new del) - // We need watermark for index/reverse edge addition for linearizable reads. - // (both applied and synced watermarks). 
- defer glog.Infof("Done schema update %+v\n", update) - rebuild := posting.IndexRebuild{ - Attr: update.Predicate, - StartTs: startTs, - OldSchema: &old, - CurrentSchema: update, - } - return rebuild.Run(ctx) + + for _, su := range updates { + if tablet, err := groups().Tablet(su.Predicate); err != nil { + return err + } else if tablet.GetGroupId() != groups().groupId() { + return errors.Errorf("Tablet isn't being served by this group. Tablet: %+v", tablet) + } + + if err := checkSchema(su); err != nil { + return err + } + + old, _ := schema.State().Get(ctx, su.Predicate) + rebuild := posting.IndexRebuild{ + Attr: su.Predicate, + StartTs: startTs, + OldSchema: &old, + CurrentSchema: su, + } + querySchema := rebuild.GetQuerySchema() + // Sets the schema only in memory. The schema is written to + // disk only after schema mutations are successful. + schema.State().Set(su.Predicate, querySchema) + schema.State().SetMutSchema(su.Predicate, su) + + // TODO(Aman): If we return an error, we may not have right schema reflected. + if err := rebuild.DropIndexes(ctx); err != nil { + return err + } + if err := rebuild.BuildData(ctx); err != nil { + return err + } + + if rebuild.NeedIndexRebuild() { + go buildIndexes(su, rebuild) + } else if err := updateSchema(su); err != nil { + return err + } + } + + return nil } // updateSchema commits the schema to disk in blocking way, should be ok because this happens // only during schema mutations or we see a new predicate. func updateSchema(s *pb.SchemaUpdate) error { schema.State().Set(s.Predicate, s) + schema.State().DeleteMutSchema(s.Predicate) txn := pstore.NewTransactionAt(1, true) defer txn.Discard() data, err := s.Marshal() @@ -190,9 +242,11 @@ func updateSchema(s *pb.SchemaUpdate) error { } func createSchema(attr string, typ types.TypeID, hint pb.Metadata_HintType) error { + ctx := schema.GetWriteContext(context.Background()) + // Don't overwrite schema blindly, acl's might have been set even though // type is not present - s, ok := schema.State().Get(attr) + s, ok := schema.State().Get(ctx, attr) if ok { s.ValueType = typ.Enum() } else { diff --git a/worker/proposal.go b/worker/proposal.go index befbd582564..b4d43fb3f56 100644 --- a/worker/proposal.go +++ b/worker/proposal.go @@ -158,12 +158,13 @@ func (n *node) proposeAndWait(ctx context.Context, proposal *pb.Proposal) (perr // Do a type check here if schema is present // In very rare cases invalid entries might pass through raft, which would // be persisted, we do best effort schema check while writing + ctx = schema.GetWriteContext(ctx) if proposal.Mutations != nil { for _, edge := range proposal.Mutations.Edges { if err := checkTablet(edge.Attr); err != nil { return err } - su, ok := schema.State().Get(edge.Attr) + su, ok := schema.State().Get(ctx, edge.Attr) if !ok { continue } else if err := ValidateAndConvert(edge, &su); err != nil { diff --git a/worker/schema.go b/worker/schema.go index 22279390f8e..f2599a8b50c 100644 --- a/worker/schema.go +++ b/worker/schema.go @@ -89,20 +89,21 @@ func populateSchema(attr string, fields []string) *pb.SchemaNode { return nil } schemaNode.Predicate = attr + ctx := context.Background() for _, field := range fields { switch field { case "type": schemaNode.Type = typ.Name() case "index": - schemaNode.Index = schema.State().IsIndexed(attr) + schemaNode.Index = schema.State().IsIndexed(ctx, attr) case "tokenizer": - if schema.State().IsIndexed(attr) { - schemaNode.Tokenizer = schema.State().TokenizerNames(attr) + if schema.State().IsIndexed(ctx, attr) { + 
schemaNode.Tokenizer = schema.State().TokenizerNames(ctx, attr) } case "reverse": - schemaNode.Reverse = schema.State().IsReversed(attr) + schemaNode.Reverse = schema.State().IsReversed(ctx, attr) case "count": - schemaNode.Count = schema.State().HasCount(attr) + schemaNode.Count = schema.State().HasCount(ctx, attr) case "list": schemaNode.List = schema.State().IsList(attr) case "upsert": diff --git a/worker/sort.go b/worker/sort.go index eb26c656867..7b8d91fee72 100644 --- a/worker/sort.go +++ b/worker/sort.go @@ -197,11 +197,11 @@ func sortWithIndex(ctx context.Context, ts *pb.SortMessage) *sortresult { } // Get the tokenizers and choose the corresponding one. - if !schema.State().IsIndexed(order.Attr) { + if !schema.State().IsIndexed(ctx, order.Attr) { return resultWithError(errors.Errorf("Attribute %s is not indexed.", order.Attr)) } - tokenizers := schema.State().Tokenizer(order.Attr) + tokenizers := schema.State().Tokenizer(ctx, order.Attr) var tokenizer tok.Tokenizer for _, t := range tokenizers { // Get the first sortable index. diff --git a/worker/task.go b/worker/task.go index b8d0fae7da7..b629c42483d 100644 --- a/worker/task.go +++ b/worker/task.go @@ -898,16 +898,16 @@ func (qs *queryState) helpProcessTask(ctx context.Context, q *pb.Query, gid uint out := new(pb.Result) attr := q.Attr - srcFn, err := parseSrcFn(q) + srcFn, err := parseSrcFn(ctx, q) if err != nil { return nil, err } - if q.Reverse && !schema.State().IsReversed(attr) { + if q.Reverse && !schema.State().IsReversed(ctx, attr) { return nil, errors.Errorf("Predicate %s doesn't have reverse edge", attr) } - if needsIndex(srcFn.fnType, q.UidList) && !schema.State().IsIndexed(q.Attr) { + if needsIndex(srcFn.fnType, q.UidList) && !schema.State().IsIndexed(ctx, q.Attr) { return nil, errors.Errorf("Predicate %s is not indexed", q.Attr) } @@ -968,7 +968,7 @@ func (qs *queryState) helpProcessTask(ctx context.Context, q *pb.Query, gid uint if srcFn.fnType == compareScalarFn && srcFn.isFuncAtRoot { span.Annotate(nil, "handleCompareScalarFunction") - if err := qs.handleCompareScalarFunction(args); err != nil { + if err := qs.handleCompareScalarFunction(ctx, args); err != nil { return nil, err } } @@ -1034,9 +1034,9 @@ func needsStringFiltering(srcFn *functionContext, langs []string, attr string) b srcFn.fnType == customIndexFn) } -func (qs *queryState) handleCompareScalarFunction(arg funcArgs) error { +func (qs *queryState) handleCompareScalarFunction(ctx context.Context, arg funcArgs) error { attr := arg.q.Attr - if ok := schema.State().HasCount(attr); !ok { + if ok := schema.State().HasCount(ctx, attr); !ok { return errors.Errorf("Need @count directive in schema for attr: %s for fn: %s at root", attr, arg.srcFn.fname) } @@ -1069,7 +1069,7 @@ func (qs *queryState) handleRegexFunction(ctx context.Context, arg funcArgs) err if typ != types.StringID { return errors.Errorf("Got non-string type. Regex match is allowed only on string type.") } - useIndex := schema.State().HasTokenizer(tok.IdentTrigram, attr) + useIndex := schema.State().HasTokenizer(ctx, tok.IdentTrigram, attr) span.Annotatef(nil, "Trigram index found: %t, func at root: %t", useIndex, arg.srcFn.isFuncAtRoot) @@ -1172,7 +1172,7 @@ func (qs *queryState) handleCompareFunction(ctx context.Context, arg funcArgs) e attr := arg.q.Attr span.Annotatef(nil, "Attr: %s. 
Fname: %s", attr, arg.srcFn.fname) - tokenizer, err := pickTokenizer(attr, arg.srcFn.fname) + tokenizer, err := pickTokenizer(ctx, attr, arg.srcFn.fname) if err != nil { return err } @@ -1311,7 +1311,7 @@ func (qs *queryState) handleMatchFunction(ctx context.Context, arg funcArgs) err case arg.q.UidList != nil && len(arg.q.UidList.Uids) != 0: uids = arg.q.UidList - case schema.State().HasTokenizer(tok.IdentTrigram, attr): + case schema.State().HasTokenizer(ctx, tok.IdentTrigram, attr): var err error uids, err = uidsForMatch(attr, arg) if err != nil { @@ -1613,11 +1613,11 @@ func langForFunc(langs []string) string { return langs[0] } -func parseSrcFn(q *pb.Query) (*functionContext, error) { +func parseSrcFn(ctx context.Context, q *pb.Query) (*functionContext, error) { fnType, f := parseFuncType(q.SrcFunc) attr := q.Attr fc := &functionContext{fnType: fnType, fname: f} - isIndexedAttr := schema.State().IsIndexed(attr) + isIndexedAttr := schema.State().IsIndexed(ctx, attr) var err error t, err := schema.State().TypeOf(attr) @@ -1673,7 +1673,7 @@ func parseSrcFn(q *pb.Query) (*functionContext, error) { } // Get tokens ge / le ineqValueToken. - if tokens, fc.ineqValueToken, err = getInequalityTokens(q.ReadTs, attr, f, lang, + if tokens, fc.ineqValueToken, err = getInequalityTokens(ctx, q.ReadTs, attr, f, lang, fc.ineqValue); err != nil { return nil, err } @@ -1727,7 +1727,7 @@ func parseSrcFn(q *pb.Query) (*functionContext, error) { if err = ensureArgsCount(q.SrcFunc, 1); err != nil { return nil, err } - required, found := verifyStringIndex(attr, fnType) + required, found := verifyStringIndex(ctx, attr, fnType) if !found { return nil, errors.Errorf("Attribute %s is not indexed with type %s", attr, required) } @@ -1740,7 +1740,7 @@ func parseSrcFn(q *pb.Query) (*functionContext, error) { if err = ensureArgsCount(q.SrcFunc, 2); err != nil { return nil, err } - required, found := verifyStringIndex(attr, fnType) + required, found := verifyStringIndex(ctx, attr, fnType) if !found { return nil, errors.Errorf("Attribute %s is not indexed with type %s", attr, required) } @@ -1763,7 +1763,7 @@ func parseSrcFn(q *pb.Query) (*functionContext, error) { return nil, err } tokerName := q.SrcFunc.Args[0] - if !verifyCustomIndex(q.Attr, tokerName) { + if !verifyCustomIndex(ctx, q.Attr, tokerName) { return nil, errors.Errorf("Attribute %s is not indexed with custom tokenizer %s", q.Attr, tokerName) } diff --git a/worker/tokens.go b/worker/tokens.go index e7339a519a9..d3946a29ab9 100644 --- a/worker/tokens.go +++ b/worker/tokens.go @@ -17,18 +17,19 @@ package worker import ( - "github.com/dgraph-io/badger/v2" - "bytes" + "context" + + "github.com/pkg/errors" + "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/dgraph/schema" "github.com/dgraph-io/dgraph/tok" "github.com/dgraph-io/dgraph/types" "github.com/dgraph-io/dgraph/x" - "github.com/pkg/errors" ) -func verifyStringIndex(attr string, funcType FuncType) (string, bool) { +func verifyStringIndex(ctx context.Context, attr string, funcType FuncType) (string, bool) { var requiredTokenizer tok.Tokenizer switch funcType { case fullTextSearchFn: @@ -39,12 +40,12 @@ func verifyStringIndex(attr string, funcType FuncType) (string, bool) { requiredTokenizer = tok.TermTokenizer{} } - if !schema.State().IsIndexed(attr) { + if !schema.State().IsIndexed(ctx, attr) { return requiredTokenizer.Name(), false } id := requiredTokenizer.Identifier() - for _, t := range schema.State().Tokenizer(attr) { + for _, t := range schema.State().Tokenizer(ctx, attr) { if t.Identifier() 
== id { return requiredTokenizer.Name(), true } @@ -52,11 +53,11 @@ func verifyStringIndex(attr string, funcType FuncType) (string, bool) { return requiredTokenizer.Name(), false } -func verifyCustomIndex(attr string, tokenizerName string) bool { - if !schema.State().IsIndexed(attr) { +func verifyCustomIndex(ctx context.Context, attr string, tokenizerName string) bool { + if !schema.State().IsIndexed(ctx, attr) { return false } - for _, t := range schema.State().Tokenizer(attr) { + for _, t := range schema.State().Tokenizer(ctx, attr) { if t.Identifier() >= tok.IdentCustom && t.Name() == tokenizerName { return true } @@ -76,13 +77,13 @@ func getStringTokens(funcArgs []string, lang string, funcType FuncType) ([]strin return tok.GetTermTokens(funcArgs) } -func pickTokenizer(attr string, f string) (tok.Tokenizer, error) { +func pickTokenizer(ctx context.Context, attr string, f string) (tok.Tokenizer, error) { // Get the tokenizers and choose the corresponding one. - if !schema.State().IsIndexed(attr) { + if !schema.State().IsIndexed(ctx, attr) { return nil, errors.Errorf("Attribute %s is not indexed.", attr) } - tokenizers := schema.State().Tokenizer(attr) + tokenizers := schema.State().Tokenizer(ctx, attr) for _, t := range tokenizers { // If function is eq and we found a tokenizer thats !Lossy(), lets return it switch f { @@ -110,9 +111,9 @@ func pickTokenizer(attr string, f string) (tok.Tokenizer, error) { // getInequalityTokens gets tokens ge / le compared to given token using the first sortable // index that is found for the predicate. -func getInequalityTokens(readTs uint64, attr, f, lang string, +func getInequalityTokens(ctx context.Context, readTs uint64, attr, f, lang string, ineqValue types.Val) ([]string, string, error) { - tokenizer, err := pickTokenizer(attr, f) + tokenizer, err := pickTokenizer(ctx, attr, f) if err != nil { return nil, "", err } diff --git a/x/error.go b/x/error.go index 70f7bdd4538..32a50fa03b5 100644 --- a/x/error.go +++ b/x/error.go @@ -39,20 +39,25 @@ import ( // Check logs fatal if err != nil. func Check(err error) { if err != nil { - log.Fatalf("%+v", errors.Wrap(err, "")) + err = errors.Wrap(err, "") + CaptureSentryException(err) + log.Fatalf("%+v", err) } } // Checkf is Check with extra info. func Checkf(err error, format string, args ...interface{}) { if err != nil { - log.Fatalf("%+v", errors.Wrapf(err, format, args...)) + err = errors.Wrapf(err, format, args...) + CaptureSentryException(err) + log.Fatalf("%+v", err) } } // CheckfNoTrace is Checkf without a stack trace. func CheckfNoTrace(err error) { if err != nil { + CaptureSentryException(err) log.Fatalf(err.Error()) } } @@ -60,6 +65,7 @@ func CheckfNoTrace(err error) { // CheckfNoLog exits on error without any message (to avoid duplicate error messages). func CheckfNoLog(err error) { if err != nil { + CaptureSentryException(err) os.Exit(1) } } diff --git a/x/histogram.go b/x/histogram.go index 4b93fb8b7e5..56e94ca3907 100644 --- a/x/histogram.go +++ b/x/histogram.go @@ -15,6 +15,7 @@ package x import ( + "errors" "sync" "time" @@ -41,7 +42,8 @@ type slidingHistogram struct { // details. 
func newSlidingHistogram(duration time.Duration, maxVal int64, sigFigs int) *slidingHistogram { if duration <= 0 { - panic("cannot create a sliding histogram with nonpositive duration") + Panic(errors.New( + "cannot create a sliding histogram with nonpositive duration")) } return &slidingHistogram{ nextT: time.Now(), diff --git a/x/keys.go b/x/keys.go index 614f00416ab..7cca05902df 100644 --- a/x/keys.go +++ b/x/keys.go @@ -458,7 +458,7 @@ func GetSplitKey(baseKey []byte, startUid uint64) ([]byte, error) { index := 1 + 2 + len(p.Attr) + 1 if index >= len(keyCopy) { - panic("Cannot write to key. Key is too small") + Panic(errors.New("Cannot write to key. Key is too small")) } keyCopy[index] = ByteSplit binary.BigEndian.PutUint64(keyCopy[len(baseKey):], startUid) diff --git a/x/sentry_integration.go b/x/sentry_integration.go new file mode 100644 index 00000000000..1ac2a76d20e --- /dev/null +++ b/x/sentry_integration.go @@ -0,0 +1,104 @@ +/* + * Copyright 2017-2018 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package x + +import ( + "errors" + "os" + "time" + + "github.com/getsentry/sentry-go" + "github.com/golang/glog" + "github.com/mitchellh/panicwrap" +) + +var env string + +// InitSentry initializes the sentry machinery. +func InitSentry(ee bool) { + if ee { + env = "enterprise" + } else { + env = "oss" + } + initSentry() +} + +func initSentry() { + if err := sentry.Init(sentry.ClientOptions{ + Dsn: "https://58a035f0d85a4c1c80aee0a3e72f3899@sentry.io/1805390", + Debug: true, + AttachStacktrace: true, + ServerName: WorkerConfig.MyAddr, + Environment: env, + Release: Version(), + }); err != nil { + glog.Fatalf("Sentry init failed: %v", err) + } +} + +// FlushSentry flushes the buffered events/errors. +func FlushSentry() { + sentry.Flush(time.Second * 2) +} + +// ConfigureSentryScope configures the scope on the global hub of Sentry. +func ConfigureSentryScope(subcmd string) { + sentry.ConfigureScope(func(scope *sentry.Scope) { + scope.SetTag("dgraph", subcmd) + scope.SetLevel(sentry.LevelFatal) + }) +} + +// Panic sends the error report to Sentry and then panics. +func Panic(err error) { + if err != nil { + CaptureSentryException(err) + panic(err) + } +} + +// CaptureSentryException sends the error report to Sentry. +func CaptureSentryException(err error) { + if err != nil { + sentry.CaptureException(err) + } +} + +// PanicHandler is the callback function when a panic happens. It does not recover and is +// only used to log panics (in our case send an event to sentry). +func PanicHandler(out string) { + // Output contains the full output (including stack traces) of the panic. + sentry.CaptureException(errors.New(out)) + FlushSentry() // Need to flush asap. Don't defer here. + + os.Exit(1) +} + +// WrapPanics is a wrapper on panics. We use it to send sentry events about panics +// and crash right after. 
+func WrapPanics() { + exitStatus, err := panicwrap.BasicWrap(PanicHandler) + if err != nil { + panic(err) + } + // If exitStatus >= 0, then we're the parent process and the panicwrap + // re-executed ourselves and completed. Just exit with the proper status. + if exitStatus >= 0 { + os.Exit(exitStatus) + } +} diff --git a/x/x.go b/x/x.go index 37153cc2194..db1b432ee23 100644 --- a/x/x.go +++ b/x/x.go @@ -280,7 +280,7 @@ func SetStatus(w http.ResponseWriter, code, msg string) { glog.Errorf("Error while writing: %+v", err) } } else { - panic(fmt.Sprintf("Unable to marshal: %+v", qr)) + Panic(errors.Errorf("Unable to marshal: %+v", qr)) } } @@ -323,7 +323,7 @@ func SetStatusWithData(w http.ResponseWriter, code, msg string) { glog.Errorf("Error while writing: %+v", err) } } else { - panic(fmt.Sprintf("Unable to marshal: %+v", qr)) + Panic(errors.Errorf("Unable to marshal: %+v", qr)) } } diff --git a/xidmap/xidmap.go b/xidmap/xidmap.go index 17ddf3a4aea..10192f66281 100644 --- a/xidmap/xidmap.go +++ b/xidmap/xidmap.go @@ -173,7 +173,7 @@ func (m *XidMap) AssignUid(xid string) (uint64, bool) { var uidBuf [8]byte binary.BigEndian.PutUint64(uidBuf[:], newUid) if err := m.writer.Set([]byte(xid), uidBuf[:]); err != nil { - panic(err) + x.Panic(err) } } return newUid, true
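The new `x/sentry_integration.go` file above only defines the helpers; how a binary is expected to wire them together is not shown in this section. A minimal sketch of such wiring (the subcommand name, the `false` enterprise flag, and the `run` function are illustrative assumptions, not taken from the patch):

```
package main

import (
	"errors"

	"github.com/dgraph-io/dgraph/x"
)

func run() error {
	// Placeholder for the command's real work.
	return errors.New("example failure")
}

func main() {
	// Re-exec under panicwrap first, so a panic in the child process is
	// forwarded to PanicHandler and reported to Sentry before exiting.
	x.WrapPanics()

	x.InitSentry(false) // false: OSS build (assumed value)
	defer x.FlushSentry()

	// Tag subsequent Sentry events with the subcommand being run.
	x.ConfigureSentryScope("alpha")

	if err := run(); err != nil {
		// x.Panic reports the error to Sentry and then panics, which is the
		// replacement for bare panic calls made throughout this change.
		x.Panic(err)
	}
}
```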