diff --git a/dgraph/cmd/alpha/http_test.go b/dgraph/cmd/alpha/http_test.go index 686b99c20e8..33a00ffd755 100644 --- a/dgraph/cmd/alpha/http_test.go +++ b/dgraph/cmd/alpha/http_test.go @@ -428,7 +428,7 @@ func TestTransactionBasic(t *testing.T) { require.Equal(t, 2, len(mr.preds)) var parsedPreds []string for _, pred := range mr.preds { - p := strings.Split(pred, "-")[1] + p := strings.SplitN(pred, "-", 2)[1] parsedPreds = append(parsedPreds, x.ParseAttr(p)) } sort.Strings(parsedPreds) diff --git a/dgraph/cmd/alpha/upsert_test.go b/dgraph/cmd/alpha/upsert_test.go index 6ae11f77840..052cf62d739 100644 --- a/dgraph/cmd/alpha/upsert_test.go +++ b/dgraph/cmd/alpha/upsert_test.go @@ -40,7 +40,7 @@ type QueryResult struct { func splitPreds(ps []string) []string { for i, p := range ps { - ps[i] = x.ParseAttr(strings.Split(p, "-")[1]) + ps[i] = x.ParseAttr(strings.SplitN(p, "-", 2)[1]) } return ps diff --git a/dgraph/cmd/zero/oracle.go b/dgraph/cmd/zero/oracle.go index e2d9f9ebf7a..7e31ccd9075 100644 --- a/dgraph/cmd/zero/oracle.go +++ b/dgraph/cmd/zero/oracle.go @@ -365,7 +365,7 @@ func (s *Server) commit(ctx context.Context, src *api.TxnContext) error { checkPreds := func() error { // Check if any of these tablets is being moved. If so, abort the transaction. 
for _, pkey := range src.Preds { - splits := strings.Split(pkey, "-") + splits := strings.SplitN(pkey, "-", 2) if len(splits) < 2 { return errors.Errorf("Unable to find group id in %s", pkey) } @@ -373,7 +373,7 @@ func (s *Server) commit(ctx context.Context, src *api.TxnContext) error { if err != nil { return errors.Wrapf(err, "unable to parse group id from %s", pkey) } - pred := strings.Join(splits[1:], "-") + pred := splits[1] tablet := s.ServingTablet(pred) if tablet == nil { return errors.Errorf("Tablet for %s is nil", pred) diff --git a/edgraph/server.go b/edgraph/server.go index 94cc2953982..58cd6234fbf 100644 --- a/edgraph/server.go +++ b/edgraph/server.go @@ -1110,16 +1110,7 @@ func filterTablets(ctx context.Context, ms *pb.MembershipState) error { return errors.Errorf("Namespace not found in JWT.") } if namespace == x.GalaxyNamespace { - // For galaxy namespace, we don't want to filter out the predicates. We only format the - // namespace to human readable form. - for _, group := range ms.Groups { - tablets := make(map[string]*pb.Tablet) - for tabletName, tablet := range group.Tablets { - tablet.Predicate = x.FormatNsAttr(tablet.Predicate) - tablets[x.FormatNsAttr(tabletName)] = tablet - } - group.Tablets = tablets - } + // For galaxy namespace, we don't want to filter out the predicates. return nil } for _, group := range ms.GetGroups() { diff --git a/graphql/admin/state.go b/graphql/admin/state.go index 1b851ba1b27..4240eb64983 100644 --- a/graphql/admin/state.go +++ b/graphql/admin/state.go @@ -47,13 +47,13 @@ func resolveState(ctx context.Context, q schema.Query) *resolve.Resolved { u := jsonpb.Unmarshaler{} var ms pb.MembershipState err = u.Unmarshal(bytes.NewReader(resp.GetJson()), &ms) - if err != nil { return resolve.EmptyResult(q, err) } - // map to graphql response structure - state := convertToGraphQLResp(ms) + ns, _ := x.ExtractNamespace(ctx) + // map to graphql response structure. Only guardian of galaxy can list the namespaces. 
+ state := convertToGraphQLResp(ms, ns == x.GalaxyNamespace) b, err := json.Marshal(state) if err != nil { return resolve.EmptyResult(q, err) @@ -77,7 +77,7 @@ func resolveState(ctx context.Context, q schema.Query) *resolve.Resolved { // values and not the keys. For pb.MembershipState.Group, the keys are the group IDs // and pb.Group didn't contain this ID, so we are creating a custom clusterGroup type, // which is same as pb.Group and also contains the ID for the group. -func convertToGraphQLResp(ms pb.MembershipState) membershipState { +func convertToGraphQLResp(ms pb.MembershipState, listNs bool) membershipState { var state membershipState // namespaces stores set of namespaces @@ -92,9 +92,8 @@ func convertToGraphQLResp(ms pb.MembershipState) membershipState { var tablets = make([]*pb.Tablet, 0, len(v.Tablets)) for name, v1 := range v.Tablets { tablets = append(tablets, v1) - val, err := x.ExtractNamespaceFromPredicate(name) - if err == nil { - namespaces[val] = struct{}{} + if listNs { + namespaces[x.ParseNamespace(name)] = struct{}{} } } state.Groups = append(state.Groups, clusterGroup{ diff --git a/posting/index.go b/posting/index.go index ee310b5d30f..26388178179 100644 --- a/posting/index.go +++ b/posting/index.go @@ -592,7 +592,7 @@ func (r *rebuilder) Run(ctx context.Context) error { glog.V(1).Infof( "Rebuilding index for predicate %s: Starting process. StartTs=%d. Prefix=\n%s\n", - x.FormatNsAttr(r.attr), r.startTs, hex.Dump(r.prefix)) + r.attr, r.startTs, hex.Dump(r.prefix)) // Counter is used here to ensure that all keys are committed at different timestamp. // We set it to 1 in case there are no keys found and NewStreamAt is called with ts=0. 
@@ -600,8 +600,7 @@ func (r *rebuilder) Run(ctx context.Context) error { tmpWriter := tmpDB.NewManagedWriteBatch() stream := pstore.NewStreamAt(r.startTs) - stream.LogPrefix = fmt.Sprintf("Rebuilding index for predicate %s (1/2):", - x.FormatNsAttr(r.attr)) + stream.LogPrefix = fmt.Sprintf("Rebuilding index for predicate %s (1/2):", r.attr) stream.Prefix = r.prefix stream.KeyToList = func(key []byte, itr *badger.Iterator) (*bpb.KVList, error) { // We should return quickly if the context is no longer valid. @@ -663,21 +662,19 @@ func (r *rebuilder) Run(ctx context.Context) error { return err } glog.V(1).Infof("Rebuilding index for predicate %s: building temp index took: %v\n", - x.FormatNsAttr(r.attr), time.Since(start)) + r.attr, time.Since(start)) // Now we write all the created posting lists to disk. - glog.V(1).Infof("Rebuilding index for predicate %s: writing index to badger", - x.FormatNsAttr(r.attr)) + glog.V(1).Infof("Rebuilding index for predicate %s: writing index to badger", r.attr) start = time.Now() defer func() { glog.V(1).Infof("Rebuilding index for predicate %s: writing index took: %v\n", - x.FormatNsAttr(r.attr), time.Since(start)) + r.attr, time.Since(start)) }() writer := pstore.NewManagedWriteBatch() tmpStream := tmpDB.NewStreamAt(counter) - tmpStream.LogPrefix = fmt.Sprintf("Rebuilding index for predicate %s (2/2):", - x.FormatNsAttr(r.attr)) + tmpStream.LogPrefix = fmt.Sprintf("Rebuilding index for predicate %s (2/2):", r.attr) tmpStream.KeyToList = func(key []byte, itr *badger.Iterator) (*bpb.KVList, error) { l, err := ReadPostingList(key, itr) if err != nil { @@ -720,8 +717,7 @@ func (r *rebuilder) Run(ctx context.Context) error { if err := tmpStream.Orchestrate(ctx); err != nil { return err } - glog.V(1).Infof("Rebuilding index for predicate %s: Flushing all writes.\n", - x.FormatNsAttr(r.attr)) + glog.V(1).Infof("Rebuilding index for predicate %s: Flushing all writes.\n", r.attr) return writer.Flush() } diff --git a/posting/list_test.go 
b/posting/list_test.go index 57e1936b567..71bd00e7323 100644 --- a/posting/list_test.go +++ b/posting/list_test.go @@ -559,7 +559,7 @@ func TestAddMutation_mrjn2(t *testing.T) { } func TestAddMutation_gru(t *testing.T) { - key := x.DataKey("question.tag", 0x01) + key := x.DataKey(x.GalaxyAttr("question.tag"), 0x01) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) @@ -592,7 +592,7 @@ func TestAddMutation_gru(t *testing.T) { } func TestAddMutation_gru2(t *testing.T) { - key := x.DataKey("question.tag", 0x100) + key := x.DataKey(x.GalaxyAttr("question.tag"), 0x100) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) @@ -639,7 +639,7 @@ func TestAddMutation_gru2(t *testing.T) { func TestAddAndDelMutation(t *testing.T) { // Ensure each test uses unique key since we don't clear the postings // after each test - key := x.DataKey("dummy_key", 0x927) + key := x.DataKey(x.GalaxyAttr("dummy_key"), 0x927) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) @@ -878,7 +878,7 @@ func createMultiPartList(t *testing.T, size int, addFacet bool) (*List, int) { defer setMaxListSize(maxListSize) maxListSize = 5000 - key := x.DataKey(uuid.New().String(), 1331) + key := x.DataKey(x.GalaxyAttr(uuid.New().String()), 1331) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) commits := 0 @@ -926,7 +926,7 @@ func createAndDeleteMultiPartList(t *testing.T, size int) (*List, int) { defer setMaxListSize(maxListSize) maxListSize = 10000 - key := x.DataKey(uuid.New().String(), 1331) + key := x.DataKey(x.GalaxyAttr(uuid.New().String()), 1331) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) commits := 0 @@ -1087,7 +1087,7 @@ func TestBinSplit(t *testing.T) { defer func() { maxListSize = originalListSize }() - key := x.DataKey(uuid.New().String(), 1331) + key := x.DataKey(x.GalaxyAttr(uuid.New().String()), 1331) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) for i := 1; i <= size; i++ { @@ -1268,7 
+1268,7 @@ func TestMultiPartListDeleteAndAdd(t *testing.T) { maxListSize = 5000 // Add entries to the maps. - key := x.DataKey(uuid.New().String(), 1331) + key := x.DataKey(x.GalaxyAttr(uuid.New().String()), 1331) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) for i := 1; i <= size; i++ { @@ -1407,7 +1407,7 @@ func TestRecursiveSplits(t *testing.T) { // Create a list that should be split recursively. size := int(1e5) - key := x.DataKey(uuid.New().String(), 1331) + key := x.DataKey(x.GalaxyAttr(uuid.New().String()), 1331) ol, err := getNew(key, ps, math.MaxUint64) require.NoError(t, err) commits := 0 diff --git a/systest/backup/filesystem/backup_test.go b/systest/backup/filesystem/backup_test.go index 64e5d5ca59b..b0477b86645 100644 --- a/systest/backup/filesystem/backup_test.go +++ b/systest/backup/filesystem/backup_test.go @@ -48,6 +48,7 @@ var ( alphaBackupDir = "/data/backups" oldBackupDir1 = "/data/to_restore/1" oldBackupDir2 = "/data/to_restore/2" + oldBackupDir3 = "/data/to_restore/3" alphaContainers = []string{ "alpha1", "alpha2", @@ -92,7 +93,8 @@ func TestBackupOfOldRestore(t *testing.T) { common.DirSetup(t) common.CopyOldBackupDir(t) - conn, err := grpc.Dial(testutil.SockAddr, grpc.WithTransportCredentials(credentials.NewTLS(testutil.GetAlphaClientConfig(t)))) + conn, err := grpc.Dial(testutil.SockAddr, + grpc.WithTransportCredentials(credentials.NewTLS(testutil.GetAlphaClientConfig(t)))) require.NoError(t, err) dg := dgo.NewDgraphClient(api.NewDgraphClient(conn)) require.NoError(t, err) @@ -105,7 +107,8 @@ func TestBackupOfOldRestore(t *testing.T) { sendRestoreRequest(t, oldBackupDir1) testutil.WaitForRestore(t, dg, testutil.SockAddrHttp) - resp, err := dg.NewTxn().Query(context.Background(), `{ authors(func: has(Author.name)) { count(uid) } }`) + q := `{ authors(func: has(Author.name)) { count(uid) } }` + resp, err := dg.NewTxn().Query(context.Background(), q) require.NoError(t, err) require.JSONEq(t, 
"{\"authors\":[{\"count\":1}]}", string(resp.Json)) @@ -117,7 +120,7 @@ func TestBackupOfOldRestore(t *testing.T) { sendRestoreRequest(t, alphaBackupDir) testutil.WaitForRestore(t, dg, testutil.SockAddrHttp) - resp, err = dg.NewTxn().Query(context.Background(), `{ authors(func: has(Author.name)) { count(uid) } }`) + resp, err = dg.NewTxn().Query(context.Background(), q) require.NoError(t, err) require.JSONEq(t, "{\"authors\":[{\"count\":1}]}", string(resp.Json)) } @@ -151,16 +154,19 @@ func TestRestoreOfOldBackup(t *testing.T) { require.NoError(t, err) require.JSONEq(t, r, string(resp.Json)) } + queryAndCheck("p1", 0) queryAndCheck("p2", 2) queryAndCheck("p3", 0) queryAndCheck("p4", 2) } t.Run("backup of 20.11", func(t *testing.T) { test(oldBackupDir2) }) + t.Run("backup of 21.03", func(t *testing.T) { test(oldBackupDir3) }) } func TestBackupFilesystem(t *testing.T) { - conn, err := grpc.Dial(testutil.SockAddr, grpc.WithTransportCredentials(credentials.NewTLS(testutil.GetAlphaClientConfig(t)))) + conn, err := grpc.Dial(testutil.SockAddr, + grpc.WithTransportCredentials(credentials.NewTLS(testutil.GetAlphaClientConfig(t)))) require.NoError(t, err) dg := dgo.NewDgraphClient(api.NewDgraphClient(conn)) @@ -432,7 +438,8 @@ func runBackupInternal(t *testing.T, forceFull bool, numExpectedFiles, var data interface{} require.NoError(t, json.NewDecoder(resp.Body).Decode(&data)) - require.Equal(t, "Success", testutil.JsonGet(data, "data", "backup", "response", "code").(string)) + require.Equal(t, "Success", + testutil.JsonGet(data, "data", "backup", "response", "code").(string)) taskId := testutil.JsonGet(data, "data", "backup", "taskId").(string) testutil.WaitForTask(t, taskId, true, testutil.SockAddrHttp) @@ -440,7 +447,8 @@ func runBackupInternal(t *testing.T, forceFull bool, numExpectedFiles, common.CopyToLocalFs(t) files := x.WalkPathFunc(copyBackupDir, func(path string, isdir bool) bool { - return !isdir && strings.HasSuffix(path, ".backup") && strings.HasPrefix(path, 
"data/backups_copy/dgraph.") + return !isdir && strings.HasSuffix(path, ".backup") && + strings.HasPrefix(path, "data/backups_copy/dgraph.") }) require.Equal(t, numExpectedFiles, len(files)) diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g1.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g1.backup new file mode 100644 index 00000000000..2a3070c33b4 Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g1.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g2.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g2.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g2.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g3.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g3.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095641.969/r9-g3.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g1.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g1.backup new file mode 100644 index 00000000000..353cc3af855 Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g1.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g2.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g2.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g2.backup differ diff --git 
a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g3.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g3.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095716.130/r21-g3.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g1.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g1.backup new file mode 100644 index 00000000000..cf1f5adf0fc Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g1.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g2.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g2.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g2.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g3.backup b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g3.backup new file mode 100644 index 00000000000..45c0a95038f Binary files /dev/null and b/systest/backup/filesystem/data/to_restore/3/dgraph.20210517.095726.320/r26-g3.backup differ diff --git a/systest/backup/filesystem/data/to_restore/3/manifest.json b/systest/backup/filesystem/data/to_restore/3/manifest.json new file mode 100644 index 00000000000..8b2efaa23bb --- /dev/null +++ b/systest/backup/filesystem/data/to_restore/3/manifest.json @@ -0,0 +1 @@ 
+{"Manifests":[{"type":"full","since":0,"read_ts":9,"groups":{"1":["\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p1","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.p_query","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.xid","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.schema","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.drop.op","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.type","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p3","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p2"],"2":[],"3":[]},"backup_id":"quirky_kapitsa4","backup_num":1,"version":2103,"path":"dgraph.20210517.095641.969","encrypted":false,"drop_operations":null,"compression":"snappy"},{"type":"incremental","since":0,"read_ts":21,"groups":{"1":["\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.drop.op","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p1","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.schema","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.p_query","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p3","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.xid","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p4","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p2","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.type"],"2":[],"3":[]},"backup_id":"quirky_kapitsa4","backup_num":2,"version":2103,"path":"dgraph.20210517.095716.130","encrypted":false,"drop_operations":[{"drop_op":1}],"compression":"snappy"},{"type":"incremental","since":0,"read_ts":26,"groups":{"1":["\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p4","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p2","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.type","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p1","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.schema","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.p_query","\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000p3","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.drop.op","\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000dgraph.graphql.xid"],"2":[],"3":[]},"backup_id":"quirky_kapitsa4","backup_num":3,"version":2103,"path":"dgraph.20210517.095726.320","encrypted":false,"drop_operations":[{"drop_op":2,"drop_value":"\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000p3"}],"compression":"snappy"}]} diff --git a/worker/backup.go b/worker/backup.go index b25af95302e..22c248b12db 100644 --- a/worker/backup.go +++ b/worker/backup.go @@ -89,10 +89,6 @@ func (m *Manifest) getPredsInGroup(gid uint32) predicateSet { predSet := make(predicateSet) for _, pred := range preds { - if m.Version == 0 { - // For older versions, preds set will contain attribute without namespace. - pred = x.NamespaceAttr(x.GalaxyNamespace, pred) - } predSet[pred] = struct{}{} } return predSet diff --git a/worker/backup_ee.go b/worker/backup_ee.go index 770244807c1..37b26b88407 100644 --- a/worker/backup_ee.go +++ b/worker/backup_ee.go @@ -232,7 +232,7 @@ func ProcessBackupRequest(ctx context.Context, req *pb.BackupRequest) error { m := Manifest{ ReadTs: req.ReadTs, Groups: predMap, - Version: x.DgraphVersion, + Version: x.ManifestVersion, DropOperations: dropOperations, Path: dir, Compression: "snappy", @@ -555,14 +555,14 @@ func (pr *BackupProcessor) CompleteBackup(ctx context.Context, m *Manifest) erro return err } - manifest, err := GetManifest(handler, uri) + manifest, err := GetManifestNoUpgrade(handler, uri) if err != nil { return err } manifest.Manifests = append(manifest.Manifests, m) - if err := createManifest(handler, uri, manifest); err != nil { - return errors.Wrap(err, "Complete backup failed") + if err := CreateManifest(handler, uri, manifest); err != nil { + return errors.Wrap(err, "complete backup failed") } glog.Infof("Backup completed OK.") return nil diff --git a/worker/backup_manifest.go b/worker/backup_manifest.go index cf610794bd4..5cc8eacb3cd 100644 --- 
a/worker/backup_manifest.go +++ b/worker/backup_manifest.go @@ -15,6 +15,7 @@ package worker import ( "encoding/json" + "fmt" "net/url" "path/filepath" "sort" @@ -57,14 +58,9 @@ func verifyManifests(manifests []*Manifest) error { func getManifestsToRestore( h UriHandler, uri *url.URL, req *pb.RestoreRequest) ([]*Manifest, error) { - - if !h.DirExists("") { - return nil, errors.Errorf("getManifestsToRestore: The uri path: %q doesn't exists", - uri.Path) - } - manifest, err := getConsolidatedManifest(h, uri) + manifest, err := GetManifest(h, uri) if err != nil { - return nil, errors.Wrap(err, "failed to get consolidated manifest") + return manifest.Manifests, err } return getFilteredManifests(h, manifest.Manifests, req) } @@ -165,6 +161,62 @@ func getConsolidatedManifest(h UriHandler, uri *url.URL) (*MasterManifest, error return &MasterManifest{Manifests: mlist}, nil } +// upgradeManifest updates the in-memory manifest from various versions to the latest version. +// If the manifest version is 0 (dgraph version < v21.03), attach namespace to the predicates and +// the drop data/attr operation. +// If the manifest version is 2103, convert the format of predicate from <namespace><predicate> to +// <hex(namespace)>-<predicate>. This is because of a bug for namespace greater than 127. +// See https://github.com/dgraph-io/dgraph/pull/7810 +// NOTE: Do not use the upgraded manifest to overwrite the non-upgraded manifest. +func upgradeManifest(m *Manifest) error { + switch m.Version { + case 0: + for gid, preds := range m.Groups { + parsedPreds := preds[:0] + for _, pred := range preds { + parsedPreds = append(parsedPreds, x.GalaxyAttr(pred)) + } + m.Groups[gid] = parsedPreds + } + for _, op := range m.DropOperations { + switch op.DropOp { + case pb.DropOperation_DATA: + op.DropValue = fmt.Sprintf("%#x", x.GalaxyNamespace) + case pb.DropOperation_ATTR: + op.DropValue = x.GalaxyAttr(op.DropValue) + default: + // do nothing for drop all and drop namespace. 
+ } + } + case 2103: + for gid, preds := range m.Groups { + parsedPreds := preds[:0] + for _, pred := range preds { + ns_attr, err := x.AttrFrom2103(pred) + if err != nil { + return errors.Errorf("while parsing predicate got: %q", err) + } + parsedPreds = append(parsedPreds, ns_attr) + } + m.Groups[gid] = parsedPreds + } + for _, op := range m.DropOperations { + // We have a cluster wide drop data in v21.03. + if op.DropOp == pb.DropOperation_ATTR { + ns_attr, err := x.AttrFrom2103(op.DropValue) + if err != nil { + return errors.Errorf("while parsing the drop operation %+v got: %q", + op, err) + } + op.DropValue = ns_attr + } + } + case 2105: + // pass + } + return nil +} + func readManifest(h UriHandler, path string) (*Manifest, error) { var m Manifest b, err := h.Read(path) @@ -200,10 +252,11 @@ func readMasterManifest(h UriHandler, path string) (*MasterManifest, error) { return &m, nil } -func GetManifest(h UriHandler, uri *url.URL) (*MasterManifest, error) { +// GetManifestNoUpgrade returns the master manifest using the given handler and uri. +func GetManifestNoUpgrade(h UriHandler, uri *url.URL) (*MasterManifest, error) { if !h.DirExists("") { - return &MasterManifest{}, errors.Errorf("getManifest: The uri path: %q doesn't exists", - uri.Path) + return &MasterManifest{}, + errors.Errorf("getManifestWithoutUpgrade: The uri path: %q doesn't exists", uri.Path) } manifest, err := getConsolidatedManifest(h, uri) if err != nil { @@ -212,7 +265,26 @@ func GetManifest(h UriHandler, uri *url.URL) (*MasterManifest, error) { return manifest, nil } -func createManifest(h UriHandler, uri *url.URL, manifest *MasterManifest) error { +// GetManifest returns the master manifest using the given handler and uri. Additionally, it also +// upgrades the manifest for the in-memory processing. +// Note: This function must not be used when using the returned manifest for the purpose of +// overwriting the old manifest. 
+func GetManifest(h UriHandler, uri *url.URL) (*MasterManifest, error) { + manifest, err := GetManifestNoUpgrade(h, uri) + if err != nil { + return manifest, err + } + for _, m := range manifest.Manifests { + if err := upgradeManifest(m); err != nil { + return manifest, errors.Wrapf(err, "getManifest: failed to upgrade") + } + } + return manifest, nil +} + +func CreateManifest(h UriHandler, uri *url.URL, manifest *MasterManifest) error { + var err error + w, err := h.CreateFile(tmpManifest) if err != nil { return errors.Wrap(err, "createManifest failed to create tmp path: ") diff --git a/worker/groups.go b/worker/groups.go index 455acb0cca8..14ac311539d 100644 --- a/worker/groups.go +++ b/worker/groups.go @@ -491,7 +491,7 @@ func (g *groupi) sendTablet(tablet *pb.Tablet) (*pb.Tablet, error) { } if out.GroupId == groups().groupId() { - glog.Infof("Serving tablet for: %v\n", x.FormatNsAttr(tablet.GetPredicate())) + glog.Infof("Serving tablet for: %v\n", tablet.GetPredicate()) } return out, nil } @@ -538,7 +538,7 @@ func (g *groupi) Inform(preds []string) ([]*pb.Tablet, error) { } if t.GroupId == groups().groupId() { - glog.Infof("Serving tablet for: %v\n", x.FormatNsAttr(t.GetPredicate())) + glog.Infof("Serving tablet for: %v\n", t.GetPredicate()) } } g.Unlock() diff --git a/worker/online_restore.go b/worker/online_restore.go index 6fe53921451..d0e5ce5d084 100644 --- a/worker/online_restore.go +++ b/worker/online_restore.go @@ -288,15 +288,6 @@ func handleRestoreProposal(ctx context.Context, req *pb.RestoreRequest, pidx uin lastManifest := manifests[0] preds, ok := lastManifest.Groups[req.GroupId] - // Version is 0 if the backup was taken on an old version (v20.11). 
- if lastManifest.Version == 0 { - tmp := make([]string, 0, len(preds)) - for _, pred := range preds { - tmp = append(tmp, x.GalaxyAttr(pred)) - } - preds = tmp - } - if !ok { return errors.Errorf("backup manifest does not contain information for group ID %d", req.GroupId) diff --git a/worker/restore_map.go b/worker/restore_map.go index 506f317f5b9..7d917c90a2d 100644 --- a/worker/restore_map.go +++ b/worker/restore_map.go @@ -109,7 +109,7 @@ type loadBackupInput struct { restoreTs uint64 preds predicateSet dropNs map[uint64]struct{} - isOld bool + version int keepSchema bool compression string } @@ -390,11 +390,57 @@ func (m *mapper) processReqCh(ctx context.Context) error { kv.Value, err = update.Marshal() return err } - if in.isOld && parsedKey.IsType() { - if err := appendNamespace(); err != nil { - glog.Errorf("Unable to (un)marshal type: %+v. Err=%v\n", parsedKey, err) + changeFormat := func() error { + // In the backup taken on 2103, we have the schemaUpdate.Predicate in format + // |. That had issues with JSON marshalling. + // So, we switched over to the format -. + var err error + if parsedKey.IsSchema() { + var update pb.SchemaUpdate + if err := update.Unmarshal(kv.Value); err != nil { + return err + } + if update.Predicate, err = x.AttrFrom2103(update.Predicate); err != nil { + return err + } + kv.Value, err = update.Marshal() + return err + } + if parsedKey.IsType() { + var update pb.TypeUpdate + if err := update.Unmarshal(kv.Value); err != nil { + return err + } + if update.TypeName, err = x.AttrFrom2103(update.TypeName); err != nil { + return err + } + for _, sch := range update.Fields { + if sch.Predicate, err = x.AttrFrom2103(sch.Predicate); err != nil { + return err + } + } + kv.Value, err = update.Marshal() + return err + } + return nil + } + // We changed the format of predicate in 2103 and 2105. SchemaUpdate and TypeUpdate have + // predicate stored within them, so they also need to be updated accordingly. 
+ switch in.version { + case 0: + if parsedKey.IsType() { + if err := appendNamespace(); err != nil { + glog.Errorf("Unable to (un)marshal type: %+v. Err=%v\n", parsedKey, err) + return nil + } + } + case 2103: + if err := changeFormat(); err != nil { + glog.Errorf("Unable to change format for: %+v Err=%+v", parsedKey, err) + return nil + } + default: + // for manifest versions >= 2105, do nothing. } // Reset the StreamId to prevent ordering issues while writing to stream writer. kv.StreamId = 0 @@ -665,7 +711,7 @@ func RunMapper(req *pb.RestoreRequest, mapDir string) (*mapResult, error) { in := &loadBackupInput{ preds: predSet, dropNs: localDropNs, - isOld: manifest.Version == 0, + version: manifest.Version, restoreTs: req.RestoreTs, // Only map the schema keys corresponding to the latest backup. keepSchema: i == 0, @@ -686,23 +732,18 @@ func RunMapper(req *pb.RestoreRequest, mapDir string) (*mapResult, error) { case pb.DropOperation_ALL: dropAll = true case pb.DropOperation_DATA: - var ns uint64 - if manifest.Version == 0 { - ns = x.GalaxyNamespace - } else { - var err error - ns, err = strconv.ParseUint(op.DropValue, 0, 64) - if err != nil { - return nil, errors.Wrap(err, "Map phase failed to parse namespace") - } + if op.DropValue == "" { + // In 2103, we do not support namespace level drop data. + dropAll = true + continue + } + ns, err := strconv.ParseUint(op.DropValue, 0, 64) + if err != nil { + return nil, errors.Wrap(err, "map phase failed to parse namespace") } + dropNs[ns] = struct{}{} case pb.DropOperation_ATTR: - p := op.DropValue - if manifest.Version == 0 { - p = x.NamespaceAttr(x.GalaxyNamespace, p) - } - dropAttr[p] = struct{}{} + dropAttr[op.DropValue] = struct{}{} case pb.DropOperation_NS: // pstore will be nil for export_backup tool. In that case we don't need to ban ns. 
if pstore == nil { diff --git a/x/keys.go b/x/keys.go index 8ed78ab86ac..56ce6c7bd0f 100644 --- a/x/keys.go +++ b/x/keys.go @@ -57,8 +57,27 @@ const ( IgnoreBytes = "1-8" // NamespaceOffset is the offset in badger key from which the next 8 bytes contain namespace. NamespaceOffset = 1 + // NsSeparator is the separator between the namespace and attribute. + NsSeparator = "-" ) +// Invalid bytes are replaced with the Unicode replacement rune. +// See https://golang.org/pkg/encoding/json/#Marshal +const replacementRune = rune('\ufffd') + +// This function parses namespaces that were stored in the format used before the 21.03 version. +// The first 8 bytes are the namespace, rest is the predicate. This format caused issues +// while marshalling, hence was removed. This function is there for backwards compatibility of +// restore. Now we store the predicate as a string of format "hex(namespace)-predicate". +func AttrFrom2103(attr string) (string, error) { + if strings.ContainsRune(attr, replacementRune) { + return "", errors.Errorf("replacement rune found while parsing attr: %s (%+v)", + attr, []byte(attr)) + } + ns, pred := binary.BigEndian.Uint64([]byte(attr[:8])), attr[8:] + return NamespaceAttr(ns, pred), nil +} + func NamespaceToBytes(ns uint64) []byte { buf := make([]byte, 8) binary.BigEndian.PutUint64(buf, ns) @@ -67,7 +86,7 @@ func NamespaceToBytes(ns uint64) []byte { // NamespaceAttr is used to generate attr from namespace. func NamespaceAttr(ns uint64, attr string) string { - return string(NamespaceToBytes(ns)) + attr + return uintToStr(ns) + NsSeparator + attr } func NamespaceAttrList(ns uint64, preds []string) []string { @@ -84,21 +103,25 @@ func GalaxyAttr(attr string) string { // ParseNamespaceAttr returns the namespace and attr from the given value. 
func ParseNamespaceAttr(attr string) (uint64, string) { - return binary.BigEndian.Uint64([]byte(attr[:8])), attr[8:] + splits := strings.SplitN(attr, NsSeparator, 2) + return strToUint(splits[0]), splits[1] } func ParseNamespaceBytes(attr string) ([]byte, string) { - return []byte(attr[:8]), attr[8:] + splits := strings.SplitN(attr, NsSeparator, 2) + ns := make([]byte, 8) + binary.BigEndian.PutUint64(ns, strToUint(splits[0])) + return ns, splits[1] } // ParseAttr returns the attr from the given value. func ParseAttr(attr string) string { - return attr[8:] + return strings.SplitN(attr, NsSeparator, 2)[1] } // ParseNamespace returns the namespace from the given value. func ParseNamespace(attr string) uint64 { - return binary.BigEndian.Uint64([]byte(attr[:8])) + return strToUint(strings.SplitN(attr, NsSeparator, 2)[0]) } func ParseAttrList(attrs []string) []string { @@ -109,26 +132,19 @@ func ParseAttrList(attrs []string) []string { return resp } -func IsReverseAttr(attr string) bool { - return attr[8] == '~' +// For consistency, use base16 to encode/decode the namespace. 
+func strToUint(s string) uint64 { + ns, err := strconv.ParseUint(s, 16, 64) + Check(err) + return ns } - -func FormatNsAttr(attr string) string { - ns, attr := ParseNamespaceAttr(attr) - return strconv.FormatUint(ns, 10) + "-" + attr +func uintToStr(ns uint64) string { + return strconv.FormatUint(ns, 16) } -func ExtractNamespaceFromPredicate(predicate string) (uint64, error) { - splitString := strings.Split(predicate, "-") - if len(splitString) <= 1 { - return 0, errors.Errorf("predicate does not contain namespace name") - } - uintVal, err := strconv.ParseUint(splitString[0], 0, 64) - if err != nil { - return 0, errors.Wrapf(err, "while parsing %s as uint64", splitString[0]) - } - return uintVal, nil - +func IsReverseAttr(attr string) bool { + pred := strings.SplitN(attr, NsSeparator, 2)[1] + return pred[0] == '~' } func writeAttr(buf []byte, attr string) []byte { @@ -143,19 +159,18 @@ func writeAttr(buf []byte, attr string) []byte { // genKey creates the key and writes the initial bytes (type byte, length of attribute, // and the attribute itself). It leaves the rest of the key empty for further processing -// if necessary. -func generateKey(typeByte byte, attr string, totalLen int) []byte { - AssertTrue(totalLen >= 1+2+len(attr)) - - buf := make([]byte, totalLen) - buf[0] = typeByte +// if necessary. It also returns next index from where further processing should be done. +func generateKey(typeByte byte, attr string, extra int) ([]byte, int) { // Separate namespace and attribute from attr and write namespace in the first 8 bytes of key. namespace, attr := ParseNamespaceBytes(attr) + prefixLen := 1 + 8 + 2 + len(attr) // byteType + ns + len(pred) + pred + buf := make([]byte, prefixLen+extra) + buf[0] = typeByte AssertTrue(copy(buf[1:], namespace) == 8) rest := buf[9:] writeAttr(rest, attr) - return buf + return buf, prefixLen } // SchemaKey returns schema key for given attribute. 
Schema keys are stored @@ -166,7 +181,8 @@ func generateKey(typeByte byte, attr string, totalLen int) []byte { // byte 1-2: length of attr // next len(attr) bytes: value of attr func SchemaKey(attr string) []byte { - return generateKey(ByteSchema, attr, 1+2+len(attr)) + key, _ := generateKey(ByteSchema, attr, 0) + return key } // TypeKey returns type key for given type name. Type keys are stored separately @@ -177,7 +193,8 @@ func SchemaKey(attr string) []byte { // byte 1-2: length of typeName // next len(attr) bytes: value of attr (the type name) func TypeKey(attr string) []byte { - return generateKey(ByteType, attr, 1+2+len(attr)) + key, _ := generateKey(ByteType, attr, 0) + return key } // DataKey generates a data key with the given attribute and UID. @@ -192,9 +209,8 @@ func TypeKey(attr string) []byte { // // the split stored in this key and the first byte will be sets to ByteSplit. func DataKey(attr string, uid uint64) []byte { - prefixLen := 1 + 2 + len(attr) - totalLen := prefixLen + 1 + 8 - buf := generateKey(DefaultPrefix, attr, totalLen) + extra := 1 + 8 // ByteData + UID + buf, prefixLen := generateKey(DefaultPrefix, attr, extra) rest := buf[prefixLen:] rest[0] = ByteData @@ -216,9 +232,8 @@ func DataKey(attr string, uid uint64) []byte { // // the split stored in this key. func ReverseKey(attr string, uid uint64) []byte { - prefixLen := 1 + 2 + len(attr) - totalLen := prefixLen + 1 + 8 - buf := generateKey(DefaultPrefix, attr, totalLen) + extra := 1 + 8 // ByteReverse + UID + buf, prefixLen := generateKey(DefaultPrefix, attr, extra) rest := buf[prefixLen:] rest[0] = ByteReverse @@ -240,9 +255,8 @@ func ReverseKey(attr string, uid uint64) []byte { // // the split stored in this key. 
func IndexKey(attr, term string) []byte { - prefixLen := 1 + 2 + len(attr) - totalLen := prefixLen + 1 + len(term) - buf := generateKey(DefaultPrefix, attr, totalLen) + extra := 1 + len(term) // ByteIndex + term + buf, prefixLen := generateKey(DefaultPrefix, attr, extra) rest := buf[prefixLen:] rest[0] = ByteIndex @@ -262,9 +276,8 @@ func IndexKey(attr, term string) []byte { // next byte: data type prefix (set to ByteCount or ByteCountRev) // next four bytes: value of count. func CountKey(attr string, count uint32, reverse bool) []byte { - prefixLen := 1 + 2 + len(attr) - totalLen := prefixLen + 1 + 4 - buf := generateKey(DefaultPrefix, attr, totalLen) + extra := 1 + 4 // ByteCount + Count + buf, prefixLen := generateKey(DefaultPrefix, attr, extra) rest := buf[prefixLen:] if reverse { @@ -349,14 +362,9 @@ func (p ParsedKey) IsOfType(typ byte) bool { // SkipPredicate returns the first key after the keys corresponding to the predicate // of this key. Useful when iterating in the reverse order. func (p ParsedKey) SkipPredicate() []byte { - buf := make([]byte, 1+2+len(p.Attr)+1) - buf[0] = p.bytePrefix - ns, attr := ParseNamespaceBytes(p.Attr) - AssertTrue(copy(buf[1:], ns) == 8) - rest := buf[9:] - k := writeAttr(rest, attr) - AssertTrue(len(k) == 1) - k[0] = 0xFF + buf, prefixLen := generateKey(p.bytePrefix, p.Attr, 1) + AssertTrue(len(buf[prefixLen:]) == 1) + buf[prefixLen] = 0xFF return buf } @@ -377,56 +385,33 @@ func (p ParsedKey) SkipType() []byte { // DataPrefix returns the prefix for data keys. func (p ParsedKey) DataPrefix() []byte { - buf := make([]byte, 1+2+len(p.Attr)+1) - buf[0] = p.bytePrefix - ns, attr := ParseNamespaceBytes(p.Attr) - AssertTrue(copy(buf[1:], ns) == 8) - rest := buf[9:] - k := writeAttr(rest, attr) - AssertTrue(len(k) == 1) - k[0] = ByteData + buf, prefixLen := generateKey(p.bytePrefix, p.Attr, 1) + buf[prefixLen] = ByteData return buf } // IndexPrefix returns the prefix for index keys. 
func (p ParsedKey) IndexPrefix() []byte { - buf := make([]byte, 1+2+len(p.Attr)+1) - buf[0] = DefaultPrefix - ns, attr := ParseNamespaceBytes(p.Attr) - AssertTrue(copy(buf[1:], ns) == 8) - rest := buf[9:] - k := writeAttr(rest, attr) - AssertTrue(len(k) == 1) - k[0] = ByteIndex + buf, prefixLen := generateKey(DefaultPrefix, p.Attr, 1) + buf[prefixLen] = ByteIndex return buf } // ReversePrefix returns the prefix for index keys. func (p ParsedKey) ReversePrefix() []byte { - buf := make([]byte, 1+2+len(p.Attr)+1) - buf[0] = DefaultPrefix - ns, attr := ParseNamespaceBytes(p.Attr) - AssertTrue(copy(buf[1:], ns) == 8) - rest := buf[9:] - k := writeAttr(rest, attr) - AssertTrue(len(k) == 1) - k[0] = ByteReverse + buf, prefixLen := generateKey(DefaultPrefix, p.Attr, 1) + buf[prefixLen] = ByteReverse return buf } // CountPrefix returns the prefix for count keys. func (p ParsedKey) CountPrefix(reverse bool) []byte { - buf := make([]byte, 1+2+len(p.Attr)+1) - buf[0] = p.bytePrefix - ns, attr := ParseNamespaceBytes(p.Attr) - AssertTrue(copy(buf[1:], ns) == 8) - rest := buf[9:] - k := writeAttr(rest, attr) - AssertTrue(len(k) == 1) + buf, prefixLen := generateKey(DefaultPrefix, p.Attr, 1) + buf[prefixLen] = ByteReverse if reverse { - k[0] = ByteCountRev + buf[prefixLen] = ByteCountRev } else { - k[0] = ByteCount + buf[prefixLen] = ByteCount } return buf } @@ -512,12 +497,8 @@ func TypePrefix() []byte { // PredicatePrefix returns the prefix for all keys belonging to this predicate except schema key. 
func PredicatePrefix(predicate string) []byte { - buf := make([]byte, 1+2+len(predicate)) - buf[0] = DefaultPrefix - ns, predicate := ParseNamespaceBytes(predicate) - AssertTrue(copy(buf[1:], ns) == 8) - k := writeAttr(buf[9:], predicate) - AssertTrue(len(k) == 0) + buf, prefixLen := generateKey(DefaultPrefix, predicate, 0) + AssertTrue(len(buf) == prefixLen) return buf } @@ -572,7 +553,7 @@ func Parse(key []byte) (ParsedKey, error) { if len(k) < sz { return p, errors.Errorf("Invalid size %v for key %v", sz, key) } - p.Attr = string(namespace) + string(k[:sz]) + p.Attr = NamespaceAttr(binary.BigEndian.Uint64(namespace), string(k[:sz])) k = k[sz:] switch p.bytePrefix { diff --git a/x/keys_test.go b/x/keys_test.go index e77d071be24..bfb1e58c90e 100644 --- a/x/keys_test.go +++ b/x/keys_test.go @@ -17,6 +17,7 @@ package x import ( + "encoding/json" "fmt" "math" "sort" @@ -29,8 +30,8 @@ func TestNameSpace(t *testing.T) { ns := uint64(133) attr := "name" nsAttr := NamespaceAttr(ns, attr) - require.Equal(t, 8+len(attr), len(nsAttr)) - parsedAttr := ParseAttr(nsAttr) + parsedNs, parsedAttr := ParseNamespaceAttr(nsAttr) + require.Equal(t, ns, parsedNs) require.Equal(t, attr, parsedAttr) } @@ -39,7 +40,7 @@ func TestDataKey(t *testing.T) { // key with uid = 0 is invalid uid = 0 - key := DataKey(NamespaceAttr(GalaxyNamespace, "bad uid"), uid) + key := DataKey(GalaxyAttr("bad uid"), uid) _, err := Parse(key) require.Error(t, err) @@ -47,7 +48,7 @@ func TestDataKey(t *testing.T) { // Use the uid to derive the attribute so it has variable length and the test // can verify that multiple sizes of attr work correctly. 
sattr := fmt.Sprintf("attr:%d", uid) - key := DataKey(NamespaceAttr(GalaxyNamespace, sattr), uid) + key := DataKey(GalaxyAttr(sattr), uid) pk, err := Parse(key) require.NoError(t, err) @@ -59,14 +60,14 @@ func TestDataKey(t *testing.T) { keys := make([]string, 0, 1024) for uid = 1024; uid >= 1; uid-- { - key := DataKey(NamespaceAttr(GalaxyNamespace, "testing.key"), uid) + key := DataKey(GalaxyAttr("testing.key"), uid) keys = append(keys, string(key)) } // Test that sorting is as expected. sort.Strings(keys) require.True(t, sort.StringsAreSorted(keys)) for i, key := range keys { - exp := DataKey(NamespaceAttr(GalaxyNamespace, "testing.key"), uint64(i+1)) + exp := DataKey(GalaxyAttr("testing.key"), uint64(i+1)) require.Equal(t, string(exp), key) } } @@ -76,7 +77,7 @@ func TestParseDataKeyWithStartUid(t *testing.T) { startUid := uint64(math.MaxUint64) for uid = 1; uid < 1001; uid++ { sattr := fmt.Sprintf("attr:%d", uid) - key := DataKey(NamespaceAttr(GalaxyNamespace, sattr), uid) + key := DataKey(GalaxyAttr(sattr), uid) key, err := SplitKey(key, startUid) require.NoError(t, err) pk, err := Parse(key) @@ -96,7 +97,7 @@ func TestIndexKey(t *testing.T) { sattr := fmt.Sprintf("attr:%d", uid) sterm := fmt.Sprintf("term:%d", uid) - key := IndexKey(NamespaceAttr(GalaxyNamespace, sattr), sterm) + key := IndexKey(GalaxyAttr(sattr), sterm) pk, err := Parse(key) require.NoError(t, err) @@ -113,7 +114,7 @@ func TestIndexKeyWithStartUid(t *testing.T) { sattr := fmt.Sprintf("attr:%d", uid) sterm := fmt.Sprintf("term:%d", uid) - key := IndexKey(NamespaceAttr(GalaxyNamespace, sattr), sterm) + key := IndexKey(GalaxyAttr(sattr), sterm) key, err := SplitKey(key, startUid) require.NoError(t, err) pk, err := Parse(key) @@ -132,7 +133,7 @@ func TestReverseKey(t *testing.T) { for uid = 1; uid < 1001; uid++ { sattr := fmt.Sprintf("attr:%d", uid) - key := ReverseKey(NamespaceAttr(GalaxyNamespace, sattr), uid) + key := ReverseKey(GalaxyAttr(sattr), uid) pk, err := Parse(key) require.NoError(t, 
err) @@ -148,7 +149,7 @@ func TestReverseKeyWithStartUid(t *testing.T) { for uid = 1; uid < 1001; uid++ { sattr := fmt.Sprintf("attr:%d", uid) - key := ReverseKey(NamespaceAttr(GalaxyNamespace, sattr), uid) + key := ReverseKey(GalaxyAttr(sattr), uid) key, err := SplitKey(key, startUid) require.NoError(t, err) pk, err := Parse(key) @@ -167,7 +168,7 @@ func TestCountKey(t *testing.T) { for count = 0; count < 1001; count++ { sattr := fmt.Sprintf("attr:%d", count) - key := CountKey(NamespaceAttr(GalaxyNamespace, sattr), count, true) + key := CountKey(GalaxyAttr(sattr), count, true) pk, err := Parse(key) require.NoError(t, err) @@ -183,7 +184,7 @@ func TestCountKeyWithStartUid(t *testing.T) { for count = 0; count < 1001; count++ { sattr := fmt.Sprintf("attr:%d", count) - key := CountKey(NamespaceAttr(GalaxyNamespace, sattr), count, true) + key := CountKey(GalaxyAttr(sattr), count, true) key, err := SplitKey(key, startUid) require.NoError(t, err) pk, err := Parse(key) @@ -202,7 +203,7 @@ func TestSchemaKey(t *testing.T) { for uid = 0; uid < 1001; uid++ { sattr := fmt.Sprintf("attr:%d", uid) - key := SchemaKey(NamespaceAttr(GalaxyNamespace, sattr)) + key := SchemaKey(GalaxyAttr(sattr)) pk, err := Parse(key) require.NoError(t, err) @@ -216,7 +217,7 @@ func TestTypeKey(t *testing.T) { for uid = 0; uid < 1001; uid++ { sattr := fmt.Sprintf("attr:%d", uid) - key := TypeKey(NamespaceAttr(GalaxyNamespace, sattr)) + key := TypeKey(GalaxyAttr(sattr)) pk, err := Parse(key) require.NoError(t, err) @@ -236,16 +237,16 @@ func TestBadStartUid(t *testing.T) { require.Error(t, err) } - key := DataKey(NamespaceAttr(GalaxyNamespace, "aa"), 1) + key := DataKey(GalaxyAttr("aa"), 1) testKey(key) - key = ReverseKey(NamespaceAttr(GalaxyNamespace, "aa"), 1) + key = ReverseKey(GalaxyAttr("aa"), 1) testKey(key) - key = CountKey(NamespaceAttr(GalaxyNamespace, "aa"), 0, false) + key = CountKey(GalaxyAttr("aa"), 0, false) testKey(key) - key = CountKey(NamespaceAttr(GalaxyNamespace, "aa"), 0, true) + 
key = CountKey(GalaxyAttr("aa"), 0, true) testKey(key) } @@ -271,7 +272,35 @@ func TestBadKeys(t *testing.T) { // key with uid = 0 is invalid uid := 0 - key = DataKey(NamespaceAttr(GalaxyNamespace, "bad uid"), uint64(uid)) + key = DataKey(GalaxyAttr("bad uid"), uint64(uid)) _, err = Parse(key) require.Error(t, err) } + +func TestJsonMarshal(t *testing.T) { + type predicate struct { + Predicate string `json:"predicate,omitempty"` + } + + p := &predicate{Predicate: NamespaceAttr(129, "name")} + b, err := json.Marshal(p) + require.NoError(t, err) + + var p2 predicate + require.NoError(t, json.Unmarshal(b, &p2)) + ns, attr := ParseNamespaceAttr(p2.Predicate) + require.Equal(t, uint64(129), ns) + require.Equal(t, "name", attr) +} + +func TestNsSeparator(t *testing.T) { + uid := uint64(10) + pred := "name" + NsSeparator + "surname" + key := DataKey(GalaxyAttr(pred), uid) + pk, err := Parse(key) + require.NoError(t, err) + require.Equal(t, uid, pk.Uid) + ns, attr := ParseNamespaceAttr(pk.Attr) + require.Equal(t, GalaxyNamespace, ns) + require.Equal(t, pred, attr) +} diff --git a/x/x.go b/x/x.go index 730535c6615..1646b600182 100644 --- a/x/x.go +++ b/x/x.go @@ -138,7 +138,7 @@ const ( "X-CSRF-Token, X-Auth-Token, X-Requested-With" DgraphCostHeader = "Dgraph-TouchedUids" - DgraphVersion = 2103 + ManifestVersion = 2105 ) var (