package badger

import (
	"bufio"
	"encoding/binary"
	"io"
	"sync"

	"github.com/dgraph-io/badger/protos"
	"github.com/dgraph-io/badger/y"
)
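
// writeTo serializes a single KVPair to w as a little-endian uint64 length
// prefix followed by the protobuf-encoded payload. Load expects exactly this
// framing when reading the stream back.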
func writeTo(entry *protos.KVPair, w io.Writer) error {
	if err := binary.Write(w, binary.LittleEndian, uint64(entry.Size())); err != nil {
		return err
	}
	buf, err := entry.Marshal()
	if err != nil {
		return err
	}
	_, err = w.Write(buf)
	return err
}

// Backup dumps a protobuf-encoded list of all entries in the database that
// are newer than the specified version into the given writer. It returns a
// timestamp indicating when the entries were dumped, which can be passed into
// a later invocation to generate an incremental dump of entries that have
// been added or modified since the last invocation of DB.Backup().
//
// This can be used to back up the data in a database at a given point in time.
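//
// A minimal usage sketch (illustrative only; the open db, the file name, and
// the error handling are the caller's and not part of this API):
//
//	f, err := os.Create("badger.bak") // hypothetical output file
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	// Pass 0 for a full backup, or the timestamp returned by a previous
//	// call to Backup for an incremental one.
//	since, err := db.Backup(f, 0)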
func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) {
	var tsNew uint64
	err := db.View(func(txn *Txn) error {
		opts := DefaultIteratorOptions
		opts.AllVersions = true
		it := txn.NewIterator(opts)
		defer it.Close() // Release the iterator before the transaction is discarded.
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			if item.Version() < since {
				// Ignore versions older than the given timestamp.
				continue
			}
			val, err := item.Value()
			if err != nil {
				return err
			}
			entry := &protos.KVPair{
				Key:       y.Copy(item.Key()),
				Value:     y.Copy(val),
				UserMeta:  []byte{item.UserMeta()},
				Version:   item.Version(),
				ExpiresAt: item.ExpiresAt(),
			}
			// Write the entry to the backup stream.
			if err := writeTo(entry, w); err != nil {
				return err
			}
		}
		tsNew = txn.readTs
		return nil
	})
	return tsNew, err
}

// Load reads a protobuf-encoded list of all entries from a reader and writes
// them to the database. This can be used to restore the database from a
// backup made by calling DB.Backup().
//
// DB.Load() should be called on a database that is not running any other
// concurrent transactions while it is running.
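//
// A minimal usage sketch (illustrative only; the open db and the file name
// are the caller's and not part of this API):
//
//	f, err := os.Open("badger.bak") // hypothetical file produced by DB.Backup
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := db.Load(f); err != nil {
//		return err
//	}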
func (db *DB) Load(r io.Reader) error {
	br := bufio.NewReaderSize(r, 16<<10)
	unmarshalBuf := make([]byte, 1<<10)
	var entries []*entry
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	// batchSetAsyncIfNoErr checks for a pending error before sending off a
	// batch for an asynchronous write.
	batchSetAsyncIfNoErr := func(entries []*entry) error {
		select {
		case err := <-errChan:
			return err
		default:
			wg.Add(1)
			return db.batchSetAsync(entries, func(err error) {
				defer wg.Done()
				if err != nil {
					select {
					case errChan <- err:
					default:
					}
				}
			})
		}
	}

	for {
		// Each entry is framed as a little-endian uint64 length prefix
		// followed by a marshalled KVPair (see writeTo).
		var sz uint64
		err := binary.Read(br, binary.LittleEndian, &sz)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		if cap(unmarshalBuf) < int(sz) {
			unmarshalBuf = make([]byte, sz)
		}
		e := &protos.KVPair{}
		if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
			return err
		}
		if err = e.Unmarshal(unmarshalBuf[:sz]); err != nil {
			return err
		}
		entries = append(entries, &entry{
			Key:       y.KeyWithTs(e.Key, e.Version),
			Value:     e.Value,
			UserMeta:  e.UserMeta[0],
			ExpiresAt: e.ExpiresAt,
		})
		if len(entries) == 1000 {
			if err := batchSetAsyncIfNoErr(entries); err != nil {
				return err
			}
			// Allocate a fresh slice: the previous one is still owned by the
			// asynchronous write and must not be reused.
			entries = make([]*entry, 0, 1000)
		}
	}
	if len(entries) > 0 {
		if err := batchSetAsyncIfNoErr(entries); err != nil {
			return err
		}
	}
	wg.Wait()

	// Surface any error reported by the asynchronous writes.
	select {
	case err := <-errChan:
		return err
	default:
		return nil
	}
}