diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 45139abc0f1..d7e3a2d4d53 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -25,14 +25,6 @@ "ImportPath": "github.com/codahale/metrics", "Rev": "7c37910bc765e705301b159683480bdd44555c91" }, - { - "ImportPath": "github.com/dustin/go-humanize", - "Rev": "00897f070f09f194c26d65afae734ba4c32404e8" - }, - { - "ImportPath": "github.com/facebookgo/atomicfile", - "Rev": "6f117f2e7f224fb03eb5e5fba370eade6e2b90c8" - }, { "ImportPath": "github.com/hashicorp/golang-lru", "Rev": "253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116" @@ -69,10 +61,6 @@ "ImportPath": "github.com/mtchavez/jenkins", "Rev": "5a816af6ef21ef401bff5e4b7dd255d63400f497" }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "4875955338b0a434238a31165cb87255ab6e9e4a" - }, { "ImportPath": "github.com/syndtr/gosnappy/snappy", "Rev": "156a073208e131d7d2e212cb749feae7c339e846" @@ -84,15 +72,6 @@ { "ImportPath": "github.com/whyrusleeping/chunker", "Rev": "537e901819164627ca4bb5ce4e3faa8ce7956564" - }, - { - "ImportPath": "github.com/whyrusleeping/go-sysinfo", - "Rev": "769b7c0b50e8030895abc74ba8107ac715e3162a" - }, - { - "ImportPath": "gopkg.in/fsnotify.v1", - "Comment": "v1.2.0", - "Rev": "96c060f6a6b7e0d6f75fddd10efeaca3e5d1bcb0" } ] } diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore b/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore deleted file mode 100644 index 05b40514a71..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -#* -*.[568] -*.a -*~ -[568].out -_* diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE b/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a9068..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown b/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 079bc89a4f4..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,78 +0,0 @@ -# Humane Units - -Just a few functions for helping humanize times and sizes. 
- -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize` - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83MB` or `79MiB` (whichever you prefer). - -Example: - - fmt.Printf("That file is %s.", humanize.Bytes(82854982)) - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - - fmt.Printf("This was touched %s", humanize.Time(someTimeInstance)) - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - - fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - - fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - - fmt.Printf("%f", 2.24) // 2.240000 - fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 - fmt.Printf("%f", 2.0) // 2.000000 - fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - - humanize.SI(0.00000000223, "M") // 2.23nM - - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc337dcd..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 6876e92eea6..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,164 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p 
bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%dB", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f%s" - if val < 10 { - f = "%.1f%s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. 
-// -// ParseBigBytes("42MB") -> 42000000, nil -// ParseBigBytes("42mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - val := &big.Rat{} - _, err := fmt.Sscanf(s[:lastDigit], "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go deleted file mode 100644 index a0f977a6eb3..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package humanize - -import ( - "math/big" - "testing" -) - -func TestBigByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. - {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBigBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } else { - if got.Uint64() != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } - } -} - -func TestBigByteErrors(t *testing.T) { - got, err := ParseBigBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBigBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } -} - -func bbyte(in uint64) string { - return BigBytes((&big.Int{}).SetUint64(in)) -} - -func bibyte(in uint64) string { - return BigIBytes((&big.Int{}).SetUint64(in)) -} - -func TestBigBytes(t *testing.T) { - testList{ - {"bytes(0)", bbyte(0), "0B"}, - {"bytes(1)", bbyte(1), "1B"}, - {"bytes(803)", bbyte(803), "803B"}, - {"bytes(999)", bbyte(999), "999B"}, - - {"bytes(1024)", bbyte(1024), "1.0KB"}, - {"bytes(1MB - 1)", bbyte(MByte - Byte), "1000KB"}, - - {"bytes(1MB)", bbyte(1024 * 1024), "1.0MB"}, - {"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000MB"}, - - {"bytes(1GB)", bbyte(GByte), "1.0GB"}, - {"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000GB"}, - - {"bytes(1TB)", bbyte(TByte), "1.0TB"}, - {"bytes(1PB - 1T)", bbyte(PByte - TByte), "999TB"}, - - {"bytes(1PB)", bbyte(PByte), "1.0PB"}, - {"bytes(1PB - 1T)", bbyte(EByte - PByte), "999PB"}, - - {"bytes(1EB)", bbyte(EByte), "1.0EB"}, - // Overflows. 
- // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", bibyte(0), "0B"}, - {"bytes(1)", bibyte(1), "1B"}, - {"bytes(803)", bibyte(803), "803B"}, - {"bytes(1023)", bibyte(1023), "1023B"}, - - {"bytes(1024)", bibyte(1024), "1.0KiB"}, - {"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024KiB"}, - - {"bytes(1MB)", bibyte(1024 * 1024), "1.0MiB"}, - {"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024MiB"}, - - {"bytes(1GB)", bibyte(GiByte), "1.0GiB"}, - {"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024GiB"}, - - {"bytes(1TB)", bibyte(TiByte), "1.0TiB"}, - {"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023TiB"}, - - {"bytes(1PB)", bibyte(PiByte), "1.0PiB"}, - {"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023PiB"}, - - {"bytes(1EiB)", bibyte(EiByte), "1.0EiB"}, - // Overflows. - // {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5GiB"}, - - {"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5GB"}, - }.validate(t) -} - -func TestVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("15347691069326346944512", 10) - s := BigBytes(b) - if s != "15ZB" { - t.Errorf("Expected 15ZB, got %v", s) - } - s = BigIBytes(b) - if s != "13ZiB" { - t.Errorf("Expected 13ZiB, got %v", s) - } - - b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10) - s = BigBytes(b) - if s != "16YB" { - t.Errorf("Expected 16YB, got %v", s) - } - s = BigIBytes(b) - if s != "13YiB" { - t.Errorf("Expected 13YiB, got %v", s) - } -} - -func TestVeryVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10) - s := BigBytes(b) - if s != "16093YB" { - t.Errorf("Expected 16093YB, got %v", s) - } - s = BigIBytes(b) - if s != "13312YiB" { - t.Errorf("Expected 13312YiB, got %v", s) - } -} - -func TestParseVeryBig(t *testing.T) { - tests := []struct { - in string - out string - }{ - {"16ZB", "16000000000000000000000"}, - {"16ZiB", "18889465931478580854784"}, - {"16.5ZB", "16500000000000000000000"}, - {"16.5ZiB", "19479761741837286506496"}, - {"16Z", "16000000000000000000000"}, - {"16Zi", "18889465931478580854784"}, - {"16.5Z", "16500000000000000000000"}, - {"16.5Zi", "19479761741837286506496"}, - - {"16YB", "16000000000000000000000000"}, - {"16YiB", "19342813113834066795298816"}, - {"16.5YB", "16500000000000000000000000"}, - {"16.5YiB", "19947276023641381382651904"}, - {"16Y", "16000000000000000000000000"}, - {"16Yi", "19342813113834066795298816"}, - {"16.5Y", "16500000000000000000000000"}, - {"16.5Yi", "19947276023641381382651904"}, - } - - for _, test := range tests { - x, err := ParseBigBytes(test.in) - if err != nil { - t.Errorf("Error parsing %q: %v", test.in, err) - continue - } - - if x.String() != test.out { - t.Errorf("Expected %q for %q, got %v", test.out, test.in, x) - } - } -} - -func BenchmarkParseBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBigBytes("16.5Z") - } -} - -func BenchmarkBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - bibyte(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 4c4b5af156f..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%dB", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f%s" - if val < 10 { - f = "%.1f%s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83MB -func Bytes(s uint64) string { - sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42MB") -> 42000000, nil -// ParseBytes("42mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.') { - break - } - lastDigit++ - } - - f, err := strconv.ParseFloat(s[:lastDigit], 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go deleted file mode 100644 index 76a594c1a09..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. 
- {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } - if got != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } -} - -func TestByteErrors(t *testing.T) { - got, err := ParseBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } - got, err = ParseBytes("16 EiB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } -} - -func TestBytes(t *testing.T) { - testList{ - {"bytes(0)", Bytes(0), "0B"}, - {"bytes(1)", Bytes(1), "1B"}, - {"bytes(803)", Bytes(803), "803B"}, - {"bytes(999)", Bytes(999), "999B"}, - - {"bytes(1024)", Bytes(1024), "1.0KB"}, - {"bytes(9999)", Bytes(9999), "10KB"}, - {"bytes(1MB - 1)", Bytes(MByte - Byte), "1000KB"}, - - {"bytes(1MB)", Bytes(1024 * 1024), "1.0MB"}, - {"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000MB"}, - - {"bytes(1GB)", Bytes(GByte), "1.0GB"}, - {"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000GB"}, - {"bytes(10MB)", Bytes(9999 * 1000), "10MB"}, - - {"bytes(1TB)", Bytes(TByte), "1.0TB"}, - {"bytes(1PB - 1T)", Bytes(PByte - TByte), "999TB"}, - - {"bytes(1PB)", Bytes(PByte), "1.0PB"}, - {"bytes(1PB - 1T)", Bytes(EByte - PByte), "999PB"}, - - {"bytes(1EB)", Bytes(EByte), "1.0EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", IBytes(0), "0B"}, - {"bytes(1)", IBytes(1), "1B"}, - {"bytes(803)", IBytes(803), "803B"}, - {"bytes(1023)", IBytes(1023), "1023B"}, - - {"bytes(1024)", IBytes(1024), "1.0KiB"}, - {"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024KiB"}, - - {"bytes(1MB)", IBytes(1024 * 1024), "1.0MiB"}, - {"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024MiB"}, - - {"bytes(1GB)", IBytes(GiByte), "1.0GiB"}, - {"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024GiB"}, - - {"bytes(1TB)", IBytes(TiByte), "1.0TiB"}, - {"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023TiB"}, - - {"bytes(1PB)", IBytes(PiByte), "1.0PiB"}, - {"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023PiB"}, - - {"bytes(1EiB)", IBytes(EiByte), "1.0EiB"}, - // Overflows. - // {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5GiB"}, - - {"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5GB"}, - }.validate(t) -} - -func BenchmarkParseBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBytes("16.5GB") - } -} - -func BenchmarkBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - Bytes(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 9d0d2ed18f8..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,101 +0,0 @@ -package humanize - -import ( - "bytes" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:len(parts)], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. -func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:len(parts)], ",") -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go deleted file mode 100644 index 49040fb7137..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "math" - "math/big" - "testing" -) - -func TestCommas(t *testing.T) { - testList{ - {"0", Comma(0), "0"}, - {"10", Comma(10), "10"}, - {"100", Comma(100), "100"}, - {"1,000", Comma(1000), "1,000"}, - {"10,000", Comma(10000), "10,000"}, - {"100,000", Comma(100000), "100,000"}, - {"10,000,000", Comma(10000000), "10,000,000"}, - {"10,100,000", Comma(10100000), "10,100,000"}, - {"10,010,000", Comma(10010000), "10,010,000"}, - {"10,001,000", Comma(10001000), "10,001,000"}, - {"123,456,789", Comma(123456789), "123,456,789"}, - {"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", Comma(-123456789), "-123,456,789"}, - {"-10,100,000", Comma(-10100000), "-10,100,000"}, - {"-10,010,000", Comma(-10010000), "-10,010,000"}, - {"-10,001,000", Comma(-10001000), "-10,001,000"}, - {"-10,000,000", Comma(-10000000), "-10,000,000"}, - {"-100,000", Comma(-100000), "-100,000"}, - {"-10,000", Comma(-10000), "-10,000"}, - {"-1,000", Comma(-1000), "-1,000"}, - {"-100", Comma(-100), "-100"}, - {"-10", Comma(-10), "-10"}, - }.validate(t) -} - -func TestCommafs(t *testing.T) { - testList{ - {"0", Commaf(0), "0"}, - {"10.11", Commaf(10.11), "10.11"}, - {"100", Commaf(100), "100"}, - {"1,000", Commaf(1000), "1,000"}, 
- {"10,000", Commaf(10000), "10,000"}, - {"100,000", Commaf(100000), "100,000"}, - {"834,142.32", Commaf(834142.32), "834,142.32"}, - {"10,000,000", Commaf(10000000), "10,000,000"}, - {"10,100,000", Commaf(10100000), "10,100,000"}, - {"10,010,000", Commaf(10010000), "10,010,000"}, - {"10,001,000", Commaf(10001000), "10,001,000"}, - {"123,456,789", Commaf(123456789), "123,456,789"}, - {"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"}, - {"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"}, - {"-123,456,789", Commaf(-123456789), "-123,456,789"}, - {"-10,100,000", Commaf(-10100000), "-10,100,000"}, - {"-10,010,000", Commaf(-10010000), "-10,010,000"}, - {"-10,001,000", Commaf(-10001000), "-10,001,000"}, - {"-10,000,000", Commaf(-10000000), "-10,000,000"}, - {"-100,000", Commaf(-100000), "-100,000"}, - {"-10,000", Commaf(-10000), "-10,000"}, - {"-1,000", Commaf(-1000), "-1,000"}, - {"-100.11", Commaf(-100.11), "-100.11"}, - {"-10", Commaf(-10), "-10"}, - }.validate(t) -} - -func BenchmarkCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - Comma(1234567890) - } -} - -func BenchmarkCommaf(b *testing.B) { - for i := 0; i < b.N; i++ { - Commaf(1234567890.83584) - } -} - -func BenchmarkBigCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - BigComma(big.NewInt(1234567890)) - } -} - -func bigComma(i int64) string { - return BigComma(big.NewInt(i)) -} - -func TestBigCommas(t *testing.T) { - testList{ - {"0", bigComma(0), "0"}, - {"10", bigComma(10), "10"}, - {"100", bigComma(100), "100"}, - {"1,000", bigComma(1000), "1,000"}, - {"10,000", bigComma(10000), "10,000"}, - {"100,000", bigComma(100000), "100,000"}, - {"10,000,000", bigComma(10000000), "10,000,000"}, - {"10,100,000", bigComma(10100000), "10,100,000"}, - {"10,010,000", bigComma(10010000), "10,010,000"}, - {"10,001,000", bigComma(10001000), "10,001,000"}, - {"123,456,789", bigComma(123456789), "123,456,789"}, - {"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", bigComma(-123456789), "-123,456,789"}, - {"-10,100,000", bigComma(-10100000), "-10,100,000"}, - {"-10,010,000", bigComma(-10010000), "-10,010,000"}, - {"-10,001,000", bigComma(-10001000), "-10,001,000"}, - {"-10,000,000", bigComma(-10000000), "-10,000,000"}, - {"-100,000", bigComma(-100000), "-100,000"}, - {"-10,000", bigComma(-10000), "-10,000"}, - {"-1,000", bigComma(-1000), "-1,000"}, - {"-100", bigComma(-100), "-100"}, - {"-10", bigComma(-10), "-10"}, - }.validate(t) -} - -func TestVeryBigCommas(t *testing.T) { - tests := []struct{ in, exp string }{ - { - "84889279597249724975972597249849757294578485", - "84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - { - "-84889279597249724975972597249849757294578485", - 
"-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - } - for _, test := range tests { - n, _ := (&big.Int{}).SetString(test.in, 10) - got := BigComma(n) - if test.exp != got { - t.Errorf("Expected %q, got %q", test.exp, got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go deleted file mode 100644 index fc7db151640..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package humanize - -import ( - "testing" -) - -type testList []struct { - name, got, exp string -} - -func (tl testList) validate(t *testing.T) { - for _, test := range tl { - if test.got != test.exp { - t.Errorf("On %v, expected '%v', but got '%v'", - test.name, test.exp, test.got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index c76190b1067..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,23 +0,0 @@ -package humanize - -import "strconv" - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go deleted file mode 100644 index 40d13bd71a8..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package humanize - -import ( - "fmt" - "regexp" - "strconv" - "testing" -) - -func TestFtoa(t *testing.T) { - testList{ - {"200", Ftoa(200), "200"}, - {"2", Ftoa(2), "2"}, - {"2.2", Ftoa(2.2), "2.2"}, - {"2.02", Ftoa(2.02), "2.02"}, - {"200.02", Ftoa(200.02), "200.02"}, - }.validate(t) -} - -func BenchmarkFtoaRegexTrailing(b *testing.B) { - trailingZerosRegex := regexp.MustCompile(`\.?0+$`) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - trailingZerosRegex.ReplaceAllString("2.00000", "") - trailingZerosRegex.ReplaceAllString("2.0000", "") - trailingZerosRegex.ReplaceAllString("2.000", "") - trailingZerosRegex.ReplaceAllString("2.00", "") - trailingZerosRegex.ReplaceAllString("2.0", "") - trailingZerosRegex.ReplaceAllString("2", "") - } -} - -func BenchmarkFtoaFunc(b *testing.B) { - for i := 0; i < b.N; i++ { - stripTrailingZeros("2.00000") - stripTrailingZeros("2.0000") - stripTrailingZeros("2.000") - stripTrailingZeros("2.00") - stripTrailingZeros("2.0") - stripTrailingZeros("2") - } -} - -func BenchmarkFmtF(b *testing.B) { - for i := 0; i < b.N; i++ { - fmt.Sprintf("%f", 2.03584) - } -} - -func BenchmarkStrconvF(b *testing.B) { - for i := 0; i < b.N; i++ { - strconv.FormatFloat(2.03584, 'f', 6, 64) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a69540a06b6..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. 
- -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83MB" or -"79MiB" (whichever you prefer). -*/ -package humanize diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go deleted file mode 100644 index 32141348c80..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." => "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." 
- thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.Itoa(int(intf)) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. 
-func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go deleted file mode 100644 index dd38a5bb97f..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -type TestStruct struct { - name string - format string - num float64 - formatted string -} - -func TestFormatFloat(t *testing.T) { - tests := []TestStruct{ - {"default", "", 12345.6789, "12,345.68"}, - {"#", "#", 12345.6789, "12345.678900000"}, - {"#.", "#.", 12345.6789, "12346"}, - {"#,#", "#,#", 12345.6789, "12345,7"}, - {"#,##", "#,##", 12345.6789, "12345,68"}, - {"#,###", "#,###", 12345.6789, "12345,679"}, - {"#,###.", "#,###.", 12345.6789, "12,346"}, - {"#,###.##", "#,###.##", 12345.6789, "12,345.68"}, - {"#,###.###", "#,###.###", 12345.6789, "12,345.679"}, - {"#,###.####", "#,###.####", 12345.6789, "12,345.6789"}, - {"#.###,######", "#.###,######", 12345.6789, "12.345,678900"}, - {"#\u202f###,##", "#\u202f###,##", 12345.6789, "12 345,68"}, - - // special cases - {"NaN", "#", math.NaN(), "NaN"}, - {"+Inf", "#", math.Inf(1), "Infinity"}, - {"-Inf", "#", math.Inf(-1), "-Infinity"}, - {"signStr <= -0.000000001", "", -0.000000002, "-0.00"}, - {"signStr = 0", "", 0, "0.00"}, - {"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"}, - } - - for _, test := range tests { - got := FormatFloat(test.format, test.num) - if got != test.formatted { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - test.name, test.format, test.num, got, test.formatted) - } - } - // Test a single integer - got := FormatInteger("#", 12345) - if got != "12345.000000000" { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - "integerTest", "#", 12345, got, "12345.000000000") - } - // Test the things that could panic - panictests := []TestStruct{ - {"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"}, - {"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"}, - } - for _, test := range panictests { - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - _ = FormatFloat(test.format, test.num) - - }() - if didPanic != true { - t.Errorf("On %v, should have panic and did not.", - test.name) - } - } - -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a86195..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go deleted file mode 100644 index 51d85ee7a0b..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestOrdinals(t *testing.T) { - testList{ - {"0", Ordinal(0), "0th"}, - {"1", Ordinal(1), "1st"}, - {"2", Ordinal(2), "2nd"}, - {"3", Ordinal(3), "3rd"}, - {"4", Ordinal(4), "4th"}, - {"10", Ordinal(10), "10th"}, - {"11", Ordinal(11), "11th"}, - {"12", Ordinal(12), "12th"}, - {"13", Ordinal(13), "13th"}, - {"101", Ordinal(101), "101st"}, - {"102", Ordinal(102), "102nd"}, - {"103", Ordinal(103), "103rd"}, - }.validate(t) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go deleted file mode 100644 index dee8b765a06..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,110 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([0-9.]+)([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - exponent := math.Floor(logn(input, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := input / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1M instead of 1000k - if value == 1000.0 { - exponent += 3 - value = input / math.Pow(10, exponent) - } - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, B) -> 1MB -// e.g. SI(2.2345e-12, "F") -> 2.2345pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI(2.2345pF) -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go deleted file mode 100644 index 32fb386b5ba..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -func TestSI(t *testing.T) { - tests := []struct { - name string - num float64 - formatted string - }{ - {"e-24", 1e-24, "1yF"}, - {"e-21", 1e-21, "1zF"}, - {"e-18", 1e-18, "1aF"}, - {"e-15", 1e-15, "1fF"}, - {"e-12", 1e-12, "1pF"}, - {"e-12", 2.2345e-12, "2.2345pF"}, - {"e-12", 2.23e-12, "2.23pF"}, - {"e-11", 2.23e-11, "22.3pF"}, - {"e-10", 2.2e-10, "220pF"}, - {"e-9", 2.2e-9, "2.2nF"}, - {"e-8", 2.2e-8, "22nF"}, - {"e-7", 2.2e-7, "220nF"}, - {"e-6", 2.2e-6, "2.2µF"}, - {"e-6", 1e-6, "1µF"}, - {"e-5", 2.2e-5, "22µF"}, - {"e-4", 2.2e-4, "220µF"}, - {"e-3", 2.2e-3, "2.2mF"}, - {"e-2", 2.2e-2, "22mF"}, - {"e-1", 2.2e-1, "220mF"}, - {"e+0", 2.2e-0, "2.2F"}, - {"e+0", 2.2, "2.2F"}, - {"e+1", 2.2e+1, "22F"}, - {"0", 0, "0F"}, - {"e+1", 22, "22F"}, - {"e+2", 2.2e+2, "220F"}, - {"e+2", 220, "220F"}, - {"e+3", 2.2e+3, "2.2kF"}, - {"e+3", 2200, "2.2kF"}, - {"e+4", 2.2e+4, "22kF"}, - {"e+4", 22000, "22kF"}, - {"e+5", 2.2e+5, "220kF"}, - {"e+6", 2.2e+6, "2.2MF"}, - {"e+6", 1e+6, "1MF"}, - {"e+7", 2.2e+7, "22MF"}, - {"e+8", 2.2e+8, "220MF"}, - {"e+9", 2.2e+9, "2.2GF"}, - {"e+10", 2.2e+10, "22GF"}, - {"e+11", 2.2e+11, "220GF"}, - {"e+12", 2.2e+12, "2.2TF"}, - {"e+15", 2.2e+15, "2.2PF"}, - {"e+18", 2.2e+18, "2.2EF"}, - {"e+21", 2.2e+21, "2.2ZF"}, - {"e+24", 2.2e+24, "2.2YF"}, - - // special case - {"1F", 1000 * 1000, "1MF"}, - {"1F", 1e6, "1MF"}, - } - - for _, test := range tests { - got := SI(test.num, "F") - if got != test.formatted { - t.Errorf("On %v (%v), got %v, wanted %v", - test.name, test.num, got, test.formatted) - } - - gotf, gotu, err := ParseSI(test.formatted) - if err != nil { - t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err) - continue - } - - if math.Abs(1-(gotf/test.num)) > 0.01 { - t.Errorf("On %v (%v), got %v, wanted %v (±%v)", - test.name, test.formatted, gotf, test.num, - math.Abs(1-(gotf/test.num))) - } - if gotu != "F" { - t.Errorf("On %v (%v), expected unit F, got %v", - test.name, test.formatted, gotu) - } - } - - // Parse error - gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats - if err == nil { - t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu) - } -} - -func BenchmarkParseSI(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseSI("2.2346ZB") - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go deleted file mode 100644 index 592ebe1d625..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,91 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Minute = 60 - Hour = 60 * Minute - Day = 24 * Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. 
-// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -var magnitudes = []struct { - d int64 - format string - divby int64 -}{ - {1, "now", 1}, - {2, "1 second %s", 1}, - {Minute, "%d seconds %s", 1}, - {2 * Minute, "1 minute %s", 1}, - {Hour, "%d minutes %s", Minute}, - {2 * Hour, "1 hour %s", 1}, - {Day, "%d hours %s", Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - lbl := albl - diff := b.Unix() - a.Unix() - - after := a.After(b) - if after { - lbl = blbl - diff = a.Unix() - b.Unix() - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].d > diff - }) - - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.format { - if escaped { - switch ch { - case '%': - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.divby) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.format, args...) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go deleted file mode 100644 index 528daa4ec2e..00000000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package humanize - -import ( - "math" - "testing" - "time" -) - -func TestPast(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second ago", Time(time.Unix(now-1, 0)), "1 second ago"}, - {"12 seconds ago", Time(time.Unix(now-12, 0)), "12 seconds ago"}, - {"30 seconds ago", Time(time.Unix(now-30, 0)), "30 seconds ago"}, - {"45 seconds ago", Time(time.Unix(now-45, 0)), "45 seconds ago"}, - {"1 minute ago", Time(time.Unix(now-63, 0)), "1 minute ago"}, - {"15 minutes ago", Time(time.Unix(now-15*Minute, 0)), "15 minutes ago"}, - {"1 hour ago", Time(time.Unix(now-63*Minute, 0)), "1 hour ago"}, - {"2 hours ago", Time(time.Unix(now-2*Hour, 0)), "2 hours ago"}, - {"21 hours ago", Time(time.Unix(now-21*Hour, 0)), "21 hours ago"}, - {"1 day ago", Time(time.Unix(now-26*Hour, 0)), "1 day ago"}, - {"2 days ago", Time(time.Unix(now-49*Hour, 0)), "2 days ago"}, - {"3 days ago", Time(time.Unix(now-3*Day, 0)), "3 days ago"}, - {"1 week ago (1)", Time(time.Unix(now-7*Day, 0)), "1 week ago"}, - {"1 week ago (2)", Time(time.Unix(now-12*Day, 0)), "1 week ago"}, - {"2 weeks ago", Time(time.Unix(now-15*Day, 0)), "2 weeks ago"}, - {"1 month ago", Time(time.Unix(now-39*Day, 0)), "1 month ago"}, - {"3 months ago", Time(time.Unix(now-99*Day, 0)), "3 months ago"}, - {"1 year ago (1)", Time(time.Unix(now-365*Day, 0)), "1 year ago"}, - {"1 year ago (1)", Time(time.Unix(now-400*Day, 0)), "1 year ago"}, - {"2 years ago (1)", Time(time.Unix(now-548*Day, 0)), "2 years ago"}, - {"2 years ago (2)", 
Time(time.Unix(now-725*Day, 0)), "2 years ago"}, - {"2 years ago (3)", Time(time.Unix(now-800*Day, 0)), "2 years ago"}, - {"3 years ago", Time(time.Unix(now-3*Year, 0)), "3 years ago"}, - {"long ago", Time(time.Unix(now-LongTime, 0)), "a long while ago"}, - }.validate(t) -} - -func TestFuture(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second from now", Time(time.Unix(now+1, 0)), "1 second from now"}, - {"12 seconds from now", Time(time.Unix(now+12, 0)), "12 seconds from now"}, - {"30 seconds from now", Time(time.Unix(now+30, 0)), "30 seconds from now"}, - {"45 seconds from now", Time(time.Unix(now+45, 0)), "45 seconds from now"}, - {"15 minutes from now", Time(time.Unix(now+15*Minute, 0)), "15 minutes from now"}, - {"2 hours from now", Time(time.Unix(now+2*Hour, 0)), "2 hours from now"}, - {"21 hours from now", Time(time.Unix(now+21*Hour, 0)), "21 hours from now"}, - {"1 day from now", Time(time.Unix(now+26*Hour, 0)), "1 day from now"}, - {"2 days from now", Time(time.Unix(now+49*Hour, 0)), "2 days from now"}, - {"3 days from now", Time(time.Unix(now+3*Day, 0)), "3 days from now"}, - {"1 week from now (1)", Time(time.Unix(now+7*Day, 0)), "1 week from now"}, - {"1 week from now (2)", Time(time.Unix(now+12*Day, 0)), "1 week from now"}, - {"2 weeks from now", Time(time.Unix(now+15*Day, 0)), "2 weeks from now"}, - {"1 month from now", Time(time.Unix(now+30*Day, 0)), "1 month from now"}, - {"1 year from now", Time(time.Unix(now+365*Day, 0)), "1 year from now"}, - {"2 years from now", Time(time.Unix(now+2*Year, 0)), "2 years from now"}, - {"a while from now", Time(time.Unix(now+LongTime, 0)), "a long while from now"}, - }.validate(t) -} - -func TestRange(t *testing.T) { - start := time.Time{} - end := time.Unix(math.MaxInt64, math.MaxInt64) - x := RelTime(start, end, "ago", "from now") - if x != "a long while from now" { - t.Errorf("Expected a long while from now, got %q", x) - } -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml deleted file mode 100644 index 2cc62c5e853..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - -matrix: - fast_finish: true - -before_install: - - go get -v code.google.com/p/go.tools/cmd/vet - - go get -v github.com/golang/lint/golint - - go get -v code.google.com/p/go.tools/cmd/cover - -install: - - go install -race -v std - - go get -race -t -v ./... - - go install -race -v ./... - -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -cpu=2 -race -v ./... - - go test -cpu=2 -covermode=atomic ./... diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go deleted file mode 100644 index 60cda2a5bae..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package atomicfile provides the ability to write a file with an eventual -// rename on Close. This allows for a file to always be in a consistent state -// and never represent an in-progress write. -package atomicfile - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// File behaves like os.File, but does an atomic rename operation at Close. 
-type File struct { - *os.File - path string -} - -// New creates a new temporary file that will replace the file at the given -// path when Closed. -func New(path string, mode os.FileMode) (*File, error) { - f, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path)) - if err != nil { - return nil, err - } - if err := os.Chmod(f.Name(), mode); err != nil { - os.Remove(f.Name()) - return nil, err - } - return &File{File: f, path: path}, nil -} - -// Close the file replacing the configured file. -func (f *File) Close() error { - if err := f.File.Close(); err != nil { - return err - } - if err := os.Rename(f.Name(), f.path); err != nil { - return err - } - return nil -} - -// Abort closes the file and removes it instead of replacing the configured -// file. This is useful if after starting to write to the file you decide you -// don't want it anymore. -func (f *File) Abort() error { - if err := f.File.Close(); err != nil { - return err - } - if err := os.Remove(f.Name()); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go deleted file mode 100644 index 0ed481f8cbf..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/atomicfile_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package atomicfile_test - -import ( - "bytes" - "io/ioutil" - "os" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/facebookgo/atomicfile" -) - -func test(t *testing.T, dir, prefix string) { - t.Parallel() - - tmpfile, err := ioutil.TempFile(dir, prefix) - if err != nil { - t.Fatal(err) - } - name := tmpfile.Name() - - if err := os.Remove(name); err != nil { - t.Fatal(err) - } - - defer os.Remove(name) - f, err := atomicfile.New(name, os.FileMode(0666)) - if err != nil { - t.Fatal(err) - } - f.Write([]byte("foo")) - if _, err := os.Stat(name); !os.IsNotExist(err) { - t.Fatal("did not expect file to exist") - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(name); err != nil { - t.Fatalf("expected file to exist: %s", err) - } -} - -func TestCurrentDir(t *testing.T) { - cwd, _ := os.Getwd() - test(t, cwd, "atomicfile-current-dir-") -} - -func TestRootTmpDir(t *testing.T) { - test(t, "/tmp", "atomicfile-root-tmp-dir-") -} - -func TestDefaultTmpDir(t *testing.T) { - test(t, "", "atomicfile-default-tmp-dir-") -} - -func TestAbort(t *testing.T) { - contents := []byte("the answer is 42") - t.Parallel() - tmpfile, err := ioutil.TempFile("", "atomicfile-abort-") - if err != nil { - t.Fatal(err) - } - name := tmpfile.Name() - if _, err := tmpfile.Write(contents); err != nil { - t.Fatal(err) - } - defer os.Remove(name) - - f, err := atomicfile.New(name, os.FileMode(0666)) - if err != nil { - t.Fatal(err) - } - f.Write([]byte("foo")) - if err := f.Abort(); err != nil { - t.Fatal(err) - } - if _, err := os.Stat(name); err != nil { - t.Fatalf("expected file to exist: %s", err) - } - actual, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(contents, actual) { - t.Fatalf(`did not find expected "%s" instead found "%s"`, contents, actual) - } -} diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license deleted file mode 100644 index 4ce34257ce8..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/license +++ /dev/null @@ -1,30 +0,0 @@ -BSD License - -For atomicfile software 
- -Copyright (c) 2014, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents deleted file mode 100644 index 887426cf1ce..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/patents +++ /dev/null @@ -1,23 +0,0 @@ -Additional Grant of Patent Rights - -"Software" means the atomicfile software distributed by Facebook, Inc. - -Facebook hereby grants you a perpetual, worldwide, royalty-free, non-exclusive, -irrevocable (subject to the termination provision below) license under any -rights in any patent claims owned by Facebook, to make, have made, use, sell, -offer to sell, import, and otherwise transfer the Software. For avoidance of -doubt, no license is granted under Facebook’s rights in any patent claims that -are infringed by (i) modifications to the Software made by you or a third party, -or (ii) the Software in combination with any software or other technology -provided by you or a third party. - -The license granted hereunder will terminate, automatically and without notice, -for anyone that makes any claim (including by filing any lawsuit, assertion or -other action) alleging (a) direct, indirect, or contributory infringement or -inducement to infringe any patent: (i) by Facebook or any of its subsidiaries or -affiliates, whether or not such claim is related to the Software, (ii) by any -party if such claim arises in whole or in part from any software, product or -service of Facebook or any of its subsidiaries or affiliates, whether or not -such claim is related to the Software, or (iii) by any party relating to the -Software; or (b) that any right in any patent claim of Facebook is invalid or -unenforceable. 
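The whole public surface of the atomicfile package being removed is New, Close, and Abort, shown in atomicfile.go above. A minimal usage sketch, assuming the upstream import path github.com/facebookgo/atomicfile (the deleted copy was vendored under Godeps/_workspace) and a hypothetical target file config.json:

package main

import (
	"log"

	"github.com/facebookgo/atomicfile" // assumed upstream path of the vendored package removed above
)

func main() {
	// New creates a temporary file in config.json's directory with mode 0644.
	f, err := atomicfile.New("config.json", 0644)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte(`{"ok":true}`)); err != nil {
		f.Abort() // removes the temp file; any existing config.json is untouched
		log.Fatal(err)
	}
	// Close renames the temp file over config.json, so readers observe either
	// the old contents or the new contents, never a partial write.
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}

This is the same pattern the deleted atomicfile_test.go exercises: TestAbort checks that the original contents survive an Abort, and the other tests check that Close makes the file appear.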
diff --git a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md b/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md deleted file mode 100644 index 80038c3e0ac..00000000000 --- a/Godeps/_workspace/src/github.com/facebookgo/atomicfile/readme.md +++ /dev/null @@ -1,4 +0,0 @@ -atomicfile [![Build Status](https://secure.travis-ci.org/facebookgo/atomicfile.png)](http://travis-ci.org/facebookgo/atomicfile) -========== - -Documentation: http://godoc.org/github.com/facebookgo/atomicfile diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json index 2f155082378..da3894edba7 100644 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json +++ b/Godeps/_workspace/src/github.com/ipfs/go-datastore/Godeps/Godeps.json @@ -55,10 +55,6 @@ "ImportPath": "github.com/mattbaird/elastigo/core", "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" }, - { - "ImportPath": "github.com/syndtr/goleveldb/leveldb", - "Rev": "871eee0a7546bb7d1b2795142e29c4534abc49b3" - }, { "ImportPath": "github.com/syndtr/gosnappy/snappy", "Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862" diff --git a/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go b/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go index 7820a7974f8..b66e30f1396 100644 --- a/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go +++ b/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb/datastore.go @@ -3,9 +3,9 @@ package leveldb import ( ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/query" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" + "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb" + "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt" + "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/util" "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess" ) diff --git a/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go b/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go index 563c4fbefa6..cdf79b64fe6 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-random/random/random.go @@ -5,8 +5,8 @@ import ( "os" "strconv" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize" random "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-random" + "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" ) func main() { diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index 88993e87b0c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) -} - -const ( - batchHdrLen = 8 + 4 - batchGrowRec = 3000 -) - -type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -// Batch is a write batch. -type Batch struct { - data []byte - rLen, bLen int - seq uint64 - sync bool -} - -func (b *Batch) grow(n int) { - off := len(b.data) - if off == 0 { - off = batchHdrLen - if b.data != nil { - b.data = b.data[:off] - } - } - if cap(b.data)-off < n { - if b.data == nil { - b.data = make([]byte, off, off+n) - } else { - odata := b.data - div := 1 - if b.rLen > batchGrowRec { - div = b.rLen / batchGrowRec - } - b.data = make([]byte, off, off+n+(off-batchHdrLen)/div) - copy(b.data, odata) - } - } -} - -func (b *Batch) appendRec(kt kType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - off := len(b.data) - data := b.data[:off+n] - data[off] = byte(kt) - off += 1 - off += binary.PutUvarint(data[off:], uint64(len(key))) - copy(data[off:], key) - off += len(key) - if kt == ktVal { - off += binary.PutUvarint(data[off:], uint64(len(value))) - copy(data[off:], value) - off += len(value) - } - b.data = data[:off] - b.rLen++ - // Include 8-byte ikey header - b.bLen += len(key) + len(value) + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns. -func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns. -func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.encode() -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(0, data) -} - -// Replay replays batch contents. -func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - switch kt { - case ktVal: - r.Put(key, value) - case ktDel: - r.Delete(key) - } - }) -} - -// Len returns number of records in the batch. -func (b *Batch) Len() int { - return b.rLen -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.seq = 0 - b.rLen = 0 - b.bLen = 0 - b.sync = false -} - -func (b *Batch) init(sync bool) { - b.sync = sync -} - -func (b *Batch) append(p *Batch) { - if p.rLen > 0 { - b.grow(len(p.data) - batchHdrLen) - b.data = append(b.data, p.data[batchHdrLen:]...) 
- b.rLen += p.rLen - } - if p.sync { - b.sync = true - } -} - -// size returns sums of key/value pair length plus 8-bytes ikey. -func (b *Batch) size() int { - return b.bLen -} - -func (b *Batch) encode() []byte { - b.grow(0) - binary.LittleEndian.PutUint64(b.data, b.seq) - binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen)) - - return b.data -} - -func (b *Batch) decode(prevSeq uint64, data []byte) error { - if len(data) < batchHdrLen { - return newErrBatchCorrupted("too short") - } - - b.seq = binary.LittleEndian.Uint64(data) - if b.seq < prevSeq { - return newErrBatchCorrupted("invalid sequence number") - } - b.rLen = int(binary.LittleEndian.Uint32(data[8:])) - if b.rLen < 0 { - return newErrBatchCorrupted("invalid records length") - } - // No need to be precise at this point, it won't be used anyway - b.bLen = len(data) - batchHdrLen - b.data = data - - return nil -} - -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { - off := batchHdrLen - for i := 0; i < b.rLen; i++ { - if off >= len(b.data) { - return newErrBatchCorrupted("invalid records length") - } - - kt := kType(b.data[off]) - if kt > ktVal { - return newErrBatchCorrupted("bad record: invalid type") - } - off += 1 - - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - key := b.data[off : off+int(x)] - off += int(x) - var value []byte - if kt == ktVal { - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - value = b.data[off : off+int(x)] - off += int(x) - } - - f(i, kt, key, value) - } - - return nil -} - -func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) - }) -} - -func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error { - if err := b.decode(prevSeq, data); err != nil { - return err - } - return b.memReplay(to) -} - -func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 49578d99ba2..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
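Before the test file begins, a sketch of how the Batch type deleted above is meant to be driven. The import path is assumed to be the upstream github.com/syndtr/goleveldb/leveldb, and printReplay is a hypothetical BatchReplay implementation used only for illustration:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb" // assumed upstream path of the vendored copy removed above
)

// printReplay satisfies the BatchReplay interface from batch.go; Replay
// feeds it every record in the batch, in order.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("key1"), []byte("value1"))
	b.Put([]byte("key2"), []byte("value2"))
	b.Delete([]byte("key1"))
	fmt.Println(b.Len()) // 3: a delete is a record too

	// Dump and Load round-trip the batch through its wire encoding,
	// which is what TestBatch_EncodeDecode below verifies.
	clone := new(leveldb.Batch)
	if err := clone.Load(b.Dump()); err != nil {
		panic(err)
	}
	if err := clone.Replay(printReplay{}); err != nil {
		panic(err)
	}
}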
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - kt kType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) -} - -func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.Len() != b2.Len() { - t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.Replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.Replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.kt != r2.kt { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.kt == ktVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(0, buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - -func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go deleted file mode 100644 index 0dd60fd829b..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 500c3850f20..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) 
- } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := 
p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b 
*testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go deleted file mode 100644 index 175e2220323..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.2 - -package cache - -import ( - "math/rand" - "testing" -) - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index 8162e16a0f8..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides interface to implements a caching functionality. -// An implementation must be goroutine-safe. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevent subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. - EvictAll() - - // Close closes the 'cache tree' - Close() error -} - -// Value is a 'cacheable object'. It may implements util.Releaser, if -// so the the Release method will be called once object is released. -type Value interface{} - -type CacheGetter struct { - Cache *Cache - NS uint64 -} - -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. 
- for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. - if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. - if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. 
- pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } - return false -} - -// Nodes returns number of 'cache node' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns sums of 'cache node' size in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. -func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. -func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets 'cache node' with the given namespace and key. -// If cache node is not found and setFunc is not nil, Get will atomically creates -// the 'cache node' by calling setFunc. Otherwise Get will returns nil. -// -// The returned 'cache handle' should be released after use by calling Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and ban 'cache node' with the given namespace and key. 
-// A banned 'cache node' will never inserted into the 'cache tree'. Ban -// only attributed to the particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, then it will be executed if such 'cache node' -// doesn't exist or once the 'cache node' is released. -// -// Delete return true is such 'cache node' exist. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict return true is such 'cache node' exist. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and releases all 'cache node'. -func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. - for _, f := range n.onDel { - f() - } - } - } - } - r.mu.Unlock() - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns an handle for this 'cache node'. 
-func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -type Handle struct { - n unsafe.Pointer // *Node -} - -func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index c2a50156f04..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
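With the map implementation above and before its tests below, a sketch of the intended calling pattern, assuming the upstream import path github.com/syndtr/goleveldb/leveldb/cache. Note that with a nil Cacher a node whose last handle is released is deleted immediately, so the sketch pairs the map with the LRU policy from lru.go (also deleted in this change) to keep released nodes resident:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache" // assumed upstream path of the vendored package
)

func main() {
	// An LRU Cacher holds its own handle to each promoted node, so nodes
	// survive Release until capacity forces them out.
	c := cache.NewCache(cache.NewLRU(128))

	// Get returns the existing node or atomically creates it via setFunc.
	h := c.Get(0, 42, func() (size int, value cache.Value) {
		return 1, "hello"
	})
	fmt.Println(h.Value()) // hello
	h.Release()

	// A nil setFunc makes Get a pure lookup.
	if h := c.Get(0, 42, nil); h != nil {
		fmt.Println("hit:", h.Value())
		h.Release()
	}

	fmt.Println(c.Nodes(), c.Size()) // 1 1
	c.Close()
}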
- -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } else { - return charge, value - } - }) -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - nsx := []struct { - nobjects, nhandles, concurrent, repeat int - }{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range nsx { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range nsx { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat, objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 
{ - t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' 
iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - 
if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde15e..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
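TestLRUCache_Eviction and TestLRUCache_Capacity above pin down the contract that the lru.go implementation below provides: the total charge of resident nodes never exceeds the capacity, and SetCapacity evicts immediately until the budget fits again. A small sketch under the same assumed upstream import path as the earlier examples:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache" // assumed upstream path
)

func main() {
	c := cache.NewCache(cache.NewLRU(3)) // capacity is a charge budget, not a node count

	for key := uint64(1); key <= 4; key++ {
		k := key
		c.Get(0, k, func() (int, cache.Value) { return 1, k }).Release()
	}
	// Four charge-1 inserts against capacity 3: key 1 has been evicted.
	fmt.Println(c.Get(0, 1, nil) == nil) // true

	// Shrinking the capacity evicts least-recently-used nodes at once.
	c.SetCapacity(1)
	fmt.Println(c.Nodes()) // 1
	c.Close()
}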
- -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used -= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index 0554336db31..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) - if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { - x = -1 - } else if m < n { - x = 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() - dst = icmp.ucmp.Separator(dst, ua, ub) - if dst == nil { - return nil - } - if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, a[len(a)-8:]...) - } - return dst -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() - dst = icmp.ucmp.Successor(dst, ub) - if dst == nil { - return nil - } - if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, b[len(b)-8:]...) - } - return dst -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf88dd2..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. 
-var DefaultComparer = bytesComparer{} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f16fce..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. - Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index a3bdfae624c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
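The Comparer interface above is the package's extension point for key ordering. Here is a minimal custom implementation, a sketch with invented names (reverseComparer, "example.ReverseComparator"); per the contract above, returning nil from Separator and Successor simply declines to propose a shortened key:

    package main

    import (
        "bytes"
        "fmt"
    )

    // reverseComparer orders keys in descending bytewise order.
    type reverseComparer struct{}

    func (reverseComparer) Compare(a, b []byte) int { return -bytes.Compare(a, b) }

    // Name must change whenever the ordering changes: the on-disk format
    // records it, and a DB refuses to open under a different comparer.
    func (reverseComparer) Name() string { return "example.ReverseComparator" }

    // Returning nil means no shorter key is offered; callers keep the
    // original key, which is correct if suboptimal for index-block size.
    func (reverseComparer) Separator(dst, a, b []byte) []byte { return nil }
    func (reverseComparer) Successor(dst, b []byte) []byte    { return nil }

    func main() {
        c := reverseComparer{}
        fmt.Println(c.Compare([]byte("a"), []byte("b")) > 0) // true: "a" sorts last
    }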
- -package leveldb - -import ( - "bytes" - "fmt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "io" - "math/rand" - "testing" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() - if fi < 0 { - fi = len(sff) - 1 - } - if fi >= len(sff) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - file := sff[fi] - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) 
- if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := &h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) 
- - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. - h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) - - h.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index b117078688c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,945 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
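The harness above injects corruption by reading a file back, XOR-flipping the top bit of n bytes at a chosen offset, and rewriting the file in place. The core of that trick in isolation; corruptFile and the path used are illustrative, not the harness API:

    package main

    import (
        "fmt"
        "os"
    )

    // corruptFile flips the high bit of n bytes starting at offset,
    // the same mutation dbCorruptHarness.corrupt applies above.
    func corruptFile(path string, offset, n int) error {
        buf, err := os.ReadFile(path)
        if err != nil {
            return err
        }
        if offset+n > len(buf) {
            n = len(buf) - offset // clamp to the file size
        }
        for i := 0; i < n; i++ {
            buf[offset+i] ^= 0x80
        }
        return os.WriteFile(path, buf, 0644)
    }

    func main() {
        // Flip one byte at offset 100, as TestCorruptDB_Table does
        // (hypothetical table-file path).
        if err := corruptFile("db/000001.ldb", 100, 1); err != nil {
            fmt.Println(err)
        }
    }

Flipping a bit rather than truncating leaves lengths and framing intact, so it is checksum verification, not an I/O error, that has to catch the damage.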
- -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type DB struct // DB is a LevelDB database. -{ - // Need 64-bit alignment. - seq uint64 - - // Session. - s *session - - // MemDB. - memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Stats. - aliveSnaps, aliveIters int32 - - // Write. - writeC chan *Batch - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - journalC chan *Batch - journalAckC chan error - - // Compaction. - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compStats []cStats - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - writeC: make(chan *Batch), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - journalC: make(chan *Batch), - journalAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), - // Close - closeC: make(chan struct{}), - } - - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - db.closeW.Add(3) - go db.tCompaction() - go db.mCompaction() - go db.jWriter() - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. 
-// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// desribed in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as desribed -// in the leveldb/storage package. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader, lets StrictRecovery doing its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort it by file number. - tableFiles_, err := s.getFiles(storage.TypeTable) - if err != nil { - return err - } - tableFiles := files(tableFiles_) - tableFiles.sort() - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted table. 
- strict = o.GetStrict(opt.StrictRecovery) - - rec = &sessionRecord{numLevel: o.GetNumLevel()} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - tmp.Remove() - tmp = nil - } - }() - - // Copy entries. - tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validIkey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil { - return - } - err = tw.Close() - if err != nil { - return - } - err = writer.Sync() - if err != nil { - return - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. - size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok { - itererr.SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) - tcorruptedBlock++ - } - }) - } - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseIkey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) - iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := file.Replace(tmp); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) - - // Mark file number as used. 
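	// (tableFiles is sorted ascending, so the last entry carries the
	// highest number; recording it keeps file numbers allocated after
	// recovery from colliding with the surviving tables.)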
- s.markFileNum(tableFiles[len(tableFiles)-1].Num()) - - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all tables and sort it by file number. - journalFiles_, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - journalFiles := files(journalFiles_) - journalFiles.sort() - - // Discard older journal. - prev := -1 - for i, file := range journalFiles { - if file.Num() >= db.s.stJournalNum { - if prev >= 0 { - i-- - journalFiles[i] = journalFiles[prev] - } - journalFiles = journalFiles[i:] - break - } else if file.Num() == db.s.stPrevJournalNum { - prev = i - } - } - - var jr *journal.Reader - var of storage.File - var mem *memdb.DB - batch := new(Batch) - cm := newCMem(db.s) - buf := new(util.Buffer) - // Options. - strict := db.s.o.GetStrict(opt.StrictJournal) - checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer := db.s.o.GetWriteBuffer() - recoverJournal := func(file storage.File) error { - db.logf("journal@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - defer reader.Close() - - // Create/reset journal reader instance. - if jr == nil { - jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) - } else { - jr.Reset(reader, dropper{db.s, file}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if of != nil { - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - if err := cm.commit(file.Num(), db.seq); err != nil { - return err - } - cm.reset() - of.Remove() - of = nil - } - - // Replay journal to memdb. - mem.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - return errors.SetFile(err, file) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } else { - return errors.SetFile(err, file) - } - } - if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil { - if strict || !errors.IsCorrupted(err) { - return errors.SetFile(err, file) - } else { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.Len()) - - // Flush it if large enough. - if mem.Size() >= writeBuffer { - if err := cm.flush(mem, 0); err != nil { - return err - } - mem.Reset() - } - } - - of = file - return nil - } - - // Recover all journals. - if len(journalFiles) > 0 { - db.logf("journal@recovery F·%d", len(journalFiles)) - - // Mark file number as used. - db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) - - mem = memdb.New(db.s.icmp, writeBuffer) - for _, file := range journalFiles { - if err := recoverJournal(file); err != nil { - return err - } - } - - // Flush the last journal. - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. 
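	// (This persists the new journal's file number and the sequence
	// number recovered above into the manifest, so the journals just
	// replayed will not be replayed again on the next open.)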
- if err := cm.commit(db.journalFile.Num(), db.seq); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if of != nil { - of.Remove() - } - - return nil -} - -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, mv, me := m.mdb.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me - } - } - - v := db.s.version() - value, cSched, err := v.get(ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - return -} - -func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, _, me := m.mdb.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return false, nil - } - return true, nil - } - } else if me != ErrNotFound { - return false, me - } - } - - v := db.s.version() - _, cSched, err := v.get(ikey, ro, true) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - if err == nil { - ret = true - } else if err == ErrNotFound { - err = nil - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.get(key, se.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.has(key, se.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the -// uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. 
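//
// Typical usage:
//
//	iter := db.NewIterator(nil, nil)
//	for iter.Next() {
//		// Use iter.Key() and iter.Value(); their contents are only
//		// valid until the next movement of the iterator.
//	}
//	iter.Release()
//	err := iter.Error()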
-func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - // Iterator holds 'version' lock, 'version' is immutable so snapshot - // can be released after iterator created. - return db.newIterator(se.seq, slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. -// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of files at level 'n'. -// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -// leveldb.cachedblock -// Returns size of cached block. -// leveldb.openedtables -// Returns number of opened tables. -// leveldb.alivesnaps -// Returns number of alive snapshots. -// leveldb.aliveiters -// Returns number of alive iterators. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." - if !strings.HasPrefix(name, prefix) { - return "", errors.New("leveldb: GetProperty: unknown property: " + name) - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { - err = errors.New("leveldb: GetProperty: invalid property: " + name) - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "sstables": - for level, tables := range v.tables { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = errors.New("leveldb: GetProperty: unknown property: " + name) - } - - return -} - -// SizeOf calculates approximate sizes of the given key ranges. 
-// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. -func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := newIkey(r.Start, kMaxSeq, ktSeek) - imax := newIkey(r.Limit, kMaxSeq, ktSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size uint64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. This will also releases any outstanding snapshot and -// abort any in-flight compaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - default: - } - - // Signal all goroutines. - close(db.closeC) - - // Wait for all gorotines to exit. - db.closeW.Wait() - - // Lock writer and closes journal. - db.writeLockC <- struct{}{} - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - - if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - } - - // Close session. - db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - } - - // NIL'ing pointers. - db.s = nil - db.mem = nil - db.frozenMem = nil - db.journal = nil - db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil - db.closer = nil - - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 6bf2c4bb6cd..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,835 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
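That closes out db.go. For orientation, a compact sketch of the lifecycle its exported API supports; the upstream import path is assumed instead of the vendored one, and Put comes from db_write.go, which is not part of this file:

    package main

    import (
        "fmt"
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        // OpenFile creates the DB when missing (see the Open notes above).
        db, err := leveldb.OpenFile("demo.db", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close() // releases snapshots, aborts in-flight compactions

        if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
            log.Fatal(err)
        }
        v, err := db.Get([]byte("k"), nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("k = %s\n", v)

        // Approximate on-disk usage of a key range, per SizeOf above.
        sizes, err := db.SizeOf([]util.Range{{Start: []byte("a"), Limit: []byte("z")}})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sizes)
    }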
- -package leveldb - -import ( - "sync" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStats struct { - sync.Mutex - duration time.Duration - read uint64 - write uint64 -} - -func (p *cStats) add(n *cStatsStaging) { - p.Lock() - p.duration += n.duration - p.read += n.read - p.write += n.write - p.Unlock() -} - -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() - return p.duration, p.read, p.write -} - -type cStatsStaging struct { - start time.Time - duration time.Duration - on bool - read uint64 - write uint64 -} - -func (p *cStatsStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatsStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cMem struct { - s *session - level int - rec *sessionRecord -} - -func newCMem(s *session) *cMem { - return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}} -} - -func (c *cMem) flush(mem *memdb.DB, level int) error { - s := c.s - - // Write memdb to table. - iter := mem.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return err - } - - // Pick level. - if level < 0 { - v := s.version() - level = v.pickLevel(t.imin.ukey(), t.imax.ukey()) - v.release() - } - c.rec.addTableFile(level, t) - - s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - - c.level = level - return nil -} - -func (c *cMem) reset() { - c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()} -} - -func (c *cMem) commit(journal, seq uint64) error { - c.rec.setJournalNum(journal) - c.rec.setSeqNum(seq) - - // Commit changes. - return c.s.commit(c.rec) -} - -func (db *DB) compactionError() { - var ( - err error - wlocked bool - ) -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case _, _ = <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case errors.IsCorrupted(err): - goto hasperr - default: - } - case _, _ = <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - wlocked = true - case _, _ = <-db.closeC: - if wlocked { - // We should release the lock or Close will hang. 
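				// (The persistent-error loop above deliberately holds the
				// write lock so writers fail fast instead of touching a
				// corrupted DB; it has to be drained here on shutdown.)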
- <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check wether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. - backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) memCompaction() { - mem := db.getFrozenMem() - if mem == nil { - return - } - defer mem.decref() - - c := newCMem(db.s) - stats := new(cStatsStaging) - - db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size())) - - // Don't compact empty memdb. - if mem.mdb.Len() == 0 { - db.logf("mem@flush skipping") - // drop frozen mem - db.dropFrozenMem() - return - } - - // Pause table compaction. 
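	// (tCompaction parks inside pauseCompaction until this flush commits
	// and receives from resumeC below, so a table compaction cannot
	// commit manifest changes concurrently with the memdb flush.)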
- resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case _, _ = <-db.closeC: - return - } - - db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.flush(mem.mdb, -1) - }, func() error { - for _, r := range c.rec.addedTables { - db.logf("mem@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.commit(db.journalFile.Num(), db.frozenSeq) - }, nil) - - db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration) - - for _, r := range c.rec.addedTables { - stats.write += r.size - } - db.compStats[c.level].add(stats) - - // Drop frozen mem. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case _, _ = <-db.closeC: - return - } - } - - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatsStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case _, _ = <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. - return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.level+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. - b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. 
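		// (compactionTransact may rerun this function after a transient
		// error; snapIter records how many keys the last saved state had
		// already consumed, so the rebuilt iterator fast-forwards to it.)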
- if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. - hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = kMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. 
- if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()} - rec.addCompPtr(c.level, c.imax) - - if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) - return - } - - var stats [2]cStatsStaging - for i, tables := range c.tables { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), - } - db.compactionTransact("table@build", b) - - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats[c.level+1].add(&stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } - } -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case _, _ = <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cIdle struct { - ackC chan<- error -} - -func (r cIdle) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compation and/or wait for all compaction to be done. 
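// compSendIdle does the waiting half by attaching an ack channel to a
// cIdle command; tCompaction only acks queued cIdle commands once it
// finds no further compaction work.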
-func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cIdle{ch}: - case err = <-db.compErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - -// Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cIdle: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - default: - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cIdle: - ackQ = append(ackQ, x) - case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 1366bdb54c7..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
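tCompaction and mCompaction above are instances of a single-owner worker pattern: one goroutine owns all compaction state, commands arrive over a channel carrying their own ack channel, and closing closeC broadcasts shutdown. The pattern in miniature, with illustrative names:

    package main

    import "fmt"

    // cmd is a request plus the channel on which its result is acked.
    type cmd struct {
        name string
        ack  chan error
    }

    // worker owns its state outright; nothing else touches it, so no
    // mutex is needed (the channels are the synchronization).
    func worker(cmdC <-chan cmd, closeC <-chan struct{}) {
        for {
            select {
            case c := <-cmdC:
                // ... perform c.name against worker-local state ...
                c.ack <- nil
            case <-closeC:
                return
            }
        }
    }

    func main() {
        cmdC := make(chan cmd)
        closeC := make(chan struct{})
        go worker(cmdC, closeC)

        ack := make(chan error)
        cmdC <- cmd{name: "compact", ack: ack}
        fmt.Println(<-ack) // <nil>: the worker completed our command

        close(closeC) // broadcast shutdown, as closing db.closeC does above
    }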
- -package leveldb - -import ( - "errors" - "math/rand" - "runtime" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - em, fm := db.getMems() - v := db.s.version() - - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) - emi := em.mdb.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) - if fm != nil { - fmi := fm.mdb.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) - } - i = append(i, ti...) - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) - } - if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) - } - } - rawIter := db.newRawIterator(islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -func (db *DB) iterSamplingRate() int { - return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. 
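newRawIterator above stacks the effective memdb, the frozen memdb and the per-level table iterators into one merged iterator. What "merged" means is easiest to see on plain sorted slices; this illustrative mergeSorted is not the iterator.NewMergedIterator API, which operates on goleveldb's Iterator interface:

// Merging several sorted layers into one ordered stream.
package main

import "fmt"

// mergeSorted yields the union of several sorted inputs in order,
// the way the merged iterator surfaces keys from all layers at once.
func mergeSorted(layers [][]string) []string {
	var out []string
	idx := make([]int, len(layers))
	for {
		best := -1
		for i, l := range layers {
			if idx[i] >= len(l) {
				continue
			}
			if best == -1 || l[idx[i]] < layers[best][idx[best]] {
				best = i
			}
		}
		if best == -1 {
			return out
		}
		out = append(out, layers[best][idx[best]])
		idx[best]++
	}
}

func main() {
	mem := []string{"b", "d"}
	frozen := []string{"a", "d"}
	table := []string{"c", "e"}
	fmt.Println(mergeSorted([][]string{mem, frozen, table}))
	// Prints [a b c d d e]: duplicate keys across layers would then be
	// resolved by sequence number, with the newest layer winning.
}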
-type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - smaplingGap int - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) sampleSeek() { - ikey := i.iter.Key() - i.smaplingGap -= len(ikey) + len(i.iter.Value()) - for i.smaplingGap < 0 { - i.smaplingGap += i.db.iterSamplingRate() - i.db.sampleSeek(ikey) - } -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := newIkey(key, i.seq, ktSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - switch kt { - case ktDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) - i.dir = dirForward - case ktVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == ktDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) 
- } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { - i.sampleSeek() - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. - runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 9738159dc20..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. -func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. -func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshoted. 
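The snapshot bookkeeping deleted above (acquireSnapshot/releaseSnapshot) keeps one refcounted list element per distinct sequence number, so the minimum pinned sequence is simply the front of the list. A self-contained reduction with hypothetical types (snaps, snapElem) and no locking:

// Refcounted snapshot list: snapshots at the same sequence share an element.
package main

import (
	"container/list"
	"fmt"
)

type snapElem struct {
	seq uint64
	ref int
	e   *list.Element
}

type snaps struct {
	l   *list.List
	seq uint64 // current write sequence
}

func (s *snaps) acquire() *snapElem {
	if e := s.l.Back(); e != nil {
		se := e.Value.(*snapElem)
		if se.seq == s.seq { // reuse element for an identical sequence
			se.ref++
			return se
		}
	}
	se := &snapElem{seq: s.seq, ref: 1}
	se.e = s.l.PushBack(se)
	return se
}

func (s *snaps) release(se *snapElem) {
	se.ref--
	if se.ref == 0 {
		s.l.Remove(se.e)
	}
}

// minSeq is the oldest sequence still pinned by a snapshot.
func (s *snaps) minSeq() uint64 {
	if e := s.l.Front(); e != nil {
		return e.Value.(*snapElem).seq
	}
	return s.seq
}

func main() {
	s := &snaps{l: list.New()}
	a := s.acquire() // pins seq 0
	s.seq = 5
	b := s.acquire()        // pins seq 5
	fmt.Println(s.minSeq()) // 0: compaction must keep entries visible at seq 0
	s.release(a)
	fmt.Println(s.minSeq()) // 5
	s.release(b)
	fmt.Println(s.minSeq()) // 5 (falls back to the current sequence)
}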
-func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - if e := db.snapsList.Front(); e != nil { - return e.Value.(*snapshotElement).seq - } - - return db.getSeq() -} - -// Snapshot is a DB snapshot. -type Snapshot struct { - db *DB - elem *snapshotElement - mu sync.RWMutex - released bool -} - -// Creates new snapshot object. -func (db *DB) newSnapshot() *Snapshot { - snap := &Snapshot{ - db: db, - elem: db.acquireSnapshot(), - } - atomic.AddInt32(&db.aliveSnaps, 1) - runtime.SetFinalizer(snap, (*Snapshot).Release) - return snap -} - -func (snap *Snapshot) String() string { - return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) -} - -// Get gets the value for the given key. It returns ErrNotFound if -// the DB does not contains the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.get(key, snap.elem.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.has(key, snap.elem.seq, ro) -} - -// NewIterator returns an iterator for the snapshot of the uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// Releasing the snapshot doesn't mean releasing the iterator too, the -// iterator would be still valid until released. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := snap.db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - return iterator.NewEmptyIterator(ErrSnapshotReleased) - } - // Since iterator already hold version ref, it doesn't need to - // hold snapshot ref. - return snap.db.newIterator(snap.elem.seq, slice, ro) -} - -// Release releases the snapshot. This will not release any returned -// iterators, the iterators would still be valid until released or the -// underlying DB is closed. -// -// Other methods should not be called after the snapshot has been released. -func (snap *Snapshot) Release() { - snap.mu.Lock() - defer snap.mu.Unlock() - - if !snap.released { - // Clear the finalizer. 
- runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index f642283ba89..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" -) - -type memDB struct { - db *DB - mdb *memdb.DB - ref int32 -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() { - m.mdb.Reset() - m.db.mpoolPut(m.mdb) - } - m.db = nil - m.mdb = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) sampleSeek(ikey iKey) { - v := db.s.version() - if v.sampleSeek(ikey) { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - v.release() -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - defer func() { - recover() - }() - select { - case db.memPool <- mem: - default: - } -} - -func (db *DB) mpoolGet() *memdb.DB { - select { - case mem := <-db.memPool: - return mem - default: - return nil - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case _, _ = <-db.closeC: - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. -func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() - if err != nil { - db.s.reuseFileNum(num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - panic("still has frozen mem") - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFile = db.journalFile - } - db.journalWriter = w - db.journalFile = file - db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - mdb: mdb, - ref: 2, - } - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. - db.frozenSeq = db.seq - return -} - -// Get all memdbs. 
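mpoolPut/mpoolGet above recycle memdb storage through a buffered channel used as a non-blocking pool, tied to an atomic refcount that returns the buffer on the last decref. A sketch of just that idiom, under assumed names (buf, newBuf):

// Atomic refcount plus a buffered-channel pool; put and get never block.
package main

import (
	"fmt"
	"sync/atomic"
)

type buf struct {
	ref  int32
	data []byte
	pool chan []byte
}

func (b *buf) incref() { atomic.AddInt32(&b.ref, 1) }

func (b *buf) decref() {
	if n := atomic.AddInt32(&b.ref, -1); n == 0 {
		select {
		case b.pool <- b.data[:0]: // recycle the storage
		default: // pool full; let the GC take it
		}
	} else if n < 0 {
		panic("negative ref")
	}
}

func newBuf(pool chan []byte) *buf {
	var d []byte
	select {
	case d = <-pool: // reuse pooled storage if available
	default:
		d = make([]byte, 0, 1024)
	}
	return &buf{ref: 1, data: d, pool: pool}
}

func main() {
	pool := make(chan []byte, 1)
	b := newBuf(pool)
	b.incref()
	b.decref()
	b.decref() // hits zero: storage goes back to the pool
	fmt.Println("pooled:", len(pool) == 1)
}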
-func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get frozen memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) - } - db.frozenJournalFile = nil - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 2cb0a55d4af..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2665 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
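db_state.go's setClosed/isClosed/ok above rely on a single atomic word: CompareAndSwap makes "close exactly once" race-free without a mutex. A minimal sketch (store and errClosed are illustrative names):

// Close-once flag via compare-and-swap.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errClosed = errors.New("closed")

type store struct{ closed uint32 }

// setClosed returns true only for the first caller.
func (s *store) setClosed() bool {
	return atomic.CompareAndSwapUint32(&s.closed, 0, 1)
}

func (s *store) ok() error {
	if atomic.LoadUint32(&s.closed) != 0 {
		return errClosed
	}
	return nil
}

func main() {
	s := &store{}
	fmt.Println(s.ok(), s.setClosed(), s.setClosed(), s.ok())
	// <nil> true false closed
}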
- -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. - defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var ( - maxOverlaps uint64 - maxLevel int - ) - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - 
maxOverlaps = sum - maxLevel = level - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func (h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case ktVal: - res += string(iter.Value()) - case ktDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". 
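The dbHarness helpers above follow a common Go testing pattern: bundle *testing.T with the object under test so each helper can both perform an operation and report failures itself. A tiny _test.go-style reduction (harness and getVal are hypothetical, standing in for the put/getVal family above):

// Test harness that acts and asserts through the embedded *testing.T.
package kv

import "testing"

type harness struct {
	t *testing.T
	m map[string]string
}

func newHarness(t *testing.T) *harness {
	return &harness{t: t, m: map[string]string{}}
}

func (h *harness) put(k, v string) { h.m[k] = v }

// getVal asserts that k maps to want, reporting via the embedded T.
func (h *harness) getVal(k, want string) {
	if got := h.m[k]; got != want {
		h.t.Errorf("get %q: want %q, got %q", k, want, got)
	}
}

func TestHarness(t *testing.T) {
	h := newHarness(t)
	h.put("foo", "v1")
	h.getVal("foo", "v1")
}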
-func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - t.Log("starting memdb compaction") - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } - - t.Log("memdb compaction done") -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - t.Error("CompactRangeAt: expect error") - } - - t.Log("table range compaction done") -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - t.Logf("starting DB range compaction: min=%q, max=%q", min, max) - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } - - t.Log("DB range compaction done") -} - -func (h *dbHarness) sizeOf(start, limit string) uint64 { - sz, err := h.db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) - if err != nil { - h.t.Error("SizeOf: got error: ", err) - } - return sz.Sum() -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { - sz := h.sizeOf(start, limit) - if sz < low || sz > hi { - h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, sz) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} -func (h *dbHarness) tablesPerLevel(want string) { - res := "" - nz := 0 - v := h.db.s.version() - for level, tt := range v.tables { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { - nz = len(res) - } - } - v.release() - res = res[:nz] - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) - } - v.release() - return -} - -type 
keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t *testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var _bloom_filter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{Filter: _bloom_filter} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = _bloom_filter - } - case 2: - if o == nil { - o = &opt.Options{Compression: opt.NoCompression} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) -} - -func TestDB_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDB_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDB_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.get("foo", false) - err := h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yield error: ", err) - } - h.get("foo", false) -} - -func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDB_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func 
TestDB_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDB_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { - t.Errorf("level-0 tables is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDB_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDB_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDB_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). - - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. 
- h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDB_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDB_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. - iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDB_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDB_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.DelaySync(storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDB_RecoverWithLargeJournal(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. 
- h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - h.reopenDB() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(h.o.GetNumLevel(), "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. 
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - 
db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
- h.allEntriesFor("foo", "[ ]") -} - -func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1}) - defer h.close() - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) - } - - h.stor.SetEmuErr(storage.TypeTable, tsOpOpen) - go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.SetEmuErr(0, tsOpOpen) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDB_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) - } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) - - h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, 
true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. 
- h.stor.ResetReadCounter() - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt = int(h.stor.ReadCounter()) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.ReleaseSync(storage.TypeTable) -} - -func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 - - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 - - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDB_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDB_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDB_LeveldbIssue178(t *testing.T) { - nKeys := (opt.DefaultCompactionTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 := func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. 
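Issue 178 is reproduced entirely through the public Batch API used above. For reference, the basic pattern, assuming db is an open *leveldb.DB:

    b := new(leveldb.Batch)
    b.Put([]byte("my_key_1"), []byte("value for range 1 key"))
    b.Put([]byte("my_key_1_xxx"), []byte("value for range 2 key"))
    b.Delete([]byte("my_key_1_xxx"))
    // The whole batch is applied atomically by one Write; Reset empties
    // it for reuse without reallocating.
    if err := db.Write(b, nil); err != nil {
        log.Fatal(err)
    }
    b.Reset()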
- h.assertNumKeys(nKeys) -} - -func TestDB_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. - h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} - -func TestDB_GoleveldbIssue74(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - const n, dur = 10000, 5 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(2) - var done uint32 - go func() { - var i int - defer func() { - t.Logf("WRITER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - - b := new(Batch) - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iv := fmt.Sprintf("VAL%010d", i) - for k := 0; k < n; k++ { - key := fmt.Sprintf("KEY%06d", k) - b.Put([]byte(key), []byte(key+iv)) - b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) - } - h.write(b) - - b.Reset() - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) - } else if string(value) != string(key)+iv { - t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) - } - - b.Delete(key) - b.Delete(ptrKey) - } - h.write(b) - iter.Release() - snap.Release() - if k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var prevValue string - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) - } else if prevValue != "" && string(value) != string(key)+prevValue { - t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) - } else { - prevValue = string(value[len(key):]) - } - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - wg.Wait() -} - -func TestDB_GetProperties(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - _, err := h.db.GetProperty("leveldb.num-files-at-level") - if err == nil { - t.Error("GetProperty() failed to detect missing level") - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0") - if err != nil { - t.Error("got unexpected error", err) - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0x") - if err == nil { - t.Error("GetProperty() failed to detect 
invalid level") - } -} - -func TestDB_GoleveldbIssue72and83(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, - }) - defer h.close() - - const n, wn, dur = 10000, 100, 30 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - randomData := func(prefix byte, i int) []byte { - data := make([]byte, 1+4+32+64+32) - _, err := crand.Reader.Read(data[1 : len(data)-8]) - if err != nil { - panic(err) - } - data[0] = prefix - binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i)) - binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value()) - return data - } - - keys := make([][]byte, n) - for i := range keys { - keys[i] = randomData(1, 0) - } - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(3) - var done uint32 - go func() { - i := 0 - defer func() { - t.Logf("WRITER DONE #%d", i) - wg.Done() - }() - - b := new(Batch) - for ; i < wn && atomic.LoadUint32(&done) == 0; i++ { - b.Reset() - for _, k1 := range keys { - k2 := randomData(2, i) - b.Put(k2, randomData(42, i)) - b.Put(k1, k2) - } - if err := h.db.Write(b, h.wo); err != nil { - atomic.StoreUint32(&done, 1) - t.Fatalf("WRITER #%d db.Write: %v", i, err) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER0 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - seq := snap.elem.seq - if seq == 0 { - snap.Release() - continue - } - iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - writei := int(seq/(n*2) - 1) - var k int - for ; iter.Next(); k++ { - k1 := iter.Key() - k2 := iter.Value() - k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:]) - k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value() - if k1checksum0 != k1checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0) - } - k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:]) - k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value() - if k2checksum0 != k2checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1) - } - kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:])) - if writei != kwritei { - t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei) - } - if _, err := snap.Get(k2, nil); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2) - } - } - if err := iter.Error(); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err) - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER1 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iter := h.db.NewIterator(nil, nil) - seq := iter.(*dbIter).seq - if seq == 0 { - iter.Release() - continue - } - writei := int(seq/(n*2) - 1) - var k int - for ok := iter.Last(); ok; ok = iter.Prev() { - k++ - } - if err := iter.Error(); err != nil { - t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) - } - iter.Release() - if m := (writei+1)*n + n; k != m { - t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) - } - } - }() - - wg.Wait() -} - -func TestDB_TransientError(t *testing.T) { - h := 
newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, - }) - defer h.close() - - const ( - nSnap = 20 - nKey = 10000 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Delete([]byte(key)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - } - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - - runtime.GOMAXPROCS(runtime.NumCPU()) - - rnd := rand.New(rand.NewSource(0xecafdaed)) - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(2) - - go func(i int, snap *Snapshot, sk []int) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for _, k := range sk { - key := fmt.Sprintf("KEY%8d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap, rnd.Perm(nKey)) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - iter := snap.NewIterator(nil, nil) - defer iter.Release() - for k := 0; k < nKey; k++ { - if !iter.Next() { - if err := iter.Error(); err != nil { - t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) - } else { - t.Fatalf("READER_ITER #%d K%d eoi", i, k) - } - } - key := fmt.Sprintf("KEY%8d", k) - xkey := iter.Key() - if !bytes.Equal([]byte(key), xkey) { - t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) - } - value := key + vtail - xvalue := iter.Value() - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, - }) - defer h.close() - - const ( - nSnap = 190 - nKey = 140 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Delete([]byte(key)) - 
} - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - } - - h.compactMem() - - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - - h.compactRangeAt(0, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - h.compactRangeAt(1, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(1) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_TableCompactionBuilder(t *testing.T) { - stor := newTestStorage(t) - defer stor.Close() - - const nSeq = 99 - - o := &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, - } - s, err := newSession(stor, o) - if err != nil { - t.Fatal(err) - } - if err := s.create(); err != nil { - t.Fatal(err) - } - defer s.close() - var ( - seq uint64 - targetSize = 5 * o.CompactionTableSize - value = bytes.Repeat([]byte{'0'}, 100) - ) - for i := 0; i < 2; i++ { - tw, err := s.tops.create() - if err != nil { - t.Fatal(err) - } - for k := 0; tw.tw.BytesLen() < targetSize; k++ { - key := []byte(fmt.Sprintf("%09d", k)) - seq += nSeq - 1 - for x := uint64(0); x < nSeq; x++ { - if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil { - t.Fatal(err) - } - } - } - tf, err := tw.finish() - if err != nil { - t.Fatal(err) - } - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - rec.addTableFile(i, tf) - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - } - - // Build grandparent. - v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - b := &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize/3 + 961, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - // Build level-1. 
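The builder run above splits its output at tableSize, which corresponds to the public CompactionTableSize option. A hedged sketch of an equivalent configuration through the public Open path, with illustrative values only:

    o := &opt.Options{
        WriteBuffer:                 112 * opt.KiB,
        CompactionTableSize:         43 * opt.KiB, // upper bound per output table
        CompactionExpandLimitFactor: 1,
        CompactionGPOverlapsFactor:  1,
        DisableBlockCache:           true,
    }
    db, err := leveldb.OpenFile("/tmp/compaction-example", o)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()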
- v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - // Move grandparent to level-3 - for _, t := range v.tables[2] { - rec.delTable(2, t.file.Num()) - rec.addTableFile(3, t) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - v = s.version() - for level, want := range []bool{false, true, false, true, false} { - got := len(v.tables[level]) > 0 - if want != got { - t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) - } - } - for i, f := range v.tables[1][:len(v.tables[1])-1] { - nf := v.tables[1][i+1] - if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num()) - } - } - v.release() - - // Compaction with transient error. - v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - stor.SetEmuErrOnce(storage.TypeTable, tsOpSync) - stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite) - stor.SetEmuRandErrProb(0xf0) - for { - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Logf("(expected) b.run: %v", err) - } else { - break - } - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - stor.SetEmuErrOnce(0, tsOpSync) - stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite) - - v = s.version() - if len(v.tables[1]) != len(v.tables[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2])) - } - for i, f0 := range v.tables[1] { - f1 := v.tables[2][i] - iter0 := s.tops.newIterator(f0, nil, nil) - iter1 := s.tops.newIterator(f1, nil, nil) - for j := 0; true; j++ { - next0 := iter0.Next() - next1 := iter1.Next() - if next0 != next1 { - t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) - } - key0 := iter0.Key() - key1 := iter1.Key() - if !bytes.Equal(key0, key1) { - t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) - } - if next0 == false { - break - } - } - iter0.Release() - iter1.Release() - } - v.release() -} - -func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) { - const ( - vSize = 200 * opt.KiB - tSize = 100 * opt.MiB - mIter = 100 - n = tSize / vSize - ) - - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - DisableBlockCache: true, - }) - defer h.close() - - key := func(x int) string { - return fmt.Sprintf("v%06d", x) - } - - // Fill. - value := strings.Repeat("x", vSize) - for i := 0; i < n; i++ { - h.put(key(i), value) - } - h.compactMem() - - // Delete all. 
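The fill-then-delete sequence below leaves the range populated only by deletion markers, and iterating over it is what nudges the DB into compacting them away. The public-API shape of such a scan, assuming db is open and the key range matches the test's key function:

    // Walking a range of deletion markers forces the iterator to skip
    // them one by one, which marks the range as worth compacting.
    iter := db.NewIterator(&util.Range{Limit: []byte("v000100")}, nil)
    for iter.Next() {
        // no live entries expected
    }
    if err := iter.Error(); err != nil {
        log.Fatal(err)
    }
    iter.Release()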
- for i := 0; i < n; i++ { - h.delete(key(i)) - } - h.compactMem() - - var ( - limit = n / limitDiv - - startKey = key(0) - limitKey = key(limit) - maxKey = key(n) - slice = &util.Range{Limit: []byte(limitKey)} - - initialSize0 = h.sizeOf(startKey, limitKey) - initialSize1 = h.sizeOf(limitKey, maxKey) - ) - - t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1))) - - for r := 0; true; r++ { - if r >= mIter { - t.Fatal("taking too long to compact") - } - - // Iterates. - iter := h.db.NewIterator(slice, h.ro) - for iter.Next() { - } - if err := iter.Error(); err != nil { - t.Fatalf("Iter err: %v", err) - } - iter.Release() - - // Wait compaction. - h.waitCompaction() - - // Check size. - size0 := h.sizeOf(startKey, limitKey) - size1 := h.sizeOf(limitKey, maxKey) - t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1))) - if size0 < initialSize0/10 { - break - } - } - - if initialSize1 > 0 { - h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB) - } -} - -func TestDB_IterTriggeredCompaction(t *testing.T) { - testDB_IterTriggeredCompaction(t, 1) -} - -func TestDB_IterTriggeredCompactionHalf(t *testing.T) { - testDB_IterTriggeredCompaction(t, 2) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index ed322988c61..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. -type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -type Sizes []uint64 - -// Sum returns sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s - } - return n -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. 
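Because DB and Snapshot both satisfy the Reader interface defined above, read-only helpers can be written once against the interface. A sketch; countPrefix is an illustrative helper, not part of the package:

    // countPrefix works identically on a live *leveldb.DB and on a
    // *leveldb.Snapshot, since both implement leveldb.Reader.
    func countPrefix(r leveldb.Reader, prefix []byte) (n int, err error) {
        iter := r.NewIterator(util.BytesPrefix(prefix), nil)
        defer iter.Release()
        for iter.Next() {
            n++
        }
        return n, iter.Error()
    }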
-func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { - for _, t := range tables { - tablesMap[t.file.Num()] = false - } - } - - files, err := db.s.getFiles(storage.TypeAll) - if err != nil { - return err - } - - var nTables int - var rem []storage.File - for _, f := range files { - keep := true - switch f.Type() { - case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() - case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() - } else { - keep = f.Num() >= db.journalFile.Num() - } - case storage.TypeTable: - _, keep = tablesMap[f.Num()] - if keep { - tablesMap[f.Num()] = true - nTables++ - } - } - - if !keep { - rem = append(rem, f) - } - } - - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { - if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) - } - - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index f1c6b73270c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(b *Batch) error { - w, err := db.journal.Next() - if err != nil { - return err - } - if _, err := w.Write(b.encode()); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if b.sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) jWriter() { - defer db.closeW.Done() - for { - select { - case b := <-db.journalC: - if b != nil { - db.journalAckC <- db.writeJournal(b) - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) rotateMem(n int) (mem *memDB, err error) { - // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) - if err != nil { - return - } - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - return - } - - // Schedule memdb compaction. 
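writeJournal above only calls Sync when the batch asks for it, so durability is chosen per write. A minimal sketch, assuming db is an open *leveldb.DB:

    // Sync: true makes the write block until the journal is fsynced,
    // trading latency for durability across a process or OS crash.
    wo := &opt.WriteOptions{Sync: true}
    if err := db.Put([]byte("key"), []byte("value"), wo); err != nil {
        log.Fatal(err)
    }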
- db.compSendTrigger(db.mcompCmdC) - return -} - -func (db *DB) flush(n int) (mem *memDB, nn int, err error) { - delayed := false - flush := func() (retry bool) { - v := db.s.version() - defer v.release() - mem = db.getEffectiveMem() - defer func() { - if retry { - mem.decref() - mem = nil - } - }() - nn = mem.mdb.Free() - switch { - case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case nn >= n: - return false - case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): - delayed = true - err = db.compSendIdle(db.tcompCmdC) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mem.mdb.Len() == 0 { - nn = n - } else { - mem.decref() - mem, err = db.rotateMem(n) - if err == nil { - nn = mem.mdb.Free() - } else { - nn = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -// Write apply the given batch to the DB. The batch will be applied -// sequentially. -// -// It is safe to modify the contents of the arguments after Write returns. -func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { - err = db.ok() - if err != nil || b == nil || b.Len() == 0 { - return - } - - b.init(wo.GetSync()) - - // The write happen synchronously. - select { - case db.writeC <- b: - if <-db.writeMergedC { - return <-db.writeAckC - } - case db.writeLockC <- struct{}{}: - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - - merged := 0 - danglingMerge := false - defer func() { - if danglingMerge { - db.writeMergedC <- false - } else { - <-db.writeLockC - } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - }() - - mem, memFree, err := db.flush(b.size()) - if err != nil { - return - } - defer mem.decref() - - // Calculate maximum size of the batch. - m := 1 << 20 - if x := b.size(); x <= 128<<10 { - m = x + (128 << 10) - } - m = minInt(m, memFree) - - // Merge with other batch. -drain: - for b.size() < m && !b.sync { - select { - case nb := <-db.writeC: - if b.size()+nb.size() <= m { - b.append(nb) - db.writeMergedC <- true - merged++ - } else { - danglingMerge = true - break drain - } - default: - break drain - } - } - - // Set batch first seq number relative from last seq. - b.seq = db.seq + 1 - - // Write journal concurrently if it is large enough. - if b.size() >= (128 << 10) { - // Push the write batch to the journal writer - select { - case db.journalC <- b: - // Write into memdb - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - err = ErrClosed - return - } - // Wait for journal writer - select { - case err = <-db.journalAckC: - if err != nil { - // Revert memdb if error detected - if berr := b.revertMemReplay(mem.mdb); berr != nil { - panic(berr) - } - return - } - case _, _ = <-db.closeC: - err = ErrClosed - return - } - } else { - err = db.writeJournal(b) - if err != nil { - return - } - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - } - - // Set last seq number. - db.addSeq(uint64(b.Len())) - - if b.size() >= memFree { - db.rotateMem(0) - } - return -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. 
-// -// It is safe to modify the contents of the arguments after Put returns. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Put(key, value) - return db.Write(b, wo) -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Delete(key) - return db.Write(b, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. -// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mem := db.getEffectiveMem() - defer mem.decref() - if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index 53f13bb24cc..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... -// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... 
-// -// Iterate over subset of database content with a particular prefix: -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index 5d96d6d34d1..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index f31528da75b..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. -func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. -type ErrCorrupted struct { - File *storage.FileInfo - Err error -} - -func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) - } else { - return e.Err.Error() - } -} - -// NewErrCorrupted creates new ErrCorrupted error. 
-func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} -} - -// IsCorrupted returns a boolean indicating whether the error is indicating -// a corruption. -func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicating a corruption due to missing -// files. -type ErrMissingFiles struct { - Files []*storage.FileInfo -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFile sets 'file info' of the given error with the given file. -// Currently only ErrCorrupted is supported, otherwise will do nothing. -func SetFile(err error, f storage.File) error { - switch x := err.(type) { - case *ErrCorrupted: - x.File = storage.NewFileInfo(f) - return x - } - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index 0769157f7ca..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - DisableBlockCache: true, - BlockRestartInterval: 5, - BlockSize: 80, - Compression: opt.NoCompression, - OpenFilesCacheCapacity: -1, - Strict: opt.StrictAll, - WriteBuffer: 1000, - CompactionTableSize: 2000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 20.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index ba1e8c16517..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
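The errors package above separates absence, corruption, and everything else, and callers are expected to branch on that. A sketch of the intended classification, assuming db is open and errors refers to the leveldb/errors subpackage:

    value, err := db.Get([]byte("key"), nil)
    switch {
    case err == leveldb.ErrNotFound:
        // absent key: usually not fatal
    case errors.IsCorrupted(err):
        // corrupted store: stop writing and consider repair
        log.Fatal(err)
    case err != nil:
        // other I/O error: possibly transient, may be retried
        log.Println(err)
    default:
        log.Printf("value: %q", value)
    }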
- -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index fc5ea790f5f..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. - return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. 
-// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. -func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 3175ffcbce9..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d 
mediocre", good, mediocre) - if mediocre > good/5 { - t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a869..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index 806552d1758..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. 
- Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// Array is the interface that wraps BasicArray and basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. - Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int - err error -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() -} - -func (i *basicArrayIterator) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return i.err } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() - return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index f730e0a44ee..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
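Search in BasicArray carries the sort.Search contract: the smallest index whose key compares greater than or equal to the argument, or Len() when none does. A sketch of a conforming implementation over a plain string slice; stringArray is illustrative only:

    type stringArray []string

    func (a stringArray) Len() int { return len(a) }

    // Search returns the smallest i with a[i] >= key, or len(a) if no
    // such element exists, matching the sort.Search convention.
    func (a stringArray) Search(key []byte) int {
        return sort.Search(len(a), func(i int) bool {
            return a[i] >= string(key)
        })
    }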
- -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - - . "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterates and seeks correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 8f9e9339dee..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. - Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { 
- if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains actual key/value pairs. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'indexed iterator', otherwise the iterator will -// continue to the next 'data iterator'. Corruption on 'index iterator' will not be -// ignored and will halt the iterator. -func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 4d98cd7acaf..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - . 
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. - index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index 454112954df..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. 
-// IteratorSeeker is the interface that wraps the 'seek methods'.
-type IteratorSeeker interface {
-	// First moves the iterator to the first key/value pair. If the iterator
-	// only contains one key/value pair then First and Last would move
-	// to the same key/value pair.
-	// It returns whether such a pair exists.
-	First() bool
-
-	// Last moves the iterator to the last key/value pair. If the iterator
-	// only contains one key/value pair then First and Last would move
-	// to the same key/value pair.
-	// It returns whether such a pair exists.
-	Last() bool
-
-	// Seek moves the iterator to the first key/value pair whose key is greater
-	// than or equal to the given key.
-	// It returns whether such a pair exists.
-	//
-	// It is safe to modify the contents of the argument after Seek returns.
-	Seek(key []byte) bool
-
-	// Next moves the iterator to the next key/value pair.
-	// It returns false if the iterator is exhausted.
-	Next() bool
-
-	// Prev moves the iterator to the previous key/value pair.
-	// It returns false if the iterator is exhausted.
-	Prev() bool
-}
-
-// CommonIterator is the interface that wraps common iterator methods.
-type CommonIterator interface {
-	IteratorSeeker
-
-	// util.Releaser is the interface that wraps the basic Release method.
-	// When called, Release releases any resources associated with the
-	// iterator.
-	util.Releaser
-
-	// util.ReleaseSetter is the interface that wraps the basic SetReleaser
-	// method.
-	util.ReleaseSetter
-
-	// TODO: Remove this when ready.
-	Valid() bool
-
-	// Error returns any accumulated error. Exhausting all the key/value pairs
-	// is not considered to be an error.
-	Error() error
-}
-
-// Iterator iterates over a DB's key/value pairs in key order.
-//
-// When an error is encountered, any 'seek method' will return false and will
-// yield no key/value pairs. The error can be queried by calling the Error
-// method. Calling Release is still necessary.
-//
-// An iterator must be released after use, but it is not necessary to read
-// an iterator until exhaustion.
-// Also, an iterator is not necessarily goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-type Iterator interface {
-	CommonIterator
-
-	// Key returns the key of the current key/value pair, or nil if done.
-	// The caller should not modify the contents of the returned slice, and
-	// its contents may change on the next call to any 'seek method'.
-	Key() []byte
-
-	// Value returns the value of the current key/value pair, or nil if done.
-	// The caller should not modify the contents of the returned slice, and
-	// its contents may change on the next call to any 'seek method'.
-	Value() []byte
-}
-
-// ErrorCallbackSetter is the interface that wraps the basic SetErrorCallback
-// method.
-//
-// ErrorCallbackSetter is implemented by the indexed and merged iterators.
-type ErrorCallbackSetter interface {
-	// SetErrorCallback sets an error callback for the corresponding
-	// iterator. Use nil to clear the callback.
-	SetErrorCallback(f func(err error))
-}
-
-type emptyIterator struct {
-	util.BasicReleaser
-	err error
-}
-
-func (i *emptyIterator) rErr() {
-	if i.err == nil && i.Released() {
-		i.err = ErrIterReleased
-	}
-}
-
-func (*emptyIterator) Valid() bool            { return false }
-func (i *emptyIterator) First() bool          { i.rErr(); return false }
-func (i *emptyIterator) Last() bool           { i.rErr(); return false }
-func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
-func (i *emptyIterator) Next() bool           { i.rErr(); return false }
-func (i *emptyIterator) Prev() bool           { i.rErr(); return false }
-func (*emptyIterator) Key() []byte            { return nil }
-func (*emptyIterator) Value() []byte          { return nil }
-func (i *emptyIterator) Error() error         { return i.err }
-
-// NewEmptyIterator creates an empty iterator. The err parameter can be
-// nil, but if not nil the given err will be returned by the Error method.
-func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index e44291a93c3..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package iterator_test - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunSuite(t, "Iterator Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 0a14ca8a550..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && 
(key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) - if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) - for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'merged iterator', otherwise the iterator will -// continue to the next 'input iterator'. 
-func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
-	return &mergedIterator{
-		iters:  iters,
-		cmp:    cmp,
-		strict: strict,
-		keys:   make([][]byte, len(iters)),
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
deleted file mode 100644
index 79cb970b691..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2014, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package iterator_test
-
-import (
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer"
-	. "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
-)
-
-var _ = testutil.Defer(func() {
-	Describe("Merged iterator", func() {
-		Test := func(filled int, empty int) func() {
-			return func() {
-				It("Should iterate and seek correctly", func(done Done) {
-					rnd := testutil.NewRand()
-
-					// Build key/value.
-					filledKV := make([]testutil.KeyValue, filled)
-					kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
-					kv.Iterate(func(i int, key, value []byte) {
-						filledKV[rnd.Intn(filled)].Put(key, value)
-					})
-
-					// Create iterators.
-					iters := make([]Iterator, filled+empty)
-					for i := range iters {
-						if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
-							filled--
-							Expect(filledKV[filled].Len()).ShouldNot(BeZero())
-							iters[i] = NewArrayIterator(filledKV[filled])
-						} else {
-							empty--
-							iters[i] = NewEmptyIterator(nil)
-						}
-					}
-
-					// Test the iterator.
-					t := testutil.IteratorTesting{
-						KeyValue: kv.Clone(),
-						Iter:     NewMergedIterator(iters, comparer.DefaultComparer, true),
-					}
-					testutil.DoIteratorTesting(&t)
-					done <- true
-				}, 1.5)
-			}
-		}
-
-		Describe("with three, all filled iterators", Test(3, 0))
-		Describe("with one filled, one empty iterators", Test(1, 1))
-		Describe("with one filled, two empty iterators", Test(1, 2))
-	})
-})
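The merged iterator removed above can be exercised with a short, self-contained sketch (editorial, again assuming the upstream import paths). Merging two empty iterators trivially satisfies the no-duplicate-keys contract and produces an immediately exhausted iterator:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

func main() {
	// The inputs may not be nil and may not share keys; empty iterators
	// satisfy both constraints.
	iters := []iterator.Iterator{
		iterator.NewEmptyIterator(nil),
		iterator.NewEmptyIterator(nil),
	}
	// strict=true: corruption errors halt the merged iterator.
	merged := iterator.NewMergedIterator(iters, comparer.DefaultComparer, true)
	defer merged.Release()

	for merged.Next() {
		fmt.Printf("%s = %s\n", merged.Key(), merged.Value())
	}
	if err := merged.Error(); err != nil {
		fmt.Println("merged iterator error:", err)
	}
}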
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
deleted file mode 100644
index 0ab2bffcc5e..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
-// License, author, and contributor information can be found at the URLs below, respectively:
-// https://code.google.com/p/leveldb-go/source/browse/LICENSE
-// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
-// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
-
-// Package journal reads and writes sequences of journals. Each journal is a stream
-// of bytes that completes before the next journal starts.
-//
-// When reading, call Next to obtain an io.Reader for the next journal. Next will
-// return io.EOF when there are no more journals. It is valid to call Next
-// without reading the current journal to exhaustion.
-//
-// When writing, call Next to obtain an io.Writer for the next journal. Calling
-// Next finishes the current journal. Call Close to finish the final journal.
-//
-// Optionally, call Flush to finish the current journal and flush the underlying
-// writer without starting a new journal. To start a new journal after flushing,
-// call Next.
-//
-// Neither Readers nor Writers are safe to use concurrently.
-//
-// Example code:
-//	func read(r io.Reader) ([]string, error) {
-//		var ss []string
-//		journals := journal.NewReader(r, nil, true, true)
-//		for {
-//			j, err := journals.Next()
-//			if err == io.EOF {
-//				break
-//			}
-//			if err != nil {
-//				return nil, err
-//			}
-//			s, err := ioutil.ReadAll(j)
-//			if err != nil {
-//				return nil, err
-//			}
-//			ss = append(ss, string(s))
-//		}
-//		return ss, nil
-//	}
-//
-//	func write(w io.Writer, ss []string) error {
-//		journals := journal.NewWriter(w)
-//		for _, s := range ss {
-//			j, err := journals.Next()
-//			if err != nil {
-//				return err
-//			}
-//			if _, err := j.Write([]byte(s)); err != nil {
-//				return err
-//			}
-//		}
-//		return journals.Close()
-//	}
-//
-// The wire format is that the stream is divided into 32 KiB blocks, and each
-// block contains a number of tightly packed chunks. Chunks cannot cross block
-// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
-// block must be zero.
-//
-// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
-// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
-// followed by a payload. The checksum is over the chunk type and the payload.
-//
-// There are four chunk types, indicating whether the chunk is the full journal,
-// or the first, middle, or last chunk of a multi-chunk journal. A multi-chunk
-// journal has one first chunk, zero or more middle chunks, and one last chunk.
-//
-// The wire format allows for limited recovery in the face of data corruption:
-// on a format error (such as a checksum mismatch), the reader moves to the
-// next block and looks for the next full or first chunk.
-package journal
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util"
-)
-
-// These constants are part of the wire format and should not be changed.
-const (
-	fullChunkType   = 1
-	firstChunkType  = 2
-	middleChunkType = 3
-	lastChunkType   = 4
-)
-
-const (
-	blockSize  = 32 * 1024
-	headerSize = 7
-)
-
-type flusher interface {
-	Flush() error
-}
-
-// ErrCorrupted is the error type generated by a corrupted block or chunk.
-type ErrCorrupted struct {
-	Size   int
-	Reason string
-}
-
-func (e *ErrCorrupted) Error() string {
-	return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
-}
-
-// Dropper is the interface that wraps the simple Drop method. The Drop
-// method is called when the journal reader drops a block or chunk.
-type Dropper interface {
-	Drop(err error)
-}
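The 7-byte chunk header described in the package comment above can be decoded with encoding/binary alone. A standalone editorial sketch; the checksum bytes here are a placeholder value, not a real CRC (goleveldb computes the checksum over the chunk type plus the payload):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A hypothetical full chunk carrying the 5-byte payload "hello".
	block := []byte{
		0xde, 0xad, 0xbe, 0xef, // 4-byte checksum (placeholder value)
		0x05, 0x00, // 2-byte little-endian length = 5
		0x01,                    // 1-byte chunk type: 1 = full chunk
		'h', 'e', 'l', 'l', 'o', // payload
	}
	checksum := binary.LittleEndian.Uint32(block[0:4])
	length := binary.LittleEndian.Uint16(block[4:6])
	chunkType := block[6]
	payload := block[7 : 7+int(length)]
	fmt.Printf("checksum=%#x type=%d payload=%q\n", checksum, chunkType, payload)
}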
-// Reader reads journals from an underlying io.Reader.
-type Reader struct {
-	// r is the underlying reader.
-	r io.Reader
-	// the dropper.
-	dropper Dropper
-	// strict flag.
-	strict bool
-	// checksum flag.
-	checksum bool
-	// seq is the sequence number of the current journal.
-	seq int
-	// buf[i:j] is the unread portion of the current chunk's payload.
-	// The low bound, i, excludes the chunk header.
-	i, j int
-	// n is the number of bytes of buf that are valid. Once reading has started,
-	// only the final block can have n < blockSize.
-	n int
-	// last is whether the current chunk is the last chunk of the journal.
-	last bool
-	// err is any accumulated error.
-	err error
-	// buf is the buffer.
-	buf [blockSize]byte
-}
-
-// NewReader returns a new reader. The dropper may be nil, and if
-// strict is true then a corrupted or invalid chunk will halt the journal
-// reader entirely.
-func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
-	return &Reader{
-		r:        r,
-		dropper:  dropper,
-		strict:   strict,
-		checksum: checksum,
-		last:     true,
-	}
-}
-
-var errSkip = errors.New("leveldb/journal: skipped")
-
-func (r *Reader) corrupt(n int, reason string, skip bool) error {
-	if r.dropper != nil {
-		r.dropper.Drop(&ErrCorrupted{n, reason})
-	}
-	if r.strict && !skip {
-		r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
-		return r.err
-	}
-	return errSkip
-}
-
-// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
-// next block into the buffer if necessary.
-func (r *Reader) nextChunk(first bool) error {
-	for {
-		if r.j+headerSize <= r.n {
-			checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
-			length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
-			chunkType := r.buf[r.j+6]
-
-			if checksum == 0 && length == 0 && chunkType == 0 {
-				// Drop entire block.
-				m := r.n - r.j
-				r.i = r.n
-				r.j = r.n
-				return r.corrupt(m, "zero header", false)
-			} else {
-				m := r.n - r.j
-				r.i = r.j + headerSize
-				r.j = r.j + headerSize + int(length)
-				if r.j > r.n {
-					// Drop entire block.
-					r.i = r.n
-					r.j = r.n
-					return r.corrupt(m, "chunk length overflows block", false)
-				} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
-					// Drop entire block.
-					r.i = r.n
-					r.j = r.n
-					return r.corrupt(m, "checksum mismatch", false)
-				}
-			}
-			if first && chunkType != fullChunkType && chunkType != firstChunkType {
-				m := r.j - r.i
-				r.i = r.j
-				// Report the error, but skip it.
-				return r.corrupt(m+headerSize, "orphan chunk", true)
-			}
-			r.last = chunkType == fullChunkType || chunkType == lastChunkType
-			return nil
-		}
-
-		// The last block.
-		if r.n < blockSize && r.n > 0 {
-			if !first {
-				return r.corrupt(0, "missing chunk part", false)
-			}
-			r.err = io.EOF
-			return r.err
-		}
-
-		// Read block.
-		n, err := io.ReadFull(r.r, r.buf[:])
-		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
-			return err
-		}
-		if n == 0 {
-			if !first {
-				return r.corrupt(0, "missing chunk part", false)
-			}
-			r.err = io.EOF
-			return r.err
-		}
-		r.i, r.j, r.n = 0, 0, n
-	}
-}
-
-// Next returns a reader for the next journal. It returns io.EOF if there are no
-// more journals. The reader returned becomes stale after the next Next call,
-// and should no longer be used. If strict is false, the reader returns
-// io.ErrUnexpectedEOF when it encounters a corrupted journal.
-func (r *Reader) Next() (io.Reader, error) {
-	r.seq++
-	if r.err != nil {
-		return nil, r.err
-	}
-	r.i = r.j
-	for {
-		if err := r.nextChunk(true); err == nil {
-			break
-		} else if err != errSkip {
-			return nil, err
-		}
-	}
-	return &singleReader{r, r.seq, nil}, nil
-}
-
-// Reset resets the journal reader, allowing the reader to be reused. Reset
-// returns the last accumulated error.
-func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. 
-func (w *Writer) Close() error {
-	w.seq++
-	w.writePending()
-	if w.err != nil {
-		return w.err
-	}
-	w.err = errors.New("leveldb/journal: closed Writer")
-	return nil
-}
-
-// Flush finishes the current journal, writes to the underlying writer, and
-// flushes it if that writer implements interface{ Flush() error }.
-func (w *Writer) Flush() error {
-	w.seq++
-	w.writePending()
-	if w.err != nil {
-		return w.err
-	}
-	if w.f != nil {
-		w.err = w.f.Flush()
-		return w.err
-	}
-	return nil
-}
-
-// Reset resets the journal writer, allowing the writer to be reused. Reset
-// also closes the journal writer if it is not already closed.
-func (w *Writer) Reset(writer io.Writer) (err error) {
-	w.seq++
-	if w.err == nil {
-		w.writePending()
-		err = w.err
-	}
-	w.w = writer
-	w.f, _ = writer.(flusher)
-	w.i = 0
-	w.j = 0
-	w.written = 0
-	w.first = false
-	w.pending = false
-	w.err = nil
-	return
-}
-
-// Next returns a writer for the next journal. The writer returned becomes stale
-// after the next Close, Flush or Next call, and should no longer be used.
-func (w *Writer) Next() (io.Writer, error) {
-	w.seq++
-	if w.err != nil {
-		return nil, w.err
-	}
-	if w.pending {
-		w.fillHeader(true)
-	}
-	w.i = w.j
-	w.j = w.j + headerSize
-	// Check if there is room in the block for the header.
-	if w.j > blockSize {
-		// Fill in the rest of the block with zeroes.
-		for k := w.i; k < blockSize; k++ {
-			w.buf[k] = 0
-		}
-		w.writeBlock()
-		if w.err != nil {
-			return nil, w.err
-		}
-	}
-	w.first = true
-	w.pending = true
-	return singleWriter{w, w.seq}, nil
-}
-
-type singleWriter struct {
-	w   *Writer
-	seq int
-}
-
-func (x singleWriter) Write(p []byte) (int, error) {
-	w := x.w
-	if w.seq != x.seq {
-		return 0, errors.New("leveldb/journal: stale writer")
-	}
-	if w.err != nil {
-		return 0, w.err
-	}
-	n0 := len(p)
-	for len(p) > 0 {
-		// Write a block, if it is full.
-		if w.j == blockSize {
-			w.fillHeader(false)
-			w.writeBlock()
-			if w.err != nil {
-				return 0, w.err
-			}
-			w.first = false
-		}
-		// Copy bytes into the buffer.
-		n := copy(w.buf[w.j:], p)
-		w.j += n
-		p = p[n:]
-	}
-	return n0, nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
deleted file mode 100644
index 0fcf22599f2..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
+++ /dev/null
@@ -1,818 +0,0 @@
-// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
-// License, author, and contributor information can be found at the URLs below, respectively:
-// https://code.google.com/p/leveldb-go/source/browse/LICENSE
-// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
-// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
-
-package journal
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"strings"
-	"testing"
-)
-
-type dropper struct {
-	t *testing.T
-}
-
-func (d dropper) Drop(err error) {
-	d.t.Log(err)
-}
-
-func short(s string) string {
-	if len(s) < 64 {
-		return s
-	}
-	return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
-}
-
-// big returns a string of length n, composed of repetitions of partial.
-func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. - w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. 
- if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != '1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := 
NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). 
-	rr, err := r.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	n, err := io.Copy(ioutil.Discard, rr)
-	if err != nil {
-		t.Fatalf("read #0: %v", err)
-	}
-	if want := int64(blockSize / 2); n != want {
-		t.Fatalf("read #0: got %d bytes want %d", n, want)
-	}
-
-	// Second read (third record).
-	rr, err = r.Next()
-	if err != nil {
-		t.Fatal(err)
-	}
-	n, err = io.Copy(ioutil.Discard, rr)
-	if err != nil {
-		t.Fatalf("read #1: %v", err)
-	}
-	if want := int64(blockSize-headerSize) + 1; n != want {
-		t.Fatalf("read #1: got %d bytes want %d", n, want)
-	}
-
-	if _, err := r.Next(); err != io.EOF {
-		t.Fatalf("last next: unexpected error: %v", err)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
deleted file mode 100644
index 769c83be0ff..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-import (
-	"encoding/binary"
-	"fmt"
-
-	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
-)
-
-type ErrIkeyCorrupted struct {
-	Ikey   []byte
-	Reason string
-}
-
-func (e *ErrIkeyCorrupted) Error() string {
-	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
-}
-
-func newErrIkeyCorrupted(ikey []byte, reason string) error {
-	return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
-}
-
-type kType int
-
-func (kt kType) String() string {
-	switch kt {
-	case ktDel:
-		return "d"
-	case ktVal:
-		return "v"
-	}
-	return "x"
-}
-
-// Value types encoded as the last component of internal keys.
-// Don't modify; these values are saved to disk.
-const (
-	ktDel kType = iota
-	ktVal
-)
-
-// ktSeek defines the kType that should be passed when constructing an
-// internal key for seeking to a particular sequence number (since we
-// sort sequence numbers in decreasing order and the value type is
-// embedded as the low 8 bits in the sequence number in internal keys,
-// we need to use the highest-numbered ValueType, not the lowest).
-const ktSeek = ktVal
-
-const (
-	// Maximum value possible for a sequence number; the low 8 bits are
-	// used by the value type, so both can be packed into a single
-	// 64-bit integer.
-	kMaxSeq uint64 = (uint64(1) << 56) - 1
-	// Maximum value possible for packed sequence number and type.
-	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
-)
-
-// Maximum number encoded in bytes.
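The sequence/type packing documented above is plain bit arithmetic: an internal key is the user key followed by 8 little-endian bytes holding (seq << 8) | keyType. An editorial sketch mirroring newIkey and parseIkey; the constants below are illustrative copies, not the package's identifiers:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		ktVal  = 1                     // value record, as in key.go
		maxSeq = (uint64(1) << 56) - 1 // 56 usable bits for the sequence
	)
	ukey, seq := []byte("foo"), uint64(100)

	// Encode: user key, then the packed 8-byte trailer.
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(ktVal))

	// Decode: the trailer inverts the packing.
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	fmt.Printf("ukey=%q seq=%d type=%d (max seq %d)\n",
		ik[:len(ik)-8], num>>8, num&0xff, maxSeq)
}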
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "" - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 5c368cd678b..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestIkey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIkeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) -} - -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go 
b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index df1164374e8..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index 6656ce9a943..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index 4e03a7cd542..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. 
-package memdb - -import ( - "math/rand" - "sync" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
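The DB type defined next is a skiplist flattened into two plain slices. Its tower heights come from a geometric draw with branching factor 4, capped at tMaxHeight (12), which is what randHeight below implements. A standalone sketch of that draw, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"math/rand"
)

// sketchHeight mirrors randHeight below: each additional level is kept
// with probability 1/4, up to a fixed cap, so about 3/4 of all nodes
// stay at height 1.
func sketchHeight(rnd *rand.Rand) int {
	const branching, maxHeight = 4, 12
	h := 1
	for h < maxHeight && rnd.Intn(branching) == 0 {
		h++
	}
	return h
}

func main() {
	rnd := rand.New(rand.NewSource(0xdeadbeef)) // the same seed the memdb uses
	counts := make(map[int]int)
	for i := 0; i < 1000; i++ {
		counts[sketchHeight(rnd)]++
	}
	fmt.Println(counts) // height 1 dominates; each taller tower is ~4x rarer
}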
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. 
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
-func (p *DB) Get(key []byte) (value []byte, err error) {
-	p.mu.RLock()
-	if node, exact := p.findGE(key, false); exact {
-		o := p.nodeData[node] + p.nodeData[node+nKey]
-		value = p.kvData[o : o+p.nodeData[node+nVal]]
-	} else {
-		err = ErrNotFound
-	}
-	p.mu.RUnlock()
-	return
-}
-
-// Find finds the key/value pair whose key is greater than or equal to the
-// given key. It returns ErrNotFound if the table doesn't contain
-// such a pair.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Find returns.
-func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
-	p.mu.RLock()
-	if node, _ := p.findGE(key, false); node != 0 {
-		n := p.nodeData[node]
-		m := n + p.nodeData[node+nKey]
-		rkey = p.kvData[n:m]
-		value = p.kvData[m : m+p.nodeData[node+nVal]]
-	} else {
-		err = ErrNotFound
-	}
-	p.mu.RUnlock()
-	return
-}
-
-// NewIterator returns an iterator of the DB.
-// The returned iterator is not goroutine-safe, but it is safe to use
-// multiple iterators concurrently, with each in a dedicated goroutine.
-// It is also safe to use an iterator concurrently while modifying its
-// underlying DB. However, the resultant key/value pairs are not guaranteed
-// to be a consistent snapshot of the DB at a particular point in time.
-//
-// Slice allows slicing the iterator to only contain keys in the given
-// range. A nil Range.Start is treated as a key before all keys in the
-// DB. And a nil Range.Limit is treated as a key after all keys in
-// the DB.
-//
-// The iterator must be released after use, by calling the Release method.
-//
-// Also read the Iterator documentation of the leveldb/iterator package.
-func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
-	return &dbIter{p: p, slice: slice}
-}
-
-// Capacity returns the keys/values buffer capacity.
-func (p *DB) Capacity() int {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return cap(p.kvData)
-}
-
-// Size returns the sum of key and value lengths. Note that deleted
-// key/value pairs will not be accounted for, but they will still consume
-// the buffer, since the buffer is append-only.
-func (p *DB) Size() int {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return p.kvSize
-}
-
-// Free returns the free space left in the keys/values buffer before it
-// needs to grow.
-func (p *DB) Free() int {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return cap(p.kvData) - len(p.kvData)
-}
-
-// Len returns the number of entries in the DB.
-func (p *DB) Len() int {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return p.n
-}
-
-// Reset resets the DB to its initial empty state, allowing the buffer
-// to be reused.
-func (p *DB) Reset() {
-	p.rnd = rand.New(rand.NewSource(0xdeadbeef))
-	p.maxHeight = 1
-	p.n = 0
-	p.kvSize = 0
-	p.kvData = p.kvData[:0]
-	p.nodeData = p.nodeData[:4+tMaxHeight]
-	p.nodeData[nKV] = 0
-	p.nodeData[nKey] = 0
-	p.nodeData[nVal] = 0
-	p.nodeData[nHeight] = tMaxHeight
-	for n := 0; n < tMaxHeight; n++ {
-		p.nodeData[4+n] = 0
-		p.prevNode[n] = 0
-	}
-}
-
-// New creates a new initialized in-memory key/value DB. The capacity
-// is the initial key/value buffer capacity. The capacity is advisory,
-// not enforced.
-//
-// The returned DB instance is goroutine-safe.
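Before the constructor itself, a quick usage sketch of this memdb API. It uses the upstream import path, since the vendored copy is what this change deletes; error handling is abbreviated:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	db := memdb.New(comparer.DefaultComparer, 0)

	// Put overwrites; the memdb is not a multi-map.
	db.Put([]byte("a"), []byte("1"))
	db.Put([]byte("b"), []byte("2"))

	if v, err := db.Get([]byte("a")); err == nil {
		fmt.Printf("a=%s\n", v)
	}

	// A nil *util.Range iterates everything; Release is mandatory.
	it := db.NewIterator(nil)
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	it.Release()
}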
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 7116b8f28a5..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index ef6a7b21f43..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - 
testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. - rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index 79ab9e748a7..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "math" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultIteratorSamplingRate = 1 * MiB - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. 
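One convention worth spelling out before the Cacher plumbing and the large Options struct below: every getter on *Options tolerates a nil receiver (and zero-valued fields) and silently substitutes the Default* constants above, so the rest of the package can thread options through without nil checks. A minimal sketch of the idiom, with hypothetical names:

package main

import "fmt"

const defaultBlockSize = 4 * 1024

// Options is a stand-in for opt.Options; zero means "use the default".
type Options struct {
	BlockSize int
}

// GetBlockSize is safe to call on a nil receiver, mirroring the
// GetXxx methods below.
func (o *Options) GetBlockSize() int {
	if o == nil || o.BlockSize <= 0 {
		return defaultBlockSize
	}
	return o.BlockSize
}

func main() {
	var o *Options                // nil is fine
	fmt.Println(o.GetBlockSize()) // 4096
	o = &Options{BlockSize: 8192}
	fmt.Println(o.GetBlockSize()) // 8192
}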
-type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. -type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. - // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. - StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. 
- BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. - // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. - CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. 
- ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. - // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // IteratorSamplingRate defines approximate gap (in bytes) between read - // sampling of an iterator. The samples will be used to determine when - // compaction should be triggered. - // - // The default is 1MiB. - IteratorSamplingRate int - - // MaxMemCompationLevel defines maximum level a newly compacted 'memdb' - // will be pushed into if doesn't creates overlap. This should less than - // NumLevel. Use -1 for level-0. - // - // The default is 2. - MaxMemCompationLevel int - - // NumLevel defines number of database level. The level shouldn't changed - // between opens, or the database will panic. - // - // The default is 7. - NumLevel int - - // OpenFilesCacher provides cache algorithm for open files caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. 
- WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity == 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity < 0 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) -} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - return 
o.DisableCompactionBackoff -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetIteratorSamplingRate() int { - if o == nil || o.IteratorSamplingRate <= 0 { - return DefaultIteratorSamplingRate - } - return o.IteratorSamplingRate -} - -func (o *Options) GetMaxMemCompationLevel() int { - level := DefaultMaxMemCompationLevel - if o != nil { - if o.MaxMemCompationLevel > 0 { - level = o.MaxMemCompationLevel - } else if o.MaxMemCompationLevel < 0 { - level = 0 - } - } - if level >= o.GetNumLevel() { - return o.GetNumLevel() - 1 - } - return level -} - -func (o *Options) GetNumLevel() int { - if o == nil || o.NumLevel <= 0 { - return DefaultNumLevel - } - return o.NumLevel -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity == 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity < 0 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader that has effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. 
Sync being true means write followed by fsync. - // - // The default value is false. - Sync bool -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index e1478b1b995..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. - if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() - - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) - - for level := 0; level < numLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index 
097c0f22513..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. - stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 - - stor storage.Storage - storLock util.Releaser - o *cachedOptions - icmp *iComparer - tops *tOps - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFile storage.File - - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.manifestFile = nil - s.stVersion = nil -} - -// Release session lock. -func (s *session) release() { - s.storLock.Release() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. 
- if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - m, err := s.stor.GetManifest() - if err != nil { - return - } - - reader, err := m.Open() - if err != nil { - return - } - defer reader.Close() - strict := s.o.GetStrict(opt.StrictManifest) - jr := journal.NewReader(reader, dropper{s, m}, strict, true) - - staging := s.stVersion.newStaging() - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFile(err, m) - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFile(err, m) - if strict || !errors.IsCorrupted(err) { - return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) - } - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") - } - - s.manifestFile = m - s.setVersion(staging.finish()) - s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} - -// Pick a compaction based on current state; need external synchronization. -func (s *session) pickCompaction() *compaction { - v := s.version() - - var level int - var t0 tFiles - if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - level = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, level, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { - v := s.version() - - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. 
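	// With the default options above, the limit computed just below works
	// out the same at every level: GetCompactionSourceLimit(level) is
	// CompactionSourceLimitFactor (1) times GetCompactionTableSize(level+1),
	// and the default table size is 2 MiB * 1.0^(level+1) = 2 MiB throughout.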
- if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, level, t0) -} - -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - level int - tables [2]tFiles - maxGPOverlaps uint64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes uint64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. -func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] - - t0, t1 := c.tables[0], c.tables[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.tables[0], c.tables[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. 
-func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey iKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { - // Special case for level-0 - icap = len(c.tables[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - Strict: opt.StrictOverride, - } - strict := c.s.o.GetStrict(opt.StrictCompaction) - if strict { - ro.Strict |= opt.StrictReader - } - - for i, tables := range c.tables { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. - if c.level+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, strict) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 375d49beba6..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bufio" - "encoding/binary" - "io" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. 
-const (
-	recComparer    = 1
-	recJournalNum  = 2
-	recNextFileNum = 3
-	recSeqNum      = 4
-	recCompPtr     = 5
-	recDelTable    = 6
-	recAddTable    = 7
-	// 8 was used for large value refs
-	recPrevJournalNum = 9
-)
-
-type cpRecord struct {
-	level int
-	ikey  iKey
-}
-
-type atRecord struct {
-	level int
-	num   uint64
-	size  uint64
-	imin  iKey
-	imax  iKey
-}
-
-type dtRecord struct {
-	level int
-	num   uint64
-}
-
-type sessionRecord struct {
-	numLevel int
-
-	hasRec         int
-	comparer       string
-	journalNum     uint64
-	prevJournalNum uint64
-	nextFileNum    uint64
-	seqNum         uint64
-	compPtrs       []cpRecord
-	addedTables    []atRecord
-	deletedTables  []dtRecord
-
-	scratch [binary.MaxVarintLen64]byte
-	err     error
-}
-
-func (p *sessionRecord) has(rec int) bool {
-	return p.hasRec&(1<<uint(rec)) != 0
-}
-
-func (p *sessionRecord) setComparer(name string) {
-	p.hasRec |= 1 << recComparer
-	p.comparer = name
-}
-
-func (p *sessionRecord) setJournalNum(num uint64) {
-	p.hasRec |= 1 << recJournalNum
-	p.journalNum = num
-}
-
-func (p *sessionRecord) setPrevJournalNum(num uint64) {
-	p.hasRec |= 1 << recPrevJournalNum
-	p.prevJournalNum = num
-}
-
-func (p *sessionRecord) setNextFileNum(num uint64) {
-	p.hasRec |= 1 << recNextFileNum
-	p.nextFileNum = num
-}
-
-func (p *sessionRecord) setSeqNum(num uint64) {
-	p.hasRec |= 1 << recSeqNum
-	p.seqNum = num
-}
-
-func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
-	p.hasRec |= 1 << recCompPtr
-	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
-}
-
-func (p *sessionRecord) resetCompPtrs() {
-	p.hasRec &= ^(1 << recCompPtr)
-	p.compPtrs = p.compPtrs[:0]
-}
-
-func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
-	p.hasRec |= 1 << recAddTable
-	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
-}
-
-func (p *sessionRecord) resetAddedTables() {
-	p.hasRec &= ^(1 << recAddTable)
-	p.addedTables = p.addedTables[:0]
-}
-
-func (p *sessionRecord) delTable(level int, num uint64) {
-	p.hasRec |= 1 << recDelTable
-	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
-}
-
-func (p *sessionRecord) resetDeletedTables() {
-	p.hasRec &= ^(1 << recDelTable)
-	p.deletedTables = p.deletedTables[:0]
-}
-
-func (p *sessionRecord) putUvarint(w io.Writer, x uint64) {
-	if p.err != nil {
-		return
-	}
-	n := binary.PutUvarint(p.scratch[:], x)
-	_, p.err = w.Write(p.scratch[:n])
-}
-
-func (p *sessionRecord) putBytes(w io.Writer, x []byte) {
-	if p.err != nil {
-		return
-	}
-	p.putUvarint(w, uint64(len(x)))
-	if p.err != nil {
-		return
-	}
-	_, p.err = w.Write(x)
-}
-
-func (p *sessionRecord) encode(w io.Writer) error {
-	p.err = nil
-	if p.has(recComparer) {
-		p.putUvarint(w, recComparer)
-		p.putBytes(w, []byte(p.comparer))
-	}
-	if p.has(recJournalNum) {
-		p.putUvarint(w, recJournalNum)
-		p.putUvarint(w, p.journalNum)
-	}
-	if p.has(recNextFileNum) {
-		p.putUvarint(w, recNextFileNum)
-		p.putUvarint(w, p.nextFileNum)
-	}
-	if p.has(recSeqNum) {
-		p.putUvarint(w, recSeqNum)
-		p.putUvarint(w, p.seqNum)
-	}
-	for _, r := range p.compPtrs {
-		p.putUvarint(w, recCompPtr)
-		p.putUvarint(w, uint64(r.level))
-		p.putBytes(w, r.ikey)
-	}
-	for _, r := range p.deletedTables {
-		p.putUvarint(w, recDelTable)
-		p.putUvarint(w, uint64(r.level))
-		p.putUvarint(w, r.num)
-	}
-	for _, r := range p.addedTables {
-		p.putUvarint(w, recAddTable)
-		p.putUvarint(w, uint64(r.level))
-		p.putUvarint(w, r.num)
-		p.putUvarint(w, r.size)
-		p.putBytes(w, r.imin)
-		p.putBytes(w, r.imax)
-	}
-	return p.err
-}
-
-func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
-	if p.err != nil {
-		return 0
-	}
-	x, err := binary.ReadUvarint(r)
-	if err != nil {
-		if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
-		} else if strings.HasPrefix(err.Error(), "leveldb/journal:") {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
-		} else {
-			p.err = err
-		}
-		return 0
-	}
-	return x
-}
-
-func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
-	return p.readUvarintMayEOF(field, r, false)
-}
-
-func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
-	if p.err != nil {
-		return nil
-	}
-	n := p.readUvarint(field, r)
-	if p.err != nil {
-		return nil
-	}
-	x := make([]byte, n)
-	_, p.err = io.ReadFull(r, x)
-	if p.err != nil {
-		if p.err == io.ErrUnexpectedEOF {
-			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
-		}
-		return nil
-	}
-	return x
-}
-
-func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
-	if p.err != nil {
-		return 0
-	}
-	x := p.readUvarint(field, r)
-	if p.err != nil {
-		return 0
-	}
-	if x >= uint64(p.numLevel) {
-		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
-		return 0
-	}
-	return int(x)
-}
-
-func (p *sessionRecord) decode(r io.Reader) error {
-	br, ok := r.(byteReader)
-	if !ok {
-		br = bufio.NewReader(r)
-	}
-	p.err = nil
-	for p.err == nil {
-		rec := p.readUvarintMayEOF("field-header", br, true)
-		if p.err != nil {
-			if p.err == io.EOF {
-				return nil
-			}
-			return p.err
-		}
-		switch rec {
-		case recComparer:
-			x := p.readBytes("comparer", br)
-			if p.err == nil {
-				p.setComparer(string(x))
-			}
-		case recJournalNum:
-			x := p.readUvarint("journal-num", br)
-			if p.err == nil {
-				p.setJournalNum(x)
-			}
-		case recPrevJournalNum:
-			x := p.readUvarint("prev-journal-num", br)
-			if p.err == nil {
-				p.setPrevJournalNum(x)
-			}
-		case recNextFileNum:
-			x := p.readUvarint("next-file-num", br)
-			if p.err == nil {
-				p.setNextFileNum(x)
-			}
-		case recSeqNum:
-			x := p.readUvarint("seq-num", br)
-			if p.err == nil {
-				p.setSeqNum(x)
-			}
-		case recCompPtr:
-			level := p.readLevel("comp-ptr.level", br)
-			ikey := p.readBytes("comp-ptr.ikey", br)
-			if p.err == nil {
-				p.addCompPtr(level, iKey(ikey))
-			}
-		case recAddTable:
-			level := p.readLevel("add-table.level", br)
-			num := p.readUvarint("add-table.num", br)
-			size := p.readUvarint("add-table.size", br)
-			imin := p.readBytes("add-table.imin", br)
-			imax := p.readBytes("add-table.imax", br)
-			if p.err == nil {
-				p.addTable(level, num, size, imin, imax)
-			}
-		case recDelTable:
-			level := p.readLevel("del-table.level", br)
-			num := p.readUvarint("del-table.num", br)
-			if p.err == nil {
-				p.delTable(level, num)
-			}
-		}
-	}
-
-	return p.err
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
deleted file mode 100644
index c83bf8a6a3f..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
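The manifest wire format above is nothing more than uvarint-encoded (tag, value) pairs, with byte strings length-prefixed. A toy round-trip of a single field using only the standard library (the tag constant is copied from the const block above):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const recNextFileNum = 3 // tag from the const block above

	// Encode: tag, then value, both as uvarints (as putUvarint does).
	var buf bytes.Buffer
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, v := range []uint64{recNextFileNum, 42} {
		n := binary.PutUvarint(tmp, v)
		buf.Write(tmp[:n])
	}

	// Decode: the mirror image of readUvarint.
	r := bytes.NewReader(buf.Bytes())
	tag, _ := binary.ReadUvarint(r)
	val, _ := binary.ReadUvarint(r)
	fmt.Println(tag, val) // 3 42
}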
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := &sessionRecord{numLevel: opt.DefaultNumLevel} - err = v.decode(b) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 - v := &sessionRecord{numLevel: opt.DefaultNumLevel} - i := uint64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - newIkey([]byte("foo"), big+500+1, ktVal), - newIkey([]byte("zoo"), big+600+1, ktDel)) - v.delTable(4, big+700+i) - v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal)) - } - - v.setComparer("foo") - v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextFileNum(big + 200) - v.setSeqNum(big + 1000) - test() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go deleted file mode 100644 index 6e10d20e5fd..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" -) - -type dropper struct { - s *session // Logging. - file storage.File -} - -func (d dropper) Drop(err error) { - if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) - } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) - } -} - -func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } -func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } - -// File utils. - -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -func (s *session) tableFileFromRecord(r atRecord) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) -} - -// Session state. - -// Get current version. This will incr version ref, must call -// version.release (exactly once) after use. -func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.ref++ - return s.stVersion -} - -// Set current version to v. 
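setVersion below does a small hand-rolled refcount handoff: the incoming version starts with one reference owned by the session, gains a second one for as long as the old version still links to it, and the old version then drops the session's reference to itself. A toy model of that ownership transfer (all names hypothetical):

package main

import "fmt"

type version struct {
	ref  int
	next *version
}

func (v *version) release() {
	v.ref--
	if v.ref == 0 {
		if v.next != nil {
			v.next.release() // hand back the reference the link held
			v.next = nil
		}
		fmt.Println("version freed")
	}
}

type session struct{ current *version }

func (s *session) setVersion(v *version) {
	v.ref = 1 // held by the session
	if old := s.current; old != nil {
		v.ref++ // held via old.next until old is freed
		old.next = v
		old.release() // the session lets go of the old version
	}
	s.current = v
}

func main() {
	s := &session{}
	s.setVersion(&version{})
	s.setVersion(&version{}) // prints "version freed" for the first version
}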
-func (s *session) setVersion(v *version) { - s.vmu.Lock() - v.ref = 1 // Holds by session. - if old := s.stVersion; old != nil { - v.ref++ // Holds by old version. - old.next = v - old.releaseNB() - } - s.stVersion = v - s.vmu.Unlock() -} - -// Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num uint64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num uint64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. -func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum - } - - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum - } - - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum - } - - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if s.manifestFile != nil { - s.manifestFile.Remove() - } - s.manifestFile = file - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - file.Remove() - s.reuseFileNum(num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetManifest(file) - return -} - -// Flush record to disk. 
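markFileNum and reuseFileNum above advance or rewind the shared file-number counter with a compare-and-swap retry loop. A sketch of the forward-only variant; note it loads the counter with atomic.LoadUint64 on every attempt, which is slightly stricter than the plain field read in the vendored loop:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // markUsed raises next to num+1 unless it is already past it, retrying
    // until the CAS lands, as markFileNum does above.
    func markUsed(next *uint64, num uint64) {
        for {
            old := atomic.LoadUint64(next)
            x := num + 1
            if old > x {
                x = old
            }
            if atomic.CompareAndSwapUint64(next, old, x) {
                return
            }
        }
    }

    func main() {
        var next uint64 = 5
        markUsed(&next, 10) // moves the counter forward to 11
        markUsed(&next, 3)  // already covered; no change
        fmt.Println(atomic.LoadUint64(&next)) // 11
    }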
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - err = s.manifestWriter.Sync() - if err != nil { - return - } - s.recordCommited(rec) - return -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 34f05d45ddd..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesytem-backed storage implementation with the given -// path. This also hold a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling Close method. -func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) 
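flushManifest above follows a strict ordering: encode the record, flush the journal, sync the writer, and only then fold the record into in-memory state via recordCommited, so a crash can never leave memory ahead of the manifest. A generic sketch of that durable-then-apply shape, using a hypothetical helper rather than the vendored API:

    package main

    import (
        "fmt"
        "os"
    )

    // commitDurably writes the record, forces it to stable storage, and only
    // then applies the in-memory update, mirroring the ordering above.
    func commitDurably(f *os.File, rec []byte, apply func()) error {
        if _, err := f.Write(rec); err != nil {
            return err
        }
        if err := f.Sync(); err != nil {
            return err // state is NOT applied when the sync fails
        }
        apply()
        return nil
    }

    func main() {
        f, err := os.CreateTemp("", "manifest-*")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()
        committed := false
        if err := commitDurably(f, []byte("rec\n"), func() { committed = true }); err != nil {
            panic(err)
        }
        fmt.Println("committed:", committed)
    }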
-} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. 
- if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. - if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) - return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
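GetManifest and SetManifest above implement the CURRENT-pointer dance: the manifest name is written to a numbered side file and then renamed over CURRENT, so readers never observe a partially written pointer file (rename is atomic on POSIX filesystems; the Windows variant later in this diff uses MoveFileEx for the equivalent). A sketch with invented names:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // setCurrent writes the manifest name to CURRENT.<num>, then atomically
    // renames it over CURRENT, as SetManifest does above.
    func setCurrent(dir, manifestName string, num uint64) error {
        tmp := filepath.Join(dir, fmt.Sprintf("CURRENT.%d", num))
        if err := os.WriteFile(tmp, []byte(manifestName+"\n"), 0644); err != nil {
            return err
        }
        return os.Rename(tmp, filepath.Join(dir, "CURRENT"))
    }

    func main() {
        dir, err := os.MkdirTemp("", "db-*")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        if err := setCurrent(dir, "MANIFEST-000002", 2); err != nil {
            panic(err)
        }
        b, _ := os.ReadFile(filepath.Join(dir, "CURRENT"))
        fmt.Print(string(b)) // MANIFEST-000002
    }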
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. 
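The manifest-only directory sync in fileWrap.Sync above exists because, on unix-like systems, a newly created or renamed file's directory entry is only durable once the parent directory is fsynced as well (see the leveldb issue referenced in the comment). A sketch of the full create, write, sync, syncdir sequence; the helper name is hypothetical:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // createDurably persists both the file contents and its directory entry.
    func createDurably(path string, data []byte) error {
        f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
        if err != nil {
            return err
        }
        if _, err := f.Write(data); err != nil {
            f.Close()
            return err
        }
        if err := f.Sync(); err != nil { // file contents reach disk
            f.Close()
            return err
        }
        if err := f.Close(); err != nil {
            return err
        }
        d, err := os.Open(filepath.Dir(path)) // now pin the directory entry
        if err != nil {
            return err
        }
        defer d.Close()
        return d.Sync()
    }

    func main() {
        if err := createDurably(filepath.Join(os.TempDir(), "demo.manifest"), []byte("x")); err != nil {
            panic(err)
        }
        fmt.Println("durable")
    }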
- if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func (f *file) parse(name string) bool { - var num uint64 - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) - if err == nil { - switch tail { - case "log": - f.t = TypeJournal - case "ldb", "sst": - f.t = TypeTable - case "tmp": - f.t = TypeTemp - default: - return false - } - f.num = num - return true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) - if n == 1 { - f.t = TypeManifest - f.num = num - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index 42940d769f8..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "os" - "path/filepath" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 102031bfd54..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
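file.name and file.parse above define the on-disk naming scheme: %06d.log, %06d.ldb, %06d.tmp, MANIFEST-%06d, and the legacy .sst table suffix kept for backward compatibility. A standalone sketch of the parser, mirroring the Sscanf trick used there (a one-item match on "MANIFEST-%d%s" means there was no trailing junk):

    package main

    import "fmt"

    // parseName classifies a leveldb file name, accepting both ".ldb" and
    // the legacy ".sst" table suffix, exactly as the removed code does.
    func parseName(name string) (kind string, num uint64, ok bool) {
        var tail string
        if _, err := fmt.Sscanf(name, "%d.%s", &num, &tail); err == nil {
            switch tail {
            case "log":
                return "journal", num, true
            case "ldb", "sst":
                return "table", num, true
            case "tmp":
                return "temp", num, true
            }
            return "", 0, false
        }
        if n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail); n == 1 {
            return "manifest", num, true
        }
        return "", 0, false
    }

    func main() {
        for _, n := range []string{"000100.log", "000000.sst", "MANIFEST-000002", "100.lop"} {
            kind, num, ok := parseName(n)
            fmt.Println(n, "->", kind, num, ok)
        }
    }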
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - flock.Type = syscall.F_WRLCK - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb7d0c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d0a604b7ab4..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - how := syscall.LOCK_UN - if lock { - how = syscall.LOCK_EX - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 50c3c454e75..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index d3f3e136ed7..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
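The unix lock file above is an exclusive, non-blocking flock on LOCK inside the database directory; a second opener fails immediately with EWOULDBLOCK rather than waiting, which is how two processes are prevented from opening the same database. A minimal sketch (unix-only, hence the build constraint):

    //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "syscall"
    )

    // lockFile takes the same advisory lock the storage layer above uses.
    func lockFile(path string) (*os.File, error) {
        f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
        if err != nil {
            return nil, err
        }
        if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
            f.Close()
            return nil, err // already held elsewhere: fail fast, don't block
        }
        return f, nil
    }

    func main() {
        f, err := lockFile(filepath.Join(os.TempDir(), "LOCK"))
        if err != nil {
            panic(err)
        }
        defer f.Close() // closing the descriptor drops the lock
        fmt.Println("locked")
    }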
- -package storage - -import ( - "bytes" - "os" - "sync" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 3 - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Release() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr -} - -// NewMemStorage returns a new memory-backed storage implementation. -func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (util.Releaser, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} - -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { - ms.mu.Lock() - var ff []File - for x, _ := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } - ms.mu.Unlock() - return ff, nil -} - -func (ms *memStorage) GetManifest() (File, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist - } - return ms.manifest, nil -} - -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } - ms.mu.Lock() - ms.manifest = fm - ms.mu.Unlock() - return nil -} - -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()< -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
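memStorage above keys its files map by packing the file number and type into a single uint64: num<<typeShift | type. Note the removed files disagree on the shift (mem_storage.go uses 3, while the test harness later in this diff uses 4); with four flag bits, 4 is the value that keeps number and type strictly disjoint, so this sketch uses it:

    package main

    import "fmt"

    type fileType uint32

    const (
        typeManifest fileType = 1 << iota // 1
        typeJournal                       // 2
        typeTable                         // 4
        typeTemp                          // 8
    )

    const (
        typeAll   = typeManifest | typeJournal | typeTable | typeTemp
        typeShift = 4 // four flag bits below, file number above
    )

    // pack and unpack mirror the map-key trick in memStorage above.
    func pack(num uint64, t fileType) uint64 { return num<<typeShift | uint64(t) }

    func unpack(x uint64) (uint64, fileType) { return x >> typeShift, fileType(x) & typeAll }

    func main() {
        k := pack(7, typeTable)
        num, t := unpack(k)
        fmt.Println(num, t == typeTable) // 7 true
    }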
- -package storage - -import ( - "bytes" - "testing" -) - -func TestMemStorage(t *testing.T) { - m := NewMemStorage() - - l, err := m.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = m.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = m.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } - - f := m.GetFile(1, TypeTable) - if f.Num() != 1 && f.Type() != TypeTable { - t.Fatal("invalid file number and type") - } - w, _ := f.Create() - w.Write([]byte("abc")) - w.Close() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { - t.Fatal("invalid GetFiles len") - } - buf := new(bytes.Buffer) - r, err := f.Open() - if err != nil { - t.Fatal("Open: got error: ", err) - } - buf.ReadFrom(r) - r.Close() - if got := buf.String(); got != "abc" { - t.Fatalf("Read: invalid value, want=abc got=%s", got) - } - if _, err := f.Open(); err != nil { - t.Fatal("Open: got error: ", err) - } - if _, err := m.GetFile(1, TypeTable).Open(); err == nil { - t.Fatal("expecting error") - } - f.Remove() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { - t.Fatal("invalid GetFiles len", len(ff)) - } - if _, err := f.Open(); err == nil { - t.Fatal("expecting error") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go deleted file mode 100644 index 0914bea8919..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package storage provides storage abstraction for LevelDB. -package storage - -import ( - "errors" - "fmt" - "io" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type FileType uint32 - -const ( - TypeManifest FileType = 1 << iota - TypeJournal - TypeTable - TypeTemp - - TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp -) - -func (t FileType) String() string { - switch t { - case TypeManifest: - return "manifest" - case TypeJournal: - return "journal" - case TypeTable: - return "table" - case TypeTemp: - return "temp" - } - return fmt.Sprintf("", t) -} - -var ( - ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") - ErrLocked = errors.New("leveldb/storage: already locked") - ErrClosed = errors.New("leveldb/storage: closed") -) - -// Syncer is the interface that wraps basic Sync method. -type Syncer interface { - // Sync commits the current contents of the file to stable storage. - Sync() error -} - -// Reader is the interface that groups the basic Read, Seek, ReadAt and Close -// methods. -type Reader interface { - io.ReadSeeker - io.ReaderAt - io.Closer -} - -// Writer is the interface that groups the basic Write, Sync and Close -// methods. -type Writer interface { - io.WriteCloser - Syncer -} - -// File is the file. A file instance must be goroutine-safe. -type File interface { - // Open opens the file for read. Returns os.ErrNotExist error - // if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open() (r Reader, err error) - - // Create creates the file for writting. Truncate the file if - // already exist. 
- // Returns ErrClosed if the underlying storage is closed. - Create() (w Writer, err error) - - // Replace replaces file with newfile. - // Returns ErrClosed if the underlying storage is closed. - Replace(newfile File) error - - // Type returns the file type - Type() FileType - - // Num returns the file number. - Num() uint64 - - // Remove removes the file. - // Returns ErrClosed if the underlying storage is closed. - Remove() error -} - -// Storage is the storage. A storage instance must be goroutine-safe. -type Storage interface { - // Lock locks the storage. Any subsequent attempt to call Lock will fail - // until the last lock released. - // After use the caller should call the Release method. - Lock() (l util.Releaser, err error) - - // Log logs a string. This is used for logging. An implementation - // may write to a file, stdout or simply do nothing. - Log(str string) - - // GetFile returns a file for the given number and type. GetFile will never - // returns nil, even if the underlying storage is closed. - GetFile(num uint64, t FileType) File - - // GetFiles returns a slice of files that match the given file types. - // The file types may be OR'ed together. - GetFiles(t FileType) ([]File, error) - - // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest - // file does not exist. - GetManifest() (File, error) - - // SetManifest sets the given file as manifest file. The given file should - // be a manifest file type or error will be returned. - SetManifest(f File) error - - // Close closes the storage. It is valid to call Close multiple times. - // Other methods should not be called after the storage has been closed. - Close() error -} - -// FileInfo wraps basic file info. -type FileInfo struct { - Type FileType - Num uint64 -} - -func (fi FileInfo) String() string { - switch fi.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fi.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fi.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fi.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fi.Num) - default: - return fmt.Sprintf("%#x-%d", fi.Type, fi.Num) - } -} - -// NewFileInfo creates new FileInfo from the given File. It will returns nil -// if File is nil. -func NewFileInfo(f File) *FileInfo { - if f == nil { - return nil - } - return &FileInfo{f.Type(), f.Num()} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go deleted file mode 100644 index 482ae83ad51..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENE file. 
- -package leveldb - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 4 - -var ( - tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") - tsErrFileOpen = errors.New("leveldb.testStorage: file still open") -) - -var ( - tsFSEnv = os.Getenv("GOLEVELDB_USEFS") - tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR") - tsKeepFS = tsFSEnv == "2" - tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" - tsMU = &sync.Mutex{} - tsNum = 0 -) - -type tsOp uint - -const ( - tsOpOpen tsOp = iota - tsOpCreate - tsOpRead - tsOpReadAt - tsOpWrite - tsOpSync - - tsOpNum -) - -type tsLock struct { - ts *testStorage - r util.Releaser -} - -func (l tsLock) Release() { - l.r.Release() - l.ts.t.Log("I: storage lock released") -} - -type tsReader struct { - tf tsFile - storage.Reader -} - -func (tr tsReader) Read(b []byte) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpRead) { - return 0, errors.New("leveldb.testStorage: emulated read error") - } - n, err = tr.Reader.Read(b) - if err != nil && err != io.EOF { - ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) - } - return -} - -func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpReadAt) { - return 0, errors.New("leveldb.testStorage: emulated readAt error") - } - n, err = tr.Reader.ReadAt(b, off) - if err != nil && err != io.EOF { - ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) - } - return -} - -func (tr tsReader) Close() (err error) { - err = tr.Reader.Close() - tr.tf.close("reader", err) - return -} - -type tsWriter struct { - tf tsFile - storage.Writer -} - -func (tw tsWriter) Write(b []byte) (n int, err error) { - if tw.tf.shouldErrLocked(tsOpWrite) { - return 0, errors.New("leveldb.testStorage: emulated write error") - } - n, err = tw.Writer.Write(b) - if err != nil { - tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) - } - return -} - -func (tw tsWriter) Sync() (err error) { - ts := tw.tf.ts - ts.mu.Lock() - for ts.emuDelaySync&tw.tf.Type() != 0 { - ts.cond.Wait() - } - ts.mu.Unlock() - if tw.tf.shouldErrLocked(tsOpSync) { - return errors.New("leveldb.testStorage: emulated sync error") - } - err = tw.Writer.Sync() - if err != nil { - tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) - } - return -} - -func (tw tsWriter) Close() (err error) { - err = tw.Writer.Close() - tw.tf.close("writer", err) - return -} - -type tsFile struct { - ts *testStorage - storage.File -} - -func (tf tsFile) x() uint64 { - return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll - ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) - } - } - ts.mu.Unlock() -} - -func newTestStorage(t *testing.T) *testStorage { - var stor storage.Storage - var closeFn func() error - if tsFS { - for { - tsMU.Lock() - num := tsNum - tsNum++ - tsMU.Unlock() - tempdir := tsTempdir - if tempdir == "" { - tempdir = os.TempDir() - } - path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); err != nil { - stor, err 
= storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - } - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index 18dd9a665c0..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - file storage.File - seekLeft int32 - size uint64 - imin, imax iKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { - f := &tFile{ - file: file, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. 
Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.file.Num()) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.file.Num() < b.file.Num() - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. 
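The long comment above carries the classic LevelDB seek-cost arithmetic: a seek and 1MB of IO both cost about 10ms, compacting 1MB causes roughly 25MB of IO, so one seek is "worth" about 40KB of compaction; the code then conservatively charges one seek per 16KB, with a floor of 100. The same computation as a tiny standalone function:

    package main

    import "fmt"

    // seekAllowance reproduces the seekLeft initialization above: one seek
    // per 16KB of table, never fewer than 100.
    func seekAllowance(tableSize uint64) int32 {
        n := int32(tableSize / 16384)
        if n < 100 {
            n = 100
        }
        return n
    }

    func main() {
        fmt.Println(seekAllowance(2 << 20))  // 2MB table  -> 128 seeks
        fmt.Println(seekAllowance(64 << 10)) // small table -> floor of 100
    }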
-func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. -func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. 
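searchMin and searchMax above lean on sort.Search over tables kept sorted by key, and the same calls bound the table slice in newIndexIterator. A reduced sketch over plain ints showing how the first possibly-overlapping table is found:

    package main

    import (
        "fmt"
        "sort"
    )

    // searchMax finds the first table whose largest key is >= the probe,
    // mirroring tFiles.searchMax above but over plain ints.
    func searchMax(maxKeys []int, key int) int {
        return sort.Search(len(maxKeys), func(i int) bool {
            return maxKeys[i] >= key
        })
    }

    func main() {
        maxKeys := []int{10, 20, 30, 40} // per-table largest keys, sorted
        // A range starting at 25 can only overlap tables from this index on;
        // every earlier table ends strictly below 25.
        fmt.Println(searchMax(maxKeys, 25)) // 2
    }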
-func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - num := f.file.Num() - ch = t.cache.Get(0, num, func() (size int, value cache.Value) { - var r storage.Reader - r, err = f.file.Open() - if err != nil { - return 0, nil - } - - var bcache *cache.CacheGetter - if t.bcache != nil { - bcache = &cache.CacheGetter{Cache: t.bcache, NS: num} - } - - var tr *table.Reader - tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. -func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).Find(key, true, ro) -} - -// Finds key that is greater than or equal to the given key. -func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).FindKey(key, true, ro) -} - -// Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { - ch, err := t.open(f) - if err != nil { - return - } - defer ch.Release() - offset_, err := ch.Value().(*table.Reader).OffsetOf(key) - return uint64(offset_), err -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - ch, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := ch.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(ch) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - num := f.file.Num() - t.cache.Delete(0, num, func() { - if err := f.file.Remove(); err != nil { - t.s.logf("table@remove removing @%d %q", num, err) - } else { - t.s.logf("table@remove removed @%d", num) - } - if t.bcache != nil { - t.bcache.EvictNS(num) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.bpool.Close() - t.cache.Close() - if t.bcache != nil { - t.bcache.Close() - } -} - -// Creates new initialized table ops instance. -func newTableOps(s *session) *tOps { - var ( - cacher cache.Cacher - bcache *cache.Cache - ) - if s.o.GetOpenFilesCacheCapacity() > 0 { - cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) - } - if !s.o.DisableBlockCache { - var bcacher cache.Cacher - if s.o.GetBlockCacheCapacity() > 0 { - bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity()) - } - bcache = cache.NewCache(bcacher) - } - return &tOps{ - s: s, - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), - } -} - -// tWriter wraps the table writer. It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - file storage.File - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. -func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) - } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. 
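tOps.open above is a get-or-load through the cache: on a miss, the table file is opened and wrapped in a table.Reader exactly once, and callers receive a handle they must release. A toy mutex-based version of the same shape; the real cache additionally ref-counts handles so an open table cannot be evicted while in use:

    package main

    import (
        "fmt"
        "sync"
    )

    // cache is a stand-in keyed by file number; values here are just strings.
    type cache struct {
        mu sync.Mutex
        m  map[uint64]string
    }

    // getOrLoad returns the cached value, running the loader at most once
    // per key while other callers wait on the lock.
    func (c *cache) getOrLoad(num uint64, load func() string) string {
        c.mu.Lock()
        defer c.mu.Unlock()
        if v, ok := c.m[num]; ok {
            return v
        }
        v := load()
        c.m[num] = v
        return v
    }

    func main() {
        c := &cache{m: make(map[uint64]string)}
        open := func() string { fmt.Println("opening table 7"); return "reader#7" }
        fmt.Println(c.getOrLoad(7, open)) // loads once
        fmt.Println(c.getOrLoad(7, open)) // cached, no second open
    }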
-func (w *tWriter) empty() bool { - return w.first == nil -} - -// Closes the storage.Writer. -func (w *tWriter) close() { - if w.w != nil { - w.w.Close() - w.w = nil - } -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - defer w.close() - err = w.tw.Close() - if err != nil { - return - } - err = w.w.Sync() - if err != nil { - return - } - f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) - return -} - -// Drops the table. -func (w *tWriter) drop() { - w.close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.file = nil - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 8622228ce46..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. 
- testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. - bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 152923d26fe..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1106 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset += 1 // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
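 // (prevNode and prevKeys above form the backward-iteration cache: prevNode
 // starts with the restart offset of the range being walked, followed by one
 // (prevKeys offset, value offset, value length) triple per decoded entry,
 // while prevKeys stores the reconstructed full keys back to back. Prev pops
 // triples instead of re-decoding entries.)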
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. 
- i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. - n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri -= 1 - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. - if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i 
*indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fi *storage.FileInfo - reader io.ReaderAt - cache *cache.CacheGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{File: r.fi, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if 
fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be nil. 
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not goroutine-safe and should be released -// when not used. -// -// Also read Iterator documentation of the leveldb/iterator package. 
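// A typical use (a sketch; error handling elided, and "process" stands in
// for caller code):
//
//	iter := r.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("b")}, nil)
//	for iter.Next() {
//		process(iter.Key(), iter.Value())
//	}
//	iter.Release()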
-func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if !index.Seek(key) { - err = index.Error() - if err == nil { - err = ErrNotFound - } - return - } - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - err = ferr - return - } - } - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - defer data.Release() - if !data.Seek(key) { - err = data.Error() - if err == nil { - err = ErrNotFound - } - return - } - // Don't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Use block buffer, and since the buffer will be recycled, the buffer - // need to be copied. - value = append([]byte{}, data.Value()...) - } - } - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// Find finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. 
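// (Get is Find plus an exact-match check: when the nearest key located by
// the internal find differs from the sought key, Get reports ErrNotFound.)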
-// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. -func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fi: fi, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. - metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. 
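 // Metaindex entries map names to block handles; a filter block is
 // registered under the key "filter.<name>". The loop below matches that
 // name against the configured filter (or any alt filter) and, on a hit,
 // records the filter block handle and pulls dataEnd back so the filter
 // block is excluded from the data region.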
- metaIter := r.newBlockIter(metaBlock, nil, nil, true)
- for metaIter.Next() {
- key := string(metaIter.Key())
- if !strings.HasPrefix(key, "filter.") {
- continue
- }
- fn := key[7:]
- if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
- r.filter = f0
- } else {
- for _, f0 := range o.GetAltFilters() {
- if f0.Name() == fn {
- r.filter = f0
- break
- }
- }
- }
- if r.filter != nil {
- filterBH, n := decodeBlockHandle(metaIter.Value())
- if n == 0 {
- continue
- }
- r.filterBH = filterBH
- // Update data end.
- r.dataEnd = int64(filterBH.offset)
- break
- }
- }
- metaIter.Release()
- metaBlock.Release()
-
- // Cache index and filter block locally, since we don't have global cache.
- if cache == nil {
- r.indexBlock, err = r.readBlock(r.indexBH, true)
- if err != nil {
- if errors.IsCorrupted(err) {
- r.err = err
- return r, nil
- } else {
- return nil, err
- }
- }
- if r.filter != nil {
- r.filterBlock, err = r.readFilterBlock(r.filterBH)
- if err != nil {
- if !errors.IsCorrupted(err) {
- return nil, err
- }
-
- // Don't use filter then.
- r.filter = nil
- }
- }
- }
-
- return r, nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
deleted file mode 100644
index beacdc1f024..00000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package table allows reading and writing sorted key/value pairs.
-package table
-
-import (
- "encoding/binary"
-)
-
-/*
-Table:
-
-A table consists of one or more data blocks, an optional filter block,
-a metaindex block, an index block and a table footer. The metaindex block
-is a special block used to keep parameters of the table, such as the filter
-block name and its block handle. The index block is a special block used to
-keep records of data block offsets and lengths; the index block uses one as
-its restart interval. The keys used by the index block are the last key of
-the preceding block, a shorter separator of adjacent blocks, or a shorter
-successor of the last key of the last block. The filter block is an optional
-block containing a sequence of filter data generated by a filter generator.
-
-Table data structure:
- + optional
- /
- +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
- | data block 1 | ... | data block n | filter block | metaindex block | index block | footer |
- +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
-
- Each block is followed by a 5-byte trailer containing the compression type
- and a checksum.
-
-Table block trailer:
-
- +---------------------------+-------------------+
- | compression type (1-byte) | checksum (4-byte) |
- +---------------------------+-------------------+
-
- The checksum is a CRC-32 computed using Castagnoli's polynomial. The
- compression type is also included in the checksum.
-
-Table footer:
-
- +------------------- 40-bytes -------------------+
- / \
- +------------------------+--------------------+------+-----------------+
- | metaindex block handle / index block handle / ---- | magic (8-bytes) |
- +------------------------+--------------------+------+-----------------+
-
- The magic is the first 64 bits of the SHA-1 sum of
- "http://code.google.com/p/leveldb/".
-
-NOTE: All fixed-length integers are little-endian.
-*/
-
-/*
-Block:
-
-A block consists of one or more key/value entries and a block trailer.
-A block entry shares its key prefix with the preceding key until a restart
-point is reached. A block should contain at least one restart point.
-The first restart point is always zero.
-
-Block data structure:
-
- + restart point + restart point (depends on restart interval)
- / /
- +---------------+---------------+---------------+---------------+---------+
- | block entry 1 | block entry 2 | ... | block entry n | trailer |
- +---------------+---------------+---------------+---------------+---------+
-
-Key/value entry:
-
- +---- key len ----+
- / \
- +-----------------+---------------------+--------------------+--------------+----------------+
- | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
- +-----------------+---------------------+--------------------+--------------+----------------+
-
- A block entry shares its key prefix with the preceding key:
- Conditions:
- restart_interval=2
- entry one : key=deck,value=v1
- entry two : key=dock,value=v2
- entry three: key=duck,value=v3
- The entries will be encoded as follows:
-
- + restart point (offset=0) + restart point (offset=16)
- / /
- +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
- | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" |
- +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
- \ / \ / \ /
- +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+
-
- The block trailer will contain two restart points:
-
- +------------+-----------+--------+
- | 0 | 16 | 2 |
- +------------+-----------+--------+
- \ / \
- +-- restart points --+ + restart points length
-
-Block trailer:
-
- +-- 4-bytes --+
- / \
- +-----------------+-----------------+-----------------+------------------------------+
- | restart point 1 | .... | restart point n | restart points len (4-bytes) |
- +-----------------+-----------------+-----------------+------------------------------+
-
-
-NOTE: All fixed-length integers are little-endian.
-*/
-
-/*
-Filter block:
-
-A filter block consists of one or more filter data sections and a filter
-block trailer. The trailer contains the filter data offsets, a trailer
-offset and a 1-byte base Lg.
-
-Filter block data structure:
-
- + offset 1 + offset 2 + offset n + trailer offset
- / / / /
- +---------------+---------------+---------------+---------+
- | filter data 1 | ... | filter data n | trailer |
- +---------------+---------------+---------------+---------+
-
-Filter block trailer:
-
- +- 4-bytes -+
- / \
- +---------------+---------------+---------------+-------------------------------+------------------+
- | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
- +---------------+---------------+---------------+-------------------------------+------------------+
-
-
-NOTE: All fixed-length integers are little-endian.
-*/
-
-const (
- blockTrailerLen = 5
- footerLen = 48
-
- magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
-
- // The block type gives the per-block compression format.
- // These constants are part of the file format and should not be changed.
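 // (0 means the block is stored uncompressed and 1 means snappy; the value
 // is written as the first trailer byte by Writer.writeBlock and dispatched
 // on by Reader.readRawBlock.)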
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index bd8a086e24d..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 105f62b0f9f..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. 
- tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index e15df55e9e7..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
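For orientation before the writer internals below, here is a minimal round-trip sketch in the style of table_test.go above: it builds an in-memory table with a Writer and reads a key back through a Reader. As in the tests, nil is passed for the optional fi, cache and bpool arguments and for ReadOptions; error handling is abbreviated.

package main

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
	buf := &bytes.Buffer{}
	o := &opt.Options{BlockSize: 1024, Compression: opt.NoCompression}

	// Keys must be appended in strictly increasing order.
	tw := table.NewWriter(buf, o)
	tw.Append([]byte("k01"), []byte("hello"))
	tw.Append([]byte("k02"), []byte("hello2"))
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// fi, cache and bpool may all be nil.
	tr, err := table.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
	if err != nil {
		panic(err)
	}
	defer tr.Release()

	value, err := tr.Get([]byte("k02"), nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("k02 -> %s\n", value)
}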
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. - w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. 
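 // (A block handle is two uvarints, each at most 10 bytes for a 64-bit
 // value, so encodeBlockHandle needs at most 20 bytes.)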
- scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. - if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - var compressed []byte - compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) - if err != nil { - return - } - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. - w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. 
Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. - if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not goroutine-safe. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. - w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index 5a499fb916e..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) 
- t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e8111..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index 0e4a7f1093a..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := 
BytesSeparator(key0, key1) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 15f7726b2eb..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
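KeyValue, defined below, is the sorted in-memory model the suite uses as its oracle: Put enforces strictly increasing keys, PutU inserts or overwrites at the binary-searched position, and Slice/SliceKey carve out ranges for the sliced-iterator tests. A minimal sketch of how tests drive it:

package main

import (
	"fmt"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil"
)

func main() {
	kv := &testutil.KeyValue{}
	kv.PutString("a", "v1") // keys must arrive in increasing order
	kv.PutString("b", "v2")
	if i, ok := kv.GetString("b"); ok {
		fmt.Println("b at index", i) // binary search over the sorted entries
	}
}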
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, 
value []byte)) { - ShuffledIndex(rnd, kv.Len(), 1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - 
if rnd == nil { - rnd = NewRand() - } - if maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index ad244a9d1d7..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - } - }) - - It("Should return error if the key is not present", func() { - if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. 
- rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) - } - }) - - TestIter := func(r *util.Range, _kv KeyValue) { - if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() - } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 5695bda9192..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . "github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<<typeCount | uint64(t) -} - -func unpackFile(x uint64) (uint64, storage.FileType) { - return x >> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF {
- r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - 
-type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close 
func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 918c865106b..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) - delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 930ac01653d..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 974215244c8..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." 
+ str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type files []storage.File - -func (p files) Len() int { - return len(p) -} - -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() -} - -func (p files) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (p files) sort() { - sort.Sort(p) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de242552d..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. 
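// Note on the policy below: grow prefers, in order, (1) the 64-byte
// bootstrap array for a fresh small buffer, (2) sliding the unread
// region to the front when m+n <= cap(b.buf)/2, and (3) allocating a
// new slice of 2*cap+n and copying. The 2x slop allowed by (2) is why
// TestBufferGrowth further down tolerates up to 3x growth.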
-func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. 
-// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. 
To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2b8453d7598..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. -type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. 
-func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. -func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d96739c43..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. 
-func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. -func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected <nil>; got
%q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. 
-func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. -func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d6109c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 54903660ffa..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/binary" -) - -// Hash return hash of the given data. 
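-// A minimal usage sketch; the seed shown is illustrative only, not a
-// value mandated by this package:
-//
-//	h := Hash([]byte("some key"), 0xbc9f1d34)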
-func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) - - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w - h *= m - h ^= (h >> 16) - } - - rest := buf.Bytes() - switch len(rest) { - default: - panic("not reached") - case 3: - h += uint32(rest[2]) << 16 - fallthrough - case 2: - h += uint32(rest[1]) << 8 - fallthrough - case 1: - h += uint32(rest[0]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd41fe4..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03be94..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583d2c..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} - -// BytesPrefix returns key range that satisfy the given prefix. -// This only applicable for the standard 'bytes comparer'. 
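-// For example, BytesPrefix([]byte("abc")) yields the range ["abc", "abd"):
-// incrementing the last byte that is below 0xff gives the smallest key
-// that no longer carries the prefix.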
-func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index f35976865bf..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already relesed") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always success - // and can be called multipe times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser already present or coresponding - // resource is already released. Releaser should be cleared first - // before assigned a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether Release method already called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index e3cc28d2d04..00000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "sync/atomic" - "unsafe" - - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt" - "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - tables []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - ref int - // Succeeding version. - next *version -} - -func newVersion(s *session) *version { - return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())} -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } - if v.ref < 0 { - panic("negative version ref") - } - - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { - for _, t := range tt { - num := t.file.Num() - tables[num] = true - } - } - - for _, tt := range v.tables { - for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { - v.s.tops.remove(t) - } - } - } - - v.next.releaseNB() - v.next = nil -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Walk tables level-by-level. - for level, tables := range v.tables { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. - zfound bool - zseq uint64 - zkt kType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hope across level, finding key/value - // in smaller level make later levels irrelevant. 
- v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { - if tset == nil { - tset = &tSet{level, t} - } else { - tseek = true - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - if level == 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case ktVal: - value = fval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case ktVal: - value = zval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - - return true - }) - - if tseek && tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - - return -} - -func (v *version) sampleSeek(ikey iKey) (tcomp bool) { - var tset *tSet - - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if tset == nil { - tset = &tSet{level, t} - return true - } else { - if tset.table.consumeSeek() <= 0 { - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - return false - } - }, nil) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue - } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) - } - - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} -} - -// Spawn a new version based on this version. -func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - return len(v.tables[level]) -} - -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. 
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { - return 0, err - } - n += nn - } - } - } - - return -} - -func (v *version) pickLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 - - for level, tables := range v.tables { - var score float64 - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). - score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - } - - v.cLevel = bestLevel - v.cScore = bestScore -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} -} - -type versionStaging struct { - base *version - tables []tablesScratch -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) - } - tm.deleted[r.num] = struct{}{} - } - - if tm.added != nil { - delete(tm.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } - - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt - } - - // Compute compaction score for new version. 
- nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go deleted file mode 100644 index d1be7534111..00000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info.go +++ /dev/null @@ -1,38 +0,0 @@ -package sysinfo - -import ( - "errors" -) - -var ErrPlatformNotSupported = errors.New("this operation is not supported on your platform") - -type DiskStats struct { - Free uint64 - Total uint64 - FsType string -} - -var diskUsageImpl func(string) (*DiskStats, error) - -func DiskUsage(path string) (*DiskStats, error) { - if diskUsageImpl == nil { - return nil, ErrPlatformNotSupported - } - - return diskUsageImpl(path) -} - -type MemStats struct { - Swap uint64 - Used uint64 -} - -var memInfoImpl func() (*MemStats, error) - -func MemoryInfo() (*MemStats, error) { - if memInfoImpl == nil { - return nil, ErrPlatformNotSupported - } - - return memInfoImpl() -} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go deleted file mode 100644 index c4e17fe44c9..00000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_darwin.go +++ /dev/null @@ -1,32 +0,0 @@ -package sysinfo - -import ( - "fmt" - "syscall" -) - -func init() { - diskUsageImpl = darwinDiskUsage - memInfoImpl = darwinMemInfo -} - -func darwinDiskUsage(path string) (*DiskStats, error) { - var stfst syscall.Statfs_t - err := syscall.Statfs(path, &stfst) - if err != nil { - return nil, err - } - - free := stfst.Bfree * uint64(stfst.Bsize) - total := stfst.Bavail * uint64(stfst.Bsize) - return &DiskStats{ - Free: free, - Total: total, - FsType: fmt.Sprint(stfst.Type), - }, nil -} - -func darwinMemInfo() (*MemStats, error) { - // TODO: use vm_stat on osx to gather memory information - return new(MemStats), nil -} diff --git a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go b/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go deleted file mode 100644 index b5342620118..00000000000 --- a/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo/info_linux.go +++ /dev/null @@ -1,70 +0,0 @@ -package sysinfo - -import ( - "bytes" - "fmt" - "io/ioutil" - "strings" - "syscall" - - humanize "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize" -) - -func init() { - diskUsageImpl = linuxDiskUsage - memInfoImpl = linuxMemInfo -} - -func linuxDiskUsage(path string) (*DiskStats, error) { - var stfst syscall.Statfs_t - err := syscall.Statfs(path, &stfst) - if err != nil { - return nil, err - } - - free := stfst.Bfree * uint64(stfst.Bsize) - total := stfst.Bavail * uint64(stfst.Bsize) - return &DiskStats{ - Free: free, - Total: total, - FsType: fmt.Sprint(stfst.Type), - }, nil -} - -func linuxMemInfo() (*MemStats, error) { - info, err := ioutil.ReadFile("/proc/self/status") - if err != nil { - return nil, err - } - - var stats MemStats - for _, e := range bytes.Split(info, []byte("\n")) { - if !bytes.HasPrefix(e, []byte("Vm")) { - continue - } - - parts := bytes.Split(e, []byte(":")) - if len(parts) != 2 { - return nil, fmt.Errorf("unexpected line in proc stats: %q", string(e)) - } - - val := 
strings.Trim(string(parts[1]), " \n\t") - switch string(parts[0]) { - case "VmSize": - vmsize, err := humanize.ParseBytes(val) - if err != nil { - return nil, err - } - - stats.Used = vmsize - case "VmSwap": - swapsize, err := humanize.ParseBytes(val) - if err != nil { - return nil, err - } - - stats.Swap = swapsize - } - } - return &stats, nil -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore deleted file mode 100644 index 4cd0cbaf432..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml deleted file mode 100644 index 67467e14079..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -sudo: false -language: go - -go: - - 1.4.1 - -before_script: - - FIXED=$(go fmt ./... | wc -l); if [ $FIXED -gt 0 ]; then echo "gofmt - $FIXED file(s) not formatted correctly, please run gofmt to fix this." && exit 1; fi - -os: - - linux - - osx - -notifications: - email: false diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS deleted file mode 100644 index 4e0e8284e95..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS +++ /dev/null @@ -1,34 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Adrien Bustany -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Dave Cheney -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Matt Layher -Nathan Youngman -Paul Hammond -Pieter Droogendijk -Pursuit92 -Rob Figueiredo -Soge Zhang -Tilak Sharma -Travis Cline -Tudor Golubenco -Yukang -bronze1man -debrando -henrikedwards diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md deleted file mode 100644 index ea9428a2a49..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md +++ /dev/null @@ -1,263 +0,0 @@ -# Changelog - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. 
- -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 
2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 - diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md deleted file mode 100644 index 0f377f341b0..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, OS X and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE deleted file mode 100644 index f21e5408009..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md deleted file mode 100644 index 7a0b2473645..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# File system notifications for Go - -[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1) - -Go 1.3+ required. - -Cross platform: Windows, Linux, BSD and OS X. - -|Adapter |OS |Status | -|----------|----------|----------| -|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)| -|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)| -|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| -|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)| -|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)| -|fanotify |Linux 2.6.37+ | | -|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)| -|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)| - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information. - -## API stability - -Two major versions of fsnotify exist. - -**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1. - -```go -import "gopkg.in/fsnotify.v0" -``` - -\* Refer to the package as fsnotify (without the .v0 suffix). - -**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with: - -```go -import "gopkg.in/fsnotify.v1" -``` - -Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API. - -**Master** may have unreleased changes. 
Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible: - -```go -import "github.com/go-fsnotify/fsnotify" -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go). - - -[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml deleted file mode 100644 index 204217fb0b2..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml +++ /dev/null @@ -1,26 +0,0 @@ -## OS X build (CircleCI iOS beta) - -# Pretend like it's an Xcode project, at least to get it running. -machine: - environment: - XCODE_WORKSPACE: NotUsed.xcworkspace - XCODE_SCHEME: NotUsed - # This is where the go project is actually checked out to: - CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify - -dependencies: - pre: - - brew upgrade go - -test: - override: - - go test ./... - -# Idealized future config, eventually with cross-platform build matrix :-) - -# machine: -# go: -# version: 1.4 -# os: -# - osx -# - linux diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go deleted file mode 100644 index 3063796602c..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -package fsnotify_test - -import ( - "log" - - "github.com/go-fsnotify/fsnotify" -) - -func ExampleNewWatcher() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event := <-watcher.Events: - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err := <-watcher.Errors: - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go deleted file mode 100644 index c899ee00838..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9,!solaris - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." 
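-// For example, an event carrying both the Write and Chmod bits would
-// render as (path shown is illustrative):
-//
-//	"/tmp/foo": WRITE|CHMOD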
-func (e Event) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if e.Op&Create == Create { - buffer.WriteString("|CREATE") - } - if e.Op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if e.Op&Write == Write { - buffer.WriteString("|WRITE") - } - if e.Op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if e.Op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - - // If buffer remains empty, return no event names - if buffer.Len() == 0 { - return fmt.Sprintf("%q: ", e.Name) - } - - // Return a list of event names, with leading pipe character stripped - return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:]) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go deleted file mode 100644 index d7759ec8c86..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := syscall.InotifyInit() - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
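-// A typical call, mirroring the package example:
-//
-//	if err := watcher.Add("/tmp/foo"); err != nil {
-//		log.Fatal(err)
-//	}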
-func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM | - syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY | - syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - watchEntry, found := w.watches[name] - w.mu.Unlock() - if found { - watchEntry.flags |= flags - flags |= syscall.IN_MASK_ADD - } - wd, errno := syscall.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - w.mu.Lock() - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - w.mu.Unlock() - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // That means we can safely delete it from our watches, whatever inotify_rm_watch does. - delete(w.watches, name) - success, errno := syscall.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer syscall.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = syscall.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == syscall.EINTR { - continue - } - - // syscall.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < syscall.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occured while reading. 
- err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-syscall.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name := w.paths[int(raw.Wd)] - w.mu.Unlock() - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += syscall.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&syscall.IN_IGNORED == syscall.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO { - e.Op |= Create - } - if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE { - e.Op |= Remove - } - if mask&syscall.IN_MODIFY == syscall.IN_MODIFY { - e.Op |= Write - } - if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go deleted file mode 100644 index 3b417840419..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux - -package fsnotify - -import ( - "errors" - "syscall" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = syscall.EpollCreate(1) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := syscall.EpollEvent{ - Fd: int32(poller.fd), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = syscall.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: syscall.EPOLLIN, - } - errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]syscall.EpollEvent, 7) - for { - n, errno := syscall.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == syscall.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&syscall.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let syscall.Read pick up the error. - epollerr = true - } - if event.Events&syscall.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&syscall.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&syscall.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&syscall.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. 
- err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := syscall.Write(poller.pipe[1], buf) - if n == -1 { - if errno == syscall.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := syscall.Read(poller.pipe[0], buf) - if n == -1 { - if errno == syscall.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - syscall.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - syscall.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - syscall.Close(poller.epfd) - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go deleted file mode 100644 index af9f407f8d9..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "syscall" - "testing" - "time" -) - -type testFd [2]int - -func makeTestFd(t *testing.T) testFd { - var tfd testFd - errno := syscall.Pipe(tfd[:]) - if errno != nil { - t.Fatalf("Failed to create pipe: %v", errno) - } - return tfd -} - -func (tfd testFd) fd() int { - return tfd[0] -} - -func (tfd testFd) closeWrite(t *testing.T) { - errno := syscall.Close(tfd[1]) - if errno != nil { - t.Fatalf("Failed to close write end of pipe: %v", errno) - } -} - -func (tfd testFd) put(t *testing.T) { - buf := make([]byte, 10) - _, errno := syscall.Write(tfd[1], buf) - if errno != nil { - t.Fatalf("Failed to write to pipe: %v", errno) - } -} - -func (tfd testFd) get(t *testing.T) { - buf := make([]byte, 10) - _, errno := syscall.Read(tfd[0], buf) - if errno != nil { - t.Fatalf("Failed to read from pipe: %v", errno) - } -} - -func (tfd testFd) close() { - syscall.Close(tfd[1]) - syscall.Close(tfd[0]) -} - -func makePoller(t *testing.T) (testFd, *fdPoller) { - tfd := makeTestFd(t) - poller, err := newFdPoller(tfd.fd()) - if err != nil { - t.Fatalf("Failed to create poller: %v", err) - } - return tfd, poller -} - -func TestPollerWithBadFd(t *testing.T) { - _, err := newFdPoller(-1) - if err != syscall.EBADF { - t.Fatalf("Expected EBADF, got: %v", err) - } -} - -func TestPollerWithData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - tfd.get(t) -} - -func TestPollerWithWakeup(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerWithClose(t 
*testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.closeWrite(t) - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } -} - -func TestPollerWithWakeupAndData(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - tfd.put(t) - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - - // both data and wakeup - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - // data is still in the buffer, wakeup is cleared - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if !ok { - t.Fatalf("expected poller to return true") - } - - tfd.get(t) - // data is gone, only wakeup now - err = poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - ok, err = poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - if ok { - t.Fatalf("expected poller to return false") - } -} - -func TestPollerConcurrent(t *testing.T) { - tfd, poller := makePoller(t) - defer tfd.close() - defer poller.close() - - oks := make(chan bool) - live := make(chan bool) - defer close(live) - go func() { - defer close(oks) - for { - ok, err := poller.wait() - if err != nil { - t.Fatalf("poller failed: %v", err) - } - oks <- ok - if !<-live { - return - } - } - }() - - // Try a write - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.put(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) - live <- true - - // Try a wakeup - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - err := poller.wake() - if err != nil { - t.Fatalf("wake failed: %v", err) - } - if <-oks { - t.Fatalf("expected false") - } - live <- true - - // Try a close - select { - case <-time.After(50 * time.Millisecond): - case <-oks: - t.Fatalf("poller did not wait") - } - tfd.closeWrite(t) - if !<-oks { - t.Fatalf("expected true") - } - tfd.get(t) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go deleted file mode 100644 index 035ee8f95d5..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "os" - "path/filepath" - "syscall" - "testing" - "time" -) - -func TestInotifyCloseRightAway(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Close immediately; it won't even reach the first syscall.Read. - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLater(t *testing.T) { - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. 
- <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - w.Add(testDir) - - // Wait until readEvents has reached syscall.Read, and Close. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func TestInotifyCloseAfterRead(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher") - } - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add .") - } - - // Generate an event. - os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING")) - - // Wait for readEvents to read the event, then close the watcher. - <-time.After(50 * time.Millisecond) - w.Close() - - // Wait for the close to complete. - <-time.After(50 * time.Millisecond) - isWatcherReallyClosed(t, w) -} - -func isWatcherReallyClosed(t *testing.T, w *Watcher) { - select { - case err, ok := <-w.Errors: - if ok { - t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err) - } - default: - t.Fatalf("w.Errors would have blocked; readEvents is still alive!") - } - - select { - case _, ok := <-w.Events: - if ok { - t.Fatalf("w.Events is not closed; readEvents is still alive after closing") - } - default: - t.Fatalf("w.Events would have blocked; readEvents is still alive!") - } -} - -func TestInotifyCloseCreate(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - h, err := os.Create(filepath.Join(testDir, "testfile")) - if err != nil { - t.Fatalf("Failed to create file in testdir: %v", err) - } - h.Close() - select { - case _ = <-w.Events: - case err := <-w.Errors: - t.Fatalf("Error from watcher: %v", err) - case <-time.After(50 * time.Millisecond): - t.Fatalf("Took too long to wait for event") - } - - // At this point, we've received one event, so the goroutine is ready. - // It's also blocking on syscall.Read. - // Now we try to swap the file descriptor under its nose. 
- w.Close() - w, err = NewWatcher() - defer w.Close() - if err != nil { - t.Fatalf("Failed to create second watcher: %v", err) - } - - <-time.After(50 * time.Millisecond) - err = w.Add(testDir) - if err != nil { - t.Fatalf("Error adding testDir again: %v", err) - } -} - -func TestInotifyStress(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - killchan := make(chan struct{}) - defer close(killchan) - - err = w.Add(testDir) - if err != nil { - t.Fatalf("Failed to add testDir: %v", err) - } - - proc, err := os.FindProcess(os.Getpid()) - if err != nil { - t.Fatalf("Error finding process: %v", err) - } - - go func() { - for { - select { - case <-time.After(5 * time.Millisecond): - err := proc.Signal(syscall.SIGUSR1) - if err != nil { - t.Fatalf("Signal failed: %v", err) - } - case <-killchan: - return - } - } - }() - - go func() { - for { - select { - case <-time.After(11 * time.Millisecond): - err := w.poller.wake() - if err != nil { - t.Fatalf("Wake failed: %v", err) - } - case <-killchan: - return - } - } - }() - - go func() { - for { - select { - case <-killchan: - return - default: - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - time.Sleep(time.Millisecond) - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Remove failed: %v", err) - } - } - } - }() - - creates := 0 - removes := 0 - after := time.After(5 * time.Second) - for { - select { - case <-after: - if creates-removes > 1 || creates-removes < -1 { - t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes) - } - if creates < 50 { - t.Fatalf("Expected at least 50 creates, got %d", creates) - } - return - case err := <-w.Errors: - t.Fatalf("Got an error from watcher: %v", err) - case evt := <-w.Events: - if evt.Name != testFile { - t.Fatalf("Got an event for an unknown file: %s", evt.Name) - } - if evt.Op == Create { - creates++ - } - if evt.Op == Remove { - removes++ - } - } - } -} - -func TestInotifyRemoveTwice(t *testing.T) { - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - testFile := filepath.Join(testDir, "testfile") - - handle, err := os.Create(testFile) - if err != nil { - t.Fatalf("Create failed: %v", err) - } - handle.Close() - - w, err := NewWatcher() - if err != nil { - t.Fatalf("Failed to create watcher: %v", err) - } - defer w.Close() - - err = w.Add(testFile) - if err != nil { - t.Fatalf("Failed to add testFile: %v", err) - } - - err = os.Remove(testFile) - if err != nil { - t.Fatalf("Failed to remove testFile: %v", err) - } - - err = w.Remove(testFile) - if err != syscall.EINVAL { - t.Fatalf("Expected EINVAL from Remove, got: %v", err) - } - - err = w.Remove(testFile) - if err == syscall.EINVAL { - t.Fatalf("Got EINVAL again, watch was not removed") - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go deleted file mode 100644 index 59169c6afa8..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go +++ /dev/null @@ -1,1135 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
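The fdPoller removed above relies on the classic self-pipe trick: epoll waits on both the inotify descriptor and the read end of a non-blocking pipe, so a one-byte write to the pipe's write end wakes a blocked wait(). That is how Close interrupts a readEvents goroutine parked in the kernel. A minimal, Linux-only sketch of the pattern follows; the names are illustrative and this is not the vendored API:

// selfpipe.go: a minimal, Linux-only sketch (not the vendored
// implementation) of the self-pipe wakeup used by the deleted
// inotify_poller.go: epoll watches a target fd plus a non-blocking
// pipe's read end, and writing one byte forces the wait to return.
package main

import "syscall"

type poller struct {
	epfd int
	pipe [2]int // pipe[0] is the read end, pipe[1] the write end
}

func newPoller(fd int) (*poller, error) {
	p := &poller{epfd: -1, pipe: [2]int{-1, -1}}
	var err error
	if p.epfd, err = syscall.EpollCreate(1); err != nil {
		return nil, err
	}
	if err = syscall.Pipe2(p.pipe[:], syscall.O_NONBLOCK); err != nil {
		return nil, err
	}
	// Register both the target fd and the pipe's read end for read readiness.
	for _, watched := range []int{fd, p.pipe[0]} {
		ev := syscall.EpollEvent{Fd: int32(watched), Events: syscall.EPOLLIN}
		if err = syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_ADD, watched, &ev); err != nil {
			return nil, err
		}
	}
	return p, nil
}

// wake makes the pipe readable so a blocked EpollWait returns.
// EAGAIN means the pipe is already full, so a wakeup is pending anyway.
func (p *poller) wake() error {
	if _, err := syscall.Write(p.pipe[1], []byte{0}); err != nil && err != syscall.EAGAIN {
		return err
	}
	return nil
}

func main() {
	p, err := newPoller(0) // watch stdin, purely for demonstration
	if err != nil {
		panic(err)
	}
	if err := p.wake(); err != nil {
		panic(err)
	}
	events := make([]syscall.EpollEvent, 4)
	// Returns immediately: the byte written by wake() is readable.
	if _, err := syscall.EpollWait(p.epfd, events, -1); err != nil {
		panic(err)
	}
}

Swallowing EAGAIN on the write is deliberate: a full pipe already guarantees a pending wakeup, the same reasoning the deleted wake() documents.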
- -// +build !plan9,!solaris - -package fsnotify - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync/atomic" - "testing" - "time" -) - -// An atomic counter -type counter struct { - val int32 -} - -func (c *counter) increment() { - atomic.AddInt32(&c.val, 1) -} - -func (c *counter) value() int32 { - return atomic.LoadInt32(&c.val) -} - -func (c *counter) reset() { - atomic.StoreInt32(&c.val, 0) -} - -// tempMkdir makes a temporary directory -func tempMkdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "fsnotify") - if err != nil { - t.Fatalf("failed to create test directory: %s", err) - } - return dir -} - -// newWatcher initializes an fsnotify Watcher instance. -func newWatcher(t *testing.T) *Watcher { - watcher, err := NewWatcher() - if err != nil { - t.Fatalf("NewWatcher() failed: %s", err) - } - return watcher -} - -// addWatch adds a watch for a directory -func addWatch(t *testing.T, watcher *Watcher, dir string) { - if err := watcher.Add(dir); err != nil { - t.Fatalf("watcher.Add(%q) failed: %s", dir, err) - } -} - -func TestFsnotifyMultipleOperations(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory that's not watched - testDirToMoveFiles := tempMkdir(t) - defer os.RemoveAll(testDirToMoveFiles) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived, renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Rename == Rename { - renameReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // Modify the file outside of the watched dir - f, err = os.Open(testFileRenamed) - if err != nil { - t.Fatalf("open test renamed file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file that was moved - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system 
time to sync write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - rReceived := renameReceived.value() - if dReceived+rReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyMultipleCreates(t *testing.T) { - watcher := newWatcher(t) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile") - - addWatch(t, watcher, testDir) - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Recreate the file - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Close() - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - // Modify - f, err = os.OpenFile(testFile, os.O_WRONLY, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync 
write change before delete - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - mReceived := modifyReceived.value() - if mReceived < 3 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs atleast %d)", mReceived, 3) - } - dReceived := deleteReceived.value() - if dReceived != 1 { - t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDirOnly(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - // This should NOT add any events to the fsnotify event queue - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, modifyReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Create == Create { - createReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - time.Sleep(time.Millisecond) - f.WriteString("data") - f.Sync() - f.Close() - - time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete - - os.Remove(testFile) - os.Remove(testFileAlreadyExists) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 1 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1) - } - mReceived := modifyReceived.value() - if mReceived != 1 { - t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete 
events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyDeleteWatchedDir(t *testing.T) { - watcher := newWatcher(t) - defer watcher.Close() - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - addWatch(t, watcher, testDir) - - // Add a watch for testFile - addWatch(t, watcher, testFileAlreadyExists) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var deleteReceived counter - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) { - t.Logf("event received: %s", event) - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - }() - - os.RemoveAll(testDir) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - dReceived := deleteReceived.value() - if dReceived < 2 { - t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived) - } -} - -func TestFsnotifySubDir(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile") - testSubDir := filepath.Join(testDir, "sub") - testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile") - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived, deleteReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) { - t.Logf("event received: %s", event) - if event.Op&Create == Create { - createReceived.increment() - } - if event.Op&Remove == Remove { - deleteReceived.increment() - } - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - addWatch(t, watcher, testDir) - - // Create sub-directory - if err := os.Mkdir(testSubDir, 0777); err != nil { - t.Fatalf("failed to create test sub-directory: %s", err) - } - - // Create a file - var f *os.File - f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - // Create a file (Should not see 
this! we are not watching subdir) - var fs *os.File - fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fs.Sync() - fs.Close() - - time.Sleep(200 * time.Millisecond) - - // Make sure receive deletes for both file and sub-directory - os.RemoveAll(testSubDir) - os.Remove(testFile1) - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - cReceived := createReceived.value() - if cReceived != 2 { - t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2) - } - dReceived := deleteReceived.value() - if dReceived != 2 { - t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2) - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } -} - -func TestFsnotifyRename(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var renameReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Rename == Rename { - renameReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if renameReceived.value() == 0 { - t.Fatal("fsnotify rename events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToCreate(t *testing.T) { - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - addWatch(t, watcher, 
testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var createReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) { - if event.Op&Create == Create { - createReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if createReceived.value() == 0 { - t.Fatal("fsnotify create events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestFsnotifyRenameToOverwrite(t *testing.T) { - switch runtime.GOOS { - case "plan9", "windows": - t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS) - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create directory to get file - testDirFrom := tempMkdir(t) - defer os.RemoveAll(testDirFrom) - - testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile") - testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed") - - // Create a file - var fr *os.File - fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - fr.Sync() - fr.Close() - - addWatch(t, watcher, testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - var eventReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testFileRenamed) { - eventReceived.increment() - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - - if 
err := testRename(testFile, testFileRenamed); err != nil { - t.Fatalf("rename failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - if eventReceived.value() == 0 { - t.Fatal("fsnotify events have not been received after 500 ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(2 * time.Second): - t.Fatal("event stream was not closed after 2 seconds") - } - - os.Remove(testFileRenamed) -} - -func TestRemovalOfWatch(t *testing.T) { - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - if err := watcher.Remove(testDir); err != nil { - t.Fatalf("Could not remove the watch: %v\n", err) - } - - go func() { - select { - case ev := <-watcher.Events: - t.Fatalf("We received event: %v\n", ev) - case <-time.After(500 * time.Millisecond): - t.Log("No event received, as expected.") - } - }() - - time.Sleep(200 * time.Millisecond) - // Modify the file outside of the watched dir - f, err := os.Open(testFileAlreadyExists) - if err != nil { - t.Fatalf("Open test file failed: %s", err) - } - f.WriteString("data") - f.Sync() - f.Close() - if err := os.Chmod(testFileAlreadyExists, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - time.Sleep(400 * time.Millisecond) -} - -func TestFsnotifyAttrib(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("attributes don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Receive errors on the error channel on a separate goroutine - go func() { - for err := range watcher.Errors { - t.Fatalf("error received: %s", err) - } - }() - - testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile") - - // Receive events on the event channel on a separate goroutine - eventstream := watcher.Events - // The modifyReceived counter counts IsModify events that are not IsAttrib, - // and the attribReceived counts IsAttrib events (which are also IsModify as - // a consequence). 
- var modifyReceived counter - var attribReceived counter - done := make(chan bool) - go func() { - for event := range eventstream { - // Only count relevant events - if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) { - if event.Op&Write == Write { - modifyReceived.increment() - } - if event.Op&Chmod == Chmod { - attribReceived.increment() - } - t.Logf("event received: %s", event) - } else { - t.Logf("unexpected event received: %s", event) - } - } - done <- true - }() - - // Create a file - // This should add at least one event to the fsnotify event queue - var f *os.File - f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - - f.WriteString("data") - f.Sync() - f.Close() - - // Add a watch for testFile - addWatch(t, watcher, testFile) - - if err := os.Chmod(testFile, 0700); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - // Creating/writing a file changes also the mtime, so IsAttrib should be set to true here - time.Sleep(500 * time.Millisecond) - if modifyReceived.value() != 0 { - t.Fatal("received an unexpected modify event when creating a test file") - } - if attribReceived.value() == 0 { - t.Fatal("fsnotify attribute events have not received after 500 ms") - } - - // Modifying the contents of the file does not set the attrib flag (although eg. the mtime - // might have been modified). - modifyReceived.reset() - attribReceived.reset() - - f, err = os.OpenFile(testFile, os.O_WRONLY, 0) - if err != nil { - t.Fatalf("reopening test file failed: %s", err) - } - - f.WriteString("more data") - f.Sync() - f.Close() - - time.Sleep(500 * time.Millisecond) - - if modifyReceived.value() != 1 { - t.Fatal("didn't receive a modify event after changing test file contents") - } - - if attribReceived.value() != 0 { - t.Fatal("did receive an unexpected attrib event after changing test file contents") - } - - modifyReceived.reset() - attribReceived.reset() - - // Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents - // of the file are not changed though) - if err := os.Chmod(testFile, 0600); err != nil { - t.Fatalf("chmod failed: %s", err) - } - - time.Sleep(500 * time.Millisecond) - - if attribReceived.value() != 1 { - t.Fatal("didn't receive an attribute change after 500ms") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() - t.Log("waiting for the event channel to become closed...") - select { - case <-done: - t.Log("event channel closed") - case <-time.After(1e9): - t.Fatal("event stream was not closed after 1 second") - } - - os.Remove(testFile) -} - -func TestFsnotifyClose(t *testing.T) { - watcher := newWatcher(t) - watcher.Close() - - var done int32 - go func() { - watcher.Close() - atomic.StoreInt32(&done, 1) - }() - - time.Sleep(50e6) // 50 ms - if atomic.LoadInt32(&done) == 0 { - t.Fatal("double Close() test failed: second Close() call didn't return") - } - - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - if err := watcher.Add(testDir); err == nil { - t.Fatal("expected error on Watch() after Close(), got nil") - } -} - -func TestFsnotifyFakeSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("symlinks don't work on Windows.") - } - - watcher := newWatcher(t) - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - var 
errorsReceived counter - // Receive errors on the error channel on a separate goroutine - go func() { - for errors := range watcher.Errors { - t.Logf("Received error: %s", errors) - errorsReceived.increment() - } - }() - - // Count the CREATE events received - var createEventsReceived, otherEventsReceived counter - go func() { - for ev := range watcher.Events { - t.Logf("event received: %s", ev) - if ev.Op&Create == Create { - createEventsReceived.increment() - } else { - otherEventsReceived.increment() - } - } - }() - - addWatch(t, watcher, testDir) - - if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil { - t.Fatalf("Failed to create bogus symlink: %s", err) - } - t.Logf("Created bogus symlink") - - // We expect this event to be received almost immediately, but let's wait 500 ms to be sure - time.Sleep(500 * time.Millisecond) - - // Should not be error, just no events for broken links (watching nothing) - if errorsReceived.value() > 0 { - t.Fatal("fsnotify errors have been received.") - } - if otherEventsReceived.value() > 0 { - t.Fatal("fsnotify other events received on the broken link") - } - - // Except for 1 create event (for the link itself) - if createEventsReceived.value() == 0 { - t.Fatal("fsnotify create events were not received after 500 ms") - } - if createEventsReceived.value() > 1 { - t.Fatal("fsnotify more create events received than expected") - } - - // Try closing the fsnotify instance - t.Log("calling Close()") - watcher.Close() -} - -// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race. -// See https://codereview.appspot.com/103300045/ -// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race -func TestConcurrentRemovalOfWatch(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip("regression test for race only present on darwin") - } - - // Create directory to watch - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - // Create a file before watching directory - testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile") - { - var f *os.File - f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - t.Fatalf("creating test file failed: %s", err) - } - f.Sync() - f.Close() - } - - watcher := newWatcher(t) - defer watcher.Close() - - addWatch(t, watcher, testDir) - - // Test that RemoveWatch can be invoked concurrently, with no data races. 
- removed1 := make(chan struct{}) - go func() { - defer close(removed1) - watcher.Remove(testDir) - }() - removed2 := make(chan struct{}) - go func() { - close(removed2) - watcher.Remove(testDir) - }() - <-removed1 - <-removed2 -} - -func TestClose(t *testing.T) { - // Regression test for #59 bad file descriptor from Close - testDir := tempMkdir(t) - defer os.RemoveAll(testDir) - - watcher := newWatcher(t) - if err := watcher.Add(testDir); err != nil { - t.Fatalf("Expected no error on Add, got %v", err) - } - err := watcher.Close() - if err != nil { - t.Fatalf("Expected no error on Close, got %v.", err) - } -} - -func testRename(file1, file2 string) error { - switch runtime.GOOS { - case "windows", "plan9": - return os.Rename(file1, file2) - default: - cmd := exec.Command("mv", file1, file2) - return cmd.Run() - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go deleted file mode 100644 index 265622d201d..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "syscall" - "time" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan bool // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan bool), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - w.mu.Unlock() - - w.mu.Lock() - ws := w.watches - w.mu.Unlock() - - var err error - for name := range ws { - if e := w.Remove(name); e != nil && err == nil { - err = e - } - } - - // Send "quit" message to the reader goroutine: - w.done <- true - - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - return w.addWatch(name, noteAllEvents) -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = syscall.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - syscall.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -func (w *Watcher) addWatch(name string, flags uint32) error { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return nil - } - - fi, err = os.Lstat(name) - if err != nil { - return nil - } - } - - watchfd, err = syscall.Open(name, openMode, 0700) - if watchfd == -1 { - return err - } - - isDir = fi.IsDir() - } - - const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - syscall.Close(watchfd) - return err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return err - } - } - } - return nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]syscall.Kevent_t, 10) - - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - err := syscall.Close(w.kq) - if err != nil { - w.Errors <- err - } - close(w.Events) - close(w.Errors) - return - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != syscall.EINTR { - w.Errors <- err - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel - w.Events <- event - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - fileDir, _ := filepath.Split(event.Name) - fileDir = filepath.Clean(fileDir) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. 
- if _, err := os.Lstat(fileDir); os.IsExist(err) { - w.sendDirectoryChangeEvents(fileDir) - // FIXME: should this be for events on files or just isDir? - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE { - e.Op |= Remove - } - if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE { - e.Op |= Write - } - if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME { - e.Op |= Rename - } - if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - w.Errors <- err - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - w.Events <- newCreateEvent(filePath) - } - - // like watchDirectoryFiles (but without doing another ReadDir) - if err := w.internalWatch(filePath, fileInfo); err != nil { - return - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= syscall.NOTE_DELETE - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = syscall.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]syscall.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := syscall.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. 
-func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) { - n, err := syscall.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) syscall.Timespec { - return syscall.NsecToTimespec(d.Nanoseconds()) -} diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go deleted file mode 100644 index c57ccb427b9..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "syscall" - -const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go deleted file mode 100644 index 2bec296c5d4..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin - -package fsnotify - -import "syscall" - -// note: This constant is not defined on BSD -const openMode = syscall.O_EVTONLY diff --git a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go b/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go deleted file mode 100644 index 811585227d8..00000000000 --- a/Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error {
- if w.isClosed {
- return errors.New("watcher already closed")
- }
- in := &input{
- op: opAddWatch,
- path: filepath.Clean(name),
- flags: sys_FS_ALL_EVENTS,
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-// Remove stops watching the named file or directory (non-recursively).
-func (w *Watcher) Remove(name string) error {
- in := &input{
- op: opRemoveWatch,
- path: filepath.Clean(name),
- reply: make(chan error),
- }
- w.input <- in
- if err := w.wakeupReader(); err != nil {
- return err
- }
- return <-in.reply
-}
-
-const (
- // Options for AddWatch
- sys_FS_ONESHOT = 0x80000000
- sys_FS_ONLYDIR = 0x1000000
-
- // Events
- sys_FS_ACCESS = 0x1
- sys_FS_ALL_EVENTS = 0xfff
- sys_FS_ATTRIB = 0x4
- sys_FS_CLOSE = 0x18
- sys_FS_CREATE = 0x100
- sys_FS_DELETE = 0x200
- sys_FS_DELETE_SELF = 0x400
- sys_FS_MODIFY = 0x2
- sys_FS_MOVE = 0xc0
- sys_FS_MOVED_FROM = 0x40
- sys_FS_MOVED_TO = 0x80
- sys_FS_MOVE_SELF = 0x800
-
- // Special events
- sys_FS_IGNORED = 0x8000
- sys_FS_Q_OVERFLOW = 0x4000
-)
-
-func newEvent(name string, mask uint32) Event {
- e := Event{Name: name}
- if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
- e.Op |= Create
- }
- if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
- e.Op |= Remove
- }
- if mask&sys_FS_MODIFY == sys_FS_MODIFY {
- e.Op |= Write
- }
- if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
- e.Op |= Rename
- }
- if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
- e.Op |= Chmod
- }
- return e
-}
-
-const (
- opAddWatch = iota
- opRemoveWatch
-)
-
-const (
- provisional uint64 = 1 << (32 + iota)
-)
-
-type input struct {
- op int
- path string
- flags uint32
- reply chan error
-}
-
-type inode struct {
- handle syscall.Handle
- volume uint32
- index uint64
-}
-
-type watch struct {
- ov syscall.Overlapped
- ino *inode // i-number
- path string // Directory path
- mask uint64 // Directory itself is being watched with these notify flags
- names map[string]uint64 // Map of names being watched and their notify flags
- rename string // Remembers the old name while renaming a file
- buf [4096]byte
-}
-
-type indexMap map[uint64]*watch
-type watchMap map[uint32]indexMap
-
-func (w *Watcher) wakeupReader() error {
- e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
- if e != nil {
- return os.NewSyscallError("PostQueuedCompletionStatus", e)
- }
- return nil
-}
-
-func getDir(pathname string) (dir string, err error) {
- attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
- if e != nil {
- return "", os.NewSyscallError("GetFileAttributes", e)
- }
- if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
- dir = pathname
- } else {
- dir, _ = filepath.Split(pathname)
- dir = filepath.Clean(dir)
- }
- return
-}
-
-func getIno(path string) (ino *inode, err error) {
- h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
- syscall.FILE_LIST_DIRECTORY,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil, syscall.OPEN_EXISTING,
- syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
- if e != nil {
- return nil, os.NewSyscallError("CreateFile", e)
- }
- var fi syscall.ByHandleFileInformation
- if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
- syscall.CloseHandle(h)
- return nil, os.NewSyscallError("GetFileInformationByHandle", e)
- }
- ino = &inode{
- handle: h,
- volume: fi.VolumeSerialNumber,
- index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
- }
- return ino, nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) get(ino *inode) *watch {
- if i := m[ino.volume]; i != nil {
- return i[ino.index]
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (m watchMap) set(ino *inode, watch *watch) {
- i := m[ino.volume]
- if i == nil {
- i = make(indexMap)
- m[ino.volume] = i
- }
- i[ino.index] = watch
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) addWatch(pathname string, flags uint64) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
- return nil
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watchEntry := w.watches.get(ino)
- w.mu.Unlock()
- if watchEntry == nil {
- if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
- syscall.CloseHandle(ino.handle)
- return os.NewSyscallError("CreateIoCompletionPort", e)
- }
- watchEntry = &watch{
- ino: ino,
- path: dir,
- names: make(map[string]uint64),
- }
- w.mu.Lock()
- w.watches.set(ino, watchEntry)
- w.mu.Unlock()
- flags |= provisional
- } else {
- syscall.CloseHandle(ino.handle)
- }
- if pathname == dir {
- watchEntry.mask |= flags
- } else {
- watchEntry.names[filepath.Base(pathname)] |= flags
- }
- if err = w.startRead(watchEntry); err != nil {
- return err
- }
- if pathname == dir {
- watchEntry.mask &= ^provisional
- } else {
- watchEntry.names[filepath.Base(pathname)] &= ^provisional
- }
- return nil
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) remWatch(pathname string) error {
- dir, err := getDir(pathname)
- if err != nil {
- return err
- }
- ino, err := getIno(dir)
- if err != nil {
- return err
- }
- w.mu.Lock()
- watch := w.watches.get(ino)
- w.mu.Unlock()
- if watch == nil {
- return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
- }
- if pathname == dir {
- w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
- watch.mask = 0
- } else {
- name := filepath.Base(pathname)
- w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
- delete(watch.names, name)
- }
- return w.startRead(watch)
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) deleteWatch(watch *watch) {
- for name, mask := range watch.names {
- if mask&provisional == 0 {
- w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
- }
- delete(watch.names, name)
- }
- if watch.mask != 0 {
- if watch.mask&provisional == 0 {
- w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
- }
- watch.mask = 0
- }
-}
-
-// Must run within the I/O thread.
-func (w *Watcher) startRead(watch *watch) error {
- if e := syscall.CancelIo(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CancelIo", e)
- w.deleteWatch(watch)
- }
- mask := toWindowsFlags(watch.mask)
- for _, m := range watch.names {
- mask |= toWindowsFlags(m)
- }
- if mask == 0 {
- if e := syscall.CloseHandle(watch.ino.handle); e != nil {
- w.Errors <- os.NewSyscallError("CloseHandle", e)
- }
- w.mu.Lock()
- delete(w.watches[watch.ino.volume], watch.ino.index)
- w.mu.Unlock()
- return nil
- }
- e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
- uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
- if e != nil {
- err := os.NewSyscallError("ReadDirectoryChanges", e)
- if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
- // Watched directory was probably removed
- if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
- if watch.mask&sys_FS_ONESHOT != 0 {
- watch.mask = 0
- }
- }
- err = nil
- }
- w.deleteWatch(watch)
- w.startRead(watch)
- return err
- }
- return nil
-}
-
-// readEvents reads from the I/O completion port, converts the
-// received events into Event objects and sends them via the Events channel.
-// Entry point to the I/O thread.
-func (w *Watcher) readEvents() {
- var (
- n, key uint32
- ov *syscall.Overlapped
- )
- runtime.LockOSThread()
-
- for {
- e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
- watch := (*watch)(unsafe.Pointer(ov))
-
- if watch == nil {
- select {
- case ch := <-w.quit:
- w.mu.Lock()
- var indexes []indexMap
- for _, index := range w.watches {
- indexes = append(indexes, index)
- }
- w.mu.Unlock()
- for _, index := range indexes {
- for _, watch := range index {
- w.deleteWatch(watch)
- w.startRead(watch)
- }
- }
- var err error
- if e := syscall.CloseHandle(w.port); e != nil {
- err = os.NewSyscallError("CloseHandle", e)
- }
- close(w.Events)
- close(w.Errors)
- ch <- err
- return
- case in := <-w.input:
- switch in.op {
- case opAddWatch:
- in.reply <- w.addWatch(in.path, uint64(in.flags))
- case opRemoveWatch:
- in.reply <- w.remWatch(in.path)
- }
- default:
- }
- continue
- }
-
- switch e {
- case syscall.ERROR_MORE_DATA:
- if watch == nil {
- w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
- } else {
- // The i/o succeeded but the buffer is full.
- // In theory we should be building up a full packet.
- // In practice we can get away with just carrying on.
- n = uint32(unsafe.Sizeof(watch.buf))
- }
- case syscall.ERROR_ACCESS_DENIED:
- // Watched directory was probably removed
- w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
- w.deleteWatch(watch)
- w.startRead(watch)
- continue
- case syscall.ERROR_OPERATION_ABORTED:
- // CancelIo was called on this handle
- continue
- default:
- w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
- continue
- case nil:
- }
-
- var offset uint32
- for {
- if n == 0 {
- w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
- w.Errors <- errors.New("short read in readEvents()")
- break
- }
-
- // Point "raw" to the event in the buffer
- raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
- buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
- name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
- fullname := watch.path + "\\" + name
-
- var mask uint64
- switch raw.Action {
- case syscall.FILE_ACTION_REMOVED:
- mask = sys_FS_DELETE_SELF
- case syscall.FILE_ACTION_MODIFIED:
- mask = sys_FS_MODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- watch.rename = name
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- if watch.names[watch.rename] != 0 {
- watch.names[name] |= watch.names[watch.rename]
- delete(watch.names, watch.rename)
- mask = sys_FS_MOVE_SELF
- }
- }
-
- sendNameEvent := func() {
- if w.sendEvent(fullname, watch.names[name]&mask) {
- if watch.names[name]&sys_FS_ONESHOT != 0 {
- delete(watch.names, name)
- }
- }
- }
- if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
- sendNameEvent()
- }
- if raw.Action == syscall.FILE_ACTION_REMOVED {
- w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
- delete(watch.names, name)
- }
- if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
- if watch.mask&sys_FS_ONESHOT != 0 {
- watch.mask = 0
- }
- }
- if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
- fullname = watch.path + "\\" + watch.rename
- sendNameEvent()
- }
-
- // Move to the next event in the buffer
- if raw.NextEntryOffset == 0 {
- break
- }
- offset += raw.NextEntryOffset
-
- // Error!
- if offset >= n {
- w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
- break
- }
- }
-
- if err := w.startRead(watch); err != nil {
- w.Errors <- err
- }
- }
-}
-
-func (w *Watcher) sendEvent(name string, mask uint64) bool {
- if mask == 0 {
- return false
- }
- event := newEvent(name, uint32(mask))
- select {
- case ch := <-w.quit:
- w.quit <- ch
- case w.Events <- event:
- }
- return true
-}
-
-func toWindowsFlags(mask uint64) uint32 {
- var m uint32
- if mask&sys_FS_ACCESS != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
- }
- if mask&sys_FS_MODIFY != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
- }
- if mask&sys_FS_ATTRIB != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
- }
- if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
- m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
- }
- return m
-}
-
-func toFSnotifyFlags(action uint32) uint64 {
- switch action {
- case syscall.FILE_ACTION_ADDED:
- return sys_FS_CREATE
- case syscall.FILE_ACTION_REMOVED:
- return sys_FS_DELETE
- case syscall.FILE_ACTION_MODIFIED:
- return sys_FS_MODIFY
- case syscall.FILE_ACTION_RENAMED_OLD_NAME:
- return sys_FS_MOVED_FROM
- case syscall.FILE_ACTION_RENAMED_NEW_NAME:
- return sys_FS_MOVED_TO
- }
- return 0
-}
diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go
index bf173bbbaa1..f8011bd9502 100644
--- a/cmd/ipfswatch/main.go
+++ b/cmd/ipfswatch/main.go
@@ -8,7 +8,6 @@ import (
 "path/filepath"
 homedir "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir"
- fsnotify "github.com/ipfs/go-ipfs/Godeps/_workspace/src/gopkg.in/fsnotify.v1"
 commands "github.com/ipfs/go-ipfs/commands"
 core "github.com/ipfs/go-ipfs/core"
 corehttp "github.com/ipfs/go-ipfs/core/corehttp"
@@ -17,6 +16,7 @@ import (
 fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
 process "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess"
 context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
+ fsnotify "gx/ipfs/QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv/fsnotify.v1"
 )
 var http = flag.Bool("http", false, "expose IPFS HTTP API")
diff --git a/core/commands/bitswap.go b/core/commands/bitswap.go
index b3e75e68d30..1658b3832d0 100644
--- a/core/commands/bitswap.go
+++ b/core/commands/bitswap.go
@@ -5,7 +5,7 @@ import (
 "fmt"
 "io"
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize"
+ "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"
 key "github.com/ipfs/go-ipfs/blocks/key"
 cmds "github.com/ipfs/go-ipfs/commands"
diff --git a/core/commands/stat.go b/core/commands/stat.go
index 832ccab0534..5c92130794a 100644
--- a/core/commands/stat.go
+++ b/core/commands/stat.go
@@ -7,7 +7,7 @@ import (
 "io"
 "time"
- humanize "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize"
+ humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"
 cmds "github.com/ipfs/go-ipfs/commands"
 peer "gx/ipfs/QmQGwpJy9P4yXZySmqkZEXCmbBpJUb8xntCv8Ca4taZwDC/go-libp2p-peer"
diff --git a/core/commands/sysdiag.go b/core/commands/sysdiag.go
index 52d936ce884..4ced66fc211 100644
--- a/core/commands/sysdiag.go
+++ b/core/commands/sysdiag.go
@@ -8,8 +8,8 @@ import (
 cmds "github.com/ipfs/go-ipfs/commands"
 config "github.com/ipfs/go-ipfs/repo/config"
- sysi "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/go-sysinfo"
 manet "gx/ipfs/QmUBa4w6CbHJUMeGJPDiMEDWsM93xToK1fTnFXnrC8Hksw/go-multiaddr-net"
+ sysi "gx/ipfs/QmZRjKbHa6DenStpQJFiaPcEwkZqrx7TH6xTf342LDU3qM/go-sysinfo"
 )
 var sysDiagCmd = &cmds.Command{
diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go
index 32269a17029..f6f8705598f 100644
--- a/core/corehttp/gateway_handler.go
+++ b/core/corehttp/gateway_handler.go
@@ -10,7 +10,7 @@ import (
 "strings"
 "time"
- humanize "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize"
+ humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"
 "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
 key "github.com/ipfs/go-ipfs/blocks/key"
diff --git a/core/corerepo/gc.go b/core/corerepo/gc.go
index 355b7f5f26b..3d6a5297540 100644
--- a/core/corerepo/gc.go
+++ b/core/corerepo/gc.go
@@ -4,11 +4,11 @@ import (
 "errors"
 "time"
- humanize "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/dustin/go-humanize"
 key "github.com/ipfs/go-ipfs/blocks/key"
 "github.com/ipfs/go-ipfs/core"
 gc "github.com/ipfs/go-ipfs/pin/gc"
 repo "github.com/ipfs/go-ipfs/repo"
+ humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"
 context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
 logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log"
 )
diff --git a/package.json b/package.json
index c63fb5eab31..1d03826cc96 100644
--- a/package.json
+++ b/package.json
@@ -111,6 +111,36 @@
 "hash": "QmQzTLDsi3a37CJyMDBXnjiHKQpth3AGS1yqwU57FfLwfG",
 "name": "cors",
 "version": "0.0.0"
+ },
+ {
+ "author": "dustin",
+ "hash": "QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K",
+ "name": "go-humanize",
+ "version": "0.0.0"
+ },
+ {
+ "author": "facebookgo",
+ "hash": "QmdYwCmx8pZRkzdcd8MhmLJqYVoVTC1aGsy5Q4reMGLNLg",
+ "name": "atomicfile",
+ "version": "0.0.0"
+ },
+ {
+ "author": "syndtr",
+ "hash": "QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g",
+ "name": "goleveldb",
+ "version": "0.0.1"
+ },
+ {
+ "author": "whyrusleeping",
+ "hash": "QmZRjKbHa6DenStpQJFiaPcEwkZqrx7TH6xTf342LDU3qM",
+ "name": "go-sysinfo",
+ "version": "0.0.0"
+ },
+ {
+ "author": "kubuxu",
+ "hash": "QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv",
+ "name": "fsnotify.v1",
+ "version": "1.3.0"
 }
 ],
 "gxVersion": "0.4.0",
diff --git a/repo/fsrepo/defaultds.go b/repo/fsrepo/defaultds.go
index c9fef0f122a..9d5fe268433 100644
--- a/repo/fsrepo/defaultds.go
+++ b/repo/fsrepo/defaultds.go
@@ -9,10 +9,10 @@ import (
 levelds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/leveldb"
 "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/measure"
 mount "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore/syncmount"
- ldbopts "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
 repo "github.com/ipfs/go-ipfs/repo"
 config "github.com/ipfs/go-ipfs/repo/config"
 "github.com/ipfs/go-ipfs/thirdparty/dir"
+ ldbopts "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt"
 )
 const (
diff --git a/repo/fsrepo/serialize/serialize.go b/repo/fsrepo/serialize/serialize.go
index 2e0a37aa256..6ef2c23cb62 100644
--- a/repo/fsrepo/serialize/serialize.go
+++ b/repo/fsrepo/serialize/serialize.go
@@ -8,10 +8,10 @@ import (
 "os"
 "path/filepath"
- "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/facebookgo/atomicfile"
 "github.com/ipfs/go-ipfs/repo/config"
 "gx/ipfs/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1/go-ipfs-util"
 logging "gx/ipfs/QmaDNZ4QMdBdku1YZWBysufYyoQt1negQGNav6PLYarbY8/go-log"
+ "gx/ipfs/QmdYwCmx8pZRkzdcd8MhmLJqYVoVTC1aGsy5Q4reMGLNLg/atomicfile"
 )
 var log = logging.Logger("fsrepo")
diff --git a/test/sharness/t0220-bitswap.sh b/test/sharness/t0220-bitswap.sh
index eb3403138df..67ba2f33773 100755
--- a/test/sharness/t0220-bitswap.sh
+++ b/test/sharness/t0220-bitswap.sh
@@ -28,7 +28,7 @@ bitswap status
 provides buffer: 0 / 256
 blocks received: 0
 dup blocks received: 0
- dup data received: 0B
+ dup data received: 0 B
 wantlist [1 keys]
 $NONEXIST
 partners [0]
@@ -68,7 +68,7 @@ bitswap status
 provides buffer: 0 / 256
 blocks received: 0
 dup blocks received: 0
- dup data received: 0B
+ dup data received: 0 B
 wantlist [0 keys]
 partners [0]
 EOF