Skip to content

Commit

Permalink
Merge pull request #2042 from influxdb/more_int_tests
Browse files Browse the repository at this point in the history
TestServer_LimitAndOffset to integration test
  • Loading branch information
otoolep committed Mar 21, 2015
2 parents 57a1f5e + 2a4032a commit 2e5a4ab
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 51 deletions.
28 changes: 28 additions & 0 deletions cmd/influxd/server_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,14 @@ var mergeMany = func(t *testing.T, node *Node, database, retention string) {
}
}

// limitAndOffset seeds nine points into nine distinct series (host tags
// server-1 through server-9) so that SLIMIT/SOFFSET queries in the test
// table have a known set of series to page over.
var limitAndOffset = func(t *testing.T, node *Node, database, retention string) {
	for n := 1; n <= 9; n++ {
		// One point per host: timestamp, host suffix, and value all track n.
		ts := time.Unix(int64(n), int64(0)).Format(time.RFC3339)
		point := fmt.Sprintf(`{"database": "%s", "retentionPolicy": "%s", "points": [{"name": "cpu", "timestamp": "%s", "tags": {"region": "us-east", "host": "server-%d"}, "fields": {"value": %d}}]}`,
			database, retention, ts, n, n)
		write(t, node, point)
	}
}

// runTests_Errors tests some basic error cases.
func runTests_Errors(t *testing.T, nodes Cluster) {
t.Logf("Running tests against %d-node cluster", len(nodes))
Expand Down Expand Up @@ -371,6 +379,26 @@ func runTestsData(t *testing.T, testName string, nodes Cluster, database, retent
expected: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`,
},

// Limit and offset
{
reset: true,
name: "limit and offset",
writeFn: limitAndOffset,
query: `SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 1`,
queryDb: "%DB%",
expected: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
{
query: `SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 3`,
queryDb: "%DB%",
expected: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},
{
query: `SELECT count(value) FROM cpu GROUP BY * SLIMIT 3 SOFFSET 8`,
queryDb: "%DB%",
expected: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`,
},

// FROM /regex/
{
reset: true,
Expand Down
51 changes: 0 additions & 51 deletions server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1424,57 +1424,6 @@ func TestServer_RawDataReturnsInOrder(t *testing.T) {
}
}

// Ensure that SLIMIT and SOFFSET correctly limit and page through the
// distinct series produced by a GROUP BY * query.
func TestServer_LimitAndOffset(t *testing.T) {
	c := test.NewMessagingClient()
	defer c.Close()
	s := OpenServer(c)
	defer s.Close()
	s.CreateDatabase("foo")
	s.CreateRetentionPolicy("foo", &influxdb.RetentionPolicy{Name: "raw", Duration: 1 * time.Hour})
	s.SetDefaultRetentionPolicy("foo", "raw")

	// Write nine points, each with a unique host tag, yielding nine distinct
	// series for the queries below to page over.
	for i := 1; i < 10; i++ {
		host := fmt.Sprintf("server-%d", i)
		s.MustWriteSeries("foo", "raw", []influxdb.Point{{Name: "cpu", Tags: map[string]string{"region": "us-east", "host": host}, Timestamp: time.Unix(int64(i), 0), Fields: map[string]interface{}{"value": float64(i)}}})
	}

	// SLIMIT greater than the series count returns every series.
	results := s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 20`), "foo", nil)
	if res := results.Results[0]; res.Err != nil {
		t.Fatalf("unexpected error during COUNT: %s", res.Err)
	} else if len(res.Series) != 9 {
		t.Fatalf("expected 9 series but got %d", len(res.Series))
	}

	// SOFFSET 1 skips server-1; SLIMIT 2 returns server-2 and server-3.
	results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 1`), "foo", nil)
	expected := `{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
	if res := results.Results[0]; res.Err != nil {
		t.Fatalf("unexpected error during COUNT: %s", res.Err)
	} else if s := mustMarshalJSON(res); s != expected {
		t.Fatalf("unexpected row(0) during COUNT:\n exp: %s\n got: %s", expected, s)
	}

	// SOFFSET 3 with SLIMIT 2 returns server-4 and server-5.
	results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 2 SOFFSET 3`), "foo", nil)
	expected = `{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}`
	if res := results.Results[0]; res.Err != nil {
		t.Fatalf("unexpected error during COUNT: %s", res.Err)
	} else if s := mustMarshalJSON(res); s != expected {
		t.Fatalf("unexpected row(0) during COUNT:\n exp: %s\n got: %s", expected, s)
	}

	// SLIMIT extending past the last series returns only what remains (server-9).
	results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 3 SOFFSET 8`), "foo", nil)
	if res := results.Results[0]; res.Err != nil {
		t.Fatalf("unexpected error during COUNT: %s", res.Err)
	} else if s := mustMarshalJSON(res); s != `{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}` {
		t.Fatalf("unexpected row(0) during COUNT: %s", s)
	}

	// SOFFSET beyond the series count must not error (it returns nothing).
	results = s.ExecuteQuery(MustParseQuery(`SELECT count(value) FROM cpu GROUP BY * SLIMIT 3 SOFFSET 20`), "foo", nil)
	if res := results.Results[0]; res.Err != nil {
		t.Fatalf("unexpected error during COUNT: %s", res.Err)
	}
}

func TestServer_CreateShardGroupIfNotExist(t *testing.T) {
c := test.NewMessagingClient()
defer c.Close()
Expand Down

0 comments on commit 2e5a4ab

Please sign in to comment.