diff --git a/.circleci/config.yml b/.circleci/config.yml index e339d02a5e6d9..90e119646a18d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -47,7 +47,7 @@ workflows: # https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/ .defaults: &defaults docker: - - image: grafana/loki-build-image:0.9.0 + - image: grafana/loki-build-image:0.9.1 working_directory: /src/loki jobs: diff --git a/.drone/drone.yml b/.drone/drone.yml index d9d7d8b8c6bfa..870cd025affcb 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -12,28 +12,28 @@ workspace: steps: - name: test - image: grafana/loki-build-image:0.9.0 + image: grafana/loki-build-image:0.9.1 commands: - make BUILD_IN_CONTAINER=false test depends_on: - clone - name: lint - image: grafana/loki-build-image:0.9.0 + image: grafana/loki-build-image:0.9.1 commands: - make BUILD_IN_CONTAINER=false lint depends_on: - clone - name: check-generated-files - image: grafana/loki-build-image:0.9.0 + image: grafana/loki-build-image:0.9.1 commands: - make BUILD_IN_CONTAINER=false check-generated-files depends_on: - clone - name: check-mod - image: grafana/loki-build-image:0.9.0 + image: grafana/loki-build-image:0.9.1 commands: - make BUILD_IN_CONTAINER=false check-mod depends_on: @@ -562,7 +562,7 @@ platform: steps: - name: trigger - image: grafana/loki-build-image:0.9.0 + image: grafana/loki-build-image:0.9.1 commands: - ./tools/deploy.sh environment: diff --git a/Makefile b/Makefile index efc57a7dc5b43..43fc03985d559 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ IMAGE_NAMES := $(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)%, # make BUILD_IN_CONTAINER=false target # or you can override this with an environment variable BUILD_IN_CONTAINER ?= true -BUILD_IMAGE_VERSION := 0.9.0 +BUILD_IMAGE_VERSION := 0.9.1 # Docker image info IMAGE_PREFIX ?= grafana diff --git a/cmd/docker-driver/Dockerfile b/cmd/docker-driver/Dockerfile index a5f45d87ec0a3..49a70e44d5384 100644 --- a/cmd/docker-driver/Dockerfile +++ b/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.9.0 +ARG BUILD_IMAGE=grafana/loki-build-image:0.9.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index 715bd855fc3b3..04baa9af175a5 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.9.0 +ARG BUILD_IMAGE=grafana/loki-build-image:0.9.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index 5511c2aed59b8..cef4419dcd913 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.9.0 +ARG BUILD_IMAGE=grafana/loki-build-image:0.9.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . 
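The bumped `grafana/loki-build-image` tag is consumed through the Makefile's `BUILD_IN_CONTAINER` switch shown above, so CI and local builds pick up 0.9.1 from one place. As a quick sketch of how the two modes are invoked (based only on the Makefile comment and the Drone steps above; the `test` target stands in for any target):

```bash
# Default: the target runs inside the build image pinned by BUILD_IMAGE_VERSION (now 0.9.1).
make test

# Run the same target directly on the host, as the CI steps above do:
make BUILD_IN_CONTAINER=false test

# The variable is set with ?=, so it can also come from the environment:
BUILD_IN_CONTAINER=false make test
```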
diff --git a/cmd/promtail/Dockerfile.cross b/cmd/promtail/Dockerfile.cross index cb0ee2a15c364..00fbe0100c48d 100644 --- a/cmd/promtail/Dockerfile.cross +++ b/cmd/promtail/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.9.0 +ARG BUILD_IMAGE=grafana/loki-build-image:0.9.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . @@ -23,4 +23,3 @@ COPY --from=build /src/loki/cmd/promtail/promtail /usr/bin/promtail COPY cmd/promtail/promtail-local-config.yaml /etc/promtail/local-config.yaml COPY cmd/promtail/promtail-docker-config.yaml /etc/promtail/docker-config.yaml ENTRYPOINT ["/usr/bin/promtail"] - diff --git a/docs/api.md b/docs/api.md index 8581ffc450778..06ddb2e7bf7b2 100644 --- a/docs/api.md +++ b/docs/api.md @@ -35,7 +35,7 @@ These endpoints are exposed by all components: - [`GET /ready`](#get-ready) - [`GET /metrics`](#get-metrics) -These endpoints are exposed by just the querier: +These endpoints are exposed by the querier and the frontend: - [`GET /loki/api/v1/query`](#get-lokiapiv1query) - [`GET /loki/api/v1/query_range`](#get-lokiapiv1query_range) @@ -86,7 +86,7 @@ query parameters support the following values: - `time`: The evaluation time for the query as a nanosecond Unix epoch. Defaults to now. - `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`. Defaults to `backward`. -In microservices mode, `/loki/api/v1/query` is exposed by the querier. +In microservices mode, `/loki/api/v1/query` is exposed by the querier and the frontend. @@ -95,7 +95,8 @@ Response: ``` { "status": "success", "data": { "resultType": "vector" | "streams", - "result": [<vector value>] | [<stream value>] + "result": [<vector value>] | [<stream value>], + "stats" : [<statistics>] } } ``` @@ -131,6 +132,8 @@ And `<stream value>` is: } ``` +See [statistics](#statistics) for information about the statistics returned by Loki. + ### Examples ```bash @@ -165,7 +168,10 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query" --data-urlencode 'query= "37.69" ] } - ] + ], + "stats": { + ... + } } } ``` @@ -192,9 +198,12 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query" --data-urlencode 'query= "1568234269716526880", "bar" ] ] } - ] + ], + "stats": { + ... + } } } ``` @@ -216,7 +225,7 @@ find log streams for particular labels. Because the index store is spread out by time, the time span covered by `start` and `end`, if large, may cause additional load against the index server and result in a slow query. -In microservices mode, `/loki/api/v1/query_range` is exposed by the querier. +In microservices mode, `/loki/api/v1/query_range` is exposed by the querier and the frontend. @@ -226,6 +235,7 @@ Response: ``` { "status": "success", "data": { "resultType": "matrix" | "streams", - "result": [<matrix value>] | [<stream value>] + "result": [<matrix value>] | [<stream value>], + "stats" : [<statistics>] } } ``` @@ -261,6 +271,8 @@ And `<stream value>` is: } ``` +See [statistics](#statistics) for information about the statistics returned by Loki. + ### Examples ```bash @@ -308,7 +320,10 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query_range" --data-urlencode ' ] ] } - ] + ], + "stats": { + ... + } } } ``` @@ -337,7 +352,10 @@ $ curl -G -s "http://localhost:3100/loki/api/v1/query_range" --data-urlencode ' ] ] } - ] + ], + "stats": { + ... + } } } ``` @@ -564,7 +582,7 @@ support the following values: - `direction`: Determines the sort order of logs. Supported values are `forward` or `backward`.
Defaults to `backward`. - `regexp`: a regex to filter the returned results -In microservices mode, `/api/prom/query` is exposed by the querier. +In microservices mode, `/api/prom/query` is exposed by the querier and the frontend. Note that a larger time span between `start` and `end` will cause additional load on Loki and the index store, resulting in slower queries. @@ -585,10 +603,13 @@ Response: ``` { "streams": [ { "labels": "<LogQL label key-value pairs>", "entries": [ { "ts": "<RFC3339Nano string>", "line": "<log line>" } ], }, ... - ] + ], + "stats": [<statistics>] } ``` +See [statistics](#statistics) for information about the statistics returned by Loki. + ### Examples ```bash @@ -608,7 +629,10 @@ $ curl -G -s "http://localhost:3100/api/prom/query" --data-urlencode '{foo="bar"} } ] } - ] + ], + "stats": { + ... + } } ``` @@ -822,3 +846,49 @@ $ curl -s "http://localhost:3100/loki/api/v1/series" --data-urlencode 'match={co ] } ``` + +## Statistics + +Query endpoints such as `/api/prom/query`, `/loki/api/v1/query` and `/loki/api/v1/query_range` return a set of statistics about the query execution. These statistics allow users to understand the amount of data processed and at what speed. + +The example below shows all possible statistics returned, with their respective descriptions. + +```json +{ + "status": "success", + "data": { + "resultType": "streams", + "result": [], + "stats": { + "ingester" : { + "compressedBytes": 0, // Total bytes of compressed chunks (blocks) processed by ingesters + "decompressedBytes": 0, // Total bytes decompressed and processed by ingesters + "decompressedLines": 0, // Total lines decompressed and processed by ingesters + "headChunkBytes": 0, // Total bytes read from ingesters head chunks + "headChunkLines": 0, // Total lines read from ingesters head chunks + "totalBatches": 0, // Total batches sent by ingesters + "totalChunksMatched": 0, // Total chunks matched by ingesters + "totalDuplicates": 0, // Total number of duplicates found by ingesters + "totalLinesSent": 0, // Total lines sent by ingesters + "totalReached": 0 // Number of ingesters reached
+ }, + "store": { + "compressedBytes": 0, // Total bytes of compressed chunks (blocks) processed by the store + "decompressedBytes": 0, // Total bytes decompressed and processed by the store + "decompressedLines": 0, // Total lines decompressed and processed by the store + "chunksDownloadTime": 0, // Total time spent downloading chunks in seconds (float) + "totalChunksRef": 0, // Total chunks found in the index for the current query + "totalChunksDownloaded": 0, // Total number of chunks downloaded + "totalDuplicates": 0 // Total number of duplicates removed from replication + }, + "summary": { + "bytesProcessedPerSeconds": 0, // Total bytes processed per second + "execTime": 0, // Total execution time in seconds (float) + "linesProcessedPerSeconds": 0, // Total lines processed per second + "totalBytesProcessed":0, // Total amount of bytes processed overall for this request + "totalLinesProcessed":0 // Total amount of lines processed overall for this request + } + } + } +} +``` diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index a6d53ff8ce592..1bc496669f8df 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -41,8 +41,8 @@ COPY --from=drone /go/bin/drone /usr/bin/drone # It's possible this can be revisited in newer versions of Go if the behavior around GOPATH vs GO111MODULES changes RUN GO111MODULE=on go get \ github.com/golang/protobuf/protoc-gen-go@v1.3.0 \ - github.com/gogo/protobuf/protoc-gen-gogoslick@v1.3.0 \ - github.com/gogo/protobuf/gogoproto@v1.3.0 \ + github.com/gogo/protobuf/protoc-gen-gogoslick@v1.2.1 \ + github.com/gogo/protobuf/gogoproto@v1.2.1 \ github.com/go-delve/delve/cmd/dlv \ # Due to the lack of a proper release tag, we use the commit hash of # https://github.com/golang/tools/releases v0.1.7 diff --git a/pkg/iter/iterator.go b/pkg/iter/iterator.go index 2cc81ae99c915..72a7926da3328 100644 --- a/pkg/iter/iterator.go +++ b/pkg/iter/iterator.go @@ -21,6 +21,16 @@ type EntryIterator interface { Close() error } +type noOpIterator struct{} + +var NoopIterator = noOpIterator{} // an EntryIterator that has no entries and is always exhausted + +func (noOpIterator) Next() bool { return false } +func (noOpIterator) Error() error { return nil } +func (noOpIterator) Labels() string { return "" } +func (noOpIterator) Entry() logproto.Entry { return logproto.Entry{} } +func (noOpIterator) Close() error { return nil } + // streamIterator iterates over entries in a stream. type streamIterator struct { i int diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go index 167744944a896..5770898baf7f1 100644 --- a/pkg/loghttp/query.go +++ b/pkg/loghttp/query.go @@ -9,6 +9,7 @@ import ( "unsafe" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/stats" json "github.com/json-iterator/go" "github.com/prometheus/common/model" ) @@ -55,8 +56,9 @@ type ResultValue interface { //QueryResponseData represents the http json response to a query type QueryResponseData struct { - ResultType ResultType `json:"resultType"` - Result ResultValue `json:"result"` + ResultType ResultType `json:"resultType"` + Result ResultValue `json:"result"` + Statistics stats.Result `json:"stats"` } // Type implements the promql.Value interface @@ -98,8 +100,9 @@ type Entry struct { // UnmarshalJSON implements the json.Unmarshaler interface.
func (q *QueryResponseData) UnmarshalJSON(data []byte) error { unmarshal := struct { - Type ResultType `json:"resultType"` - Result json.RawMessage `json:"result"` + Type ResultType `json:"resultType"` + Result json.RawMessage `json:"result"` + Statistics stats.Result `json:"stats"` }{} err := json.Unmarshal(data, &unmarshal) @@ -133,6 +136,7 @@ func (q *QueryResponseData) UnmarshalJSON(data []byte) error { q.ResultType = unmarshal.Type q.Result = value + q.Statistics = unmarshal.Statistics return nil } diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 08b4ab15d7b44..52428bf978b03 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -13,11 +13,8 @@ import ( _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" io "io" math "math" - math_bits "math/bits" reflect "reflect" strconv "strconv" strings "strings" @@ -34,7 +31,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Direction int32 @@ -74,7 +71,7 @@ func (m *PushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_PushRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -116,7 +113,7 @@ func (m *PushResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_PushResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -156,7 +153,7 @@ func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -227,7 +224,7 @@ func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -273,7 +270,7 @@ func (m *LabelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_LabelRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -337,7 +334,7 @@ func (m *LabelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_LabelResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -381,7 +378,7 @@ func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Stream.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -432,7 +429,7 @@ func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return 
xxx_messageInfo_Entry.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -485,7 +482,7 @@ func (m *TailRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_TailRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -550,7 +547,7 @@ func (m *TailResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_TailResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -602,7 +599,7 @@ func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -659,7 +656,7 @@ func (m *SeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_SeriesResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -702,7 +699,7 @@ func (m *SeriesIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return xxx_messageInfo_SeriesIdentifier.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -747,7 +744,7 @@ func (m *DroppedStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return xxx_messageInfo_DroppedStream.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -807,7 +804,7 @@ func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -872,7 +869,7 @@ func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -922,7 +919,7 @@ func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -964,7 +961,7 @@ func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]by return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -999,7 +996,7 @@ func (m *TailersCountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return xxx_messageInfo_TailersCountRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -1035,7 +1032,7 @@ func (m *TailersCountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return xxx_messageInfo_TailersCountResponse.Marshal(b, m, deterministic) } else { b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -2061,14 +2058,6 @@ type PusherServer interface { Push(context.Context, *PushRequest) (*PushResponse, error) } -// UnimplementedPusherServer can be embedded to have forward compatible implementations. -type UnimplementedPusherServer struct { -} - -func (*UnimplementedPusherServer) Push(ctx context.Context, req *PushRequest) (*PushResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") -} - func RegisterPusherServer(s *grpc.Server, srv PusherServer) { s.RegisterService(&_Pusher_serviceDesc, srv) } @@ -2223,26 +2212,6 @@ type QuerierServer interface { TailersCount(context.Context, *TailersCountRequest) (*TailersCountResponse, error) } -// UnimplementedQuerierServer can be embedded to have forward compatible implementations. -type UnimplementedQuerierServer struct { -} - -func (*UnimplementedQuerierServer) Query(req *QueryRequest, srv Querier_QueryServer) error { - return status.Errorf(codes.Unimplemented, "method Query not implemented") -} -func (*UnimplementedQuerierServer) Label(ctx context.Context, req *LabelRequest) (*LabelResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Label not implemented") -} -func (*UnimplementedQuerierServer) Tail(req *TailRequest, srv Querier_TailServer) error { - return status.Errorf(codes.Unimplemented, "method Tail not implemented") -} -func (*UnimplementedQuerierServer) Series(ctx context.Context, req *SeriesRequest) (*SeriesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Series not implemented") -} -func (*UnimplementedQuerierServer) TailersCount(ctx context.Context, req *TailersCountRequest) (*TailersCountResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TailersCount not implemented") -} - func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) { s.RegisterService(&_Querier_serviceDesc, srv) } @@ -2429,14 +2398,6 @@ type IngesterServer interface { TransferChunks(Ingester_TransferChunksServer) error } -// UnimplementedIngesterServer can be embedded to have forward compatible implementations. 
-type UnimplementedIngesterServer struct { -} - -func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { - return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") -} - func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { s.RegisterService(&_Ingester_serviceDesc, srv) } @@ -2484,7 +2445,7 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ func (m *PushRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2492,36 +2453,29 @@ func (m *PushRequest) Marshal() (dAtA []byte, err error) { } func (m *PushRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PushRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Streams { dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *PushResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2529,22 +2483,17 @@ func (m *PushResponse) Marshal() (dAtA []byte, err error) { } func (m *PushResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PushResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - return len(dAtA) - i, nil + return i, nil } func (m *QueryRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2552,55 +2501,49 @@ func (m *QueryRequest) Marshal() (dAtA []byte, err error) { } func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Direction != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Direction)) - i-- - dAtA[i] = 0x28 - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintLogproto(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err2 != nil { - return 0, err2 + if len(m.Selector) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) } - i -= n2 - i = encodeVarintLogproto(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a if m.Limit != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) } - if 
len(m.Selector) > 0 { - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Direction != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Direction)) } - return len(dAtA) - i, nil + return i, nil } func (m *QueryResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2608,36 +2551,29 @@ func (m *QueryResponse) Marshal() (dAtA []byte, err error) { } func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Streams { dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *LabelRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2645,59 +2581,53 @@ func (m *LabelRequest) Marshal() (dAtA []byte, err error) { } func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.End != nil { - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintLogproto(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x22 - } - if m.Start != nil { - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintLogproto(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x1a + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.Values { - i-- + dAtA[i] = 0x10 + i++ if m.Values { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 + i++ } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Start != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start))) + n3, err := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.End != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.End))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 } - return len(dAtA) - i, nil + return i, nil } func (m *LabelResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2705,31 +2635,32 @@ func (m *LabelResponse) Marshal() (dAtA []byte, err error) { } func (m *LabelResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- + for _, s := range m.Values { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *Stream) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2737,43 +2668,35 @@ func (m *Stream) Marshal() (dAtA []byte, err error) { } func (m *Stream) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + if len(m.Labels) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) + } if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Entries { dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Labels) > 0 { - i -= len(m.Labels) - copy(dAtA[i:], m.Labels) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *Entry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2781,37 +2704,31 @@ func (m *Entry) Marshal() (dAtA []byte, err error) { } func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) + n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, 
err + } + i += n5 if len(m.Line) > 0 { - i -= len(m.Line) - copy(dAtA[i:], m.Line) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) } - n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err5 != nil { - return 0, err5 - } - i -= n5 - i = encodeVarintLogproto(dAtA, i, uint64(n5)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *TailRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2819,47 +2736,41 @@ func (m *TailRequest) Marshal() (dAtA []byte, err error) { } func (m *TailRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TailRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintLogproto(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x2a - if m.Limit != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x20 + if len(m.Query) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query))) + i += copy(dAtA[i:], m.Query) } if m.DelayFor != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor)) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if m.Limit != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + } + dAtA[i] = 0x2a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n6 + return i, nil } func (m *TailResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2867,48 +2778,39 @@ func (m *TailResponse) Marshal() (dAtA []byte, err error) { } func (m *TailResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TailResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.DroppedStreams) > 0 { - for iNdEx := len(m.DroppedStreams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DroppedStreams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if m.Stream != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Stream.Size())) + n7, err := m.Stream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n7 } - if m.Stream != nil { - { - size, err := m.Stream.MarshalToSizedBuffer(dAtA[:i]) + if len(m.DroppedStreams) > 0 { + for _, msg := range m.DroppedStreams { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2916,47 +2818,48 @@ func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { } func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n9, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- + for _, s := range m.Groups { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintLogproto(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x12 - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintLogproto(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *SeriesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2964,36 +2867,29 @@ func (m *SeriesResponse) Marshal() (dAtA []byte, err error) { } func (m *SeriesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Series) > 0 { - for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Series[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Series { dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *SeriesIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, 
err } @@ -3001,41 +2897,34 @@ func (m *SeriesIdentifier) Marshal() (dAtA []byte, err error) { } func (m *SeriesIdentifier) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SeriesIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintLogproto(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintLogproto(dAtA, i, uint64(len(k))) - i-- + for k, _ := range m.Labels { dAtA[i] = 0xa - i = encodeVarintLogproto(dAtA, i, uint64(baseI-i)) - i-- + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + sovLogproto(uint64(len(v))) + i = encodeVarintLogproto(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - return len(dAtA) - i, nil + return i, nil } func (m *DroppedStream) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3043,45 +2932,39 @@ func (m *DroppedStream) Marshal() (dAtA []byte, err error) { } func (m *DroppedStream) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DroppedStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Labels) > 0 { - i -= len(m.Labels) - copy(dAtA[i:], m.Labels) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) - i-- - dAtA[i] = 0x1a - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.To):]) - if err10 != nil { - return 0, err10 + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.From))) + n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i:]) + if err != nil { + return 0, err } - i -= n10 - i = encodeVarintLogproto(dAtA, i, uint64(n10)) - i-- + i += n10 dAtA[i] = 0x12 - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.From):]) - if err11 != nil { - return 0, err11 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.To))) + n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i:]) + if err != nil { + return 0, err } - i -= n11 - i = encodeVarintLogproto(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n11 + if len(m.Labels) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) + } + return i, nil } func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3089,64 +2972,53 @@ func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { } func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + if len(m.FromIngesterId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.FromIngesterId))) + i += copy(dAtA[i:], m.FromIngesterId) + } + if len(m.UserId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) } if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogproto(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Labels { dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa + if len(m.Chunks) > 0 { + for _, msg := range m.Chunks { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return len(dAtA) - i, nil + return i, nil } func (m *LabelPair) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3154,36 +3026,29 @@ func (m *LabelPair) Marshal() (dAtA []byte, err error) { } func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - return len(dAtA) - i, nil + return i, nil } func (m *Chunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3191,29 +3056,23 @@ func (m *Chunk) Marshal() (dAtA []byte, err error) { } func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, 
uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - return len(dAtA) - i, nil + return i, nil } func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3221,22 +3080,17 @@ func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { } func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - return len(dAtA) - i, nil + return i, nil } func (m *TailersCountRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3244,22 +3098,17 @@ func (m *TailersCountRequest) Marshal() (dAtA []byte, err error) { } func (m *TailersCountRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TailersCountRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - return len(dAtA) - i, nil + return i, nil } func (m *TailersCountResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3267,33 +3116,26 @@ func (m *TailersCountResponse) Marshal() (dAtA []byte, err error) { } func (m *TailersCountResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TailersCountResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.Count != 0 { - i = encodeVarintLogproto(dAtA, i, uint64(m.Count)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Count)) } - return len(dAtA) - i, nil + return i, nil } func encodeVarintLogproto(dAtA []byte, offset int, v uint64) int { - offset -= sovLogproto(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *PushRequest) Size() (n int) { if m == nil { @@ -3628,7 +3470,14 @@ func (m *TailersCountResponse) Size() (n int) { } func sovLogproto(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozLogproto(x uint64) (n int) { return sovLogproto(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3637,13 +3486,8 @@ func (this *PushRequest) String() string { if this == nil { return "nil" } - repeatedStringForStreams := "[]*Stream{" - for _, f := range this.Streams { - repeatedStringForStreams += strings.Replace(f.String(), "Stream", "Stream", 1) + "," - } - repeatedStringForStreams += "}" s := strings.Join([]string{`&PushRequest{`, - `Streams:` + repeatedStringForStreams + `,`, + `Streams:` + strings.Replace(fmt.Sprintf("%v", this.Streams), "Stream", "Stream", 1) + `,`, `}`, }, "") return s @@ -3664,8 +3508,8 @@ func (this *QueryRequest) String() string { s := strings.Join([]string{`&QueryRequest{`, `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `}`, }, "") @@ -3675,13 +3519,8 @@ func (this *QueryResponse) String() string { if this == nil { return "nil" } - repeatedStringForStreams := "[]*Stream{" - for _, f := range this.Streams { - repeatedStringForStreams += strings.Replace(f.String(), "Stream", "Stream", 1) + "," - } - repeatedStringForStreams += "}" s := strings.Join([]string{`&QueryResponse{`, - `Streams:` + repeatedStringForStreams + `,`, + `Streams:` + strings.Replace(fmt.Sprintf("%v", this.Streams), "Stream", "Stream", 1) + `,`, `}`, }, "") return s @@ -3713,14 +3552,9 @@ func (this *Stream) String() string { if this == nil { return "nil" } - repeatedStringForEntries := "[]Entry{" - for _, f := range this.Entries { - repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + "," - } - repeatedStringForEntries += "}" s := strings.Join([]string{`&Stream{`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Entries:` + repeatedStringForEntries + `,`, + `Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "Entry", "Entry", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3730,7 +3564,7 @@ func (this *Entry) String() string { return "nil" } s := strings.Join([]string{`&Entry{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Line:` + fmt.Sprintf("%v", this.Line) + `,`, `}`, }, "") @@ -3744,7 +3578,7 @@ func (this *TailRequest) String() string { `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3753,14 +3587,9 @@ func (this *TailResponse) String() string { if this == nil { return "nil" } - repeatedStringForDroppedStreams := "[]*DroppedStream{" - for _, f := range this.DroppedStreams { - repeatedStringForDroppedStreams += strings.Replace(f.String(), "DroppedStream", "DroppedStream", 1) + "," - } - repeatedStringForDroppedStreams += "}" s := strings.Join([]string{`&TailResponse{`, - `Stream:` + strings.Replace(this.Stream.String(), "Stream", "Stream", 1) + `,`, - `DroppedStreams:` + repeatedStringForDroppedStreams + `,`, + `Stream:` + strings.Replace(fmt.Sprintf("%v", this.Stream), "Stream", "Stream", 1) + `,`, + `DroppedStreams:` + strings.Replace(fmt.Sprintf("%v", this.DroppedStreams), "DroppedStream", "DroppedStream", 1) + `,`, `}`, }, "") return s @@ -3770,8 +3599,8 @@ func (this *SeriesRequest) String() string { return "nil" } s := strings.Join([]string{`&SeriesRequest{`, - `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), 
`&`, ``, 1) + `,`, - `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `}`, }, "") @@ -3781,13 +3610,8 @@ func (this *SeriesResponse) String() string { if this == nil { return "nil" } - repeatedStringForSeries := "[]SeriesIdentifier{" - for _, f := range this.Series { - repeatedStringForSeries += strings.Replace(strings.Replace(f.String(), "SeriesIdentifier", "SeriesIdentifier", 1), `&`, ``, 1) + "," - } - repeatedStringForSeries += "}" s := strings.Join([]string{`&SeriesResponse{`, - `Series:` + repeatedStringForSeries + `,`, + `Series:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Series), "SeriesIdentifier", "SeriesIdentifier", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3817,8 +3641,8 @@ func (this *DroppedStream) String() string { return "nil" } s := strings.Join([]string{`&DroppedStream{`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `}`, }, "") @@ -3828,21 +3652,11 @@ func (this *TimeSeriesChunk) String() string { if this == nil { return "nil" } - repeatedStringForLabels := "[]*LabelPair{" - for _, f := range this.Labels { - repeatedStringForLabels += strings.Replace(f.String(), "LabelPair", "LabelPair", 1) + "," - } - repeatedStringForLabels += "}" - repeatedStringForChunks := "[]*Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(f.String(), "Chunk", "Chunk", 1) + "," - } - repeatedStringForChunks += "}" s := strings.Join([]string{`&TimeSeriesChunk{`, `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + repeatedStringForLabels + `,`, - `Chunks:` + repeatedStringForChunks + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1) + `,`, + `Chunks:` + strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1) + `,`, `}`, }, "") return s diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index e1d457a0f0fb0..fab60ff2fcd2f 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -5,6 +5,8 @@ import ( "sort" "time" + "github.com/go-kit/kit/log/level" + "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/grafana/loki/pkg/helpers" "github.com/grafana/loki/pkg/iter" @@ -39,6 +41,12 @@ func (Streams) String() string { return "" } +// Result is the result of a query execution. +type Result struct { + Data promql.Value + Statistics stats.Result +} + // EngineOpts is the list of options to use with the LogQL query engine. type EngineOpts struct { // Timeout for queries execution @@ -89,7 +97,7 @@ func NewEngine(opts EngineOpts, q Querier) Engine { // Query is a LogQL query to be executed. type Query interface { // Exec processes the query. 
- Exec(ctx context.Context) (promql.Value, error) + Exec(ctx context.Context) (Result, error) } type query struct { @@ -99,7 +107,10 @@ type query struct { } // Exec Implements `Query` -func (q *query) Exec(ctx context.Context) (promql.Value, error) { +func (q *query) Exec(ctx context.Context) (Result, error) { + log, ctx := spanlogger.New(ctx, "Engine.Exec") + defer log.Finish() + var queryType string if IsInstant(q) { @@ -107,9 +118,24 @@ func (q *query) Exec(ctx context.Context) (promql.Value, error) { } else { queryType = "range" } + timer := prometheus.NewTimer(queryTime.WithLabelValues(queryType)) defer timer.ObserveDuration() - return q.ng.exec(ctx, q) + + // records query statistics + var statResult stats.Result + start := time.Now() + ctx = stats.NewContext(ctx) + + data, err := q.ng.exec(ctx, q) + + statResult = stats.Snapshot(ctx, time.Since(start)) + statResult.Log(level.Debug(log)) + + return Result{ + Data: data, + Statistics: statResult, + }, err } // NewRangeQuery creates a new LogQL range query. @@ -149,8 +175,6 @@ func (ng *engine) NewInstantQuery( } func (ng *engine) exec(ctx context.Context, q *query) (promql.Value, error) { - log, ctx := spanlogger.New(ctx, "Engine.exec") - defer log.Finish() ctx, cancel := context.WithTimeout(ctx, ng.timeout) defer cancel() @@ -168,16 +192,10 @@ func (ng *engine) exec(ctx context.Context, q *query) (promql.Value, error) { return nil, err } - ctx = stats.NewContext(ctx) - start := time.Now() - defer func() { - resultStats := stats.Snapshot(ctx, time.Since(start)) - stats.Log(log, resultStats) - }() - switch e := expr.(type) { case SampleExpr: - return ng.evalSample(ctx, e, q) + value, err := ng.evalSample(ctx, e, q) + return value, err case LogSelectorExpr: iter, err := ng.evaluator.Iterator(ctx, e, q) @@ -185,7 +203,8 @@ func (ng *engine) exec(ctx context.Context, q *query) (promql.Value, error) { return nil, err } defer helpers.LogError("closing iterator", iter.Close) - return readStreams(iter, q.limit) + streams, err := readStreams(iter, q.limit) + return streams, err } return nil, nil diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index 05bcee531060c..d8717ad485e1b 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -8,10 +8,12 @@ import ( "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/stats" json "github.com/json-iterator/go" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var testSize = int64(300) @@ -296,7 +298,7 @@ func TestEngine_NewInstantQuery(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, test.expected, res) + assert.Equal(t, test.expected, res.Data) }) } } @@ -686,11 +688,23 @@ func TestEngine_NewRangeQuery(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, test.expected, res) + assert.Equal(t, test.expected, res.Data) }) } } +func TestEngine_Stats(t *testing.T) { + eng := NewEngine(EngineOpts{}, QuerierFunc(func(ctx context.Context, sp SelectParams) (iter.EntryIterator, error) { + st := stats.GetChunkData(ctx) + st.DecompressedBytes++ + return iter.NoopIterator, nil + })) + q := eng.NewInstantQuery(`{foo="bar"}`, time.Now(), logproto.BACKWARD, 1000) + r, err := q.Exec(context.Background()) + require.NoError(t, err) + require.Equal(t, int64(1), r.Statistics.Store.DecompressedBytes) +} + // go test -mod=vendor ./pkg/logql/ -bench=. 
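To make the new contract concrete, here is a minimal caller-side sketch mirroring TestEngine_Stats above. The stub querier and the printed field are illustrative only; note that Exec records ExecTime in seconds, since Snapshot stores execTime.Seconds().

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/grafana/loki/pkg/iter"
	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
)

func main() {
	// A querier stub that returns no entries; real queriers select from
	// ingesters and the store, bumping the stats context as they iterate.
	eng := logql.NewEngine(logql.EngineOpts{}, logql.QuerierFunc(func(ctx context.Context, sp logql.SelectParams) (iter.EntryIterator, error) {
		return iter.NoopIterator, nil
	}))
	q := eng.NewInstantQuery(`{foo="bar"}`, time.Now(), logproto.BACKWARD, 1000)
	r, err := q.Exec(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Exec now returns the promql.Value and the query statistics together.
	fmt.Println(r.Statistics.Summary.ExecTime) // execution time in seconds
}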
-benchmem -memprofile memprofile.out -cpuprofile cpuprofile.out func BenchmarkRangeQuery100000(b *testing.B) { benchmarkRangeQuery(int64(100000), b) @@ -746,8 +760,8 @@ func benchmarkRangeQuery(testsize int64, b *testing.B) { if err != nil { b.Fatal(err) } - result = res - if res == nil { + result = res.Data + if result == nil { b.Fatal("unexpected nil result") } } diff --git a/pkg/logql/marshal/legacy/marshal.go b/pkg/logql/marshal/legacy/marshal.go index 32353e92c511b..886b21ce3c983 100644 --- a/pkg/logql/marshal/legacy/marshal.go +++ b/pkg/logql/marshal/legacy/marshal.go @@ -11,7 +11,6 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql" json "github.com/json-iterator/go" - "github.com/prometheus/prometheus/promql" ) // Note that the below methods directly marshal the values passed in. This is because these objects currently marshal @@ -20,13 +19,14 @@ import ( // for loghttp model objects 2) marshal the loghttp model objects // WriteQueryResponseJSON marshals promql.Value to legacy loghttp JSON and then writes it to the provided io.Writer -func WriteQueryResponseJSON(v promql.Value, w io.Writer) error { - if v.Type() != logql.ValueTypeStreams { - return fmt.Errorf("legacy endpoints only support %s result type, current type is %s", logql.ValueTypeStreams, v.Type()) +func WriteQueryResponseJSON(v logql.Result, w io.Writer) error { + if v.Data.Type() != logql.ValueTypeStreams { + return fmt.Errorf("legacy endpoints only support %s result type, current type is %s", logql.ValueTypeStreams, v.Data.Type()) } j := map[string]interface{}{ - "streams": v, + "streams": v.Data, + "stats": v.Statistics, } return json.NewEncoder(w).Encode(j) diff --git a/pkg/logql/marshal/legacy/marshal_test.go b/pkg/logql/marshal/legacy/marshal_test.go index ef7896c4189a4..b2a5fc8b158a1 100644 --- a/pkg/logql/marshal/legacy/marshal_test.go +++ b/pkg/logql/marshal/legacy/marshal_test.go @@ -41,7 +41,39 @@ var queryTests = []struct { } ] } - ] + ], + "stats" : { + "ingester" : { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "totalBatches": 0, + "totalChunksMatched": 0, + "totalDuplicates": 0, + "totalLinesSent": 0, + "totalReached": 0 + }, + "store": { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "totalDuplicates": 0 + }, + "summary": { + "bytesProcessedPerSeconds": 0, + "execTime": 0, + "linesProcessedPerSeconds": 0, + "totalBytesProcessed":0, + "totalLinesProcessed":0 + } + } }`, }, } @@ -114,7 +146,7 @@ func Test_WriteQueryResponseJSON(t *testing.T) { for i, queryTest := range queryTests { var b bytes.Buffer - err := WriteQueryResponseJSON(queryTest.actual, &b) + err := WriteQueryResponseJSON(logql.Result{Data: queryTest.actual}, &b) require.NoError(t, err) testJSONBytesEqual(t, []byte(queryTest.expected), b.Bytes(), "Query Test %d failed", i) @@ -145,7 +177,7 @@ func Test_MarshalTailResponse(t *testing.T) { func Test_QueryResponseMarshalLoop(t *testing.T) { for i, queryTest := range queryTests { - var r loghttp.QueryResponse + var r map[string]interface{} err := json.Unmarshal([]byte(queryTest.expected), &r) require.NoError(t, err) diff --git a/pkg/logql/marshal/marshal.go b/pkg/logql/marshal/marshal.go index 088d3daca7e16..baad9e86484da 100644 --- a/pkg/logql/marshal/marshal.go +++ b/pkg/logql/marshal/marshal.go @@ -5,19 +5,20 @@ package marshal 
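Both marshal packages now take a logql.Result instead of a bare promql.Value. A hedged sketch of the new v1 call site, assuming an empty stream set (zero-valued statistics marshal to the all-zero "stats" object shown in the tests):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logql/marshal"
)

func main() {
	var buf bytes.Buffer
	// Empty streams and zero statistics; real handlers pass the Result
	// returned by Query.Exec straight through.
	res := logql.Result{Data: logql.Streams{}}
	if err := marshal.WriteQueryResponseJSON(res, &buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String()) // the body now carries a "stats" section next to "result"
}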
import ( "io" + "github.com/grafana/loki/pkg/logql" + "github.com/gorilla/websocket" "github.com/grafana/loki/pkg/loghttp" legacy "github.com/grafana/loki/pkg/loghttp/legacy" "github.com/grafana/loki/pkg/logproto" json "github.com/json-iterator/go" - "github.com/prometheus/prometheus/promql" ) // WriteQueryResponseJSON marshals the promql.Value to v1 loghttp JSON and then // writes it to the provided io.Writer. -func WriteQueryResponseJSON(v promql.Value, w io.Writer) error { +func WriteQueryResponseJSON(v logql.Result, w io.Writer) error { - value, err := NewResultValue(v) + value, err := NewResultValue(v.Data) if err != nil { return err @@ -28,6 +29,7 @@ func WriteQueryResponseJSON(v promql.Value, w io.Writer) error { Data: loghttp.QueryResponseData{ ResultType: value.Type(), Result: value, + Statistics: v.Statistics, }, } diff --git a/pkg/logql/marshal/marshal_test.go b/pkg/logql/marshal/marshal_test.go index 63d187f581f62..fb26b64192494 100644 --- a/pkg/logql/marshal/marshal_test.go +++ b/pkg/logql/marshal/marshal_test.go @@ -46,7 +46,39 @@ var queryTests = []struct { [ "123456789012345", "super line" ] ] } - ] + ], + "stats" : { + "ingester" : { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "totalBatches": 0, + "totalChunksMatched": 0, + "totalDuplicates": 0, + "totalLinesSent": 0, + "totalReached": 0 + }, + "store": { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "totalDuplicates": 0 + }, + "summary": { + "bytesProcessedPerSeconds": 0, + "execTime": 0, + "linesProcessedPerSeconds": 0, + "totalBytesProcessed":0, + "totalLinesProcessed":0 + } + } } }`, }, @@ -110,7 +142,39 @@ var queryTests = []struct { "3.45" ] } - ] + ], + "stats" : { + "ingester" : { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "totalBatches": 0, + "totalChunksMatched": 0, + "totalDuplicates": 0, + "totalLinesSent": 0, + "totalReached": 0 + }, + "store": { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "totalDuplicates": 0 + }, + "summary": { + "bytesProcessedPerSeconds": 0, + "execTime": 0, + "linesProcessedPerSeconds": 0, + "totalBytesProcessed":0, + "totalLinesProcessed":0 + } + } }, "status": "success" }`, @@ -191,7 +255,39 @@ var queryTests = []struct { ] ] } - ] + ], + "stats" : { + "ingester" : { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "totalBatches": 0, + "totalChunksMatched": 0, + "totalDuplicates": 0, + "totalLinesSent": 0, + "totalReached": 0 + }, + "store": { + "compressedBytes": 0, + "decompressedBytes": 0, + "decompressedLines": 0, + "headChunkBytes": 0, + "headChunkLines": 0, + "chunksDownloadTime": 0, + "totalChunksRef": 0, + "totalChunksDownloaded": 0, + "totalDuplicates": 0 + }, + "summary": { + "bytesProcessedPerSeconds": 0, + "execTime": 0, + "linesProcessedPerSeconds": 0, + "totalBytesProcessed":0, + "totalLinesProcessed":0 + } + } }, "status": "success" }`, @@ -266,14 +362,13 @@ var tailTests = []struct { func Test_WriteQueryResponseJSON(t *testing.T) { for i, queryTest := range queryTests { var b bytes.Buffer - err := 
WriteQueryResponseJSON(queryTest.actual, &b) + err := WriteQueryResponseJSON(logql.Result{Data: queryTest.actual}, &b) require.NoError(t, err) testJSONBytesEqual(t, []byte(queryTest.expected), b.Bytes(), "Query Test %d failed", i) } } -// func Test_WriteLabelResponseJSON(t *testing.T) { for i, labelTest := range labelTests { var b bytes.Buffer diff --git a/pkg/logql/stats/context.go b/pkg/logql/stats/context.go index 6cbe4e226efa8..de5aaf6bbe209 100644 --- a/pkg/logql/stats/context.go +++ b/pkg/logql/stats/context.go @@ -26,7 +26,6 @@ import ( "github.com/dustin/go-humanize" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" ) type ctxKeyType string @@ -38,16 +37,9 @@ const ( storeKey ctxKeyType = "store" ) -// Result contains LogQL query statistics. -type Result struct { - Ingester Ingester - Store Store - Summary Summary -} - // Log logs a query statistics result. -func Log(log log.Logger, r Result) { - level.Debug(log).Log( +func (r Result) Log(log log.Logger) { + log.Log( "Ingester.TotalReached", r.Ingester.TotalReached, "Ingester.TotalChunksMatched", r.Ingester.TotalChunksMatched, "Ingester.TotalBatches", r.Ingester.TotalBatches, @@ -61,8 +53,8 @@ func Log(log log.Logger, r Result) { "Ingester.TotalDuplicates", r.Ingester.TotalDuplicates, "Store.TotalChunksRef", r.Store.TotalChunksRef, - "Store.TotalDownloadedChunks", r.Store.TotalDownloadedChunks, - "Store.TimeDownloadingChunks", r.Store.TimeDownloadingChunks, + "Store.TotalChunksDownloaded", r.Store.TotalChunksDownloaded, + "Store.ChunksDownloadTime", time.Duration(int64(r.Store.ChunksDownloadTime*float64(time.Second))), "Store.HeadChunkBytes", humanize.Bytes(uint64(r.Store.HeadChunkBytes)), "Store.HeadChunkLines", r.Store.HeadChunkLines, @@ -75,32 +67,10 @@ func Log(log log.Logger, r Result) { "Summary.LinesProcessedPerSeconds", r.Summary.LinesProcessedPerSeconds, "Summary.TotalBytesProcessed", humanize.Bytes(uint64(r.Summary.TotalBytesProcessed)), "Summary.TotalLinesProcessed", r.Summary.TotalLinesProcessed, - "Summary.ExecTime", r.Summary.ExecTime, + "Summary.ExecTime", time.Duration(int64(r.Summary.ExecTime*float64(time.Second))), ) } -// Summary is the summary of a query statistics. -type Summary struct { - BytesProcessedPerSeconds int64 // Total bytes processed per seconds. - LinesProcessedPerSeconds int64 // Total lines processed per seconds. - TotalBytesProcessed int64 // Total bytes processed. - TotalLinesProcessed int64 // Total lines processed. - ExecTime time.Duration // Execution time. -} - -// Ingester is the statistics result for ingesters queries. -type Ingester struct { - IngesterData - ChunkData - TotalReached int -} - -// Store is the statistics result of the store. -type Store struct { - StoreData - ChunkData -} - // NewContext creates a new statistics context func NewContext(ctx context.Context) context.Context { ctx = injectTrailerCollector(ctx) @@ -112,12 +82,12 @@ func NewContext(ctx context.Context) context.Context { // ChunkData contains chunks specific statistics. type ChunkData struct { - HeadChunkBytes int64 // Total bytes processed but was already in memory. (found in the headchunk) - HeadChunkLines int64 // Total lines processed but was already in memory. (found in the headchunk) - DecompressedBytes int64 // Total bytes decompressed and processed from chunks. - DecompressedLines int64 // Total lines decompressed and processed from chunks. - CompressedBytes int64 // Total bytes of compressed chunks (blocks) processed. - TotalDuplicates int64 // Total duplicates found while processing. 
+ HeadChunkBytes int64 `json:"headChunkBytes"` // Total bytes processed but was already in memory. (found in the headchunk) + HeadChunkLines int64 `json:"headChunkLines"` // Total lines processed but was already in memory. (found in the headchunk) + DecompressedBytes int64 `json:"decompressedBytes"` // Total bytes decompressed and processed from chunks. + DecompressedLines int64 `json:"decompressedLines"` // Total lines decompressed and processed from chunks. + CompressedBytes int64 `json:"compressedBytes"` // Total bytes of compressed chunks (blocks) processed. + TotalDuplicates int64 `json:"totalDuplicates"` // Total duplicates found while processing. } // GetChunkData returns the chunks statistics data from the current context. @@ -131,9 +101,9 @@ func GetChunkData(ctx context.Context) *ChunkData { // IngesterData contains ingester specific statistics. type IngesterData struct { - TotalChunksMatched int64 // Total of chunks matched by the query from ingesters - TotalBatches int64 // Total of batches sent from ingesters. - TotalLinesSent int64 // Total lines sent by ingesters. + TotalChunksMatched int64 `json:"totalChunksMatched"` // Total of chunks matched by the query from ingesters + TotalBatches int64 `json:"totalBatches"` // Total of batches sent from ingesters. + TotalLinesSent int64 `json:"totalLinesSent"` // Total lines sent by ingesters. } // GetIngesterData returns the ingester statistics data from the current context. @@ -148,8 +118,8 @@ func GetIngesterData(ctx context.Context) *IngesterData { // StoreData contains store specific statistics. type StoreData struct { TotalChunksRef int64 // The total of chunk reference fetched from index. - TotalDownloadedChunks int64 // Total number of chunks fetched. - TimeDownloadingChunks time.Duration // Time spent fetching chunks. + TotalChunksDownloaded int64 // Total number of chunks fetched. + ChunksDownloadTime time.Duration // Time spent fetching chunks. } // GetStoreData returns the store statistics data from the current context. @@ -169,12 +139,19 @@ func Snapshot(ctx context.Context, execTime time.Duration) Result { // collect data from store. s, ok := ctx.Value(storeKey).(*StoreData) if ok { - res.Store.StoreData = *s + res.Store.TotalChunksRef = s.TotalChunksRef + res.Store.TotalChunksDownloaded = s.TotalChunksDownloaded + res.Store.ChunksDownloadTime = s.ChunksDownloadTime.Seconds() } // collect data from chunks iteration. 
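As a hedged usage sketch (not part of this file): the intended flow is to create a stats context once per query, let the read path bump the typed counters, then snapshot and log at the end, exactly as TestSnapshot below exercises it. The counter values here are arbitrary.

package main

import (
	"context"
	"time"

	"github.com/cortexproject/cortex/pkg/util"
	"github.com/go-kit/kit/log/level"

	"github.com/grafana/loki/pkg/logql/stats"
)

func main() {
	ctx := stats.NewContext(context.Background())
	start := time.Now()

	// Simulated query path: record some decompression and fetch work.
	stats.GetChunkData(ctx).DecompressedBytes += 512
	stats.GetStoreData(ctx).TotalChunksDownloaded++

	// Snapshot computes the summary (throughput, totals, exec time in seconds)
	// and Log emits one debug line per statistic.
	res := stats.Snapshot(ctx, time.Since(start))
	res.Log(level.Debug(util.Logger))
}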
c, ok := ctx.Value(chunksKey).(*ChunkData) if ok { - res.Store.ChunkData = *c + res.Store.HeadChunkBytes = c.HeadChunkBytes + res.Store.HeadChunkLines = c.HeadChunkLines + res.Store.DecompressedBytes = c.DecompressedBytes + res.Store.DecompressedLines = c.DecompressedLines + res.Store.CompressedBytes = c.CompressedBytes + res.Store.TotalDuplicates = c.TotalDuplicates } // calculate the summary @@ -188,6 +165,38 @@ func Snapshot(ctx context.Context, execTime time.Duration) Result { res.Summary.LinesProcessedPerSeconds = int64(float64(res.Summary.TotalLinesProcessed) / execTime.Seconds()) - res.Summary.ExecTime = execTime + res.Summary.ExecTime = execTime.Seconds() return res } + +func (r *Result) Merge(m Result) { + if r == nil { + return + } + r.Summary.BytesProcessedPerSeconds += m.Summary.BytesProcessedPerSeconds + r.Summary.LinesProcessedPerSeconds += m.Summary.LinesProcessedPerSeconds + r.Summary.TotalBytesProcessed += m.Summary.TotalBytesProcessed + r.Summary.TotalLinesProcessed += m.Summary.TotalLinesProcessed + r.Summary.ExecTime += m.Summary.ExecTime + + r.Store.TotalChunksRef += m.Store.TotalChunksRef + r.Store.TotalChunksDownloaded += m.Store.TotalChunksDownloaded + r.Store.ChunksDownloadTime += m.Store.ChunksDownloadTime + r.Store.HeadChunkBytes += m.Store.HeadChunkBytes + r.Store.HeadChunkLines += m.Store.HeadChunkLines + r.Store.DecompressedBytes += m.Store.DecompressedBytes + r.Store.DecompressedLines += m.Store.DecompressedLines + r.Store.CompressedBytes += m.Store.CompressedBytes + r.Store.TotalDuplicates += m.Store.TotalDuplicates + + r.Ingester.TotalReached += m.Ingester.TotalReached + r.Ingester.TotalChunksMatched += m.Ingester.TotalChunksMatched + r.Ingester.TotalBatches += m.Ingester.TotalBatches + r.Ingester.TotalLinesSent += m.Ingester.TotalLinesSent + r.Ingester.HeadChunkBytes += m.Ingester.HeadChunkBytes + r.Ingester.HeadChunkLines += m.Ingester.HeadChunkLines + r.Ingester.DecompressedBytes += m.Ingester.DecompressedBytes + r.Ingester.DecompressedLines += m.Ingester.DecompressedLines + r.Ingester.CompressedBytes += m.Ingester.CompressedBytes + r.Ingester.TotalDuplicates += m.Ingester.TotalDuplicates +} diff --git a/pkg/logql/stats/context_test.go b/pkg/logql/stats/context_test.go index 9d9ec4bb5477f..5873b3d77b204 100644 --- a/pkg/logql/stats/context_test.go +++ b/pkg/logql/stats/context_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/cortexproject/cortex/pkg/util" jsoniter "github.com/json-iterator/go" "github.com/stretchr/testify/require" ) @@ -20,47 +21,40 @@ func TestSnapshot(t *testing.T) { GetChunkData(ctx).TotalDuplicates += 10 GetStoreData(ctx).TotalChunksRef += 50 - GetStoreData(ctx).TotalDownloadedChunks += 60 - GetStoreData(ctx).TimeDownloadingChunks += time.Second + GetStoreData(ctx).TotalChunksDownloaded += 60 + GetStoreData(ctx).ChunksDownloadTime += time.Second fakeIngesterQuery(ctx) fakeIngesterQuery(ctx) res := Snapshot(ctx, 2*time.Second) + res.Log(util.Logger) expected := Result{ Ingester: Ingester{ - IngesterData: IngesterData{ - TotalChunksMatched: 200, - TotalBatches: 50, - TotalLinesSent: 60, - }, - ChunkData: ChunkData{ - HeadChunkBytes: 10, - HeadChunkLines: 20, - DecompressedBytes: 24, - DecompressedLines: 40, - CompressedBytes: 60, - TotalDuplicates: 2, - }, - TotalReached: 2, + TotalChunksMatched: 200, + TotalBatches: 50, + TotalLinesSent: 60, + HeadChunkBytes: 10, + HeadChunkLines: 20, + DecompressedBytes: 24, + DecompressedLines: 40, + CompressedBytes: 60, + TotalDuplicates: 2, + TotalReached: 2, }, Store: Store{ - 
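The new Merge helper makes Result additive, which is presumably what allows partial statistics (for example, from split or sharded queries) to be combined into one. A small self-contained sketch with arbitrary values:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/stats"
)

func main() {
	var total stats.Result
	partA := stats.Result{Summary: stats.Summary{TotalLinesProcessed: 60}}
	partB := stats.Result{Summary: stats.Summary{TotalLinesProcessed: 40}}
	// Merge adds every counter of the argument onto the receiver.
	total.Merge(partA)
	total.Merge(partB)
	fmt.Println(total.Summary.TotalLinesProcessed) // 100
}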
StoreData: StoreData{ - TotalChunksRef: 50, - TotalDownloadedChunks: 60, - TimeDownloadingChunks: time.Second, - }, - ChunkData: ChunkData{ - HeadChunkBytes: 10, - HeadChunkLines: 20, - DecompressedBytes: 40, - DecompressedLines: 20, - CompressedBytes: 30, - TotalDuplicates: 10, - }, + TotalChunksRef: 50, + TotalChunksDownloaded: 60, + ChunksDownloadTime: time.Second.Seconds(), + HeadChunkBytes: 10, + HeadChunkLines: 20, + DecompressedBytes: 40, + DecompressedLines: 20, + CompressedBytes: 30, + TotalDuplicates: 10, }, Summary: Summary{ - ExecTime: 2 * time.Second, + ExecTime: 2 * time.Second.Seconds(), BytesProcessedPerSeconds: int64(42), LinesProcessedPerSeconds: int64(50), TotalBytesProcessed: int64(84), @@ -90,3 +84,43 @@ func fakeIngesterQuery(ctx context.Context) { }) meta.Set(ingesterDataKey, i) } + +func TestResult_Merge(t *testing.T) { + var res Result + + toMerge := Result{ + Ingester: Ingester{ + TotalChunksMatched: 200, + TotalBatches: 50, + TotalLinesSent: 60, + HeadChunkBytes: 10, + HeadChunkLines: 20, + DecompressedBytes: 24, + DecompressedLines: 40, + CompressedBytes: 60, + TotalDuplicates: 2, + TotalReached: 2, + }, + Store: Store{ + TotalChunksRef: 50, + TotalChunksDownloaded: 60, + ChunksDownloadTime: time.Second.Seconds(), + HeadChunkBytes: 10, + HeadChunkLines: 20, + DecompressedBytes: 40, + DecompressedLines: 20, + CompressedBytes: 30, + TotalDuplicates: 10, + }, + Summary: Summary{ + ExecTime: 2 * time.Second.Seconds(), + BytesProcessedPerSeconds: int64(42), + LinesProcessedPerSeconds: int64(50), + TotalBytesProcessed: int64(84), + TotalLinesProcessed: int64(100), + }, + } + + res.Merge(toMerge) + require.Equal(t, toMerge, res) +} diff --git a/pkg/logql/stats/grpc.go b/pkg/logql/stats/grpc.go index f7608624f8804..8dad127c6b60e 100644 --- a/pkg/logql/stats/grpc.go +++ b/pkg/logql/stats/grpc.go @@ -79,7 +79,7 @@ func decodeTrailers(ctx context.Context) Ingester { if !ok { return res } - res.TotalReached = len(collector.trailers) + res.TotalReached = int32(len(collector.trailers)) for _, meta := range collector.trailers { ing := decodeTrailer(meta) res.TotalChunksMatched += ing.TotalChunksMatched @@ -96,18 +96,29 @@ func decodeTrailers(ctx context.Context) Ingester { } func decodeTrailer(meta *metadata.MD) Ingester { - var res Ingester + var ingData IngesterData values := meta.Get(ingesterDataKey) if len(values) == 1 { - if err := jsoniter.UnmarshalFromString(values[0], &res.IngesterData); err != nil { + if err := jsoniter.UnmarshalFromString(values[0], &ingData); err != nil { level.Warn(util.Logger).Log("msg", "could not unmarshal ingester data", "err", err) } } + var chunkData ChunkData values = meta.Get(chunkDataKey) if len(values) == 1 { - if err := jsoniter.UnmarshalFromString(values[0], &res.ChunkData); err != nil { + if err := jsoniter.UnmarshalFromString(values[0], &chunkData); err != nil { level.Warn(util.Logger).Log("msg", "could not unmarshal chunk data", "err", err) } } - return res + return Ingester{ + TotalChunksMatched: ingData.TotalChunksMatched, + TotalBatches: ingData.TotalBatches, + TotalLinesSent: ingData.TotalLinesSent, + HeadChunkBytes: chunkData.HeadChunkBytes, + HeadChunkLines: chunkData.HeadChunkLines, + DecompressedBytes: chunkData.DecompressedBytes, + DecompressedLines: chunkData.DecompressedLines, + CompressedBytes: chunkData.CompressedBytes, + TotalDuplicates: chunkData.TotalDuplicates, + } } diff --git a/pkg/logql/stats/grpc_test.go b/pkg/logql/stats/grpc_test.go index 460316a2ac2b4..aad971ed6dfaf 100644 --- a/pkg/logql/stats/grpc_test.go 
+++ b/pkg/logql/stats/grpc_test.go @@ -81,7 +81,7 @@ func TestCollectTrailer(t *testing.T) { t.Fatal(err) } res := decodeTrailers(ctx) - require.Equal(t, 2, res.TotalReached) + require.Equal(t, int32(2), res.TotalReached) require.Equal(t, int64(2), res.TotalChunksMatched) require.Equal(t, int64(4), res.TotalBatches) require.Equal(t, int64(6), res.TotalLinesSent) diff --git a/pkg/logql/stats/stats.pb.go b/pkg/logql/stats/stats.pb.go new file mode 100644 index 0000000000000..e7f5117c807eb --- /dev/null +++ b/pkg/logql/stats/stats.pb.go @@ -0,0 +1,1987 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/logql/stats/stats.proto + +package stats + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Result contains LogQL query statistics. +type Result struct { + Summary Summary `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary"` + Store Store `protobuf:"bytes,2,opt,name=store,proto3" json:"store"` + Ingester Ingester `protobuf:"bytes,3,opt,name=ingester,proto3" json:"ingester"` +} + +func (m *Result) Reset() { *m = Result{} } +func (*Result) ProtoMessage() {} +func (*Result) Descriptor() ([]byte, []int) { + return fileDescriptor_770b8387e5696475, []int{0} +} +func (m *Result) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Result.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Result) XXX_Merge(src proto.Message) { + xxx_messageInfo_Result.Merge(m, src) +} +func (m *Result) XXX_Size() int { + return m.Size() +} +func (m *Result) XXX_DiscardUnknown() { + xxx_messageInfo_Result.DiscardUnknown(m) +} + +var xxx_messageInfo_Result proto.InternalMessageInfo + +func (m *Result) GetSummary() Summary { + if m != nil { + return m.Summary + } + return Summary{} +} + +func (m *Result) GetStore() Store { + if m != nil { + return m.Store + } + return Store{} +} + +func (m *Result) GetIngester() Ingester { + if m != nil { + return m.Ingester + } + return Ingester{} +} + +// Summary is the summary of a query statistics. +type Summary struct { + // Total bytes processed per seconds. + BytesProcessedPerSeconds int64 `protobuf:"varint,1,opt,name=bytesProcessedPerSeconds,proto3" json:"bytesProcessedPerSeconds"` + // Total lines processed per seconds. + LinesProcessedPerSeconds int64 `protobuf:"varint,2,opt,name=linesProcessedPerSeconds,proto3" json:"linesProcessedPerSeconds"` + // Total bytes processed. + TotalBytesProcessed int64 `protobuf:"varint,3,opt,name=totalBytesProcessed,proto3" json:"totalBytesProcessed"` + // Total lines processed. + TotalLinesProcessed int64 `protobuf:"varint,4,opt,name=totalLinesProcessed,proto3" json:"totalLinesProcessed"` + // Execution time in nanoseconds. 
+ ExecTime float64 `protobuf:"fixed64,5,opt,name=execTime,proto3" json:"execTime"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_770b8387e5696475, []int{1} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetBytesProcessedPerSeconds() int64 { + if m != nil { + return m.BytesProcessedPerSeconds + } + return 0 +} + +func (m *Summary) GetLinesProcessedPerSeconds() int64 { + if m != nil { + return m.LinesProcessedPerSeconds + } + return 0 +} + +func (m *Summary) GetTotalBytesProcessed() int64 { + if m != nil { + return m.TotalBytesProcessed + } + return 0 +} + +func (m *Summary) GetTotalLinesProcessed() int64 { + if m != nil { + return m.TotalLinesProcessed + } + return 0 +} + +func (m *Summary) GetExecTime() float64 { + if m != nil { + return m.ExecTime + } + return 0 +} + +type Store struct { + // The total of chunk reference fetched from index. + TotalChunksRef int64 `protobuf:"varint,1,opt,name=totalChunksRef,proto3" json:"totalChunksRef"` + // Total number of chunks fetched. + TotalChunksDownloaded int64 `protobuf:"varint,2,opt,name=totalChunksDownloaded,proto3" json:"totalChunksDownloaded"` + // Time spent fetching chunks in nanoseconds. + ChunksDownloadTime float64 `protobuf:"fixed64,3,opt,name=chunksDownloadTime,proto3" json:"chunksDownloadTime"` + // Total bytes processed but was already in memory. (found in the headchunk) + HeadChunkBytes int64 `protobuf:"varint,4,opt,name=headChunkBytes,proto3" json:"headChunkBytes"` + // Total lines processed but was already in memory. (found in the headchunk) + HeadChunkLines int64 `protobuf:"varint,5,opt,name=headChunkLines,proto3" json:"headChunkLines"` + // Total bytes decompressed and processed from chunks. + DecompressedBytes int64 `protobuf:"varint,6,opt,name=decompressedBytes,proto3" json:"decompressedBytes"` + // Total lines decompressed and processed from chunks. + DecompressedLines int64 `protobuf:"varint,7,opt,name=decompressedLines,proto3" json:"decompressedLines"` + // Total bytes of compressed chunks (blocks) processed. + CompressedBytes int64 `protobuf:"varint,8,opt,name=compressedBytes,proto3" json:"compressedBytes"` + // Total duplicates found while processing. 
+ TotalDuplicates int64 `protobuf:"varint,9,opt,name=totalDuplicates,proto3" json:"totalDuplicates"` +} + +func (m *Store) Reset() { *m = Store{} } +func (*Store) ProtoMessage() {} +func (*Store) Descriptor() ([]byte, []int) { + return fileDescriptor_770b8387e5696475, []int{2} +} +func (m *Store) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Store) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Store.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Store) XXX_Merge(src proto.Message) { + xxx_messageInfo_Store.Merge(m, src) +} +func (m *Store) XXX_Size() int { + return m.Size() +} +func (m *Store) XXX_DiscardUnknown() { + xxx_messageInfo_Store.DiscardUnknown(m) +} + +var xxx_messageInfo_Store proto.InternalMessageInfo + +func (m *Store) GetTotalChunksRef() int64 { + if m != nil { + return m.TotalChunksRef + } + return 0 +} + +func (m *Store) GetTotalChunksDownloaded() int64 { + if m != nil { + return m.TotalChunksDownloaded + } + return 0 +} + +func (m *Store) GetChunksDownloadTime() float64 { + if m != nil { + return m.ChunksDownloadTime + } + return 0 +} + +func (m *Store) GetHeadChunkBytes() int64 { + if m != nil { + return m.HeadChunkBytes + } + return 0 +} + +func (m *Store) GetHeadChunkLines() int64 { + if m != nil { + return m.HeadChunkLines + } + return 0 +} + +func (m *Store) GetDecompressedBytes() int64 { + if m != nil { + return m.DecompressedBytes + } + return 0 +} + +func (m *Store) GetDecompressedLines() int64 { + if m != nil { + return m.DecompressedLines + } + return 0 +} + +func (m *Store) GetCompressedBytes() int64 { + if m != nil { + return m.CompressedBytes + } + return 0 +} + +func (m *Store) GetTotalDuplicates() int64 { + if m != nil { + return m.TotalDuplicates + } + return 0 +} + +type Ingester struct { + // Total ingester reached for this query. + TotalReached int32 `protobuf:"varint,1,opt,name=totalReached,proto3" json:"totalReached"` + // Total of chunks matched by the query from ingesters + TotalChunksMatched int64 `protobuf:"varint,2,opt,name=totalChunksMatched,proto3" json:"totalChunksMatched"` + // Total of batches sent from ingesters. + TotalBatches int64 `protobuf:"varint,3,opt,name=totalBatches,proto3" json:"totalBatches"` + // Total lines sent by ingesters. + TotalLinesSent int64 `protobuf:"varint,4,opt,name=totalLinesSent,proto3" json:"totalLinesSent"` + // Total bytes processed but was already in memory. (found in the headchunk) + HeadChunkBytes int64 `protobuf:"varint,5,opt,name=headChunkBytes,proto3" json:"headChunkBytes"` + // Total lines processed but was already in memory. (found in the headchunk) + HeadChunkLines int64 `protobuf:"varint,6,opt,name=headChunkLines,proto3" json:"headChunkLines"` + // Total bytes decompressed and processed from chunks. + DecompressedBytes int64 `protobuf:"varint,7,opt,name=decompressedBytes,proto3" json:"decompressedBytes"` + // Total lines decompressed and processed from chunks. + DecompressedLines int64 `protobuf:"varint,8,opt,name=decompressedLines,proto3" json:"decompressedLines"` + // Total bytes of compressed chunks (blocks) processed. + CompressedBytes int64 `protobuf:"varint,9,opt,name=compressedBytes,proto3" json:"compressedBytes"` + // Total duplicates found while processing. 
+ TotalDuplicates int64 `protobuf:"varint,10,opt,name=totalDuplicates,proto3" json:"totalDuplicates"` +} + +func (m *Ingester) Reset() { *m = Ingester{} } +func (*Ingester) ProtoMessage() {} +func (*Ingester) Descriptor() ([]byte, []int) { + return fileDescriptor_770b8387e5696475, []int{3} +} +func (m *Ingester) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Ingester.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Ingester) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingester.Merge(m, src) +} +func (m *Ingester) XXX_Size() int { + return m.Size() +} +func (m *Ingester) XXX_DiscardUnknown() { + xxx_messageInfo_Ingester.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingester proto.InternalMessageInfo + +func (m *Ingester) GetTotalReached() int32 { + if m != nil { + return m.TotalReached + } + return 0 +} + +func (m *Ingester) GetTotalChunksMatched() int64 { + if m != nil { + return m.TotalChunksMatched + } + return 0 +} + +func (m *Ingester) GetTotalBatches() int64 { + if m != nil { + return m.TotalBatches + } + return 0 +} + +func (m *Ingester) GetTotalLinesSent() int64 { + if m != nil { + return m.TotalLinesSent + } + return 0 +} + +func (m *Ingester) GetHeadChunkBytes() int64 { + if m != nil { + return m.HeadChunkBytes + } + return 0 +} + +func (m *Ingester) GetHeadChunkLines() int64 { + if m != nil { + return m.HeadChunkLines + } + return 0 +} + +func (m *Ingester) GetDecompressedBytes() int64 { + if m != nil { + return m.DecompressedBytes + } + return 0 +} + +func (m *Ingester) GetDecompressedLines() int64 { + if m != nil { + return m.DecompressedLines + } + return 0 +} + +func (m *Ingester) GetCompressedBytes() int64 { + if m != nil { + return m.CompressedBytes + } + return 0 +} + +func (m *Ingester) GetTotalDuplicates() int64 { + if m != nil { + return m.TotalDuplicates + } + return 0 +} + +func init() { + proto.RegisterType((*Result)(nil), "stats.Result") + proto.RegisterType((*Summary)(nil), "stats.Summary") + proto.RegisterType((*Store)(nil), "stats.Store") + proto.RegisterType((*Ingester)(nil), "stats.Ingester") +} + +func init() { proto.RegisterFile("pkg/logql/stats/stats.proto", fileDescriptor_770b8387e5696475) } + +var fileDescriptor_770b8387e5696475 = []byte{ + // 675 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0xf5, 0x34, 0xcd, 0xa3, 0x43, 0x69, 0xcb, 0x54, 0x85, 0xf0, 0xd0, 0xb8, 0xca, 0x86, 0x6e, + 0x68, 0xc4, 0x63, 0x03, 0x52, 0x37, 0x6e, 0x85, 0x54, 0x09, 0x44, 0x35, 0x65, 0x81, 0x90, 0x58, + 0x38, 0xf6, 0x34, 0x89, 0xea, 0x78, 0x82, 0x67, 0x22, 0xe8, 0x8e, 0x4f, 0xe0, 0x33, 0xf8, 0x01, + 0xfe, 0xa1, 0xcb, 0x2e, 0xbb, 0x40, 0x16, 0x75, 0x37, 0xc8, 0xab, 0xae, 0x91, 0x90, 0x90, 0xaf, + 0x1d, 0x37, 0x9e, 0x38, 0x12, 0x52, 0xd8, 0xb4, 0x73, 0xcf, 0xb9, 0xe7, 0xcc, 0xcc, 0xf5, 0x71, + 0x8c, 0xef, 0x0f, 0x8f, 0xbb, 0x6d, 0x4f, 0x74, 0x3f, 0x7a, 0x6d, 0xa9, 0x6c, 0x25, 0xd3, 0xbf, + 0xdb, 0xc3, 0x40, 0x28, 0x41, 0xaa, 0x50, 0xdc, 0x7b, 0xd4, 0xed, 0xab, 0xde, 0xa8, 0xb3, 0xed, + 0x88, 0x41, 0xbb, 0x2b, 0xba, 0xa2, 0x0d, 0x6c, 0x67, 0x74, 0x04, 0x15, 0x14, 0xb0, 0x4a, 0x55, + 0xad, 0xef, 0x08, 0xd7, 0x18, 0x97, 0x23, 0x4f, 0x91, 0xe7, 0xb8, 0x2e, 0x47, 0x83, 0x81, 0x1d, + 0x9c, 0x34, 0xd1, 0x26, 0xda, 0xba, 0xf1, 0x64, 0x65, 0x3b, 
0xf5, 0x3f, 0x4c, 0x51, 0x6b, 0xf5, + 0x34, 0x34, 0x8d, 0x38, 0x34, 0xc7, 0x6d, 0x6c, 0xbc, 0x20, 0x8f, 0x71, 0x55, 0x2a, 0x11, 0xf0, + 0xe6, 0x02, 0x08, 0x97, 0xc7, 0xc2, 0x04, 0xb3, 0x6e, 0x66, 0xb2, 0xb4, 0x85, 0xa5, 0xff, 0xc8, + 0x0e, 0x6e, 0xf4, 0xfd, 0x2e, 0x97, 0x8a, 0x07, 0xcd, 0x0a, 0xa8, 0x56, 0x33, 0xd5, 0x7e, 0x06, + 0x5b, 0x6b, 0x99, 0x30, 0x6f, 0x64, 0xf9, 0xaa, 0xf5, 0x67, 0x01, 0xd7, 0xb3, 0x73, 0x91, 0x77, + 0xb8, 0xd9, 0x39, 0x51, 0x5c, 0x1e, 0x04, 0xc2, 0xe1, 0x52, 0x72, 0xf7, 0x80, 0x07, 0x87, 0xdc, + 0x11, 0xbe, 0x2b, 0xe1, 0x26, 0x15, 0xeb, 0x41, 0x1c, 0x9a, 0x33, 0x7b, 0xd8, 0x4c, 0x26, 0x71, + 0xf6, 0xfa, 0x7e, 0xb9, 0xf3, 0xc2, 0xb5, 0xf3, 0xac, 0x1e, 0x36, 0x93, 0x21, 0xfb, 0x78, 0x5d, + 0x09, 0x65, 0x7b, 0x56, 0x61, 0x6b, 0x98, 0x44, 0xc5, 0xba, 0x13, 0x87, 0x66, 0x19, 0xcd, 0xca, + 0xc0, 0xdc, 0xea, 0x55, 0x61, 0xaf, 0xe6, 0xa2, 0x66, 0x55, 0xa4, 0x59, 0x19, 0x48, 0xb6, 0x70, + 0x83, 0x7f, 0xe6, 0xce, 0xdb, 0xfe, 0x80, 0x37, 0xab, 0x9b, 0x68, 0x0b, 0x59, 0xcb, 0xc9, 0xfc, + 0xc7, 0x18, 0xcb, 0x57, 0xad, 0x1f, 0x8b, 0xb8, 0x0a, 0x8f, 0x97, 0xbc, 0xc0, 0x2b, 0x60, 0xb5, + 0xdb, 0x1b, 0xf9, 0xc7, 0x92, 0xf1, 0xa3, 0x6c, 0xe6, 0x24, 0x0e, 0x4d, 0x8d, 0x61, 0x5a, 0x4d, + 0xde, 0xe0, 0x8d, 0x09, 0x64, 0x4f, 0x7c, 0xf2, 0x3d, 0x61, 0xbb, 0xdc, 0xcd, 0x86, 0x7b, 0x37, + 0x0e, 0xcd, 0xf2, 0x06, 0x56, 0x0e, 0x93, 0x97, 0x98, 0x38, 0x05, 0x0c, 0xae, 0x52, 0x81, 0xab, + 0xdc, 0x8e, 0x43, 0xb3, 0x84, 0x65, 0x25, 0x58, 0x72, 0xa9, 0x1e, 0xb7, 0x5d, 0xf0, 0x87, 0x71, + 0x67, 0xe3, 0x84, 0x4b, 0x15, 0x19, 0xa6, 0xd5, 0x05, 0x2d, 0xcc, 0x17, 0x46, 0xa9, 0x6b, 0x81, + 0x61, 0x5a, 0x4d, 0x76, 0xf1, 0x2d, 0x97, 0x3b, 0x62, 0x30, 0x0c, 0xe0, 0x81, 0xa4, 0x5b, 0xd7, + 0x40, 0xbe, 0x11, 0x87, 0xe6, 0x34, 0xc9, 0xa6, 0x21, 0xdd, 0x24, 0x3d, 0x43, 0xbd, 0xdc, 0x24, + 0x3d, 0xc6, 0x34, 0x44, 0x76, 0xf0, 0xaa, 0x7e, 0x8e, 0x06, 0x58, 0xac, 0xc7, 0xa1, 0xa9, 0x53, + 0x4c, 0x07, 0x12, 0x39, 0x3c, 0xa1, 0xbd, 0xd1, 0xd0, 0xeb, 0x3b, 0x76, 0x22, 0x5f, 0xba, 0x96, + 0x6b, 0x14, 0xd3, 0x81, 0xd6, 0xef, 0x45, 0xdc, 0x18, 0xff, 0x0e, 0x90, 0x67, 0x78, 0x19, 0x78, + 0xc6, 0x6d, 0xa7, 0xc7, 0x5d, 0xc8, 0x57, 0xd5, 0x5a, 0x8b, 0x43, 0xb3, 0x80, 0xb3, 0x42, 0x95, + 0x44, 0x61, 0x22, 0x23, 0xaf, 0x6d, 0x05, 0xda, 0x34, 0x58, 0x10, 0x85, 0x69, 0x96, 0x95, 0x60, + 0xf9, 0xee, 0x16, 0xd4, 0x32, 0x7b, 0x45, 0xaf, 0x77, 0xcf, 0x70, 0x56, 0xa8, 0xf2, 0xb7, 0x02, + 0x86, 0x79, 0xc8, 0x7d, 0x35, 0x19, 0xa0, 0x22, 0xc3, 0xb4, 0xba, 0x24, 0x7c, 0xd5, 0x39, 0xc2, + 0x57, 0x9b, 0x2f, 0x7c, 0xf5, 0xff, 0x11, 0xbe, 0xc6, 0xfc, 0xe1, 0x5b, 0x9a, 0x2f, 0x7c, 0xf8, + 0xdf, 0xc3, 0x67, 0x7d, 0x38, 0xbb, 0xa0, 0xc6, 0xf9, 0x05, 0x35, 0xae, 0x2e, 0x28, 0xfa, 0x12, + 0x51, 0xf4, 0x2d, 0xa2, 0xe8, 0x34, 0xa2, 0xe8, 0x2c, 0xa2, 0xe8, 0x67, 0x44, 0xd1, 0xaf, 0x88, + 0x1a, 0x57, 0x11, 0x45, 0x5f, 0x2f, 0xa9, 0x71, 0x76, 0x49, 0x8d, 0xf3, 0x4b, 0x6a, 0xbc, 0x7f, + 0x38, 0xf9, 0xe1, 0x0d, 0xec, 0x23, 0xdb, 0xb7, 0xdb, 0x9e, 0x38, 0xee, 0xb7, 0xb5, 0x8f, 0x76, + 0xa7, 0x06, 0x5f, 0xde, 0xa7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x83, 0xed, 0xf0, 0xce, + 0x07, 0x00, 0x00, +} + +func (this *Result) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Result) + if !ok { + that2, ok := that.(Result) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Summary.Equal(&that1.Summary) { + return false + } + if !this.Store.Equal(&that1.Store) { + return false + } + if 
!this.Ingester.Equal(&that1.Ingester) { + return false + } + return true +} +func (this *Summary) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Summary) + if !ok { + that2, ok := that.(Summary) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BytesProcessedPerSeconds != that1.BytesProcessedPerSeconds { + return false + } + if this.LinesProcessedPerSeconds != that1.LinesProcessedPerSeconds { + return false + } + if this.TotalBytesProcessed != that1.TotalBytesProcessed { + return false + } + if this.TotalLinesProcessed != that1.TotalLinesProcessed { + return false + } + if this.ExecTime != that1.ExecTime { + return false + } + return true +} +func (this *Store) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Store) + if !ok { + that2, ok := that.(Store) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TotalChunksRef != that1.TotalChunksRef { + return false + } + if this.TotalChunksDownloaded != that1.TotalChunksDownloaded { + return false + } + if this.ChunksDownloadTime != that1.ChunksDownloadTime { + return false + } + if this.HeadChunkBytes != that1.HeadChunkBytes { + return false + } + if this.HeadChunkLines != that1.HeadChunkLines { + return false + } + if this.DecompressedBytes != that1.DecompressedBytes { + return false + } + if this.DecompressedLines != that1.DecompressedLines { + return false + } + if this.CompressedBytes != that1.CompressedBytes { + return false + } + if this.TotalDuplicates != that1.TotalDuplicates { + return false + } + return true +} +func (this *Ingester) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Ingester) + if !ok { + that2, ok := that.(Ingester) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TotalReached != that1.TotalReached { + return false + } + if this.TotalChunksMatched != that1.TotalChunksMatched { + return false + } + if this.TotalBatches != that1.TotalBatches { + return false + } + if this.TotalLinesSent != that1.TotalLinesSent { + return false + } + if this.HeadChunkBytes != that1.HeadChunkBytes { + return false + } + if this.HeadChunkLines != that1.HeadChunkLines { + return false + } + if this.DecompressedBytes != that1.DecompressedBytes { + return false + } + if this.DecompressedLines != that1.DecompressedLines { + return false + } + if this.CompressedBytes != that1.CompressedBytes { + return false + } + if this.TotalDuplicates != that1.TotalDuplicates { + return false + } + return true +} +func (this *Result) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&stats.Result{") + s = append(s, "Summary: "+strings.Replace(this.Summary.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Store: "+strings.Replace(this.Store.GoString(), `&`, ``, 1)+",\n") + s = append(s, "Ingester: "+strings.Replace(this.Ingester.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Summary) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&stats.Summary{") + s = append(s, "BytesProcessedPerSeconds: "+fmt.Sprintf("%#v", 
this.BytesProcessedPerSeconds)+",\n") + s = append(s, "LinesProcessedPerSeconds: "+fmt.Sprintf("%#v", this.LinesProcessedPerSeconds)+",\n") + s = append(s, "TotalBytesProcessed: "+fmt.Sprintf("%#v", this.TotalBytesProcessed)+",\n") + s = append(s, "TotalLinesProcessed: "+fmt.Sprintf("%#v", this.TotalLinesProcessed)+",\n") + s = append(s, "ExecTime: "+fmt.Sprintf("%#v", this.ExecTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Store) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&stats.Store{") + s = append(s, "TotalChunksRef: "+fmt.Sprintf("%#v", this.TotalChunksRef)+",\n") + s = append(s, "TotalChunksDownloaded: "+fmt.Sprintf("%#v", this.TotalChunksDownloaded)+",\n") + s = append(s, "ChunksDownloadTime: "+fmt.Sprintf("%#v", this.ChunksDownloadTime)+",\n") + s = append(s, "HeadChunkBytes: "+fmt.Sprintf("%#v", this.HeadChunkBytes)+",\n") + s = append(s, "HeadChunkLines: "+fmt.Sprintf("%#v", this.HeadChunkLines)+",\n") + s = append(s, "DecompressedBytes: "+fmt.Sprintf("%#v", this.DecompressedBytes)+",\n") + s = append(s, "DecompressedLines: "+fmt.Sprintf("%#v", this.DecompressedLines)+",\n") + s = append(s, "CompressedBytes: "+fmt.Sprintf("%#v", this.CompressedBytes)+",\n") + s = append(s, "TotalDuplicates: "+fmt.Sprintf("%#v", this.TotalDuplicates)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Ingester) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&stats.Ingester{") + s = append(s, "TotalReached: "+fmt.Sprintf("%#v", this.TotalReached)+",\n") + s = append(s, "TotalChunksMatched: "+fmt.Sprintf("%#v", this.TotalChunksMatched)+",\n") + s = append(s, "TotalBatches: "+fmt.Sprintf("%#v", this.TotalBatches)+",\n") + s = append(s, "TotalLinesSent: "+fmt.Sprintf("%#v", this.TotalLinesSent)+",\n") + s = append(s, "HeadChunkBytes: "+fmt.Sprintf("%#v", this.HeadChunkBytes)+",\n") + s = append(s, "HeadChunkLines: "+fmt.Sprintf("%#v", this.HeadChunkLines)+",\n") + s = append(s, "DecompressedBytes: "+fmt.Sprintf("%#v", this.DecompressedBytes)+",\n") + s = append(s, "DecompressedLines: "+fmt.Sprintf("%#v", this.DecompressedLines)+",\n") + s = append(s, "CompressedBytes: "+fmt.Sprintf("%#v", this.CompressedBytes)+",\n") + s = append(s, "TotalDuplicates: "+fmt.Sprintf("%#v", this.TotalDuplicates)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStats(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Result) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Result) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintStats(dAtA, i, uint64(m.Summary.Size())) + n1, err := m.Summary.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.Store.Size())) + n2, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintStats(dAtA, i, uint64(m.Ingester.Size())) + n3, err := m.Ingester.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *Summary) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.BytesProcessedPerSeconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.BytesProcessedPerSeconds)) + } + if m.LinesProcessedPerSeconds != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.LinesProcessedPerSeconds)) + } + if m.TotalBytesProcessed != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalBytesProcessed)) + } + if m.TotalLinesProcessed != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalLinesProcessed)) + } + if m.ExecTime != 0 { + dAtA[i] = 0x29 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ExecTime)))) + i += 8 + } + return i, nil +} + +func (m *Store) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Store) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TotalChunksRef != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalChunksRef)) + } + if m.TotalChunksDownloaded != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalChunksDownloaded)) + } + if m.ChunksDownloadTime != 0 { + dAtA[i] = 0x19 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ChunksDownloadTime)))) + i += 8 + } + if m.HeadChunkBytes != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkBytes)) + } + if m.HeadChunkLines != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkLines)) + } + if m.DecompressedBytes != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.DecompressedBytes)) + } + if m.DecompressedLines != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.DecompressedLines)) + } + if m.CompressedBytes != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.CompressedBytes)) + } + if m.TotalDuplicates != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalDuplicates)) + } + return i, nil +} + +func (m *Ingester) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingester) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TotalReached != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalReached)) + } + if m.TotalChunksMatched != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalChunksMatched)) + } + if m.TotalBatches != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalBatches)) + } + if m.TotalLinesSent != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalLinesSent)) + } + if m.HeadChunkBytes != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkBytes)) + } + if m.HeadChunkLines != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkLines)) + } + if m.DecompressedBytes != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.DecompressedBytes)) + } + if m.DecompressedLines != 
0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.DecompressedLines)) + } + if m.CompressedBytes != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.CompressedBytes)) + } + if m.TotalDuplicates != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintStats(dAtA, i, uint64(m.TotalDuplicates)) + } + return i, nil +} + +func encodeVarintStats(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Result) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Summary.Size() + n += 1 + l + sovStats(uint64(l)) + l = m.Store.Size() + n += 1 + l + sovStats(uint64(l)) + l = m.Ingester.Size() + n += 1 + l + sovStats(uint64(l)) + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BytesProcessedPerSeconds != 0 { + n += 1 + sovStats(uint64(m.BytesProcessedPerSeconds)) + } + if m.LinesProcessedPerSeconds != 0 { + n += 1 + sovStats(uint64(m.LinesProcessedPerSeconds)) + } + if m.TotalBytesProcessed != 0 { + n += 1 + sovStats(uint64(m.TotalBytesProcessed)) + } + if m.TotalLinesProcessed != 0 { + n += 1 + sovStats(uint64(m.TotalLinesProcessed)) + } + if m.ExecTime != 0 { + n += 9 + } + return n +} + +func (m *Store) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TotalChunksRef != 0 { + n += 1 + sovStats(uint64(m.TotalChunksRef)) + } + if m.TotalChunksDownloaded != 0 { + n += 1 + sovStats(uint64(m.TotalChunksDownloaded)) + } + if m.ChunksDownloadTime != 0 { + n += 9 + } + if m.HeadChunkBytes != 0 { + n += 1 + sovStats(uint64(m.HeadChunkBytes)) + } + if m.HeadChunkLines != 0 { + n += 1 + sovStats(uint64(m.HeadChunkLines)) + } + if m.DecompressedBytes != 0 { + n += 1 + sovStats(uint64(m.DecompressedBytes)) + } + if m.DecompressedLines != 0 { + n += 1 + sovStats(uint64(m.DecompressedLines)) + } + if m.CompressedBytes != 0 { + n += 1 + sovStats(uint64(m.CompressedBytes)) + } + if m.TotalDuplicates != 0 { + n += 1 + sovStats(uint64(m.TotalDuplicates)) + } + return n +} + +func (m *Ingester) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TotalReached != 0 { + n += 1 + sovStats(uint64(m.TotalReached)) + } + if m.TotalChunksMatched != 0 { + n += 1 + sovStats(uint64(m.TotalChunksMatched)) + } + if m.TotalBatches != 0 { + n += 1 + sovStats(uint64(m.TotalBatches)) + } + if m.TotalLinesSent != 0 { + n += 1 + sovStats(uint64(m.TotalLinesSent)) + } + if m.HeadChunkBytes != 0 { + n += 1 + sovStats(uint64(m.HeadChunkBytes)) + } + if m.HeadChunkLines != 0 { + n += 1 + sovStats(uint64(m.HeadChunkLines)) + } + if m.DecompressedBytes != 0 { + n += 1 + sovStats(uint64(m.DecompressedBytes)) + } + if m.DecompressedLines != 0 { + n += 1 + sovStats(uint64(m.DecompressedLines)) + } + if m.CompressedBytes != 0 { + n += 1 + sovStats(uint64(m.CompressedBytes)) + } + if m.TotalDuplicates != 0 { + n += 1 + sovStats(uint64(m.TotalDuplicates)) + } + return n +} + +func sovStats(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStats(x uint64) (n int) { + return sovStats(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Result) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Result{`, + `Summary:` + strings.Replace(strings.Replace(this.Summary.String(), "Summary", "Summary", 1), `&`, ``, 1) + `,`, + `Store:` + strings.Replace(strings.Replace(this.Store.String(), 
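As an aside on the generated helpers: encodeVarintStats and sovStats implement standard protobuf varints, seven payload bits per byte with the high bit flagging continuation. A hedged in-package sketch (the ExampleVarint name is ours, not part of the generated file):

package stats

import "fmt"

// ExampleVarint is an illustrative sketch, not generated code.
func ExampleVarint() {
	buf := make([]byte, 2)
	// 300 encodes as two bytes: the low 7 bits (44) with the continuation
	// bit set (0xAC), then the remaining bits (2).
	n := encodeVarintStats(buf, 0, 300)
	fmt.Println(buf[:n], sovStats(300))
	// Output: [172 2] 2
}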
"Store", "Store", 1), `&`, ``, 1) + `,`, + `Ingester:` + strings.Replace(strings.Replace(this.Ingester.String(), "Ingester", "Ingester", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Summary) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Summary{`, + `BytesProcessedPerSeconds:` + fmt.Sprintf("%v", this.BytesProcessedPerSeconds) + `,`, + `LinesProcessedPerSeconds:` + fmt.Sprintf("%v", this.LinesProcessedPerSeconds) + `,`, + `TotalBytesProcessed:` + fmt.Sprintf("%v", this.TotalBytesProcessed) + `,`, + `TotalLinesProcessed:` + fmt.Sprintf("%v", this.TotalLinesProcessed) + `,`, + `ExecTime:` + fmt.Sprintf("%v", this.ExecTime) + `,`, + `}`, + }, "") + return s +} +func (this *Store) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Store{`, + `TotalChunksRef:` + fmt.Sprintf("%v", this.TotalChunksRef) + `,`, + `TotalChunksDownloaded:` + fmt.Sprintf("%v", this.TotalChunksDownloaded) + `,`, + `ChunksDownloadTime:` + fmt.Sprintf("%v", this.ChunksDownloadTime) + `,`, + `HeadChunkBytes:` + fmt.Sprintf("%v", this.HeadChunkBytes) + `,`, + `HeadChunkLines:` + fmt.Sprintf("%v", this.HeadChunkLines) + `,`, + `DecompressedBytes:` + fmt.Sprintf("%v", this.DecompressedBytes) + `,`, + `DecompressedLines:` + fmt.Sprintf("%v", this.DecompressedLines) + `,`, + `CompressedBytes:` + fmt.Sprintf("%v", this.CompressedBytes) + `,`, + `TotalDuplicates:` + fmt.Sprintf("%v", this.TotalDuplicates) + `,`, + `}`, + }, "") + return s +} +func (this *Ingester) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingester{`, + `TotalReached:` + fmt.Sprintf("%v", this.TotalReached) + `,`, + `TotalChunksMatched:` + fmt.Sprintf("%v", this.TotalChunksMatched) + `,`, + `TotalBatches:` + fmt.Sprintf("%v", this.TotalBatches) + `,`, + `TotalLinesSent:` + fmt.Sprintf("%v", this.TotalLinesSent) + `,`, + `HeadChunkBytes:` + fmt.Sprintf("%v", this.HeadChunkBytes) + `,`, + `HeadChunkLines:` + fmt.Sprintf("%v", this.HeadChunkLines) + `,`, + `DecompressedBytes:` + fmt.Sprintf("%v", this.DecompressedBytes) + `,`, + `DecompressedLines:` + fmt.Sprintf("%v", this.DecompressedLines) + `,`, + `CompressedBytes:` + fmt.Sprintf("%v", this.CompressedBytes) + `,`, + `TotalDuplicates:` + fmt.Sprintf("%v", this.TotalDuplicates) + `,`, + `}`, + }, "") + return s +} +func valueToStringStats(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Result) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Result: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingester", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStats + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStats + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Ingester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStats(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesProcessedPerSeconds", wireType) + } + m.BytesProcessedPerSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesProcessedPerSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LinesProcessedPerSeconds", wireType) + } + m.LinesProcessedPerSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LinesProcessedPerSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalBytesProcessed", wireType) + } + m.TotalBytesProcessed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalBytesProcessed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalLinesProcessed", wireType) + } + m.TotalLinesProcessed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalLinesProcessed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecTime", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ExecTime = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipStats(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Store) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Store: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Store: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalChunksRef", wireType) + } + m.TotalChunksRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalChunksRef |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalChunksDownloaded", wireType) + } + m.TotalChunksDownloaded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalChunksDownloaded |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunksDownloadTime", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ChunksDownloadTime = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkBytes", wireType) + } + m.HeadChunkBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + m.HeadChunkBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkLines", wireType) + } + m.HeadChunkLines = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HeadChunkLines |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DecompressedBytes", wireType) + } + m.DecompressedBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DecompressedBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DecompressedLines", wireType) + } + m.DecompressedLines = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DecompressedLines |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressedBytes", wireType) + } + m.CompressedBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressedBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDuplicates", wireType) + } + m.TotalDuplicates = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDuplicates |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStats(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingester) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingester: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingester: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalReached", wireType) + } + m.TotalReached = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalReached |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TotalChunksMatched", wireType) + } + m.TotalChunksMatched = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalChunksMatched |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalBatches", wireType) + } + m.TotalBatches = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalBatches |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalLinesSent", wireType) + } + m.TotalLinesSent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalLinesSent |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkBytes", wireType) + } + m.HeadChunkBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HeadChunkBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkLines", wireType) + } + m.HeadChunkLines = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HeadChunkLines |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DecompressedBytes", wireType) + } + m.DecompressedBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DecompressedBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DecompressedLines", wireType) + } + m.DecompressedLines = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DecompressedLines |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressedBytes", wireType) + } + m.CompressedBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressedBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDuplicates", wireType) + } + m.TotalDuplicates = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStats + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDuplicates |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err 
:= skipStats(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStats + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStats(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStats + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthStats + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStats + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStats(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthStats + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStats = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStats = fmt.Errorf("proto: integer overflow") +)
diff --git a/pkg/logql/stats/stats.proto b/pkg/logql/stats/stats.proto
new file mode 100644
index 0000000000000..78e6d385b64a7
--- /dev/null
+++ b/pkg/logql/stats/stats.proto
@@ -0,0 +1,77 @@
+syntax = "proto3";
+
+package stats;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option go_package = "github.com/grafana/loki/pkg/logql/stats";
+
+
+// Result contains LogQL query statistics.
+message Result {
+  Summary summary = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "summary"];
+  Store store = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "store"];
+  Ingester ingester = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "ingester"];
+}
+
+// Summary is the summary of a query's statistics.
+message Summary {
+  // Total bytes processed per second.
+  int64 bytesProcessedPerSeconds = 1 [(gogoproto.jsontag) = "bytesProcessedPerSeconds"];
+  // Total lines processed per second.
+  int64 linesProcessedPerSeconds = 2 [(gogoproto.jsontag) = "linesProcessedPerSeconds"];
+  // Total bytes processed.
+  int64 totalBytesProcessed = 3 [(gogoproto.jsontag) = "totalBytesProcessed"];
+  // Total lines processed.
+  int64 totalLinesProcessed = 4 [(gogoproto.jsontag) = "totalLinesProcessed"];
+  // Execution time in nanoseconds.
+  double execTime = 5 [(gogoproto.jsontag) = "execTime"];
+}
+
+message Store {
+  // Total number of chunk references fetched from the index.
+  int64 totalChunksRef = 1 [(gogoproto.jsontag) = "totalChunksRef"];
+  // Total number of chunks fetched.
+  int64 totalChunksDownloaded = 2 [(gogoproto.jsontag) = "totalChunksDownloaded"];
+  // Time spent fetching chunks in nanoseconds.
+  double chunksDownloadTime = 3 [(gogoproto.jsontag) = "chunksDownloadTime"];
+
+  // Total bytes processed that were already in memory (found in the head chunk).
+  int64 headChunkBytes = 4 [(gogoproto.jsontag) = "headChunkBytes"];
+  // Total lines processed that were already in memory (found in the head chunk).
+  int64 headChunkLines = 5 [(gogoproto.jsontag) = "headChunkLines"];
+  // Total bytes decompressed and processed from chunks.
+  int64 decompressedBytes = 6 [(gogoproto.jsontag) = "decompressedBytes"];
+  // Total lines decompressed and processed from chunks.
+  int64 decompressedLines = 7 [(gogoproto.jsontag) = "decompressedLines"];
+  // Total bytes of compressed chunks (blocks) processed.
+  int64 compressedBytes = 8 [(gogoproto.jsontag) = "compressedBytes"];
+  // Total duplicates found while processing.
+  int64 totalDuplicates = 9 [(gogoproto.jsontag) = "totalDuplicates"];
+}
+
+message Ingester {
+  // Total number of ingesters reached by this query.
+  int32 totalReached = 1 [(gogoproto.jsontag) = "totalReached"];
+  // Total number of chunks matched by the query on ingesters.
+  int64 totalChunksMatched = 2 [(gogoproto.jsontag) = "totalChunksMatched"];
+  // Total number of batches sent by ingesters.
+  int64 totalBatches = 3 [(gogoproto.jsontag) = "totalBatches"];
+  // Total lines sent by ingesters.
+  int64 totalLinesSent = 4 [(gogoproto.jsontag) = "totalLinesSent"];
+
+  // Total bytes processed that were already in memory (found in the head chunk).
+  int64 headChunkBytes = 5 [(gogoproto.jsontag) = "headChunkBytes"];
+  // Total lines processed that were already in memory (found in the head chunk).
+  int64 headChunkLines = 6 [(gogoproto.jsontag) = "headChunkLines"];
+  // Total bytes decompressed and processed from chunks.
+  int64 decompressedBytes = 7 [(gogoproto.jsontag) = "decompressedBytes"];
+  // Total lines decompressed and processed from chunks.
+  int64 decompressedLines = 8 [(gogoproto.jsontag) = "decompressedLines"];
+  // Total bytes of compressed chunks (blocks) processed.
+  int64 compressedBytes = 9 [(gogoproto.jsontag) = "compressedBytes"];
+  // Total duplicates found while processing.
+ int64 totalDuplicates = 10 [(gogoproto.jsontag) = "totalDuplicates"]; +} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index ccbfa2e8ee2d9..2d22a5c3f251f 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logql/marshal" marshal_legacy "github.com/grafana/loki/pkg/logql/marshal/legacy" + "github.com/grafana/loki/pkg/logql/stats" json "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" @@ -101,7 +102,7 @@ func (codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrang return nil, httpgrpc.Errorf(r.StatusCode, string(body)) } - sp, _ := opentracing.StartSpanFromContext(ctx, "DecodeResponse") + sp, _ := opentracing.StartSpanFromContext(ctx, "codec.DecodeResponse") defer sp.Finish() buf, err := ioutil.ReadAll(r.Body) @@ -121,19 +122,23 @@ func (codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrang } switch string(resp.Data.ResultType) { case loghttp.ResultTypeMatrix: - return &queryrange.PrometheusResponse{ - Status: loghttp.QueryStatusSuccess, - Data: queryrange.PrometheusData{ - ResultType: loghttp.ResultTypeMatrix, - Result: toProto(resp.Data.Result.(loghttp.Matrix)), + return &LokiPromResponse{ + Response: &queryrange.PrometheusResponse{ + Status: loghttp.QueryStatusSuccess, + Data: queryrange.PrometheusData{ + ResultType: loghttp.ResultTypeMatrix, + Result: toProto(resp.Data.Result.(loghttp.Matrix)), + }, }, + Statistics: resp.Data.Statistics, }, nil case loghttp.ResultTypeStream: return &LokiResponse{ - Status: loghttp.QueryStatusSuccess, - Direction: req.(*LokiRequest).Direction, - Limit: req.(*LokiRequest).Limit, - Version: uint32(loghttp.GetVersion(req.(*LokiRequest).Path)), + Status: loghttp.QueryStatusSuccess, + Direction: req.(*LokiRequest).Direction, + Limit: req.(*LokiRequest).Limit, + Version: uint32(loghttp.GetVersion(req.(*LokiRequest).Path)), + Statistics: resp.Data.Statistics, Data: LokiData{ ResultType: loghttp.ResultTypeStream, Result: resp.Data.Result.(loghttp.Streams).ToProto(), @@ -145,11 +150,11 @@ func (codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrang } func (codec) EncodeResponse(ctx context.Context, res queryrange.Response) (*http.Response, error) { - sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse") + sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse") defer sp.Finish() - if _, ok := res.(*queryrange.PrometheusResponse); ok { - return queryrange.PrometheusCodec.EncodeResponse(ctx, res) + if promRes, ok := res.(*LokiPromResponse); ok { + return promRes.encode(ctx) } proto, ok := res.(*LokiResponse) @@ -165,13 +170,17 @@ func (codec) EncodeResponse(ctx context.Context, res queryrange.Response) (*http Entries: stream.Entries, } } + result := logql.Result{ + Data: logql.Streams(streams), + Statistics: proto.Statistics, + } var buf bytes.Buffer if loghttp.Version(proto.Version) == loghttp.VersionLegacy { - if err := marshal_legacy.WriteQueryResponseJSON(logql.Streams(streams), &buf); err != nil { + if err := marshal_legacy.WriteQueryResponseJSON(result, &buf); err != nil { return nil, err } } else { - if err := marshal.WriteQueryResponseJSON(logql.Streams(streams), &buf); err != nil { + if err := marshal.WriteQueryResponseJSON(result, &buf); err != nil { return nil, err } } @@ -192,8 +201,21 @@ func (codec) MergeResponse(responses 
...queryrange.Response) (queryrange.Respons if len(responses) == 0 { return nil, errors.New("merging responses requires at least one response") } - if _, ok := responses[0].(*queryrange.PrometheusResponse); ok { - return queryrange.PrometheusCodec.MergeResponse(responses...) + var mergedStats stats.Result + if _, ok := responses[0].(*LokiPromResponse); ok { + promResponses := make([]queryrange.Response, 0, len(responses)) + for _, res := range responses { + mergedStats.Merge(res.(*LokiPromResponse).Statistics) + promResponses = append(promResponses, res.(*LokiPromResponse).Response) + } + promRes, err := queryrange.PrometheusCodec.MergeResponse(promResponses...) + if err != nil { + return nil, err + } + return &LokiPromResponse{ + Response: promRes.(*queryrange.PrometheusResponse), + Statistics: mergedStats, + }, nil } lokiRes, ok := responses[0].(*LokiResponse) if !ok { @@ -202,16 +224,19 @@ func (codec) MergeResponse(responses ...queryrange.Response) (queryrange.Respons lokiResponses := make([]*LokiResponse, 0, len(responses)) for _, res := range responses { - lokiResponses = append(lokiResponses, res.(*LokiResponse)) + lokiResult := res.(*LokiResponse) + mergedStats.Merge(lokiResult.Statistics) + lokiResponses = append(lokiResponses, lokiResult) } return &LokiResponse{ - Status: loghttp.QueryStatusSuccess, - Direction: lokiRes.Direction, - Limit: lokiRes.Limit, - Version: lokiRes.Version, - ErrorType: lokiRes.ErrorType, - Error: lokiRes.Error, + Status: loghttp.QueryStatusSuccess, + Direction: lokiRes.Direction, + Limit: lokiRes.Limit, + Version: lokiRes.Version, + ErrorType: lokiRes.ErrorType, + Error: lokiRes.Error, + Statistics: mergedStats, Data: LokiData{ ResultType: loghttp.ResultTypeStream, Result: mergeOrderedNonOverlappingStreams(lokiResponses, lokiRes.Limit, lokiRes.Direction), diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 6e8fd6cf3104c..dfcf244b9a082 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/queryrange" "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql/stats" "github.com/stretchr/testify/require" ) @@ -77,12 +78,15 @@ func Test_codec_DecodeResponse(t *testing.T) { {"not success", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true}, {"unknown", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true}, {"matrix", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(matrixString))}, nil, - &queryrange.PrometheusResponse{ - Status: loghttp.QueryStatusSuccess, - Data: queryrange.PrometheusData{ - ResultType: loghttp.ResultTypeMatrix, - Result: sampleStreams, + &LokiPromResponse{ + Response: &queryrange.PrometheusResponse{ + Status: loghttp.QueryStatusSuccess, + Data: queryrange.PrometheusData{ + ResultType: loghttp.ResultTypeMatrix, + Result: sampleStreams, + }, }, + Statistics: statsResult, }, false}, {"streams v1", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(streamsString))}, &LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/loki/api/v1/query_range"}, @@ -95,6 +99,7 @@ func Test_codec_DecodeResponse(t *testing.T) { ResultType: loghttp.ResultTypeStream, Result: logStreams, }, + Statistics: statsResult, }, false}, {"streams legacy", &http.Response{StatusCode: 200, Body: 
ioutil.NopCloser(strings.NewReader(streamsString))}, &LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/api/prom/query_range"}, @@ -107,6 +112,7 @@ func Test_codec_DecodeResponse(t *testing.T) { ResultType: loghttp.ResultTypeStream, Result: logStreams, }, + Statistics: statsResult, }, false}, } for _, tt := range tests { @@ -170,12 +176,15 @@ func Test_codec_EncodeResponse(t *testing.T) { wantErr bool }{ {"error", &badResponse{}, "", true}, - {"prom", &queryrange.PrometheusResponse{ - Status: loghttp.QueryStatusSuccess, - Data: queryrange.PrometheusData{ - ResultType: loghttp.ResultTypeMatrix, - Result: sampleStreams, + {"prom", &LokiPromResponse{ + Response: &queryrange.PrometheusResponse{ + Status: loghttp.QueryStatusSuccess, + Data: queryrange.PrometheusData{ + ResultType: loghttp.ResultTypeMatrix, + Result: sampleStreams, + }, }, + Statistics: statsResult, }, matrixString, false}, {"loki v1", &LokiResponse{ @@ -187,6 +196,7 @@ func Test_codec_EncodeResponse(t *testing.T) { ResultType: loghttp.ResultTypeStream, Result: logStreams, }, + Statistics: statsResult, }, streamsString, false}, {"loki legacy", &LokiResponse{ @@ -198,6 +208,7 @@ func Test_codec_EncodeResponse(t *testing.T) { ResultType: loghttp.ResultTypeStream, Result: logStreams, }, + Statistics: statsResult, }, streamsStringLegacy, false}, } for _, tt := range tests { @@ -228,18 +239,23 @@ func Test_codec_MergeResponse(t *testing.T) { {"empty", []queryrange.Response{}, nil, true}, {"unknown response", []queryrange.Response{&badResponse{}}, nil, true}, {"prom", []queryrange.Response{ - &queryrange.PrometheusResponse{ - Status: loghttp.QueryStatusSuccess, - Data: queryrange.PrometheusData{ - ResultType: loghttp.ResultTypeMatrix, - Result: sampleStreams, + &LokiPromResponse{ + Response: &queryrange.PrometheusResponse{ + Status: loghttp.QueryStatusSuccess, + Data: queryrange.PrometheusData{ + ResultType: loghttp.ResultTypeMatrix, + Result: sampleStreams, + }, }, - }}, - &queryrange.PrometheusResponse{ - Status: loghttp.QueryStatusSuccess, - Data: queryrange.PrometheusData{ - ResultType: loghttp.ResultTypeMatrix, - Result: sampleStreams, + }, + }, + &LokiPromResponse{ + Response: &queryrange.PrometheusResponse{ + Status: loghttp.QueryStatusSuccess, + Data: queryrange.PrometheusData{ + ResultType: loghttp.ResultTypeMatrix, + Result: sampleStreams, + }, }, }, false, @@ -613,8 +629,41 @@ func (badReader) Read(p []byte) (n int, err error) { } var ( + statsResultString = `"stats" : { + "ingester" : { + "compressedBytes": 1, + "decompressedBytes": 2, + "decompressedLines": 3, + "headChunkBytes": 4, + "headChunkLines": 5, + "totalBatches": 6, + "totalChunksMatched": 7, + "totalDuplicates": 8, + "totalLinesSent": 9, + "totalReached": 10 + }, + "store": { + "compressedBytes": 11, + "decompressedBytes": 12, + "decompressedLines": 13, + "headChunkBytes": 14, + "headChunkLines": 15, + "chunksDownloadTime": 16, + "totalChunksRef": 17, + "totalChunksDownloaded": 18, + "totalDuplicates": 19 + }, + "summary": { + "bytesProcessedPerSeconds": 20, + "execTime": 21, + "linesProcessedPerSeconds": 22, + "totalBytesProcessed": 23, + "totalLinesProcessed": 24 + } + },` matrixString = `{ "data": { + ` + statsResultString + ` "resultType": "matrix", "result": [ { @@ -662,6 +711,7 @@ var ( streamsString = `{ "status": "success", "data": { + ` + statsResultString + ` "resultType": "streams", "result": [ { @@ -683,8 +733,9 @@ var ( ] } }` - streamsStringLegacy = 
`{"streams":[{"labels":"{test=\"test\"}","entries":[{"ts":"1970-01-02T10:17:36.789012345Z","line":"super line"}]},{"labels":"{test=\"test2\"}","entries":[{"ts":"1970-01-02T10:17:36.789012346Z","line":"super line2"}]}]}` - logStreams = []logproto.Stream{ + streamsStringLegacy = `{ + ` + statsResultString + `"streams":[{"labels":"{test=\"test\"}","entries":[{"ts":"1970-01-02T10:17:36.789012345Z","line":"super line"}]},{"labels":"{test=\"test2\"}","entries":[{"ts":"1970-01-02T10:17:36.789012346Z","line":"super line2"}]}]}` + logStreams = []logproto.Stream{ { Labels: `{test="test"}`, Entries: []logproto.Entry{ @@ -704,6 +755,38 @@ var ( }, }, } + statsResult = stats.Result{ + Summary: stats.Summary{ + BytesProcessedPerSeconds: 20, + ExecTime: 21, + LinesProcessedPerSeconds: 22, + TotalBytesProcessed: 23, + TotalLinesProcessed: 24, + }, + Store: stats.Store{ + CompressedBytes: 11, + DecompressedBytes: 12, + DecompressedLines: 13, + HeadChunkBytes: 14, + HeadChunkLines: 15, + ChunksDownloadTime: 16, + TotalChunksRef: 17, + TotalChunksDownloaded: 18, + TotalDuplicates: 19, + }, + Ingester: stats.Ingester{ + CompressedBytes: 1, + DecompressedBytes: 2, + DecompressedLines: 3, + HeadChunkBytes: 4, + HeadChunkLines: 5, + TotalBatches: 6, + TotalChunksMatched: 7, + TotalDuplicates: 8, + TotalLinesSent: 9, + TotalReached: 10, + }, + } ) func BenchmarkResponseMerge(b *testing.B) { diff --git a/pkg/querier/queryrange/prometheus.go b/pkg/querier/queryrange/prometheus.go new file mode 100644 index 0000000000000..e4007defc75a1 --- /dev/null +++ b/pkg/querier/queryrange/prometheus.go @@ -0,0 +1,67 @@ +package queryrange + +import ( + "bytes" + "context" + "io/ioutil" + "net/http" + + "github.com/cortexproject/cortex/pkg/querier/queryrange" + "github.com/grafana/loki/pkg/logql/stats" + jsoniter "github.com/json-iterator/go" + "github.com/opentracing/opentracing-go" + otlog "github.com/opentracing/opentracing-go/log" +) + +var jsonStd = jsoniter.ConfigCompatibleWithStandardLibrary + +// prometheusResponseExtractor wraps the original prometheus cache extractor. +// Statistics are discarded when using cache entries. +var prometheusResponseExtractor = queryrange.ExtractorFunc(func(start, end int64, from queryrange.Response) queryrange.Response { + return &LokiPromResponse{ + Response: queryrange.PrometheusResponseExtractor. + Extract(start, end, from.(*LokiPromResponse).Response).(*queryrange.PrometheusResponse), + } +}) + +// encode encodes a Prometheus response and injects Loki stats. +func (p *LokiPromResponse) encode(ctx context.Context) (*http.Response, error) { + sp := opentracing.SpanFromContext(ctx) + // embed response and add statistics. 
+ b, err := jsonStd.Marshal(struct { + Status string `json:"status"` + Data struct { + queryrange.PrometheusData + Statistics stats.Result `json:"stats"` + } `json:"data,omitempty"` + ErrorType string `json:"errorType,omitempty"` + Error string `json:"error,omitempty"` + }{ + Error: p.Response.Error, + Data: struct { + queryrange.PrometheusData + Statistics stats.Result `json:"stats"` + }{ + PrometheusData: p.Response.Data, + Statistics: p.Statistics, + }, + ErrorType: p.Response.ErrorType, + Status: p.Response.Status, + }) + if err != nil { + return nil, err + } + + if sp != nil { + sp.LogFields(otlog.Int("bytes", len(b))) + } + + resp := http.Response{ + Header: http.Header{ + "Content-Type": []string{"application/json"}, + }, + Body: ioutil.NopCloser(bytes.NewBuffer(b)), + StatusCode: http.StatusOK, + } + return &resp, nil +} diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index 5e6c0f687a6af..aac3f3690df3c 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -5,14 +5,15 @@ package queryrange import ( fmt "fmt" + queryrange "github.com/cortexproject/cortex/pkg/querier/queryrange" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" logproto "github.com/grafana/loki/pkg/logproto" + stats "github.com/grafana/loki/pkg/logql/stats" io "io" math "math" - math_bits "math/bits" reflect "reflect" strings "strings" time "time" @@ -28,7 +29,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type LokiRequest struct { Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` @@ -53,7 +54,7 @@ func (m *LokiRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_LokiRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -122,13 +123,14 @@ func (m *LokiRequest) GetPath() string { } type LokiResponse struct { - Status string `protobuf:"bytes,1,opt,name=Status,json=status,proto3" json:"status"` - Data LokiData `protobuf:"bytes,2,opt,name=Data,json=data,proto3" json:"data,omitempty"` - ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,json=errorType,proto3" json:"errorType,omitempty"` - Error string `protobuf:"bytes,4,opt,name=Error,json=error,proto3" json:"error,omitempty"` - Direction logproto.Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` - Limit uint32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` - Version uint32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` + Status string `protobuf:"bytes,1,opt,name=Status,json=status,proto3" json:"status"` + Data LokiData `protobuf:"bytes,2,opt,name=Data,json=data,proto3" json:"data,omitempty"` + ErrorType string `protobuf:"bytes,3,opt,name=ErrorType,json=errorType,proto3" json:"errorType,omitempty"` + Error string `protobuf:"bytes,4,opt,name=Error,json=error,proto3" json:"error,omitempty"` + Direction logproto.Direction 
`protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Limit uint32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + Version uint32 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` + Statistics stats.Result `protobuf:"bytes,8,opt,name=statistics,proto3" json:"statistics"` } func (m *LokiResponse) Reset() { *m = LokiResponse{} } @@ -144,7 +146,7 @@ func (m *LokiResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return xxx_messageInfo_LokiResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -212,6 +214,13 @@ func (m *LokiResponse) GetVersion() uint32 { return 0 } +func (m *LokiResponse) GetStatistics() stats.Result { + if m != nil { + return m.Statistics + } + return stats.Result{} +} + type LokiData struct { ResultType string `protobuf:"bytes,1,opt,name=ResultType,json=resultType,proto3" json:"resultType"` Result []logproto.Stream `protobuf:"bytes,2,rep,name=Result,json=result,proto3" json:"result"` @@ -230,7 +239,7 @@ func (m *LokiData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LokiData.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + n, err := m.MarshalTo(b) if err != nil { return nil, err } @@ -263,10 +272,63 @@ func (m *LokiData) GetResult() []logproto.Stream { return nil } +// LokiPromResponse wraps a Prometheus response with statistics. +type LokiPromResponse struct { + Response *queryrange.PrometheusResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + Statistics stats.Result `protobuf:"bytes,2,opt,name=statistics,proto3" json:"statistics"` +} + +func (m *LokiPromResponse) Reset() { *m = LokiPromResponse{} } +func (*LokiPromResponse) ProtoMessage() {} +func (*LokiPromResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51b9d53b40d11902, []int{3} +} +func (m *LokiPromResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LokiPromResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LokiPromResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LokiPromResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LokiPromResponse.Merge(m, src) +} +func (m *LokiPromResponse) XXX_Size() int { + return m.Size() +} +func (m *LokiPromResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LokiPromResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LokiPromResponse proto.InternalMessageInfo + +func (m *LokiPromResponse) GetResponse() *queryrange.PrometheusResponse { + if m != nil { + return m.Response + } + return nil +} + +func (m *LokiPromResponse) GetStatistics() stats.Result { + if m != nil { + return m.Statistics + } + return stats.Result{} +} + func init() { proto.RegisterType((*LokiRequest)(nil), "queryrange.LokiRequest") proto.RegisterType((*LokiResponse)(nil), "queryrange.LokiResponse") proto.RegisterType((*LokiData)(nil), "queryrange.LokiData") + proto.RegisterType((*LokiPromResponse)(nil), "queryrange.LokiPromResponse") } func init() { @@ -274,41 +336,48 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 533 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x51, 0xc1, 0x8e, 0xd3, 
0x3c, - 0x10, 0x8e, 0xbb, 0x4d, 0xba, 0x71, 0xff, 0xbf, 0x20, 0x77, 0x05, 0x51, 0x91, 0x9c, 0xa8, 0x17, - 0x82, 0x04, 0xa9, 0x28, 0x20, 0x21, 0x0e, 0x08, 0x45, 0xcb, 0x8d, 0x93, 0xb7, 0x2f, 0x90, 0x6e, - 0x4d, 0x36, 0xda, 0xa6, 0xce, 0xda, 0x0e, 0x52, 0x6f, 0x3c, 0x42, 0x1f, 0x83, 0x57, 0xe0, 0x0d, - 0xf6, 0xd8, 0xe3, 0x9e, 0x02, 0x4d, 0x2f, 0xa8, 0xa7, 0x7d, 0x04, 0x14, 0xbb, 0x69, 0xc3, 0x0d, - 0x4e, 0x9e, 0xf9, 0x66, 0xbe, 0xf1, 0x37, 0xdf, 0xc0, 0xa7, 0xd9, 0x75, 0x3c, 0xba, 0xc9, 0x29, - 0x4f, 0x28, 0x57, 0xef, 0x92, 0x47, 0x8b, 0x98, 0x36, 0xc2, 0x20, 0xe3, 0x4c, 0x32, 0x04, 0x8f, - 0xc8, 0xe0, 0x45, 0x9c, 0xc8, 0xab, 0x7c, 0x1a, 0x5c, 0xb2, 0x74, 0x14, 0xb3, 0x98, 0x8d, 0x54, - 0xcb, 0x34, 0xff, 0xac, 0x32, 0x95, 0xa8, 0x48, 0x53, 0x07, 0x4f, 0xaa, 0x3f, 0xe6, 0x2c, 0xd6, - 0x85, 0x3a, 0xd8, 0x17, 0xdd, 0x98, 0xb1, 0x78, 0x4e, 0x8f, 0x23, 0x64, 0x92, 0x52, 0x21, 0xa3, - 0x34, 0xd3, 0x0d, 0xc3, 0x55, 0x0b, 0x76, 0x3f, 0xb1, 0xeb, 0x84, 0xd0, 0x9b, 0x9c, 0x0a, 0x89, - 0xce, 0xa0, 0xa9, 0xa4, 0x38, 0xc0, 0x03, 0xbe, 0x4d, 0x74, 0x52, 0xa1, 0xf3, 0x24, 0x4d, 0xa4, - 0xd3, 0xf2, 0x80, 0xff, 0x3f, 0xd1, 0x09, 0x42, 0xb0, 0x2d, 0x24, 0xcd, 0x9c, 0x13, 0x0f, 0xf8, - 0x27, 0x44, 0xc5, 0xe8, 0x3d, 0xec, 0x08, 0x19, 0x71, 0x39, 0x11, 0x4e, 0xdb, 0x03, 0x7e, 0x77, - 0x3c, 0x08, 0xb4, 0x84, 0xa0, 0x96, 0x10, 0x4c, 0x6a, 0x09, 0xe1, 0xe9, 0x6d, 0xe1, 0x1a, 0xab, - 0x1f, 0x2e, 0x20, 0x35, 0x09, 0xbd, 0x83, 0x26, 0x5d, 0xcc, 0x26, 0xc2, 0x31, 0xff, 0x81, 0xad, - 0x29, 0xe8, 0x25, 0xb4, 0x67, 0x09, 0xa7, 0x97, 0x32, 0x61, 0x0b, 0xc7, 0xf2, 0x80, 0xdf, 0x1b, - 0xf7, 0x83, 0x83, 0x21, 0xe7, 0x75, 0x89, 0x1c, 0xbb, 0xaa, 0x15, 0xb2, 0x48, 0x5e, 0x39, 0x1d, - 0xb5, 0xad, 0x8a, 0x87, 0xdf, 0x5b, 0xf0, 0x3f, 0x6d, 0x89, 0xc8, 0xd8, 0x42, 0x50, 0x34, 0x84, - 0xd6, 0x85, 0x8c, 0x64, 0x2e, 0xb4, 0x29, 0x21, 0xdc, 0x15, 0xae, 0x25, 0x14, 0x42, 0xf6, 0x2f, - 0xfa, 0x00, 0xdb, 0xe7, 0x91, 0x8c, 0x94, 0x41, 0xdd, 0xf1, 0x59, 0xd0, 0xb8, 0x70, 0x35, 0xab, - 0xaa, 0x85, 0x8f, 0x2a, 0xc1, 0xbb, 0xc2, 0xed, 0xcd, 0x22, 0x19, 0x3d, 0x67, 0x69, 0x22, 0x69, - 0x9a, 0xc9, 0x25, 0x69, 0x57, 0x39, 0x7a, 0x03, 0xed, 0x8f, 0x9c, 0x33, 0x3e, 0x59, 0x66, 0x54, - 0x59, 0x6a, 0x87, 0x8f, 0x77, 0x85, 0xdb, 0xa7, 0x35, 0xd8, 0x60, 0xd8, 0x07, 0x10, 0x3d, 0x83, - 0xa6, 0xa2, 0x29, 0xbb, 0xed, 0xb0, 0xbf, 0x2b, 0xdc, 0x07, 0xaa, 0xda, 0x68, 0x37, 0x15, 0xf0, - 0xa7, 0x3f, 0xe6, 0x5f, 0xf9, 0x73, 0x38, 0xbc, 0xd5, 0x3c, 0xbc, 0x03, 0x3b, 0x5f, 0x28, 0x17, - 0xd5, 0x98, 0x8e, 0xc2, 0xeb, 0x74, 0x28, 0xe1, 0x69, 0xbd, 0x2e, 0x0a, 0x20, 0x24, 0x54, 0xe4, - 0x73, 0xa9, 0x36, 0xd2, 0xd6, 0xf5, 0x76, 0x85, 0x0b, 0xf9, 0x01, 0x25, 0x8d, 0x18, 0xbd, 0x85, - 0x96, 0xee, 0x77, 0x5a, 0xde, 0x89, 0xdf, 0x1d, 0x3f, 0x3c, 0x6a, 0xbb, 0x90, 0x9c, 0x46, 0x69, - 0xd8, 0xdb, 0x1b, 0x68, 0x69, 0x16, 0xd9, 0xbf, 0xe1, 0xeb, 0xf5, 0x06, 0x1b, 0x77, 0x1b, 0x6c, - 0xdc, 0x6f, 0x30, 0xf8, 0x5a, 0x62, 0xf0, 0xad, 0xc4, 0xe0, 0xb6, 0xc4, 0x60, 0x5d, 0x62, 0xf0, - 0xb3, 0xc4, 0xe0, 0x57, 0x89, 0x8d, 0xfb, 0x12, 0x83, 0xd5, 0x16, 0x1b, 0xeb, 0x2d, 0x36, 0xee, - 0xb6, 0xd8, 0x98, 0x5a, 0x6a, 0xf6, 0xab, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x20, 0xee, 0xaf, - 0xb4, 0xa5, 0x03, 0x00, 0x00, + // 642 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xc7, 0x3d, 0x69, 0xe2, 0x24, 0x93, 0xdb, 0xdc, 0x6a, 0x5a, 0xdd, 0x6b, 0xe5, 0x4a, 0xe3, + 0x28, 0x9b, 0x1b, 0x24, 0x70, 0x44, 0x0a, 0x12, 0x62, 0x81, 0x8a, 0x55, 0xc4, 0x86, 0x05, 0x72, + 0xf3, 0x02, 0x6e, 0x3a, 0xb8, 0xa6, 
0x71, 0xc6, 0x9d, 0x39, 0x46, 0x74, 0xc7, 0x96, 0x5d, 0x1f, + 0x83, 0x47, 0xe9, 0xb2, 0xcb, 0xae, 0x02, 0x75, 0x37, 0x28, 0xab, 0x3e, 0x00, 0x0b, 0x34, 0x33, + 0x76, 0xe2, 0x22, 0x16, 0x65, 0xe3, 0x39, 0x5f, 0xff, 0x39, 0x67, 0x7e, 0xc7, 0xf8, 0xff, 0xf4, + 0x24, 0x1a, 0x9d, 0x66, 0x4c, 0xc4, 0x4c, 0xe8, 0xf3, 0x4c, 0x84, 0xf3, 0x88, 0x55, 0x4c, 0x2f, + 0x15, 0x1c, 0x38, 0xc1, 0xeb, 0x48, 0xef, 0x51, 0x14, 0xc3, 0x71, 0x76, 0xe8, 0x4d, 0x79, 0x32, + 0x8a, 0x78, 0xc4, 0x47, 0xba, 0xe4, 0x30, 0x7b, 0xa7, 0x3d, 0xed, 0x68, 0xcb, 0x48, 0x7b, 0xff, + 0xa9, 0x1e, 0x33, 0x1e, 0x99, 0x44, 0x69, 0xfc, 0x92, 0x3c, 0x9d, 0x8d, 0x24, 0x84, 0x20, 0xcd, + 0xb7, 0x48, 0xbe, 0xae, 0x34, 0x9a, 0x72, 0x01, 0xec, 0x63, 0x2a, 0xf8, 0x7b, 0x36, 0x85, 0xc2, + 0x1b, 0xdd, 0x73, 0xfa, 0x9e, 0x1b, 0x71, 0x1e, 0xcd, 0xd8, 0x7a, 0x50, 0x88, 0x13, 0x26, 0x21, + 0x4c, 0x52, 0x53, 0x30, 0x38, 0xaf, 0xe1, 0xce, 0x1b, 0x7e, 0x12, 0x07, 0xec, 0x34, 0x63, 0x12, + 0xc8, 0x0e, 0x6e, 0xe8, 0x4b, 0x1c, 0xd4, 0x47, 0xc3, 0x76, 0x60, 0x1c, 0x15, 0x9d, 0xc5, 0x49, + 0x0c, 0x4e, 0xad, 0x8f, 0x86, 0x9b, 0x81, 0x71, 0x08, 0xc1, 0x75, 0x09, 0x2c, 0x75, 0x36, 0xfa, + 0x68, 0xb8, 0x11, 0x68, 0x9b, 0xbc, 0xc0, 0x4d, 0x09, 0xa1, 0x80, 0x89, 0x74, 0xea, 0x7d, 0x34, + 0xec, 0x8c, 0x7b, 0x9e, 0x19, 0xc1, 0x2b, 0x47, 0xf0, 0x26, 0xe5, 0x08, 0x7e, 0xeb, 0x62, 0xe1, + 0x5a, 0xe7, 0x5f, 0x5d, 0x14, 0x94, 0x22, 0xf2, 0x1c, 0x37, 0xd8, 0xfc, 0x68, 0x22, 0x9d, 0xc6, + 0x1f, 0xa8, 0x8d, 0x84, 0x3c, 0xc6, 0xed, 0xa3, 0x58, 0xb0, 0x29, 0xc4, 0x7c, 0xee, 0xd8, 0x7d, + 0x34, 0xec, 0x8e, 0xb7, 0xbd, 0x15, 0xf6, 0xfd, 0x32, 0x15, 0xac, 0xab, 0xd4, 0x13, 0xd2, 0x10, + 0x8e, 0x9d, 0xa6, 0x7e, 0xad, 0xb6, 0x07, 0x3f, 0x6a, 0xf8, 0x2f, 0x83, 0x44, 0xa6, 0x7c, 0x2e, + 0x19, 0x19, 0x60, 0xfb, 0x00, 0x42, 0xc8, 0xa4, 0x81, 0xe2, 0xe3, 0xe5, 0xc2, 0xb5, 0xa5, 0x8e, + 0x04, 0xc5, 0x49, 0xf6, 0x70, 0x7d, 0x3f, 0x84, 0x50, 0x03, 0xea, 0x8c, 0x77, 0xbc, 0xca, 0x26, + 0xd4, 0x5d, 0x2a, 0xe7, 0xff, 0xa3, 0x06, 0x5e, 0x2e, 0xdc, 0xee, 0x51, 0x08, 0xe1, 0x43, 0x9e, + 0xc4, 0xc0, 0x92, 0x14, 0xce, 0x82, 0xba, 0xf2, 0xc9, 0x53, 0xdc, 0x7e, 0x25, 0x04, 0x17, 0x93, + 0xb3, 0x94, 0x69, 0xa4, 0x6d, 0xff, 0xdf, 0xe5, 0xc2, 0xdd, 0x66, 0x65, 0xb0, 0xa2, 0x68, 0xaf, + 0x82, 0xe4, 0x01, 0x6e, 0x68, 0x99, 0xc6, 0xdd, 0xf6, 0xb7, 0x97, 0x0b, 0xf7, 0x6f, 0x9d, 0xad, + 0x94, 0x37, 0x74, 0xe0, 0x2e, 0x9f, 0xc6, 0xbd, 0xf8, 0xac, 0x16, 0x6f, 0x57, 0x17, 0xef, 0xe0, + 0xe6, 0x07, 0x26, 0xa4, 0xba, 0xa6, 0xa9, 0xe3, 0xa5, 0x4b, 0x5e, 0x62, 0xac, 0x80, 0xc4, 0x12, + 0xe2, 0xa9, 0x74, 0x5a, 0x1a, 0xc6, 0xa6, 0x67, 0x7e, 0xed, 0x80, 0xc9, 0x6c, 0x06, 0x3e, 0x29, + 0x28, 0x54, 0x0a, 0x83, 0x8a, 0x3d, 0x00, 0xdc, 0x2a, 0x89, 0x11, 0x0f, 0x63, 0xa3, 0xd2, 0x50, + 0x0c, 0xfd, 0xae, 0xd2, 0x8a, 0x55, 0x34, 0xa8, 0xd8, 0xe4, 0x19, 0xb6, 0x4d, 0xbd, 0x53, 0xeb, + 0x6f, 0x0c, 0x3b, 0xe3, 0xad, 0xf5, 0xf3, 0x0e, 0x40, 0xb0, 0x30, 0xf1, 0xbb, 0x45, 0x77, 0xdb, + 0xa8, 0x82, 0xe2, 0x1c, 0x7c, 0x46, 0x78, 0x4b, 0xb5, 0x7d, 0x2b, 0x78, 0xb2, 0x5a, 0xfc, 0x1e, + 0x6e, 0x89, 0xc2, 0xd6, 0xcd, 0x3b, 0x63, 0x5a, 0x5d, 0xac, 0xaa, 0x65, 0x70, 0xcc, 0x32, 0x59, + 0x2a, 0xfc, 0xfa, 0xc5, 0xc2, 0x45, 0xc1, 0x4a, 0x45, 0x76, 0xef, 0xf0, 0xa8, 0xfd, 0x8e, 0x87, + 0x92, 0x58, 0x55, 0x02, 0xfe, 0x93, 0xcb, 0x6b, 0x6a, 0x5d, 0x5d, 0x53, 0xeb, 0xf6, 0x9a, 0xa2, + 0x4f, 0x39, 0x45, 0x5f, 0x72, 0x8a, 0x2e, 0x72, 0x8a, 0x2e, 0x73, 0x8a, 0xbe, 0xe5, 0x14, 0x7d, + 0xcf, 0xa9, 0x75, 0x9b, 0x53, 0x74, 0x7e, 0x43, 0xad, 0xcb, 0x1b, 0x6a, 0x5d, 0xdd, 0x50, 0xeb, + 0xd0, 0xd6, 0xef, 0xdc, 0xfd, 0x19, 0x00, 0x00, 0xff, 0xff, 
0x64, 0x10, 0x10, 0x5f, 0xda, 0x04, + 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { @@ -393,6 +462,9 @@ func (this *LokiResponse) Equal(that interface{}) bool { if this.Version != that1.Version { return false } + if !this.Statistics.Equal(&that1.Statistics) { + return false + } return true } func (this *LokiData) Equal(that interface{}) bool { @@ -427,6 +499,33 @@ func (this *LokiData) Equal(that interface{}) bool { } return true } +func (this *LokiPromResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LokiPromResponse) + if !ok { + that2, ok := that.(LokiPromResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Response.Equal(that1.Response) { + return false + } + if !this.Statistics.Equal(&that1.Statistics) { + return false + } + return true +} func (this *LokiRequest) GoString() string { if this == nil { return "nil" @@ -447,7 +546,7 @@ func (this *LokiResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&queryrange.LokiResponse{") s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") s = append(s, "Data: "+strings.Replace(this.Data.GoString(), `&`, ``, 1)+",\n") @@ -456,6 +555,7 @@ func (this *LokiResponse) GoString() string { s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + s = append(s, "Statistics: "+strings.Replace(this.Statistics.GoString(), `&`, ``, 1)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -476,6 +576,19 @@ func (this *LokiData) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *LokiPromResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&queryrange.LokiPromResponse{") + if this.Response != nil { + s = append(s, "Response: "+fmt.Sprintf("%#v", this.Response)+",\n") + } + s = append(s, "Statistics: "+strings.Replace(this.Statistics.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringQueryrange(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -487,7 +600,7 @@ func valueToGoStringQueryrange(v interface{}, typ string) string { func (m *LokiRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -495,67 +608,60 @@ func (m *LokiRequest) Marshal() (dAtA []byte, err error) { } func (m *LokiRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x3a - } - if m.Direction != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Direction)) - i-- - dAtA[i] = 0x30 - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err1 != nil { - return 0, err1 + if len(m.Query) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) + i += copy(dAtA[i:], m.Query) } - i -= n1 - i = encodeVarintQueryrange(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x2a - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) - if err2 != nil { - return 0, err2 + if m.Limit != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit)) } - i -= n2 - i = encodeVarintQueryrange(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 if m.Step != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) } - if m.Limit != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x22 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + i += n1 + dAtA[i] = 0x2a + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Direction != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Direction)) } - return len(dAtA) - i, nil + if len(m.Path) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + return i, nil } func (m *LokiResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -563,68 +669,66 @@ func (m *LokiResponse) Marshal() (dAtA []byte, err error) { } func (m *LokiResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LokiResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Version != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x38 + if len(m.Status) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) } - if m.Limit != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x30 + dAtA[i] = 0x12 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Data.Size())) + n3, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.Direction != 0 { - i = encodeVarintQueryrange(dAtA, i, uint64(m.Direction)) - i-- - dAtA[i] = 0x28 + i += n3 + if len(m.ErrorType) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ErrorType))) + i += copy(dAtA[i:], m.ErrorType) } if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) } - if len(m.ErrorType) > 0 { - i -= len(m.ErrorType) - copy(dAtA[i:], m.ErrorType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ErrorType))) - i-- - dAtA[i] = 0x1a + if m.Direction != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintQueryrange(dAtA, 
i, uint64(m.Direction)) } - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) + if m.Limit != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Limit)) } - i-- - dAtA[i] = 0x12 - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0xa + if m.Version != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Version)) + } + dAtA[i] = 0x42 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Statistics.Size())) + n4, err := m.Statistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n4 + return i, nil } func (m *LokiData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -632,49 +736,75 @@ func (m *LokiData) Marshal() (dAtA []byte, err error) { } func (m *LokiData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LokiData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + if len(m.ResultType) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) + i += copy(dAtA[i:], m.ResultType) + } if len(m.Result) > 0 { - for iNdEx := len(m.Result) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Result[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQueryrange(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Result { dAtA[i] = 0x12 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.ResultType) > 0 { - i -= len(m.ResultType) - copy(dAtA[i:], m.ResultType) - i = encodeVarintQueryrange(dAtA, i, uint64(len(m.ResultType))) - i-- + return i, nil +} + +func (m *LokiPromResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LokiPromResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Response != nil { dAtA[i] = 0xa + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Response.Size())) + n5, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - return len(dAtA) - i, nil + dAtA[i] = 0x12 + i++ + i = encodeVarintQueryrange(dAtA, i, uint64(m.Statistics.Size())) + n6, err := m.Statistics.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func encodeVarintQueryrange(dAtA []byte, offset int, v uint64) int { - offset -= sovQueryrange(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *LokiRequest) Size() (n int) { if m == nil { @@ -735,6 +865,8 @@ func (m *LokiResponse) Size() (n int) { if m.Version != 0 { n += 1 + sovQueryrange(uint64(m.Version)) } + l = m.Statistics.Size() + n += 1 + l + sovQueryrange(uint64(l)) return n } @@ -757,8 +889,30 @@ func (m *LokiData) Size() (n int) { return n } +func (m *LokiPromResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Response 
!= nil { + l = m.Response.Size() + n += 1 + l + sovQueryrange(uint64(l)) + } + l = m.Statistics.Size() + n += 1 + l + sovQueryrange(uint64(l)) + return n +} + func sovQueryrange(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozQueryrange(x uint64) (n int) { return sovQueryrange(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -771,8 +925,8 @@ func (this *LokiRequest) String() string { `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Step:` + fmt.Sprintf("%v", this.Step) + `,`, - `StartTs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTs), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `EndTs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EndTs), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `StartTs:` + strings.Replace(strings.Replace(this.StartTs.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `EndTs:` + strings.Replace(strings.Replace(this.EndTs.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `}`, @@ -791,6 +945,7 @@ func (this *LokiResponse) String() string { `Direction:` + fmt.Sprintf("%v", this.Direction) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Statistics:` + strings.Replace(strings.Replace(this.Statistics.String(), "Result", "stats.Result", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -799,14 +954,20 @@ func (this *LokiData) String() string { if this == nil { return "nil" } - repeatedStringForResult := "[]Stream{" - for _, f := range this.Result { - repeatedStringForResult += fmt.Sprintf("%v", f) + "," - } - repeatedStringForResult += "}" s := strings.Join([]string{`&LokiData{`, `ResultType:` + fmt.Sprintf("%v", this.ResultType) + `,`, - `Result:` + repeatedStringForResult + `,`, + `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Stream", "logproto.Stream", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LokiPromResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LokiPromResponse{`, + `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "PrometheusResponse", "queryrange.PrometheusResponse", 1) + `,`, + `Statistics:` + strings.Replace(strings.Replace(this.Statistics.String(), "Result", "stats.Result", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1274,6 +1435,39 @@ func (m *LokiResponse) Unmarshal(dAtA []byte) error { break } } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Statistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) @@ -1417,6 +1611,128 @@ func (m *LokiData) Unmarshal(dAtA []byte) error { } return nil } +func (m *LokiPromResponse) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LokiPromResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LokiPromResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &queryrange.PrometheusResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statistics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Statistics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQueryrange(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQueryrange + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQueryrange + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipQueryrange(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 9346ff0cd9dfb..f3a5e6f92ba72 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -4,6 +4,8 @@ package queryrange; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "pkg/logproto/logproto.proto"; +import "pkg/logql/stats/stats.proto"; +import "github.com/cortexproject/cortex/pkg/querier/queryrange/queryrange.proto"; import "google/protobuf/timestamp.proto"; option (gogoproto.marshaler_all) = true; @@ -27,9 +29,17 @@ message LokiResponse { logproto.Direction direction = 5; uint32 limit = 6; uint32 version = 7; + stats.Result statistics = 8 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "statistics"]; } message LokiData { string ResultType = 1 [(gogoproto.jsontag) = "resultType"]; repeated logproto.Stream Result = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "result"]; } + + +// 
LokiPromResponse wraps a Prometheus response with statistics. +message LokiPromResponse { + queryrange.PrometheusResponse response = 1 [(gogoproto.nullable) = true]; + stats.Result statistics = 2 [(gogoproto.nullable) = false]; +} diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 5133382db7f81..1167880cbbedd 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -35,7 +35,7 @@ func NewTripperware(cfg Config, log log.Logger, limits Limits) (frontend.Tripper // This avoids divide by zero errors when determining cache keys where user specific overrides don't exist. limits = WithDefaultLimits(limits, cfg.Config) - metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, lokiCodec, queryrange.PrometheusResponseExtractor) + metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, lokiCodec, prometheusResponseExtractor) if err != nil { return nil, nil, err diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index ea2dd611b9439..a17849812d1e4 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -335,7 +335,7 @@ func promqlResult(v promql.Value) (*int, http.Handler) { return &count, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { lock.Lock() defer lock.Unlock() - if err := marshal.WriteQueryResponseJSON(v, w); err != nil { + if err := marshal.WriteQueryResponseJSON(logql.Result{Data: v}, w); err != nil { panic(err) } count++ diff --git a/pkg/storage/iterator.go b/pkg/storage/iterator.go index 01a134790e724..3b80b15340071 100644 --- a/pkg/storage/iterator.go +++ b/pkg/storage/iterator.go @@ -368,9 +368,9 @@ func fetchLazyChunks(ctx context.Context, chunks []*chunkenc.LazyChunk) error { start := time.Now() storeStats := stats.GetStoreData(ctx) var totalChunks int64 - defer func(){ - storeStats.TimeDownloadingChunks += time.Since(start) - storeStats.TotalDownloadedChunks += totalChunks + defer func() { + storeStats.ChunksDownloadTime += time.Since(start) + storeStats.TotalChunksDownloaded += totalChunks }() chksByFetcher := map[*chunk.Fetcher][]*chunkenc.LazyChunk{}
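
Most of the queryrange.pb.go hunk above is mechanical: gogo/protobuf regenerates the marshalling code once the stats.Result statistics field is added to LokiResponse and the LokiPromResponse wrapper message is introduced. The encodeVarintQueryrange and sovQueryrange helpers implement the standard protobuf base-128 varint, and the literal tag bytes encode (field_number << 3) | wire_type — hence dAtA[i] = 0x42 for field 8 (statistics) with wire type 2 (length-delimited). A minimal, self-contained sketch of that varint scheme (the helper names here are illustrative, not from the patch):

package main

import "fmt"

// putUvarint appends v in protobuf base-128 form: seven payload bits
// per byte, high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen mirrors the generated sovQueryrange: the number of bytes
// putUvarint will emit for v.
func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	// Tag byte for field 8, wire type 2, as written for the Statistics
	// field above: (8 << 3) | 2 == 0x42, a single byte.
	buf := putUvarint(nil, (8<<3)|2)
	// A length prefix for a 300-byte payload needs two bytes: 0xac 0x02.
	buf = putUvarint(buf, 300)
	fmt.Printf("% x (length takes %d bytes)\n", buf, uvarintLen(300))
}

The generated code produces the same byte layout; it differs only in bookkeeping, writing into a pre-sized buffer at a running offset (returning offset + 1) instead of appending.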