Skip to content

Commit

Permalink
Jaeger YugabyteDB(YCQL) support (jaegertracing#4220)
Browse files Browse the repository at this point in the history
## Which problem is this PR solving?
- Resolves jaegertracing#4198

## Short description of the changes
Following changes done to v004.cql.tmpl
- Removing trailing comma from last column definition.
- Changing `//` to `--` as preamble for comments.
- `list<frozen<keyvalue>>,`->`list<frozen<${keyspace}.keyvalue>>`
- Adding frozen keyword to UDT.

The syntax changes above are accepted by both YCQL and Cassandra.

---------

Signed-off-by: Harsh Daryani <harsh.daryani.6211@gmail.com>
Signed-off-by: shubbham1215 <sawaikershubham@gmail.com>
  • Loading branch information
HarshDaryani896 authored and shubbham1215 committed Mar 5, 2023
1 parent 5662df5 commit f21acc2
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 17 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ Jaeger can be used with a growing number of storage backends:
* It natively supports two popular open source NoSQL databases as trace storage backends: Cassandra and Elasticsearch.
* It integrates via a gRPC API with other well known databases that have been certified to be Jaeger compliant: [TimescaleDB via Promscale](https://github.com/timescale/promscale), [ClickHouse](https://github.com/jaegertracing/jaeger-clickhouse).
* There is embedded database support using [Badger](https://github.com/dgraph-io/badger) and simple in-memory storage for testing setups.
* There are ongoing community experiments using other databases, such as ScyllaDB, InfluxDB, Amazon DynamoDB.
* There are ongoing community experiments using other databases, such as ScyllaDB, InfluxDB, Amazon DynamoDB, YugabyteDB (YCQL).

### Modern Web UI

Expand Down
32 changes: 16 additions & 16 deletions plugin/storage/cassandra/schema/v004.cql.tmpl
Original file line number Diff line number Diff line change
Expand Up @@ -29,23 +29,23 @@ CREATE TYPE IF NOT EXISTS ${keyspace}.keyvalue (
value_bool boolean,
value_long bigint,
value_double double,
value_binary blob,
value_binary blob
);

CREATE TYPE IF NOT EXISTS ${keyspace}.log (
ts bigint, // microseconds since epoch
fields list<frozen<keyvalue>>,
ts bigint, -- microseconds since epoch
fields frozen<list<frozen<${keyspace}.keyvalue>>>
);

CREATE TYPE IF NOT EXISTS ${keyspace}.span_ref (
ref_type text,
trace_id blob,
span_id bigint,
span_id bigint
);

CREATE TYPE IF NOT EXISTS ${keyspace}.process (
service_name text,
tags list<frozen<keyvalue>>,
tags frozen<list<frozen<${keyspace}.keyvalue>>>
);

-- Notice we have span_hash. This exists only for zipkin backwards compat. Zipkin allows spans with the same ID.
Expand All @@ -58,8 +58,8 @@ CREATE TABLE IF NOT EXISTS ${keyspace}.traces (
parent_id bigint,
operation_name text,
flags int,
start_time bigint, // microseconds since epoch
duration bigint, // microseconds
start_time bigint, -- microseconds since epoch
duration bigint, -- microseconds
tags list<frozen<keyvalue>>,
logs list<frozen<log>>,
refs list<frozen<span_ref>>,
Expand Down Expand Up @@ -107,7 +107,7 @@ CREATE TABLE IF NOT EXISTS ${keyspace}.operation_names_v2 (
CREATE TABLE IF NOT EXISTS ${keyspace}.service_operation_index (
service_name text,
operation_name text,
start_time bigint, // microseconds since epoch
start_time bigint, -- microseconds since epoch
trace_id blob,
PRIMARY KEY ((service_name, operation_name), start_time)
) WITH CLUSTERING ORDER BY (start_time DESC)
Expand All @@ -123,7 +123,7 @@ CREATE TABLE IF NOT EXISTS ${keyspace}.service_operation_index (
CREATE TABLE IF NOT EXISTS ${keyspace}.service_name_index (
service_name text,
bucket int,
start_time bigint, // microseconds since epoch
start_time bigint, -- microseconds since epoch
trace_id blob,
PRIMARY KEY ((service_name, bucket), start_time)
) WITH CLUSTERING ORDER BY (start_time DESC)
Expand All @@ -137,11 +137,11 @@ CREATE TABLE IF NOT EXISTS ${keyspace}.service_name_index (
AND gc_grace_seconds = 10800; -- 3 hours of downtime acceptable on nodes

CREATE TABLE IF NOT EXISTS ${keyspace}.duration_index (
service_name text, // service name
operation_name text, // operation name, or blank for queries without span name
bucket timestamp, // time bucket, - the start_time of the given span rounded to an hour
duration bigint, // span duration, in microseconds
start_time bigint, // microseconds since epoch
service_name text, -- service name
operation_name text, -- operation name, or blank for queries without span name
bucket timestamp, -- time bucket, - the start_time of the given span rounded to an hour
duration bigint, -- span duration, in microseconds
start_time bigint, -- microseconds since epoch
trace_id blob,
PRIMARY KEY ((service_name, operation_name, bucket), duration, start_time, trace_id)
) WITH CLUSTERING ORDER BY (duration DESC, start_time DESC)
Expand All @@ -160,7 +160,7 @@ CREATE TABLE IF NOT EXISTS ${keyspace}.tag_index (
service_name text,
tag_key text,
tag_value text,
start_time bigint, // microseconds since epoch
start_time bigint, -- microseconds since epoch
trace_id blob,
span_id bigint,
PRIMARY KEY ((service_name, tag_key, tag_value), start_time, trace_id, span_id)
Expand All @@ -179,7 +179,7 @@ CREATE TYPE IF NOT EXISTS ${keyspace}.dependency (
parent text,
child text,
call_count bigint,
source text,
source text
);

-- compaction strategy is intentionally different as compared to other tables due to the size of dependencies data
Expand Down

0 comments on commit f21acc2

Please sign in to comment.