Skip to content

Commit

Permalink
example_cluster: add TDG2 + kafka cluster draft
Browse files Browse the repository at this point in the history
Add a docker-compose file for a cluster with a TDG2 application. To start it,
you'll need a pre-built TDG2 image. Add a TDG configuration example
for simple Kafka activity. Add a Telegraf example for the TDG2 application
(for now, only Kafka labels are considered).

Part of #134
  • Loading branch information
DifferentialOrange committed Jun 8, 2022
1 parent 325027b commit 1494b1b
Show file tree
Hide file tree
Showing 8 changed files with 278 additions and 0 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## Unreleased

### Added
- TDG (ver. 2) example cluster draft


## [1.1.0] - 2022-05-17
Grafana revisions: [InfluxDB revision 10](https://grafana.com/api/dashboards/12567/revisions/10/download), [Prometheus revision 11](https://grafana.com/api/dashboards/13054/revisions/11/download)

Expand Down
85 changes: 85 additions & 0 deletions docker-compose.tdg.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
# Example cluster: TDG2 application with Kafka activity and a full
# monitoring pipeline (Telegraf -> InfluxDB, Prometheus, Grafana).
version: '3'

services:
  # TDG2 application; requires a locally pre-built "tdg" image.
  tdg:
    image: tdg
    environment:
      TARANTOOL_ALIAS: tdg
    networks:
      tarantool_dashboard_dev:
    ports:
      # Port mappings are quoted: an unquoted "host:container" scalar can
      # be read as a base-60 integer by YAML 1.1 parsers.
      - "8080:8080"

  # ZooKeeper is required by the Confluent Kafka image.
  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    environment:
      ZOOKEEPER_CLIENT_PORT: 32181
      ZOOKEEPER_TICK_TIME: 2000
    networks:
      tarantool_dashboard_dev:

  kafka:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zookeeper
    networks:
      tarantool_dashboard_dev:
    ports:
      - "9092:9092"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:32181"
      # Inter-broker listener on 29092, client-facing listener on 9092
      # (the TDG config points brokers at kafka:9092).
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:29092,PLAINTEXT_HOST://kafka:9092"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1

  telegraf:
    image: telegraf:1.13-alpine
    networks:
      tarantool_dashboard_dev:
    volumes:
      # configure telegraf to work out of the box
      - ./example_cluster/telegraf/telegraf.tdg.conf:/etc/telegraf/telegraf.conf:ro

  influxdb:
    image: influxdb:1.7-alpine
    environment:
      INFLUXDB_REPORTING_DISABLED: "true"
      INFLUXDB_DB: "metrics"
      INFLUXDB_ADMIN_USER: "admin"
      INFLUXDB_ADMIN_PASSWORD: "admin"
      INFLUXDB_USER: "telegraf"
      INFLUXDB_USER_PASSWORD: "telegraf"
      INFLUXDB_HTTP_AUTH_ENABLED: "true"
    networks:
      tarantool_dashboard_dev:
    ports:
      - "8086:8086"

  prometheus:
    image: prom/prometheus:v2.17.2
    networks:
      tarantool_dashboard_dev:
    ports:
      - "9090:9090"
    volumes:
      - ./example_cluster/prometheus/prometheus.tdg.yml:/etc/prometheus/prometheus.yml

  grafana:
    image: grafana/grafana:8.1.5
    environment:
      # Dev-only settings: anonymous admin access, no login form.
      GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION: "true"
      GF_AUTH_ANONYMOUS_ENABLED: "true"
      GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin"
      GF_AUTH_DISABLE_SIGNOUT_MENU: "true"
      GF_AUTH_DISABLE_LOGIN_FORM: "true"
    networks:
      tarantool_dashboard_dev:
    ports:
      - "3000:3000"
    volumes:
      - ./example_cluster/grafana/provisioning:/etc/grafana/provisioning

networks:
  tarantool_dashboard_dev:
    driver: bridge
34 changes: 34 additions & 0 deletions example_cluster/prometheus/prometheus.tdg.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "alerts.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9090"]

  # TDG2 application: it exports Prometheus-format metrics on a
  # non-default path (configured in example_cluster/tdg/config/config.yml).
  - job_name: "tarantool_app"
    static_configs:
      - targets:
          - "tdg:8080"
    metrics_path: "/metrics/prometheus"
48 changes: 48 additions & 0 deletions example_cluster/tdg/config/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
---
# TDG2 configuration example for simple Kafka activity.
types:
  # Data model (Avro schema) stored next to this file.
  __file: model.avsc

connector:
  input:
    # Consumes the "orders" and "items" topics; messages are dispatched to
    # the input processor under the "input_key" routing key.
    - name: kafka
      type: kafka
      routing_key: input_key
      brokers:
        - kafka:9092
      topics:
        - orders
        - items
      group_id: kafka
      options:
        # Kafka consumer options are strings by contract, hence the quotes.
        enable.auto.offset.store: "true"
        auto.offset.reset: "earliest"
        enable.partition.eof: "false"
      # NOTE(review): the token name looks deliberately invalid — presumably
      # to keep the example self-contained; confirm before reusing.
      token_name: "invalid-token"

    # Second consumer for the "items" topic only, in a separate consumer
    # group; no routing key is configured for this input.
    - name: number-group
      type: kafka
      brokers:
        - kafka:9092
      topics:
        - items
      group_id: '1'

input_processor:
  handlers:
    # decode.call (src/decode.lua) classifies objects arriving
    # under "input_key".
    - key: input_key
      function: decode.call

storage:
  # Objects routed with the "quotation" key are stored as Quotation records
  # (type declared in model.avsc).
  - key: quotation
    type: Quotation

logger:
  enabled: true
  severity: debug

metrics:
  export:
    # Prometheus endpoint is scraped by example_cluster/prometheus;
    # JSON endpoint is scraped by the Telegraf example config.
    - format: prometheus
      path: metrics/prometheus
    - format: json
      path: metrics/json
44 changes: 44 additions & 0 deletions example_cluster/tdg/config/model.avsc
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
[
    {
        "type": "record",
        "name": "HeaderMetadata",
        "fields": [
            { "name": "object_id", "type": "string" },
            { "name": "source", "type": "string" },
            { "name": "date", "type": "string" },
            { "name": "seq", "type": "string" },
            { "name": "object_type", "type": "string" }
        ]
    },
    {
        "type": "record",
        "name": "QuotationHeader",
        "fields": [
            { "name": "metadata", "type": "HeaderMetadata" }
        ]
    },
    {
        "type": "record",
        "name": "QuotationBody",
        "fields": [
            { "name": "Timestamps", "type": "string" },
            { "name": "Cur1", "type": "string" },
            { "name": "Cur2", "type": "string" },
            { "name": "Valuebid", "type": "float" },
            { "name": "Valueask", "type": "float" },
            { "name": "Source", "type": "string" }
        ]
    },
    {
        "type": "record",
        "name": "Quotation",
        "fields": [
            { "name": "header", "type": "QuotationHeader" },
            { "name": "body", "type": "QuotationBody" }
        ],
        "indexes": [
            {
                "name": "id",
                "parts": ["header.metadata.object_id"]
            }
        ]
    }
]
11 changes: 11 additions & 0 deletions example_cluster/tdg/config/src/classifier.lua
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
-- Routing classifier: tags objects originating from the 'FXQE' source
-- with the 'quotation' routing key so they land in Quotation storage.
local function call(param)
    local header = param.obj.header

    -- Only objects with header metadata reporting the FXQE source
    -- are routed; everything else passes through unchanged.
    if header ~= nil and header.metadata ~= nil
            and header.metadata.source == 'FXQE' then
        param.routing_key = 'quotation'
    end

    return param
end

return {
    call = call,
}
10 changes: 10 additions & 0 deletions example_cluster/tdg/config/src/decode.lua
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
-- Input handler wired as "decode.call" in the TDG config: delegates
-- classification of the incoming request to the classifier module.
local classifier = require('classifier')

return {
    call = function(req)
        return classifier.call(req)
    end,
}
40 changes: 40 additions & 0 deletions example_cluster/telegraf/telegraf.tdg.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Telegraf example for a TDG2 application: scrape the JSON metrics
# endpoint and ship the values to InfluxDB.

# Pull metrics from the TDG2 JSON export endpoint
# (path configured in example_cluster/tdg/config/config.yml).
[[inputs.http]]
urls = [
"http://tdg:8080/metrics/json",
]
# NOTE(review): timeout (30s) exceeds the collection interval (10s below),
# so slow scrapes could overlap collection cycles — confirm intended.
timeout = "30s"
# Promote these JSON fields to tags; the label_pairs_* entries include the
# Kafka-related labels (for now, only Kafka labels are considered).
tag_keys = [
"metric_name",
"label_pairs_alias",
"label_pairs_quantile",
"label_pairs_path",
"label_pairs_method",
"label_pairs_status",
"label_pairs_operation",
"label_pairs_level",
"label_pairs_id",
"label_pairs_engine",
"label_pairs_name",
"label_pairs_index_name",
"label_pairs_delta",
"label_pairs_stream",
"label_pairs_type",
"label_pairs_connector_name",
"label_pairs_broker_name",
"label_pairs_topic",
"label_pairs_request"
]
insecure_skip_verify = true
interval = "10s"
data_format = "json"
# Prefix all measurements so dashboards can match tarantool_app_* series.
name_prefix = "tarantool_app_"
# Keep only the metric value field from each JSON object.
fieldpass = ["value"]

# Telegraf self-monitoring metrics.
[[inputs.internal]]

# Ship everything to the InfluxDB service from docker-compose.tdg.yml.
[[outputs.influxdb]]
urls = ["http://influxdb:8086"]
database = "metrics"
# The database is pre-created by the InfluxDB container (INFLUXDB_DB),
# and the telegraf user has no admin rights to create it anyway.
skip_database_creation = true
username = "telegraf"
password = "telegraf"

0 comments on commit 1494b1b

Please sign in to comment.