[dalton]
# debug logging
DEBUG = False
# location for job .zip files on disk. Not deleted by dalton; use
# something like a cron job to prune old files if necessary
job_path = /opt/dalton/jobs
# root directory for all the rulesets Dalton knows about.
# inside this directory there should be a 'suricata' and
# a 'snort' directory in which the corresponding rules go.
# it is not recommended that you change this
ruleset_path = /opt/dalton/rulesets
# directory for the zeek scripts
script_path = /opt/dalton/rulesets/zeek/
# temp storage for pcaps, configs, etc. Should be cleaned up by Dalton
# and everything in here can be deleted after the job is submitted
temp_path = /tmp/dalton
# IP/host of redis server; dalton_redis is the (resolvable)
# hostname of the redis docker container
redis_host = dalton_redis
# location of configuration files for the sundry supported engines
# it is recommended that you NOT change this since Flask looks in
# the static directory to serve the config files.
engine_conf_path = /opt/dalton/app/static/engine-configs
# timeout (minutes) for keys to expire in Redis for regular (non-teapot)
# submitted jobs (7200 == 5 days)
redis_expire = 7200
# timeout (minutes) for keys to expire in Redis for teapot
# jobs (short and stout jobs that are short lived)
teapot_redis_expire = 62
# timeout (minutes) for share links to expire, after which job info is no longer preserved (43200 = 30 days)
# the zip file containing the job has its modification date moved into the future to avoid early deletion;
# the file mod date is set to now + (share_expire - redis_expire) so the usual redis_expire check matches when expected
share_expire = 43200
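# worked example (illustrative only), using the defaults above: 43200 - 7200 = 36000 minutes,
# i.e. the mod date is pushed roughly 25 days into the future, so the zip survives
# the full 30-day share window before the usual expiration check removes it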
# time (seconds) for jobs to run before setting timeout status
job_run_timeout = 3600
# if a Dalton Agent has not checked in to the controller in this many
# minutes, clear it from the sensor list (which prevents jobs from
# being submitted to that queue if there are no other sensors with
# that engine (technology) and version).
agent_purge_time = 20
# API Keys valid for Dalton Agents (currently not used)
api_keys = bmV2ZXIgdW5kZXJlc3RpbWF0ZSB5b3VyIG9wcG9uZW50,ZXhwZWN0IHRoZSB1bmV4cGVjdGVk,dGFrZSBpdCBvdXRzaWRl,UGFpbiBkb24ndCBodXJ0
# location of mergecap binary; needed to combine multiple pcaps for Suricata jobs
mergecap_binary = /usr/bin/mergecap
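# illustrative only (not necessarily how Dalton invokes it); mergecap writes the
# merged capture to the file given with -w, file names below are placeholders:
#   /usr/bin/mergecap -w combined.pcap first.pcap second.pcap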
# program that reads the unified2 binary files and outputs text
u2_analyzer = python3 -m idstools.scripts.u2spewfoo
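# illustrative only; u2spewfoo takes the unified2 file(s) to dump as positional
# arguments, the path below is a placeholder:
#   python3 -m idstools.scripts.u2spewfoo /var/log/snort/merged.log.1234567890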
# If no rulesets exist on startup, Dalton will use the rulecat.py script (from py-idstools)
# to download some rulesets
rulecat_script = python3 -m idstools.scripts.rulecat
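# illustrative only; rulecat can write downloaded rules to a directory with
# -o/--output, the path below is a placeholder based on ruleset_path above:
#   python3 -m idstools.scripts.rulecat -o /opt/dalton/rulesets/suricata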
# Maximum number of pcaps that can be uploaded on the job submission page.
# The limit is enforced in the UI and again when Dalton processes the pcaps.
max_pcap_files = 10
#############
# Flowsynth #
#############
[flowsynth-web]
# path to flowsynth script for web gui
# by default, use installed flowsynth module
bin_path = python3 -m flowsynth
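# illustrative only; flag names assumed from flowsynth's documented usage and
# file names are placeholders:
#   python3 -m flowsynth example.synth -f pcap -w /tmp/pcaps/example.pcap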
# where to store PCAPs temporarily; should be considered public
pcap_path = /tmp/pcaps