ssh and snmp allow list #1363
Changes from 1 commit
@@ -0,0 +1,132 @@
#!/usr/bin/env python

# Daemon that listens for updates to the source IP prefixes from which snmp access
# is allowed. In case of a change, it updates the snmp configuration file accordingly.
# After a change, it also notifies snmpd to re-read its config file (service reload).

import os
import re
import sys
import time
import redis

service = "snmpd"
config_file_path = "/etc/snmp"
redis_key = "SNMP_ALLOW_LIST"       # the redis list we listen to
subscription = '__keyspace@0__:%s' % redis_key
temporization_duration = 3          # how long we wait for changes to settle (ride out a burst of changes to redis_key)
fake_infinite = 9999                # how often we wake up when nothing is going on -- get_message()'s timeout has no 'infinite' value
# these operations can remove or modify existing entries, so after them the configuration may need to be rewritten
delete_operations = ["lrem", "lpop", "rpop", "blpop", "brpop", "brpoplpush", "rpoplpush", "ltrim", "del", "lset"]

r = redis.StrictRedis(host='localhost')
p = r.pubsub()

# If redis is not up yet, this can fail, so wait for redis to be available
while True:
    try:
        p.subscribe(subscription)
        break
    except redis.exceptions.ConnectionError:
        time.sleep(3)

# We could lose contact with redis at a later stage, in which case we will exit with
# return code -2 and supervisor will restart us, at which point we are back in the
# while loop above waiting for redis to be ready.
try:

    # By default redis does not enable keyspace events, so enable them
    r.config_set("notify-keyspace-events", "KAE")

    # To update the configuration file
    #
    # Example config file for reference:
    #   root@sonic:/# cat /etc/snmp/snmpd.conf
    #   <...some snmp config, like udp port to use etc...>
    #   rocommunity public 172.20.61.0/24
    #   rocommunity public 172.20.60.0/24
    #   rocommunity public 127.00.00.0/8
    #   <...some more snmp config...>
    #   root@sonic:/#
    #
    # snmpd.conf supports include files, like so:
    #   includeFile /etc/snmp/community.conf
    #   includeDir /etc/snmp/config.d
    # which could make file massaging simpler, but even then we still deal with lines
    # that have shared "masters", since some other entity controls the community strings
    # part of that line.
    # If other database attributes need to be written to the snmp config file, then
    # it should be done by this daemon as well (sure, we could inotify on the file
    # and correct it back, but that's glitchy).

    def write_configuration_file(v):
        filename = "%s/%s.conf" % (config_file_path, service)
        filename_tmp = filename + ".tmp"
        f = open(filename, "r")
        snmpd_config = f.read()
        f.close()
        f = open(filename_tmp, "w")
        this_community = "not_a_community"
        for l in snmpd_config.split('\n'):
            m = re.match(r"^(..)community (\S+)", l)
            if not m:
                f.write(l)
                f.write("\n")
            else:
                if not l.startswith(this_community):  # skip already-handled communities (each community is duplicated per allow entry)
                    this_community = "%scommunity %s" % (m.group(1), m.group(2))
                    if len(v):
                        for value in v:
                            f.write("%s %s\n" % (this_community, value))
                    else:
                        f.write("%s\n" % this_community)
        f.close()
        os.rename(filename_tmp, filename)
        os.system("service snmpd reload")  # TODO: check return code and log error somewhere

    # write initial configuration
    write_configuration_file(r.lrange(redis_key, 0, -1))

    # listen for changes and rewrite the configuration file if needed, after some temporization
    #
    # What the subscribed messages look like, for reference:
    #   {'pattern': None, 'type': 'subscribe', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 1L}
    #   {'pattern': None, 'type': 'message', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 'rpush'}
    #   {'pattern': None, 'type': 'message', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 'lpush'}
    #   {'pattern': None, 'type': 'message', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 'lrem'}
    #   {'pattern': None, 'type': 'message', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 'lset'}
    #   {'pattern': None, 'type': 'message', 'channel': '__keyspace@0__:SNMP_ALLOW_LIST', 'data': 'del'}

    select_timeout = fake_infinite
    config_changed = False
    while True:
        try:
            m = p.get_message(timeout=select_timeout)
        except Exception:
            sys.exit(-2)
        # temporization: no change after 'timeout' seconds -> commit any accumulated changes
        if not m and config_changed:
            write_configuration_file(r.lrange(redis_key, 0, -1))
            config_changed = False
            select_timeout = fake_infinite
        if m and m['type'] == "message":
            if m['channel'] != subscription:
                print "WTF: unexpected case"
                continue
            config_changed = True
            select_timeout = temporization_duration
            # some debugs for now
            print "-------------------- config change: ",
            if m["data"] in delete_operations:
                print "DELETE"
            else:
                print ""
            v = r.lrange(redis_key, 0, -1)
            for value in v:
                print value

except redis.exceptions.ConnectionError as e:
    sys.exit(-2)
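For reference, the daemon reacts to ordinary Redis list operations on SNMP_ALLOW_LIST; a hypothetical way to exercise it from the device (assuming redis-cli talks to the same local instance and database 0 used above) is:

    redis-cli rpush SNMP_ALLOW_LIST 172.20.61.0/24
    redis-cli rpush SNMP_ALLOW_LIST 172.20.60.0/24
    redis-cli lrem SNMP_ALLOW_LIST 0 172.20.60.0/24

A burst of such commands is coalesced by the temporization logic, so snmpd.conf is rewritten and reloaded once after the changes settle.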
@@ -11,6 +11,14 @@ autorestart=false
stdout_logfile=syslog
stderr_logfile=syslog

[program:snmpd-config-updater]
command=/usr/bin/snmpd-config-updater
priority=1
autostart=true
autorestart=true
stdout_logfile=syslog
stderr_logfile=syslog
You will also need to add the following line to the end of dockers/docker-snmp-sv2/start.sh to start the process at the appropriate time, after
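The exact line is cut off above; assuming the container's start.sh starts its supervisor-managed programs with supervisorctl (as the other programs in that script are started), it would presumably be something along the lines of:

    supervisorctl start snmpd-config-updater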
Any reason to set autorestart to false? (The current script simply exits in case of a connection error with redis.)

root@sonic:/# vi /sbin/start-stop-daemon

So I would argue that the "reload" option is better than a "stop then restart" one.
Since this solution is only to be run on Arista platforms, I was envisioning the config-updater exiting immediately if it determines the platform is not an Arista platform. In this case, setting autorestart to false makes sense, so that supervisor does not keep restarting a process that is expected to exit.

However, if we perform the platform check in the start.sh script and only ever start the config-updater on Arista platforms, the updater itself would not need to perform the check.

That said, since the sshd config-updater runs in the base image, not in a Docker container, it is started by systemd, so it requires a different solution, which I envisioned as mentioned above, where it exits immediately if it determines the platform is not an Arista platform. If we go that route, I think it makes sense to use the same method in both the sshd and snmp config-updaters for consistency, which is how I arrived at the first solution I envisioned above.
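A rough sketch of the early-exit platform guard being discussed; reading the platform name from /host/machine.conf is an assumption made here for illustration, not something specified in this PR:

    import sys

    def is_arista_platform(machine_conf="/host/machine.conf"):  # path is an assumption
        try:
            with open(machine_conf) as f:
                return "arista" in f.read().lower()
        except IOError:
            return False

    if not is_arista_platform():
        # nothing to do on non-Arista platforms; exit cleanly so the process
        # manager does not treat this as a failure
        sys.exit(0)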
Once this PR is merged, I will modify the config-updaters to communicate with the SONiC ConfigDB rather than using the direct Redis connection concept implemented here. My modifications should eliminate the situation you describe.
I agree that snmpd should not be restarted needlessly. However, as mentioned above, since the config-updater should only run on Arista platforms, it should not be responsible for starting snmpd or snmp-subagent.
I agree that the SIGHUP signal is more graceful. However, we should not mix supervisor and systemd. What do you think about manually sending the SIGHUP signal instead of calling the "service snmpd reload" command?
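A minimal sketch of sending the signal directly from the updater, assuming snmpd writes its PID to /var/run/snmpd.pid (the pidfile path is an assumption, not confirmed by this PR):

    import os
    import signal

    def reload_snmpd(pidfile="/var/run/snmpd.pid"):  # pidfile path is an assumption
        with open(pidfile) as f:
            pid = int(f.read().strip())
        os.kill(pid, signal.SIGHUP)  # SIGHUP asks snmpd to re-read its configuration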
I see...
I agree that keeping the config-updater changes as local as possible is a good thing, as well as closing the window where ACLs are not yet in place. Now that we've agreed to send the SIGHUP directly instead of restarting the process, I'm OK with starting snmpd-config-updater early, sending the signal, and ignoring failure upon startup. With this solution, the timing with which snmpd-config-updater starts is irrelevant, therefore we could set autorestart=unexpected.
autorestart=unexpected: that's cool and a good idea!
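Under that approach, the program section above would presumably end up looking something like this (the exitcodes line is an assumption about which exit codes count as expected):

    [program:snmpd-config-updater]
    command=/usr/bin/snmpd-config-updater
    priority=1
    autostart=true
    autorestart=unexpected
    exitcodes=0
    stdout_logfile=syslog
    stderr_logfile=syslog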
[program:rsyslogd]
command=/usr/sbin/rsyslogd -n
priority=2
@@ -224,6 +224,18 @@ if [ "$image_type" = "aboot" ]; then
    sudo sed -i 's/udevadm settle/udevadm settle -E \/sys\/class\/net\/eth0/' $FILESYSTEM_ROOT/etc/init.d/networking
fi

# Service to update the sshd config file based on database changes
sudo cp src/config-file-updaters/sshd-config-updater.service $FILESYSTEM_ROOT/etc/systemd/system
sudo mkdir -p $FILESYSTEM_ROOT/etc/systemd/system/multi-user.target.wants
cd $FILESYSTEM_ROOT/etc/systemd/system/multi-user.target.wants/
sudo ln -s ../sshd-config-updater.service sshd-config-updater.service
cd -
sudo cp src/config-file-updaters/sshd-config-updater $FILESYSTEM_ROOT/usr/bin/
sudo chmod +x $FILESYSTEM_ROOT/usr/bin/sshd-config-updater
sudo cp src/config-file-updaters/sshd-clear-denied-sessions $FILESYSTEM_ROOT/usr/bin
sudo chmod +x $FILESYSTEM_ROOT/usr/bin/sshd-clear-denied-sessions
sudo cp src/libwrap/tcp-wrappers-7.6.q/tcpdmatch $FILESYSTEM_ROOT/usr/bin
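The sshd-config-updater.service unit installed above is not included in this excerpt; a rough sketch of what such a unit might contain (assumed for illustration, not taken from the PR):

    [Unit]
    Description=Update the sshd allow list from the database
    After=network.target

    [Service]
    ExecStart=/usr/bin/sshd-config-updater
    Restart=always

    [Install]
    WantedBy=multi-user.target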
This file will have to be modified to match the file relocations I requested below. It will need to change to:
## copy platform rc.local
sudo cp $IMAGE_CONFIGS/platform/rc.local $FILESYSTEM_ROOT/etc/
@@ -0,0 +1,10 @@
# libwrap packages

LIBWRAP_VERSION = 7.6.q-25

export LIBWRAP_VERSION

LIBWRAP = libwrap0_$(LIBWRAP_VERSION)_amd64.deb
$(LIBWRAP)_SRC_PATH = $(SRC_PATH)/libwrap
SONIC_MAKE_DEBS += $(LIBWRAP)
@@ -0,0 +1,82 @@
#!/usr/bin/env python
To align more closely with the current SONiC file structure, this file doesn't belong in
"""
This utility will find the ip addresses of all hosts that have connected to
this device via ssh, then validate they are still in the list of allowed prefixes,
and if not kill the ssh session with a SIGHUP.
"""

import os
import re
import subprocess

# Run utmpdump, capture and return its output
def run_utmpdump(_utmpFilename):
    devnull = open(os.devnull, "w")
    args = ["utmpdump", _utmpFilename]
    p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=devnull)
    (stdout, stderr) = p.communicate()
    rc = p.returncode
    assert rc is not None  # because p.communicate() should wait
    out = (stdout or '') + (stderr or '')
    if rc:
        e = RuntimeError("%r: error code %d" % (" ".join(args), rc))
        e.error = rc
        e.output = out
        raise e
    return stdout

# Run utmpdump and parse its output into a list of dicts and return that
def get_utmp_data(utmpFileName=None):
    """Reads the specified utmp file.
    Returns a list of dictionaries, one for each utmp entry.
    All dictionary keys and values are strings.
    Values are right padded with spaces and may contain all
    spaces if that utmp field is empty.
    Dictionary keys:
        "type":   utmp record type
        "pid":    Process ID as a string
        "tty":    TTY (line) name - device name of tty w/o "/dev/"
        "tty4":   4 char abbreviated TTY (line) name
        "user":   User ID
        "host":   Hostname for remote login,
                  kernel release for Run Level and Boot Time
        "ipAddr": IP Address
        "time":   Time and date entry was made
    See linux docs on utmp and utmpdump for more info.
    Example output from utmpdump:
        type pid     tty4   user      tty        host          ipAddr            time
        [7]  [22953] [/238] [myname ] [pts/238 ] [example.com] [253.122.98.159 ] [Mon Dec 18 21:08:09 2017 PST]
    """
    if not utmpFileName:
        utmpFileName = os.environ.get("DEFAULT_UTMP_FILE", "/var/run/utmp")
    if not os.path.exists(utmpFileName):
        return []
    output = run_utmpdump(utmpFileName)
    lines = re.split("\n", output)
    regExp = re.compile(
        r"\[(?P<type>" r"[^\]]*?)\s*\] \[(?P<pid>" r"[^\]]*?)\s*\] "
        r"\[(?P<tty4>" r"[^\]]*?)\s*\] \[(?P<user>" r"[^\]]*?)\s*\] "
        r"\[(?P<tty>" r"[^\]]*?)\s*\] \[(?P<host>" r"[^\]]*?)\s*\] "
        r"\[(?P<ipAddr>" r"[^\]]*?)\s*\] \[(?P<time>" r"[^\]]*?)\s*\]")
    entries = []
    for line in lines:
        m = regExp.match(line)
        if not m:
            # Skip header and any other lines we don't recognize
            continue
        entry = m.groupdict()
        entries.append(entry)
    return entries

# Find the source ip addresses of all ssh sessions, verify they are still allowed, and if not kill the ssh session
if __name__ == '__main__':
    for e in get_utmp_data():
        if e["host"] and e["ipAddr"] != "0.0.0.0":  # entry is for a live connection
            # check whether the source address is still allowed
            r = os.system('tcpdmatch sshd %s | grep "access.*granted" > /dev/null' % e["ipAddr"])
            # print some debugs
            print "From:", e["ipAddr"], "ssh pid:", e["pid"], "allowed" if r == 0 else "denied"
            # if needed go for the kill
            if r != 0:
                os.system("kill -1 %s" % e["pid"])
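The allow/deny decision above is delegated to tcpdmatch, built from the tcp-wrappers sources added by this PR; for reference, the same check can be run by hand against a candidate client address:

    tcpdmatch sshd 253.122.98.159

tcpdmatch prints the matching hosts.allow/hosts.deny rule along with whether access would be granted, which is what the grep in the script keys on.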
Inside Docker containers, we manage our processes using supervisor, not systemd. Therefore, you should replace this line with

restart_snmp_processes()

and add the following function:
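The body of the suggested function is not included in this excerpt; a minimal sketch under the assumption that supervisor manages the SNMP daemons inside the container under the program names snmpd and snmp-subagent:

    import os

    def restart_snmp_processes():
        # program names are assumptions; supervisorctl is how processes are
        # managed inside the container
        os.system("supervisorctl restart snmpd")
        os.system("supervisorctl restart snmp-subagent")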