diff --git a/changelog/62618.fixed b/changelog/62618.fixed new file mode 100644 index 000000000000..aeb1ecff6aab --- /dev/null +++ b/changelog/62618.fixed @@ -0,0 +1 @@ +Fixed Syndic eauth. Jobs are now published when a valid eauth user targets allowed minions/functions. diff --git a/changelog/62933.fixed b/changelog/62933.fixed new file mode 100644 index 000000000000..1b34722a729b --- /dev/null +++ b/changelog/62933.fixed @@ -0,0 +1 @@ +Restored the channel for Syndic minions to send job returns to the Salt master. diff --git a/doc/topics/topology/syndic.rst b/doc/topics/topology/syndic.rst index 1fcfc9e00d69..6561cbe2422a 100644 --- a/doc/topics/topology/syndic.rst +++ b/doc/topics/topology/syndic.rst @@ -4,31 +4,37 @@ Salt Syndic =========== +.. warning:: + + Syndics are supported when running ``salt`` as the root/service user on the + Master of Masters, but :ref:`external auth or publisher_acl` + may be unstable or not fully supported. + + Ensure minions have unique names across Syndics. Reusing a minion name + under different Syndics, for example a ``minion_1`` under both + ``syndic_a`` and ``syndic_b``, is undocumented and unsupported. + + Any other undocumented Syndic topology, such as using a Syndic as a + Master of Masters, should also be considered unsupported. + The most basic or typical Salt topology consists of a single Master node controlling a group of Minion nodes. An intermediate node type, called Syndic, when used offers greater structural flexibility and scalability in the construction of Salt topologies than topologies constructed only out of Master and Minion node types. -A Syndic node can be thought of as a special passthrough Minion node. A Syndic -node consists of a ``salt-syndic`` daemon and a ``salt-master`` daemon running -on the same system. The ``salt-master`` daemon running on the Syndic node -controls a group of lower level Minion nodes and the ``salt-syndic`` daemon -connects higher level Master node, sometimes called a Master of Masters. +A Syndic node is a special passthrough Minion node. A Syndic node consists of +a ``salt-syndic`` daemon and a ``salt-master`` daemon running on the same +system. The ``salt-master`` daemon running on the Syndic node controls a group +of lower level Minion nodes and the ``salt-syndic`` daemon connects to a higher +level Master node, sometimes called a Master of Masters. The ``salt-syndic`` daemon relays publications and events between the Master node and the local ``salt-master`` daemon. This gives the Master node control over the Minion nodes attached to the ``salt-master`` daemon running on the Syndic node. -.. warning:: - - Salt does not officially support Syndic and :ref:`external auth or - publisher_acl`. It's possible that it might work under certain - circumstances, but comprehensive support is lacking. See `issue #62618 on - GitHub `_ for more - information. Currently Syndic is only expected to work when running Salt as - root, though work is scheduled to fix this in Salt 3006 (Sulfur).
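+For illustration, a minimal two-level layout sets ``order_masters`` on the +Master of Masters and points each Syndic's local master at it. This is only a +sketch; the hostname below is illustrative, and the following section covers +configuration in detail: + +.. code-block:: yaml + + # /etc/salt/master on the Master of Masters + order_masters: True + + # /etc/salt/master on each Syndic node (hostname is illustrative) + syndic_master: master_of_masters +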
Configuring the Syndic ====================== diff --git a/salt/client/__init__.py b/salt/client/__init__.py index deb82bd74869..e097dd86d988 100644 --- a/salt/client/__init__.py +++ b/salt/client/__init__.py @@ -299,7 +299,7 @@ def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): tgt_type=tgt_type, timeout=timeout, listen=listen, - **kwargs + **kwargs, ) if "jid" in pub_data: @@ -365,7 +365,7 @@ def run_job( jid="", kwarg=None, listen=False, - **kwargs + **kwargs, ): """ Asynchronously send a command to connected minions @@ -393,7 +393,7 @@ def run_job( jid=jid, timeout=self._get_timeout(timeout), listen=listen, - **kwargs + **kwargs, ) except SaltClientError: # Re-raise error with specific message @@ -429,7 +429,7 @@ def run_job_async( kwarg=None, listen=True, io_loop=None, - **kwargs + **kwargs, ): """ Asynchronously send a command to connected minions @@ -458,7 +458,7 @@ def run_job_async( timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, - **kwargs + **kwargs, ) except SaltClientError: # Re-raise error with specific message @@ -511,7 +511,7 @@ def cmd_subset( cli=False, progress=False, full_return=False, - **kwargs + **kwargs, ): """ Execute a command on a random subset of the targeted systems @@ -553,7 +553,7 @@ def cmd_subset( kwarg=kwarg, progress=progress, full_return=full_return, - **kwargs + **kwargs, ) def cmd_batch( @@ -565,7 +565,7 @@ def cmd_batch( ret="", kwarg=None, batch="10%", - **kwargs + **kwargs, ): """ Iteratively execute a command on subsets of minions at a time @@ -641,7 +641,7 @@ def cmd( jid="", full_return=False, kwarg=None, - **kwargs + **kwargs, ): """ Synchronously execute a command on targeted minions @@ -759,7 +759,7 @@ def cmd( jid, kwarg=kwarg, listen=True, - **kwargs + **kwargs, ) if not pub_data: @@ -772,7 +772,8 @@ def cmd( self._get_timeout(timeout), tgt, tgt_type, - **kwargs + syndics=pub_data.get("syndics"), + **kwargs, ): if fn_ret: @@ -797,7 +798,7 @@ def cmd_cli( verbose=False, kwarg=None, progress=False, - **kwargs + **kwargs, ): """ Used by the :command:`salt` CLI. 
This method returns minion returns as @@ -821,7 +822,7 @@ def cmd_cli( timeout, kwarg=kwarg, listen=True, - **kwargs + **kwargs, ) if not self.pub_data: @@ -836,7 +837,8 @@ def cmd_cli( tgt_type, verbose, progress, - **kwargs + syndics=self.pub_data.get("syndics"), + **kwargs, ): if not fn_ret: @@ -867,7 +869,7 @@ def cmd_iter( tgt_type="glob", ret="", kwarg=None, - **kwargs + **kwargs, ): """ Yields the individual minion returns as they come in @@ -902,7 +904,7 @@ def cmd_iter( timeout, kwarg=kwarg, listen=True, - **kwargs + **kwargs, ) if not pub_data: @@ -916,7 +918,8 @@ def cmd_iter( timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, - **kwargs + syndics=pub_data.get("syndics"), + **kwargs, ): if not fn_ret: continue @@ -937,7 +940,7 @@ def cmd_iter_no_block( kwarg=None, show_jid=False, verbose=False, - **kwargs + **kwargs, ): """ Yields the individual minion returns as they come in, or None @@ -973,7 +976,7 @@ def cmd_iter_no_block( timeout, kwarg=kwarg, listen=True, - **kwargs + **kwargs, ) if not pub_data: @@ -986,7 +989,8 @@ def cmd_iter_no_block( tgt=tgt, tgt_type=tgt_type, block=False, - **kwargs + syndics=pub_data.get("syndics"), + **kwargs, ): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: @@ -1008,7 +1012,7 @@ def cmd_full_return( ret="", verbose=False, kwarg=None, - **kwargs + **kwargs, ): """ Execute a salt command and return @@ -1025,7 +1029,7 @@ def cmd_full_return( timeout, kwarg=kwarg, listen=True, - **kwargs + **kwargs, ) if not pub_data: @@ -1047,7 +1051,7 @@ def get_cli_returns( tgt_type="glob", verbose=False, show_jid=False, - **kwargs + **kwargs, ): """ Starts a watcher looking at the return data for a specified JID @@ -1124,7 +1128,7 @@ def get_iter_returns( tgt_type="glob", expect_minions=False, block=True, - **kwargs + **kwargs, ): """ Watch the event system and return job data as it comes in @@ -1136,6 +1140,13 @@ def get_iter_returns( minions = {minions} elif isinstance(minions, (list, tuple)): minions = set(list(minions)) + expected_syndics = set() + if self.opts["order_masters"]: + # syndics may arrive as a set, a str, or a list/tuple (or be + # absent) depending on the caller + syndics = kwargs.get("syndics") + if isinstance(syndics, set): + expected_syndics = syndics + elif isinstance(syndics, str): + expected_syndics = {syndics} + elif isinstance(syndics, (list, tuple)): + expected_syndics = set(syndics) if timeout is None: timeout = self.opts["timeout"] @@ -1149,6 +1160,7 @@ def get_iter_returns( found = set() missing = set() + found_syndics = set() # Check to see if the jid is real, if not return the empty dict try: if not self.returns_for_job(jid): @@ -1191,6 +1203,8 @@ def get_iter_returns( # if we got None, then there were no events if raw is None: break + if "syndic_id" in raw.get("data", {}): + found_syndics.add(raw["data"]["syndic_id"]) if "minions" in raw.get("data", {}): minions.update(raw["data"]["minions"]) if "missing" in raw.get("data", {}): @@ -1216,28 +1230,10 @@ def get_iter_returns( yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy - if ( - len(found.intersection(minions)) >= len(minions) - and not self.opts["order_masters"] - ): + if found.union(found_syndics) == minions.union(expected_syndics): # All minions have returned, break out of the loop log.debug("jid %s found all minions %s", jid, found) break - elif ( - len(found.intersection(minions)) >= len(minions) - and self.opts["order_masters"] - ): - if ( - len(found) >= len(minions) - and len(minions) > 0 - and time.time() > gather_syndic_wait - ): - # There were some minions to find and we found them - # However,
this does not imply that *all* masters have yet responded with expected minion lists. - Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see - if additional lower-level masters deliver their lists of expected - minions. - break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists @@ -1622,7 +1618,7 @@ def get_cli_event_returns( progress=False, show_timeout=False, show_jid=False, - **kwargs + **kwargs, ): """ Get the returns for the command line interface via the event system @@ -1652,7 +1648,7 @@ def get_cli_event_returns( expect_minions=( kwargs.pop("expect_minions", False) or verbose or show_timeout ), - **kwargs + **kwargs, ): log.debug("return event: %s", ret) return_count = return_count + 1 @@ -1847,7 +1843,7 @@ def pub( jid="", timeout=5, listen=False, - **kwargs + **kwargs, ): """ Take the required arguments and publish the given command. @@ -1935,7 +1931,82 @@ def pub( if not payload: return payload - return {"jid": payload["load"]["jid"], "minions": payload["load"]["minions"]} + ret = {"jid": payload["load"]["jid"], "minions": payload["load"]["minions"]} + if "syndics" in payload: + ret["syndics"] = payload["syndics"] + return ret + + @salt.ext.tornado.gen.coroutine + def arbitrary_pub_async( + self, + tgt, + payload, + tgt_type="glob", + timeout=5, + io_loop=None, + listen=True, + cmd=None, + **kwargs, + ): + """ + Publish an arbitrary payload to the master. Used by the Syndic to + forward ``_list_valid_minions`` requests to its local master. + """ + if self.opts.get("ipc_mode", "") != "tcp" and not os.path.exists( + os.path.join(self.opts["sock_dir"], "publish_pull.ipc") + ): + log.error( + "Unable to connect to the salt master publisher at %s", + self.opts["sock_dir"], + ) + raise SaltClientError + payload_kwargs, payload = payload, None + + master_uri = ( + "tcp://" + + salt.utils.zeromq.ip_bracket(self.opts["interface"]) + + ":" + + str(self.opts["ret_port"]) + ) + + with salt.channel.client.AsyncReqChannel.factory( + self.opts, io_loop=io_loop, crypt="clear", master_uri=master_uri ) as channel: + try: + # Ensure that the event subscriber is connected. + # If not, we won't get a response, so error out + if listen and not self.event.connect_pub(timeout=timeout): + raise SaltReqTimeoutError() + payload = yield channel.send(payload_kwargs, timeout=timeout) + except SaltReqTimeoutError: + raise SaltReqTimeoutError( + "Salt request timed out. The master is not responding. You " + "may need to run your command with `--async` in order to " + "bypass the congested event bus. With `--async`, the CLI tool " + "will print the job id (jid) and exit immediately without " + "listening for responses. You can then use " + "`salt-run jobs.lookup_jid` to look up the results of the job " + "in the job cache later." + ) + + if not payload: + # The master key could have changed out from under us!
Regen + and try again if the key has changed + key = self.__read_master_key() + if key == self.key: + raise salt.ext.tornado.gen.Return(payload) + self.key = key + payload_kwargs["key"] = self.key + payload = yield channel.send(payload_kwargs) + + error = payload.pop("error", None) + if error is not None: + if isinstance(error, dict): + err_name = error.get("name", "") + err_msg = error.get("message", "") + if err_name == "AuthenticationError": + raise AuthenticationError(err_msg) + elif err_name == "AuthorizationError": + raise AuthorizationError(err_msg) @salt.ext.tornado.gen.coroutine def pub_async( @@ -1949,7 +2020,7 @@ def pub_async( timeout=5, io_loop=None, listen=True, - **kwargs + **kwargs, ): """ Take the required arguments and publish the given command. diff --git a/salt/exceptions.py b/salt/exceptions.py index e351584bc031..f656629d2de4 100644 --- a/salt/exceptions.py +++ b/salt/exceptions.py @@ -344,6 +344,13 @@ class EauthAuthenticationError(SaltException): """ +class EauthAuthorizationError(SaltException): + """ + Thrown when an eauth user is authenticated successfully but is not + authorized to perform the requested action. + """ + + class TokenAuthenticationError(SaltException): """ Thrown when token authentication fails diff --git a/salt/master.py b/salt/master.py index a5251f4def2a..028632185544 100644 --- a/salt/master.py +++ b/salt/master.py @@ -1950,6 +1950,7 @@ class ClearFuncs(TransportMethods): "mk_token", "wheel", "runner", + "_list_valid_minions", ) # The ClearFuncs object encapsulates the functions that can be executed in @@ -1979,6 +1980,27 @@ def __init__(self, opts, key): self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key) self.channels = [] + def _list_valid_minions(self, clear_load): + """ + Resolve the requested target and the caller's allowed targets into + concrete minion IDs, then fire them back over the event bus as a + ``tgt_match_return`` event. + """ + delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM) + _res = self.ckminions.check_minions( + clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter + ) + tgt_minions = _res["minions"] + valid_minions = set() + for v in clear_load["valid_tgt"]: + _res = self.ckminions.check_minions( + v, clear_load.get("valid_tgt_type", "glob"), delimiter + ) + valid_minions.update(_res["minions"]) + event_ret = { + "valid_minions": list(valid_minions), + "tgt_minions": list(tgt_minions), + } + self.event.fire_event( + event_ret, tagify([clear_load.get("jid"), "tgt_match_return"]) + ) + return None + def runner(self, clear_load): """ Send a master control function back to the runner system @@ -2140,13 +2162,10 @@ def get_token(self, clear_load): return False return self.loadauth.get_tok(clear_load["token"]) - def publish(self, clear_load): + def _check_publisher_acl(self, *, clear_load): """ - This method sends out publications to the minions, it can only be used - by the LocalClient. + Check whether a clear_load is blocked by the publisher_acl settings.
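+ Returns an ``{"error": {...}}`` payload when the user or command is + blacklisted; otherwise returns ``None``.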
""" - extra = clear_load.get("kwargs", {}) - publisher_acl = salt.acl.PublisherACL(self.opts["publisher_acl_blacklist"]) if publisher_acl.user_is_blacklisted( @@ -2166,39 +2185,10 @@ def publish(self, clear_load): } } - # Retrieve the minions list - delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM) - _res = self.ckminions.check_minions( - clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter - ) - minions = _res.get("minions", list()) - missing = _res.get("missing", list()) - ssh_minions = _res.get("ssh_minions", False) - - # Check for external auth calls and authenticate - auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra) - if auth_type == "user": - auth_check = self.loadauth.check_authentication( - clear_load, auth_type, key=key - ) - else: - auth_check = self.loadauth.check_authentication(extra, auth_type) - - # Setup authorization list variable and error information - auth_list = auth_check.get("auth_list", []) - err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type) - - if auth_check.get("error"): - # Authentication error occurred: do not continue. - log.warning(err_msg) - return { - "error": { - "name": "AuthenticationError", - "message": "Authentication error occurred.", - } - } - - # All Token, Eauth, and non-root users must pass the authorization check + def _check_auth( + self, *, auth_check, auth_type, auth_list, clear_load, minions, err_msg, extra + ): + error = None if auth_type != "user" or (auth_type == "user" and auth_list): # Authorize the request authorized = self.ckminions.auth_check( @@ -2225,8 +2215,16 @@ def publish(self, clear_load): extra["eauth"], extra["username"], ) - log.warning(err_msg) - return { + # If we're a Master of Masters we don't need to log the error + # here... they may still be OK! But let's debug log it + # anyway... + if self.opts.get("order_masters"): + log.debug( + "order_masters was set. Would have warned about %r", err_msg + ) + else: + log.warning(err_msg) + error = { "error": { "name": "AuthorizationError", "message": "Authorization error occurred.", @@ -2241,37 +2239,231 @@ def publish(self, clear_load): elif auth_type == "eauth": # The username we are attempting to auth with clear_load["user"] = self.loadauth.load_name(extra) + return clear_load, error + + def publish(self, clear_load): + """ + This method sends out publications to the minions, it can only be used + by the LocalClient. + """ + extra = clear_load.get("kwargs", {}) + jid = None + + publisher_acl_error = self._check_publisher_acl(clear_load=clear_load) + # TODO: walrus operator - if publisher_acl_error := self._check... -W. 
Werner, 2022-12-08 + if publisher_acl_error: + return publisher_acl_error + + # Retrieve the minions list + delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM) + _res = self.ckminions.check_minions( + clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter + ) + minions = _res.get("minions", list()) + missing = _res.get("missing", list()) + ssh_minions = _res.get("ssh_minions", False) - # If we order masters (via a syndic), don't short circuit if no minions - # are found - if not self.opts.get("order_masters"): - # Check for no minions - if not minions: + # Check for external auth calls and authenticate + # not using err_name or sensitive_load_keys + auth_type, _, key, _ = self._prep_auth_info(extra) + if auth_type == "user": + auth_check = self.loadauth.check_authentication( + clear_load, auth_type, key=key + ) + else: + auth_check = self.loadauth.check_authentication(extra, auth_type) + + # Setup authorization list variable and error information + auth_list = auth_check.get("auth_list", []) + err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type) + + if auth_check.get("error"): + # Authentication error occurred: do not continue. + log.warning(err_msg) + return { + "error": { + "name": "AuthenticationError", + "message": "Authentication error occurred.", + } + } + + clear_load, auth_error = self._check_auth( + auth_check=auth_check, + auth_type=auth_type, + auth_list=auth_list, + clear_load=clear_load, + minions=minions, + err_msg=err_msg, + extra=extra, + ) + if auth_error: + if self.opts.get("order_masters"): + minions = [] + else: + return auth_error + if self.opts.get("order_masters"): + try: + minions, syndics, jid = self._collect_syndics( + clear_load=clear_load, + minions=minions, + auth_list=auth_list, + auth_type=auth_type, + extra=extra, + ) + except salt.exceptions.EauthAuthorizationError: return { - "enc": "clear", - "load": { - "jid": None, - "minions": minions, - "error": ( - "Master could not resolve minions for target {}".format( - clear_load["tgt"] - ) - ), - }, + "error": { + "name": "AuthorizationError", + "message": "Authorization error occurred.", + } } - jid = self._prep_jid(clear_load, extra) + + if not minions: + # We can't publish a job to minions that aren't there. Bail out! + # TODO: this doesn't work for eauth -W. Werner, 2022-12-08 + ret = { + "enc": "clear", + "load": { + "jid": None, + "minions": minions, + "error": ( + "Master could not resolve minions for target {}".format( + clear_load["tgt"] + ) + ), + }, + } + if self.opts.get("order_masters"): + ret["load"]["syndics"] = syndics + return ret + jid = jid or self._prep_jid(clear_load, extra) if jid is None: return {"enc": "clear", "load": {"error": "Master failed to assign jid"}} + if self.opts["order_masters"]: + clear_load["syndics"] = syndics payload = self._prep_pub(minions, jid, clear_load, extra, missing) # Send it! 
self._send_ssh_pub(payload, ssh_minions=ssh_minions) self._send_pub(payload) - return { + ret = { "enc": "clear", "load": {"jid": clear_load["jid"], "minions": minions, "missing": missing}, } + if self.opts["order_masters"]: + ret["syndics"] = syndics + return ret + + def _collect_syndics(self, *, clear_load, extra, minions, auth_list, auth_type): + jid = self._prep_jid(clear_load, extra) + valid_tgts = [] + valid_tgt_type = clear_load["tgt_type"] + for list_ in auth_list: + valid_tgts.extend(list_) + if auth_type == "user" and not valid_tgts: + # If auth type is user and we don't have an ACL (auth_list), + # they can target any minion + valid_tgts = ["*"] + valid_tgt_type = "glob" + + log.debug("Sending request for valid minions") + event_watcher = salt.utils.event.get_event( + "master", + self.opts["sock_dir"], + opts=self.opts, + listen=True, + ) + tgt_minions = set() + valid_minions = set() + syndics = set(self.ckminions.syndic_minions()) + missing_syndics = syndics.copy() + with event_watcher as event_bus: + syndic_request = { + "cmd": "_list_valid_minions", + "tgt": clear_load["tgt"], + "tgt_type": clear_load["tgt_type"], + "valid_tgt": valid_tgts, + "valid_tgt_type": valid_tgt_type, + "jid": jid, + "syndics": syndics, + } + self._send_pub(syndic_request) + timeout = ( + time.time() + clear_load.get("to", 5) - 1 + ) # 'to' is the provided timeout. Default to 5s + look_anyway = False + if not missing_syndics: + look_anyway = True + # Sentinel entry that never matches a syndic_id, so the loop + # waits out the full timeout and still collects any replies. + missing_syndics.add(object()) + while time.time() < timeout: + ret = event_bus.get_event(full=True, auto_reconnect=True) + if ret and ret.get("tag", "").endswith(f"/{jid}/tgt_match_return"): + data = ret["data"] + valid_minions.update(data.get("valid_minions", [])) + tgt_minions.update(data.get("tgt_minions", [])) + syndic_id = data.get("syndic_id") + if syndic_id: + missing_syndics.discard(syndic_id) + syndics.add(syndic_id) # maybe we got a new syndic + if not missing_syndics: + # Found all the syndics before the timeout! + break + else: + if not look_anyway: + log.debug( + "Failed to get _list_valid_minions from these syndics: %r", + missing_syndics, + ) + + minions.extend(tgt_minions) + + if minions and tgt_minions.issubset(valid_minions): + if auth_type != "user" or (auth_type == "user" and auth_list): + if ( + self.ckminions.check_syndic_auth_list( + minions=minions, + auth_list=auth_list, + funs=clear_load["fun"], + args=clear_load["arg"], + ) + is False + ): + # We're using user auth, and auth_list (access control + # list) is set. Or we're using external auth. In the + # future, these should be the same thing (i.e. we're + # authenticated either via user or eauth, and then we + # have an access control list to determine what we're + # able to do from there). + log.warning( + "User %r attempted to run %r against %r but was denied.", + clear_load.get("kwargs", {}).get( + "username", clear_load["user"] + ), + clear_load["fun"], + clear_load["tgt"], + ) + raise salt.exceptions.EauthAuthorizationError( + "User not authorized to run job against requested minions" + ) + # otherwise, just carry on + else: + # We have no minions, or we've tried targeting minions we don't + # have access to!
+ log.warning( + "User %r attempted to run %r against %r but was denied.", + clear_load["user"], + clear_load["fun"], + clear_load["tgt"], + ) + raise salt.exceptions.EauthAuthorizationError( + "User not authorized to run job against requested minions" + ) + log.debug( + "_collect_syndics got minions=%r syndics=%r for jid %r", + minions, + syndics, + jid, + ) + return minions, syndics, jid def _prep_auth_info(self, clear_load): sensitive_load_keys = [] @@ -2449,6 +2641,8 @@ def _prep_pub(self, minions, jid, clear_load, extra, missing): load["tgt_type"] = clear_load["tgt_type"] if "to" in clear_load: load["to"] = clear_load["to"] + if "syndics" in clear_load: + load["syndics"] = clear_load["syndics"] if "kwargs" in clear_load: if "ret_config" in clear_load["kwargs"]: diff --git a/salt/minion.py b/salt/minion.py index 7cbaa35f3e24..14d5b4497e60 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -3269,7 +3269,28 @@ def _handle_decoded_payload(self, data): data["to"] = int(data.get("to", self.opts["timeout"])) - 1 # Only forward the command if it didn't originate from ourselves if data.get("master_id", 0) != self.opts.get("master_id", 1): - self.syndic_cmd(data) + if "cmd" in data and data["cmd"] == "_list_valid_minions": + self.forward_to_localclient(data) + elif ("syndics" in data and self.opts["id"] in data["syndics"]) or data.get( + "fun" + ) in ("test.ping", "saltutil.find_job"): + self.syndic_cmd(data) + else: + log.debug( + "Syndic %r not in event - not forwarding %r", self.opts["id"], data + ) + + def forward_to_localclient(self, data): + """ + Take an arbitrary payload and send it to the Syndic Master via the LocalClient bus. + """ + + def timeout_handler(*args): + log.warning("Unable to forward pub data: %s %r", args[1], data) + return True + + with salt.ext.tornado.stack_context.ExceptionStackContext(timeout_handler): + self.local.arbitrary_pub_async(tgt=data["tgt"], payload=data) def syndic_cmd(self, data): """ @@ -3378,6 +3399,37 @@ def destroy(self): self.forward_events.stop() self.forward_events = None + def _send_req_sync(self, load, timeout): + if self.opts["minion_sign_messages"]: + log.trace("Signing event to be published onto the bus.") + minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem") + sig = salt.crypt.sign_message( + minion_privkey_path, salt.serializers.msgpack.serialize(load) + ) + load["sig"] = sig + + with salt.transport.client.AsyncReqChannel.factory(self.opts) as channel: + ret = yield channel.send( + load, timeout=timeout, tries=self.opts["return_retry_tries"] + ) + return ret + + @salt.ext.tornado.gen.coroutine + def _send_req_async(self, load, timeout): + if self.opts["minion_sign_messages"]: + log.trace("Signing event to be published onto the bus.") + minion_privkey_path = os.path.join(self.opts["pki_dir"], "minion.pem") + sig = salt.crypt.sign_message( + minion_privkey_path, salt.serializers.msgpack.serialize(load) + ) + load["sig"] = sig + + with salt.transport.client.AsyncReqChannel.factory(self.opts) as channel: + ret = yield channel.send( + load, timeout=timeout, tries=self.opts["return_retry_tries"] + ) + raise salt.ext.tornado.gen.Return(ret) + # TODO: need a way of knowing if the syndic connection is busted class SyndicManager(MinionBase): @@ -3641,6 +3693,8 @@ def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw) log.trace("Got event %s", mtag) # pylint: disable=no-member + if "tgt_match_return" in mtag: + data["syndic_id"] = self.opts["id"] tag_parts = 
mtag.split("/") if ( diff --git a/salt/returners/local_cache.py b/salt/returners/local_cache.py index 1530d94ddfcf..632e11c22e98 100644 --- a/salt/returners/local_cache.py +++ b/salt/returners/local_cache.py @@ -287,6 +287,7 @@ def get_load(jid): ret = {} load_p = os.path.join(jid_dir, LOAD_P) num_tries = 5 + exc = None for index in range(1, num_tries + 1): with salt.utils.files.fopen(load_p, "rb") as rfh: try: @@ -297,7 +298,8 @@ def get_load(jid): time.sleep(0.25) else: log.critical("Failed to unpack %s", load_p) - raise exc + if exc is not None: + raise exc if ret is None: ret = {} minions_cache = [os.path.join(jid_dir, MINIONS_P)] diff --git a/salt/utils/minions.py b/salt/utils/minions.py index e8133b034074..a8eb6f755afc 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -691,6 +691,10 @@ def _all_minions(self, expr=None): mlist.append(fn_) return {"minions": mlist, "missing": []} + def syndic_minions(self): + syndic_cache_path = os.path.join(self.opts["cachedir"], "syndics") + if not os.path.isdir(syndic_cache_path): + # No syndics have connected yet + return [] + return os.listdir(syndic_cache_path) + def check_minions( self, expr, tgt_type="glob", delimiter=DEFAULT_TARGET_DELIM, greedy=True ): @@ -735,6 +739,55 @@ def check_minions( _res = {"minions": [], "missing": []} return _res + def check_syndic_auth_list(self, *, minions, funs, args, auth_list): + """ + Check an auth list against a list of targeted minions, functions, and args. + """ + if not isinstance(funs, list): + funs = [funs] + args = [args] + + # Take the auth list and get all the minion names inside it + allowed_minions = set() + + auth_dictionary = {} + + # Make a set, so we are guaranteed to have only one of each minion + # Also iterate through the entire auth_list and create a dictionary + # so it's easy to look up what functions are permitted + for auth_list_entry in auth_list: + if isinstance(auth_list_entry, str): + for fun in funs: + # A top-level string entry in the auth list is a function + # pattern, so it is permitted on all minions. + if self.match_check(auth_list_entry, fun): + return True + continue + if isinstance(auth_list_entry, dict): + if len(auth_list_entry) != 1: + log.info("Malformed ACL: %s", auth_list_entry) + continue + allowed_minions.update(set(auth_list_entry.keys())) + for key in auth_list_entry: + for match in minions: + if match in auth_dictionary: + auth_dictionary[match].extend(auth_list_entry[key]) + else: + # Copy so later extends don't mutate the ACL entry + auth_dictionary[match] = list(auth_list_entry[key]) + + try: + for minion in minions: + results = [] + for num, fun in enumerate(auth_dictionary[minion]): + results.append(self.match_check(fun, funs)) + if not any(results): + return False + return True + + except TypeError: + return False + return False + def validate_tgt(self, valid, expr, tgt_type, minions=None, expr_form=None): """ Validate the target minions against the possible valid minions. diff --git a/tests/pytests/integration/cli/test_syndic_eauth.py b/tests/pytests/integration/cli/test_syndic_eauth.py new file mode 100644 index 000000000000..c44933fc10df --- /dev/null +++ b/tests/pytests/integration/cli/test_syndic_eauth.py @@ -0,0 +1,759 @@ +import json +import pathlib +import tempfile +import time + +import pytest + +docker = pytest.importorskip("docker") + + +def json_output_to_dict(output): + """ + Convert ``salt ... --out=json`` Syndic return to a dictionary. Since + ``--out=json`` returns several JSON documents, e.g. {...}\\n{...}, we have + to parse each document individually.
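+ For example (hypothetical output), two documents such as + ``{"minion_a1": true}`` and ``{"minion_b1": true}`` are parsed separately + and merged into ``{"minion_a1": True, "minion_b1": True}``.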
+ """ + output = output or "" + results = {} + for line in ( + _ for _ in output.replace("\n}", "\n}\x1f").split("\x1f") if _.strip() + ): + data = json.loads(line) + if isinstance(data, dict): + for minion in data: + # Filter out syndic minions since they won't show up in the future + if minion not in ("syndic_a", "syndic_b"): + results[minion] = data[minion] + return results + + +def accept_keys(container, required_minions): + failure_time = time.time() + 20 + while time.time() < failure_time: + container.run("salt-key -Ay") + res = container.run("salt-key -L --out=json") + if ( + isinstance(res.data, dict) + and set(res.data.get("minions")) == required_minions + ): + break + else: + pytest.skip(f"{container} unable to accept keys for {required_minions}") + + +@pytest.fixture(scope="module") +def syndic_network(): + try: + client = docker.from_env() + except docker.errors.DockerException as e: + # Docker failed, it's gonna be an environment issue, let's just skip + pytest.skip(f"Docker failed with error {e}") + pool = docker.types.IPAMPool( + subnet="172.27.13.0/24", + gateway="172.27.13.1", + ) + ipam_config = docker.types.IPAMConfig( + pool_configs=[pool], + ) + network = None + try: + network = client.networks.create(name="syndic_test_net", ipam=ipam_config) + yield network.name + finally: + if network is not None: + network.remove() + + +@pytest.fixture(scope="session") +def source_path(): + x = pathlib.Path(__file__).parent.parent.parent.parent.parent / "salt" + return str(x) + + +@pytest.fixture(scope="module") +def config(source_path): + # 3.10>= will allow the below line + # with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path: + with tempfile.TemporaryDirectory() as tmp_path: + tmp_path = pathlib.Path(tmp_path) + master_dir = tmp_path / "master" + minion_dir = tmp_path / "minion" + syndic_a_dir = tmp_path / "syndic_a" + syndic_b_dir = tmp_path / "syndic_b" + minion_a1_dir = tmp_path / "minion_a1" + minion_a2_dir = tmp_path / "minion_a2" + minion_b1_dir = tmp_path / "minion_b1" + minion_b2_dir = tmp_path / "minion_b2" + + for dir_ in ( + master_dir, + minion_dir, + syndic_a_dir, + syndic_b_dir, + minion_a1_dir, + minion_a2_dir, + minion_b1_dir, + minion_b2_dir, + ): + dir_.mkdir(parents=True, exist_ok=True) + (dir_ / "master.d").mkdir(exist_ok=True) + # minion.d is probably needed to prevent errors on tempdir cleanup + (dir_ / "minion.d").mkdir(exist_ok=True) + (dir_ / "pki").mkdir(exist_ok=True) + (master_dir / "master.d").mkdir(exist_ok=True) + + master_config_path = master_dir / "master" + master_config_path.write_text( + """ +order_masters: True + +publisher_acl: + bob: + - '*1': + - test.* + +external_auth: + pam: + bob: + - '*1': + - test.* + +nodegroups: + second_string: "minion_*2" + b_string: "minion_b*" + + """ + ) + + minion_config_path = minion_dir / "minion" + minion_config_path.write_text("id: minion\nmaster: master") + + syndic_a_minion_config_path = syndic_a_dir / "minion" + syndic_a_minion_config_path.write_text("id: syndic_a\nmaster: master") + syndic_a_master_config_path = syndic_a_dir / "master" + syndic_a_master_config_path.write_text( + """ +syndic_master: master +publisher_acl: + bob: + - '*1': + - test.* + +external_auth: + pam: + bob: + - '*1': + - test.* + """ + ) + + minion_a1_config_path = minion_a1_dir / "minion" + minion_a1_config_path.write_text("id: minion_a1\nmaster: syndic_a") + minion_a2_config_path = minion_a2_dir / "minion" + minion_a2_config_path.write_text("id: minion_a2\nmaster: syndic_a") + + syndic_b_minion_config_path 
= syndic_b_dir / "minion" + syndic_b_minion_config_path.write_text("id: syndic_b\nmaster: master") + syndic_b_master_config_path = syndic_b_dir / "master" + syndic_b_master_config_path.write_text("syndic_master: master") + + minion_b1_config_path = minion_b1_dir / "minion" + minion_b1_config_path.write_text("id: minion_b1\nmaster: syndic_b") + minion_b2_config_path = minion_b2_dir / "minion" + minion_b2_config_path.write_text("id: minion_b2\nmaster: syndic_b") + + yield { + "minion_dir": minion_dir, + "master_dir": master_dir, + "syndic_a_dir": syndic_a_dir, + "syndic_b_dir": syndic_b_dir, + "minion_a1_dir": minion_a1_dir, + "minion_a2_dir": minion_a2_dir, + "minion_b1_dir": minion_b1_dir, + "minion_b2_dir": minion_b2_dir, + } + + +@pytest.fixture(scope="module") +def docker_master(salt_factories, syndic_network, config, source_path): + config_dir = str(config["master_dir"]) + container = salt_factories.get_container( + "master", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "entrypoint": "salt-master -ldebug", + "network": syndic_network, + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + for user in ("bob", "fnord"): + container.run(f"adduser -D {user}") + container.run(f"passwd -d {user}") + container.run("apk add linux-pam-dev") + yield factory + + +@pytest.fixture(scope="module") +def docker_minion(salt_factories, syndic_network, config, source_path): + config_dir = str(config["minion_dir"]) + container = salt_factories.get_container( + "minion", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "entrypoint": "salt-minion", + "network": syndic_network, + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_syndic_a(salt_factories, config, syndic_network, source_path): + config_dir = str(config["syndic_a_dir"]) + container = salt_factories.get_container( + "syndic_a", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "entrypoint": "salt-master -ldebug", + "network": syndic_network, + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_syndic_b(salt_factories, config, syndic_network, source_path): + config_dir = str(config["syndic_b_dir"]) + container = salt_factories.get_container( + "syndic_b", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "entrypoint": "salt-master -ldebug", + "network": syndic_network, + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": 
"/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_minion_a1(salt_factories, config, syndic_network, source_path): + config_dir = str(config["minion_a1_dir"]) + container = salt_factories.get_container( + "minion_a1", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "network": syndic_network, + "entrypoint": "salt-minion -ldebug", + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_minion_a2(salt_factories, config, syndic_network, source_path): + config_dir = str(config["minion_a2_dir"]) + container = salt_factories.get_container( + "minion_a2", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "network": syndic_network, + "entrypoint": "salt-minion", + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_minion_b1(salt_factories, config, syndic_network, source_path): + config_dir = str(config["minion_b1_dir"]) + container = salt_factories.get_container( + "minion_b1", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "network": syndic_network, + "entrypoint": "salt-minion", + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module") +def docker_minion_b2(salt_factories, config, syndic_network, source_path): + config_dir = str(config["minion_b2_dir"]) + container = salt_factories.get_container( + "minion_b2", + image_name="saltstack/salt:3005", + container_run_kwargs={ + "network": syndic_network, + "entrypoint": "salt-minion", + "volumes": { + config_dir: {"bind": "/etc/salt", "mode": "z"}, + source_path: { + "bind": "/usr/local/lib/python3.7/site-packages/salt/", + "mode": "z", + }, + }, + }, + pull_before_start=True, + skip_on_pull_failure=True, + skip_if_docker_client_not_connectable=True, + ) + # container.container_start_check(confirm_container_started, container) + with container.started() as factory: + yield factory + + +@pytest.fixture(scope="module", autouse=True) +def all_the_docker( + docker_master, + docker_minion, + docker_syndic_a, + docker_syndic_b, + docker_minion_a1, + docker_minion_a2, + docker_minion_b1, + docker_minion_b2, +): + try: + for s in (docker_syndic_a, 
docker_syndic_b): + s.run("salt-syndic -d") + accept_keys( + container=docker_master, required_minions={"minion", "syndic_a", "syndic_b"} + ) + accept_keys( + container=docker_syndic_a, required_minions={"minion_a1", "minion_a2"} + ) + accept_keys( + container=docker_syndic_b, required_minions={"minion_b1", "minion_b2"} + ) + for tries in range(30): + res = docker_master.run(r"salt \* test.ping -t20 --out=json") + results = json_output_to_dict(res.stdout) + if set(results).issuperset( + ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"] + ): + break + else: + pytest.skip(f"Missing some minions: {sorted(results)}") + + yield + finally: + # looks like we need to do this to actually let the test suite run as non-root. + for container in ( + docker_minion, + docker_syndic_a, + docker_syndic_b, + docker_minion_a1, + docker_minion_a2, + docker_minion_b1, + docker_minion_b2, + ): + try: + container.run("rm -rfv /etc/salt/") + # If you need to debug this ^^^^^^^ + # use this vvvvvv + # res = container.run('rm -rfv /etc/salt/') + # print(container) + # print(res.stdout) + except docker.errors.APIError as e: + # if the container isn't running, there's nothing we can do + # at this point. + print(f"Docker failed removing /etc/salt: {e}") + + +@pytest.fixture( + params=[ + ("*", ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"]), + ("minion", ["minion"]), + ("minion_*", ["minion_a1", "minion_a2", "minion_b1", "minion_b2"]), + ("minion_a*", ["minion_a1", "minion_a2"]), + ("minion_b*", ["minion_b1", "minion_b2"]), + ("*1", ["minion_a1", "minion_b1"]), + ("*2", ["minion_a2", "minion_b2"]), + ] +) +def all_the_minions(request): + yield request.param + + +@pytest.fixture( + params=[ + ("minion_a1", ["minion_a1"]), + ("minion_b1", ["minion_b1"]), + ("*1", ["minion_a1", "minion_b1"]), + ("minion*1", ["minion_a1", "minion_b1"]), + ] +) +def eauth_valid_minions(request): + yield request.param + + +@pytest.fixture( + params=[ + "*", + "minion", + "minion_a2", + "minion_b2", + "syndic_a", + "syndic_b", + "*2", + "minion*", + "minion_a*", + "minion_b*", + ] +) +def eauth_blocked_minions(request): + yield request.param + + +@pytest.fixture( + params=[ + "test.arg good_argument", + "test.arg bad_news", + "test.arg not_allowed", + "test.echo very_not_good", + "cmd.run 'touch /tmp/fun.txt'", + "file.touch /tmp/more_fun.txt", + "test.arg_repr this_is_whatever", + "test.arg_repr more whatever", + "test.arg_repr cool guy", + ] +) +def all_the_commands(request): + yield request.param + + +@pytest.fixture( + params=[ + "test.arg", + "test.echo", + ] +) +def eauth_valid_commands(request): + yield request.param + + +@pytest.fixture( + params=[ + "cmd.run", + "file.manage_file", + "test.arg_repr", + ] +) +def eauth_invalid_commands(request): + yield request.param + + +@pytest.fixture( + params=[ + "good_argument", + "good_things", + "good_super_awesome_stuff", + ] +) +def eauth_valid_arguments(request): + # TODO: Should these be part of valid commands? I don't know yet! -W. Werner, 2022-12-01 + yield request.param + + +@pytest.fixture( + params=[ + "bad_news", + "not_allowed", + "very_not_good", + ] +) +def eauth_invalid_arguments(request): + yield request.param + + +@pytest.fixture( + params=[ + "G@id:minion_a1 and minion_b*", + "E@minion_[^b]1 and minion_b2", + "P@id:minion_[^b]. and minion", + # TODO: Do something better with these. Apparently lists work... weirdly -W.
Werner, 2022-12-02 + # "L@minion_* not L@minion_*2 and minion", + # "I@has_syndic:* not minion_b2 not minion_a2 and minion", + # "J@has_syndic:syndic_(a|b) not *2 and minion", + # TODO: I need to figure out a good way to get IPs -W. Werner, 2022-12-02 + # "S@172.13.1.4 and S@172.13.1.5 and minion_*2", + # TODO: I don't have any range servers -W. Werner, 2022-12-02 + # "((R@%minion_a1..2 and R@%minion_b1..2) not N@second_string) and minion", + ] +) +def invalid_comprehensive_minion_targeting(request): + yield request.param + + +@pytest.fixture( + params=[ + ( + "G@id:minion or minion_a1 or E@minion_[^b]2 or L@minion_b1,minion_b2", + ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"], + ), + ( + "minion or E@minion_a[12] or N@b_string", + ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"], + ), + ( + "L@minion,minion_a1 or N@second_string or N@b_string", + ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"], + ), + # TODO: I don't have pillar setup, nor do I know IPs, and also SECO range servers -W. Werner, 2022-12-02 + # ( + # "I@my_minion:minion and I@has_syndic:syndic_[^b] and S@172.13.1.4 and S@172.13.1.5", + # ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"], + # ), + # ( + # "minion and R@%minion_a1..2 and N@b_string", + # ["minion", "minion_a1", "minion_a2", "minion_b1", "minion_b2"], + # ), + ] +) +def comprehensive_minion_targeting(request): + # TODO: include SECO range? -W. Werner, 2022-12-01 + yield request.param + + +@pytest.fixture( + params=[ + ("G@id:minion_a1 and minion_b1", ["minion_a1", "minion_b1"]), + ("E@minion_[^b]1", ["minion_a1"]), + ( + "P@id:minion_[^b].", + ["minion_a1", "minion_a2"], + ), # should this be this thing? or something different? + # TODO: Turns out that it doesn't exclude things? Not sure -W. Werner, 2022-12-02 + ( + "L@minion_a1,minion_a2,minion_b1 not minion_*2", + ["minion_a1", "minion_a2", "minion_b1"], + ), + # TODO: I don't have pillar data yet -W. Werner, 2022-12-02 + # ("I@has_syndic:* not minion_b2 not minion_a2", ["minion_a1", "minion_b1"]), + # ("J@has_syndic:syndic_(a|b) not *2", ["minion_a1", "minion_b1"]), + # TODO: Need a different way to get IPs -W. Werner, 2022-12-02 + # ( "S@172.13.1.4 and S@172.13.1.5", ["minion_a1", "minion_b1"]), + # TODO: Need a range server for these tests (see range targeting docs) -W. Werner, 2022-12-02 + # ("(R@%minion_a1..2 and R@%minion_b1..2) not N@second_string", ["minion_a1", "minion_b1"]), + ] +) +def valid_comprehensive_minion_targeting(request): + yield request.param + + +@pytest.fixture( + params=[ + # TODO: not sure why this doesn't work. Pretty sure it's just the cli part -W. Werner, 2022-12-02 + # ("G@id:minion_a1 and minion_b1", ["minion_a1", "minion_b1"]), + ("E@minion_[^b]1", ["minion_a1"]), + ( + "P@id:minion_[^a]1", + ["minion_b1"], + ), + ("L@minion_a1,minion_b1 not minion_*2", ["minion_a1", "minion_b1"]), + # TODO: need to add pillars -W. Werner, 2022-12-02 + # ("I@has_syndic:* not minion_b2 not minion_a2", ["minion_a1", "minion_b1"]), + # ("J@has_syndic:syndic_(a|b) not *2", ["minion_a1", "minion_b1"]), + # TODO: Need a different way to get IPs -W. Werner, 2022-12-02 + # ( "S@172.13.1.4 and S@172.13.1.5", ["minion_a1", "minion_b1"]), + # TODO: Need a range server for these tests (see range targeting docs) -W. 
Werner, 2022-12-02 + # ("(R@%minion_a1..2 and R@%minion_b1..2) not N@second_string", ["minion_a1", "minion_b1"]), + ] +) +def valid_eauth_comprehensive_minion_targeting(request): + yield request.param + + +def test_root_user_should_be_able_to_call_any_and_all_minions_with_any_and_all_commands( + all_the_minions, all_the_commands, docker_master +): + target, expected_minions = all_the_minions + res = docker_master.run( + f"salt {target} {all_the_commands} -t 20 --out=json", + ) + if "jid does not exist" in (res.stderr or ""): + # might be flaky, let's retry + res = docker_master.run( + f"salt {target} {all_the_commands} -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert sorted(results) == expected_minions, res.stdout + + +def test_eauth_user_should_be_able_to_target_valid_minions_with_valid_command( + eauth_valid_minions, eauth_valid_commands, eauth_valid_arguments, docker_master +): + target, expected_minions = eauth_valid_minions + res = docker_master.run( + f"salt -a pam --username bob --password '' {target} {eauth_valid_commands} {eauth_valid_arguments} -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert sorted(results) == expected_minions, res.stdout + + +def test_eauth_user_should_not_be_able_to_target_invalid_minions( + eauth_blocked_minions, docker_master +): + res = docker_master.run( + f"salt -a pam --username bob --password '' {eauth_blocked_minions} test.arg hello -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert "Authorization error occurred" in res.stdout + assert sorted(results) == [] + + +@pytest.mark.skip(reason="Not sure about blocklist") +def test_eauth_user_should_not_be_able_to_target_valid_minions_with_invalid_commands( + eauth_valid_minions, eauth_invalid_commands, docker_master +): + tgt, _ = eauth_valid_minions + res = docker_master.run( + f"salt -a pam --username bob --password '' {tgt} {eauth_invalid_commands} -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert "Authorization error occurred" in res.stdout + assert sorted(results) == [] + + +@pytest.mark.skip(reason="Not sure about blocklist") +def test_eauth_user_should_not_be_able_to_target_valid_minions_with_valid_commands_and_invalid_arguments( + eauth_valid_minions, eauth_valid_commands, eauth_invalid_arguments, docker_master +): + tgt, _ = eauth_valid_minions + res = docker_master.run( + f"salt -a pam --username bob --password '' -C '{tgt}' {eauth_valid_commands} {eauth_invalid_arguments} -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert "Authorization error occurred" in res.stdout + assert sorted(results) == [] + + +def test_invalid_eauth_user_should_not_be_able_to_do_anything( + eauth_valid_minions, eauth_valid_commands, eauth_valid_arguments, docker_master +): + # TODO: Do we really need to run all of these tests for the invalid user? Maybe not! -W. 
Werner, 2022-12-01 + tgt, _ = eauth_valid_minions + res = docker_master.run( + f"salt -a pam --username badguy --password '' -C '{tgt}' {eauth_valid_commands} {eauth_valid_arguments} -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert sorted(results) == [] + + +def test_root_should_be_able_to_use_comprehensive_targeting( + comprehensive_minion_targeting, docker_master +): + tgt, expected_minions = comprehensive_minion_targeting + res = docker_master.run( + f"salt -C '{tgt}' test.version -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert sorted(results) == expected_minions + + +def test_eauth_user_should_be_able_to_target_valid_minions_with_valid_commands_comprehensively( + valid_eauth_comprehensive_minion_targeting, docker_master +): + tgt, expected_minions = valid_eauth_comprehensive_minion_targeting + res = docker_master.run( + f"salt -a pam --username bob --password '' -C '{tgt}' test.version -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert sorted(results) == expected_minions + + +def test_eauth_user_with_invalid_comprehensive_targeting_should_auth_failure( + invalid_comprehensive_minion_targeting, docker_master +): + res = docker_master.run( + f"salt -a pam --username fnord --password '' -C '{invalid_comprehensive_minion_targeting}' test.version -t 20 --out=json", + ) + results = json_output_to_dict(res.stdout) + assert "Authorization error occurred" in res.stdout + assert sorted(results) == [] diff --git a/tests/pytests/integration/master/test_clear_funcs.py b/tests/pytests/integration/master/test_clear_funcs.py index e235864d088d..681e045631ac 100644 --- a/tests/pytests/integration/master/test_clear_funcs.py +++ b/tests/pytests/integration/master/test_clear_funcs.py @@ -170,7 +170,7 @@ def test_clearfuncs_config(salt_master, clear_channel, user_info): "file_name": "good", "yaml_contents": "win: true", } - ret = clear_channel.send(good_msg, timeout=5) + ret = clear_channel.send(good_msg, timeout=15) assert "Wrote" in ret["data"]["return"] assert good_file_path.exists() good_file_path.unlink() @@ -182,7 +182,7 @@ def test_clearfuncs_config(salt_master, clear_channel, user_info): "file_name": "../evil", "yaml_contents": "win: true", } - ret = clear_channel.send(evil_msg, timeout=5) + ret = clear_channel.send(evil_msg, timeout=15) assert not evil_file_path.exists(), "Wrote file via directory traversal" assert ret["data"]["return"] == "Invalid path" finally: diff --git a/tests/unit/test_master.py b/tests/unit/test_master.py index 26c0bdb19ae0..9f2a4d512197 100644 --- a/tests/unit/test_master.py +++ b/tests/unit/test_master.py @@ -128,9 +128,12 @@ def test_clear_funcs_black(self): "_prep_pub", "_send_pub", "_send_ssh_pub", + "_check_auth", "get_method", "destroy", "connect", + "_check_publisher_acl", + "_collect_syndics", ] for name in dir(clear_funcs): if name in clear_funcs.expose_methods: