File susemanager-sls-git-1101.8675cc4.obscpio of Package susemanager-sls
File susemanager-sls/.gitignore:
*.cache*
*__pycache__*
*.pyc
*.pyo
File susemanager-sls/Makefile.python:
THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
CURRENT_DIR := $(dir $(THIS_MAKEFILE))
include $(CURRENT_DIR)../../rel-eng/Makefile.python
# Docker tests variables
DOCKER_CONTAINER_BASE = systemsmanagement/uyuni/master/docker/containers/uyuni-master
DOCKER_REGISTRY = registry.opensuse.org
DOCKER_RUN_EXPORT = "PYTHONPATH=$PYTHONPATH"
DOCKER_VOLUMES = -v "$(CURDIR)/../../:/manager"
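# The docker_* targets below run the linter and the test suite inside the
# Uyuni master CI container, with the git checkout mounted at /manager.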
__pylint ::
$(call update_pip_env)
pylint --rcfile=pylintrc $(shell find -name '*.py') > reports/pylint.log || true
__pytest ::
$(call update_pip_env)
$(call install_pytest)
cd src/tests; pytest --disable-warnings --tb=native --color=yes -v
junit_pytest ::
$(call update_pip_env)
$(call install_pytest)
cd src/tests; pytest -v --junit-xml /manager/susemanager-utils/susemanager-sls/reports/susemanager-sls.xml
docker_pylint ::
docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls/; make -f Makefile.python __pylint"
docker_shell ::
docker run -t -i --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/bash
docker_pytest ::
docker run --rm -e $(DOCKER_RUN_EXPORT) $(DOCKER_VOLUMES) $(DOCKER_REGISTRY)/$(DOCKER_CONTAINER_BASE)-pgsql /bin/sh -c "cd /manager/susemanager-utils/susemanager-sls; make -f Makefile.python __pytest"
File susemanager-sls/formula_metadata/README.md:
All metadata for your custom Salt Formulas should be put in this directory (/srv/formula_metadata/<your-formula-name>/).
The state files need to be in a Salt file root and belong in /srv/salt.
To learn more about Salt Formulas and how to write them, visit: https://docs.saltstack.com/en/latest/topics/development/conventions/formulas.html
To use your formulas with SUSE Multi-Linux Manager, they additionally need a form.yml file.
File susemanager-sls/formulas/metadata/README.md:
The metadata of Salt Formulas that are installed via RPM belongs in this directory.
For more information visit:
https://www.uyuni-project.org/uyuni-docs/en/uyuni/specialized-guides/salt/salt-formulas-custom.html
File susemanager-sls/formulas/states/formulas.sls:
include: {{ pillar["formulas"] }}
File susemanager-sls/modules/engines/mgr_events.py:
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2018-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
mgr_events.py is a SaltStack engine that writes selected events to the SUSE
Multi-Linux Manager PostgreSQL database. Additionally, it sends notifications
via the LISTEN/NOTIFY mechanism to alert SUSE Multi-Linux Manager of newly
available events.
mgr_events.py tries to keep I/O low in high-load scenarios. Therefore events
are INSERTed as they come in, but not necessarily COMMITted immediately.
The commit throttling is an implementation of a token bucket:
- a COMMIT costs one token
- initially, commit_burst tokens are available
- every commit_interval seconds, one new token is generated
(up to commit_burst)
- when an event arrives and there are tokens available it is COMMITted
immediately
- when an event arrives but no tokens are available, the event is INSERTed but
not COMMITted yet. COMMIT will happen as soon as a token is available
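A minimal sketch of this logic (the helper names here are illustrative
pseudo-code, not the engine's real API):

.. code:: python

    tokens = commit_burst                  # the bucket starts full

    def on_timer():                        # scheduled every commit_interval
        global tokens
        tokens = min(tokens + 1, commit_burst)
        try_commit()

    def on_event(event):
        insert(event)                      # INSERT always happens right away
        try_commit()

    def try_commit():
        global tokens
        if tokens > 0 and pending_inserts():
            commit()                       # a COMMIT costs one token
            tokens -= 1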
.. versionadded:: 2018.3.0
:depends: psycopg2
Minimal configuration example
.. code:: yaml
engines:
- mgr_events:
postgres_db:
        dbname: susemanager
user: spacewalk
password: spacewalk
host: localhost
notify_channel: suseSaltEvent
Full configuration example
.. code:: yaml
engines:
- mgr_events:
commit_interval: 1
commit_burst: 100
postgres_db:
        dbname: susemanager
user: spacewalk
password: spacewalk
host: localhost
port: 5432
notify_channel: suseSaltEvent
Most of the values have sane defaults, but the PostgreSQL login data
(dbname, user, password) still has to be provided. In `postgres_db`, only
`notify_channel` and `port` are optional, and `host` defaults to 'localhost'.
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import time
import fnmatch
try:
import psycopg2
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
# Import salt libs
import salt.version
import salt.ext.tornado
import salt.utils.event
import json
log = logging.getLogger(__name__)
DEFAULT_COMMIT_INTERVAL = 1
DEFAULT_COMMIT_BURST = 100
# pylint: disable-next=invalid-name
def __virtual__():
return HAS_PSYCOPG2
class Responder:
    """
    Listen on the Salt event bus and INSERT selected events into the
    suseSaltEvent table, throttling COMMITs with the token bucket described
    in the module docstring.
    """
def __init__(self, event_bus, config):
self.config = config
self.config.setdefault("commit_interval", DEFAULT_COMMIT_INTERVAL)
self.config.setdefault("commit_burst", DEFAULT_COMMIT_BURST)
self.config.setdefault("postgres_db", {})
self.config["postgres_db"].setdefault("host", "localhost")
self.config["postgres_db"].setdefault("notify_channel", "suseSaltEvent")
self.counters = [0 for i in range(config["events"]["thread_pool_size"] + 1)]
self.tokens = config["commit_burst"]
self.event_bus = event_bus
self._connect_to_database()
self.event_bus.io_loop.call_later(config["commit_interval"], self.add_token)
def _connect_to_database(self):
db_config = self.config.get("postgres_db")
if "port" in db_config:
# pylint: disable-next=consider-using-f-string
conn_string = "dbname='{dbname}' user='{user}' host='{host}' port='{port}' password='{password}'".format(
**db_config
)
else:
# pylint: disable-next=consider-using-f-string
conn_string = "dbname='{dbname}' user='{user}' host='{host}' password='{password}'".format(
**db_config
)
log.debug("%s: connecting to database", __name__)
while True:
try:
self.connection = psycopg2.connect(conn_string)
break
except psycopg2.OperationalError as err:
log.error("%s: %s", __name__, err)
log.error("%s: Retrying in 5 seconds.", __name__)
time.sleep(5)
self.cursor = self.connection.cursor()
def _insert(self, tag, data):
self.db_keepalive()
if (
any(
[
fnmatch.fnmatch(tag, "salt/minion/*/start"),
fnmatch.fnmatch(tag, "salt/job/*/ret/*"),
fnmatch.fnmatch(tag, "salt/beacon/*"),
fnmatch.fnmatch(tag, "salt/batch/*/start"),
fnmatch.fnmatch(tag, "suse/manager/image_deployed"),
fnmatch.fnmatch(tag, "suse/manager/image_synced"),
fnmatch.fnmatch(tag, "suse/manager/pxe_update"),
fnmatch.fnmatch(tag, "suse/systemid/generate"),
]
)
and not self._is_salt_mine_event(tag, data)
and not self._is_presence_ping(tag, data)
):
try:
queue = self._get_queue(data.get("id"))
log.debug("%s: Adding event to queue %d -> %s", __name__, queue, tag)
self.cursor.execute(
"INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);",
(data.get("id"), json.dumps({"tag": tag, "data": data}), queue),
)
self.counters[queue] += 1
self.attempt_commit()
# pylint: disable-next=broad-exception-caught
except Exception as err:
log.error("%s: %s", __name__, err)
try:
self.connection.commit()
# pylint: disable-next=broad-exception-caught
except Exception as err2:
log.error("%s: Error commiting: %s", __name__, err2)
self.connection.close()
finally:
log.debug("%s: %s", __name__, self.cursor.query)
else:
log.debug("%s: Discarding event -> %s", __name__, tag)
def _get_queue(self, minion_id):
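        """
        Pick the queue for a minion: reuse the queue that already holds
        events for this minion_id, otherwise take the least loaded queue
        from 1..thread_pool_size. Events without a minion id go to queue 0.
        """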
if minion_id:
self.cursor.execute(
"""
SELECT COALESCE (
(SELECT MAX(queue)
FROM suseSaltEvent
WHERE minion_id = %s),
(SELECT X.queue
FROM (SELECT Q.queue,
(SELECT COUNT(*) FROM suseSaltEvent sa where Q.queue = sa.queue) as count
FROM (SELECT generate_series(1, %s) queue) Q
ORDER BY count, Q.queue
LIMIT 1) X
)
) queue;""",
(minion_id, int(self.config["events"]["thread_pool_size"])),
)
row = self.cursor.fetchone()
if row is not None:
return int(row[0])
return 0
def trace_log(self):
log.trace("%s: queues sizes -> %s", __name__, self.counters)
log.trace("%s: tokens -> %s", __name__, self.tokens)
def _is_salt_mine_event(self, tag, data):
return fnmatch.fnmatch(tag, "salt/job/*/ret/*") and self._is_salt_mine_update(
data
)
def _is_salt_mine_update(self, data):
return data.get("fun") == "mine.update"
def _is_presence_ping(self, tag, data):
return (
fnmatch.fnmatch(tag, "salt/job/*/ret/*")
and self._is_test_ping(data)
and self._is_batch_mode(data)
)
def _is_test_ping(self, data):
return data.get("fun") == "test.ping"
def _is_batch_mode(self, data):
return data.get("metadata", {}).get("batch-mode")
@salt.ext.tornado.gen.coroutine
def add_event_to_queue(self, raw):
# FIXME: Drop once we only use Salt >= 3004
if salt.version.SaltStackVersion(*salt.version.__version_info__).major < 3004:
tag, data = self.event_bus.unpack(raw, self.event_bus.serial)
else:
tag, data = self.event_bus.unpack(raw)
self._insert(tag, data)
def db_keepalive(self):
if self.connection.closed:
log.error("%s: Diconnected from database. Trying to reconnect...", __name__)
self._connect_to_database()
@salt.ext.tornado.gen.coroutine
def add_token(self):
self.tokens = min(self.tokens + 1, self.config["commit_burst"])
self.attempt_commit()
self.trace_log()
self.event_bus.io_loop.call_later(
self.config["commit_interval"], self.add_token
)
def attempt_commit(self):
"""
        COMMIT pending events if a token is available, and NOTIFY the
        configured channel with the per-queue event counters.
"""
self.db_keepalive()
if self.tokens > 0 and sum(self.counters) > 0:
log.debug("%s: commit", __name__)
self.cursor.execute(
# pylint: disable-next=consider-using-f-string
"NOTIFY {}, '{}';".format(
self.config["postgres_db"]["notify_channel"],
",".join([str(counter) for counter in self.counters]),
)
)
self.connection.commit()
self.counters = [
0 for i in range(0, self.config["events"]["thread_pool_size"] + 1)
]
self.tokens -= 1
def start(**config):
"""
Listen to events and write them to the Postgres database
"""
io_loop = salt.ext.tornado.ioloop.IOLoop(make_current=False)
io_loop.make_current()
event_bus = salt.utils.event.get_master_event(
# pylint: disable-next=undefined-variable
__opts__,
# pylint: disable-next=undefined-variable
__opts__["sock_dir"],
listen=True,
io_loop=io_loop,
)
responder = Responder(event_bus, config)
event_bus.set_event_handler(responder.add_event_to_queue)
io_loop.start()
File susemanager-sls/modules/pillar/README.md:
Overview
========
1. In "/etc/salt/master", add the following:
extension_modules: /path/to/the/extension_pillar_modules
2. Copy *.py from this directory to the `extension_modules` directory.
3. Then, in "/etc/salt/master", add the following:
ext_pillar:
- suma_minion: /another/path/with/the/pillar/files
File susemanager-sls/modules/pillar/suma_minion.py:
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Retrieve SUSE Multi-Linux Manager pillar data for a minion_id.
- Adds generated SUSE Multi-Linux Manager pillar data.
- Adds formula pillar data.
.. code-block:: yaml
ext_pillar:
- suma_minion: True
"""
# Import python libs
from __future__ import absolute_import
from enum import Enum
import os
import logging
import yaml
import salt.utils.dictupdate
import salt.utils.stringutils
try:
import psycopg2
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
# SUSE Multi-Linux Manager formulas paths:
MANAGER_FORMULAS_METADATA_MANAGER_PATH = "/usr/share/susemanager/formulas/metadata"
MANAGER_FORMULAS_METADATA_STANDALONE_PATH = "/usr/share/salt-formulas/metadata"
CUSTOM_FORMULAS_METADATA_PATH = "/srv/formula_metadata"
FORMULA_PREFIX = "formula-"
def find_path(path_list):
"""
Find the first existing path in a list.
"""
for path in path_list:
if os.path.isdir(path):
return path
return path_list[0]
formulas_metadata_cache = dict()
# Formula group subtypes
class EditGroupSubtype(Enum):
PRIMITIVE_LIST = "PRIMITIVE_LIST"
PRIMITIVE_DICTIONARY = "PRIMITIVE_DICTIONARY"
LIST_OF_DICTIONARIES = "LIST_OF_DICTIONARIES"
DICTIONARY_OF_DICTIONARIES = "DICTIONARY_OF_DICTIONARIES"
# Set up logging
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Ensure the pillar module name.
"""
return HAS_POSTGRES
def _is_salt_ssh_or_runner(opts):
"""Check if this pillar is computed for Salt SSH or Salt runner execution
Only in salt/client/ssh/__init__.py, the master_opts are moved into
opts[__master_opts__], which we use to detect Salt SSH usage.
During a Salt runner execution, the "_master" suffix is appended to the
master_minion id.
"""
return "__master_opts__" in opts or opts.get("id", "").endswith("_master")
def _get_cursor(func):
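    """
    Call ``func`` with a database cursor, opening a connection (or reusing
    the one cached in ``__context__``) and retrying up to three times on
    database errors. For Salt SSH and runner executions the connection is
    not cached and is closed when done.
    """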
def _connect_db():
options = {
"host": "localhost",
"user": "",
"pass": "",
"db": "susemanager",
"port": 5432,
}
# pylint: disable-next=undefined-variable
options.update(__opts__.get("__master_opts__", __opts__).get("postgres", {}))
return psycopg2.connect(
host=options["host"],
user=options["user"],
password=options["pass"],
dbname=options["db"],
port=options["port"],
)
# pylint: disable-next=undefined-variable
if "suma_minion_cnx" in __context__:
# pylint: disable-next=undefined-variable
cnx = __context__["suma_minion_cnx"]
log.debug("Reusing DB connection from the context")
else:
try:
cnx = _connect_db()
log.debug("Connected to the DB")
# pylint: disable-next=undefined-variable
if not _is_salt_ssh_or_runner(__opts__):
# pylint: disable-next=undefined-variable
__context__["suma_minion_cnx"] = cnx
except psycopg2.OperationalError as err:
log.error("Error on getting database pillar: %s", err.args)
return
try:
cursor = cnx.cursor()
except psycopg2.InterfaceError as err:
log.debug("Reconnecting to the DB")
try:
cnx = _connect_db()
log.debug("Reconnected to the DB")
# pylint: disable-next=undefined-variable
if not _is_salt_ssh_or_runner(__opts__):
# pylint: disable-next=undefined-variable
__context__["suma_minion_cnx"] = cnx
cursor = cnx.cursor()
# pylint: disable-next=redefined-outer-name
except psycopg2.OperationalError as err:
log.error("Error on getting database pillar: %s", err.args)
return
retry = 0
while True:
try:
if retry:
cnx = _connect_db()
log.debug("Reconnected to the DB")
# pylint: disable-next=undefined-variable
if not _is_salt_ssh_or_runner(__opts__):
# pylint: disable-next=undefined-variable
__context__["suma_minion_cnx"] = cnx
cursor = cnx.cursor()
func(cursor)
break
except psycopg2.DatabaseError as err:
retry += 1
if retry == 3:
log.error("Error on getting database pillar, giving up: %s", err.args)
break
else:
log.error(
"Error on getting database pillar, trying again: %s", err.args
)
finally:
# pylint: disable-next=undefined-variable
if _is_salt_ssh_or_runner(__opts__):
cnx.close()
# pylint: disable-next=unused-argument
def ext_pillar(minion_id, pillar, *args):
"""
Find SUMA-related pillars for the registered minions and return the data.
"""
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug('Getting pillar data for the minion "{0}"'.format(minion_id))
ret = {}
group_formulas = {}
system_formulas = {}
# Load the global pillar from DB
def _load_db_pillar(cursor):
nonlocal ret
nonlocal group_formulas
nonlocal system_formulas
ret = load_global_pillars(cursor, ret)
ret = load_org_pillars(minion_id, cursor, ret)
group_formulas, ret = load_group_pillars(minion_id, cursor, ret)
system_formulas, ret = load_system_pillars(minion_id, cursor, ret)
_get_cursor(_load_db_pillar)
# Including formulas into pillar data
try:
ret = salt.utils.dictupdate.merge(
ret,
formula_pillars(system_formulas, group_formulas, ret),
strategy="recurse",
)
# pylint: disable-next=broad-exception-caught
except Exception as error:
log.error("Error accessing formula pillar data: %s", error)
return ret
def get_formula_order(pillar):
"""
    Get the formula order from the pillar, if present.
"""
if "formula_order" in pillar:
return pillar.pop("formula_order")
return []
def load_global_pillars(cursor, pillar):
"""
Load the global pillar from the database
"""
log.debug("Loading global pillars from db")
# Query for global pillar and extract the formula order
cursor.execute(
"""
SELECT p.pillar
FROM susesaltpillar AS p
WHERE p.server_id is NULL AND p.group_id is NULL AND p.org_id is NULL;"""
)
for row in cursor.fetchall():
pillar = salt.utils.dictupdate.merge(pillar, row[0], strategy="recurse")
return pillar
def load_org_pillars(minion_id, cursor, pillar):
"""
Load the org pillar from the database
"""
cursor.execute(
"""
SELECT p.pillar
FROM susesaltpillar AS p,
suseminioninfo AS m
WHERE m.minion_id = %s
AND p.org_id = (SELECT s.org_id FROM rhnServer AS s WHERE s.id = m.server_id);""",
(minion_id,),
)
for row in cursor.fetchall():
pillar = salt.utils.dictupdate.merge(pillar, row[0], strategy="recurse")
return pillar
def load_group_pillars(minion_id, cursor, pillar):
"""
Load the group pillars from the DB and extract the formulas from it
"""
groups_query = """
SELECT p.category, p.pillar
FROM susesaltpillar AS p,
suseminioninfo AS m
WHERE m.minion_id = %s
AND p.group_id IN (
SELECT g.server_group_id
FROM rhnServerGroupMembers AS g
WHERE g.server_id = m.server_id
);
"""
cursor.execute(groups_query, (minion_id,))
group_formulas = {}
for row in cursor.fetchall():
if row[0].startswith(FORMULA_PREFIX):
# Handle formulas separately
group_formulas[row[0][len(FORMULA_PREFIX) :]] = row[1]
else:
pillar = salt.utils.dictupdate.merge(pillar, row[1], strategy="recurse")
return (group_formulas, pillar)
def load_system_pillars(minion_id, cursor, pillar):
"""
Load the system pillars from the DB and extract the formulas from it
"""
minion_query = """
SELECT p.category, p.pillar
FROM susesaltpillar AS p,
suseminioninfo AS m
WHERE m.minion_id = %s
AND m.server_id = p.server_id;"""
cursor.execute(minion_query, (minion_id,))
server_formulas = {}
for row in cursor.fetchall():
if row[0].startswith(FORMULA_PREFIX):
# Handle formulas separately
server_formulas[row[0][len(FORMULA_PREFIX) :]] = row[1]
else:
pillar = salt.utils.dictupdate.merge(pillar, row[1], strategy="recurse")
return (server_formulas, pillar)
def formula_pillars(system_formulas, group_formulas, all_pillar):
"""
Find formula pillars for the minion, merge them and return the data.
"""
pillar = {}
out_formulas = []
# Loading group formulas
for formula_name in group_formulas:
formula_metadata = load_formula_metadata(formula_name)
if formula_name in out_formulas:
continue # already processed
if not formula_metadata.get("pillar_only", False):
out_formulas.append(formula_name)
pillar = salt.utils.dictupdate.merge(
pillar,
load_formula_pillar(
system_formulas.get(formula_name, {}),
group_formulas[formula_name],
formula_name,
formula_metadata,
),
strategy="recurse",
)
# Loading minion formulas
for formula_name in system_formulas:
if formula_name in out_formulas:
continue # already processed
formula_metadata = load_formula_metadata(formula_name)
if not formula_metadata.get("pillar_only", False):
out_formulas.append(formula_name)
pillar = salt.utils.dictupdate.merge(
pillar,
load_formula_pillar(system_formulas[formula_name], {}, formula_name),
strategy="recurse",
)
# Loading the formula order
order = get_formula_order(all_pillar)
if order:
out_formulas = [formula for formula in order if formula in out_formulas]
pillar["formulas"] = out_formulas
return pillar
# pylint: disable-next=unused-argument
def load_formula_pillar(system_data, group_data, formula_name, formula_metadata=None):
"""
Load the data from a specific formula for a minion in a specific group, merge and return it.
"""
layout_filename = os.path.join(
MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "form.yml"
)
if not os.path.isfile(layout_filename):
layout_filename = os.path.join(
MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "form.yml"
)
if not os.path.isfile(layout_filename):
layout_filename = os.path.join(
CUSTOM_FORMULAS_METADATA_PATH, formula_name, "form.yml"
)
if not os.path.isfile(layout_filename):
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
'Error loading data for formula "{formula}": No form.yml found'.format(
formula=formula_name
)
)
return {}
try:
# pylint: disable-next=unspecified-encoding
layout = yaml.load(open(layout_filename).read(), Loader=yaml.FullLoader)
# pylint: disable-next=broad-exception-caught
except Exception as error:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
'Error loading form.yml of formula "{formula}": {message}'.format(
formula=formula_name, message=str(error)
)
)
return {}
merged_data = merge_formula_data(layout, group_data, system_data)
merged_data = adjust_empty_values(layout, merged_data)
return merged_data
def merge_formula_data(layout, group_data, system_data, scope="system"):
"""
Merge the group and system formula data, respecting the scope of a value.
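    For example, an element with ``$scope: system`` takes the system value,
    falling back to the group value and then ``$default``; with
    ``$scope: group`` the system value is ignored; with ``$scope: readonly``
    the ``$default`` always applies.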
"""
ret = {}
for element_name in layout:
if element_name.startswith("$"):
continue
element = layout[element_name]
if not isinstance(element, dict):
continue
element_scope = element.get("$scope", scope)
value = None
if element.get("$type", "text") in ["group", "hidden-group", "namespace"]:
value = merge_formula_data(
element,
group_data.get(element_name, {}),
system_data.get(element_name, {}),
element_scope,
)
# edit-group is handled as primitive element - use either system_data or group data, no merging
elif element_scope == "system":
value = system_data.get(
element_name,
group_data.get(
element_name,
element.get("$default", element.get("$placeholder", "")),
),
)
elif element_scope == "group":
value = group_data.get(
element_name, element.get("$default", element.get("$placeholder", ""))
)
elif element_scope == "readonly":
value = element.get("$default", element.get("$placeholder", ""))
ret[element_name] = value
return ret
def adjust_empty_values(layout, data):
"""
Adjust empty values in formula data
"""
ret = {}
for element_name in layout:
if element_name.startswith("$"):
continue
element = layout[element_name]
if not isinstance(element, dict):
continue
element_type = element.get("$type", "text")
value = data.get(element_name, "")
if element_type in ["group", "hidden-group", "namespace"]:
value = adjust_empty_values(element, data.get(element_name, {}))
elif element_type in ["edit-group"]:
prototype = element.get("$prototype")
subtype = get_edit_group_subtype(element)
if subtype is EditGroupSubtype.DICTIONARY_OF_DICTIONARIES:
value = {}
if isinstance(data.get(element_name), dict):
for key, entry in list(data.get(element_name).items()):
proc_entry = adjust_empty_values(prototype, entry)
value[key] = proc_entry
elif subtype is EditGroupSubtype.LIST_OF_DICTIONARIES:
value = []
if isinstance(data.get(element_name), list):
for entry in data.get(element_name):
proc_entry = adjust_empty_values(prototype, entry)
value.append(proc_entry)
if not value and "$ifEmpty" in element:
value = element.get("$ifEmpty")
if value or not element.get("$optional"):
ret[element_name] = value
return ret
def get_edit_group_subtype(element):
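    """
    Classify an edit-group by its $prototype: a $key makes it a dictionary,
    a $type of "group" makes the entries dictionaries themselves, anything
    else holds primitive values.
    """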
if element is not None and element.get("$prototype"):
prototype = element.get("$prototype")
if prototype.get("$key") is None and prototype.get("$type", "group") != "group":
return EditGroupSubtype.PRIMITIVE_LIST
if (
prototype.get("$key") is not None
and prototype.get("$type", "group") != "group"
):
return EditGroupSubtype.PRIMITIVE_DICTIONARY
if prototype.get("$key") is None and prototype.get("$type", "group") == "group":
return EditGroupSubtype.LIST_OF_DICTIONARIES
if (
prototype.get("$key") is not None
and prototype.get("$type", "group") == "group"
):
return EditGroupSubtype.DICTIONARY_OF_DICTIONARIES
return None
def load_formula_metadata(formula_name):
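    """
    Load and cache the metadata.yml of a formula, looking in the standalone,
    manager and custom metadata paths, in that order.
    """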
if formula_name in formulas_metadata_cache:
return formulas_metadata_cache[formula_name]
metadata_filename = None
metadata_paths_ordered = [
os.path.join(
MANAGER_FORMULAS_METADATA_STANDALONE_PATH, formula_name, "metadata.yml"
),
os.path.join(
MANAGER_FORMULAS_METADATA_MANAGER_PATH, formula_name, "metadata.yml"
),
os.path.join(CUSTOM_FORMULAS_METADATA_PATH, formula_name, "metadata.yml"),
]
    # Take the first metadata file that exists
for mpath in metadata_paths_ordered:
if os.path.isfile(mpath):
metadata_filename = mpath
break
if not metadata_filename:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
'Error loading metadata for formula "{formula}": No metadata.yml found'.format(
formula=formula_name
)
)
return {}
try:
# pylint: disable-next=unspecified-encoding
metadata = yaml.load(open(metadata_filename).read(), Loader=yaml.FullLoader)
# pylint: disable-next=broad-exception-caught
except Exception as error:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
'Error loading data for formula "{formula}": {message}'.format(
formula=formula_name, message=str(error)
)
)
return {}
formulas_metadata_cache[formula_name] = metadata
return metadata
File susemanager-sls/modules/roster/uyuni.py:
# SPDX-FileCopyrightText: 2022-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Read in the roster from Uyuni DB
"""
from collections import namedtuple
import hashlib
# pylint: disable-next=unused-import
import io
import logging
# Import Salt libs
import salt.cache
import salt.config
import salt.loader
try:
import psycopg2
from psycopg2.extras import NamedTupleCursor
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
from yaml import dump
__virtualname__ = "uyuni"
log = logging.getLogger(__name__)
Proxy = namedtuple("Proxy", ["hostname", "port"])
JAVA_HOSTNAME = "localhost"
PROXY_SSH_PUSH_USER = "mgrsshtunnel"
PROXY_SSH_PUSH_KEY = (
"/var/lib/spacewalk/" + PROXY_SSH_PUSH_USER + "/.ssh/id_susemanager_ssh_push"
)
SALT_SSH_CONNECT_TIMEOUT = 180
SSH_KEY_DIR = "/var/lib/salt/.ssh"
SSH_KEY_PATH = SSH_KEY_DIR + "/mgr_ssh_id"
SSH_PRE_FLIGHT_SCRIPT = None
SSH_PUSH_PORT = 22
SSH_PUSH_PORT_HTTPS = 1233
SSH_PUSH_SUDO_USER = None
SSH_USE_SALT_THIN = False
SSL_PORT = 443
# pylint: disable-next=invalid-name
def __virtual__():
if not HAS_PSYCOPG2:
return (False, "psycopg2 is not available")
# pylint: disable-next=undefined-variable
if __opts__.get("postgres") is None or __opts__.get("uyuni_roster") is None:
return (False, "Uyuni is not installed or configured")
return __virtualname__
class UyuniRoster:
"""
The class to instantiate Uyuni connection and data gathering.
It's used to keep the DB connection, cache object and others in one instance
to prevent race conditions on loading the module with LazyLoader.
"""
def __init__(self, db_config, uyuni_roster_config):
self.config_hash = hashlib.sha256(
str(uyuni_roster_config).encode(errors="backslashreplace")
).hexdigest()
self.ssh_pre_flight_script = uyuni_roster_config.get("ssh_pre_flight_script")
self.ssh_push_port_https = uyuni_roster_config.get(
"ssh_push_port_https", SSH_PUSH_PORT_HTTPS
)
self.ssh_push_sudo_user = uyuni_roster_config.get("ssh_push_sudo_user", "root")
self.ssh_use_salt_thin = uyuni_roster_config.get(
"ssh_use_salt_thin", SSH_USE_SALT_THIN
)
self.ssh_connect_timeout = uyuni_roster_config.get(
"ssh_connect_timeout", SALT_SSH_CONNECT_TIMEOUT
)
self.java_hostname = uyuni_roster_config.get("host", JAVA_HOSTNAME)
if "port" in db_config:
# pylint: disable-next=consider-using-f-string
self.db_connect_str = "dbname='{db}' user='{user}' host='{host}' port='{port}' password='{pass}'".format(
**db_config
)
else:
self.db_connect_str = (
# pylint: disable-next=consider-using-f-string
"dbname='{db}' user='{user}' host='{host}' password='{pass}'".format(
**db_config
)
)
log.trace("db_connect dbname: %s", db_config["db"])
log.trace("db_connect user: %s", db_config["user"])
log.trace("db_connect host: %s", db_config["host"])
log.debug("ssh_pre_flight_script: %s", self.ssh_pre_flight_script)
log.debug("ssh_push_port_https: %d", self.ssh_push_port_https)
log.debug("ssh_push_sudo_user: %s", self.ssh_push_sudo_user)
log.debug("ssh_use_salt_thin: %s", self.ssh_use_salt_thin)
log.debug("salt_ssh_connect_timeout: %d", self.ssh_connect_timeout)
log.debug("java.hostname: %s", self.java_hostname)
# pylint: disable-next=undefined-variable
self.cache = salt.cache.Cache(__opts__)
cache_data = self.cache.fetch("roster/uyuni", "minions")
if "minions" in cache_data and self.config_hash != cache_data.get(
"config_hash"
):
log.debug("Flushing the cache as the config has been changed")
self.cache.flush("roster/uyuni")
self._init_db()
def _init_db(self):
log.trace("_init_db")
try:
self.db_connection = psycopg2.connect(
self.db_connect_str, cursor_factory=NamedTupleCursor
)
log.trace("_init_db: done")
except psycopg2.OperationalError as err:
# pylint: disable-next=logging-not-lazy
log.warning(
# pylint: disable-next=consider-using-f-string
"Unable to connect to the Uyuni DB: \n%sWill try to reconnect later."
% (err)
)
def _execute_query(self, *args, **kwargs):
log.trace("_execute_query")
try:
cur = self.db_connection.cursor()
cur.execute(*args, **kwargs)
log.trace("_execute_query: ret %s", cur)
return cur
except psycopg2.OperationalError as err:
# pylint: disable-next=logging-not-lazy,consider-using-f-string
log.warning("Error during SQL prepare: %s" % (err))
log.warning("Trying to reinit DB connection...")
self._init_db()
try:
cur = self.db_connection.cursor()
cur.execute(*args, **kwargs)
return cur
except psycopg2.OperationalError:
log.warning("Unable to re-establish connection to the Uyuni DB")
log.trace("_execute_query: ret None")
return None
def _get_ssh_options(
self,
minion_id=None,
proxies=None,
tunnel=False,
user=None,
ssh_push_port=SSH_PUSH_PORT,
):
proxy_command = []
i = 0
for proxy in proxies:
proxy_command.append(
# pylint: disable-next=consider-using-f-string
"/usr/bin/ssh -p {ssh_port} -i {ssh_key_path} -o StrictHostKeyChecking=no "
"-o User={ssh_push_user} {in_out_forward} {proxy_host}".format(
ssh_port=proxy.port or 22,
ssh_key_path=SSH_KEY_PATH if i == 0 else PROXY_SSH_PUSH_KEY,
ssh_push_user=PROXY_SSH_PUSH_USER,
in_out_forward=(
f"-W {minion_id}:{ssh_push_port}"
if not tunnel and i == len(proxies) - 1
else ""
),
proxy_host=proxy.hostname,
)
)
i += 1
if tunnel:
proxy_command.append(
"/usr/bin/ssh -i {pushKey} -o StrictHostKeyChecking=no "
"-o User={user} -R {pushPort}:{proxy}:{sslPort} {minion} "
"ssh -i {ownKey} -W {minion}:{sshPort} "
"-o StrictHostKeyChecking=no -o User={user} {minion}".format(
pushKey=PROXY_SSH_PUSH_KEY,
user=user,
pushPort=self.ssh_push_port_https,
proxy="localhost",
sslPort=SSL_PORT,
minion=minion_id,
# pylint: disable-next=consider-using-f-string
ownKey="{}{}".format(
# pylint: disable-next=consider-using-f-string
"/root" if user == "root" else "/home/{}".format(user),
"/.ssh/mgr_own_id",
),
sshPort=ssh_push_port,
)
)
# pylint: disable-next=consider-using-f-string
return ["ProxyCommand='{}'".format(" ".join(proxy_command))]
# pylint: disable-next=dangerous-default-value
def _get_ssh_minion(
self, minion_id=None, proxies=[], tunnel=False, ssh_push_port=SSH_PUSH_PORT
):
minion = {
"host": minion_id,
"user": self.ssh_push_sudo_user,
"port": ssh_push_port,
"timeout": self.ssh_connect_timeout,
}
if tunnel:
minion.update({"minion_opts": {"master": minion_id}})
if self.ssh_pre_flight_script:
minion.update(
{
"ssh_pre_flight": self.ssh_pre_flight_script,
"ssh_pre_flight_args": [
proxies[-1].hostname if proxies else self.java_hostname,
self.ssh_push_port_https if tunnel else SSL_PORT,
1 if self.ssh_use_salt_thin else 0,
],
}
)
if proxies:
minion.update(
{
"ssh_options": self._get_ssh_options(
minion_id=minion_id,
proxies=proxies,
tunnel=tunnel,
user=self.ssh_push_sudo_user,
ssh_push_port=ssh_push_port,
)
}
)
elif tunnel:
minion.update(
{
# pylint: disable-next=consider-using-f-string
"remote_port_forwards": "%d:%s:%d"
% (self.ssh_push_port_https, "localhost", SSL_PORT)
}
)
return minion
def targets(self):
cache_data = self.cache.fetch("roster/uyuni", "minions")
cache_fp = cache_data.get("fp", None)
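        # Fingerprint the SSH-push relevant tables (servers, proxy paths,
        # minion info) so the cached roster can be reused until any of
        # them changes.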
query = """
SELECT ENCODE(SHA256(FORMAT('%s|%s|%s|%s|%s|%s|%s',
EXTRACT(EPOCH FROM MAX(S.modified)),
COUNT(S.id),
EXTRACT(EPOCH FROM MAX(SP.modified)),
COUNT(SP.proxy_server_id),
EXTRACT(EPOCH FROM MAX(SMI.modified)),
COUNT(SMI.server_id),
EXTRACT(EPOCH FROM MAX(PI.modified))
)::bytea), 'hex') AS fp
FROM rhnServer AS S
INNER JOIN suseMinionInfo AS SMI ON
(SMI.server_id=S.id)
LEFT JOIN rhnServerPath AS SP ON
(SP.server_id=S.id)
LEFT JOIN rhnProxyInfo as PI ON
(SP.proxy_server_id = PI.server_id)
WHERE S.contact_method_id IN (
SELECT SSCM.id
FROM suseServerContactMethod AS SSCM
WHERE SSCM.label IN ('ssh-push', 'ssh-push-tunnel')
)
"""
h = self._execute_query(query)
if h is not None:
row = h.fetchone()
if row and row.fp:
log.trace("db cache fingerprint: %s", row.fp)
new_fp = row.fp
log.trace("cache check: old:%s new:%s", cache_fp, new_fp)
if (
new_fp == cache_fp
and "minions" in cache_data
and cache_data["minions"]
):
log.debug("Returning the cached data")
return cache_data["minions"]
else:
log.debug("Invalidate cache")
cache_fp = new_fp
else:
log.warning(
"Unable to reconnect to the Uyuni DB. Returning the cached data instead."
)
return cache_data["minions"]
ret = {}
query = """
SELECT S.id AS server_id,
SMI.minion_id AS minion_id,
SMI.ssh_push_port AS ssh_push_port,
SSCM.label='ssh-push-tunnel' AS tunnel,
SP.hostname AS proxy_hostname,
PI.ssh_port AS ssh_port
FROM rhnServer AS S
INNER JOIN suseServerContactMethod AS SSCM ON
(SSCM.id=S.contact_method_id)
INNER JOIN suseMinionInfo AS SMI ON
(SMI.server_id=S.id)
LEFT JOIN rhnServerPath AS SP ON
(SP.server_id=S.id)
LEFT JOIN rhnProxyInfo as PI ON
(SP.proxy_server_id = PI.server_id)
WHERE SSCM.label IN ('ssh-push', 'ssh-push-tunnel')
ORDER BY S.id, SP.position DESC
"""
h = self._execute_query(query)
prow = None
proxies = []
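        # Rows are ordered by server id and proxy position: collect the
        # proxy chain for each server, then emit one roster entry per
        # minion once the server id changes.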
row = h.fetchone()
while True:
if prow is not None and (row is None or row.server_id != prow.server_id):
ret[prow.minion_id] = self._get_ssh_minion(
minion_id=prow.minion_id,
proxies=proxies,
tunnel=prow.tunnel,
ssh_push_port=int(prow.ssh_push_port or SSH_PUSH_PORT),
)
proxies = []
if row is None:
break
if row.proxy_hostname:
proxies.append(Proxy(row.proxy_hostname, row.ssh_port))
prow = row
row = h.fetchone()
self.cache.store(
"roster/uyuni",
"minions",
{"fp": cache_fp, "minions": ret, "config_hash": self.config_hash},
)
if log.isEnabledFor(logging.TRACE):
log.trace("Uyuni DB roster:\n%s", dump(ret))
return ret
# pylint: disable-next=unused-argument
def targets(tgt, tgt_type="glob", **kwargs):
"""
Return the targets from the Uyuni DB
"""
# pylint: disable-next=undefined-variable
uyuni_roster = __context__.get("roster.uyuni")
if uyuni_roster is None:
uyuni_roster = UyuniRoster(
# pylint: disable-next=undefined-variable
__opts__.get("postgres"),
# pylint: disable-next=undefined-variable
__opts__.get("uyuni_roster"),
)
# pylint: disable-next=undefined-variable
__context__["roster.uyuni"] = uyuni_roster
# pylint: disable-next=undefined-variable
return __utils__["roster_matcher.targets"](uyuni_roster.targets(), tgt, tgt_type)
File susemanager-sls/modules/runners/kiwi-image-collect.py:
# pylint: disable=missing-module-docstring,invalid-name
# SPDX-FileCopyrightText: 2018-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
# runner to collect image from build host
import os
import logging
log = logging.getLogger(__name__)
def upload_file_from_minion(minion, minion_ip, filetoupload, targetdir):
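    """
    Fetch a file from the minion with rsync over SSH, retrying up to three
    times on exit code 255 (transport failures), and make the uploaded file
    world readable.
    """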
# pylint: disable-next=undefined-variable
fqdn = __salt__["cache.grains"](tgt=minion).get(minion, {}).get("fqdn")
# pylint: disable-next=undefined-variable
ssh_port = __salt__["cache.grains"](tgt=minion).get(minion, {}).get("ssh_port", 22)
log.info(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
'Collecting image "{}" from minion {} (FQDN: {}, IP: {}, SSH PORT: {})'.format(
filetoupload, minion, fqdn, minion_ip, ssh_port
)
)
if not fqdn or fqdn == "localhost":
fqdn = minion_ip
# pylint: disable-next=consider-using-f-string
src = "root@{}:{}".format(fqdn, filetoupload)
tries = 3
res = None
while tries > 0:
# pylint: disable-next=undefined-variable
res = __salt__["salt.cmd"](
"rsync.rsync",
src,
targetdir,
# pylint: disable-next=consider-using-f-string
rsh="ssh -o IdentityFile=/var/lib/salt/.ssh/mgr_ssh_id -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {}".format(
ssh_port
),
)
# In case of unexplained error, try again (can be dns failure, networking failure, ...)
if res.get("retcode", 0) != 255:
break
tries -= 1
if res.get("retcode") == 0:
filename = os.path.basename(filetoupload)
# Check and set correct permission for uploaded file. We need it world readable
# pylint: disable-next=undefined-variable
__salt__["salt.cmd"](
"file.check_perms",
os.path.join(targetdir, filename),
None,
"salt",
"salt",
644,
)
return res
def move_file_from_minion_cache(minion, filetomove, targetdir):
src = os.path.join(
# pylint: disable-next=undefined-variable
__opts__["cachedir"],
"minions",
minion,
"files",
filetomove.lstrip("/"),
)
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.info('Collecting image from minion cache "{}"'.format(src))
# file.move throws an exception in case of error
# pylint: disable-next=undefined-variable
return __salt__["salt.cmd"]("file.move", src, targetdir)
def kiwi_collect_image(minion, minion_ip, filepath, image_store_dir):
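    """
    Collect a built Kiwi image from the build host: either move it from the
    minion's Salt file cache (use_salt_transport pillar) or pull it with
    rsync over SSH.
    """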
try:
# pylint: disable-next=undefined-variable
__salt__["salt.cmd"]("file.mkdir", image_store_dir)
except PermissionError:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"kiwi_collect_image: Unable to create image directory {}".format(
image_store_dir
)
)
return {
"retcode": 13,
# pylint: disable-next=consider-using-f-string
"comment": "Unable to create image directory {}".format(image_store_dir),
}
use_salt_transport = (
# pylint: disable-next=undefined-variable
__salt__["cache.pillar"](tgt=minion)
.get(minion, {})
.get("use_salt_transport")
)
if use_salt_transport:
return move_file_from_minion_cache(minion, filepath, image_store_dir)
return upload_file_from_minion(minion, minion_ip, filepath, image_store_dir)
File susemanager-sls/modules/runners/mgrk8s.py:
# pylint: disable=missing-module-docstring
# SPDX-FileCopyrightText: 2017-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
from salt.exceptions import SaltInvocationError
import logging
log = logging.getLogger(__name__)
try:
# pylint: disable-next=unused-import
from kubernetes import client, config # pylint: disable=import-self
from kubernetes.config import new_client_from_config
# pylint: disable-next=unused-import
from kubernetes.client.rest import ApiException
# pylint: disable-next=unused-import
from urllib3.exceptions import HTTPError
IS_VALID = True
except ImportError as ex:
IS_VALID = False
# pylint: disable-next=invalid-name
def __virtual__():
return IS_VALID
def get_all_containers(kubeconfig=None, context=None):
"""
Retrieve information about all containers running in a Kubernetes cluster.
:param kubeconfig: path to kubeconfig file
:param context: context inside kubeconfig
:return:
.. code-block:: json
{
"containers": [
{
"image_id": "(docker-pullable://)?some/image@sha256:hash....",
"image": "myregistry/some/image:v1",
"container_id": "(docker|cri-o)://...hash...",
"pod_name": "kubernetes-pod",
"pod_namespace": "pod-namespace"
            }
        ]
    }
"""
if not kubeconfig:
raise SaltInvocationError("kubeconfig is mandatory")
if not context:
raise SaltInvocationError("context is mandatory")
api_client = new_client_from_config(kubeconfig, context)
api = client.CoreV1Api(api_client)
pods = api.list_pod_for_all_namespaces(watch=False)
output = dict(containers=[])
for pod in pods.items:
if pod.status.container_statuses is not None:
for container in pod.status.container_statuses:
res_cont = dict()
res_cont["container_id"] = container.container_id
res_cont["image"] = container.image
res_cont["image_id"] = container.image_id
res_cont["pod_name"] = pod.metadata.name
res_cont["pod_namespace"] = pod.metadata.namespace
output["containers"].append(res_cont)
else:
log.error("Failed to parse pod container statuses")
return output
File susemanager-sls/modules/runners/mgrutil.py:
# SPDX-FileCopyrightText: 2017-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""A collection of utility runner functions for Uyuni."""
from subprocess import Popen, PIPE
import logging
import stat
import grp
import shlex
import os
import os.path
import shutil
import salt.utils
import subprocess
import tempfile
import time
from salt.utils.minions import CkMinions
log = logging.getLogger(__name__)
GROUP_OWNER = "susemanager"
def delete_rejected_key(minion):
"""
Delete a previously rejected minion key from minions_rejected
:param minion: the minion id to look for
:return: map containing retcode and stdout/stderr
"""
path_rejected = "/etc/salt/pki/master/minions_rejected/"
path = os.path.normpath(path_rejected + minion)
if not path.startswith(path_rejected):
return {"retcode": -1, "stderr": "Unexpected path: " + path}
if os.path.isfile(path):
cmd = ["rm", path]
return _cmd(cmd)
return {"retcode": 0}
def ssh_keygen(path=None, pubkeycopy=None):
"""
Generate SSH keys using the given path.
    :param path: the path. If None, the keys are generated in a temporary folder, returned, and removed.
    :param pubkeycopy: the path to a file which should get a copy of the public key
    :return: map containing retcode and stdout/stderr. Also contains key and public_key if no path was provided
"""
temp_dir = None
with tempfile.TemporaryDirectory() as temp_dir:
out_path = os.path.join(temp_dir, "key") if path is None else path
result = {"retcode": 0}
if not path or not os.path.isfile(path):
cmd = ["ssh-keygen", "-N", "", "-f", out_path, "-t", "rsa", "-q"]
result = _cmd(cmd)
elif path:
out_path = path
if os.path.isfile(out_path) and result["retcode"] == 0:
# pylint: disable-next=unspecified-encoding
with open(out_path, "r") as fd:
result["key"] = fd.read()
# pylint: disable-next=unspecified-encoding
with open(out_path + ".pub", "r") as fd:
result["public_key"] = fd.read()
if pubkeycopy and os.path.isdir(os.path.dirname(pubkeycopy)):
shutil.copyfile(out_path + ".pub", pubkeycopy)
return result
def chain_ssh_cmd(
hosts=None,
clientkey=None,
proxykey=None,
user="root",
options=None,
command=None,
outputfile=None,
):
"""
Chain ssh calls over one or more hops to run a command on the last host in the chain.
    :param hosts: list of host[:port] entries to hop through; the command runs on the last one
    :param clientkey: SSH key used for the first hop
    :param proxykey: SSH key used for all subsequent hops
    :param user: SSH user, defaults to root
    :param options: map of SSH options, passed as -o Key=Value
    :param command: the command to run on the last host
    :param outputfile: optional path of a file to write the command's stdout to
    :return: map containing retcode and stdout/stderr
"""
cmd = []
for idx, hostname in enumerate(hosts):
host_port = hostname.split(":")
key = clientkey if idx == 0 else proxykey
opts = " ".join(
# pylint: disable-next=consider-using-f-string
["-o {}={}".format(opt, val) for opt, val in list(options.items())]
)
# pylint: disable-next=consider-using-f-string
ssh = "/usr/bin/ssh -p {} -i {} {} -o User={} {}".format(
host_port[1] if len(host_port) > 1 else 22, key, opts, user, host_port[0]
)
cmd.extend(shlex.split(ssh))
cmd.append(command)
ret = _cmd(cmd)
if outputfile:
# pylint: disable-next=unspecified-encoding
with open(outputfile, "w") as out:
out.write(ret["stdout"])
return ret
def remove_ssh_known_host(user, hostname, port):
"""Remove an SSH known host entry from Salt SSH's database."""
config_path = os.path.join(os.path.expanduser(f"~{user}"), ".ssh", "known_hosts")
if not os.path.exists(config_path):
config_path = None
# pylint: disable-next=undefined-variable
return __salt__["salt.cmd"]("ssh.rm_known_host", user, hostname, config_path, port)
def _cmd(cmd):
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return {
"retcode": p.returncode,
"stdout": salt.utils.stringutils.to_unicode(stdout),
"stderr": salt.utils.stringutils.to_unicode(stderr),
}
def _cleanup_outdated_data(workdir):
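    """
    Remove files older than two days below ``workdir`` and prune directories
    that became empty.
    """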
for root, dirs, files in os.walk(workdir, topdown=False):
for name in files:
fpath = os.path.join(root, name)
if (time.time() - os.path.getmtime(fpath)) > (2 * 24 * 3600):
# remove files which are older than 2 days
try:
os.remove(fpath)
except OSError as e:
log.error("Failed to remove %s: %s", fpath, e)
for name in dirs:
dpath = os.path.join(root, name)
if len(os.listdir(dpath)) == 0:
try:
os.rmdir(dpath)
except OSError as e:
log.error("Failed to remove %s: %s", dpath, e)
def move_minion_uploaded_files(
minion=None, dirtomove=None, basepath=None, actionpath=None
):
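    """
    Move files uploaded by a minion from the Salt cache to the action
    directory below ``basepath``, creating it if needed and handing group
    ownership to the susemanager group.
    """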
srcdir = os.path.join(
# pylint: disable-next=undefined-variable
__opts__["cachedir"],
"minions",
minion,
"files",
dirtomove.lstrip("/"),
)
scapstorepath = os.path.join(basepath, actionpath)
susemanager_gid = grp.getgrnam(GROUP_OWNER).gr_gid
if not os.path.exists(scapstorepath):
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("Creating action directory: {0}".format(scapstorepath))
try:
os.makedirs(scapstorepath)
# pylint: disable-next=broad-exception-caught
except Exception as err:
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.error("Failed to create dir {0}".format(scapstorepath), exc_info=True)
return {
# pylint: disable-next=consider-using-f-string
False: "Salt failed to create dir {0}: {1}".format(
scapstorepath, str(err)
)
}
# change group permissions to rwx and group owner to susemanager
mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
subdirs = actionpath.split("/")
for idx in range(1, len(subdirs)):
if subdirs[0:idx] != "":
# ignore errors. If dir has owner != salt then chmod fails but the dir
# might still have the correct group owner
try:
os.chmod(os.path.join(basepath, *subdirs[0:idx]), mode)
except OSError:
pass
try:
os.chown(
os.path.join(basepath, *subdirs[0:idx]), -1, susemanager_gid
)
except OSError:
pass
try:
# move the files to the scap store dir
for fl in os.listdir(srcdir):
shutil.move(os.path.join(srcdir, fl), scapstorepath)
# change group owner to susemanager
for fl in os.listdir(scapstorepath):
os.chown(os.path.join(scapstorepath, fl), -1, susemanager_gid)
# pylint: disable-next=broad-exception-caught
except Exception as err:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"Salt failed to move {0} -> {1}".format(srcdir, scapstorepath),
exc_info=True,
)
return {False: str(err)}
finally:
# pylint: disable-next=undefined-variable
fdir = os.path.join(__opts__["cachedir"], "minions", minion, "files")
_cleanup_outdated_data(fdir)
return {True: scapstorepath}
def check_ssl_cert(root_ca, server_crt, server_key, intermediate_cas):
"""
Check that the provided certificates are valid and return the certificate and key to deploy.
"""
try:
mgr_ssl_cmd = [
"mgr-ssl-cert-setup",
"--root-ca-file",
str(root_ca),
"--server-cert-file",
str(server_crt),
"--server-key-file",
str(server_key),
"--show-container-setup",
]
if intermediate_cas:
for i in intermediate_cas:
mgr_ssl_cmd.extend(["--intermediate-ca-file", str(i)])
result = subprocess.run(
mgr_ssl_cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return {"cert": result.stdout.decode()}
except subprocess.CalledProcessError as err:
return {"error": str(err)}
def select_minions(target, target_type):
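    """
    Resolve a Salt target expression to the list of matching minion ids.
    """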
# pylint: disable-next=undefined-variable
minions = CkMinions(__opts__)
return minions.check_minions(expr=target, tgt_type=target_type).get("minions", [])
File susemanager-sls/modules/tops/mgr_master_tops.py:
# -*- coding: utf-8 -*-
"""
SUSE Multi-Linux Manager master_tops module
-------------------------------------------
This module provides the base states top information from SUSE Multi-Linux Manager.
The top information returned by this module is merged by Salt with the
user custom data provided in /srv/salt/top.sls file.
.. code-block:: yaml
master_tops:
mgr_master_tops: True
"""
# Import python libs
from __future__ import absolute_import
import logging
# Define the module's virtual name
__virtualname__ = "mgr_master_tops"
log = logging.getLogger(__name__)
MANAGER_BASE_TOP = [
"channels",
"certs",
"packages",
"custom",
"custom_groups",
"custom_org",
"formulas",
"services.salt-minion",
"services.docker",
"services.kiwi-image-server",
"ansible",
"switch_to_bundle.mgr_switch_to_venv_minion",
]
# pylint: disable-next=invalid-name
def __virtual__():
"""
Ensure the module name.
"""
return __virtualname__
def top(**kwargs):
"""
Returns the SUSE Multi-Linux Manager top state information of a minion
for the `base` salt environment.
"""
env = kwargs["opts"].get("environment") or kwargs["opts"].get("saltenv")
if env in [None, "base"]:
log.debug(
'Loading SUSE Multi-Linux Manager TOP state information for the "base" environment'
)
return {"base": MANAGER_BASE_TOP}
return None
File susemanager-sls/pylintrc:
# susemanager-sls package pylint configuration
[MASTER]
# Profiled execution.
profile=no
# Pickle collected data for later comparisons.
persistent=no
[MESSAGES CONTROL]
# Disable the message(s) with the given id(s).
disable=I0011,
C0302,
C0111,
R0801,
R0902,
R0903,
R0904,
R0912,
R0913,
R0914,
R0915,
R0921,
R0922,
W0142,
W0403,
W0603,
C1001,
W0121,
useless-else-on-loop,
bad-whitespace,
unpacking-non-sequence,
superfluous-parens,
cyclic-import,
redefined-variable-type,
no-else-return,
# Uyuni disabled
E0203,
E0611,
E1101,
E1102
# list of disabled messages:
#I0011: 62: Locally disabling R0201
#C0302: 1: Too many lines in module (2425)
#C0111: 1: Missing docstring
#R0902: 19:RequestedChannels: Too many instance attributes (9/7)
#R0903: Too few public methods
#R0904: 26:Transport: Too many public methods (22/20)
#R0912:171:set_slots_from_cert: Too many branches (59/20)
#R0913:101:GETServer.__init__: Too many arguments (11/10)
#R0914:171:set_slots_from_cert: Too many local variables (38/20)
#R0915:171:set_slots_from_cert: Too many statements (169/50)
#W0142:228:MPM_Package.write: Used * or ** magic
#W0403: 28: Relative import 'rhnLog', should be 'backend.common.rhnLog'
#W0603: 72:initLOG: Using the global statement
# for pylint-1.0 we also disable
#C1001: 46, 0: Old-style class defined. (old-style-class)
#W0121: 33,16: Use raise ErrorClass(args) instead of raise ErrorClass, args. (old-raise-syntax)
#W:243, 8: Else clause on loop without a break statement (useless-else-on-loop)
# pylint-1.1 checks
#C:334, 0: No space allowed after bracket (bad-whitespace)
#W:162, 8: Attempting to unpack a non-sequence defined at line 6 of (unpacking-non-sequence)
#C: 37, 0: Unnecessary parens after 'not' keyword (superfluous-parens)
#C:301, 0: Unnecessary parens after 'if' keyword (superfluous-parens)
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html
output-format=parseable
# Include message's id in output
include-ids=yes
# Tells whether to display a full report or only the messages
reports=yes
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
[VARIABLES]
# A regular expression matching names used for dummy variables (i.e. not used).
dummy-variables-rgx=_|dummy
[BASIC]
# Regular expression which should only match correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-zA-Z_][a-zA-Z0-9_]+)$
# Regular expression which should only match correct module level names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$
# Regular expression which should only match correct class names
class-rgx=[a-zA-Z_][a-zA-Z0-9_]+$
# Regular expression which should only match correct function names
function-rgx=[a-z_][a-zA-Z0-9_]{,42}$
# Regular expression which should only match correct method names
method-rgx=[a-z_][a-zA-Z0-9_]{,42}$
# Regular expression which should only match correct instance attribute names
attr-rgx=[a-z_][a-zA-Z0-9_]{,30}$
# Regular expression which should only match correct argument names
argument-rgx=[a-z_][a-zA-Z0-9_]{,30}$
# Regular expression which should only match correct variable names
variable-rgx=[a-z_][a-zA-Z0-9_]{,30}$
# Regular expression which should only match correct list comprehension /
# generator expression variable names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression which should only match correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,42}|(__.*__))$
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# List of builtins function names that should not be used, separated by a comma
bad-functions=apply,input
[DESIGN]
# Maximum number of arguments for function / method
max-args=10
# Maximum number of locals for function / method body
max-locals=20
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branchs=20
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=1
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
[CLASSES]
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=
File susemanager-sls/reactor/resume_action_chain.sls:
resume_actionchain_execution:
local.mgractionchains.resume:
- tgt: {{ data['id'] }}
- metadata:
suma-action-chain: True
File susemanager-sls/salt-ssh/preflight.sh:
#!/bin/sh
if [ $# -lt 2 ]; then
echo "Error: Wrong number of arguments!"
exit 255
fi
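# Arguments:
#   $1  host serving the bootstrap repositories
#   $2  repository port (must be an integer)
#   $3  when "1", errors are only reported and the script exits 0
#   $4  when "1", bootstrap mode: use localhost as repo host unless port 443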
# In case the script is executed by a different interpreter than bash,
# we re-execute it explicitly using bash
SH_PATH=$(readlink /proc/$$/exe)
SH_NAME=$(basename "${SH_PATH}")
if ! [ "${SH_NAME}" = "bash" ]; then
exec bash "$0" "$@"
fi
REPO_HOST=$1
if [[ $2 =~ ^[0-9]+$ ]]; then
REPO_PORT=$2
else
echo 'Error: $2 (REPO_PORT) must be an integer.' >&2
exit 254
fi
FAIL_ON_ERROR=1
if [ "$3" = "1" ]; then
FAIL_ON_ERROR=0
fi
BOOTSTRAP=0
if [ "$4" = "1" ]; then
BOOTSTRAP=1
fi
if [ ${BOOTSTRAP} -eq 1 ] && [ ${REPO_PORT} -ne 443 ]; then
REPO_HOST="localhost"
fi
CLIENT_REPOS_ROOT="https://${REPO_HOST}:${REPO_PORT}/pub/repositories"
VENV_INST_DIR="/usr/lib/venv-salt-minion"
VENV_TMP_DIR="/var/tmp/venv-salt-minion"
VENV_HASH_FILE="venv-hash.txt"
TEMP_DIR=$(mktemp -d -t salt-bundle-XXXXXXXXXX)
trap "popd > /dev/null; rm -rf ${TEMP_DIR}" EXIT
pushd "${TEMP_DIR}" > /dev/null
function exit_with_message_code() {
echo "$1" >&2
if [ ${FAIL_ON_ERROR} -ne 0 ]; then
exit $2
fi
exit 0
}
# the order matters: see bsc#1222347
if [ -x /usr/bin/dnf ]; then
INSTALLER=yum
elif [ -x /usr/bin/yum ]; then
INSTALLER=yum
elif [ -x /usr/bin/zypper ]; then
INSTALLER=zypper
elif [ -x /usr/bin/apt ]; then
INSTALLER=apt
else
exit_with_message_code "Error: Unable to detect installer on the OS!" 1
fi
if [ -x /usr/bin/wget ]; then
output=`LANG=en_US /usr/bin/wget --no-check-certificate 2>&1`
error=`echo $output | grep "unrecognized option"`
if [ -z "$error" ]; then
FETCH="/usr/bin/wget -nv -r -nd --no-check-certificate"
else
FETCH="/usr/bin/wget -nv -r -nd"
fi
elif [ -x /usr/bin/curl ]; then
output=`LANG=en_US /usr/bin/curl -k 2>&1`
error=`echo $output | grep "is unknown"`
if [ -z "$error" ]; then
FETCH="/usr/bin/curl -ksSOf"
else
FETCH="/usr/bin/curl -sSOf"
fi
else
exit_with_message_code "Error: To be able to download files, please install either 'wget' or 'curl'" 2
fi
if [ "$INSTALLER" == "zypper" ] || [ "$INSTALLER" == "yum" ]; then
ARCH=$(rpm --eval "%{_arch}")
else
ARCH=$(dpkg --print-architecture)
fi
function getY_CLIENT_CODE_BASE() {
local BASE=""
local VERSION=""
# SLES ES6 is a special case; it will install a symlink named
# centos-release pointing to redhat-release which will make the
# original test fail; reversing the order of the checks does not help, as that
# would break genuine CentOS systems. So use the poor man's approach
# to detect this special case. SLES ES7 does not have this issue
# https://bugzilla.suse.com/show_bug.cgi?id=1132576
# https://bugzilla.suse.com/show_bug.cgi?id=1152795
if [ -L /usr/share/doc/sles_es-release ]; then
BASE="res"
VERSION=6
elif [ -f /etc/openEuler-release ]; then
grep -v '^#' /etc/openEuler-release | grep -q '\(openEuler\)' && BASE="openEuler"
VERSION=`grep -v '^#' /etc/openEuler-release | grep -Po '(?<=release )(\d+\.)+\d+'`
elif [ -f /etc/almalinux-release ]; then
grep -v '^#' /etc/almalinux-release | grep -q '\(AlmaLinux\)' && BASE="almalinux"
VERSION=`grep -v '^#' /etc/almalinux-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/rocky-release ]; then
grep -v '^#' /etc/rocky-release | grep -q '\(Rocky Linux\)' && BASE="rockylinux"
VERSION=`grep -v '^#' /etc/rocky-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/oracle-release ]; then
grep -v '^#' /etc/oracle-release | grep -q '\(Oracle\)' && BASE="oracle"
VERSION=`grep -v '^#' /etc/oracle-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/alinux-release ]; then
grep -v '^#' /etc/alinux-release | grep -q '\(Alibaba\)' && BASE="alibaba"
VERSION=`grep -v '^#' /etc/alinux-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/centos-release ]; then
grep -v '^#' /etc/centos-release | grep -q '\(CentOS\)' && BASE="centos"
VERSION=`grep -v '^#' /etc/centos-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/redhat-release ]; then
grep -v '^#' /etc/redhat-release | grep -q '\(Red Hat\)' && BASE="res"
VERSION=`grep -v '^#' /etc/redhat-release | grep -Po '(?<=release )\d+'`
elif [ -f /etc/os-release ]; then
BASE=$(source /etc/os-release; echo $ID)
VERSION=$(source /etc/os-release; echo $VERSION_ID)
fi
Y_CLIENT_CODE_BASE="${BASE:-unknown}"
Y_CLIENT_CODE_VERSION="${VERSION:-unknown}"
}
function getZ_CLIENT_CODE_BASE() {
local BASE=""
local VERSION=""
local PATCHLEVEL=""
if [ -r /etc/SuSE-release ]; then
grep -q 'Enterprise' /etc/SuSE-release && BASE='sle'
eval $(grep '^\(VERSION\|PATCHLEVEL\)' /etc/SuSE-release | tr -d '[:blank:]')
if [ "$BASE" != "sle" ]; then
grep -q 'openSUSE' /etc/SuSE-release && BASE='opensuse'
VERSION="$(grep '^\(VERSION\)' /etc/SuSE-release | tr -d '[:blank:]' | sed -n 's/.*=\([[:digit:]]\+\).*/\1/p')"
PATCHLEVEL="$(grep '^\(VERSION\)' /etc/SuSE-release | tr -d '[:blank:]' | sed -n 's/.*\.\([[:digit:]]*\).*/\1/p')"
fi
elif [ -r /etc/os-release ]; then
grep -q 'Enterprise' /etc/os-release && BASE='sle'
if [ "$BASE" != "sle" ]; then
grep -q 'openSUSE' /etc/os-release && BASE='opensuse'
fi
if [ "$BASE" == "" ]; then
grep -q 'cpe:/o:suse:' /etc/os-release && BASE='sl'
fi
grep -q 'Micro' /etc/os-release && BASE="${BASE}micro"
VERSION="$(grep '^\(VERSION_ID\)' /etc/os-release | sed -n 's/.*"\([[:digit:]]\+\).*/\1/p')"
PATCHLEVEL="$(grep '^\(VERSION_ID\)' /etc/os-release | sed -n 's/.*\.\([[:digit:]]*\).*/\1/p')"
# openSUSE MicroOS
grep -q 'MicroOS' /etc/os-release && BASE='opensusemicroos' && VERSION='latest'
# openSUSE Tumbleweed
grep -q 'Tumbleweed' /etc/os-release && BASE='opensusetumbleweed' && VERSION='latest'
fi
Z_CLIENT_CODE_BASE="${BASE:-unknown}"
Z_CLIENT_CODE_VERSION="${VERSION:-unknown}"
Z_CLIENT_CODE_PATCHLEVEL="${PATCHLEVEL:-0}"
}
function getA_CLIENT_CODE_BASE() {
local BASE=""
local VERSION=""
local VARIANT_ID=""
if [ -f /etc/os-release ]; then
BASE=$(source /etc/os-release; echo $ID)
VERSION=$(source /etc/os-release; echo $VERSION_ID)
VARIANT_ID=$(source /etc/os-release; echo $VARIANT_ID)
fi
A_CLIENT_CODE_BASE="${BASE:-unknown}"
local VERCOMPS=(${VERSION/\./ }) # split into an array 18.04 -> (18 04)
A_CLIENT_CODE_MAJOR_VERSION=${VERCOMPS[0]}
# Ubuntu only
if [ "${BASE}" == "ubuntu" ]; then
A_CLIENT_CODE_MINOR_VERSION=$((${VERCOMPS[1]} + 0)) # convert "04" -> 4
fi
A_CLIENT_VARIANT_ID="${VARIANT_ID:-unknown}"
}
if [ "${INSTALLER}" = "yum" ]; then
getY_CLIENT_CODE_BASE
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${Y_CLIENT_CODE_BASE}/${Y_CLIENT_CODE_VERSION}/bootstrap"
# In case of Red Hat derivatives, check if the bootstrap repository is available; if not, fall back to RES.
if [ "$Y_CLIENT_CODE_BASE" == almalinux ] || \
[ "$Y_CLIENT_CODE_BASE" == rockylinux ] || \
[ "$Y_CLIENT_CODE_BASE" == oracle ] || \
[ "$Y_CLIENT_CODE_BASE" == alibaba ] || \
[ "$Y_CLIENT_CODE_BASE" == centos ]; then
$FETCH $CLIENT_REPO_URL/repodata/repomd.xml &> /dev/null
if [ $? -ne 0 ]; then
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/res/${Y_CLIENT_CODE_VERSION}/bootstrap"
fi
fi
elif [ "${INSTALLER}" = "zypper" ]; then
getZ_CLIENT_CODE_BASE
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${Z_CLIENT_CODE_BASE}/${Z_CLIENT_CODE_VERSION}/${Z_CLIENT_CODE_PATCHLEVEL}/bootstrap"
elif [ "${INSTALLER}" = "apt" ]; then
getA_CLIENT_CODE_BASE
if [ "${A_CLIENT_CODE_BASE}" == "debian" ] || [ "${A_CLIENT_CODE_BASE}" == "raspbian" ]; then
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_CODE_MAJOR_VERSION}/bootstrap"
elif [ "${A_CLIENT_CODE_BASE}" == "astra" ]; then
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_VARIANT_ID}/bootstrap"
else
CLIENT_REPO_URL="${CLIENT_REPOS_ROOT}/${A_CLIENT_CODE_BASE}/${A_CLIENT_CODE_MAJOR_VERSION}/${A_CLIENT_CODE_MINOR_VERSION}/bootstrap"
fi
fi
SELINUX_POLICY_FILENAME="salt_ssh_port_forwarding.cil"
function selinux_policy_loaded {
semodule -l | grep -x $SELINUX_POLICY_FILENAME
}
# Our SSH tunnel uses a custom port and we must configure SELinux to account for it
if [[ $REPO_HOST == "localhost" ]] && command -v selinuxenabled && selinuxenabled; then
if ! selinux_policy_loaded; then
echo "(portcon tcp ${REPO_PORT} (system_u object_r ssh_port_t ((s0)(s0))))" >$SELINUX_POLICY_FILENAME
if ! semodule -i $SELINUX_POLICY_FILENAME; then
exit_with_message_code "Error: Failed to install SELinux policy with port=${REPO_PORT}." 7
fi
fi
fi
VENV_FILE="venv-enabled-${ARCH}.txt"
VENV_ENABLED_URL="${CLIENT_REPO_URL}/${VENV_FILE}"
$FETCH $VENV_ENABLED_URL > /dev/null 2>&1
if [ -f "${VENV_FILE}" ]; then
VENV_SOURCE="bootstrap"
else
if [ "${INSTALLER}" = "apt" ] && dpkg-query -s venv-salt-minion > /dev/null 2>&1 && [ -d "${VENV_INST_DIR}" ]; then
VENV_SOURCE="dpkg"
elif rpm -q --quiet venv-salt-minion 2> /dev/null && [ -d "${VENV_INST_DIR}" ]; then
VENV_SOURCE="rpm"
fi
fi
if [ -n "${VENV_SOURCE}" ]; then
if [ "${VENV_SOURCE}" = "bootstrap" ]; then
VENV_ENABLED=$(cat "${VENV_FILE}")
VENV_HASH=$(echo "${VENV_ENABLED}" | sed 's/ .*//')
VENV_PKG_PATH=$(echo "${VENV_ENABLED}" | sed 's/^.* //')
if [ -z "${VENV_HASH}" ] || [ -z "${VENV_PKG_PATH}" ]; then
exit_with_message_code "Error: File ${CLIENT_REPO_URL}/${VENV_FILE} is malformed!" 4
fi
elif [ "${VENV_SOURCE}" = "rpm" ]; then
VENV_HASH=$(rpm -qi venv-salt-minion | sha256sum | tr -d '\- ')
elif [ "${VENV_SOURCE}" = "dpkg" ]; then
VENV_HASH=$(dpkg -s venv-salt-minion | sha256sum | tr -d '\- ')
fi
if [ -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}" ]; then
if [ -x "${VENV_TMP_DIR}/bin/python" ]; then
PRE_VENV_HASH=$(cat "${VENV_TMP_DIR}/${VENV_HASH_FILE}")
else
rm -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
fi
fi
if [ "${VENV_HASH}" != "${PRE_VENV_HASH}" ]; then
if [ "${VENV_SOURCE}" = "bootstrap" ]; then
VENV_PKG_URL="${CLIENT_REPO_URL}/${VENV_PKG_PATH}"
$FETCH $VENV_PKG_URL > /dev/null 2>&1
VENV_PKG_FILE=$(basename "${VENV_PKG_PATH}")
if [ ! -f "${VENV_PKG_FILE}" ] && [ -z "${PRE_VENV_HASH}" ]; then
exit_with_message_code "Error: Unable to download $VENV_PKG_URL file!" 5
fi
fi
rm -rf "${VENV_TMP_DIR}"
if [ "${VENV_SOURCE}" = "bootstrap" ]; then
mkdir -p "${VENV_TMP_DIR}"
pushd "${VENV_TMP_DIR}" > /dev/null
if [ "${VENV_PKG_FILE##*\.}" = "deb" ]; then
dpkg-deb -x "${TEMP_DIR}/${VENV_PKG_FILE}" .
rm -rf etc lib var usr/bin usr/sbin usr/share usr/lib/tmpfiles.d
else
rpm2cpio "${TEMP_DIR}/${VENV_PKG_FILE}" | cpio -idm '*/lib/venv-salt-minion/*' >> /dev/null 2>&1
fi
mv usr usr.tmp
mv usr.tmp/lib/venv-salt-minion/* .
rm -rf usr.tmp
if [ ! -x bin/python ]; then
rm -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
exit_with_message_code "Error: Unable to extract the bundle from ${TEMP_DIR}/${VENV_PKG_FILE}!" 6
fi
else
cp -r "${VENV_INST_DIR}" "${VENV_TMP_DIR}"
pushd "${VENV_TMP_DIR}" > /dev/null
fi
grep -m1 -r "^#\!${VENV_INST_DIR}" bin/ | sed 's/:.*//' | sort | uniq | xargs -I '{}' sed -i "1s=^#!${VENV_INST_DIR}/bin/.*=#!${VENV_TMP_DIR}/bin/python=" {}
sed -i "s#${VENV_INST_DIR}#${VENV_TMP_DIR}#g" bin/python
popd > /dev/null
echo "${VENV_HASH}" > "${VENV_TMP_DIR}/${VENV_HASH_FILE}"
fi
else
if [ ! -f "${VENV_TMP_DIR}/${VENV_HASH_FILE}" ]; then
exit_with_message_code "Error: Unable to download ${CLIENT_REPO_URL}/${VENV_FILE} file!" 3
fi
fi
0707010000001D000041FD00000000000000000000001E68EFD66400000000000000000000000000000000000000000000001500000000susemanager-sls/salt0707010000001E000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002200000000susemanager-sls/salt/actionchains0707010000001F000081B400000000000000000000000168EFD66400000412000000000000000000000000000000000000003A00000000susemanager-sls/salt/actionchains/force_restart_minion.sh#!/bin/bash
if [ "$(readlink /proc/1/exe)" = "/sbin/init" ]; then
# SysV, use pid ctime as service start time
SALT_MINION_NAME="salt-minion"
SALT_MINION_PID="/var/run/salt-minion.pid"
if [ -f /var/run/venv-salt-minion.pid ]; then
SALT_MINION_NAME="venv-salt-minion"
SALT_MINION_PID="/var/run/venv-salt-minion.pid"
fi
T0=$(stat -c '%Z' "$SALT_MINION_PID")
RESTART_MINION="/usr/sbin/rc$SALT_MINION_NAME restart"
else
# systemd
SALT_MINION_NAME="salt-minion"
if systemctl status venv-salt-minion > /dev/null 2>&1; then
SALT_MINION_NAME="venv-salt-minion"
fi
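# systemctl prints e.g. "ActiveEnterTimestamp=Mon 2025-01-06 10:00:00 UTC" (illustrative);
# the key is stripped below and the timestamp converted to epoch seconds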
TIME=$(systemctl show "$SALT_MINION_NAME" --property=ActiveEnterTimestamp)
TIME="${TIME//ActiveEnterTimestamp=/}"
T0=$(date -d "$TIME" '+%s')
RESTART_MINION="systemctl restart $SALT_MINION_NAME"
fi
T1=$(date '+%s')
echo "salt-minion service uptime: $(( T1-T0 )) seconds"
if (( (T1-T0) > 5 )); then
echo "Patch to update salt-minion was installed but service was not restarted. Forcing restart."
$RESTART_MINION
fi
07070100000020000081B400000000000000000000000168EFD66400000113000000000000000000000000000000000000003000000000susemanager-sls/salt/actionchains/resumessh.slsresumessh:
mgrcompat.module_run:
- name: mgractionchains.resume
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_modules
{%- endif %}
include:
- util.syncall
07070100000021000081B400000000000000000000000168EFD6640000014B000000000000000000000000000000000000002F00000000susemanager-sls/salt/actionchains/startssh.slsstartssh:
mgrcompat.module_run:
- name: mgractionchains.start
- actionchain_id: {{ pillar.get('actionchain_id')}}
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_modules
{%- endif %}
include:
- util.syncall
07070100000022000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001D00000000susemanager-sls/salt/ansible07070100000023000081B400000000000000000000000168EFD6640000010F000000000000000000000000000000000000002600000000susemanager-sls/salt/ansible/init.sls# Ansible Control Node prerequisities state
#
# Copyright (c) 2017 - 2021 SUSE LLC
{% if pillar['addon_group_types'] is defined and 'ansible_control_node' in pillar['addon_group_types'] %}
mgr_ansible_installed:
pkg.installed:
- pkgs:
- ansible
{% endif %}
07070100000024000081B400000000000000000000000168EFD664000002D3000000000000000000000000000000000000003B00000000susemanager-sls/salt/ansible/mgr-ssh-pubkey-authorized.sls{% set tempdir = salt['temp.dir']('', 'mgr-ssh-pubkey-authorized_') %}
{% set tempfile = tempdir + '/mgr-ssh-pubkey-authorized.yml' %}
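{#- This SLS requires pillar data to render properly: inventory, target_host, user and ssh_pubkey #}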
mgr_ssh_pubkey_authorize_playbook_create:
file.managed:
- name: {{ tempfile }}
- source: 'salt://ansible/mgr-ssh-pubkey-authorized.yml'
ssh_pubkey_authorized_via_ansible:
ansible.playbooks:
- name: mgr-ssh-pubkey-authorized.yml
- rundir: {{ tempdir }}
- ansible_kwargs:
inventory: "{{ pillar['inventory'] }}"
limit: "{{ pillar['target_host'] }}"
extra_vars:
user: "{{ pillar['user'] }}"
ssh_pubkey: "{{ pillar['ssh_pubkey'] }}"
mgr_ssh_pubkey_authorize_playbook_cleanup:
file.absent:
- name: {{ tempdir }}
07070100000025000081B400000000000000000000000168EFD664000000FE000000000000000000000000000000000000003B00000000susemanager-sls/salt/ansible/mgr-ssh-pubkey-authorized.yml- name: SSH public key presence on given system
hosts: all
tasks:
- name: Ensure the given ssh_pubkey is in the authorized keys of the given user
authorized_key:
state: present
user: '{{user}}'
key: '{{ssh_pubkey}}'
07070100000026000081B400000000000000000000000168EFD66400000305000000000000000000000000000000000000002D00000000susemanager-sls/salt/ansible/runplaybook.sls#
# SLS to trigger a playbook execution on an Ansible control node
#
# This SLS requires pillar data to render properly.
#
# Example (inventory_path and extra_vars are optional):
#
# pillar = {
# "playbook_path": "/root/ansible-examples/lamp_simple/site.yml",
# "rundir": "/root/ansible-examples/lamp_simple"
# "inventory_path": "/root/ansible-examples/lamp_simple/hosts"
# }
#
run_ansible_playbook:
mgrcompat.module_run:
- name: ansible.playbooks
- playbook: {{ pillar["playbook_path"] }}
- rundir: {{ pillar["rundir"] }}
- flush_cache: {{ pillar["flush_cache"] }}
{%- if "extra_vars" in pillar %}
- extra_vars: {{ pillar["extra_vars"] }}
{% endif %}
{%- if "inventory_path" in pillar %}
- inventory: {{ pillar["inventory_path"] }}
{% endif %}
07070100000027000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/appstreams07070100000028000081B400000000000000000000000168EFD664000003E5000000000000000000000000000000000000002E00000000susemanager-sls/salt/appstreams/configure.slsinclude:
- channels
{% if pillar.get('param_appstreams_disable') %}
disable_appstreams:
appstreams.disabled:
- appstreams:
{%- for module_name in pillar.get('param_appstreams_disable', []) %}
- {{ module_name }}
{%- endfor %}
{%- endif %}
{% if pillar.get('param_appstreams_enable') %}
enable_appstreams:
appstreams.enabled:
- appstreams:
{%- for module_name, stream in pillar.get('param_appstreams_enable', []) %}
- {{ module_name }}:{{ stream }}
{%- endfor %}
- require:
- file: /etc/yum.repos.d/susemanager:channels.repo
{% if pillar.get('param_appstreams_disable') %}
- appstreams: disable_appstreams
{%- endif %}
{%- endif %}
enabled_appstreams:
mgrcompat.module_run:
- name: appstreams.get_enabled_modules
{% if pillar.get('param_appstreams_enable') %}
- require:
- appstreams: enable_appstreams
{% elif pillar.get('param_appstreams_disable') %}
- require:
- appstreams: disable_appstreams
{%- endif %}
07070100000029000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/bootloader0707010000002A000081FD00000000000000000000000168EFD66400000235000000000000000000000000000000000000003900000000susemanager-sls/salt/bootloader/42_uyuni_reinstall.templ#!/bin/sh
set -e
. "$pkgdatadir/grub-mkconfig_lib"
rel_dirname=`make_system_path_relative_to_its_root /boot`
echo "menuentry \"{{ pillar.get('uyuni-reinstall-name') }}\" {"
if [ -d /sys/firmware/efi ] && [ "x${GRUB_USE_LINUXEFI}" = "xtrue" ]; then
echo " linuxefi ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('kopts') }}"
echo " initrdefi ${rel_dirname}/uyuni-reinstall-initrd"
else
echo " linux ${rel_dirname}/uyuni-reinstall-kernel {{ pillar.get('kopts') }}"
echo " initrd ${rel_dirname}/uyuni-reinstall-initrd"
fi
echo "}"
0707010000002B000081B400000000000000000000000168EFD66400000AE0000000000000000000000000000000000000003000000000susemanager-sls/salt/bootloader/autoinstall.sls{% if pillar['kernel'] and pillar['initrd'] %}
mgr_copy_kernel:
file.managed:
- name: /boot/uyuni-reinstall-kernel
- source: salt://bootloader/{{ pillar.get('kernel') }}
mgr_copy_initrd:
file.managed:
- name: /boot/uyuni-reinstall-initrd
- source: salt://bootloader/{{ pillar.get('initrd') }}
{% set loader_type = salt['cmd.run']('if [ -f /etc/sysconfig/bootloader ]; then source /etc/sysconfig/bootloader 2> /dev/null; fi;
if [ -z "${LOADER_TYPE}" ]; then
if [ -n "$(/usr/bin/which grubonce 2> /dev/null)" ] && [ -z "$(/usr/bin/which grub2-mkconfig 2> /dev/null)" ]; then LOADER_TYPE="grub";
elif [ -n "$(/usr/bin/which elilo 2> /dev/null)" ] && [ -z "$(/usr/bin/which grub2-mkconfig 2> /dev/null)" ]; then LOADER_TYPE="elilo";
fi;
fi; /usr/bin/echo "${LOADER_TYPE}"', python_shell=True) %}
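{#- loader_type resolves to "grub" (legacy GRUB), "elilo", or empty, in which case GRUB2 is assumed below #}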
{% if loader_type == 'grub' %}
mgr_create_grub_entry:
file.append:
- name: /boot/grub/menu.lst
- template: jinja
- source: salt://bootloader/grub1_uyuni_reinstall.templ
- require:
- file: mgr_copy_kernel
- file: mgr_copy_initrd
mgr_grub_boot_once:
cmd.run:
- name: /usr/sbin/grubonce "{{ pillar.get('uyuni-reinstall-name') }}"
- onchanges:
- file: mgr_create_grub_entry
{% elif loader_type == 'elilo' %}
mgr_create_elilo_entry:
file.append:
- name: /etc/elilo.conf
- template: jinja
- source: salt://bootloader/elilo_uyuni_reinstall.templ
- require:
- file: mgr_copy_kernel
- file: mgr_copy_initrd
mgr_set_default_boot:
file.replace:
- name: /etc/elilo.conf
- pattern: default = .*
- repl: default = {{ pillar.get('uyuni-reinstall-name') }}
- require:
- file: mgr_create_elilo_entry
mgr_elilo_copy_config:
cmd.run:
- name: /sbin/elilo
- onchanges:
- file: mgr_create_elilo_entry
- file: mgr_set_default_boot
{% else %}
mgr_create_grub2_entry:
file.managed:
- name: /etc/grub.d/42_uyuni_reinstall
- source: salt://bootloader/42_uyuni_reinstall.templ
- template: jinja
- mode: 0755
mgr_set_default_boot:
file.replace:
- name: /etc/default/grub
- pattern: GRUB_DEFAULT=.*
- repl: GRUB_DEFAULT={{ pillar.get('uyuni-reinstall-name') }}
- require:
- file: mgr_create_grub2_entry
mgr_generate_grubconf:
cmd.run:
- name: /usr/sbin/grub2-mkconfig -o /boot/grub2/grub.cfg
- onchanges:
- file: mgr_copy_kernel
- file: mgr_copy_initrd
- file: mgr_create_grub2_entry
- file: mgr_set_default_boot
{% endif %}
mgr_autoinstall_start:
cmd.run:
- name: /usr/sbin/shutdown -r +1
- require:
{% if loader_type == 'grub' %}
- cmd: mgr_grub_boot_once
{% elif loader_type == 'elilo' %}
- cmd: mgr_elilo_copy_config
{% else %}
- cmd: mgr_generate_grubconf
{% endif %}
{% endif %}
0707010000002C000081B400000000000000000000000168EFD664000000CA000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/elilo_uyuni_reinstall.templ
image = uyuni-reinstall-kernel
###Created for automated reinstallation
label = {{ pillar.get('uyuni-reinstall-name') }}
append = "{{ pillar.get('kopts') }}"
initrd = uyuni-reinstall-initrd
0707010000002D000081B400000000000000000000000168EFD664000000BC000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootloader/grub1_uyuni_reinstall.templ
###Created for automated reinstallation
title {{ pillar.get('uyuni-reinstall-name') }}
kernel /boot/uyuni-reinstall-kernel {{ pillar.get('kopts') }}
initrd /boot/uyuni-reinstall-initrd
0707010000002E000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001F00000000susemanager-sls/salt/bootstrap0707010000002F000081B400000000000000000000000168EFD6640000032D000000000000000000000000000000000000002E00000000susemanager-sls/salt/bootstrap/bootstrap.repo# SUSE Multi-Linux Manager bootstrap repository
# Do not edit this file, changes will be overwritten
{%- if grains['os_family'] == 'Debian' %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_sources_deb822 = apt_version and salt['pkg.version_cmp'](apt_version, "2.7.12") >= 0 %}
{%- if apt_sources_deb822 %}
Types: deb
URIs: {{bootstrap_repo_url}}
Suites: bootstrap
Components: main
Trusted: yes
{%- else %}
deb [trusted=yes] {{bootstrap_repo_url}} bootstrap main
{%- endif %}
{%- else %}
[SUSE-Manager-Bootstrap]
name=SUSE-Manager-Bootstrap
type=rpm-md
baseurl={{bootstrap_repo_url}}
gpgcheck=0
enabled=1
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 8 %}
module_hotfixes=1
{%- elif grains['os_family'] == 'Suse' %}
autorefresh=1
keeppackages=0
{%- endif %}
{%- endif %}
07070100000030000081B400000000000000000000000168EFD664000037E1000000000000000000000000000000000000002800000000susemanager-sls/salt/bootstrap/init.sls##
## java bootstrapping calls certs.sls before this state
##
# Make sure no SUSE Multi-Linux Manager server aliasing is left over from ssh-push via tunnel
mgr_server_localhost_alias_absent:
host.absent:
- ip:
- 127.0.0.1
- names:
- {{ salt['pillar.get']('mgr_server') }}
no_ssh_push_key_authorized:
ssh_auth.absent:
- user: {{ salt['pillar.get']('mgr_sudo_user') or 'root' }}
- source: salt://salt_ssh/mgr_ssh_id.pub
- comment: susemanager-default-contact-method
# disable all repos, except repos flagged with keep:* (should be none)
{% set repos_disabled = {'match_str': 'keep:', 'matching': false} %}
{%- include 'channels/disablelocalrepos.sls' %}
{% do repos_disabled.update({'skip': true}) %}
{%- set transactional = grains['transactional'] %}
# SUSE OS Family
{%- if grains['os_family'] == 'Suse' %}
{% set os_base = 'sle' %}
{% set osrelease_major = grains['osrelease_info'][0] %}
#exceptions to the family rule
{%- if "opensuse" in grains['oscodename']|lower %}
{%- if "tumbleweed" in grains['oscodename']|lower %}
{% set os_base = 'opensusetumbleweed' %}
{%- else %}
{% set os_base = 'opensuse' %}
{%- endif %}
{%- endif %}
{%- if (grains['osrelease_info']| length) < 2 %}
{% set osrelease_minor = 0 %}
{%- else %}
{% set osrelease_minor = grains['osrelease_info'][1] %}
{%- endif %}
{%- if transactional %}
{%- if "microos" in grains['oscodename']|lower %}
{% set os_base = 'opensusemicroos' %}
{%- else %}
{%- if grains['osrelease_info'][0]|int >= 6 %}
{% set os_base = 'sl' %}
{%- endif %}
{% set os_base = os_base|string + 'micro' %}
{%- endif %}
{%- endif %}
#end of exceptions
{%- if os_base == 'opensusemicroos' or os_base == 'opensusetumbleweed' %}
{% set osrelease = 'latest/0' %}
{%- else %}
{% set osrelease = osrelease_major|string + '/' + osrelease_minor|string %}
{%- endif %}
{%- endif %}
# Debian OS Family
{%- if grains['os_family'] == 'Debian' %}
## This common part should cover most distros, e.g. Debian, Ubuntu
{%- set os_base = grains['os_family']|lower %}
{% set osrelease = grains['osrelease_info'][0] %}
#exceptions to the family rule
{%- if 'astralinuxce' in grains['osfullname']|lower %}
{%- set os_base = 'astra' %}
{% set osrelease = grains['oscodename'] %}
{%- elif grains['os'] == 'Ubuntu' %}
{%- set os_base = grains['os']|lower %}
{% set osrelease = grains['osrelease_info'][0]|string + '/' + grains['osrelease_info'][1]|string %}
{%- elif grains['os'] == 'Raspbian' %}
{%- set os_base = grains['os']|lower %}
{%- endif %}
#end of exceptions
{%- endif %}
# RedHat OS Family
{%- if grains['os_family'] == 'RedHat' %}
## This common part should cover most distros, e.g. CentOS
{%- set os_base = grains['os']|lower %}
{% set osrelease = grains['osrelease_info'][0] %}
#exception to the family rule
{%- if 'redhat' in grains['osfullname']|lower %}
{%- set os_base = 'res' %}
{%- elif 'sle' in grains['osfullname']|lower %}
{%- set os_base = 'res' %}
{%- elif 'rocky' in grains['osfullname']|lower %}
{%- set os_base = 'rockylinux' %}
{%- elif 'amazon' in grains['osfullname']|lower %}
{%- set os_base = 'amzn' %}
{%- elif 'alibaba' in grains['osfullname']|lower %}
{%- set os_base = 'alibaba' %}
{%- elif 'oracle' in grains['osfullname']|lower %}
{%- set os_base = 'oracle' %}
{%- elif 'openeuler' in grains['osfullname']|lower %}
{%- set os_base = grains['os'] %}
{%- set osrelease = grains['osrelease'] %}
{%- endif %}
#end of exceptions
{%- endif %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ osrelease ~ '/bootstrap/' %}
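{#- yields e.g. https://manager.example.com/pub/repositories/sle/15/6/bootstrap/ (illustrative values) #}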
{%- if grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' or grains['os_family'] == 'Suse' %}
{% set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
{%- if 'status' not in bootstrap_repo_request %}
{{ raise('Missing request status: {}'.format(bootstrap_repo_request)) }}
# if the bootstrap repo is not available, fall back to RES and re-test
{%- elif grains['os_family'] == 'RedHat' and not (0 < bootstrap_repo_request['status'] < 300) %}
{%- set os_base = 'res' %}
{% set osrelease = grains['osrelease_info'][0] %}
{% set bootstrap_repo_url = 'https://' ~ salt['pillar.get']('mgr_server') ~ '/pub/repositories/' ~ os_base ~ '/' ~ osrelease ~ '/bootstrap/' %}
{% set bootstrap_repo_request = salt['http.query'](bootstrap_repo_url + 'repodata/repomd.xml', status=True, verify_ssl=False) %}
{%- if 'status' not in bootstrap_repo_request %}
{{ raise('Missing request status: {}'.format(bootstrap_repo_request)) }}
{%- elif bootstrap_repo_request['status'] == 901 %}
{{ raise(bootstrap_repo_request['error']) }}
{%- endif %}
{%- elif bootstrap_repo_request['status'] == 901 %}
{{ raise(bootstrap_repo_request['error']) }}
{%- endif %}
{%- set bootstrap_repo_exists = (0 < bootstrap_repo_request['status'] < 300) %}
{%- elif grains['os_family'] == 'Debian' %}
{%- set bootstrap_repo_exists = (0 < salt['http.query'](bootstrap_repo_url + 'dists/bootstrap/Release', status=True, verify_ssl=False).get('status', 0) < 300) %}
{%- endif %}
bootstrap_repo:
file.managed:
{%- if grains['os_family'] == 'Suse' %}
- name: /etc/zypp/repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
- name: /etc/yum.repos.d/susemanager:bootstrap.repo
{%- elif grains['os_family'] == 'Debian' %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_sources_deb822 = apt_version and salt['pkg.version_cmp'](apt_version, "2.7.12") >= 0 %}
{%- if apt_sources_deb822 %}
- name: /etc/apt/sources.list.d/susemanager_bootstrap.sources
{%- else %}
- name: /etc/apt/sources.list.d/susemanager_bootstrap.list
{%- endif %}
{%- endif %}
- source:
- salt://bootstrap/bootstrap.repo
- template: jinja
- context:
bootstrap_repo_url: {{bootstrap_repo_url}}
- mode: 644
- require:
- host: mgr_server_localhost_alias_absent
{%- if repos_disabled.count > 0 %}
- mgrcompat: disable_repo_*
{%- endif %}
- onlyif:
- ([ {{ bootstrap_repo_exists }} = "True" ])
{% include 'channels/gpg-keys.sls' %}
{%- set salt_minion_name = 'salt-minion' %}
{%- set salt_config_dir = '/etc/salt' %}
{# We also need to check the case where venv-salt-minion is installed but not present in the bootstrap repo #}
{% set salt_minion_installed = (salt['pkg.info_installed']('venv-salt-minion', attr='version', failhard=False).get('venv-salt-minion', {}).get('version') != None) %}
check_bootstrap_dbg:
cmd.run:
- name: /usr/bin/echo "{{ salt_minion_installed }}"
{% set venv_available_request = salt_minion_installed or salt['http.query'](bootstrap_repo_url + 'venv-enabled-' + grains['osarch'] + '.txt', status=True, verify_ssl=False) %}
{# Prefer venv-salt-minion if available and not disabled #}
{%- set use_venv_salt = salt['pillar.get']('mgr_force_venv_salt_minion') or ((salt_minion_installed or (0 < venv_available_request.get('status', 404) < 300)) and not salt['pillar.get']('mgr_avoid_venv_salt_minion')) %}
{%- if use_venv_salt -%}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set salt_config_dir = '/etc/venv-salt-minion' %}
{%- else -%}
{# Reuse salt_minion_installed to check if salt-minion is installed already; this is required for proper handling on transactional systems #}
{% set salt_minion_installed = (salt['pkg.info_installed']('salt-minion', attr='version', failhard=False).get('salt-minion', {}).get('version') != None) %}
{%- endif -%}
{%- if not transactional %}
salt-minion-package:
pkg.installed:
- name: {{ salt_minion_name }}
- install_recommends: False
- require:
- file: bootstrap_repo
{%- else %}
{%- if not salt['file.directory_exists'](salt_config_dir) %}
{# hack until transactional_update.run is fixed to use venv-salt-call #}
{# Writing to the future: find the latest etc overlay created for package installation and use that as the etc root #}
{# this only works here in bootstrap when we are not running in a transaction #}
{%- set pending_transaction_id = salt['cmd.run']('/usr/bin/snapper --no-dbus list --columns=number | /usr/bin/grep "+" | tr -d "+"', python_shell=True) %}
{%- if not pending_transaction_id %}
{# if we did not get pending transaction id, write to current upperdir #}
{%- set pending_transaction_id = salt['cmd.run']('/usr/bin/snapper --no-dbus list --columns number | /usr/bin/grep "*" | tr -d "*"', python_shell=True) %}
{%- endif %}
{# increase transaction id by 1 since jinja is doing this before new transaction for package install is created #}
{# this works under the assumption that there will be only one transaction between jinja render and the actual package installation #}
{%- set pending_transaction_id = pending_transaction_id|int + 1 %}
{%- set salt_config_dir = '/var/lib/overlay/' + pending_transaction_id|string + salt_config_dir %}
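{#- e.g. with pending snapshot number 42 this becomes /var/lib/overlay/42/etc/venv-salt-minion (illustrative) #}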
{%- endif %}
salt-minion-package:
mgrcompat.module_run:
- name: transactional_update.pkg_install
- pkg: {{ salt_minion_name }}
- args: "--no-recommends"
- unless:
- ([ {{ salt_minion_installed }} = "True" ])
- require:
- file: bootstrap_repo
{%- if not use_venv_salt %}
{# transactional_update executor module is required for classic salt-minion only #}
{# venv-salt-minion has its own venv executor module which invokes transactional_update if needed #}
{{ salt_config_dir }}/minion.d/transactional_update.conf:
file.managed:
- source:
- salt://bootstrap/transactional_update.conf
- template: jinja
- mode: 644
- makedirs: True
- require:
- file: salt-minion-susemanager-config
{%- endif %}
{%- endif %}
{# We must install "python3-contextvars" on DEB based distros, running Salt 3004, with Python version < 3.7, like Ubuntu 18.04 #}
{# We cannot make this package a hard depedendency for Salt DEB package because this is only needed in Ubuntu 18.04 #}
{# DEB based distros with Python version >= 3.7 does not need this package - package is not existing in such cases #}
{# Since we only maintain a single DEB package for all DEB based distros, we need to explicitely install the package here #}
{%- set contextvars_needed = False %}
{%- if salt_minion_name == 'salt-minion' and grains['os_family'] == 'Debian' and grains['pythonversion'][0] >= 3 and grains['pythonversion'][1] < 7 %}
{%- if not (grains['os'] == 'Ubuntu' and grains['osrelease_info'][0] == 16) and not (grains['os'] == 'Debian' and grains['osrelease_info'][0] == 9) %}
{%- set contextvars_needed = True %}
{%- endif %}
{%- endif %}
{% if contextvars_needed %}
salt-install-contextvars:
pkg.installed:
- name: python3-contextvars
- install_recommends: False
- require:
- file: bootstrap_repo
- salt-minion-package
{% endif %}
salt-minion-susemanager-config:
file.managed:
- name: {{ salt_config_dir }}/minion.d/susemanager.conf
- source:
- salt://bootstrap/susemanager.conf
- template: jinja
- mode: 644
- makedirs: True
- require:
- salt-minion-package
salt-minion-minion_id-file:
file.managed:
- name: {{ salt_config_dir }}/minion_id
- contents_pillar: minion_id
- require:
- salt-minion-package
{%- if not transactional %}
{% include 'bootstrap/remove_traditional_stack.sls' %}
{% else %}
include:
- util.syncstates
{%- endif %}
# Manage minion key files in case they are provided in the pillar
{%- if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
salt-minion-key-pub:
file.managed:
- name: {{ salt_config_dir }}/pki/minion/minion.pub
- contents_pillar: minion_pub
- mode: 644
- makedirs: True
- require:
- salt-minion-package
salt-minion-key-pem:
file.managed:
- name: {{ salt_config_dir }}/pki/minion/minion.pem
- contents_pillar: minion_pem
- mode: 400
- makedirs: True
- require:
- salt-minion-package
{%- endif %}
# When bootstrapping a minion that was registered to another master before,
# the master public key must be removed from the minion to prevent key verification failures.
salt-minion-master-pub-wipe:
file.absent:
- name: {{ salt_config_dir }}/pki/minion/minion_master.pub
{%- if not transactional %}
{{ salt_minion_name }}:
service.running:
- enable: True
- require:
- salt-minion-package
- host: mgr_server_localhost_alias_absent
- watch:
- file: salt-minion-minion_id-file
- file: salt-minion-susemanager-config
{%- if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
- file: salt-minion-key-pub
- file: salt-minion-key-pem
{%- endif %}
{%- else %}
{{ salt_minion_name }}:
mgrcompat.module_run:
- name: transactional_update.run
- command: /usr/bin/systemctl enable {{ salt_minion_name }}
- snapshot: continue
- require:
- salt-minion-package
- host: mgr_server_localhost_alias_absent
- file: salt-minion-minion_id-file
- file: salt-minion-susemanager-config
{%- if pillar['minion_pub'] is defined and pillar['minion_pem'] is defined %}
- file: salt-minion-key-pub
- file: salt-minion-key-pem
{%- endif %}
{# Change REBOOT_METHOD to systemd if it is default, otherwise don't change it #}
copy_transactional_conf_file_to_etc:
file.copy:
- name: /etc/transactional-update.conf
- source: /usr/etc/transactional-update.conf
- unless:
- /usr/bin/test -f /etc/transactional-update.conf
transactional_update_set_reboot_method_systemd:
file.keyvalue:
- name: /etc/transactional-update.conf
- key_values:
REBOOT_METHOD: 'systemd'
- separator: '='
- uncomment: '# '
- append_if_not_found: True
- require:
- file: copy_transactional_conf_file_to_etc
- unless:
- /usr/bin/grep -P '^(?=[\s]*+[^#])[^#]*(REBOOT_METHOD=(?!auto))' /etc/transactional-update.conf
disable_reboot_timer_transactional_minions:
cmd.run:
- name: /usr/bin/systemctl disable transactional-update.timer
{%- endif %}
07070100000031000081B400000000000000000000000168EFD664000005E9000000000000000000000000000000000000003C00000000susemanager-sls/salt/bootstrap/remove_traditional_stack.slsinclude:
- util.syncstates
disable_spacewalksd:
service.dead:
- name: rhnsd
- enable: False
disable_spacewalk-update-status:
service.dead:
- name: spacewalk-update-status
- enable: False
disable_osad:
service.dead:
- name: osad
- enable: False
remove_traditional_stack_all:
pkg.removed:
- pkgs:
- spacewalk-check
- spacewalk-client-setup
- osad
- osa-common
- mgr-osad
- spacewalksd
- mgr-daemon
- rhnmd
{%- if grains['os_family'] == 'Suse' %}
- zypp-plugin-spacewalk
{%- elif grains['os_family'] == 'Debian' %}
- apt-transport-spacewalk
{%- endif %}
remove_traditional_stack:
pkg.removed:
- pkgs:
- spacewalk-client-tools
- rhncfg
- mgr-cfg
{%- if grains['os_family'] == 'Suse' %}
- suseRegisterInfo
{%- endif %}
- unless: /usr/bin/rpm -q spacewalk-proxy-common || /usr/bin/rpm -q spacewalk-common
# Removing only apt-transport-spacewalk above
# causes 'apt-get update' to freeze if this
# file is still present and references a
# method that no longer exists.
{%- if grains['os_family'] == 'Debian' %}
remove_spacewalk_sources:
file.absent:
- name: /etc/apt/sources.list.d/spacewalk.list
{%- endif %}
# Remove suseRegisterInfo in a separate yum transaction to avoid being called by
# the yum plugin.
{%- if grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
remove_suse_register_info_rh:
pkg.removed:
- name: suseRegisterInfo
{%- endif %}
07070100000032000081B400000000000000000000000168EFD6640000038C000000000000000000000000000000000000002D00000000susemanager-sls/salt/bootstrap/set_proxy.sls{%- set conf_file = '/etc/salt/minion.d/susemanager.conf' %}
{%- set salt_service = 'salt-minion' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set conf_file = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- set salt_service = 'venv-salt-minion' %}
{%- endif -%}
{%- set pattern = '^master:.*' %}
{% if salt['file.search'](conf_file, pattern) -%}
{{ conf_file }}:
file.line:
- mode: replace
- match: "{{ pattern }}"
- content: "master: {{ pillar['mgr_server'] }}"
restart:
mgrcompat.module_run:
- name: cmd.run_bg
- cmd: "/usr/bin/sleep 2; /usr/sbin/service {{ salt_service }} restart"
- python_shell: true
{% else -%}
non_standard_conf:
test.configurable_test_state:
- changes: False
- result: False
- comment: "Can't change proxy. Salt master is not configured in {{ conf_file }}"
{% endif %}
07070100000033000081B400000000000000000000000168EFD664000002CA000000000000000000000000000000000000003000000000susemanager-sls/salt/bootstrap/susemanager.conf# This file was generated by SUSE Multi-Linux Manager
master: {{ pillar['mgr_server'] }}
server_id_use_crc: adler32
enable_legacy_startup_events: False
enable_fqdns_grains: False
{% if pillar['activation_key'] is defined or pillar['management_key'] is defined %}
grains:
susemanager:
{%- if pillar['activation_key'] is defined %}
activation_key: {{ pillar['activation_key'] }}
{%- endif %}
{%- if pillar['management_key'] is defined %}
management_key: {{ pillar['management_key'] }}
{%- endif %}
{% endif %}
start_event_grains:
- machine_id
- saltboot_initrd
- susemanager
# Define SALT_RUNNING env variable for pkg modules
system-environment:
modules:
pkg:
_:
SALT_RUNNING: 1
07070100000034000081B400000000000000000000000168EFD664000000E9000000000000000000000000000000000000003900000000susemanager-sls/salt/bootstrap/transactional_update.conf# Enable the transactional_update executor
module_executors:
- transactional_update
- direct_call
# Include beacon to check for pending transactions indicating that reboot is necessary
beacons:
reboot_info:
- interval: 10
07070100000035000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001B00000000susemanager-sls/salt/certs07070100000036000081B400000000000000000000000168EFD6640000014F000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/debian.slsmgr_ca_cert:
file.managed:
- name: /usr/local/share/ca-certificates/susemanager/RHN-ORG-TRUSTED-SSL-CERT.crt
- makedirs: True
- source:
- salt://certs/RHN-ORG-TRUSTED-SSL-CERT
mgr_update_ca_certs:
cmd.run:
- name: /usr/sbin/update-ca-certificates
- runas: root
- onchanges:
- file: mgr_ca_cert
07070100000037000081B400000000000000000000000168EFD664000001B7000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/init.slsmgr_absent_ca_package:
pkg.removed:
- name: rhn-org-trusted-ssl-cert
{% include 'certs/{0}.sls'.format(grains['os_family']|lower) %}
mgr_proxy_ca_cert_symlink:
file.symlink:
- name: /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT
- target: /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
- onlyif: /usr/bin/grep -Eq "^proxy.rhn_parent *= *[a-zA-Z0-9]+" /etc/rhn/rhn.conf && -e /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
070701000000380000A1FF00000000000000000000000168EFD6640000000A000000000000000000000000000000000000002900000000susemanager-sls/salt/certs/openeuler.slsredhat.sls07070100000039000081B400000000000000000000000168EFD66400000278000000000000000000000000000000000000002600000000susemanager-sls/salt/certs/redhat.sls{%- if grains['osrelease']|int == 6 %}
enable_ca_store:
cmd.run:
- name: /usr/bin/update-ca-trust enable
- runas: root
- unless: "/usr/bin/update-ca-trust check | /usr/bin/grep \"PEM/JAVA Status: ENABLED\""
{%- endif %}
mgr_ca_cert:
file.managed:
- name: /etc/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT
- source:
- salt://certs/RHN-ORG-TRUSTED-SSL-CERT
{%- if grains['osrelease']|int == 6 %}
- require:
- cmd: enable_ca_store
{%- endif %}
update-ca-certificates:
cmd.run:
- name: /usr/bin/update-ca-trust extract
- runas: root
- onchanges:
- file: mgr_ca_cert
0707010000003A000081B400000000000000000000000168EFD66400000343000000000000000000000000000000000000002400000000susemanager-sls/salt/certs/suse.slsmgr_ca_cert:
file.managed:
{%- if grains['osrelease']|int == 11 %}
- name: /etc/ssl/certs/RHN-ORG-TRUSTED-SSL-CERT.pem
{%- else %}
- name: /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
{%- endif %}
- source: salt://certs/RHN-ORG-TRUSTED-SSL-CERT
{%- if grains['osrelease']|int == 11 %}
mgr_split_ca:
cmd.wait_script:
- name: salt://certs/update-multi-cert.sh
- runas: root
- watch:
- file: mgr_ca_cert
c_rehash:
cmd.run:
- name: /usr/bin/c_rehash
- runas: root
- onchanges:
- file: mgr_ca_cert
- require:
- cmd: mgr_split_ca
{%- else %}
update-ca-certificates:
cmd.run:
- name: /usr/sbin/update-ca-certificates
- runas: root
- onchanges:
- file: mgr_ca_cert
- retry:
attempts: 5
interval: 5
until: True
{%- endif %}
0707010000003B000081B400000000000000000000000168EFD6640000018B000000000000000000000000000000000000003000000000susemanager-sls/salt/certs/update-multi-cert.sh#!/bin/sh
CERT_DIR=/etc/ssl/certs
CERT_FILE=RHN-ORG-TRUSTED-SSL-CERT
TRUST_DIR=/etc/ssl/certs
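# csplit below splits a multi-certificate PEM into single-certificate files,
# e.g. RHN-ORG-TRUSTED-SSL-CERT-00.pem, RHN-ORG-TRUSTED-SSL-CERT-01.pem, ... (illustrative)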
rm -f $TRUST_DIR/${CERT_FILE}-*.pem
if [ -f $CERT_DIR/${CERT_FILE}.pem ]; then
if [ $(grep -- "-----BEGIN CERTIFICATE-----" $CERT_DIR/${CERT_FILE}.pem | wc -l) -gt 1 ]; then
csplit -b "%02d.pem" -f $TRUST_DIR/${CERT_FILE}- $CERT_DIR/${CERT_FILE}.pem '/-----BEGIN CERTIFICATE-----/' '{*}'
fi
fi
0707010000003C000041FD00000000000000000000000468EFD66400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/channels0707010000003D000081B400000000000000000000000168EFD66400000236000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/aptauth.conf# susemanager.conf managed by SUSE Multi-Linux Manager
# Do not edit this file, changes will be overwritten
#
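{#- Example rendered entry (illustrative values):
machine manager.example.com:443/rhn/manager/download/sles15-sp6-pool login <token>
#}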
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {})|dictsort|reverse %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
machine {{hostname}}:{{port}}/rhn/manager/download/{{ chan }} login {{ args['token'] }}
{% endfor %}
0707010000003E000081B400000000000000000000000168EFD66400000E4C000000000000000000000000000000000000002C00000000susemanager-sls/salt/channels/channels.repo# Channels managed by SUSE Multi-Linux Manager
# Do not edit this file, changes will be overwritten
#
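{#- Example rendered stanza for a SUSE minion (illustrative values):
[susemanager:sles15-sp6-pool]
name=SLES15-SP6-Pool
enabled=1
autorefresh=1
baseurl=https://manager.example.com:443/rhn/manager/download/sles15-sp6-pool?<token>
gpgcheck=1
repo_gpgcheck=0
pkg_gpgcheck=1
type=rpm-md
#}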
{% for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {}).items() %}
{%- set protocol = salt['pillar.get']('pkg_download_point_protocol', 'https')%}
{%- set hostname = salt['pillar.get']('pkg_download_point_host', args['host'])%}
{%- set port = salt['pillar.get']('pkg_download_point_port', args.get('port', 443))%}
{%- if grains['os_family'] == 'Debian' %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_support_acd = apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}
{%- set apt_sources_deb822 = apt_version and salt['pkg.version_cmp'](apt_version, "2.7.12") >= 0 %}
{%- if apt_sources_deb822 %}
Types: deb
{%- if apt_support_acd %}
URIs: {{protocol}}://{{hostname}}:{{port}}/rhn/manager/download
{%- else %}
URIs: {{protocol}}://{{ args['token'] }}@{{hostname}}:{{port}}/rhn/manager/download
{%- endif %}
Suites: {{ chan }}/
Components:
{%- if not salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
Trusted: yes
{%- endif %}
{%- if args['gpgkeyurl'] is defined and args['gpgkeyurl'].startswith('file:///') %}
Signed-By: {{ args['gpgkeyurl'][7:] }}
{%- elif args['gpgkeyurl'] is not defined %}
{#- bsc#1234251 #}
{%- if grains['os'] == 'Debian' %}
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
{%- elif grains['os'] == 'Ubuntu' %}
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
{%- endif %}
{%- endif %}
{%- else %}
{%- if args['gpgkeyurl'] is defined and args['gpgkeyurl'].startswith('file:///') %}
{%- set trust_string = '[signed-by=' ~ args['gpgkeyurl'][7:] ~ ']' %}
{%- else %}
{%- set trust_string = '[trusted=yes]' %}
{%- endif %}
{%- if apt_support_acd %}
deb {{ trust_string }} {{protocol}}://{{hostname}}:{{port}}/rhn/manager/download {{ chan }}/
{%- else %}
deb {{ trust_string }} {{protocol}}://{{ args['token'] }}@{{hostname}}:{{port}}/rhn/manager/download {{ chan }}/
{%- endif %}
{%- endif %}
{%- else %}
[{{ args['alias'] }}]
name={{ args['name'] }}
enabled={{ args['enabled'] }}
{%- if args['gpgkeyurl'] is defined and salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
gpgkey={{ args['gpgkeyurl'] }} file:///etc/pki/rpm-gpg/mgr-gpg-pub.key
{%- elif args['gpgkeyurl'] is defined %}
gpgkey={{ args['gpgkeyurl']|replace(pillar.get('mgr_origin_server', 'no-replace-origin-not-found'), pillar.get('mgr_server', '')) }}
{%- elif salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
gpgkey=file:///etc/pki/rpm-gpg/mgr-gpg-pub.key
{%- endif %}
{%- if grains['os_family'] == 'RedHat' %}
{%- set dnf_version = salt['pkg.version']("dnf") %}
{#- DNF was tested to be working with GET parameters for versions >= 4.0.9 #}
{%- set dnf_supports_params = dnf_version and salt['pkg.version_cmp'](dnf_version, "4.0.9") >= 0 %}
{%- if dnf_supports_params %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}?{{ args['token'] }}
{%- else %}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}
susemanager_token={{ args['token'] }}
{%- endif %}
gpgcheck={{ 1 if args['gpgcheck'] == "1" or args['pkg_gpgcheck'] != "0" else 0 }}
repo_gpgcheck={{ args['gpgcheck'] }}
{%- if grains['osmajorrelease'] >= 8 and args['cloned_nonmodular'] %}
module_hotfixes=1
{%- endif %}
{%- else %}
autorefresh={{ args['autorefresh'] }}
baseurl={{protocol}}://{{hostname}}:{{port}}/rhn/manager/download/{{ chan }}?{{ args['token'] }}
gpgcheck={{ args['gpgcheck'] }}
repo_gpgcheck={{ args['repo_gpgcheck'] }}
pkg_gpgcheck={{ args['pkg_gpgcheck'] }}
{%- endif %}
type={{ args['type'] }}
{%- endif %}
{% endfor %}
0707010000003F000081B400000000000000000000000168EFD66400000567000000000000000000000000000000000000003400000000susemanager-sls/salt/channels/disablelocalrepos.sls# Disable all local repos matching or not matching the 'match_str'
# Default arguments: everything except *susemanager:*
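{#- Usage sketch, mirroring how bootstrap/init.sls and channels/init.sls include this file:
{% set repos_disabled = {'match_str': 'SUSE-Manager-Bootstrap', 'matching': true} %}
{%- include 'channels/disablelocalrepos.sls' %}
#}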
{% if not repos_disabled is defined %}
{% set repos_disabled = {'match_str': 'susemanager:', 'matching': false} %}
{% endif %}
{% do repos_disabled.update({'count': 0}) %}
{% set repos = salt['pkg.list_repos']() %}
{% for alias, data in repos.items() %}
{% if grains['os_family'] == 'Debian' %}
{% for entry in data %}
{% if (repos_disabled.match_str in entry['file'])|string == repos_disabled.matching|string and entry.get('enabled', True) %}
disable_repo_{{ repos_disabled.count }}:
mgrcompat.module_run:
- name: pkg.mod_repo
- repo: {{ "'" ~ entry.line ~ "'" }}
- kwargs:
disabled: True
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endfor %}
{% else %}
{% if (repos_disabled.match_str in alias)|string == repos_disabled.matching|string and data.get('enabled', True) in [True, '1'] %}
disable_repo_{{ alias }}:
mgrcompat.module_run:
- name: pkg.mod_repo
- repo: {{ alias }}
- kwargs:
enabled: False
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{% do repos_disabled.update({'count': repos_disabled.count + 1}) %}
{% endif %}
{% endif %}
{% endfor %}
07070100000040000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/dnf-susemanager-plugin07070100000041000081B400000000000000000000000168EFD66400000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.conf[main]
enabled=1
07070100000042000081B400000000000000000000000168EFD664000004A2000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/dnf-susemanager-plugin/susemanagerplugin.py# pylint: disable=missing-module-docstring
import dnf
# pylint: disable-next=unused-import
from dnfpluginscore import _, logger
# pylint: disable-next=missing-class-docstring
class Susemanager(dnf.Plugin):
name = "susemanager"
# pylint: disable-next=useless-parent-delegation
def __init__(self, base, cli):
super(Susemanager, self).__init__(base, cli)
def config(self):
for repo in self.base.repos.get_matching("susemanager:*"):
try:
susemanager_token = repo.cfg.getValue(
section=repo.id, key="susemanager_token"
)
hdr = list(repo.get_http_headers())
# pylint: disable-next=consider-using-f-string
hdr.append("X-Mgr-Auth: %s" % susemanager_token)
repo.set_http_headers(hdr)
logger.debug(
# pylint: disable-next=consider-using-f-string
"Susemanager Plugin: [%s] set token header: 'X-Mgr-Auth: ...%s'"
% (repo.id, susemanager_token[-10:])
)
# pylint: disable-next=bare-except
except:
pass
07070100000043000081B400000000000000000000000168EFD66400000E7C000000000000000000000000000000000000002B00000000susemanager-sls/salt/channels/gpg-keys.sls{%- if salt['pillar.get']('mgr_metadata_signing_enabled', false) %}
{%- if grains['os_family'] == 'Debian' %}
mgr_debian_repo_keyring:
file.managed:
- name: /usr/share/keyrings/mgr-archive-keyring.gpg
- source: salt://gpg/mgr-keyring.gpg
- mode: 644
{% else %}
mgr_deploy_customer_gpg_key:
file.managed:
- name: /etc/pki/rpm-gpg/mgr-gpg-pub.key
- source: salt://gpg/mgr-gpg-pub.key
- makedirs: True
- mode: 644
mgr_trust_customer_gpg_key:
mgrcompat.module_run:
- name: pkg.add_repo_key
- path: /etc/pki/rpm-gpg/mgr-gpg-pub.key
- onchanges:
- file: mgr_deploy_customer_gpg_key
{%- endif %}
{%- endif %}
mgr_deploy_tools_uyuni_key:
file.managed:
- name: /etc/pki/rpm-gpg/uyuni-tools-gpg-pubkey-0d20833e.key
- source: salt://gpg/uyuni-tools-gpg-pubkey-0d20833e.key
- makedirs: True
- mode: 644
mgr_deploy_suse_addon_key:
file.managed:
- name: /etc/pki/rpm-gpg/suse-addon-97a636db0bad8ecc.key
- source: salt://gpg/build-addon-97A636DB0BAD8ECC.key
- makedirs: True
- mode: 644
{%- if grains['os_family'] == 'Suse' and grains.get('osarch').startswith('ppc') %}
mgr_deploy_ibm_gpg_toolchain_key:
file.managed:
- name: /etc/pki/rpm-gpg/ibm-gpg-pubkey-6976a827.key
- source: salt://gpg/ibm-gpg-pubkey-6976a827.key
- makedirs: True
- mode: 644
mgr_deploy_ibm_gpg_power_key:
file.managed:
- name: /etc/pki/rpm-gpg/ibm-gpg-pubkey-3e6e42be.key
- source: salt://gpg/ibm-gpg-pubkey-3e6e42be.key
- makedirs: True
- mode: 644
{%- endif %}
{%- if grains['os_family'] == 'RedHat' %}
{# deploy all keys to the clients. Whether they get imported depends on the channels in use #}
mgr_deploy_res_gpg_key:
file.managed:
- name: /etc/pki/rpm-gpg/res-gpg-pubkey-0182b964.key
- source: salt://gpg/res-gpg-pubkey-0182b964.key
- makedirs: True
- mode: 644
mgr_deploy_liberty_v2_gpg_key:
file.managed:
- name: /etc/pki/rpm-gpg/suse-liberty-v2-gpg-pubkey-177086FAB0F9C64F.key
- source: salt://gpg/suse-liberty-v2-gpg-pubkey-177086FAB0F9C64F.key
- makedirs: True
- mode: 644
mgr_deploy_tools_rhel_gpg_key:
file.managed:
- name: /etc/pki/rpm-gpg/el-tools-gpg-pubkey-39db7c82.key
- source: salt://gpg/el-tools-gpg-pubkey-39db7c82.key
- mode: 644
mgr_deploy_legacy_tools_rhel_gpg_key:
file.managed:
- name: /etc/pki/rpm-gpg/el6-tools-gpg-pubkey-307e3d54.key
- source: salt://gpg/el6-tools-gpg-pubkey-307e3d54.key
- mode: 644
{%- endif %}
{# deploy keys defined by the admin #}
{%- for keyname in salt['pillar.get']('custom_gpgkeys', []) %}
mgr_deploy_{{ keyname }}:
file.managed:
{%- if grains['os_family'] == 'Debian' %}
- name: /usr/share/keyrings/{{ keyname }}
{%- else %}
- name: /etc/pki/rpm-gpg/{{ keyname }}
{%- endif %}
- source: salt://gpg/{{ keyname }}
- mode: 644
{%- endfor %}
{# trust GPG keys used in assigned channels #}
{%- set gpg_urls = [] %}
{%- for chan, args in pillar.get(pillar.get('_mgr_channels_items_name', 'channels'), {}).items() %}
{%- if args['gpgkeyurl'] is defined %}
{%- set keys = args['gpgkeyurl'].split(' ') %}
{%- for gpgkey in keys %}
{%- set keyexists = gpgkey.startswith('file://') and salt['file.file_exists'](gpgkey[7:]) or gpgkey.startswith('http') %}
{%- set gpgkey = gpgkey|replace(pillar.get('mgr_origin_server', 'no-replace-origin-not-found'), pillar.get('mgr_server', '')) %}
{%- if keyexists and gpgkey not in gpg_urls %}
{{ gpg_urls.append(gpgkey) | default("", True) }}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{% for url in gpg_urls %}
{{ url | replace(':', '_') }}:
mgrcompat.module_run:
- name: pkg.add_repo_key
- path: {{ url }}
{%- endfor %}
07070100000044000081B400000000000000000000000168EFD6640000187E000000000000000000000000000000000000002700000000susemanager-sls/salt/channels/init.slsinclude:
- util.syncstates
- certs
- channels.gpg-keys
{%- if not salt['pillar.get']('mgr_disable_local_repos', True) %}
{# disable at least the SUSE-Manager-Bootstrap repo #}
{% set repos_disabled = {'match_str': 'SUSE-Manager-Bootstrap', 'matching': true} %}
{%- endif %}
{% include 'channels/disablelocalrepos.sls' %}
{%- if grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
{%- set yum_version = salt['pkg.version']("yum") %}
{%- set is_yum = yum_version and salt['pkg.version_cmp'](yum_version, "4") < 0 %}
{%- set is_dnf = salt['pkg.version']("dnf") %}
{#- DNF was tested to be working with GET parameters for versions >= 4.0.9 #}
{%- set dnf_supports_params = is_dnf and salt['pkg.version_cmp'](is_dnf, "4.0.9") >= 0 %}
{%- if is_dnf and not dnf_supports_params %}
{%- set dnf_plugins = salt['cmd.run']("/usr/bin/find /usr/lib -type d -name dnf-plugins -printf '%T@ %p\n' | /usr/bin/sort -nr | /usr/bin/cut -d ' ' -s -f 2- | /usr/bin/head -n 1", python_shell=True) %}
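{#- dnf_plugins resolves to the newest dnf-plugins directory, e.g. /usr/lib/python3.6/site-packages/dnf-plugins (illustrative) #}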
{%- if dnf_plugins %}
mgrchannels_susemanagerplugin_dnf:
file.managed:
- name: {{ dnf_plugins }}/susemanagerplugin.py
- source:
- salt://channels/dnf-susemanager-plugin/susemanagerplugin.py
- user: root
- group: root
- mode: 644
mgrchannels_susemanagerplugin_conf_dnf:
file.managed:
- name: /etc/dnf/plugins/susemanagerplugin.conf
- source:
- salt://channels/dnf-susemanager-plugin/susemanagerplugin.conf
- user: root
- group: root
- mode: 644
mgrchannels_enable_dnf_plugins:
file.replace:
- name: /etc/dnf/dnf.conf
- pattern: plugins=.*
- repl: plugins=1
{#- default is '1' when the option is not specified #}
- onlyif: /usr/bin/grep -e 'plugins=0' -e 'plugins=False' -e 'plugins=no' /etc/dnf/dnf.conf
{%- endif %}
{# this breaks the susemanagerplugin as it overwrites HTTP headers (bsc#1214601) #}
mgrchannels_disable_dnf_rhui_plugin:
file.replace:
- name: /etc/yum/pluginconf.d/dnf_rhui_plugin.conf
- pattern: enabled=.*
- repl: enabled=0
- onlyif: /usr/bin/grep -e 'enabled=1' -e 'enabled=True' -e 'enabled=yes' /etc/yum/pluginconf.d/dnf_rhui_plugin.conf
{%- endif %}
{%- if is_yum %}
mgrchannels_susemanagerplugin_yum:
file.managed:
- name: /usr/share/yum-plugins/susemanagerplugin.py
- source:
- salt://channels/yum-susemanager-plugin/susemanagerplugin.py
- user: root
- group: root
- mode: 644
mgrchannels_susemanagerplugin_conf_yum:
file.managed:
- name: /etc/yum/pluginconf.d/susemanagerplugin.conf
- source:
- salt://channels/yum-susemanager-plugin/susemanagerplugin.conf
- user: root
- group: root
- mode: 644
mgrchannels_enable_yum_plugins:
file.replace:
- name: /etc/yum.conf
- pattern: plugins=.*
- repl: plugins=1
- onlyif: /usr/bin/grep plugins=0 /etc/yum.conf
{%- endif %}
{%- endif %}
{%- set apt_version = salt['pkg.version']("apt") %}
{%- set apt_sources_deb822 = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "2.7.12") >= 0 %}
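{#- apt with deb822 support gets a .sources file below, older apt a .list file #}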
mgrchannels_repo:
file.managed:
{%- if grains['os_family'] == 'Suse' %}
- name: "/etc/zypp/repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
- name: "/etc/yum.repos.d/susemanager:channels.repo"
{%- elif grains['os_family'] == 'Debian' %}
{%- if apt_sources_deb822 %}
- name: "/etc/apt/sources.list.d/susemanager:channels.sources"
{%- else %}
- name: "/etc/apt/sources.list.d/susemanager:channels.list"
{%- endif %}
{%- endif %}
- source:
- salt://channels/channels.repo
- template: jinja
- user: root
- group: root
- mode: 644
- require:
- file: mgr_ca_cert
{%- if grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
{%- if is_dnf and not dnf_supports_params %}
- file: mgrchannels_susemanagerplugin_dnf
- file: mgrchannels_susemanagerplugin_conf_dnf
{%- endif %}
{%- if is_yum %}
- file: mgrchannels_susemanagerplugin_yum
- file: mgrchannels_susemanagerplugin_conf_yum
{%- endif %}
{%- endif %}
{%- if apt_sources_deb822 %}
mgrchannels_repo_remove_old_channels_list:
file.absent:
- name: "/etc/apt/sources.list.d/susemanager:channels.list"
{%- endif %}
{%- set apt_support_acd = grains['os_family'] == 'Debian' and apt_version and salt['pkg.version_cmp'](apt_version, "1.6.10") > 0 %}
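{#- apt versions with auth.conf.d support read repository credentials from /etc/apt/auth.conf.d #}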
{%- if apt_support_acd %}
aptauth_conf:
file.managed:
- name: "/etc/apt/auth.conf.d/susemanager.conf"
- source:
- salt://channels/aptauth.conf
- template: jinja
- user: _apt
- group: root
- mode: 600
{%- endif %}
{%- if grains['os_family'] == 'RedHat' or grains['os_family'] == 'openEuler' %}
{%- if is_dnf %}
mgrchannels_dnf_clean_all:
cmd.run:
- name: /usr/bin/dnf clean all
- runas: root
- onchanges:
- file: "/etc/yum.repos.d/susemanager:channels.repo"
- unless: "/usr/bin/dnf repolist | /usr/bin/grep \"repolist: 0$\""
{%- endif %}
{%- if is_yum %}
mgrchannels_yum_clean_all:
cmd.run:
- name: /usr/bin/yum clean all
- runas: root
- onchanges:
- file: "/etc/yum.repos.d/susemanager:channels.repo"
- unless: "/usr/bin/yum repolist | /usr/bin/grep \"repolist: 0$\""
{%- endif %}
{%- elif grains['os_family'] == 'Debian' %}
install_gnupg_debian:
pkg.installed:
- pkgs:
- gnupg
{%- endif %}
{%- if not salt['pillar.get']('susemanager:distupgrade:dryrun', False) %}
{%- if grains['os_family'] == 'Suse' and "opensuse" not in grains['oscodename']|lower %}
mgrchannels_install_products:
product.all_installed:
- require:
- file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{%- if salt['pillar.get']('susemanager:distupgrade', False) %}
- spmigration
{%- endif %}
{%- endif %}
{%- endif %}
{%- if grains['os_family'] == 'Suse' and "opensuse" not in grains['oscodename']|lower %}
{# take care that the suse-build-key package with the PTF key is installed #}
mgrchannels_inst_suse_build_key:
pkg.installed:
- name: suse-build-key
- require:
- file: mgrchannels_repo
{%- endif %}
07070100000045000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003500000000susemanager-sls/salt/channels/yum-susemanager-plugin07070100000046000081B400000000000000000000000168EFD66400000011000000000000000000000000000000000000004C00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.conf[main]
enabled=1
07070100000047000081B400000000000000000000000168EFD66400000205000000000000000000000000000000000000004A00000000susemanager-sls/salt/channels/yum-susemanager-plugin/susemanagerplugin.py# pylint: disable=missing-module-docstring
from yum.plugins import TYPE_CORE
from yum import config
requires_api_version = "2.5"
plugin_type = TYPE_CORE
# pylint: disable-next=unused-argument
def config_hook(conduit):
config.RepoConf.susemanager_token = config.Option()
def init_hook(conduit):
for repo in conduit.getRepos().listEnabled():
susemanager_token = getattr(repo, "susemanager_token", None)
if susemanager_token:
repo.http_headers["X-Mgr-Auth"] = susemanager_token
07070100000048000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002400000000susemanager-sls/salt/cleanup_minion07070100000049000081B400000000000000000000000168EFD66400000870000000000000000000000000000000000000002D00000000susemanager-sls/salt/cleanup_minion/init.sls{%- set salt_minion_name = 'salt-minion' %}
{%- set salt_config_dir = '/etc/salt' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set salt_config_dir = '/etc/venv-salt-minion' %}
{%- endif -%}
{%- if grains['os_family'] == 'RedHat' %}
mgrchannels_repo_clean_all:
file.absent:
- name: /etc/yum.repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Suse' %}
mgrchannels_repo_clean_all:
file.absent:
- name: /etc/zypp/repos.d/susemanager:channels.repo
{%- endif %}
{%- if grains['os_family'] == 'Debian' %}
mgrchannels_repo_clean_channels:
file.absent:
- name: /etc/apt/sources.list.d/susemanager:channels.list
mgrchannels_repo_clean_channels_deb822:
file.absent:
- name: /etc/apt/sources.list.d/susemanager:channels.sources
mgrchannels_repo_clean_auth:
file.absent:
- name: /etc/apt/auth.conf.d/susemanager.conf
mgrchannels_repo_clean_keyring:
file.absent:
- name: /usr/share/keyrings/mgr-archive-keyring.gpg
{%- endif %}
mgr_mark_no_longer_managed:
file.absent:
- name: /etc/sysconfig/rhn/systemid
mgr_remove_salt_config:
file.absent:
- name: {{ salt_config_dir }}/minion.d/susemanager.conf
mgr_remove_salt_config_altname:
file.absent:
- name: {{ salt_config_dir }}/minion.d/master.conf
mgr_remove_salt_priv_key:
file.absent:
- name: {{ salt_config_dir }}/pki/minion/minion.pem
mgr_remove_salt_pub_key:
file.absent:
- name: {{ salt_config_dir }}/pki/minion/minion.pub
mgr_remove_salt_master_key:
file.absent:
- name: {{ salt_config_dir }}/pki/minion/minion_master.pub
{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
mgr_disable_salt:
cmd.run:
- name: /usr/bin/systemctl disable {{ salt_minion_name }}
- require:
- file: mgr_remove_salt_config
{%- if not grains['transactional'] %}
mgr_stop_salt:
cmd.run:
- bg: True
- name: /usr/bin/sleep 9 && /usr/bin/systemctl stop {{ salt_minion_name }}
- order: last
- require:
- file: mgr_remove_salt_config
{% endif %}
{% endif %}
0707010000004A000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002800000000susemanager-sls/salt/cleanup_ssh_minion0707010000004B000081B400000000000000000000000168EFD6640000059F000000000000000000000000000000000000003100000000susemanager-sls/salt/cleanup_ssh_minion/init.slsinclude:
- cleanup_minion
{%- set mgr_sudo_user = salt['pillar.get']('mgr_sudo_user') or 'root' %}
{%- set home = salt['user.info'](mgr_sudo_user)['home'] %}
{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove server to localhost aliasing from /etc/hosts
mgr_remove_mgr_server_localhost_alias:
host.absent:
- ip:
- 127.0.0.1
- names:
- {{ salt['pillar.get']('mgr_server') }}
{%- endif %}
# remove server ssh authorization
mgr_remove_mgr_ssh_identity:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/mgr_ssh_id.pub
{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
# remove proxy ssh authorization (if any)
mgr_remove_proxy_ssh_identity:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
{%- endif %}
# remove own key authorization
mgr_no_own_key_authorized:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- source: {{ home }}/.ssh/mgr_own_id.pub
# remove own keys
mgr_remove_own_ssh_pub_key:
file.absent:
- name: {{ home }}/.ssh/mgr_own_id.pub
- require:
- ssh_auth: mgr_no_own_key_authorized
mgr_remove_own_ssh_key:
file.absent:
- name: {{ home }}/.ssh/mgr_own_id
# Remove logrotate configuration
mgr_remove_logrotate_configuration:
file.absent:
- name: /etc/logrotate.d/salt-ssh
0707010000004C000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/cocoattest0707010000004D000081B400000000000000000000000168EFD6640000062B000000000000000000000000000000000000003000000000susemanager-sls/salt/cocoattest/requestdata.slsinclude:
- channels
mgr_create_attestdir:
file.directory:
- name: /tmp/cocoattest
- dir_mode: 700
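{# collect attestation data only when an environment type is configured in the pillar #}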
{% if salt['pillar.get']('attestation_data:environment_type', 'NONE') not in ['NONE'] %}
mgr_inst_snpguest:
pkg.latest:
- pkgs:
- snpguest
- mokutil
- require:
- sls: channels
mgr_write_request_data:
cmd.run:
- name: /usr/bin/echo "{{ salt['pillar.get']('attestation_data:nonce') }}" | /usr/bin/base64 -d > /tmp/cocoattest/request-data.txt
- onlyif: /usr/bin/test -x /usr/bin/base64
- require:
- file: mgr_create_attestdir
mgr_create_snpguest_report:
cmd.run:
- name: /usr/bin/snpguest report /tmp/cocoattest/report.bin /tmp/cocoattest/request-data.txt
- require:
- cmd: mgr_write_request_data
- file: mgr_create_attestdir
mgr_snpguest_report:
cmd.run:
- name: /usr/bin/cat /tmp/cocoattest/report.bin | /usr/bin/base64
- require:
- cmd: mgr_create_snpguest_report
- file: mgr_create_attestdir
mgr_create_vlek_certificate:
cmd.run:
- name: /usr/bin/snpguest certificates PEM /tmp/cocoattest
- require:
- file: mgr_create_attestdir
mgr_vlek_certificate:
cmd.run:
- name: /usr/bin/cat /tmp/cocoattest/vlek.pem
- require:
- cmd: mgr_create_vlek_certificate
- file: mgr_create_attestdir
mgr_secureboot_enabled:
cmd.run:
- name: /usr/bin/mokutil --sb-state
- success_retcodes:
- 255
- 0
{% endif %}
mgr_cleanup_attest:
file.absent:
- name: /tmp/cocoattest
- require:
- file: mgr_create_attestdir
0707010000004E000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/configuration0707010000004F000081B400000000000000000000000168EFD664000002D5000000000000000000000000000000000000003400000000susemanager-sls/salt/configuration/deploy_files.sls{% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}
file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
file.managed:
{% elif file.type == 'directory' %}
file.directory:
{% elif file.type == 'symlink' %}
file.symlink:
{% endif %}
- name: {{ file.name }}
- makedirs: True
{% if file.type == 'file' %}
- source: {{ file.source }}
- user: {{ file.user }}
- group: {{ file.group }}
- mode: {{ file.mode }}
{% elif file.type == 'directory' %}
- user: {{ file.user }}
- group: {{ file.group }}
- mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
- target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}
07070100000050000081B400000000000000000000000168EFD664000002D5000000000000000000000000000000000000003200000000susemanager-sls/salt/configuration/diff_files.sls{% if pillar.get('param_files', []) %}
{%- for file in pillar.get('param_files') %}
file_deploy_{{ loop.index }}:
{% if file.type == 'file' %}
file.managed:
{% elif file.type == 'directory' %}
file.directory:
{% elif file.type == 'symlink' %}
file.symlink:
{% endif %}
- name: {{ file.name }}
- makedirs: True
{% if file.type == 'file' %}
- source: {{ file.source }}
- user: {{ file.user }}
- group: {{ file.group }}
- mode: {{ file.mode }}
{% elif file.type == 'directory' %}
- user: {{ file.user }}
- group: {{ file.group }}
- mode: {{ file.mode }}
{% elif file.type == 'symlink' %}
- target: {{ file.target }}
{% endif %}
{%- endfor %}
{% endif %}
07070100000051000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/custom07070100000052000081B400000000000000000000000168EFD66400000036000000000000000000000000000000000000002500000000susemanager-sls/salt/custom/init.slsinclude:
- custom.custom_{{ grains['machine_id'] }}
07070100000053000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/custom_groups07070100000054000081B400000000000000000000000168EFD66400000091000000000000000000000000000000000000002C00000000susemanager-sls/salt/custom_groups/init.sls{% if pillar.get('group_ids', []) -%}
include:
{% for gid in pillar.get('group_ids', []) -%}
- custom.group_{{ gid }}
{% endfor %}
{% endif %}
07070100000055000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002000000000susemanager-sls/salt/custom_org07070100000056000081B400000000000000000000000168EFD66400000060000000000000000000000000000000000000002900000000susemanager-sls/salt/custom_org/init.sls{% if pillar['org_id'] is defined %}
include:
- custom.org_{{ pillar['org_id'] }}
{% endif %}
07070100000057000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002100000000susemanager-sls/salt/distupgrade07070100000058000081B400000000000000000000000168EFD664000007FD000000000000000000000000000000000000002A00000000susemanager-sls/salt/distupgrade/init.slsinclude:
- channels
{% if grains['os_family'] == 'Suse' %}
{% if grains['osfullname']|upper == 'SLES' and grains['osmajorrelease']|int >= 15 and pillar.get('susemanager:distupgrade:targetbaseproduct:name', '')|lower == 'sles_sap' %}
{% if not salt['pillar.get']('susemanager:distupgrade:dryrun', False) %}
{% include 'distupgrade/sap.sls' %}
{% endif %}
{% else %}
spmigration:
mgrcompat.module_run:
- name: pkg.upgrade
- dist_upgrade: True
- dryrun: {{ salt['pillar.get']('susemanager:distupgrade:dryrun', False) }}
{% if grains['osrelease_info'][0] >= 12 or grains['transactional'] == True %}
- novendorchange: {{ not salt['pillar.get']('susemanager:distupgrade:allow_vendor_change', False) }}
{% else %}
- fromrepo: {{ salt['pillar.get']('susemanager:distupgrade:channels', []) }}
{% endif %}
- require:
- file: mgrchannels*
{% endif %} {# grains['osfullname']|upper == 'SLES' ... #}
{% elif grains['os_family'] == 'RedHat' %}
{% if not salt['pillar.get']('susemanager:distupgrade:dryrun', False) %}
{# when the pillar liberate:reinstall_packages is not set, it defaults to true, which is the default we want #}
{% include 'liberate/init.sls' %}
{% set logname='/var/log/dnf_sll_migration.log' %}
{% if grains['osrelease_info'][0] == 7 %}
{% set logname='/var/log/yum_sles_es_migration.log' %}
{% elif grains['osrelease_info'][0] == 8 %}
{% set logname='/var/log/dnf_sles_es_migration.log' %}
{% endif %}
spmigration:
cmd.run:
- name: /usr/bin/cat {{ logname }}
- onlyif: /usr/bin/test -f {{ logname }}
spmigration_liberated:
cmd.run:
- name: /usr/bin/cat /etc/sysconfig/liberated
- require:
- file: create_liberation_file
{% endif %}
{% endif %}
{% if not salt['pillar.get']('susemanager:distupgrade:dryrun') %}
{% if pillar.get('missing_successors', [])%}
mgr_release_pkg_removed:
pkg.removed:
- pkgs:
{%- for missing_successor in pillar.get('missing_successors', [])%}
- {{missing_successor}}-release
{%- endfor %}
{% endif %}
{% endif %}
07070100000059000081B400000000000000000000000168EFD66400000555000000000000000000000000000000000000002900000000susemanager-sls/salt/distupgrade/sap.sls{% set sles_release_installed = (salt['pkg.info_installed']('sles-release', attr='version', failhard=False).get('sles-release', {}).get('version') != None) %}
{% if sles_release_installed and pillar.get('susemanager:distupgrade:targetbaseproduct:name', '')|lower == 'sles_sap' %}
mgr_remove_release_package:
cmd.run:
- name: "/usr/bin/rpm -e --nodeps sles-release"
mgr_remove_flavor_package_dvd:
cmd.run:
- name: "/usr/bin/rpm -e --nodeps sles-release-DVD"
- onlyif: /usr/bin/rpm -q sles-release-DVD
mgr_remove_flavor_package_pool:
cmd.run:
- name: "/usr/bin/rpm -e --nodeps sles-release-POOL"
- onlyif: /usr/bin/rpm -q sles-release-POOL
{% set default_modules = ['SLES_SAP', 'sle-module-basesystem', 'sle-module-desktop-applications', 'sle-module-server-applications', 'sle-ha', 'sle-module-sap-applications'] %}
{% for module in default_modules %}
mgr_install_product_{{ module }}:
cmd.run:
- name: /usr/bin/zypper --no-refresh --non-interactive install --no-recommends --auto-agree-with-product-licenses -t product {{ module }}
- require:
- cmd: mgr_remove_release_package
- cmd: mgr_remove_flavor_package_dvd
- cmd: mgr_remove_flavor_package_pool
{% endfor %}
spmigration:
test.nop:
- require:
{%- for module in default_modules %}
- mgr_install_product_{{ module }}
{%- endfor %}
{% endif %}
0707010000005A000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/hardware0707010000005B000081B400000000000000000000000168EFD66400001459000000000000000000000000000000000000003000000000susemanager-sls/salt/hardware/profileupdate.sls{%- if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64', 'aarch64'] %}
mgr_install_dmidecode:
pkg.installed:
{%- if grains['os_family'] == 'Suse' and grains['osrelease'] in ['11.3', '11.4'] %}
- name: pmtools
{%- else %}
- name: dmidecode
{%- endif %}
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{%- endif %}
grains:
mgrcompat.module_run:
- name: grains.items
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
cpuinfo:
mgrcompat.module_run:
- name: status.cpuinfo
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
udev:
mgrcompat.module_run:
- name: udev.exportdb
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
network-interfaces:
mgrcompat.module_run:
- name: network.interfaces
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
network-ips:
mgrcompat.module_run:
- name: sumautil.primary_ips
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_states
- mgrcompat: sync_modules
{%- endif %}
network-modules:
mgrcompat.module_run:
- name: sumautil.get_net_modules
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_states
- mgrcompat: sync_modules
{%- endif %}
instance-flavor:
mgrcompat.module_run:
- name: sumautil.instance_flavor
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_states
- mgrcompat: sync_modules
{%- endif %}
{% if grains['cpuarch'] in ['i386', 'i486', 'i586', 'i686', 'x86_64'] %}
smbios-records-bios:
mgrcompat.module_run:
- name: smbios.records
- rec_type: 0
- clean: False
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
smbios-records-system:
mgrcompat.module_run:
- name: smbios.records
- rec_type: 1
- clean: False
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
smbios-records-baseboard:
mgrcompat.module_run:
- name: smbios.records
- rec_type: 2
- clean: False
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
smbios-records-chassis:
mgrcompat.module_run:
- name: smbios.records
- rec_type: 3
- clean: False
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{% elif grains['cpuarch'] in ['s390', 's390x'] %}
mainframe-sysinfo:
mgrcompat.module_run:
- name: mainframesysinfo.read_values
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{% endif %}
{%- if grains['saltversioninfo'][0] >= 2018 %}
{% if 'mgrnet.dns_fqdns' in salt %}
dns_fqdns:
mgrcompat.module_run:
- name: mgrnet.dns_fqdns
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
- onlyif:
/usr/bin/which host || /usr/bin/which nslookup
{% endif%}
{% if 'network.fqdns' in salt %}
fqdns:
mgrcompat.module_run:
- name: network.fqdns
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{% endif%}
{%- endif%}
{% if grains['os_family'] == 'Suse' %}
sap_workloads:
mgrcompat.module_run:
- name: sap.get_workloads
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_modules
{%- endif %}
{% endif %}
uname:
cmd.run:
- name: /usr/bin/uname -r -v
container_runtime:
mgrcompat.module_run:
- name: container_runtime.get_container_runtime
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_modules
{%- endif %}
include:
- util.syncstates
- util.syncmodules
0707010000005C000081B400000000000000000000000168EFD66400000044000000000000000000000000000000000000002E00000000susemanager-sls/salt/hardware/virtprofile.slsmgr_virt_profile:
mgrcompat.module_run:
- name: virt.vm_info
0707010000005D000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001C00000000susemanager-sls/salt/images0707010000005E000081B400000000000000000000000168EFD66400000A74000000000000000000000000000000000000002700000000susemanager-sls/salt/images/docker.sls{% set logfile = "/var/log/image-" + pillar.get('build_id') + ".log" %}
{% if grains['saltversioninfo'][0] >= 2018 %}
mgr_registries_login:
mgrcompat.module_run:
- name: docker.login
- registries: {{ pillar.get('docker-registries', {}).keys() | list }}
mgr_buildimage:
mgrcompat.module_run:
- name: docker.build
{%- if pillar.get('imagerepopath') is defined %}
- repository: "{{ pillar.get('imagerepopath') }}"
- tag: "{{ pillar.get('imagetag', 'latest') }}"
{%- else %}
- repository: "{{ pillar.get('imagename') }}"
- tag: "{{ pillar.get('imagename').rsplit(':', 1)[1] }}"
{%- endif %}
- path: "{{ pillar.get('builddir') }}"
{%- if grains['saltversioninfo'][0] >= 3002 %}
- logfile: {{ logfile }}
{%- endif %}
- buildargs:
repo: "{{ pillar.get('repo') }}"
cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') is defined %}
{%- for key, value in pillar.get('customvalues').items() %}
{{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
- require:
- mgrcompat: mgr_registries_login
mgr_pushimage:
mgrcompat.module_run:
- name: docker.push
- image: "{{ pillar.get('imagename') }}"
- require:
- mgrcompat: mgr_buildimage
- mgrcompat: mgr_registries_login
{% if 'docker.logout' in salt %}
mgr_registries_logout:
mgrcompat.module_run:
- name: docker.logout
- registries: {{ pillar.get('docker-registries', {}).keys() | list }}
- require:
- mgrcompat: mgr_pushimage
- mgrcompat: mgr_registries_login
{% endif %}
{% else %}
mgr_registries_login:
mgrcompat.module_run:
- name: dockerng.login
- registries: {{ pillar.get('docker-registries', {}).keys() }}
mgr_buildimage:
mgrcompat.module_run:
- name: dockerng.build
- image: "{{ pillar.get('imagename') }}"
- path: "{{ pillar.get('builddir') }}"
- buildargs:
repo: "{{ pillar.get('repo') }}"
cert: "{{ pillar.get('cert') }}"
{%- if pillar.get('customvalues') is defined %}
{%- for key, value in pillar.get('customvalues').items() %}
{{key}}: "{{value}}"
{%- endfor %}
{%- endif %}
- require:
- mgrcompat: mgr_registries_login
mgr_pushimage:
mgrcompat.module_run:
- name: dockerng.push
- image: "{{ pillar.get('imagename') }}"
- require:
- mgrcompat: mgr_buildimage
- mgrcompat: mgr_registries_login
{% endif %}
{%- if grains['saltversioninfo'][0] >= 3002 %}
mgr_buildimage_docker_collect_logs:
file.touch:
- name: {{ logfile }}
mgrcompat.module_run:
- name: cp.push
- path: {{ logfile }}
- upload_path: /image-{{ pillar.get('build_id') }}.log
- order: last
{%- endif %}
0707010000005F000081B400000000000000000000000168EFD66400000384000000000000000000000000000000000000002C00000000susemanager-sls/salt/images/kiwi-detect.sls#!jinja|yaml
# SUSE Multi-Linux Manager kiwi method detection
#
# Copyright (c) 2025 SUSE LLC
{%- set force_kiwi_ng = pillar.get('use_kiwi_ng', salt['pillar.get']('custom_info:use_kiwi_ng', False)) %}
{%- set force_kiwi_podman = salt['pillar.get']('custom_info:use_kiwi_container', False) %}
{%- set osfullname = salt['grains.get']('osfullname') %}
{%- set osmajorrelease = salt['grains.get']('osmajorrelease')|int() %}
{# on SLES11 and SLES12 use legacy Kiwi, on SLES15 use Kiwi NG, and Kiwi in a podman container elsewhere #}
{%- if osfullname == 'SLES' and osmajorrelease < 15 %}
{%- set kiwi_method = 'legacy' %}
{%- elif osfullname == 'SLES' and osmajorrelease == 15 %}
{%- set kiwi_method = 'kiwi-ng' %}
{%- else %}
{%- set kiwi_method = 'podman' %}
{%- endif %}
{# handle overrides #}
{%- if force_kiwi_ng %}
{%- set kiwi_method = 'kiwi-ng' %}
{%- elif force_kiwi_podman %}
{%- set kiwi_method = 'podman' %}
{%- endif %}
07070100000060000081B400000000000000000000000168EFD66400001D4C000000000000000000000000000000000000003100000000susemanager-sls/salt/images/kiwi-image-build.sls#!jinja|yaml
# SUSE Multi-Linux Manager image build state
#
# Copyright (c) 2017 - 2025 SUSE LLC
{% from "images/kiwi-detect.sls" import kiwi_method with context %}
{%- set source = pillar.get('source') %}
{%- set kiwi_dir = '/var/lib/Kiwi/' %}
{%- set common_repo = kiwi_dir + 'repo' %}
{%- set build_id = pillar.get('build_id') %}
{%- set root_dir = kiwi_dir + build_id %}
{%- set source_dir = root_dir + '/source' %}
{%- set chroot_dir = root_dir + '/chroot/' %}
{%- set dest_dir = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
{%- set cache_dir = root_dir + '/cache/' %}
{%- set eib_require = '' %}
{%- set kpartx_require = '' %}
{%- set activation_key = pillar.get('activation_key') %}
{%- set use_bundle_build = pillar.get('use_bundle_build', salt['pillar.get']('custom_info:use_bundle_build', False)) %}
{# Default images and overrides #}
{%- set eib_image = salt['pillar.get']('custom_info:eib_image', 'registry.suse.com/edge/3.2/edge-image-builder:1.1.0') %}
{%- set kiwi_image = salt['pillar.get']('custom_info:kiwi_image', 'registry.suse.com/bci/kiwi:10.2') %}
mgr_buildimage_prepare_source:
file.directory:
- name: {{ root_dir }}
- clean: True
mgrcompat.module_run:
- name: kiwi_source.prepare_source
- source: {{ source }}
- root: {{ root_dir }}
mgr_buildimage_prepare_activation_key_in_source:
file.managed:
- name: {{ source_dir }}/root/etc/salt/minion.d/kiwi_activation_key.conf
- makedirs: True
- contents: |
grains:
susemanager:
activation_key: {{ activation_key }}
{%- if kiwi_method == 'podman' %}
{%- set kpartx_require = '- file: mgr_buildimage_prepare_kpartx_kiwi_yml' %}
{%- set eib_require = '- cmd: mgr_eib' %}
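{#- both requisites above are interpolated into the require list of mgr_buildimage_kiwi_prepare below #}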
mgr_buildimage_prepare_kpartx_kiwi_yml:
file.managed:
- name: {{ source_dir }}/kiwi.yml
- contents: |
mapper:
- part_mapper: kpartx
# EIB support
mgr_eib:
file.directory:
- name: {{ source_dir }}/root/oem
- onlyif:
- test -f {{ source_dir }}/eib/eib.yaml
cmd.run:
- names:
- podman run --rm --privileged -v {{ source_dir }}/eib:/eib:ro,Z {{ eib_image }} build --definition-file=eib.yaml
- xorriso -osirrox on -indev {{ source_dir }}/eib/combustion.iso extract / {{ source_dir }}/root/oem
- require:
- file: mgr_eib
- onlyif:
- test -f {{ source_dir }}/eib/eib.yaml
{# need ca-certificates for kiwi to trust CA #}
{# need /dev to avoid a losetup error during create #}
{% set kiwi_mount = ' -v '+ kiwi_dir + ':/var/lib/Kiwi:Z ' %}
{% set kiwi_yml_mount = ' -v ' + source_dir + '/kiwi.yml:/etc/kiwi.yml:ro,Z ' %}
{%- set kiwi = '/usr/bin/podman run --rm --privileged -v /var/lib/ca-certificates:/var/lib/ca-certificates:ro -v /dev:/dev '+ kiwi_mount + kiwi_yml_mount + kiwi_image + ' kiwi-ng' -%}
{%- elif kiwi_method == 'kiwi-ng' -%}
{%- set kiwi = '/usr/bin/kiwi-ng' -%}
{%- endif -%} {# kiwi_method #}
{%- if kiwi_method == 'podman' or kiwi_method == 'kiwi-ng' %}
{%- set kiwi_options = pillar.get('kiwi_options', '') %}
{%- set bootstrap_packages = ['findutils', 'rhn-org-trusted-ssl-cert-osimage'] %}
{%- macro kiwi_params() -%}
--ignore-repos-used-for-build --add-repo file:{{ common_repo }},rpm-dir,common_repo,90,false,false
{% for pkg in bootstrap_packages -%}
--add-bootstrap-package {{ pkg }}
{% endfor -%}
{%- for repo in pillar.get('kiwi_repositories') -%}
--add-repo {{ repo }},rpm-md,key_repo{{ loop.index }},90,false,false {{ ' ' }}
{%- endfor -%}
{%- endmacro %}
{# we need to remove rpm-md due to a kiwi error during create #}
mgr_buildimage_kiwi_prepare:
cmd.run:
- name: "{{ kiwi }} {{ kiwi_options }} $GLOBAL_PARAMS system prepare $PARAMS"
- hide_output: True
- env:
- GLOBAL_PARAMS: "--logfile={{ root_dir }}/build.log --shared-cache-dir={{ cache_dir }}"
- PARAMS: "--description {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
- require:
- mgrcompat: mgr_buildimage_prepare_source
- file: mgr_buildimage_prepare_activation_key_in_source
{{ kpartx_require }}
{{ eib_require }}
mgr_buildimage_kiwi_create:
cmd.run:
- name: "{{ kiwi }} --logfile={{ root_dir }}/build.log --shared-cache-dir={{ cache_dir }} {{ kiwi_options }} system create --root {{ chroot_dir }} --target-dir {{ dest_dir }}"
- require:
- cmd: mgr_buildimage_kiwi_prepare
{%- if use_bundle_build %}
mgr_buildimage_kiwi_bundle:
cmd.run:
- name: "{{ kiwi }} result bundle --target-dir {{ dest_dir }} --id {{ build_id }} --bundle-dir {{ bundle_dir }}"
- require:
- cmd: mgr_buildimage_kiwi_create
{%- endif %}
{%- else %} {# kiwi legacy #}
# i586 build on x86_64 host must be called with linux32
# consider the build i586 if no x86_64 repo is specified
{%- set kiwi = '/usr/bin/linux32 /usr/bin/kiwi' if (pillar.get('kiwi_repositories')|join(' ')).find('x86_64') == -1 and grains.get('osarch') == 'x86_64' else '/usr/bin/kiwi' %}
# in SLES11 Kiwi the --add-repotype option is required
{%- macro kiwi_params() -%}
--add-repo {{ common_repo }} --add-repotype rpm-dir --add-repoalias common_repo {{ ' ' }}
{%- for repo in pillar.get('kiwi_repositories') -%}
--add-repo {{ repo }} --add-repotype rpm-md --add-repoalias key_repo{{ loop.index }} {{ ' ' }}
{%- endfor -%}
{%- endmacro %}
# old Kiwi can't change cache location, so we have to clear cache before each build
mgr_kiwi_clear_cache:
file.directory:
- name: /var/cache/kiwi/
- makedirs: True
- clean: True
mgr_buildimage_kiwi_prepare:
cmd.run:
- name: "{{ kiwi }} --logfile {{ root_dir }}/build.log --nocolor --force-new-root --prepare {{ source_dir }} --root {{ chroot_dir }} {{ kiwi_params() }}"
- require:
- mgrcompat: mgr_buildimage_prepare_source
- file: mgr_buildimage_prepare_activation_key_in_source
mgr_buildimage_kiwi_create:
cmd.run:
- name: "{{ kiwi }} --logfile {{ root_dir }}/build.log --nocolor --yes --create {{ chroot_dir }} --dest {{ dest_dir }} {{ kiwi_params() }}"
- require:
- cmd: mgr_buildimage_kiwi_prepare
{%- if use_bundle_build %}
mgr_buildimage_kiwi_bundle:
cmd.run:
- name: "{{ kiwi }} --nocolor --yes --bundle-build {{ dest_dir }} --bundle-id {{ build_id }} --destdir {{ bundle_dir }}"
- require:
- cmd: mgr_buildimage_kiwi_create
{%- endif %} {# use_bundle_build #}
{%- endif %} {# else kiwi legacy #}
{%- if pillar.get('use_salt_transport') %}
mgr_buildimage_kiwi_collect_image:
mgrcompat.module_run:
- name: cp.push_dir
{%- if use_bundle_build %}
- path: {{ bundle_dir }}
- require:
- cmd: mgr_buildimage_kiwi_bundle
{%- else %}
- path: {{ dest_dir }}
- require:
- cmd: mgr_buildimage_kiwi_create
{%- endif %}
{%- endif %} {# use_salt_transport #}
mgr_buildimage_info:
mgrcompat.module_run:
- name: kiwi_info.build_info
- dest: {{ dest_dir }}
- build_id: {{ build_id }}
{%- if use_bundle_build %}
- bundle_dest: {{ bundle_dir }}
{%- endif %}
- require:
{%- if pillar.get('use_salt_transport') %}
- mgr_buildimage_kiwi_collect_image
{%- else %}
{%- if use_bundle_build %}
- mgr_buildimage_kiwi_bundle
{%- else %}
- mgr_buildimage_kiwi_create
{%- endif %} {# use_bundle_build #}
{%- endif %} {# use_salt_transport #}
mgr_buildimage_kiwi_collect_logs:
mgrcompat.module_run:
- name: cp.push
- path: {{ root_dir }}/build.log
- upload_path: /image-{{ build_id }}.log
- order: last
07070100000061000081B400000000000000000000000168EFD66400000337000000000000000000000000000000000000003300000000susemanager-sls/salt/images/kiwi-image-inspect.sls# SUSE Multi-Linux Manager for Retail build trigger
#
{%- set root_dir = '/var/lib/Kiwi/' + pillar.get('build_id') %}
{%- set dest_dir = root_dir + '/images.build' %}
{%- set bundle_dir = root_dir + '/images/' %}
{%- set build_id = pillar.get('build_id') %}
{%- set use_bundle_build = pillar.get('use_bundle_build', salt['pillar.get']('custom_info:use_bundle_build', False)) %}
# the goal is to collect all the information required for the
# saltboot image pillar
mgr_inspect_kiwi_image:
mgrcompat.module_run:
- name: kiwi_info.inspect_image
- dest: {{ dest_dir }}
- build_id: {{ build_id }}
{%- if use_bundle_build %}
- bundle_dest: {{ bundle_dir }}
{%- endif %}
mgr_kiwi_cleanup:
cmd.run:
- name: "/usr/bin/rm -rf '{{ root_dir }}'"
- require:
- mgrcompat: mgr_inspect_kiwi_image
07070100000062000081B400000000000000000000000168EFD664000009F5000000000000000000000000000000000000002E00000000susemanager-sls/salt/images/profileupdate.sls{% set container_name = salt['pillar.get']('mgr_container_name', 'mgr_container_' ~ range(1, 10000) | random ) %}
{% if grains['saltversioninfo'][0] >= 2018 %}
mgr_registries_login_inspect:
mgrcompat.module_run:
- name: docker.login
- registries: {{ pillar.get('docker-registries', {}).keys() | list }}
mgr_image_profileupdate:
mgrcompat.module_run:
- name: docker.sls_build
- repository: "{{ container_name }}"
- base: "{{ pillar.get('imagename') }}"
- mods: packages.profileupdate
- dryrun: True
- kwargs:
entrypoint: ""
- require:
- mgrcompat: mgr_registries_login_inspect
mgr_image_inspect:
mgrcompat.module_run:
- name: docker.inspect_image
- m_name: "{{ pillar.get('imagename') }}"
- require:
- mgrcompat: mgr_registries_login_inspect
mgr_container_remove:
mgrcompat.module_run:
- name: docker.rm
- args: [ "{{ container_name }}" ]
- force: False
- onlyif:
- /usr/bin/docker ps -a | /usr/bin/grep "{{ container_name }}" >/dev/null
mgr_image_remove:
mgrcompat.module_run:
- name: docker.rmi
- m_names:
- "{{ pillar.get('imagename') }}"
- force: False
{% if 'docker.logout' in salt %}
mgr_registries_logout:
mgrcompat.module_run:
- name: docker.logout
- registries: {{ pillar.get('docker-registries', {}).keys() | list }}
- require:
- mgrcompat: mgr_registries_login_inspect
- mgrcompat: mgr_image_profileupdate
{% endif %}
{% else %}
mgr_registries_login_inspect:
mgrcompat.module_run:
- name: dockerng.login
- registries: {{ pillar.get('docker-registries', {}).keys() }}
mgr_image_profileupdate:
mgrcompat.module_run:
- name: dockerng.sls_build
- m_name: "{{ container_name }}"
- base: "{{ pillar.get('imagename') }}"
- mods: packages.profileupdate
- dryrun: True
- kwargs:
entrypoint: ""
- require:
- mgrcompat: mgr_registries_login_inspect
mgr_image_inspect:
mgrcompat.module_run:
- name: dockerng.inspect
- m_name: "{{ pillar.get('imagename') }}"
- require:
- mgrcompat: mgr_registries_login_inspect
mgr_container_remove:
mgrcompat.module_run:
- name: dockerng.rm
- args: [ "{{ container_name }}" ]
- force: False
- onlyif:
- /usr/bin/docker ps -a | /usr/bin/grep "{{ container_name }}" >/dev/null
mgr_image_remove:
mgrcompat.module_run:
- name: dockerng.rmi
- m_names:
- "{{ pillar.get('imagename') }}"
- force: False
{% endif %}
include:
- util.syncstates
07070100000063000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/packages07070100000064000081B400000000000000000000000168EFD66400000428000000000000000000000000000000000000002700000000susemanager-sls/salt/packages/init.slsinclude:
- util.syncstates
- .packages_{{ grains['machine_id'] }}
{%- if grains['os_family'] == 'Suse' and grains['instance_id'] is defined and "openSUSE" not in grains['oscodename'] %}
{# install the flavor check tool on cloud instances to be able to detect PAYG instances #}
mgr_install_flavor_check:
pkg.installed:
- name: python-instance-billing-flavor-check
- require:
- file: mgrchannels_*
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_states
{%- else %}
- mgrcompat: sync_states
{%- endif %}
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] < 8 %}
pkg.installed:
- name: yum-utils
- require:
- file: mgrchannels_*
- mgrcompat: sync_states
{%- endif %}
mgr_refresh_grains:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_grains:
{%- else %}
mgrcompat.module_run:
- name: saltutil.sync_grains
{%- endif %}
- reload_grains: true
- onchanges:
- pkg: mgr_install_flavor_check
{%- endif %}
07070100000065000081B400000000000000000000000168EFD66400000158000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/patchdownload.sls{% if pillar.get('param_patches', []) %}
pkg_downloaded-patches:
pkg.patch_downloaded:
- advisory_ids:
{%- for patch in pillar.get('param_patches', []) %}
- {{ patch }}
{%- endfor %}
- require:
- mgrcompat: applychannels
{% endif %}
applychannels:
mgrcompat.module_run:
- name: state.apply
- mods: channels
07070100000066000081B400000000000000000000000168EFD664000004D3000000000000000000000000000000000000002F00000000susemanager-sls/salt/packages/patchinstall.sls{% if grains.get('saltversioninfo', []) < [2015, 8, 12] %}
{{ salt.test.exception("You are running an old version of salt-minion that does not support patching. Please update salt-minion and try again.") }}
{% endif %}
{% if pillar.get('param_update_stack_patches', []) %}
mgr_update_stack_patches:
pkg.patch_installed:
- refresh: true
- advisory_ids:
{%- for patch in pillar.get('param_update_stack_patches', []) %}
- {{ patch }}
{%- endfor %}
- diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
- require:
- file: mgrchannels*
{% endif %}
{% if pillar.get('param_regular_patches', []) %}
mgr_regular_patches:
pkg.patch_installed:
{% if not pillar.get('param_update_stack_patches', []) %}
- refresh: true
{% endif %}
- novendorchange: {{ not pillar.get('allow_vendor_change', False) }}
- advisory_ids:
{%- for patch in pillar.get('param_regular_patches', []) %}
- {{ patch }}
{%- endfor %}
- diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
- require:
- file: mgrchannels*
{% if pillar.get('param_update_stack_patches', []) %}
- pkg: mgr_update_stack_patches
{% endif %}
{% endif %}
include:
- channels
07070100000067000081B400000000000000000000000168EFD66400000207000000000000000000000000000000000000002E00000000susemanager-sls/salt/packages/pkgdownload.sls{% if pillar.get('param_pkgs') %}
pkg_downloaded:
pkg.downloaded:
- pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
{%- if grains['os_family'] == 'Debian' %}
- {{ pkg }}:{{ arch }}: {{ version }}
{%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
- {{ pkg }}.{{ arch }}: {{ version }}
{%- else %}
- {{ pkg }}: {{ version }}
{%- endif %}
{%- endfor %}
- require:
- file: mgrchannels*
{% endif %}
include:
- channels
07070100000068000081B400000000000000000000000168EFD664000002F6000000000000000000000000000000000000002D00000000susemanager-sls/salt/packages/pkginstall.sls{% if pillar.get('param_pkgs') %}
pkg_installed:
pkg.installed:
- refresh: true
{%- if grains['os_family'] == 'Debian' %}
- skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
{%- endif %}
- pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
{%- if grains['os_family'] == 'Debian' %}
- {{ pkg }}:{{ arch }}: {{ version }}
{%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
- {{ pkg }}.{{ arch }}: {{ version }}
{%- else %}
- {{ pkg }}: {{ version }}
{%- endif %}
{%- endfor %}
- diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
- require:
- file: mgrchannels*
{% endif %}
include:
- channels
07070100000069000081B400000000000000000000000168EFD664000000E1000000000000000000000000000000000000002A00000000susemanager-sls/salt/packages/pkglock.slspkg_locked:
pkg.held:
- replace: True
{% if pillar.get('param_pkgs') %}
- pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
- {{ pkg }}
{%- endfor %}
{%- else %}
- pkgs: []
{% endif %}
0707010000006A000081B400000000000000000000000168EFD66400000417000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgremove.sls{% if pillar.get('param_pkgs') %}
pkg_removed:
pkg.removed:
- pkgs:
{%- for pkg, arch, version in pillar.get('param_pkgs', []) %}
{%- if grains['os_family'] == 'Debian' %}
- {{ pkg }}:{{ arch }}: {{ version }}
{%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
- {{ pkg }}.{{ arch }}: {{ version }}
{%- else %}
- {{ pkg }}: {{ version }}
{%- endif %}
{%- endfor %}
- require:
- file: mgrchannels*
{% endif %}
{% if pillar.get('param_pkgs_duplicates') %}
{% for pkg, arch, version in pillar["param_pkgs_duplicates"] %}
pkg_removed_dup_{{ loop.index0 }}:
pkg.removed:
- pkgs:
{%- if grains['os_family'] == 'Debian' %}
- {{ pkg }}:{{ arch }}: {{ version }}
{%- elif grains.get('__suse_reserved_pkg_all_versions_support', False) %}
- {{ pkg }}.{{ arch }}: {{ version }}
{%- else %}
- {{ pkg }}: {{ version }}
{%- endif %}
- require:
- file: mgrchannels*
{% endfor %}
{% endif %}
include:
- channels
0707010000006B000081B400000000000000000000000168EFD66400000164000000000000000000000000000000000000002C00000000susemanager-sls/salt/packages/pkgupdate.slsinclude:
- channels
mgr_pkg_update:
pkg.uptodate:
- refresh: True
{%- if grains['os_family'] == 'Debian' %}
- skip_verify: {{ not pillar.get('mgr_metadata_signing_enabled', false) }}
- dist_upgrade: True
{%- endif %}
- diff_attr: ['epoch', 'version', 'release', 'arch', 'install_date_time_t']
- require:
- file: mgrchannels*
0707010000006C000081B400000000000000000000000168EFD6640000070A000000000000000000000000000000000000003000000000susemanager-sls/salt/packages/profileupdate.slspackages:
mgrcompat.module_run:
- name: pkg.info_installed
- kwargs: {
attr: 'status,arch,epoch,version,release,install_date_time_t',
{%- if grains.get('__suse_reserved_pkg_all_versions_support', False) %}
errors: report,
all_versions: true
{%- else %}
errors: report
{%- endif %}
}
{% if grains['os_family'] == 'Suse' %}
products:
mgrcompat.module_run:
- name: pkg.list_products
{% elif grains['os_family'] == 'RedHat' %}
{% include 'packages/redhatproductinfo.sls' %}
{% if grains['osmajorrelease'] >= 8 %}
modules:
mgrcompat.module_run:
- name: appstreams.get_enabled_modules
{% endif %}
{% elif grains['os_family'] == 'Debian' %}
debianrelease:
cmd.run:
- name: /usr/bin/cat /etc/os-release
- onlyif: /usr/bin/test -f /etc/os-release
{% endif %}
include:
- util.syncgrains
- util.syncstates
- util.syncmodules
grains_update:
mgrcompat.module_run:
- name: grains.items
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_grains
{%- else %}
- mgrcompat: sync_grains
{%- endif %}
{% if not pillar.get('imagename') %}
status_uptime:
mgrcompat.module_run:
- name: status.uptime
{%- if not grains.get('transactional', False) %}
reboot_required:
mgrcompat.module_run:
- name: reboot_info.reboot_required
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] < 8 %}
- onlyif:
- which needs-restarting
{%- endif %}
{%- endif %}
kernel_live_version:
mgrcompat.module_run:
- name: sumautil.get_kernel_live_version
- require:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
- saltutil: sync_modules
{%- else %}
- mgrcompat: sync_modules
{%- endif %}
{% endif %}
0707010000006D000081B400000000000000000000000168EFD66400000505000000000000000000000000000000000000003400000000susemanager-sls/salt/packages/redhatproductinfo.sls{% if grains['os_family'] == 'RedHat' %}
rhelrelease:
cmd.run:
- name: /usr/bin/cat /etc/redhat-release
- onlyif: /usr/bin/test -f /etc/redhat-release -a ! -L /etc/redhat-release
alibabarelease:
cmd.run:
- name: /usr/bin/cat /etc/alinux-release
- onlyif: /usr/bin/test -f /etc/alinux-release
centosrelease:
cmd.run:
- name: /usr/bin/cat /etc/centos-release
- onlyif: /usr/bin/test -f /etc/centos-release
oraclerelease:
cmd.run:
- name: /usr/bin/cat /etc/oracle-release
- onlyif: /usr/bin/test -f /etc/oracle-release
amazonrelease:
cmd.run:
- name: /usr/bin/cat /etc/system-release
- onlyif: /usr/bin/test -f /etc/system-release && /usr/bin/grep -qi Amazon /etc/system-release
almarelease:
cmd.run:
- name: /usr/bin/cat /etc/almalinux-release
- onlyif: /usr/bin/test -f /etc/almalinux-release
rockyrelease:
cmd.run:
- name: /usr/bin/cat /etc/rocky-release
- onlyif: /usr/bin/test -f /etc/rocky-release
respkgquery:
cmd.run:
- name: /usr/bin/rpm -q --whatprovides 'sles_es-release-server'
- onlyif: /usr/bin/rpm -q --whatprovides 'sles_es-release-server'
sllpkgquery:
cmd.run:
- name: /usr/bin/rpm -q --whatprovides 'sll-release'
- onlyif: /usr/bin/rpm -q --whatprovides 'sll-release'
{% endif %}
0707010000006E000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001B00000000susemanager-sls/salt/proxy0707010000006F000081B400000000000000000000000168EFD6640000199E000000000000000000000000000000000000003200000000susemanager-sls/salt/proxy/apply_proxy_config.sls{%- set mgrpxy_installed = salt['pkg.version']('mgrpxy') %}
{%- set mgrpxy_status_output = salt['cmd.run']('mgrpxy status 2>&1', python_shell=True) %}
{%- set mgrpxy_operation = 'install' if not mgrpxy_installed or 'Error: no installed proxy detected' in mgrpxy_status_output else 'upgrade' %}
{%- set transactional = grains['transactional'] %}
{%- set installPackages = not (pillar.get('registries') is mapping and pillar.get('registries') | length > 0) %}
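{#- install the proxy image RPMs only when no container registries are passed in the pillar #}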
podman_installed:
pkg.installed:
- name: podman
mgrpxy_installed:
pkg.installed:
- name: mgrpxy
- refresh: True
/etc/uyuni/proxy/config.yaml:
file.managed:
- name: /etc/uyuni/proxy/config.yaml
- user: root
- group: root
- mode: 644
- makedirs: True
- template: jinja
- contents: |
server: {{ pillar['server'] }}
ca_crt: |
{{ pillar['ca_crt'] | replace('\\n', '\n') | indent(10) }}
proxy_fqdn: {{ pillar['proxy_fqdn'] }}
max_cache_size_mb: {{ pillar['max_cache_size_mb']|int }}
server_version: "{{ pillar['server_version'] }}"
email: {{ pillar['email'] }}
replace_fqdns: {{ pillar['replace_fqdns'] }}
/etc/uyuni/proxy/httpd.yaml:
file.managed:
- name: /etc/uyuni/proxy/httpd.yaml
- user: root
- group: root
- mode: 600
- makedirs: True
- template: jinja
- contents: |
httpd:
system_id: {{ pillar['httpd']['system_id'] }}
server_crt: |
{{ pillar['httpd']['server_crt'] | replace('\\n', '\n') | indent(12) }}
server_key: |
{{ pillar['httpd']['server_key'] | replace('\\n', '\n') | indent(12) }}
/etc/uyuni/proxy/ssh.yaml:
file.managed:
- name: /etc/uyuni/proxy/ssh.yaml
- user: root
- group: root
- mode: 600
- makedirs: True
- template: jinja
- contents: |
ssh:
server_ssh_key_pub: |
{{ pillar['ssh']['server_ssh_key_pub'] | replace('\\n', '\n') | indent(12) }}
server_ssh_push: |
{{ pillar['ssh']['server_ssh_push'] | replace('\\n', '\n') | indent(12) }}
server_ssh_push_pub: |
{{ pillar['ssh']['server_ssh_push_pub'] | replace('\\n', '\n') | indent(12) }}
{% if installPackages %}
{%- set matched_pkgs_regex = salt['pkg.search']('suse-multi-linux-manager-*proxy*-image', regex=True) or {} %}
{%- set pkg_names = matched_pkgs_regex.keys() | list %}
install_proxy_packages:
pkg.installed:
- pkgs:
{%- for pkg in pkg_names %}
- {{ pkg }}
{%- endfor %}
- refresh: True
{% endif %}
{% set args = [] %}
{% if salt['pillar.get']('registries:proxy-httpd:url') and salt['pillar.get']('registries:proxy-httpd:tag') %}
{% do args.append("--httpd-image " ~ salt['pillar.get']('registries:proxy-httpd:url') ~ " --httpd-tag " ~ salt['pillar.get']('registries:proxy-httpd:tag')) %}
{% endif %}
{% if salt['pillar.get']('registries:proxy-salt-broker:url') and salt['pillar.get']('registries:proxy-salt-broker:tag') %}
{% do args.append("--saltbroker-image " ~ salt['pillar.get']('registries:proxy-salt-broker:url') ~ " --saltbroker-tag " ~ salt['pillar.get']('registries:proxy-salt-broker:tag')) %}
{% endif %}
{% if salt['pillar.get']('registries:proxy-squid:url') and salt['pillar.get']('registries:proxy-squid:tag') %}
{% do args.append("--squid-image " ~ salt['pillar.get']('registries:proxy-squid:url') ~ " --squid-tag " ~ salt['pillar.get']('registries:proxy-squid:tag')) %}
{% endif %}
{% if salt['pillar.get']('registries:proxy-ssh:url') and salt['pillar.get']('registries:proxy-ssh:tag') %}
{% do args.append("--ssh-image " ~ salt['pillar.get']('registries:proxy-ssh:url') ~ " --ssh-tag " ~ salt['pillar.get']('registries:proxy-ssh:tag')) %}
{% endif %}
{% if salt['pillar.get']('registries:proxy-tftpd:url') and salt['pillar.get']('registries:proxy-tftpd:tag') %}
{% do args.append("--tftpd-image " ~ salt['pillar.get']('registries:proxy-tftpd:url') ~ " --tftpd-tag " ~ salt['pillar.get']('registries:proxy-tftpd:tag')) %}
{% endif %}
{% if transactional %}
# On a transactional system, install a oneshot service that runs the
# mgrpxy install/upgrade command after the next reboot
/etc/systemd/system/apply_proxy_config.service:
file.managed:
- name: /etc/systemd/system/apply_proxy_config.service
- user: root
- group: root
- mode: 664
- makedirs: True
- template: jinja
- contents: |
[Unit]
Description=Install/Update mgrpxy proxy
After=network-online.target podman.service
Requires=network-online.target podman.service
[Service]
Type=oneshot
ExecStart=/bin/bash -c '/usr/bin/mgrpxy {{ mgrpxy_operation }} podman --logLevel debug {{ args | join(" ") }} 2>&1 | /usr/bin/tee -a /var/log/mgrpxy_install.log'
ExecStartPost=/bin/bash -c 'STATUS_OUTPUT=$(mgrpxy status 2>&1); \
/usr/bin/echo "$STATUS_OUTPUT" | /usr/bin/tee -a /var/log/mgrpxy_install.log; \
if ! /usr/bin/echo "$STATUS_OUTPUT" | /usr/bin/grep -q "Error: no installed proxy detected"; then \
/usr/bin/echo "mgrpxy was successfully {{ mgrpxy_operation }}ed. Removing apply mgrpxy service and configuration file." | /usr/bin/tee -a /var/log/mgrpxy_install.log; \
/usr/bin/rm -f /etc/systemd/system/apply_proxy_config.service; \
else \
/usr/bin/echo "mgrpxy status check failed. Service file will remain for troubleshooting." | /usr/bin/tee -a /var/log/mgrpxy_install.log; \
fi'
[Install]
WantedBy=multi-user.target
- require:
- file: /etc/uyuni/proxy/config.yaml
- file: /etc/uyuni/proxy/httpd.yaml
- file: /etc/uyuni/proxy/ssh.yaml
- pkg: podman_installed
- pkg: mgrpxy_installed
# Enable apply_proxy_config.service now so that it runs after the next reboot
enable_apply_proxy_config_service:
cmd.run:
- name: /usr/bin/systemctl enable apply_proxy_config.service
- require:
- file: /etc/systemd/system/apply_proxy_config.service
{% else %}
apply_proxy_configuration:
cmd.run:
- name: >
/usr/bin/mgrpxy {{ mgrpxy_operation }} podman --logLevel debug {{ args | join(" ") }}
2>&1 | /usr/bin/tee -a /var/log/mgrpxy_install.log
- shell: /bin/bash
- require:
- file: /etc/uyuni/proxy/config.yaml
- file: /etc/uyuni/proxy/httpd.yaml
- file: /etc/uyuni/proxy/ssh.yaml
- pkg: podman_installed
- pkg: mgrpxy_installed
{%- endif %}
07070100000070000081B400000000000000000000000168EFD6640000003C000000000000000000000000000000000000002000000000susemanager-sls/salt/reboot.slsmgr_reboot:
cmd.run:
- name: /usr/sbin/shutdown -r +5
07070100000071000081B400000000000000000000000168EFD66400000413000000000000000000000000000000000000002800000000susemanager-sls/salt/rebootifneeded.sls{%- if salt['pillar.get']('mgr_reboot_if_needed', True) and salt['pillar.get']('custom_info:mgr_reboot_if_needed', 'true')|lower in ('true', '1', 'yes', 't') %}
mgr_reboot_if_needed:
cmd.run:
- name: /usr/sbin/shutdown -r +5
{%- if grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 8 %}
- onlyif: '/usr/bin/dnf -q needs-restarting -r; /usr/bin/test $? -eq 1'
{%- elif grains['os_family'] == 'RedHat' and grains['osmajorrelease'] >= 7 %}
- onlyif: '/usr/bin/needs-restarting -r; /usr/bin/test $? -eq 1'
{%- elif grains['os_family'] == 'Debian' %}
- onlyif:
- /usr/bin/test -e /var/run/reboot-required
{%- elif grains.get('transactional', False) and grains['os_family'] == 'Suse' %}
- onlyif:
- /usr/bin/snapper list --columns number 2>/dev/null | /usr/bin/grep '+'
{%- elif grains['os_family'] == 'Suse' and grains['osmajorrelease'] <= 12 %}
- onlyif:
- /usr/bin/test -e /boot/do_purge_kernels
{%- else %}
- onlyif: '/usr/bin/zypper ps -s; [ $? -eq 102 ]'
{%- endif %}
{%- endif %}
07070100000072000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001F00000000susemanager-sls/salt/recurring07070100000073000081B400000000000000000000000168EFD66400000038000000000000000000000000000000000000002800000000susemanager-sls/salt/recurring/init.slsinclude:
- recurring.recurring_{{ pillar['rec_id'] }}
07070100000074000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002400000000susemanager-sls/salt/remotecommands07070100000075000081B400000000000000000000000168EFD6640000024E000000000000000000000000000000000000002D00000000susemanager-sls/salt/remotecommands/init.sls{#
`cwd` defines where the script is written to (temporarily) and from where it's executed.
Users can set `mgr_remote_cmd_cwd` in pillar data to avoid writing the script to
/tmp when needed, for example when /tmp is mounted with noexec
#}
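{# example pillar overriding the default cwd (hypothetical values):
mgr_remote_cmd_cwd: /var/tmp
#}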
{%- set cwd = pillar.get('mgr_remote_cmd_cwd') %}
remote_command:
cmd.script:
- source: {{ pillar.get('mgr_remote_cmd_script') }}
- runas: {{ pillar.get('mgr_remote_cmd_runas', 'root') }}
- timeout: {{ pillar.get('mgr_remote_cmd_timeout') }}
{%- if cwd %}
- cwd: {{ cwd }}
{%- endif %}
# TODO GID
07070100000076000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/scap07070100000077000081B400000000000000000000000168EFD664000005ED000000000000000000000000000000000000002300000000susemanager-sls/salt/scap/init.slsmgr_scap:
mgrcompat.module_run:
{%- if "openscap.xccdf_eval" not in salt %}
- name: openscap.xccdf
- params: {{ pillar.get('mgr_scap_params')['old_parameters'] }}
{%- else %}
- name: openscap.xccdf_eval
- xccdffile: {{ pillar['mgr_scap_params']['xccdffile'] }}
{%- if "ovalfiles" in pillar.get('mgr_scap_params') %}
- ovalfiles:
{%- for oval in pillar['mgr_scap_params']['ovalfiles'] %}
- {{ oval }}
{%- endfor %}
{%- endif %}
- kwargs:
results: results.xml
report: report.html
oval_results: True
{%- if "profile" in pillar.get('mgr_scap_params') %}
profile: {{ pillar['mgr_scap_params']['profile'] }}
{%- endif %}
{%- if "rule" in pillar.get('mgr_scap_params') %}
rule: {{ pillar['mgr_scap_params']['rule'] }}
{%- endif %}
{%- if "remediate" in pillar.get('mgr_scap_params') %}
remediate: {{ pillar['mgr_scap_params']['remediate'] }}
{%- endif %}
{%- if "fetch_remote_resources" in pillar.get('mgr_scap_params') %}
fetch_remote_resources: {{ pillar['mgr_scap_params']['fetch_remote_resources'] }}
{%- endif %}
{%- if "tailoring_file" in pillar.get('mgr_scap_params') %}
tailoring_file: {{ pillar['mgr_scap_params']['tailoring_file'] }}
{%- endif %}
{%- if "tailoring_id" in pillar.get('mgr_scap_params') %}
tailoring_id: {{ pillar['mgr_scap_params']['tailoring_id'] }}
{%- endif %}
{% endif %}
07070100000078000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001E00000000susemanager-sls/salt/services07070100000079000081B400000000000000000000000168EFD6640000062E000000000000000000000000000000000000002900000000susemanager-sls/salt/services/docker.sls{% if pillar['addon_group_types'] is defined and 'container_build_host' in pillar['addon_group_types'] %}
{% set use_venv_salt = salt['pkg.version']('venv-salt-minion') %}
mgr_install_docker:
pkg.installed:
- pkgs:
- git-core
- docker: '>=1.9.0'
{%- if not use_venv_salt %}
{%- if grains['pythonversion'][0] == 3 %}
{%- if grains['osmajorrelease'] == 12 %}
- python3-docker-py: '>=1.6.0'
{%- else %}
{%- if grains['os_family'] == 'Suse' and grains['pythonversion'][1] > 6 %}
- python3{{ grains['pythonversion'][1] }}-docker: '>=1.6.0'
{%- else %}
- python3-docker: '>=1.6.0'
{%- endif %}
{%- endif %}
{%- else %}
- python-docker-py: '>=1.6.0'
{%- endif %}
{%- if grains['saltversioninfo'][0] >= 2018 %}
{%- if grains['os_family'] == 'Suse' and grains['pythonversion'][1] > 6 %}
- python3{{ grains['pythonversion'][1] }}-salt
{%- else %}
- python3-salt
{%- endif %}
{%- if grains['saltversioninfo'][0] < 3002 and salt['pkg.info_available']('python-Jinja2', 'python2-Jinja2') and salt['pkg.info_available']('python', 'python2') and salt['pkg.info_available']('python2-salt') %}
- python2-salt
{%- endif %}
{%- endif %}
{%- endif %}
mgr_docker_service:
service.running:
- name: docker
- enable: True
- require:
- pkg: mgr_install_docker
mgr_min_salt:
pkg.installed:
- pkgs:
{%- if use_venv_salt %}
- venv-salt-minion
{%- else %}
- salt: '>=2016.11.1'
- salt-minion: '>=2016.11.1'
{%- endif %}
- order: last
{% endif %}
0707010000007A000081B400000000000000000000000168EFD664000009E1000000000000000000000000000000000000003400000000susemanager-sls/salt/services/kiwi-image-server.sls#!jinja|yaml
# SUSE Multi-Linux Manager image server preparation
#
# Copyright (c) 2017 - 2025 SUSE LLC
{% from "images/kiwi-detect.sls" import kiwi_method with context %}
{% if 'osimage_build_host' in pillar.get('addon_group_types', []) %}
{%- set kiwi_dir = '/var/lib/Kiwi' %}
{# Set the correct package list based on the SLES version, independent of kiwi-ng usage #}
{%- if kiwi_method == 'legacy' %}
{%- set kiwi_modules = ['kiwi', 'kiwi-desc-netboot', 'kiwi-desc-saltboot', 'kiwi-desc-vmxboot', 'kiwi-desc-oemboot', 'kiwi-desc-isoboot'] %}
{%- elif kiwi_method == 'kiwi-ng' %}
{%- set kiwi_modules = ['python3-kiwi', 'kiwi-systemdeps-disk-images', 'kiwi-systemdeps-image-validation', 'kiwi-systemdeps-iso-media', 'kiwi-systemdeps-containers', 'kiwi-boot-descriptions'] %}
{%- elif kiwi_method == 'podman' %}
{#- TODO: add kiwi container rpm once available#}
{%- set kiwi_modules = ['podman', 'xorriso'] %}
{%- else %}
kiwi_unknown_method:
test.fail_without_changes:
- name: Unknown kiwi method {{ kiwi_method }}
{%- endif %}
mgr_install_kiwi:
pkg.installed:
- pkgs:
- git-core
{%- for km in kiwi_modules %}
- {{ km }}
{%- endfor %}
mgr_kiwi_dir_created:
file.directory:
- name: {{ kiwi_dir }}
- user: root
- group: root
- dir_mode: 755
# repo for common kiwi build needs - mainly RPM with SUSE Multi-Linux Manager certificate
mgr_kiwi_dir_repo_created:
file.directory:
- name: {{ kiwi_dir }}/repo
- user: root
- group: root
- dir_mode: 755
mgr_osimage_cert_deployed:
file.managed:
{%- if grains.get('osfullname') == 'SLES' and grains.get('osmajorrelease') == '11' %}
- name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
- source: salt://images/rhn-org-trusted-ssl-cert-osimage-sle11-1.0-1.noarch.rpm
{%- else %}
- name: {{ kiwi_dir }}/repo/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
- source: salt://images/rhn-org-trusted-ssl-cert-osimage-1.0-1.noarch.rpm
{%- endif %}
mgr_sshd_installed_enabled:
pkg.installed:
- name: openssh
service.running:
- name: sshd
- enable: True
mgr_sshd_public_key_copied:
file.append:
- name: /root/.ssh/authorized_keys
- source: salt://salt_ssh/mgr_ssh_id.pub
- makedirs: True
- require:
- pkg: mgr_sshd_installed_enabled
mgr_saltutil_synced:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_all
{%- else %}
mgrcompat.module_run:
- name: saltutil.sync_all
{%- endif %}
{% endif %}
0707010000007B000081B400000000000000000000000168EFD66400000159000000000000000000000000000000000000003000000000susemanager-sls/salt/services/reportdb-user.sls{% if grains.get('is_mgr_server', False) and grains.get('has_report_db', False) %}
{% if pillar.get('report_db_user', '') != '' and pillar.get('report_db_password', '') != '' %}
mgr_set_report_db_user:
reportdb_user.present:
- name: {{ pillar['report_db_user'] }}
- password: {{ pillar['report_db_password'] }}
{% endif %}
{% endif %}
0707010000007C000081B400000000000000000000000168EFD66400000650000000000000000000000000000000000000002E00000000susemanager-sls/salt/services/salt-minion.sls{% include 'bootstrap/remove_traditional_stack.sls' %}
{%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}
{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
mgr_salt_minion_inst:
pkg.installed:
- name: {{ salt_minion_name }}
- order: last
{{ susemanager_minion_config }}:
file.managed:
- source:
- salt://bootstrap/susemanager.conf
- template: jinja
- mode: 644
- order: last
- require:
- pkg: mgr_salt_minion_inst
mgr_salt_minion_run:
service.running:
- name: {{ salt_minion_name }}
- enable: True
- order: last
{% endif %}
{%- if salt['pillar.get']('contact_method') in ['ssh-push', 'ssh-push-tunnel'] %}
logrotate_configuration:
file.managed:
- name: /etc/logrotate.d/salt-ssh
- user: root
- group: root
- mode: 644
- makedirs: True
- contents: |
/var/log/salt-ssh.log {
su root root
missingok
size 10M
rotate 7
compress
notifempty
}
{% endif %}
{# ensure /etc/sysconfig/rhn/systemid is created to indicate minion is managed by SUSE Multi-Linux Manager #}
/etc/sysconfig/rhn/systemid:
file.managed:
- mode: 0640
- makedirs: True
- replace: False
0707010000007D000041FD00000000000000000000000468EFD66400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/srvmonitoring0707010000007E000081B400000000000000000000000168EFD664000004A1000000000000000000000000000000000000002F00000000susemanager-sls/salt/srvmonitoring/disable.slsnode_exporter_service:
service.dead:
- name: prometheus-node_exporter
- enable: False
postgres_exporter_service:
service.dead:
- name: prometheus-postgres_exporter
- enable: False
# Workaround for previous tomcat configuration
remove_tomcat_previous:
file.rename:
- source: /etc/sysconfig/tomcat
- name: /etc/sysconfig/tomcat.bak
- force: True
- onlyif: test -f /etc/sysconfig/tomcat
jmx_tomcat_config:
file.absent:
- name: /etc/sysconfig/tomcat/systemd/jmx.conf
mgrcompat.module_run:
- name: service.systemctl_reload
jmx_taskomatic_config:
file.absent:
- name: /etc/sysconfig/taskomatic/systemd/jmx.conf
mgrcompat.module_run:
- name: service.systemctl_reload
mgr_enable_prometheus_self_monitoring:
cmd.run:
- name: /usr/bin/grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && /usr/bin/sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 0/' /etc/rhn/rhn.conf || /usr/bin/echo 'prometheus_monitoring_enabled = 0' >> /etc/rhn/rhn.conf
mgr_is_prometheus_self_monitoring_disabled:
cmd.run:
- name: /usr/bin/grep -qF 'prometheus_monitoring_enabled = 0' /etc/rhn/rhn.conf
0707010000007F000081B400000000000000000000000168EFD664000010ED000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/enable.slsnode_exporter:
cmd.run:
- name: /usr/bin/rpm --query --info golang-github-prometheus-node_exporter
node_exporter_service:
service.running:
- name: prometheus-node_exporter
- enable: True
- require:
- cmd: node_exporter
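{# Jinja for-loops have their own variable scope, so a namespace object is
   used to let the has_pillar_data flag survive across loop iterations #}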
{% set global = namespace(has_pillar_data = True) %}
{% for key in ['db_name', 'db_host', 'db_port', 'db_user', 'db_pass'] if global.has_pillar_data %}
{% set global.has_pillar_data = key in pillar and pillar[key] %}
{% endfor %}
{% if global.has_pillar_data %}
postgres_exporter:
cmd.run:
- name: /usr/bin/rpm --query --info prometheus-postgres_exporter || /usr/bin/rpm --query --info golang-github-wrouesnel-postgres_exporter
postgres_exporter_cleanup:
file.absent:
- name: /etc/sysconfig/prometheus-postgres_exporter
postgres_exporter_configuration:
file.managed:
- name: /etc/sysconfig/prometheus-postgres_exporter/postgres_exporter_queries.yaml
- makedirs: True
- source:
- salt://srvmonitoring/postgres_exporter_queries.yaml
- user: root
- group: root
- mode: 644
postgres_exporter_service:
file.managed:
- names:
- /etc/sysconfig/prometheus-postgres_exporter/systemd/60-server.conf:
- source: salt://srvmonitoring/prometheus-postgres_exporter
- user: root
- mode: 644
- /etc/sysconfig/prometheus-postgres_exporter/pg_passwd:
- source: salt://srvmonitoring/pg_passwd
- user: prometheus
- mode: 600
- makedirs: True
- template: jinja
- group: root
- require:
- cmd: postgres_exporter
- file: postgres_exporter_configuration
mgrcompat.module_run:
- name: service.systemctl_reload
service.running:
- name: prometheus-postgres_exporter
- enable: True
- require:
- file: postgres_exporter_service
- watch:
- file: postgres_exporter_configuration
{% endif %}
jmx_exporter:
cmd.run:
- name: /usr/bin/rpm --query --info prometheus-jmx_exporter
jmx_exporter_tomcat_yaml_config:
file.managed:
- name: /etc/prometheus-jmx_exporter/tomcat/java_agent.yml
- makedirs: True
- user: root
- group: root
- mode: 644
- source:
- salt://srvmonitoring/java_agent.yaml
# Workaround for previous tomcat configuration
remove_tomcat_previous:
file.rename:
- source: /etc/sysconfig/tomcat
- name: /etc/sysconfig/tomcat.bak
- force: True
- onlyif: test -f /etc/sysconfig/tomcat
jmx_tomcat_config:
file.managed:
- name: /etc/sysconfig/tomcat/systemd/jmx.conf
- makedirs: True
- user: root
- group: root
- mode: 644
- source:
- salt://srvmonitoring/tomcat/systemd/jmx.conf
- require:
- cmd: jmx_exporter
mgrcompat.module_run:
- name: service.systemctl_reload
jmx_exporter_tomcat_service_cleanup:
service.dead:
- name: prometheus-jmx_exporter@tomcat
- enable: False
jmx_exporter_taskomatic_systemd_config_cleanup:
file.absent:
- name: /etc/prometheus-jmx_exporter/taskomatic/environment
jmx_exporter_taskomatic_yaml_config_cleanup:
file.absent:
- name: /etc/prometheus-jmx_exporter/taskomatic/prometheus-jmx_exporter.yml
jmx_exporter_taskomatic_yaml_config:
file.managed:
- name: /etc/prometheus-jmx_exporter/taskomatic/java_agent.yml
- makedirs: True
- user: root
- group: root
- mode: 644
- source:
- salt://srvmonitoring/java_agent.yaml
jmx_taskomatic_config:
file.managed:
- name: /etc/sysconfig/taskomatic/systemd/jmx.conf
- makedirs: True
- user: root
- group: root
- mode: 644
- source:
- salt://srvmonitoring/taskomatic/systemd/jmx.conf
- require:
- cmd: jmx_exporter
mgrcompat.module_run:
- name: service.systemctl_reload
jmx_exporter_taskomatic_service_cleanup:
service.dead:
- name: prometheus-jmx_exporter@taskomatic
- enable: False
mgr_enable_prometheus_self_monitoring:
cmd.run:
- name: /usr/bin/grep -q '^prometheus_monitoring_enabled.*=.*' /etc/rhn/rhn.conf && /usr/bin/sed -i 's/^prometheus_monitoring_enabled.*/prometheus_monitoring_enabled = 1/' /etc/rhn/rhn.conf || /usr/bin/echo 'prometheus_monitoring_enabled = 1' >> /etc/rhn/rhn.conf
mgr_is_prometheus_self_monitoring_enabled:
cmd.run:
- name: /usr/bin/grep -qF 'prometheus_monitoring_enabled = 1' /etc/rhn/rhn.conf
07070100000080000081B400000000000000000000000168EFD6640000008C000000000000000000000000000000000000003300000000susemanager-sls/salt/srvmonitoring/java_agent.yamlwhitelistObjectNames:
- java.lang:type=Threading,*
- java.lang:type=Memory,*
- Catalina:type=ThreadPool,name=*
rules:
- pattern: ".*"
07070100000081000081B400000000000000000000000168EFD66400000018000000000000000000000000000000000000002D00000000susemanager-sls/salt/srvmonitoring/pg_passwd{{ pillar['db_pass'] }}
07070100000082000081B400000000000000000000000168EFD6640000044E000000000000000000000000000000000000004200000000susemanager-sls/salt/srvmonitoring/postgres_exporter_queries.yamlmgr_serveractions:
query: |
SELECT (
SELECT COUNT(*)
FROM rhnServerAction
WHERE status = (
SELECT id FROM rhnActionStatus WHERE name = 'Queued'
)
) AS queued,
(
SELECT COUNT(*)
FROM rhnServerAction
WHERE status = (
SELECT id FROM rhnActionStatus WHERE name = 'Picked Up'
)
) AS picked_up,
(
SELECT COUNT(*)
FROM rhnServerAction
WHERE status = (
SELECT id FROM rhnActionStatus WHERE name IN ('Completed')
)
) AS completed,
(
SELECT COUNT(*)
FROM rhnServerAction
WHERE status = (
SELECT id FROM rhnActionStatus WHERE name IN ('Failed')
)
) AS failed;
metrics:
- queued:
usage: "GAUGE"
description: "Count of queued Actions"
- picked_up:
usage: "GAUGE"
description: "Count of picked up Actions"
- completed:
usage: "COUNTER"
description: "Count of completed Actions"
- failed:
usage: "COUNTER"
description: "Count of failed Actions"
07070100000083000081B400000000000000000000000168EFD664000001A8000000000000000000000000000000000000004000000000susemanager-sls/salt/srvmonitoring/prometheus-postgres_exporter[Service]
EnvironmentFile=
Environment="DATA_SOURCE_URI={{ pillar['db_host'] }}:{{ pillar['db_port'] }}/{{ pillar['db_name'] }}?sslmode=disable"
Environment="DATA_SOURCE_USER={{ pillar['db_user'] }}"
Environment="DATA_SOURCE_PASS_FILE=/etc/sysconfig/prometheus-postgres_exporter/pg_passwd"
Environment="POSTGRES_EXPORTER_PARAMS=--extend.query-path /etc/sysconfig/prometheus-postgres_exporter/postgres_exporter_queries.yaml"
07070100000084000081B400000000000000000000000168EFD6640000033E000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/status.slsnode_exporter_service:
mgrcompat.module_run:
- name: service.status
- m_name: "prometheus-node_exporter.service"
postgres_exporter_service:
mgrcompat.module_run:
- name: service.status
- m_name: "prometheus-postgres_exporter.service"
jmx_tomcat_java_config:
mgrcompat.module_run:
- name: file.search
- path: /usr/lib/systemd/system/tomcat.service.d/jmx.conf
- pattern: "jmx_prometheus_javaagent.jar=5556"
jmx_taskomatic_java_config:
mgrcompat.module_run:
- name: file.search
- path: /usr/lib/systemd/system/taskomatic.service.d/jmx.conf
- pattern: "jmx_prometheus_javaagent.jar=5557"
mgr_is_prometheus_self_monitoring_enabled:
cmd.run:
- name: /usr/bin/grep -q -E 'prometheus_monitoring_enabled\s*=\s*(1|y|true|yes|on)\s*$' /etc/rhn/rhn.conf
include:
- util.syncstates
07070100000085000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000002E00000000susemanager-sls/salt/srvmonitoring/taskomatic07070100000086000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003600000000susemanager-sls/salt/srvmonitoring/taskomatic/systemd07070100000087000081B400000000000000000000000168EFD66400000097000000000000000000000000000000000000003F00000000susemanager-sls/salt/srvmonitoring/taskomatic/systemd/jmx.conf[Service]
Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml"
07070100000088000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000002A00000000susemanager-sls/salt/srvmonitoring/tomcat07070100000089000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003200000000susemanager-sls/salt/srvmonitoring/tomcat/systemd0707010000008A000081B400000000000000000000000168EFD66400000096000000000000000000000000000000000000003B00000000susemanager-sls/salt/srvmonitoring/tomcat/systemd/jmx.conf[Service]
Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml"
0707010000008B000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002300000000susemanager-sls/salt/ssh_bootstrap0707010000008C000081B400000000000000000000000168EFD664000008F2000000000000000000000000000000000000002C00000000susemanager-sls/salt/ssh_bootstrap/init.sls##
## java bootstrapping calls certs.sls before this state
##
{%- set mgr_sudo_user = salt['pillar.get']('mgr_sudo_user') or 'root' %}
mgr_ssh_identity:
ssh_auth.present:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/mgr_ssh_id.pub
{% if salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
mgr_server_localhost_alias_present:
host.present:
{% else %}
mgr_server_localhost_alias_absent:
host.absent:
{% endif %}
- ip:
- 127.0.0.1
- names:
- {{ salt['pillar.get']('mgr_server') }}
{%- if salt['pillar.get']('proxy_pub_key') and salt['pillar.get']('contact_method') == 'ssh-push-tunnel' %}
no_push_key_authorized:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- comment: susemanager-ssh-push
proxy_ssh_identity:
ssh_auth.present:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/{{ salt['pillar.get']('proxy_pub_key') }}
- require:
- ssh_auth: no_push_key_authorized
{%- endif %}
{%- set home = salt['user.info'](mgr_sudo_user)['home'] %}
generate_own_ssh_key:
cmd.run:
- name: /usr/bin/ssh-keygen -N '' -C 'susemanager-own-ssh-push' -f {{ home }}/.ssh/mgr_own_id -t rsa -q
- creates: {{ home }}/.ssh/mgr_own_id.pub
ownership_own_ssh_key:
file.managed:
- name: {{ home }}/.ssh/mgr_own_id
- user: {{ mgr_sudo_user }}
- replace: False
- require:
- cmd: generate_own_ssh_key
ownership_own_ssh_pub_key:
file.managed:
- name: {{ home }}/.ssh/mgr_own_id.pub
- user: {{ mgr_sudo_user }}
- replace: False
- require:
- cmd: generate_own_ssh_key
no_own_key_authorized:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- comment: susemanager-own-ssh-push
- require:
- file: ownership_own_ssh_key
authorize_own_key:
ssh_auth.present:
- user: {{ mgr_sudo_user }}
- source: {{ home }}/.ssh/mgr_own_id.pub
- require:
- file: ownership_own_ssh_key
- ssh_auth: no_own_key_authorized
# disable all repos, except repos flagged with keep:* (should be none)
{% set repos_disabled = {'match_str': 'keep:', 'matching': false} %}
{% include 'channels/disablelocalrepos.sls' %}
{% do repos_disabled.update({'skip': true}) %}
{% include 'channels/gpg-keys.sls' %}
{% include 'bootstrap/remove_traditional_stack.sls' %}
0707010000008D000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002100000000susemanager-sls/salt/supportdata0707010000008E000081B400000000000000000000000168EFD6640000014F000000000000000000000000000000000000002A00000000susemanager-sls/salt/supportdata/init.sls{% if not grains.get('transactional', False) %}
install-supportdata-command:
pkg.latest:
{%- if grains['os_family'] == 'Suse' %}
- name: supportutils
{%- else %}
- name: sos
{%- endif %}
{% endif %}
gather-supportdata:
mgrcompat.module_run:
- name: supportdata.get
- cmd_args: "{{ pillar.get('arguments', '') }}"
0707010000008F000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002600000000susemanager-sls/salt/switch_to_bundle07070100000090000081B400000000000000000000000168EFD664000002B3000000000000000000000000000000000000004400000000susemanager-sls/salt/switch_to_bundle/mgr_switch_to_venv_minion.sls{%- set avoid_salt_bundle = salt['pillar.get']('mgr_avoid_venv_salt_minion', false) == true %}
{%- set is_not_salt_ssh = salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
{%- set is_not_saltboot = salt['file.file_exists']('/etc/ImageVersion') == false %}
{%- set is_not_salt_bundle = "venv-salt-minion" not in grains["pythonexecutable"] %}
{%- if not avoid_salt_bundle and is_not_salt_bundle and is_not_salt_ssh and is_not_saltboot %}
include:
- util.mgr_switch_to_venv_minion
mgr_remove_susemanagerconf:
file.absent:
- name: /etc/salt/minion.d/susemanager.conf
- require:
- service: mgr_disable_salt_minion
- order: last
{%- endif %}
07070100000091000081B400000000000000000000000168EFD6640000028C000000000000000000000000000000000000002500000000susemanager-sls/salt/update-salt.slsinclude:
- channels
{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
mgr_keep_salt_up2date:
pkg.latest:
- refresh: True
- pkgs:
{%- if salt['pkg.version']('salt-minion') %}
- salt-minion
{%- if grains.os_family == 'Debian' %}
- salt-common
{%- else %}
- salt
{%- if grains['os_family'] == "Suse" and grains['osrelease'] == '15.7' %}
- python311-salt
{%- else %}
- python3-salt
{%- endif %}
{%- endif %}
{%- endif %}
{%- if salt['pkg.version']('venv-salt-minion') %}
- venv-salt-minion
{%- endif %}
- require:
- sls: channels
{%- endif %}
07070100000092000081B400000000000000000000000168EFD66400000641000000000000000000000000000000000000002200000000susemanager-sls/salt/uptodate.slsinclude:
- channels
{%- if grains['os_family'] == 'Suse' %}
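# Informational zypper exit codes (103/104/106) are treated as success;
# patch-check exits with 100/101 when (security) patches are available.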
mgr_keep_system_up2date_updatestack:
cmd.run:
- name: /usr/bin/zypper --non-interactive patch --updatestack-only
- success_retcodes:
- 104
- 103
- 106
- 0
- onlyif: '/usr/bin/zypper patch-check --updatestack-only; r=$?; /usr/bin/test $r -eq 100 || /usr/bin/test $r -eq 101'
- require:
- sls: channels
{% set patch_need_reboot = salt['cmd.retcode']('/usr/bin/zypper -x list-patches | /usr/bin/grep \'restart="true"\' > /dev/null', python_shell=True) %}
{% else %}
mgr_keep_system_up2date_updatestack:
pkg.latest:
- pkgs:
{%- if salt['pkg.version']('venv-salt-minion') %}
- venv-salt-minion
{%- else %}
- salt-minion
{%- endif %}
{%- if grains['os_family'] == 'RedHat' %}
{%- if grains['osmajorrelease'] >= 8 %}
- dnf
{%- else %}
- yum
{%- endif %}
{%- elif grains.os_family == 'Debian' %}
- apt
{%- endif %}
- require:
- sls: channels
{%- endif %}
mgr_keep_system_up2date_pkgs:
pkg.uptodate:
- refresh: True
{%- if grains['os_family'] == 'Debian' %}
- dist_upgrade: True
{%- endif %}
- require:
- sls: channels
- mgr_keep_system_up2date_updatestack
{%- if grains['os_family'] == 'Suse' and grains['osmajorrelease'] >= 15 %}
# zypper up does not evaluate reboot_suggested flags in patches. We need to do it manually
mgr_flag_reboot_needed:
file.touch:
- name: /run/reboot-needed
- onlyif: '[ {{ patch_need_reboot|default(1) }} -eq 0 ]'
- require:
- pkg: mgr_keep_system_up2date_pkgs
{% endif %}
07070100000093000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001A00000000susemanager-sls/salt/util07070100000094000081B400000000000000000000000168EFD664000002DB000000000000000000000000000000000000003600000000susemanager-sls/salt/util/mgr_disable_fqdns_grain.sls{%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}
mgr_disable_fqdns_grains:
file.append:
- name: {{ susemanager_minion_config }}
- text: "enable_fqdns_grains: False"
- unless: /usr/bin/grep 'enable_fqdns_grains:' {{ susemanager_minion_config }}
mgr_salt_minion:
service.running:
- name: {{ salt_minion_name }}
- enable: True
- order: last
- watch:
- file: mgr_disable_fqdns_grains
07070100000095000081B400000000000000000000000168EFD6640000032E000000000000000000000000000000000000003700000000susemanager-sls/salt/util/mgr_mine_config_clean_up.sls{%- if salt['pillar.get']('contact_method') not in ['ssh-push', 'ssh-push-tunnel'] %}
{%- set salt_minion_name = 'salt-minion' %}
{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager-mine.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set salt_minion_name = 'venv-salt-minion' %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager-mine.conf' %}
{%- endif -%}
mgr_disable_mine:
file.managed:
- name: {{ susemanager_minion_config }}
- contents: "mine_enabled: False"
- unless: /usr/bin/grep 'mine_enabled:' {{ susemanager_minion_config }}
mgr_salt_minion:
service.running:
- name: {{ salt_minion_name }}
- enable: True
- order: last
- watch:
- file: mgr_disable_mine
{% endif %}
07070100000096000081B400000000000000000000000168EFD66400000584000000000000000000000000000000000000003500000000susemanager-sls/salt/util/mgr_rotate_saltssh_key.sls{% set mgr_sudo_user = salt['pillar.get']('mgr_sudo_user') or 'root' %}
{% if salt['cp.list_master'](prefix='salt_ssh/new_mgr_ssh_id.pub') %}
{% if salt['pillar.get']('contact_method') in ['ssh-push', 'ssh-push-tunnel'] %}
new_mgr_ssh_identity:
ssh_auth.present:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/new_mgr_ssh_id.pub
{% endif %}
proxy_new_mgr_ssh_identity:
ssh_auth.present:
- user: mgrsshtunnel
- source: salt://salt_ssh/new_mgr_ssh_id.pub
- onlyif: /usr/bin/grep -q mgrsshtunnel /etc/passwd
{% endif %}
{% if salt['cp.list_master'](prefix='salt_ssh/disabled_mgr_ssh_id.pub') %}
{% if salt['pillar.get']('contact_method') in ['ssh-push', 'ssh-push-tunnel'] %}
old_mgr_ssh_identity:
ssh_auth.absent:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/disabled_mgr_ssh_id.pub
# to prevent locking yourself out
current_mgr_ssh_identity:
ssh_auth.present:
- user: {{ mgr_sudo_user }}
- source: salt://salt_ssh/mgr_ssh_id.pub
{% endif %}
proxy_old_mgr_ssh_identity:
ssh_auth.absent:
- user: mgrsshtunnel
- source: salt://salt_ssh/disabled_mgr_ssh_id.pub
- onlyif: /usr/bin/grep -q mgrsshtunnel /etc/passwd
# to prevent locking yourself out
proxy_current_mgr_ssh_identity:
ssh_auth.present:
- user: mgrsshtunnel
- source: salt://salt_ssh/mgr_ssh_id.pub
- onlyif: /usr/bin/grep -q mgrsshtunnel /etc/passwd
{% endif %}
07070100000097000081B400000000000000000000000168EFD6640000020B000000000000000000000000000000000000003500000000susemanager-sls/salt/util/mgr_start_event_grains.sls{%- set susemanager_minion_config = '/etc/salt/minion.d/susemanager.conf' %}
{# Prefer venv-salt-minion if installed #}
{%- if salt['pkg.version']('venv-salt-minion') %}
{%- set susemanager_minion_config = '/etc/venv-salt-minion/minion.d/susemanager.conf' %}
{%- endif -%}
mgr_start_event_grains:
file.append:
- name: {{ susemanager_minion_config }}
- text: |
start_event_grains: [machine_id, saltboot_initrd, susemanager]
- unless: /usr/bin/grep 'start_event_grains:' {{ susemanager_minion_config }}
07070100000098000081B400000000000000000000000168EFD66400000F11000000000000000000000000000000000000003800000000susemanager-sls/salt/util/mgr_switch_to_venv_minion.sls{%- set susemanager_conf='/etc/salt/minion.d/susemanager.conf' %}
{%- set venv_susemanager_conf='/etc/venv-salt-minion/minion.d/susemanager.conf' %}
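{#- A minion is considered managed by this master when its susemanager.conf
    exists and a dry-run file.replace reports no change, i.e. the master
    entry already points at pillar['mgr_server'] #}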
{%- set managed_minion=salt['file.file_exists'](susemanager_conf) and
not salt['file.replace'](susemanager_conf, '^master: .*', 'master: ' + pillar['mgr_server'],
dry_run=True, show_changes=False, ignore_if_missing=True) %}
{%- set venv_managed_minion=salt['file.file_exists'](venv_susemanager_conf) and
not salt['file.replace'](venv_susemanager_conf, '^master: .*', 'master: ' + pillar['mgr_server'],
dry_run=True, show_changes=False, ignore_if_missing=True) %}
{%- if managed_minion or venv_managed_minion %}
{%- set pkgs_installed = salt['pkg.list_pkgs']() %}
{%- set venv_minion_installed = 'venv-salt-minion' in pkgs_installed %}
{%- set venv_minion_available = venv_minion_installed or 'venv-salt-minion' in salt['pkg.list_repo_pkgs']() %}
{%- if venv_minion_available %}
include:
- services.salt-minion
mgr_venv_salt_minion_pkg:
pkg.installed:
- name: venv-salt-minion
- onlyif:
- ([ {{ venv_minion_installed }} = "False" ])
mgr_copy_salt_minion_id:
file.copy:
- name: /etc/venv-salt-minion/minion_id
- source: /etc/salt/minion_id
- require:
- pkg: mgr_venv_salt_minion_pkg
- onlyif:
- /usr/bin/test -f /etc/salt/minion_id
mgr_copy_salt_minion_configs:
cmd.run:
- name: /usr/bin/cp -r /etc/salt/minion.d /etc/venv-salt-minion/
- require:
- pkg: mgr_venv_salt_minion_pkg
- onlyif:
- ([ {{ venv_managed_minion }} = "False" ])
mgr_copy_salt_minion_grains:
file.copy:
- name: /etc/venv-salt-minion/grains
- source: /etc/salt/grains
- require:
- pkg: mgr_venv_salt_minion_pkg
- onlyif:
- /usr/bin/test -f /etc/salt/grains
mgr_copy_salt_minion_keys:
cmd.run:
- name: /usr/bin/cp -r /etc/salt/pki/minion/minion* /etc/venv-salt-minion/pki/minion/
- require:
- cmd: mgr_copy_salt_minion_configs
- onlyif:
- /usr/bin/test -f /etc/salt/pki/minion/minion_master.pub
- unless:
- /usr/bin/test -f /etc/venv-salt-minion/pki/minion/minion_master.pub
mgr_enable_venv_salt_minion:
service.running:
- name: venv-salt-minion
- enable: True
- require:
- cmd: mgr_copy_salt_minion_keys
mgr_disable_salt_minion:
service.dead:
- name: salt-minion
- enable: False
- require:
- service: mgr_enable_venv_salt_minion
- sls: services.salt-minion
{%- if salt['pillar.get']('mgr_purge_non_venv_salt') %}
mgr_purge_non_venv_salt_packages:
pkg.purged:
- pkgs:
- salt
- salt-common
- salt-minion
- python2-salt
{%- if grains['os_family'] == "Suse" and grains['osrelease'] == '15.7' %}
- python311-salt
{%- else %}
- python3-salt
{%- endif %}
- require:
- service: mgr_disable_salt_minion
{%- endif %}
{%- if salt['pillar.get']('mgr_purge_non_venv_salt_files') %}
mgr_purge_non_venv_salt_pki_dir:
cmd.run:
- name: /usr/bin/rm -rf /etc/salt/minion* /etc/salt/pki/minion
- onlyif:
- /usr/bin/test -d /etc/salt/pki/minion
- require:
- service: mgr_disable_salt_minion
mgr_purge_non_venv_salt_conf_dir:
file.absent:
- name: /etc/salt
- unless:
- /usr/bin/find /etc/salt -type f -print -quit | /usr/bin/grep -q .
- require:
- cmd: mgr_purge_non_venv_salt_pki_dir
{%- endif %}
{%- else %}
mgr_venv_salt_minion_unavailable:
test.fail_without_changes:
- comment: venv-salt-minion package is not available
{%- endif %}
{%- else %}
mgr_salt_minion_of_another_master:
test.fail_without_changes:
- comment: The salt-minion is managed by another master
{%- endif %}
07070100000099000081B400000000000000000000000168EFD6640000001B000000000000000000000000000000000000002300000000susemanager-sls/salt/util/noop.slsmgr_do_nothing:
test.nop
0707010000009A000081B400000000000000000000000168EFD6640000005B000000000000000000000000000000000000002600000000susemanager-sls/salt/util/syncall.slsinclude:
- util.syncmodules
- util.syncstates
- util.syncgrains
- util.syncbeacons
0707010000009B000081B400000000000000000000000168EFD664000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncbeacons.slssync_beacons:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_beacons
{%- else %}
mgrcompat.module_run:
- name: saltutil.sync_beacons
{%- endif %}
0707010000009C000081B400000000000000000000000168EFD66400000136000000000000000000000000000000000000002C00000000susemanager-sls/salt/util/synccustomall.sls{#
Alias for the newer 'util.syncall' for backwards compatibility.
#}
include:
- util.syncall
mgr_synccustomall_notify:
test.show_notification:
- text: |
util.synccustomall is deprecated and is only available for backwards compatibility.
        Please switch to 'util.syncall' instead.
0707010000009D000081B400000000000000000000000168EFD664000000D8000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncgrains.slssync_grains:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_grains:
{%- else %}
mgrcompat.module_run:
- name: saltutil.sync_grains
{%- endif %}
- reload_grains: true
0707010000009E000081B400000000000000000000000168EFD664000000C0000000000000000000000000000000000000002A00000000susemanager-sls/salt/util/syncmodules.slssync_modules:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_modules
{%- else %}
mgrcompat.module_run:
- name: saltutil.sync_modules
{%- endif %}
0707010000009F000081B400000000000000000000000168EFD664000000E5000000000000000000000000000000000000002900000000susemanager-sls/salt/util/syncstates.slssync_states:
{%- if grains.get('__suse_reserved_saltutil_states_support', False) %}
saltutil.sync_states
{%- elif salt['saltutil.sync_states']() or True %}
mgrcompat.module_run:
- name: saltutil.sync_states
{%- endif %}
070701000000A0000081B400000000000000000000000168EFD66400000192000000000000000000000000000000000000002900000000susemanager-sls/salt/util/systeminfo.sls# Update system info on each minion start
include:
- util.syncmodules
- util.syncstates
- util.syncgrains
- util.syncbeacons
status_uptime:
mgrcompat.module_run:
- name: status.uptime
grains_update:
mgrcompat.module_run:
- name: grains.item
- args:
- kernelrelease
- master
kernel_live_version:
mgrcompat.module_run:
- name: sumautil.get_kernel_live_version
070701000000A1000081B400000000000000000000000168EFD6640000016B000000000000000000000000000000000000002E00000000susemanager-sls/salt/util/systeminfo_full.sls# Collect full system info for minion registration
include:
- util.syncmodules
- util.syncstates
- util.syncgrains
- util.syncbeacons
status_uptime:
mgrcompat.module_run:
- name: status.uptime
grains_update:
mgrcompat.module_run:
- name: grains.items
kernel_live_version:
mgrcompat.module_run:
- name: sumautil.get_kernel_live_version
070701000000A2000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001500000000susemanager-sls/scap070701000000A3000081B400000000000000000000000168EFD66400001532000000000000000000000000000000000000002A00000000susemanager-sls/scap/xccdf-resume.xslt.in<?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright 2012 Red Hat Inc., Durham, North Carolina. All Rights Reserved.
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with this library; if not, write to the Free Software Foundation, Inc., 59
Temple Place, Suite 330, Boston, MA 02111-1307 USA
Authors:
Simon Lukasik <slukasik@redhat.com>
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
xmlns:cdf1="http://checklists.nist.gov/xccdf/1.1"
xmlns:cdf2="http://checklists.nist.gov/xccdf/1.2">
<xsl:output method="xml" encoding="UTF-8"/>
<xsl:template match="/">
<benchmark-resume>
<xsl:apply-templates select="*[local-name()='Benchmark']"/>
</benchmark-resume>
</xsl:template>
<xsl:template match="cdf1:Benchmark | cdf2:Benchmark">
<xsl:copy-of select="@id"/>
<xsl:attribute name="version">
<xsl:value-of select="normalize-space(cdf1:version/text()|cdf2:version/text())"/>
</xsl:attribute>
<xsl:variable name="profileId" select="cdf1:TestResult[1]/cdf1:profile/@idref | cdf2:TestResult[1]/cdf2:profile/@idref"/>
<xsl:choose>
<xsl:when test="not($profileId)"/> <!-- Do not send profile element when scanning with 'default' profile. -->
<xsl:when test="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]">
<xsl:apply-templates select="cdf1:Profile[@id = $profileId] | cdf2:Profile[@id = $profileId]"/>
</xsl:when>
<xsl:otherwise>
<profile title="Tailored profile">
<xsl:attribute name="id">
<xsl:value-of select="$profileId"/>
</xsl:attribute>
</profile>
</xsl:otherwise>
</xsl:choose>
<xsl:apply-templates select="cdf1:TestResult[1] | cdf2:TestResult[1]"/>
</xsl:template>
<xsl:template match="cdf1:Profile | cdf2:Profile">
<profile>
<xsl:attribute name="title">
<xsl:value-of select="normalize-space(cdf1:title/text() | cdf2:title/text())"/>
</xsl:attribute>
<xsl:copy-of select="@id"/>
<xsl:attribute name="description">
<xsl:value-of select="normalize-space(cdf1:description[@xml:lang='en-US']/text() | cdf2:description[@xml:lang='en-US']/text())"/>
</xsl:attribute>
</profile>
</xsl:template>
<xsl:template match="cdf1:TestResult | cdf2:TestResult">
<TestResult>
<xsl:copy-of select="@id"/>
<xsl:copy-of select="@start-time"/>
<xsl:copy-of select="@end-time"/>
<pass>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'pass'] | cdf2:rule-result[cdf2:result = 'pass']"/>
</pass>
<fail>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fail'] | cdf2:rule-result[cdf2:result = 'fail']"/>
</fail>
<error>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'error'] | cdf2:rule-result[cdf2:result = 'error']"/>
</error>
<unknown>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'unknown'] | cdf2:rule-result[cdf2:result = 'unknown']"/>
</unknown>
<notapplicable>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notapplicable'] | cdf2:rule-result[cdf2:result = 'notapplicable']"/>
</notapplicable>
<notchecked>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notchecked'] | cdf2:rule-result[cdf2:result = 'notchecked']"/>
</notchecked>
<notselected>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'notselected'] | cdf2:rule-result[cdf2:result = 'notselected']"/>
</notselected>
<informational>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'informational'] | cdf2:rule-result[cdf2:result = 'informational']"/>
</informational>
<fixed>
<xsl:apply-templates select="cdf1:rule-result[cdf1:result = 'fixed'] | cdf2:rule-result[cdf2:result = 'fixed']"/>
</fixed>
</TestResult>
</xsl:template>
<xsl:template match="cdf1:rule-result | cdf2:rule-result">
<rr>
<xsl:attribute name="id">
<xsl:value-of select="normalize-space(@idref)"/>
</xsl:attribute>
<xsl:apply-templates select="cdf1:ident | cdf2:ident"/>
</rr>
</xsl:template>
<xsl:template match="cdf1:ident | cdf2:ident">
<ident>
<xsl:copy-of select="@system"/>
<xsl:value-of select="normalize-space(text())"/>
</ident>
</xsl:template>
</xsl:stylesheet>
070701000000A4000041FD00000000000000000000000968EFD66400000000000000000000000000000000000000000000001400000000susemanager-sls/src070701000000A5000081B400000000000000000000000168EFD6640000021B000000000000000000000000000000000000001E00000000susemanager-sls/src/README.md## Python Code Maintenance
Tests are written with PyTest, as follows:
1. Create your "test_foo.py" file.
2. Import your package with a relative (double-dot) import,
so it is resolved via the sys path, e.g.:
`from ..beacons import pkgset`
3. Create a test function "def test_my_foo(..."
4. Rock-n-roll by simply calling "py.test".
Don't mind `.cache` and `__pycache__` directories,
they are ignored in an explicit `.gitignore`.
Have fun. :)
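For illustration, a minimal test module could look like this (hypothetical file
name and assertions; it assumes the `salt` package is importable in the test
environment):

```python
# src/tests/test_pkgset_example.py -- a hypothetical example
from ..beacons import pkgset


def test_virtual_name():
    # The beacon registers itself under its virtual name.
    assert pkgset.__virtual__() == "pkgset"


def test_validate_accepts_any_config():
    # validate() intentionally accepts any configuration.
    ok, _message = pkgset.validate({"interval": 5})
    assert ok is True
```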
## Run Unit tests
Use the following command to run the unit tests:
`make -f Makefile.python docker_pytest`
070701000000A6000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002000000000susemanager-sls/src/__init__.py070701000000A7000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001C00000000susemanager-sls/src/beacons070701000000A8000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002800000000susemanager-sls/src/beacons/__init__.py070701000000A9000081B400000000000000000000000168EFD66400000BCD000000000000000000000000000000000000002600000000susemanager-sls/src/beacons/pkgset.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Watch the RPM or DPkg database via cookie files and fire
an event to SUSE Multi-Linux Manager when it changes.
Author: Bo Maryniuk <bo@suse.de>
"""
from __future__ import absolute_import
import os
import salt.cache
import salt.config
__virtualname__ = "pkgset"
SALT_CONFIG_DIR = os.environ.get("SALT_CONFIG_DIR", "/etc/salt")
__opts__ = salt.config.minion_config(os.path.join(SALT_CONFIG_DIR, "minion"))
CACHE = salt.cache.Cache(__opts__)
PKGSET_COOKIES = (
os.path.join(__opts__["cachedir"], "rpmdb.cookie"),
os.path.join(__opts__["cachedir"], "dpkg.cookie"),
)
# pylint: disable-next=invalid-name
def __virtual__():
return __virtualname__
# pylint: disable-next=unused-argument
def validate(config):
"""
    The absence of this function could cause noisy logging
    when the logging level is set to DEBUG or TRACE,
    so it must exist even though it performs no validation.
"""
return True, "There is nothing to validate"
# pylint: disable-next=unused-argument
def beacon(config):
"""
Watch the cookie file from package manager plugin.
If its content changes, fire an event to the Master.
Example Config
.. code-block:: yaml
beacons:
pkgset:
interval: 5
"""
ret = []
for cookie_path in PKGSET_COOKIES:
if not os.path.exists(cookie_path):
continue
# pylint: disable-next=unspecified-encoding
with open(cookie_path) as ck_file:
ck_data = ck_file.read().strip()
# pylint: disable-next=undefined-variable
if __virtualname__ not in __context__:
            # After a minion restart, when this runs for the first time, there is
            # nothing in the context yet. If the cache holds data, copy it into the
            # context; otherwise store the new data in both context and cache.
cache_data = CACHE.fetch("beacon/pkgset", "cookie").get("data", None)
if cache_data:
# pylint: disable-next=undefined-variable
__context__[__virtualname__] = cache_data
else:
# pylint: disable-next=undefined-variable
__context__[__virtualname__] = ck_data
CACHE.store("beacon/pkgset", "cookie", {"data": ck_data})
# pylint: disable-next=undefined-variable
if __context__[__virtualname__] != ck_data:
            # Fire the beacon event only if the new data is not yet in the
            # context (meaning it has not been processed), then stop iterating.
ret.append({"tag": "changed"})
CACHE.store("beacon/pkgset", "cookie", {"data": ck_data})
# pylint: disable-next=undefined-variable
__context__[__virtualname__] = ck_data
break
return ret
070701000000AA000081B400000000000000000000000168EFD664000007D8000000000000000000000000000000000000002B00000000susemanager-sls/src/beacons/reboot_info.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2022-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Watch system status and fire an event to SUSE Multi-Linux Manager indicating
when a reboot is required.
"""
import logging
log = logging.getLogger(__name__)
__virtualname__ = "reboot_info"
# pylint: disable-next=invalid-name
def __virtual__():
"""
Run on Debian, Suse and RedHat systems.
"""
# pylint: disable-next=undefined-variable
return __grains__["os_family"] in ["Debian", "Suse", "RedHat"]
# pylint: disable-next=unused-argument
def validate(config):
"""
    The absence of this function could cause noisy logging
    when the logging level is set to DEBUG or TRACE,
    so it must exist even though it performs no validation.
"""
return True, "There is nothing to validate"
# pylint: disable-next=unused-argument
def beacon(config):
"""
Monitor system status to verify whether a reboot
is required. The first time it detects that a reboot
is necessary, it fires an event.
Example Config
.. code-block:: yaml
beacons:
reboot_info:
interval: 5
"""
ret = []
# pylint: disable-next=undefined-variable
try:
# pylint: disable-next=undefined-variable
result = __salt__["reboot_info.reboot_required"]()
except KeyError:
        # the reboot_info salt module might not be synchronized yet
return ret
except Exception as e: # pylint: disable=broad-except
log.error(
"Error while executing 'reboot_info.reboot_required': %s",
e,
exc_info=True,
)
return ret
reboot_needed = result.get("reboot_required", False)
# pylint: disable-next=undefined-variable
if reboot_needed and not __context__.get("reboot_needed", False):
ret.append({"reboot_needed": reboot_needed})
# pylint: disable-next=undefined-variable
__context__["reboot_needed"] = reboot_needed
return ret
070701000000AB000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001800000000susemanager-sls/src/doc070701000000AC000081B400000000000000000000000168EFD664000008F4000000000000000000000000000000000000002200000000susemanager-sls/src/doc/README.md# Uyuni configuration modules (`uyuni_config`) documentation
These execution and state modules make it possible to configure organizations, users, user permissions on channels, and system groups on an Uyuni or SUSE Multi-Linux Manager Server.
## General pillar data configuration
Virtually all functions in the modules leverage the XMLRPC API. It is thus necessary to provide an Uyuni/SUSE Multi-Linux Manager administrator user name and password, with permissions on the entities to configure.
It is possible and recommended to configure those credentials in a pillar file with the following structure:
```
uyuni:
xmlrpc:
user: admin
password: admin
```
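With these credentials in the pillar, most functions can be called without passing admin credentials explicitly. A hypothetical command-line invocation (assuming the execution module is loaded as `uyuni_config` and the server itself is registered as a minion):

```
salt '<server-minion>' uyuni_config.org_list_orgs
```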
## Detailed function documentation
Individual methods, parameters and return values are documented in `uyuni_config_execution_module_doc.txt` and `uyuni_config_state_module_doc.txt` in the same directory that contains this file.
## Examples
A few examples are provided:
- `examples/uyuni_config_hardcode.sls`: shows how to define an organization, a trust, a system group and a user with channel permissions. Note: all credentials are hardcoded directly in the file for simplicity's sake, but should at least be moved to pillars in a production environment
- `examples/ldap/uyuni_users_ldap.sls`: shows how to define multiple users based on data coming from an LDAP server via the LDAP pillar module. This makes it possible to sync LDAP users to Uyuni/SUSE Multi-Linux Manager
### LDAP example specifics
Configuration notes:
- see "General pillar data configuration" above for general credential configuration in pillars
- one more pillar needs to be defined in which organization administrator credentials are specified for each organization the state is going to create users in. An example with one organization can be found in `examples/ldap/pillar_orgs.yaml`
- in order to retrieve data from an LDAP server, the [pillar_ldap module](https://docs.saltstack.com/en/latest/ref/pillar/all/salt.pillar.pillar_ldap.html) is used, and needs its own configuration pillar. An example can be found in `examples/ldap/pillar_ldap.yaml`
In this particular example, the following LDAP fields are extracted in order to match corresponding Uyuni/SUSE Multi-Linux Manager parameters:
- user name
- email
- first_name
- last_name
- roles
070701000000AD000081B400000000000000000000000168EFD664000058B1000000000000000000000000000000000000003E00000000susemanager-sls/src/doc/uyuni_config_execution_module_doc.txt=== channel_list_manageable_channels
**(login, password)**
List all manageable channels for the authenticated user
....
login: user login id
password: user password
....
return: list of manageable channels for the user
=== channel_list_my_channels
**(login, password)**
List all subscribed channels for the authenticated user
....
login: user login id
password: user password
....
return: list of subscribed channels for the user
=== channel_software_is_globally_subscribable
**(channel_label, org_admin_user=None, org_admin_password=None)**
Returns whether the channel is globally subscribable within the organization
....
channel_label: label of the channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean which indicates if channel is globally subscribable
=== channel_software_is_user_manageable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**
Returns whether the channel may be managed by the given user.
....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean which indicates if the user can manage the channel or not
=== channel_software_is_user_subscribable
**(channel_label, login, org_admin_user=None, org_admin_password=None)**
Returns whether the channel may be subscribed by the given user.
....
channel_label: label of the channel
login: user login id
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean which indicates if the user can subscribe to the channel or not
=== channel_software_set_user_manageable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**
Set the manageable flag for a given channel and user.
If access is set to 'true', this method will give the user manage permissions to the channel.
Otherwise, that privilege is revoked.
....
channel_label: label of the channel
login: user login id
access: True if the user should have management access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== channel_software_set_user_subscribable
**(channel_label, login, access, org_admin_user=None, org_admin_password=None)**
Set the subscribable flag for a given channel and user.
If access is set to 'true', this method will give the user subscribe permissions to the channel.
Otherwise, that privilege is revoked.
....
channel_label: label of the channel
login: user login id
access: True if the user should have subscribe access to channel
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== master_select_minions
**(target=None, target_type='glob')**
Return the list of minions from the Salt Master configured on the same host that match the given target expression
....
target: target expression to filter minions
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
....
return: list of minion IDs
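For illustration, a hypothetical command-line invocation (placeholder minion id and target; assumes the module is loaded as `uyuni_config`):
....
salt <server-minion> uyuni_config.master_select_minions target='web*' target_type=glob
....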
=== org_create
**(name, org_admin_user, org_admin_password, first_name, last_name, email, admin_prefix='Mr.', pam=False, admin_user=None, admin_password=None)**
Create an Uyuni organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
admin_prefix: organization admin prefix
pam: organization admin PAM authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: dictionary with org information
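A hypothetical invocation with placeholder values (assumes the module is loaded as `uyuni_config`):
....
salt <server-minion> uyuni_config.org_create name='Example Org' org_admin_user=exadmin org_admin_password=secret first_name=Jane last_name=Doe email=jane@example.com
....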
=== org_delete
**(name, admin_user=None, admin_password=None)**
Delete an organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: boolean, True indicates success
=== org_get_details
**(name, admin_user=None, admin_password=None)**
Get details of an organization.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: organization details
=== org_list_orgs
**(admin_user=None, admin_password=None)**
List all organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: list of all available organizations.
=== org_trust_add_trust
**(org_id, org_trust_id, admin_user=None, admin_password=None)**
Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_id: Organization id
org_trust_id: Trust organization id
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: boolean, True indicates success
=== org_trust_add_trust_by_name
**(org_name, org_trust, admin_user=None, admin_password=None)**
Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_name: organization name
org_trust: Trust organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: boolean, True indicates success
=== org_trust_list_orgs
**(org_admin_user=None, org_admin_password=None)**
List all organizations trusted by the authenticated user's organization
....
org_admin_user: organization admin user
org_admin_password: organization admin password
....
return: List of organization details
=== org_trust_list_trusts
**(org_name, admin_user=None, admin_password=None)**
List all trusts for one organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_name: name of the organization whose trusts to list
admin_user: authentication user
admin_password: authentication user password
....
return: list with all organizations and their trust status
=== org_trust_remove_trust
**(org_id, org_untrust_id, admin_user=None, admin_password=None)**
Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_id: organization id
org_untrust_id: organization id to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: boolean, True indicates success
=== org_trust_remove_trust_by_name
**(org_name, org_untrust, admin_user=None, admin_password=None)**
Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_name: organization name
org_untrust: organization name to untrust
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: boolean, True indicates success
=== org_update_name
**(org_id, name, admin_user=None, admin_password=None)**
Update an Uyuni organization's name
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
org_id: organization internal id
name: new organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: organization details
=== systemgroup_add_remove_systems
**(name, add_remove, system_ids=[], org_admin_user=None, org_admin_password=None)**
Update systems on a system group.
....
name: Name of the system group.
add_remove: True to add to the group, False to remove.
system_ids: list of system ids to add/remove from group
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: boolean, True indicates success
=== systemgroup_create
**(name, descr, org_admin_user=None, org_admin_password=None)**
Create a system group.
....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: details of the system group
=== systemgroup_delete
**(name, org_admin_user=None, org_admin_password=None)**
Delete a system group.
....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: boolean, True indicates success
=== systemgroup_get_details
**(name, org_admin_user=None, org_admin_password=None)**
Return system group details.
....
name: Name of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: details of the system group
=== systemgroup_list_systems
**(name, minimal=True, org_admin_user=None, org_admin_password=None)**
List systems in a system group
....
name: Name of the system group.
minimal: default True. Only return minimal information about systems, use False to get more details
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: List of system information
=== systemgroup_update
**(name, descr, org_admin_user=None, org_admin_password=None)**
Update a system group.
....
name: Name of the system group.
descr: Description of the system group.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: details of the system group
=== systems_get_minion_id_map
**(username=None, password=None, refresh=False)**
Returns a map from minion ID to Uyuni system ID for all systems a user has access to
....
username: username to authenticate
password: password for user
refresh: Get new data from server, ignoring values in local context cache
....
return: map between minion ID and system ID for all systems accessible by the authenticated user
=== user_add_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**
Add system groups to a user's list of assigned system groups.
....
login: user id to look for
server_group_names: system groups to add to the list of assigned system groups
set_default: whether the system groups should also be added to the user's list of default system groups
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== user_add_role
**(login, role, org_admin_user=None, org_admin_password=None)**
Adds a role to an Uyuni user.
....
login: user id to look for
role: role to be added to the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== user_create
**(login, password, email, first_name, last_name, use_pam_auth=False, org_admin_user=None, org_admin_password=None)**
Create an Uyuni user.
....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
use_pam_auth: if you wish to use PAM authentication for this user
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
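An illustrative invocation (module name `uyuni` is an assumption; all values are placeholders):
....
salt-call uyuni.user_create login=jdoe password=secret email=jdoe@example.com \
    first_name=John last_name=Doe org_admin_user=orgadmin org_admin_password=secret
....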
=== user_delete
**(login, org_admin_user=None, org_admin_password=None)**
Deletes an Uyuni user
....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== user_get_details
**(login, password=None, org_admin_user=None, org_admin_password=None)**
Get details of an Uyuni user
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If neither user credentials nor organization admin credentials are provided, credentials from the pillar will be used
....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: The user information
=== user_list_assigned_system_groups
**(login, org_admin_user=None, org_admin_password=None)**
Returns the system groups that a user can administer.
....
login: user id to look for
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: List of system groups that a user can administer
=== user_list_roles
**(login, password=None, org_admin_user=None, org_admin_password=None)**
Returns an Uyuni user's roles.
If password is provided as a parameter, then it will be used to authenticate
If no user credentials are provided, organization administrator credentials will be used
If neither user credentials nor organization admin credentials are provided, credentials from the pillar are used
....
login: user id to look for
password: password for the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: List of user roles assigned
=== user_list_users
**(org_admin_user=None, org_admin_password=None)**
Return all Uyuni users visible to the authenticated user.
....
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: all users visible to the authenticated user
=== user_remove_assigned_system_groups
**(login, server_group_names, set_default=False, org_admin_user=None, org_admin_password=None)**
Remove system groups from a user's list of assigned system groups.
....
login: user id to look for
server_group_names: system groups to remove from the list of assigned system groups
set_default: whether the system groups should also be removed from the user's list of default system groups
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== user_remove_role
**(login, role, org_admin_user=None, org_admin_password=None)**
Remove a role from an Uyuni user.
....
login: user id to look for
role: role to be removed from the user
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== user_set_details
**(login, password, email, first_name=None, last_name=None, org_admin_user=None, org_admin_password=None)**
Update an Uyuni user.
....
login: user id to look for
password: password for the user
email: user email address
first_name: user first name
last_name: user last name
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_get_details
**(id, org_admin_user=None, org_admin_password=None)**
Get details of an Uyuni Activation Key
....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: Activation Key information
=== activation_key_delete
**(id, org_admin_user=None, org_admin_password=None)**
Deletes an Uyuni Activation Key
....
id: the Activation Key ID
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_create
**(key, description, base_channel_label='', usage_limit=0, system_types=[], universal_default=False, org_admin_user=None, org_admin_password=None)**
Creates an Uyuni Activation Key
....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
system_types: system types to be assigned.
Can be one of: 'virtualization_host', 'container_build_host',
'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
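An illustrative invocation (module name `uyuni` is an assumption; channel label and credentials are placeholders taken from the example states shipped with this package):
....
salt-call uyuni.activation_key_create key=my-key description="Key created via Salt" \
    base_channel_label=sle-product-sles15-sp2-pool-x86_64 \
    org_admin_user=orgadmin org_admin_password=secret
....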
=== activation_key_set_details
**(key, description=None, contact_method=None, base_channel_label=None, usage_limit=None, universal_default=False, org_admin_user=None, org_admin_password=None)**
Updates an Uyuni Activation Key
....
key: activation key name
description: activation key description
base_channel_label: base channel to be used
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
usage_limit: activation key usage limit; 0 means unlimited usage
universal_default: sets this activation key as organization universal default
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_add_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**
Add a list of entitlements to an activation key.
....
key: activation key name
system_types: list of system types to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_remove_entitlements
**(key, system_types, org_admin_user=None, org_admin_password=None)**
Remove a list of entitlements from an activation key.
....
key: activation key name
system_types: list of system types to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_add_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**
Add child channels to an activation key.
....
key: activation key name
child_channels: List of child channels to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_remove_child_channels
**(key, child_channels, org_admin_user=None, org_admin_password=None)**
Remove child channels from an activation key.
....
key: activation key name
child_channels: List of child channels to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_check_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**
Return the status of the 'configure_after_registration' flag for an Activation Key.
....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, true if enabled, false if disabled
=== activation_key_enable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**
Enables the 'configure_after_registration' flag for an Activation Key.
....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_disable_config_deployment
**(key, org_admin_user=None, org_admin_password=None)**
Disables the 'configure_after_registration' flag for an Activation Key.
....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_add_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**
Add a list of packages to an activation key.
....
key: activation key name
packages: list of packages to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_remove_packages
**(key, packages, org_admin_user=None, org_admin_password=None)**
Remove a list of packages from an activation key.
....
key: activation key name
packages: list of packages to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_add_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**
Add a list of server groups to an activation key.
....
key: activation key name
server_groups: list of server groups to be added
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_remove_server_groups
**(key, server_groups, org_admin_user=None, org_admin_password=None)**
Remove a list of server groups from an activation key.
....
key: activation key name
server_groups: list of server groups to be removed
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
=== activation_key_list_config_channels
**(key, org_admin_user=None, org_admin_password=None)**
List configuration channels associated with an activation key.
....
key: activation key name
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: List of configuration channels
=== activation_key_set_config_channels
**(keys, config_channel_label, org_admin_user=None, org_admin_password=None)**
Replace the existing set of configuration channels on the given activation keys.
Channels are ranked by their order in the array.
....
keys: list of activation key names
config_channel_label: list of configuration channel labels
org_admin_user: organization admin username
org_admin_password: organization admin password
....
return: boolean, True indicates success
070701000000AE000081B400000000000000000000000168EFD6640000159D000000000000000000000000000000000000003A00000000susemanager-sls/src/doc/uyuni_config_state_module_doc.txt=== group_absent
**(name, org_admin_user=None, org_admin_password=None)**
Ensure an Uyuni system group is not present
....
name: Group Name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
=== group_present
**(name, description, target=None, target_type='glob', org_admin_user=None, org_admin_password=None)**
Create or update an Uyuni system group
....
name: group name
description: group description
target: target expression used to filter which minions should be part of the group
target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
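A minimal state sketch, mirroring the example states shipped with this package (group name, target and credentials are placeholders):
....
system_group_httpd:
  uyuni.group_present:
    - name: httpd_servers
    - description: Apache web servers
    - target: "*httpd*"
    - org_admin_user: orgadmin
    - org_admin_password: secret
....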
=== org_absent
**(name, admin_user=None, admin_password=None)**
Ensure an Uyuni organization is not present
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
name: organization name
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: dict for Salt communication
=== org_present
**(name, org_admin_user, org_admin_password, first_name, last_name, email, pam=False, admin_user=None, admin_password=None)**
Create or update an Uyuni organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
....
name: organization name
org_admin_user: organization admin user
org_admin_password: organization admin password
first_name: organization admin first name
last_name: organization admin last name
email: organization admin email
pam: organization admin pam authentication
admin_user: uyuni admin user
admin_password: uyuni admin password
....
return: dict for Salt communication
=== org_trust
**(name, org_name, trusts, admin_user=None, admin_password=None)**
Establish trust relationships between Uyuni organizations.
....
name: state name
org_name: Organization name
trusts: list of organization names to trust
admin_user: administrator username
admin_password: administrator password
....
return: dict for Salt communication
=== user_absent
**(name, org_admin_user=None, org_admin_password=None)**
Ensure an Uyuni user is not present.
....
name: user login name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
=== user_channels
**(name, password, manageable_channels=[], subscribable_channels=[], org_admin_user=None, org_admin_password=None)**
Ensure a user has access to the specified channels
....
name: user login name
password: user password
manageable_channels: channels the user can manage
subscribable_channels: channels the user can subscribe to
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
=== user_present
**(name, password, email, first_name, last_name, use_pam_auth=False, roles=None, system_groups=None, org_admin_user=None, org_admin_password=None)**
Create or update an Uyuni user
....
name: user login name
password: desired password for the user
email: valid email address
first_name: First name
last_name: Last name
use_pam_auth: if you wish to use PAM authentication for this user
roles: roles to assign to the user
system_groups: system groups to assign to the user
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
=== activation_key_present
**(name, description, base_channel='', usage_limit=0, contact_method='default', system_types=[],
universal_default=False, child_channels=[], configuration_channels=[], packages=[],
server_groups=[], configure_after_registration=False, org_admin_user=None, org_admin_password=None)**
Ensure an Uyuni Activation Key is present.
....
name: the Activation Key name
description: the Activation Key description
base_channel: base channel to be used
usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
system_types: system types to be assigned.
Can be one of: 'virtualization_host', 'container_build_host',
'monitoring_entitled', 'osimage_build_host'
universal_default: sets this activation key as organization universal default
child_channels: list of child channels to be assigned
configuration_channels: list of configuration channels to be assigned
packages: list of packages which will be installed
server_groups: list of server groups to associate with the activation key
configure_after_registration: deploy configuration files to systems on registration
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
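A minimal state sketch, mirroring the example states shipped with this package (names, channel label and credentials are placeholders):
....
define_custom_activation_key:
  uyuni.activation_key_present:
    - name: my-suse
    - description: "My Activation Key created via Salt"
    - base_channel: sle-product-sles15-sp2-pool-x86_64
    - usage_limit: 10
    - system_types:
      - virtualization_host
    - configure_after_registration: true
    - org_admin_user: orgadmin
    - org_admin_password: secret
....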
=== activation_key_absent
**(name, org_admin_user=None, org_admin_password=None)**
Ensure an Uyuni Activation Key is not present.
....
name: the Activation Key name
org_admin_user: organization administrator username
org_admin_password: organization administrator password
....
return: dict for Salt communication
070701000000AF000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000001D00000000susemanager-sls/src/examples070701000000B0000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000002200000000susemanager-sls/src/examples/ldap070701000000B1000081B400000000000000000000000168EFD6640000020B000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_ldap.yamlldap-roles:
server: ldap.example.com
port: 389
anonymous: true
mode: map
dn: ou=permissions,dc=example,dc=com
filter: '(objectclass=groupOfNames)'
attrs:
- cn
- dn
ldap-users:
server: ldap.example.com
port: 389
anonymous: true
mode: map
dn: ou=people,dc=example,dc=com
filter: '(objectclass=person)'
attrs:
- givenName
- sn
- mail
- uid
- ou
- dn
lists:
- memberOf
search_order:
- ldap-roles
- ldap-users
070701000000B2000081B400000000000000000000000168EFD664000000BC000000000000000000000000000000000000003300000000susemanager-sls/src/examples/ldap/pillar_orgs.yamluyuni:
orgs:
- org_id: MY-ORG
org_admin_user: org_admin_user
org_admin_password: org_admin_pass
first_name: admin
last_name: admin
email: admin@org.com
070701000000B3000081B400000000000000000000000168EFD66400000620000000000000000000000000000000000000003700000000susemanager-sls/src/examples/ldap/uyuni_users_ldap.sls
## Create organizations based on static pillar data
{% set org_auth = {} %}
{% for org in pillar.get('uyuni', {}).get('orgs', []) %}
{{org['org_id']}}:
uyuni.org_present:
- name: {{org['org_id']}}
- org_admin_user: {{org['org_admin_user']}}
- org_admin_password: {{org['org_admin_password']}}
- first_name: {{org['first_name']}}
- last_name: {{org['last_name']}}
- email: {{org['email']}}
{% set _ = org_auth.update({org.org_id: {'org_admin_user': org.org_admin_user, 'org_admin_password': org.org_admin_password }}) %}
{% endfor %}
## Load available roles into a local map variable;
## these were extracted from LDAP into the pillar
{% set roles_map = {} %}
{% for role in pillar.get('ldap-roles', []) %}
{% set _ = roles_map.update({role.dn: role.cn}) %}
{% endfor %}
{% for user in pillar.get('ldap-users', []) %}
{% set admin_user = None %}
{% set admin_password = None %}
{% if org_auth[user['ou']] %}
{% set admin_user = org_auth[user['ou']].org_admin_user %}
{% set admin_password = org_auth[user['ou']].org_admin_password %}
{% endif %}
{{user['uid']}}:
uyuni.user_present:
- name: {{user['uid']}}
- password: 'dummy_local_pass'
- email: {{user['mail']}}
- first_name: {{user['givenName']}}
- last_name: {{user['sn']}}
- use_pam_auth: true
- org_admin_user: {{admin_user}}
- org_admin_password: {{admin_password}}
{% if user['memberOf'] %}
- roles:
{% for user_role in user['memberOf'] %}
- {{ roles_map[user_role] }}
{% endfor %}
{% endif %}
{% endfor %}
070701000000B4000081B400000000000000000000000168EFD664000007BA000000000000000000000000000000000000003700000000susemanager-sls/src/examples/uyuni_config_hardcode.sls## manage orgs
my_org:
uyuni.org_present:
- name: my_org
- org_admin_user: my_org_user
- org_admin_password: my_org_user
- first_name: first_name
- last_name: last_name
- email: my_org_user@org.com
- admin_user: admin
- admin_password: admin
org_trust_present:
uyuni.org_trust:
- org_name: SUSE
- trusts:
- my_org
# manage system groups
system_group_httpd:
uyuni.group_present:
- name: httpd_servers
- description: httpd_servers
- target: "*httpd*"
- org_admin_user: my_org_user
- org_admin_password: my_org_user
# manage users
user_1:
uyuni.user_present:
- name: user1
- password: user1
- email: user1@test.com
- first_name: first
- last_name: last
- org_admin_user: my_org_user
- org_admin_password: my_org_user
- roles: ["system_group_admin", "channel_admin"]
- system_groups:
- httpd_servers
user_1_channels:
## rename it to user_channels (without _present)
uyuni.user_channels:
- name: user1
- password: user1
- org_admin_user: my_org_user
- org_admin_password: my_org_user
- manageable_channels:
- my_local_channel
- subscribable_channels:
- new_local
define_custom_activation_key:
uyuni.activation_key_present:
- name: my-suse
- description: "My Activation Key created via Salt"
- org_admin_user: my_org_user
- org_admin_password: my_org_user
- base_channel: sle-product-sles15-sp2-pool-x86_64
- child_channels:
- sle-module-server-applications15-sp2-pool-x86_64
- sle-module-server-applications15-sp2-updates-x86_64
- configuration_channels:
- firewall
- packages:
- name: emacs
arch: x86_64
- server_groups:
- httpd_servers
- usage_limit: 10
- system_types:
- virtualization_host
- configure_after_registration: true
070701000000B5000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001B00000000susemanager-sls/src/grains070701000000B6000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002700000000susemanager-sls/src/grains/__init__.py070701000000B7000081B400000000000000000000000168EFD66400000B91000000000000000000000000000000000000002800000000susemanager-sls/src/grains/cpe_grain.py# SPDX-FileCopyrightText: 2023-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Custom 'cpe' grain.
This grain is tightly coupled to core grains and upstreamed in
https://github.com/saltstack/salt/pull/65905.
"""
import re
def get_cpe_grain(grains):
ret = {"cpe": ""}
os_release = _parse_os_release("/etc/os-release")
cpe = os_release.get("CPE_NAME")
if cpe:
ret["cpe"] = cpe
else:
derived_cpe = _derive_cpe(grains)
if derived_cpe:
ret["cpe"] = derived_cpe
return ret
# Copy-pasted from https://github.com/saltstack/salt/blame/master/salt/grains/core.py
def _parse_os_release(*os_release_files):
"""
Parse os-release and return a parameter dictionary
This function will behave identical to
platform.freedesktop_os_release() from Python >= 3.10, if
called with ("/etc/os-release", "/usr/lib/os-release").
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
"""
# These fields are mandatory fields with well-known defaults
# in practice all Linux distributions override NAME, ID, and PRETTY_NAME.
ret = {"NAME": "Linux", "ID": "linux", "PRETTY_NAME": "Linux"}
errno = None
for filename in os_release_files:
try:
with open(filename, "r", encoding="utf-8") as ifile:
regex = re.compile("^([\\w]+)=(?:'|\")?(.*?)(?:'|\")?$")
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r"\1", match.group(2)
)
break
except OSError as error:
errno = error.errno
else:
# pylint: disable-next=inconsistent-quotes
raise OSError(errno, f"Unable to read files {', '.join(os_release_files)}")
return ret
def _derive_cpe(grains):
"""
Try to derive the CPE of the system based on the collected core grains.
PS: This function is not guaranteed to derive the correct CPE, as there can be many
variants of the same OS that require different CPEs, for example release and beta versions.
Currently the function exclusively derives CPEs for Debian and Ubuntu.
These two operating systems are the primary focus, as they are intended to be supported by
the OVAL-based CVE auditing project, for which this modification is intended.
TODO: reference the OVAL code
"""
os = grains.get("os")
os_release = grains.get("osrelease", "")
if os == "Debian":
return "cpe:/o:debian:debian_linux:" + os_release
elif os == "Ubuntu":
return "cpe:/o:canonical:ubuntu_linux:" + os_release
else:
return None
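# Illustrative example (an assumption for documentation, not part of the module API):
# with core grains {"os": "Ubuntu", "osrelease": "22.04"} and no CPE_NAME in
# /etc/os-release, get_cpe_grain() returns
# {"cpe": "cpe:/o:canonical:ubuntu_linux:22.04"}.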
070701000000B8000081B400000000000000000000000168EFD66400002E04000000000000000000000000000000000000002600000000susemanager-sls/src/grains/cpuinfo.py# pylint: disable=missing-module-docstring,unused-import
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
import logging
import salt.modules.cmdmod
import os
import re
try:
from salt.utils.path import which_bin as _which_bin
except ImportError:
from salt.utils import which_bin as _which_bin
from salt.exceptions import CommandExecutionError
__salt__ = {
"cmd.run_all": salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
def _lscpu_count_sockets(feedback):
"""
Count CPU sockets using lscpu.
:return: {"cpusockets": <count>} on success, otherwise None
"""
lscpu = _which_bin(["lscpu"])
if lscpu is not None:
try:
log.debug("Trying lscpu to get CPU socket count")
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} -p".format(lscpu),
output_loglevel="quiet",
)
if ret["retcode"] == 0:
max_socket_index = -1
for line in ret["stdout"].strip().splitlines():
if line.startswith("#"):
continue
socket_index = int(line.split(",")[2])
if socket_index > max_socket_index:
max_socket_index = socket_index
if max_socket_index > -1:
return {"cpusockets": (1 + max_socket_index)}
# pylint: disable-next=broad-exception-caught
except Exception as error:
# pylint: disable-next=consider-using-f-string
feedback.append("lscpu: {0}".format(str(error)))
log.debug(str(error))
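# Illustrative note: `lscpu -p` prints CSV lines such as "0,0,0,0,,0,0,0,0"
# (CPU,Core,Socket,Node,...); the parser above reads the third field as the
# socket index and reports the highest index plus one as the socket count.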
def _cpuinfo_count_sockets(feedback):
"""
Count CPU sockets by parsing /proc/cpuinfo.
:return: {"cpusockets": <count>} on success, otherwise None
"""
physids = set()
if os.access("/proc/cpuinfo", os.R_OK):
try:
log.debug("Trying /proc/cpuinfo to get CPU socket count")
# pylint: disable-next=unspecified-encoding
with open("/proc/cpuinfo") as handle:
for line in handle.readlines():
if line.strip().startswith("physical id"):
comps = line.split(":")
if len(comps) < 2 or len(comps[1]) < 2:
continue
physids.add(comps[1].strip())
if physids:
return {"cpusockets": len(physids)}
# pylint: disable-next=broad-exception-caught
except Exception as error:
log.debug(str(error))
# pylint: disable-next=consider-using-f-string
feedback.append("/proc/cpuinfo: {0}".format(str(error)))
else:
feedback.append("/proc/cpuinfo: format is not applicable")
def _dmidecode_count_sockets(feedback):
"""
Count CPU sockets using dmidecode.
:return: {"cpusockets": <count>} on success, otherwise None
"""
dmidecode = _which_bin(["dmidecode"])
if dmidecode is not None:
try:
log.debug("Trying dmidecode to get CPU socket count")
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} -t processor".format(dmidecode),
output_loglevel="quiet",
)
if ret["retcode"] == 0:
count = 0
for line in ret["stdout"].strip().splitlines():
if "Processor Information" in line:
count += 1
if count:
return {"cpusockets": count}
# pylint: disable-next=broad-exception-caught
except Exception as error:
log.debug(str(error))
# pylint: disable-next=consider-using-f-string
feedback.append("dmidecode: {0}".format(str(error)))
else:
feedback.append("dmidecode: executable not found")
def cpusockets():
"""
Returns the number of CPU sockets.
"""
feedback = list()
grains = (
_lscpu_count_sockets(feedback)
or _cpuinfo_count_sockets(feedback)
or _dmidecode_count_sockets(feedback)
)
if not grains:
log.warning(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"Could not determine CPU socket count: {0}".format(" ".join(feedback))
)
return grains
def total_num_cpus():
"""returns the total number of CPU in system.
/proc/cpuinfo shows the number of active CPUs
On s390x this can be different from the number of present CPUs in a system
See IBM redbook: "Using z/VM for Test and Development Environments: A Roundup" chapter 3.5
"""
re_cpu = re.compile(r"^cpu[0-9]+$")
sysdev = "/sys/devices/system/cpu/"
return {
"total_num_cpus": len(
[
cpud
for cpud in (os.path.exists(sysdev) and os.listdir(sysdev) or list())
if re_cpu.match(cpud)
]
)
}
def cpu_data():
"""
Returns the CPU model, vendor ID, and other data that may not be in cpuinfo.
"""
lscpu = _which_bin(["lscpu"])
if lscpu is not None:
try:
log.debug("Trying lscpu to get CPU data")
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0}".format(lscpu),
env={"LC_ALL": "C"},
output_loglevel="quiet",
)
if ret["retcode"] == 0:
lines = ret["stdout"].splitlines()
name_map = {
"Model name": "cpu_model",
"Vendor ID": "cpu_vendor",
"NUMA node(s)": "cpu_numanodes",
"Stepping": "cpu_stepping",
"Core(s) per socket": "cpu_cores",
"Socket(s)": "cpu_sockets",
"Thread(s) per core": "cpu_threads",
"CPU(s)": "cpu_sum",
}
values = {}
for line in lines:
parts = [l.strip() for l in line.split(":", 1)]
if len(parts) == 2 and parts[0] in name_map:
values[name_map[parts[0]]] = parts[1]
log.debug(values)
return values
else:
log.warning("lscpu does not support -J option")
except (CommandExecutionError, ValueError) as error:
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.warning("lscpu: {0}".format(str(error)))
# -----------------------------------------------------------------------------
# Grain for Architecture-Specific CPU Data
# -----------------------------------------------------------------------------
def _read_file(path):
"""
Helper to read a file and return its content. Returns empty string if not found.
"""
try:
# pylint: disable-next=unspecified-encoding
with open(path, "r", errors="replace") as f:
return f.read()
except FileNotFoundError:
return ""
def _exact_string_match(key, text):
"""
Extract a value based on a key in the text using regex.
"""
# pylint: disable-next=consider-using-f-string
match = re.search(r"{}\s*:\s*(.*)".format(re.escape(key)), text)
return match.group(1).strip() if match else ""
def _add_device_tree(specs):
"""
Attempts to read the device tree from predefined paths and adds it to the specs dict.
"""
device_tree_paths = [
"/sys/firmware/devicetree/base/compatible",
"/sys/firmware/devicetree/base/hypervisor/compatible",
]
for path in device_tree_paths:
content = _read_file(path)
if content:
compatible_strings = [s for s in content.split("\x00") if s]
specs["device_tree"] = ",".join(compatible_strings)
break
def _add_ppc64_extras(specs):
"""
Adds PowerPC specific details.
"""
_add_device_tree(specs)
lparcfg_content = _read_file("/proc/ppc64/lparcfg")
if lparcfg_content:
match = re.search(r"shared_processor_mode\s*=\s*(\d+)", lparcfg_content)
if match:
specs["lpar_mode"] = "shared" if match.group(1) == "1" else "dedicated"
def _add_arm64_extras(specs):
"""
Adds ARM64-specific details. It first checks for Device Tree information.
If not found, it falls back to dmidecode for ACPI-based systems.
"""
_add_device_tree(specs)
if "device_tree" in specs:
return
dmidecode = _which_bin(["dmidecode"])
if not dmidecode:
log.debug("dmidecode executable not found, skipping for ARM64 extras.")
return
try:
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} -t processor".format(dmidecode),
output_loglevel="quiet",
)
if ret["retcode"] == 0:
output = ret["stdout"]
family = _exact_string_match("Family", output)
manufacturer = _exact_string_match("Manufacturer", output)
signature = _exact_string_match("Signature", output)
if family or manufacturer or signature:
specs["family"] = family
specs["manufacturer"] = manufacturer
specs["signature"] = signature
else:
log.warning("dmidecode failed for ARM64 extras: %s", ret["stderr"])
except (CommandExecutionError, OSError) as e:
log.warning("Failed to retrieve arm64 CPU details via dmidecode: %s", str(e))
def _add_z_systems_extras(specs):
"""
Collects extended metadata for z Systems based on `read_values -s`.
"""
read_values = _which_bin(["read_values"])
if not read_values:
log.warning("read_values executable not found, skipping for z Systems extras.")
return
try:
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} -s".format(read_values),
output_loglevel="quiet",
)
if ret["retcode"] == 0:
output = ret["stdout"]
# Identify z architecture layer
for candidate in ("VM00", "LPAR"):
if candidate in output:
layer_id = candidate
break
else:
return
fields = {
"type": "Type",
"type_name": "Type Name",
"layer_type": f"{layer_id} Name",
}
for key, label in fields.items():
value = _exact_string_match(label, output)
if value:
specs[key] = value
else:
log.warning("read_values failed for z Systems extras: %s", ret["stderr"])
except (CommandExecutionError, OSError):
log.warning("Failed to retrieve z System CPU details.", exc_info=True)
def _get_architecture():
"""
Returns the system architecture.
"""
try:
ret = __salt__["cmd.run_all"]("uname -m", output_loglevel="quiet")
return ret["stdout"].strip() if ret.get("retcode") == 0 else "unknown"
except (CommandExecutionError, OSError):
log.warning("Failed to determine system architecture.", exc_info=True)
return "unknown"
def arch_specs():
"""
Returns extended CPU architecture-specific metadata.
This function is designed to be called to generate a Salt grain.
"""
specs = {}
arch = _get_architecture()
if arch in ["ppc64", "ppc64le"]:
_add_ppc64_extras(specs)
elif arch in ["arm64", "aarch64"]:
_add_arm64_extras(specs)
elif arch.startswith("s390"):
_add_z_systems_extras(specs)
return {"cpu_arch_specs": specs}
070701000000B9000081B400000000000000000000000168EFD664000007C4000000000000000000000000000000000000002900000000susemanager-sls/src/grains/mgr_server.py# SPDX-FileCopyrightText: 2022-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Grains for Mgr Server
"""
import logging
import os
log = logging.getLogger(__name__)
RHNCONF = "/etc/rhn/rhn.conf"
RHNCONFDEF = "/usr/share/rhn/config-defaults/rhn.conf"
RHNWEBCONF = "/usr/share/rhn/config-defaults/rhn_web.conf"
def _simple_parse_rhn_conf(cfile):
result = {}
if not os.path.exists(cfile):
return result
# pylint: disable-next=unspecified-encoding
with open(cfile, "r") as config:
for line in config.readlines():
line = line.strip()
if not line or line[0] == "#":
continue
k, v = line.split("=", 1)
result[k.strip()] = v.strip() or None
return result
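# Illustrative example: a rhn.conf line "report_db_host = localhost" yields
# {"report_db_host": "localhost"}; keys with empty values are stored as None.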
def server_grains():
"""Returns grains relevant for Uyuni/SUMA server."""
grains = {"is_mgr_server": False}
config = _simple_parse_rhn_conf(RHNCONF)
if config.get("web.satellite", "0") == "1":
grains["is_mgr_server"] = True
if config.get("report_db_host", False) and config.get("report_db_name", False):
grains["has_report_db"] = True
grains["report_db_host"] = config.get("report_db_host")
grains["report_db_name"] = config.get("report_db_name")
grains["report_db_port"] = config.get("report_db_port", "5432")
else:
grains["has_report_db"] = False
rhndef = _simple_parse_rhn_conf(RHNCONFDEF)
if rhndef.get("product_name", "uyuni") in [
"SUSE Multi-Linux Manager",
"SUSE Manager",
]:
grains["is_uyuni"] = False
else:
grains["is_uyuni"] = True
webconfig = _simple_parse_rhn_conf(RHNWEBCONF)
if grains["is_uyuni"]:
version = webconfig.get("web.version.uyuni")
else:
version = webconfig.get("web.version")
if version:
grains["version"] = version.split()[0]
return grains
070701000000BA000081B400000000000000000000000168EFD664000017C3000000000000000000000000000000000000002B00000000susemanager-sls/src/grains/public_cloud.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2019-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
This grain module is only loaded in case of a public cloud instance.
Supported Instances: AWS EC2, Azure and Google Compute Engine instances
Returns a grain called "instance_id" containing the virtual instance ID
according to the Public Cloud provider. The data is gathered using the
internal API available from within the instance.
Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
Based on: https://docs.saltstack.com/en/latest/ref/grains/all/salt.grains.metadata.html
"""
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import os
import socket
from multiprocessing.pool import ThreadPool
import logging
# Import salt libs
import salt.utils.http as http
# Internal metadata API information
INTERNAL_API_IP = "169.254.169.254"
# pylint: disable-next=consider-using-f-string
HOST = "http://{0}/".format(INTERNAL_API_IP)
INSTANCE_ID = None
AMAZON_URL_PATH = "latest/meta-data/"
AMAZON_TOKEN_URL_PATH = "latest/api/token"
AZURE_URL_PATH = "metadata/instance/compute/"
AZURE_API_ARGS = "?api-version=2017-08-01&format=text"
GOOGLE_URL_PATH = "computeMetadata/v1/instance/"
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
global INSTANCE_ID
log.debug("Checking if minion is running in the public cloud")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.1)
result = sock.connect_ex((INTERNAL_API_IP, 80))
if result != 0:
return False
def _do_api_request(data):
opts = {
"http_connect_timeout": 0.1,
"http_request_timeout": 0.1,
}
api_token = None
header_dict = data[2]
try:
if data[0] == "amazon":
token_ret = http.query(
os.path.join(HOST, AMAZON_TOKEN_URL_PATH),
status=True,
header_dict={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
method="PUT",
raise_error=False,
opts=opts,
)
if token_ret.get("status") == 200:
api_token = token_ret.get("body")
if api_token:
if not header_dict:
header_dict = {}
header_dict.update({"X-aws-ec2-metadata-token": api_token})
ret = {
data[0]: http.query(
data[1],
status=True,
header_dict=header_dict,
raise_error=False,
opts=opts,
)
}
if ret.get("amazon", {}).get("status") == 200:
ret.get("amazon", {})["api_token"] = api_token
# pylint: disable-next=bare-except
except:
ret = {data[0]: dict()}
return ret
api_check_dict = [
("amazon", os.path.join(HOST, AMAZON_URL_PATH), None),
("google", os.path.join(HOST, GOOGLE_URL_PATH), {"Metadata-Flavor": "Google"}),
(
"azure",
os.path.join(HOST, AZURE_URL_PATH) + AZURE_API_ARGS,
{"Metadata": "true"},
),
]
api_ret = {}
results = []
try:
pool = ThreadPool(3)
results = pool.map(_do_api_request, api_check_dict)
pool.close()
pool.join()
# pylint: disable-next=broad-exception-caught
except Exception as exc:
# pylint: disable-next=import-outside-toplevel
import traceback
log.error(traceback.format_exc())
log.error(
"Exception while creating a ThreadPool for accessing metadata API: %s", exc
)
for i in results:
api_ret.update(i)
if _is_valid_endpoint(api_ret["amazon"], "instance-id"):
api_token = api_ret["amazon"].get("api_token")
INSTANCE_ID = http.query(
os.path.join(HOST, AMAZON_URL_PATH, "instance-id"),
raise_error=False,
header_dict={"X-aws-ec2-metadata-token": api_token} if api_token else None,
)["body"]
return True
elif _is_valid_endpoint(api_ret["azure"], "vmId"):
INSTANCE_ID = http.query(
os.path.join(HOST, AZURE_URL_PATH, "vmId") + AZURE_API_ARGS,
header_dict={"Metadata": "true"},
raise_error=False,
)["body"]
return True
elif _is_valid_endpoint(api_ret["google"], "id"):
INSTANCE_ID = http.query(
os.path.join(HOST, GOOGLE_URL_PATH, "id"),
header_dict={"Metadata-Flavor": "Google"},
raise_error=False,
)["body"]
return True
return False
def _is_valid_endpoint(response, tag):
if not response.get("status", 0) == 200:
return False
elif not tag in response.get("body", ""):
return False
elif " " in response.get("body", ""):
return False
else:
return True
def _is_valid_instance_id(id_str):
if not id_str:
return False
if os.linesep in id_str:
return False
elif " " in id_str:
return False
elif len(id_str) > 128:
return False
else:
return True
def instance_id():
# pylint: disable-next=global-variable-not-assigned
global INSTANCE_ID
ret = {}
if _is_valid_instance_id(INSTANCE_ID):
log.debug(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"This minion is running in the public cloud. Adding instance_id to grains: {}".format(
INSTANCE_ID
)
)
ret["instance_id"] = INSTANCE_ID
else:
log.error(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"The obtained public cloud instance id doesn't seems correct: {}".format(
INSTANCE_ID
)
)
log.error("Skipping")
return ret
070701000000BB000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001C00000000susemanager-sls/src/modules070701000000BC000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002800000000susemanager-sls/src/modules/__init__.py070701000000BD000081B400000000000000000000000168EFD66400001ECD000000000000000000000000000000000000002A00000000susemanager-sls/src/modules/appstreams.py# SPDX-FileCopyrightText: 2024-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""AppStreams module for SUSE Multi-Linux Manager"""
import re
import subprocess
import logging
try:
from salt.utils.path import which as _which
except ImportError:
from salt.utils import which as _which
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only works on RH-like systems having 'dnf' available
"""
# pylint: disable-next=superfluous-parens
if not (_which("dnf")):
return (False, "dnf is not available on the system")
return True
def _get_enabled_module_names():
# Run the DNF command to list enabled modules
command = ["dnf", "module", "list", "--enabled", "--quiet"]
# pylint: disable-next=subprocess-run-check
result = subprocess.run(command, capture_output=True, text=True)
# Check if the command was successful
if result.returncode == 0:
try:
# Split the text output by lines
lines = result.stdout.splitlines()
# Find the indexes where the actual module information starts
start_indexes = [
i for i, line in enumerate(lines) if "Name" in line and "Stream" in line
]
all_module_names = []
if start_indexes:
for start_index in start_indexes:
# Find the index where the module information ends
end_index = next(
(
i
for i, line in enumerate(lines)
if not line and i > start_index
),
len(lines),
)
# Extract module names
module_names = [
f"{parts[0]}:{parts[1]}"
for line in lines[
start_index + 1 : end_index
] # Skip the header line
for parts in [line.split()]
]
all_module_names += module_names
return all_module_names
else:
log.error("Error: Unable to find module information in the output.")
except (IndexError, ValueError) as e:
# pylint: disable-next=logging-fstring-interpolation
log.error(f"Error parsing output: {e}")
else:
# pylint: disable-next=logging-fstring-interpolation
log.error(f"Error running DNF command: {result.stderr}")
def _parse_nsvca(module_info_output):
attrs = {
"name": re.compile(r"^Name\s+:\s+(\S+)"),
"stream": re.compile(r"^Stream\s+:\s+(\S+)"),
"version": re.compile(r"^Version\s+:\s+(\S+)"),
"context": re.compile(r"^Context\s+:\s+(\S+)"),
"architecture": re.compile(r"^Architecture\s+:\s+(\S+)"),
}
result = {}
for line in module_info_output:
for attr, regex in attrs.items():
if result.get(attr):
continue
match = regex.match(line)
if match:
result[attr] = match.group(1)
return result if result.keys() == attrs.keys() else None
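# Illustrative example: given the `dnf module info` lines
#   Name         : perl-IO-Socket-SSL
#   Stream       : 2.066 [d][e][a]
#   Version      : 8090020231016070024
#   Context      : 88fd4976
#   Architecture : x86_64
# _parse_nsvca returns {"name": "perl-IO-Socket-SSL", "stream": "2.066",
# "version": "8090020231016070024", "context": "88fd4976", "architecture": "x86_64"};
# it returns None when any of the five attributes is missing.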
def _get_module_info(module_names):
# Run the DNF command to get module info for all active modules
# Parse all modules if no active ones are present
command = ["dnf", "module", "info", "--quiet"] + module_names
# pylint: disable-next=subprocess-run-check
result = subprocess.run(command, capture_output=True, text=True)
if result.returncode != 0:
# pylint: disable-next=logging-fstring-interpolation
log.error(f"Error running DNF command: {result.stderr}")
return []
# Active modules are marked with [a]
# Example output
# Name : perl-IO-Socket-SSL
# Stream : 2.066 [d][e][a]
# Version : 8090020231016070024
# Context : 88fd4976
# Architecture : x86_64
# Profiles : common [d]
# Default profiles : common
# Repo : susemanager:rockylinux8-x86_64-appstream
# Summary : Perl library for transparent TLS
# Description : IO::Socket::SSL is a drop-in replacement for ...
# Requires : perl:[5.26]
# : platform:[el8]
# Artifacts : perl-IO-Socket-SSL-0:2.066-4.module+el8.9.0+1517+e71a7a62.noarch
module_info_output = []
for module in re.findall(r"(Name\s+:.*?)(?=\n\s*\n|$)", result.stdout, re.DOTALL):
if re.search(r"Stream\s+:.*\[a\]", module):
module_info_output += module.splitlines()
# Parse all modules, if no active ones were found
if not module_info_output:
module_info_output = result.stdout.splitlines()
nsvca_info_list = []
current_module_info = []
for line in module_info_output:
# Check if the line starts with "Name" to identify the beginning of a new module info
if line.startswith("Name"):
if current_module_info:
nsvca_info = _parse_nsvca(current_module_info)
if nsvca_info:
nsvca_info_list.append(nsvca_info)
# Start collecting info for the new module
current_module_info = [line]
else:
current_module_info.append(line)
# Parse NSVCA information for the last module
if current_module_info:
nsvca_info = _parse_nsvca(current_module_info)
if nsvca_info:
nsvca_info_list.append(nsvca_info)
return nsvca_info_list
def _execute_action(action, appstreams):
"""
Execute the specified action (enable/disable) for the given appstreams.
action
The action to perform (either "enable" or "disable")
appstreams
List or string of appstreams to perform the action on
Returns:
The subprocess.CompletedProcess of the dnf command
"""
if isinstance(appstreams, str):
appstreams = [appstreams]
cmd = ["dnf", "module", action, "-y"] + appstreams
return subprocess.run(cmd, check=False, capture_output=True)
def enable(appstreams):
"""
Enable the specified appstreams using dnf.
appstreams
List or string of appstreams to enable
Returns:
Tuple: (result, comment, changes)
"""
result = True
comment = ""
changes = {}
before = get_enabled_modules()
cmd_result = _execute_action("enable", appstreams)
if cmd_result.returncode == 0:
after = get_enabled_modules()
enabled = [m for m in after if m not in before]
if enabled:
comment = "AppStreams enabled."
changes = {"enabled": enabled}
else:
comment = "Nothing changed."
else:
result = False
comment = cmd_result.stderr.decode("utf-8").strip()
return result, comment, changes
def disable(appstreams):
"""
Disable the specified appstreams using dnf.
appstreams
List or string of appstreams to disable
Returns:
Tuple: (result, comment, changes)
"""
result = True
comment = ""
changes = {}
before = get_enabled_modules()
cmd_result = _execute_action("disable", appstreams)
if cmd_result.returncode == 0:
after = get_enabled_modules()
disabled = [m for m in before if m not in after]
if disabled:
comment = "AppStreams disabled."
changes = {"disabled": disabled}
else:
comment = "Nothing changed."
else:
result = False
comment = cmd_result.stderr.decode("utf-8").strip()
return result, comment, changes
def get_enabled_modules():
enabled_module_names = _get_enabled_module_names()
return _get_module_info(enabled_module_names) if enabled_module_names else []
070701000000BE000081B400000000000000000000000168EFD66400000EE7000000000000000000000000000000000000003100000000susemanager-sls/src/modules/container_runtime.py# SPDX-FileCopyrightText: 2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Container Runtime Detection Module for SaltStack
This module provides functionality to detect the container runtime (e.g., Docker, Podman, Kubernetes)
running on the target system. It works by analyzing system files, environment variables, and
specific indicators to identify the runtime.
Usage:
- Run the module using the Salt CLI:
salt '*' container_runtime.get_container_runtime
- Example return values:
- 'docker' for Docker
- 'podman' for Podman
- None if no container runtime is detected
References:
- Most of the code was ported from
https://github.com/SUSE/connect-ng/blob/main/internal/collectors/container_runtime.go
"""
import os
import re
try:
from salt.utils import fopen
except ImportError:
from salt.utils.files import fopen
RUNTIME_DOCKER = "docker"
RUNTIME_RKT = "rkt"
RUNTIME_NSPAWN = "systemd-nspawn"
RUNTIME_LXC = "lxc"
RUNTIME_LXC_LIBVIRT = "lxc-libvirt"
RUNTIME_OPENVZ = "openvz"
RUNTIME_KUBERNETES = "kube"
RUNTIME_GARDEN = "garden"
RUNTIME_PODMAN = "podman"
RUNTIME_GVISOR = "gvisor"
RUNTIME_FIREJAIL = "firejail"
RUNTIME_NOT_FOUND = "not-found"
CONTAINER_RUNTIMES = [
RUNTIME_DOCKER,
RUNTIME_RKT,
RUNTIME_NSPAWN,
RUNTIME_LXC,
RUNTIME_LXC_LIBVIRT,
RUNTIME_OPENVZ,
RUNTIME_KUBERNETES,
RUNTIME_GARDEN,
RUNTIME_PODMAN,
RUNTIME_GVISOR,
RUNTIME_FIREJAIL,
]
def _detect_container_files():
"""
Detects specific files that indicate the presence of certain container runtimes.
"""
files = [
(RUNTIME_PODMAN, "/run/.containerenv"),
(RUNTIME_DOCKER, "/.dockerenv"),
(RUNTIME_KUBERNETES, "/var/run/secrets/kubernetes.io/serviceaccount"),
]
for runtime, location in files:
if os.path.exists(location):
return runtime
return RUNTIME_NOT_FOUND
def _get_container_runtime(input_string):
"""
Determines the container runtime from the input string.
"""
if not input_string or not input_string.strip():
return RUNTIME_NOT_FOUND
for runtime in CONTAINER_RUNTIMES:
if runtime in input_string:
return runtime
return RUNTIME_NOT_FOUND
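# Illustrative example: a /proc/self/cgroup line such as "12:pids:/docker/<id>"
# contains the substring "docker", so _get_container_runtime returns "docker";
# an empty or unrecognized input yields RUNTIME_NOT_FOUND.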
def _read_file(file_path):
"""
Reads the contents of a file safely.
"""
try:
with fopen(file_path, "r") as f:
return f.read().strip()
# pylint: disable-next=broad-exception-caught
except Exception:
return ""
def get_container_runtime():
"""
Returns the container runtime the process is running in.
"""
cgroups = _read_file("/proc/self/cgroup")
runtime = _get_container_runtime(cgroups)
if runtime != RUNTIME_NOT_FOUND:
return runtime
if os.path.exists("/proc/vz") and not os.path.exists("/proc/bc"):
return RUNTIME_OPENVZ
if os.path.exists("/__runsc_containers__"):
return RUNTIME_GVISOR
cmdline = _read_file("/proc/1/cmdline")
runtime = _get_container_runtime(cmdline)
if runtime != RUNTIME_NOT_FOUND:
return runtime
container_env = os.getenv("container")
runtime = _get_container_runtime(container_env)
if runtime != RUNTIME_NOT_FOUND:
return runtime
systemd_container = _read_file("/run/systemd/container")
runtime = _get_container_runtime(systemd_container)
if runtime != RUNTIME_NOT_FOUND:
return runtime
runtime = _detect_container_files()
if runtime != RUNTIME_NOT_FOUND:
return runtime
# Docker was not detected at this point.
# An overlay mount on "/" may indicate we're under containerd or other runtime.
mounts = _read_file("/proc/mounts")
if re.match("^[^ ]+ / overlay", mounts):
return RUNTIME_KUBERNETES
return None
070701000000BF000081B400000000000000000000000168EFD66400004894000000000000000000000000000000000000002900000000susemanager-sls/src/modules/kiwi_info.py# pylint: disable=missing-module-docstring,unused-import
# SPDX-FileCopyrightText: 2018-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
import salt.exceptions
import logging
import os
import re
import json
log = logging.getLogger(__name__)
# Kiwi version is always in format "MAJOR.MINOR.RELEASE" with numeric values
# Source https://osinside.github.io/kiwi/image_description/elements.html#preferences-version
# Extended to also allow plain MAJOR.MINOR versions, particularly for SL-Micro images
KIWI_VERSION_REGEX = r"\d+\.\d+(\.\d+)?"
# Taken from Kiwi sources https://github.com/OSInside/kiwi/blob/eb2b1a84bf7/kiwi/schema/kiwi.rng#L81
KIWI_ARCH_REGEX = r"(x86_64|i586|i686|ix86|aarch64|arm64|armv5el|armv5tel|armv6hl|armv6l|armv7hl|armv7l|ppc|ppc64|ppc64le|s390|s390x|riscv64)"
# Taken from Kiwi sources https://github.com/OSInside/kiwi/blob/eb2b1a84bf7/kiwi/schema/kiwi.rng#L26
KIWI_NAME_REGEX = r"[a-zA-Z0-9_\-\.]+"
def parse_profile(chroot):
ret = {}
path = os.path.join(chroot, "image", ".profile")
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path):
# pylint: disable-next=undefined-variable
profile = __salt__["cp.get_file_str"](path)
pattern = re.compile(r"^(?P<name>.*?)='(?P<val>.*)'")
for line in profile.splitlines():
match = pattern.match(line)
if match:
ret[match.group("name")] = match.group("val")
return ret
def parse_buildinfo(dest):
ret = {}
path = os.path.join(dest, "kiwi.buildinfo")
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path):
# pylint: disable-next=undefined-variable
profile = __salt__["cp.get_file_str"](path)
pattern_group = re.compile(r"^\[(?P<name>.*)\]")
pattern_val = re.compile(r"^(?P<name>.*?)=(?P<val>.*)")
group = ret
for line in profile.splitlines():
match = pattern_group.match(line)
if match:
group = {}
ret[match.group("name")] = group
match = pattern_val.match(line)
if match:
group[match.group("name")] = match.group("val")
return ret
# fallback for SLES11 Kiwi and for Kiwi NG that does not create the buildinfo file
def guess_buildinfo(dest):
ret = {"main": {}}
# pylint: disable-next=undefined-variable
files = __salt__["file.readdir"](dest)
pattern_basename = re.compile(r"^(?P<basename>.*)\.packages$")
pattern_pxe_initrd = re.compile(r"^initrd-netboot.*")
pattern_pxe_kiwi_ng_initrd = re.compile(r".*\.initrd\..*")
pattern_pxe_kernel = re.compile(r".*\.kernel\..*")
pattern_pxe_kiwi_ng_kernel = re.compile(r".*\.kernel$")
have_kernel = False
have_initrd = False
for f in files:
match = pattern_basename.match(f)
if match:
ret["main"]["image.basename"] = match.group("basename")
match = pattern_pxe_initrd.match(f) or pattern_pxe_kiwi_ng_initrd.match(f)
if match:
have_initrd = True
match = pattern_pxe_kernel.match(f) or pattern_pxe_kiwi_ng_kernel.match(f)
if match:
have_kernel = True
if have_kernel and have_initrd:
ret["main"]["image.type"] = "pxe"
return ret
# Kiwi NG
_kiwi_result_script = """
import sys
import pickle
import json
ret = {}
with open(sys.argv[1], 'rb') as f:
result = pickle.load(f)
ret['arch'] = result.xml_state.host_architecture
ret['basename'] = result.xml_state.xml_data.name
ret['type'] = result.xml_state.build_type.image
ret['filesystem'] = result.xml_state.build_type.filesystem
ret['initrd_system'] = result.xml_state.build_type.initrd_system
print(json.dumps(ret))
"""
def parse_kiwi_result(dest):
path = os.path.join(dest, "kiwi.result")
ret = {}
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path):
# pickle depends on availability of python kiwi modules
# which are not under our control, so there is a certain risk of failure
# also, the kiwi libraries may not be available in the salt bundle
# -> parse the file via wrapper script using system python3
#
# return empty dict on failure
# the caller should handle all values as optional
# pylint: disable-next=undefined-variable
result = __salt__["cmd.exec_code_all"](
"/usr/bin/python3", _kiwi_result_script, args=[path]
)
if result["retcode"] == 0:
ret = json.loads(result["stdout"])
# else return empty dict
return ret
def parse_packages(path):
ret = []
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path):
# pylint: disable-next=undefined-variable
packages = __salt__["cp.get_file_str"](path)
pattern = re.compile(
r"^(?P<name>.*?)\|(?P<epoch>.*?)\|(?P<version>.*?)\|(?P<release>.*?)\|(?P<arch>.*?)\|(?P<disturl>.*?)(\|(?P<license>.*))?$"
)
for line in packages.splitlines():
match = pattern.match(line)
if match:
# translate '(none)' values to ''
d = match.groupdict()
for k in list(d.keys()):
if d[k] == "(none)":
d[k] = ""
# if arch is '' and name begins gpg-pubkey then skip the package
if d["arch"] == "" and d["name"].startswith("gpg-pubkey"):
continue
ret.append(d)
return ret
def get_md5(path):
res = {}
# pylint: disable-next=undefined-variable
if not __salt__["file.file_exists"](path):
return res
# pylint: disable-next=undefined-variable
res["hash"] = "md5:" + __salt__["file.get_hash"](path, form="md5")
# pylint: disable-next=undefined-variable
res["size"] = __salt__["file.stats"](path).get("size")
return res
def parse_kiwi_md5(path, compressed=False):
res = {}
# pylint: disable-next=undefined-variable
if not __salt__["file.file_exists"](path):
return res
# pylint: disable-next=undefined-variable
md5_str = __salt__["cp.get_file_str"](path)
if md5_str is not None:
if compressed:
pattern = re.compile(
r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s+(?P<csize1>[0-9]+)\s+(?P<csize2>[0-9]+)\s*$"
)
else:
pattern = re.compile(
r"^(?P<md5>[0-9a-f]+)\s+(?P<size1>[0-9]+)\s+(?P<size2>[0-9]+)\s*$"
)
match = pattern.match(md5_str)
if match:
res["hash"] = "md5:" + match.group("md5")
res["size"] = int(match.group("size1")) * int(match.group("size2"))
if compressed:
res["compressed_size"] = int(match.group("csize1")) * int(
match.group("csize2")
)
return res
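# Illustrative example: an uncompressed Kiwi ".md5" file containing
#   d41d8cd98f00b204e9800998ecf8427e 238 8192
# yields {"hash": "md5:d41d8cd98f00b204e9800998ecf8427e", "size": 238 * 8192};
# the compressed variant carries two extra block/blocksize fields used for
# "compressed_size".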
_compression_types = {
".gz": "gzip",
".bz": "bzip",
".xz": "xz",
"": None,
}
# suffixes for pxe/kis image type
_pxe_image_types = [
".gz",
".bz",
".xz",
"",
]
def _known_image_types():
formats = _disk_format_types()
formats.extend(_compressed_format_types())
formats.extend(_iso_format_types())
formats.extend(_raw_format_types())
return formats
# from https://github.com/OSInside/kiwi/blob/main/kiwi/defaults.py#L1501
def _disk_format_types():
return [
".gce",
".qcow2",
".vmdk",
".ova",
".vmx",
".vhd",
".vhdx",
".vhdfixed",
".vdi",
".vagrant.libvirt.box",
".vagrant.virtualbox.box",
]
def _compressed_format_types():
return [".gz", ".bz", ".xz", ".tar.xz"]
def _iso_format_types():
return [".install.iso", ".iso"]
def _raw_format_types():
return [".raw", ".squashfs"]
def image_details(dest, bundle_dest=None):
"""
Gather detailed information about system image.
"""
res = {}
buildinfo = parse_buildinfo(dest) or guess_buildinfo(dest)
kiwiresult = parse_kiwi_result(dest)
basename = buildinfo.get("main", {}).get("image.basename", "")
image_type = kiwiresult.get("type") or buildinfo.get("main", {}).get(
"image.type", "unknown"
)
fstype = kiwiresult.get("filesystem")
pattern = re.compile(
# pylint: disable-next=consider-using-f-string
r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})$".format(
KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX
)
)
match = pattern.match(basename)
if match:
name = match.group("name")
arch = match.group("arch")
version = match.group("version")
else:
return None
filename = None
filepath = None
compression = None
image_types = _known_image_types()
if image_type == "pxe" or image_type == "kis":
image_types = _pxe_image_types
for c in image_types:
path = os.path.join(dest, basename + c)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path):
filename = basename + c
filepath = path
compression = _compression_types.get(c, None)
break
res["image"] = {
"basename": basename,
"name": name,
"arch": arch,
"type": image_type,
"version": version,
"filename": filename,
"filepath": filepath,
"fstype": fstype,
}
if compression:
res["image"].update(
{
"compression": compression,
# pylint: disable-next=undefined-variable
"compressed_hash": __salt__["hashutil.digest_file"](
filepath, checksum="md5"
),
}
)
res["image"].update(
parse_kiwi_md5(os.path.join(dest, basename + ".md5"), compression is not None)
)
if bundle_dest is not None:
res["bundles"] = inspect_bundles(bundle_dest, basename)
return res
def inspect_image(dest, build_id, bundle_dest=None):
"""
Image inspection stage entrypoint.
Provides detailed information about image and packages it contains.
"""
res = image_details(dest, bundle_dest)
if not res:
return None
res["image"]["build_id"] = build_id
basename = res["image"]["basename"]
image_type = res["image"]["type"]
for fstype in ["ext2", "ext3", "ext4", "btrfs", "xfs"]:
path = os.path.join(dest, basename + "." + fstype)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](path) or __salt__["file.is_link"](path):
res["image"]["fstype"] = fstype
break
res["packages"] = parse_packages(os.path.join(dest, basename + ".packages"))
if image_type == "pxe":
res["boot_image"] = inspect_boot_image(dest)
return res
def inspect_boot_image(dest):
"""
    Gather information about the boot image (kernel and initrd).
    Only valid for the PXE/KIS image type.
"""
res = None
# pylint: disable-next=undefined-variable
files = __salt__["file.readdir"](dest)
pattern = re.compile(
# pylint: disable-next=consider-using-f-string
r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})\.kernel\.(?P<kernelversion>.*)\.md5$".format(
KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX
)
)
pattern_kiwi_ng = re.compile(
# pylint: disable-next=consider-using-f-string
r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})-(?P<kernelversion>.*)\.kernel$".format(
KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX
)
)
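    # the first pattern matches the legacy Kiwi naming ("<base>.kernel.<kver>.md5"),
    # the second one matches the Kiwi NG naming ("<base>-<kver>.kernel")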
for f in files:
match = pattern.match(f)
if match:
basename = (
match.group("name")
+ "."
+ match.group("arch")
+ "-"
+ match.group("version")
)
res = {
"name": match.group("name"),
"arch": match.group("arch"),
"basename": basename,
"initrd": {"version": match.group("version")},
"kernel": {"version": match.group("kernelversion")},
"kiwi_ng": False,
}
break
match = pattern_kiwi_ng.match(f)
if match:
basename = (
match.group("name")
+ "."
+ match.group("arch")
+ "-"
+ match.group("version")
)
res = {
"name": match.group("name"),
"arch": match.group("arch"),
"basename": basename,
"initrd": {"version": match.group("version")},
"kernel": {"version": match.group("kernelversion")},
"kiwi_ng": True,
}
break
if res is None:
return None
for c in _compression_types:
if res["kiwi_ng"]:
file = basename + ".initrd" + c
else:
file = basename + c
filepath = os.path.join(dest, file)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](filepath):
res["initrd"]["filename"] = file
res["initrd"]["filepath"] = filepath
if res["kiwi_ng"]:
res["initrd"].update(get_md5(filepath))
else:
res["initrd"].update(
parse_kiwi_md5(os.path.join(dest, basename + ".md5"))
)
break
if res["kiwi_ng"]:
file = basename + "-" + res["kernel"]["version"] + ".kernel"
filepath = os.path.join(dest, file)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](filepath):
res["kernel"]["filename"] = file
res["kernel"]["filepath"] = filepath
res["kernel"].update(get_md5(filepath))
else:
file = basename + ".kernel." + res["kernel"]["version"]
filepath = os.path.join(dest, file)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](filepath):
res["kernel"]["filename"] = file
res["kernel"]["filepath"] = filepath
res["kernel"].update(parse_kiwi_md5(filepath + ".md5"))
return res
def inspect_bundles(dest, basename):
"""
Gather details about image bundle.
    An image bundle is a compressed tarball of all image results with custom naming.
    It is not used by default and is not compatible with the containerized saltboot workflow.
"""
res = []
# pylint: disable-next=undefined-variable
files = __salt__["file.readdir"](dest)
pattern = re.compile(
r"^(?P<basename>"
+ re.escape(basename)
+ r")-(?P<id>[^.]*)\.(?P<suffix>.*)\.sha256$"
)
for f in files:
match = pattern.match(f)
if match:
res1 = match.groupdict()
sha256_file = f
# pylint: disable-next=undefined-variable
sha256_str = __salt__["cp.get_file_str"](os.path.join(dest, sha256_file))
pattern2 = re.compile(r"^(?P<hash>[0-9a-f]+)\s+(?P<filename>.*)\s*$")
match = pattern2.match(sha256_str)
if match:
d = match.groupdict()
# pylint: disable-next=consider-using-f-string
d["hash"] = "sha256:{0}".format(d["hash"])
res1.update(d)
res1["filepath"] = os.path.join(dest, res1["filename"])
else:
# only hash without file name
pattern2 = re.compile(r"^(?P<hash>[0-9a-f]+)$")
match = pattern2.match(sha256_str)
if match:
# pylint: disable-next=consider-using-f-string
res1["hash"] = "sha256:{0}".format(match.groupdict()["hash"])
res1["filename"] = sha256_file[0 : -len(".sha256")]
res1["filepath"] = os.path.join(dest, res1["filename"])
res.append(res1)
return res
def build_info(dest, build_id, bundle_dest=None):
"""
Generates basic build info for image collection. Skips package inspection.
"""
res = {}
buildinfo = parse_buildinfo(dest) or guess_buildinfo(dest)
kiwiresult = parse_kiwi_result(dest)
basename = buildinfo.get("main", {}).get("image.basename", "")
image_type = kiwiresult.get("type") or buildinfo.get("main", {}).get(
"image.type", "unknown"
)
pattern = re.compile(
# pylint: disable-next=consider-using-f-string
r"^(?P<name>{})\.(?P<arch>{})-(?P<version>{})$".format(
KIWI_NAME_REGEX, KIWI_ARCH_REGEX, KIWI_VERSION_REGEX
)
)
match = pattern.match(basename)
if not match:
log.error("Unable to match Kiwi results")
return None
name = match.group("name")
arch = match.group("arch")
version = match.group("version")
image_filepath = None
image_filename = None
image_types = _known_image_types()
if image_type == "pxe" or image_type == "kis":
r = inspect_boot_image(dest)
res["boot_image"] = {
"initrd": {
"filepath": r["initrd"]["filepath"],
"filename": r["initrd"]["filename"],
"hash": r["initrd"]["hash"],
},
"kernel": {
"filepath": r["kernel"]["filepath"],
"filename": r["kernel"]["filename"],
"hash": r["kernel"]["hash"],
},
}
image_types = _pxe_image_types
for c in image_types:
test_name = basename + c
filepath = os.path.join(dest, test_name)
# pylint: disable-next=undefined-variable
if __salt__["file.file_exists"](filepath):
image_filename = test_name
image_filepath = filepath
break
res["image"] = {
"name": name,
"arch": arch,
"version": version,
"filepath": image_filepath,
"filename": image_filename,
"build_id": build_id,
}
    # Kiwi creates a checksum for the filesystem image when the image type is PXE (or KIS); however, if the image
    # is compressed, this checksum is of the uncompressed image. Other image types do not get a checksum at all.
res["image"].update(get_md5(image_filepath))
if bundle_dest is not None:
res["bundles"] = inspect_bundles(bundle_dest, basename)
return res
070701000000C0000081B400000000000000000000000168EFD66400001728000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/kiwi_source.py# pylint: disable=missing-module-docstring
# SPDX-FileCopyrightText: 2018-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
import salt.exceptions
import logging
import os
# pylint: disable-next=unused-import
from tempfile import mkdtemp
try:
# pylint: disable-next=unused-import
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
log = logging.getLogger(__name__)
# valid prefixes taken from Docker-CE to be compatible
valid_git_prefixes = ["http://", "https://", "git://", "github.com/", "git@"]
valid_url_prefixes = ["http://", "https://"]
valid_url_suffixes = [".tar.gz", ".tar.xz", ".tar.bz2", ".tgz", ".tar"]
# pylint: disable-next=invalid-name
def _isLocal(source):
# pylint: disable-next=undefined-variable
return __salt__["file.directory_exists"](source)
# pylint: disable-next=invalid-name
def _isGit(source):
for prefix in valid_git_prefixes:
if source.startswith(prefix):
return True
return False
# pylint: disable-next=invalid-name
def _isTarball(source):
prefix_ok = False
for prefix in valid_url_prefixes:
if source.startswith(prefix):
prefix_ok = True
break
if not prefix_ok:
return False
for suffix in valid_url_suffixes:
if source.endswith(suffix):
return True
return False
# pylint: disable-next=invalid-name
def _prepareDestDir(dest):
"""
    Check that the target directory does not exist
"""
if os.path.isdir(dest):
raise salt.exceptions.SaltException(
# pylint: disable-next=consider-using-f-string
'Working directory "{0}" exists before sources are prepared'.format(dest)
)
# pylint: disable-next=invalid-name
def _prepareLocal(source, dest):
"""
    Create a symlink from `source` to `dest`
"""
log.debug("Source is local directory")
_prepareDestDir(dest)
# pylint: disable-next=undefined-variable
__salt__["file.symlink"](source, dest)
return dest
# pylint: disable-next=invalid-name
def _prepareHTTP(source, dest):
"""
    Download the tarball and extract it to the destination directory
"""
log.debug("Source is HTTP")
_prepareDestDir(dest)
filename = os.path.join(dest, source.split("/")[-1])
# pylint: disable-next=undefined-variable
res = __salt__["state.single"](
"file.managed", filename, source=source, makedirs=True, skip_verify=True
)
# pylint: disable-next=unused-variable
for s, r in list(res.items()):
if not r["result"]:
raise salt.exceptions.SaltException(r["comment"])
# pylint: disable-next=undefined-variable
res = __salt__["state.single"](
"archive.extracted",
name=dest,
source=filename,
skip_verify=True,
overwrite=True,
)
for s, r in list(res.items()):
if not r["result"]:
raise salt.exceptions.SaltException(r["comment"])
return dest
# pylint: disable-next=invalid-name
def _prepareGit(source, dest, root):
_prepareDestDir(dest)
    # check out the git repository into a temporary directory inside our build root;
    # this is needed when we are interested only in a git subtree
# pylint: disable-next=undefined-variable
tmpdir = __salt__["temp.dir"](parent=root)
rev = "master"
subdir = None
url = None
    # parse the git URI, e.g. git@github.com/repo/#rev:sub
# compatible with docker as per https://docs.docker.com/engine/reference/commandline/build/#git-repositories
try:
url, fragment = source.split("#", 1)
try:
rev, subdir = fragment.split(":", 1)
# pylint: disable-next=bare-except
except:
rev = fragment
# pylint: disable-next=bare-except
except:
url = source
# omitted rev means default 'master' branch revision
if rev == "":
rev = "master"
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("GIT URL: {0}, Revision: {1}, subdir: {2}".format(url, rev, subdir))
# pylint: disable-next=undefined-variable
__salt__["git.init"](tmpdir)
# pylint: disable-next=undefined-variable
__salt__["git.remote_set"](tmpdir, url)
# pylint: disable-next=undefined-variable
__salt__["git.fetch"](tmpdir)
# pylint: disable-next=undefined-variable
__salt__["git.checkout"](tmpdir, rev=rev)
if subdir:
if _isLocal(os.path.join(tmpdir, subdir)):
# pylint: disable-next=undefined-variable
__salt__["file.symlink"](os.path.join(tmpdir, subdir), dest)
else:
raise salt.exceptions.SaltException(
# pylint: disable-next=consider-using-f-string
"Directory is not present in checked out source: {}".format(subdir)
)
else:
# pylint: disable-next=undefined-variable
__salt__["file.symlink"](tmpdir, dest)
return dest
def prepare_source(source, root):
"""
Prepare source directory based on different source types.
    source -- string with either a local directory path, a remote http(s) archive URL or a git repository
    root -- local directory in which to store the processed source files
    For a git repository, the following format is understood:
[http[s]://|git://][user@]hostname/repository[#revision[:subdirectory]]
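    Example (hypothetical repository, branch and subdirectory):
    https://github.com/org/repo#mybranch:images/pool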
"""
dest = os.path.join(root, "source")
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("Preparing build source for {0} to {1}".format(source, dest))
if _isLocal(source):
return _prepareLocal(source, dest)
elif _isTarball(source):
return _prepareHTTP(source, dest)
elif _isGit(source):
return _prepareGit(source, dest, root)
else:
raise salt.exceptions.SaltException(
# pylint: disable-next=consider-using-f-string
'Unknown source format "{0}"'.format(source)
)
070701000000C1000081B400000000000000000000000168EFD664000004B1000000000000000000000000000000000000003000000000susemanager-sls/src/modules/mainframesysinfo.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
s390 utility for Uyuni and SUSE Multi-Linux Manager
"""
from __future__ import absolute_import
import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
import os
__salt__ = {
"cmd.run_all": salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only works if /usr/bin/read_values is accessible
"""
return os.access("/usr/bin/read_values", os.X_OK) or os.access(
"/proc/sysinfo", os.R_OK
)
def read_values():
"""
    Executes /usr/bin/read_values or, if not available,
    falls back to 'cat /proc/sysinfo'
CLI Example:
.. code-block:: bash
salt '*' mainframesysinfo.read_values
"""
if os.access("/usr/bin/read_values", os.X_OK):
cmd = "/usr/bin/read_values -s"
else:
cmd = "cat /proc/sysinfo"
result = __salt__["cmd.run_all"](cmd, output_loglevel="quiet")
if result["retcode"] != 0:
raise CommandExecutionError(result["stderr"])
return result["stdout"]
070701000000C2000081B400000000000000000000000168EFD66400002F21000000000000000000000000000000000000002F00000000susemanager-sls/src/modules/mgractionchains.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2018-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
SUSE Multi-Linux Manager Action Chains module for Salt
"""
from __future__ import absolute_import
import logging
import os
# pylint: disable-next=unused-import
import sys
import salt.config
import salt.syspaths
import yaml
from salt.utils.yamlloader import SaltYamlSafeLoader
# Prevent issues due to the 'salt.utils.fopen' deprecation
try:
from salt.utils import fopen
# pylint: disable-next=bare-except
except:
from salt.utils.files import fopen
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "mgractionchains"
SALT_ACTIONCHAIN_BASE = "actionchains"
# pylint: disable-next=invalid-name
def __virtual__():
"""
    This module is enabled whenever 'state.sls' is available.
"""
return (
__virtualname__
# pylint: disable-next=undefined-variable
if "state.sls" in __salt__
else (False, "state.sls is not available")
)
def _calculate_sls(actionchain_id, machine_id, chunk):
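    # e.g. _calculate_sls(123, "<machine_id>", 1) -> "actionchains.actionchain_123_<machine_id>_1"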
# pylint: disable-next=consider-using-f-string
return "{0}.actionchain_{1}_{2}_{3}".format(
SALT_ACTIONCHAIN_BASE, actionchain_id, machine_id, chunk
)
def _get_ac_storage_filenamepath():
"""
Calculate the filepath to the '_mgractionchains.conf' which is placed
by default in /etc/salt/minion.d/
"""
# pylint: disable-next=undefined-variable
config_dir = __opts__.get("conf_dir", None)
# pylint: disable-next=undefined-variable
if config_dir is None and "conf_file" in __opts__:
# pylint: disable-next=undefined-variable
config_dir = os.path.dirname(__opts__["conf_file"])
if config_dir is None:
config_dir = salt.syspaths.CONFIG_DIR
minion_d_dir = os.path.join(
config_dir,
os.path.dirname(
# pylint: disable-next=undefined-variable
__opts__.get(
"default_include", salt.config.DEFAULT_MINION_OPTS["default_include"]
)
),
)
return os.path.join(minion_d_dir, "_mgractionchains.conf")
def check_reboot_required(target_sls):
"""
    This function is used for transactional update systems.
    Check if the sls file contains the reboot_required parameter in schedule_next_chunk.
    If it exists and is set to true, the system is rebooted when the sls file execution is completed.
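    Illustrative shape of the parsed SLS data this function expects (the
    module_run target shown is hypothetical):
    .. code-block:: yaml
        schedule_next_chunk:
          mgrcompat.module_run:
            - name: mgractionchains.next
            - reboot_required: True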
:param target_sls: sls filename
:return: True if the system requires a reboot at the end of the transaction
"""
# pylint: disable-next=undefined-variable
sls_file_on_minion = __salt__["cp.cache_file"](
# pylint: disable-next=consider-using-f-string
"{0}{1}.sls".format(
"salt://actionchains/", target_sls.replace("actionchains.", "")
)
)
current_state_info = _read_sls_file(sls_file_on_minion)
if not current_state_info or not "schedule_next_chunk" in current_state_info:
        # schedule_next_chunk contains information about how to restart the action chain after a reboot, so it's
        # present only if there's a reboot action or a salt upgrade. If there's no action that performs a reboot,
        # schedule_next_chunk is not present.
return False
if not "mgrcompat.module_run" in current_state_info["schedule_next_chunk"]:
log.error(
'Cannot check if reboot is needed as "schedule_next_chunk" is not containing expected attributes.'
)
return False
list_param = current_state_info["schedule_next_chunk"]["mgrcompat.module_run"]
for dic in list_param:
if "reboot_required" in dic:
return dic["reboot_required"]
return False
def _read_next_ac_chunk(clear=True):
"""
Read and remove the content of '_mgractionchains.conf' file. Return the parsed YAML.
"""
f_storage_filename = _get_ac_storage_filenamepath()
ret = _read_sls_file(f_storage_filename)
if ret is None:
return None
if clear:
os.remove(f_storage_filename)
return ret
def _read_sls_file(filename):
if not os.path.isfile(filename):
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("File {0} does not exists".format(filename))
return None
ret = None
try:
with fopen(filename, "r") as f:
ret = yaml.load(f.read(), Loader=SaltYamlSafeLoader)
return ret
except (IOError, yaml.scanner.ScannerError) as exc:
# pylint: disable-next=consider-using-f-string
err_str = "Error processing YAML from '{0}': {1}".format(filename, exc)
log.error(err_str)
# pylint: disable-next=raise-missing-from
raise CommandExecutionError(err_str)
def _add_boot_time(next_chunk, prefix):
"""
Add the current boot time to the next_chunk dict
"""
# pylint: disable-next=undefined-variable
uptime = __salt__["status.uptime"]()
# pylint: disable-next=consider-using-f-string
next_chunk["{0}_boot_time".format(prefix)] = uptime["since_iso"]
def _persist_next_ac_chunk(next_chunk):
"""
Persist next_chunk to execute as YAML in '_mgractionchains.conf'
"""
_add_boot_time(next_chunk, "persist")
f_storage_filename = _get_ac_storage_filenamepath()
try:
f_storage_dir = os.path.dirname(f_storage_filename)
if not os.path.exists(f_storage_dir):
os.makedirs(f_storage_dir)
with fopen(f_storage_filename, "w") as f_storage:
f_storage.write(yaml.dump(next_chunk))
except (IOError, yaml.scanner.ScannerError) as exc:
# pylint: disable-next=consider-using-f-string
err_str = "Error writing YAML from '{0}': {1}".format(f_storage_filename, exc)
log.error(err_str)
# pylint: disable-next=raise-missing-from
raise CommandExecutionError(err_str)
def start(actionchain_id):
"""
Start the execution of the given SUSE Multi-Linux Manager Action Chain
actionchain_id
The SUSE Multi-Linux Manager Actionchain ID to execute on this minion.
CLI Example:
.. code-block:: bash
salt '*' mgractionchains.start 123
"""
if os.path.isfile(_get_ac_storage_filenamepath()):
msg = (
# pylint: disable-next=consider-using-f-string
"Action Chain '{0}' cannot be started. There is already another "
"Action Chain being executed. Please check file '{1}'".format(
actionchain_id, _get_ac_storage_filenamepath()
)
)
log.error(msg)
raise CommandExecutionError(msg)
# pylint: disable-next=undefined-variable
target_sls = _calculate_sls(actionchain_id, __grains__["machine_id"], 1)
log.debug(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"Starting execution of SUSE Multi-Linux Manager Action Chains ID "
"'{0}' -> Target SLS: {1}".format(actionchain_id, target_sls)
)
try:
# pylint: disable-next=undefined-variable
__salt__["saltutil.sync_states"]()
# pylint: disable-next=undefined-variable
__salt__["saltutil.sync_modules"]()
# pylint: disable-next=broad-exception-caught,unused-variable
except Exception as exc:
log.error(
"There was an error while syncing custom states and execution modules"
)
# pylint: disable-next=undefined-variable
transactional_update = __grains__.get("transactional")
reboot_required = False
inside_transaction = False
if transactional_update:
reboot_required = check_reboot_required(target_sls)
inside_transaction = os.environ.get("TRANSACTIONAL_UPDATE")
if transactional_update and not inside_transaction:
# pylint: disable-next=undefined-variable
ret = __salt__["transactional_update.sls"](
target_sls, queue=True, activate_transaction=False
)
else:
# pylint: disable-next=undefined-variable
ret = __salt__["state.sls"](target_sls, queue=True)
if reboot_required:
# pylint: disable-next=undefined-variable
__salt__["transactional_update.reboot"]()
if isinstance(ret, list):
raise CommandExecutionError(ret)
return ret
# pylint: disable-next=redefined-builtin
def next(
actionchain_id,
chunk,
next_action_id=None,
current_action_id=None,
ssh_extra_filerefs=None,
reboot_required=False,
):
"""
Persist the next Action Chain chunk to be executed by the 'resume' method.
next_chunk
The next target SLS to be executed.
CLI Example:
.. code-block:: bash
salt '*' mgractionchains.next actionchains.actionchain_123_machineid_2
"""
yaml_dict = {
# pylint: disable-next=undefined-variable
"next_chunk": _calculate_sls(actionchain_id, __grains__["machine_id"], chunk)
}
yaml_dict["actionchain_id"] = actionchain_id
if next_action_id:
yaml_dict["next_action_id"] = next_action_id
if current_action_id:
yaml_dict["current_action_id"] = current_action_id
if ssh_extra_filerefs:
yaml_dict["ssh_extra_filerefs"] = ssh_extra_filerefs
if reboot_required:
yaml_dict["reboot_required"] = reboot_required
_persist_next_ac_chunk(yaml_dict)
return yaml_dict
def get_pending_resume():
"""
Get information about any pending action chain chunk execution.
"""
next_chunk = _read_next_ac_chunk(False)
if next_chunk:
_add_boot_time(next_chunk, "current")
return next_chunk or {}
def resume():
"""
Continue the execution of a SUSE Multi-Linux Manager Action Chain.
This will trigger the execution of the next chunk SLS file stored on '_mgractionchains.conf'
    This method is called by the Salt Reactor in response to the 'minion/start/event'.
"""
ac_resume_info = _read_next_ac_chunk()
if not ac_resume_info:
return {}
# pylint: disable-next=unidiomatic-typecheck
if type(ac_resume_info) != dict:
err_str = (
# pylint: disable-next=consider-using-f-string
"Not able to resume Action Chain execution! Malformed "
"'_mgractionchains.conf' found: {0}".format(ac_resume_info)
)
log.error(err_str)
raise CommandExecutionError(err_str)
next_chunk = ac_resume_info.get("next_chunk")
log.debug(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"Resuming execution of SUSE Multi-Linux Manager Action Chain -> Target SLS: "
"{0}".format(next_chunk)
)
# pylint: disable-next=undefined-variable
transactional_update = __grains__.get("transactional")
reboot_required = False
inside_transaction = False
if transactional_update:
reboot_required = ac_resume_info.get("reboot_required")
inside_transaction = os.environ.get("TRANSACTIONAL_UPDATE")
if transactional_update and not inside_transaction:
# pylint: disable-next=undefined-variable
ret = __salt__["transactional_update.sls"](
next_chunk, queue=True, activate_transaction=False
)
else:
# pylint: disable-next=undefined-variable
ret = __salt__["state.sls"](next_chunk, queue=True)
if reboot_required:
# pylint: disable-next=undefined-variable
__salt__["transactional_update.reboot"]()
if isinstance(ret, list):
raise CommandExecutionError(ret)
return ret
def clean(actionchain_id=None, current_action_id=None, reboot_required=None):
"""
Clean execution of an Action Chain by removing '_mgractionchains.conf'.
"""
_read_next_ac_chunk()
yaml_dict = {}
yaml_dict["success"] = True
if actionchain_id:
yaml_dict["actionchain_id"] = actionchain_id
if current_action_id:
yaml_dict["current_action_id"] = current_action_id
if reboot_required:
yaml_dict["reboot_required"] = reboot_required
return yaml_dict
070701000000C3000081B400000000000000000000000168EFD66400000CF5000000000000000000000000000000000000002600000000susemanager-sls/src/modules/mgrnet.py# SPDX-FileCopyrightText: 2022-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Module for gathering DNS FQDNs
"""
import logging
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import salt.utils.network
try:
from salt.utils.network import _get_interfaces
# pylint: disable-next=bare-except
except:
from salt.grains.core import _get_interfaces
try:
from salt.utils.path import which as _which
# pylint: disable-next=bare-except
except:
from salt.utils import which as _which
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only works on POSIX-like systems having 'host' or 'nslookup' available
"""
if not (_which("host") or _which("nslookup")):
return (False, "Neither 'host' nor 'nslookup' is available on the system")
return True
def dns_fqdns():
"""
Return all known DNS FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them with native DNS tools
"""
# Provides:
# dns_fqdns
# pylint: disable-next=unused-variable
grains = {}
fqdns = set()
# pylint: disable-next=undefined-variable
cmd_run_all_func = __salt__["cmd.run_all"]
if _which("host"):
cmd = "host"
cmd_ret_regex = re.compile(r".* domain name pointer (.*)\.$")
elif _which("nslookup"):
cmd = "nslookup"
cmd_ret_regex = re.compile(r".*\tname = (.*)\.$")
else:
log.error("Neither 'host' nor 'nslookup' is available on the system")
return {"dns_fqdns": []}
def _lookup_dns_fqdn(ip):
try:
ret = cmd_run_all_func([cmd, ip], ignore_retcode=True)
# pylint: disable-next=broad-exception-caught
except Exception as e:
log.error("Error while trying to use '%s' to resolve '%s': %s", cmd, ip, e)
if ret["retcode"] != 0:
log.debug("Unable to resolve '%s' using '%s': %s", ip, cmd, ret)
return []
fqdns = []
for line in ret["stdout"].split("\n"):
match = cmd_ret_regex.match(line)
if match:
fqdns.append(match.group(1))
return fqdns
start = time.time()
addresses = salt.utils.network.ip_addrs(
include_loopback=False, interface_data=_get_interfaces()
)
addresses.extend(
salt.utils.network.ip_addrs6(
include_loopback=False, interface_data=_get_interfaces()
)
)
results = []
try:
# Create a ThreadPoolExecutor to process the underlying calls
# to resolve DNS FQDNs in parallel.
with ThreadPoolExecutor(max_workers=8) as executor:
results = dict(
(executor.submit(_lookup_dns_fqdn, ip), ip) for ip in addresses
)
for item in as_completed(results):
item = item.result()
if item:
fqdns.update(item)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Exception while running ThreadPoolExecutor for FQDNs resolution: %s",
exc,
)
elapsed = time.time() - start
log.debug("Elapsed time getting DNS FQDNs: %s seconds", elapsed)
return {"dns_fqdns": sorted(list(fqdns))}
070701000000C4000081B400000000000000000000000168EFD664000006C6000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/reboot_info.py# SPDX-FileCopyrightText: 2023-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""Find out if a reboot is needed for different Linux distributions."""
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = "reboot_info"
# Just for lint and static analysis, will be replaced by salt's loader
__grains__ = {}
__salt__ = {}
# pylint: disable-next=invalid-name
def __virtual__():
"""
Run on Debian, Suse and RedHat systems.
"""
return __grains__["os_family"] in ["Debian", "Suse", "RedHat"]
def _check_cmd_exit_code(cmd, code):
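    # e.g. 'dnf -q needs-restarting -r' (used below) exits with 1 when a reboot is required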
output = __salt__["cmd.run_all"](cmd, ignore_retcode=True)
if output.get("stderr"):
log.error(output["stderr"])
return output["retcode"] == code
def reboot_required():
"""
Check if reboot is required
CLI Example:
.. code-block:: bash
salt '*' reboot_info.reboot_required
"""
result = False
if __grains__.get("transactional", False):
result = __salt__["transactional_update.pending_transaction"]()
elif __grains__["os_family"] == "Debian":
result = os.path.exists("/var/run/reboot-required")
elif __grains__["os_family"] == "Suse":
result = (
os.path.exists("/run/reboot-needed")
if __grains__["osmajorrelease"] >= 12
else os.path.exists("/boot/do_purge_kernels")
)
elif __grains__["os_family"] == "RedHat":
cmd = (
"dnf -q needs-restarting -r"
# In RHEL6 and clones 'osmajorrelease' is a string
if int(__grains__["osmajorrelease"]) >= 8
else "needs-restarting -r"
)
result = _check_cmd_exit_code(cmd, 1)
return {"reboot_required": result}
070701000000C5000081B400000000000000000000000168EFD66400000520000000000000000000000000000000000000002300000000susemanager-sls/src/modules/sap.py# SPDX-FileCopyrightText: 2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=missing-module-docstring
import glob
import os
import re
__virtualname__ = "sap"
__grains__ = {}
SAP_BASE_PATH = "/usr/sap"
SAP_REGEX = re.compile(r"/usr/sap/([A-Z][A-Z0-9]{2})/([A-Z]+)(\d{2})\b")
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only load the module if the operating system is SLES.
"""
if __grains__.get("os_family") == "Suse":
return True
return (False, "This module is only available on SLES systems.")
def get_workloads():
"""
Detect SAP workloads based on filesystem structure.
Returns:
list: List of detected SAP systems with their system ID and instance types, or an empty list if none are found.
"""
sap_systems = []
if not os.path.exists(SAP_BASE_PATH):
return []
for dir_path in glob.iglob(
f"{SAP_BASE_PATH}/[A-Z][A-Z0-9][A-Z0-9]/[A-Z0-9]*[0-9][0-9]/"
):
match = SAP_REGEX.match(dir_path)
if match:
system_id = match.group(1)
instance_type = match.group(2)
sap_systems.append({"system_id": system_id, "instance_type": instance_type})
sap_systems.sort(key=lambda x: (x["system_id"], x["instance_type"]))
return sap_systems
070701000000C6000081B400000000000000000000000168EFD66400000C9C000000000000000000000000000000000000002900000000susemanager-sls/src/modules/ssh_agent.py# pylint: disable=missing-module-docstring
# SPDX-FileCopyrightText: 2020-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
import logging
import subprocess
import salt.utils.timed_subprocess
from salt.exceptions import CommandExecutionError
try:
from salt.utils.path import which_bin as _which_bin
except ImportError:
from salt.utils import which_bin as _which_bin
log = logging.getLogger(__name__)
__virtualname__ = "ssh_agent"
# pylint: disable-next=invalid-name
__ssh_agent = "/usr/bin/ssh-agent"
# pylint: disable-next=invalid-name
__ssh_add = "/usr/bin/ssh-add"
# pylint: disable-next=invalid-name
def __virtual__():
"""
    This module is enabled whenever 'ssh-agent' is available.
"""
return (
__virtualname__
if _which_bin(["ssh-agent"])
else (False, "ssh-agent is not available")
)
# pylint: disable-next=invalid-name,unused-argument
def __call_ssh_tool(ssh_tool, cmd_args="", **kwargs):
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("Calling ssh-agent: '{} {}'".format(ssh_tool, cmd_args))
try:
ssh_tool_proc = salt.utils.timed_subprocess.TimedProc(
[ssh_tool] + cmd_args.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
ssh_tool_proc.run()
except Exception as exc:
# pylint: disable-next=consider-using-f-string
error_msg = "Unexpected error while calling {}: {}".format(ssh_tool, exc)
log.error(error_msg)
# pylint: disable-next=raise-missing-from
raise CommandExecutionError(error_msg)
if ssh_tool_proc.process.returncode != 0:
# pylint: disable-next=consider-using-f-string
error_msg = "Unexpected error {} when calling {} {}: {} {}".format(
ssh_tool_proc.process.returncode,
ssh_tool,
cmd_args,
salt.utils.stringutils.to_str(ssh_tool_proc.stdout),
salt.utils.stringutils.to_str(ssh_tool_proc.stderr),
)
log.error(error_msg)
raise CommandExecutionError(error_msg)
return ssh_tool_proc
# pylint: disable-next=unused-argument
def start_agent(**kwargs):
result = __call_ssh_tool(__ssh_agent)
stdout = salt.utils.stringutils.to_str(result.stdout)
ssh_agent_lines = stdout.splitlines()
variables = dict()
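    # ssh-agent prints lines like (illustrative):
    #   SSH_AUTH_SOCK=/tmp/ssh-XXXXXX/agent.123; export SSH_AUTH_SOCK;
    # each SSH* line is split on ';' and the leading VAR=value part is kept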
for line in ssh_agent_lines:
if line.startswith("SSH"):
line_content_list = line.split(";")
# pylint: disable-next=unused-variable
var, rest = line_content_list[0], line_content_list[1:]
key, val = var.strip().split("=", 1)
variables[key] = val
# pylint: disable-next=undefined-variable
__salt__["environ.setenv"](variables)
return variables
# pylint: disable-next=unused-argument
def list_keys(**kwargs):
result = __call_ssh_tool(__ssh_add, "-l")
return salt.utils.stringutils.to_str(result.stdout)
# pylint: disable-next=unused-argument
def add_key(ssh_key_file, **kwargs):
__call_ssh_tool(__ssh_add, ssh_key_file)
return True
# pylint: disable-next=unused-argument
def kill(**kwargs):
__call_ssh_tool(__ssh_agent, "-k")
return True
070701000000C7000081B400000000000000000000000168EFD66400001CAB000000000000000000000000000000000000002800000000susemanager-sls/src/modules/sumautil.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Utility module for Uyuni and SUSE Multi-Linux Manager
"""
from __future__ import absolute_import
import logging
import socket
import os
import re
import time
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
try:
from salt.utils.path import which_bin as _which_bin
# pylint: disable-next=bare-except
except:
from salt.utils import which_bin as _which_bin
__salt__ = {
"cmd.run_all": salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
__virtualname__ = "sumautil"
SYSFS_NET_PATH = "/sys/class/net"
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only run on Linux systems
"""
# pylint: disable-next=undefined-variable
return __grains__["kernel"] == "Linux" and __virtualname__ or False
def cat(path):
"""
Cat the specified file.
CLI Example:
.. code-block:: bash
salt '*' sumautil.cat /tmp/file
"""
# pylint: disable-next=consider-using-f-string
cmd = "cat %s" % path
result = __salt__["cmd.run_all"](cmd, output_loglevel="quiet")
if result["retcode"] != 0:
return {"retcode": 1, "stderr": result["stderr"]}
return {"retcode": 0, "stdout": result["stdout"]}
def primary_ips():
"""
Get the source IPs that the minion uses to connect to the master.
Returns the IPv4 and IPv6 address (if available).
CLI Example:
.. code-block:: bash
salt '*' sumautil.primary_ip
"""
# pylint: disable-next=unnecessary-lambda-assignment
get_master_ip = lambda family, host: socket.getaddrinfo(host, 0, family)[0][-1][0]
# pylint: disable-next=undefined-variable
master = __opts__.get("master", "")
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("Using master: {0}".format(str(master)))
ret = dict()
for sock_family, sock_descr in list(
{socket.AF_INET: "IPv4", socket.AF_INET6: "IPv6"}.items()
):
try:
# pylint: disable-next=consider-using-f-string
ret["{0}".format(sock_descr)] = __salt__["network.get_route"](
get_master_ip(sock_family, master)
)
log.debug(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"network.get_route({0}): ".format(ret["{0} source".format(sock_descr)])
)
# pylint: disable-next=broad-exception-caught
except Exception as err:
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("{0} is not available? {1}".format(sock_descr, err))
return ret
def get_net_module(iface):
"""
    Returns the kernel module used for the given interface
    or None if the module could not be determined or if the
    interface name is wrong.
Uses '/sys/class/net' to find out the module.
CLI Example:
.. code-block:: bash
salt '*' sumautil.get_net_module eth0
"""
device_sysfspath = os.path.join(SYSFS_NET_PATH, iface, "device/driver")
bridge_sysfspath = os.path.join(SYSFS_NET_PATH, iface, "bridge")
if os.path.exists(device_sysfspath):
return os.path.split(os.readlink(device_sysfspath))[-1] or None
elif os.path.exists(bridge_sysfspath):
return "bridge"
else:
return None
def get_net_modules():
"""
Returns a dictionary of all network interfaces and their
corresponding kernel module (if it could be determined).
CLI Example:
.. code-block:: bash
salt '*' sumautil.get_net_modules
"""
drivers = dict()
for devdir in os.listdir(SYSFS_NET_PATH):
try:
drivers[devdir] = get_net_module(devdir)
        except OSError:
log.warning(
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
"An error occurred getting net driver for {0}".format(devdir),
exc_info=True,
)
return drivers or None
def get_kernel_live_version():
"""
Returns the patch version of live patching if it is active,
otherwise None
CLI Example:
.. code-block:: bash
salt '*' sumautil.get_kernel_live_version
"""
kernel_live_version = _klp()
if not kernel_live_version:
log.debug("No kernel live patch is active")
return kernel_live_version
def _klp():
"""
klp to identify the current kernel live patch
:return:
"""
# get 'kgr' for versions prior to SLE 15
klp = _which_bin(["klp", "kgr"])
patchname = None
if klp is not None:
try:
# loop until patching is finished
# pylint: disable-next=unused-variable
for i in range(10):
stat = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} status".format(klp),
output_loglevel="quiet",
)
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("klp status: {0}".format(stat["stdout"]))
if stat["stdout"].strip().splitlines()[0] == "ready":
break
time.sleep(1)
re_active = re.compile(r"^\s+active:\s*(\d+)$")
ret = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"{0} -v patches".format(klp),
output_loglevel="quiet",
)
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("klp patches: {0}".format(ret["stdout"]))
if ret["retcode"] == 0:
for line in ret["stdout"].strip().splitlines():
if line.startswith("#"):
continue
match_active = re_active.match(line)
if match_active and int(match_active.group(1)) > 0:
return {"mgr_kernel_live_version": patchname}
elif line.startswith("kgraft") or line.startswith("livepatch"):
# kgr patches have prefix 'kgraft', whereas klp patches start with 'livepatch'
patchname = line.strip()
# pylint: disable-next=broad-exception-caught
except Exception as error:
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.error("klp: {0}".format(str(error)))
def instance_flavor():
"""Check if this minion is a PAYG or BYOS instance."""
if "instance_id" not in __grains__: # pylint: disable=undefined-variable
return "This minion does not run in a public cloud."
try:
result = __salt__["cmd.run_stdout"](
"instance-flavor-check",
success_retcodes=[10, 11], # 10 => PAYG, 11 => BYOS
)
except CommandExecutionError as e:
log.error("/usr/bin/instance-flavor-check command failure: %s", e)
result = "unknown"
if result == "PAYG":
log.debug("This minion is a PAYG instance.")
else:
log.debug("This minion is a BYOS instance.")
return result
070701000000C8000081B400000000000000000000000168EFD66400000F74000000000000000000000000000000000000002B00000000susemanager-sls/src/modules/supportdata.py"""
Module for Getting Supportdata
"""
from typing import Any, Dict, List
import logging
import os
import re
import shutil
import time
from datetime import datetime
# Just for lint and static analysis, will be replaced by salt's loader
__grains__ = {}
__salt__ = {}
__opts__ = {}
__virtualname__ = "supportdata"
# create a logger for the module
log = logging.getLogger(__name__)
def _get_supportdata_dir():
return "/var/log/supportdata-" + datetime.now().strftime("%Y%m%d%H%M%S")
def _cleanup_outdated_data():
def _log_error(*args):
path = args[1]
err = args[2]
log.error("Failed to remove %s: %s", path, err[1])
for d in os.listdir("/var/log/"):
fullpath = os.path.join("/var/log", d)
if os.path.isdir(fullpath) and re.match(r"^supportdata-[0-9]+$", d):
if (time.time() - os.path.getmtime(fullpath)) > 3600:
# older than 1 hour
# pylint: disable-next=deprecated-argument
shutil.rmtree(fullpath, onerror=_log_error)
def _get_command(output_dir: str) -> List[str]:
supportconfig_path = "/sbin/supportconfig"
mgradm_path = "/usr/bin/mgradm"
mgrpxy_path = "/usr/bin/mgrpxy"
sosreport_path = "/usr/sbin/sosreport"
sosreport_alt_path = "/usr/bin/sosreport"
cmd = []
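    # illustrative result on a SLES minion with plain supportconfig installed:
    #   ["/sbin/supportconfig", "-R", "<output_dir>"]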
if "Suse" in __grains__["os_family"]:
if os.path.exists(mgradm_path):
cmd = [mgradm_path, "support", "config", "--output", output_dir]
elif os.path.exists(mgrpxy_path):
cmd = [mgrpxy_path, "support", "config", "--output", output_dir]
elif os.path.exists(supportconfig_path):
cmd = [supportconfig_path, "-R", output_dir]
elif "RedHat" in __grains__["os_family"]:
if os.path.exists(sosreport_path):
cmd = [sosreport_path, "--batch", "--tmp-dir", output_dir]
elif "Debian" in __grains__["os_family"]:
if os.path.exists(sosreport_alt_path):
cmd = [sosreport_alt_path, "--batch", "--tmp-dir", output_dir]
else:
cmd = None
return cmd
def get(cmd_args: str = "", **kwargs) -> Dict[str, Any]:
"""
Collect supportdata like config and logfiles from the system
and upload them to the master's minion files cachedir
(defaults to ``/var/cache/salt/master/minions/minion-id/files``)
It needs ``file_recv`` set to ``True`` in the master configuration file.
cmd_args
extra commandline arguments for the executed tool
CLI Example:
.. code-block:: bash
salt '*' supportdata.get
"""
success = False
supportdata_dir = ""
error = None
returncode = None
del kwargs
_cleanup_outdated_data()
output_dir = _get_supportdata_dir()
extra_args = cmd_args.split()
cmd = _get_command(output_dir)
if cmd is None:
error = "Getting supportdata not supported for " + __grains__["os"]
returncode = 1
elif len(cmd) > 0:
os.makedirs(output_dir, exist_ok=True)
cmd.extend(extra_args)
log.debug("executing: %s", cmd)
ret = __salt__["cmd.run_all"](cmd, runas="root")
log.debug("return: %s", ret)
returncode = ret["retcode"]
if returncode != 0:
error = f'Failed to run {cmd[0]}: {ret["stderr"]}'
else:
if "master_uri" in __opts__ and __salt__["cp.push_dir"](output_dir):
# remove the output dir only when the upload was successful
# with salt-ssh "master_uri" is not in opts and we need to
# download it explicitly via scp
shutil.rmtree(output_dir, ignore_errors=True)
supportdata_dir = output_dir
success = True
else:
error = "Required tools to get support data are not installed"
returncode = 1
return dict(
success=success,
supportdata_dir=supportdata_dir,
error=error,
returncode=returncode,
)
070701000000C9000081B400000000000000000000000168EFD66400000C31000000000000000000000000000000000000002600000000susemanager-sls/src/modules/udevdb.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Export udev database
"""
from __future__ import absolute_import
import logging
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
try:
from salt.utils.path import which_bin as _which_bin
except ImportError:
from salt.utils import which_bin as _which_bin
__salt__ = {
"cmd.run_all": salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only work when udevadm is installed.
"""
return _which_bin(["udevadm"]) is not None
def exportdb():
"""
Extract all info delivered by udevadm
CLI Example:
.. code-block:: bash
        salt '*' udevdb.exportdb
"""
cmd = "udevadm info --export-db"
udev_result = __salt__["cmd.run_all"](cmd, output_loglevel="quiet")
if udev_result["retcode"] != 0:
raise CommandExecutionError(udev_result["stderr"])
devices = []
dev = {}
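    # udevadm separates device records with blank lines; each line has the form
    # "<letter>: <data>", e.g. (illustrative) "P: /devices/.../sda" for the
    # sysfs path and "E: DEVTYPE=disk" for an environment key/value pair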
for line in (line.strip() for line in udev_result["stdout"].splitlines()):
if line:
line = line.split(":", 1)
if len(line) != 2:
continue
query, data = line
if query == "E":
if query not in dev:
dev[query] = {}
key, val = data.strip().split("=", 1)
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass # Quiet, this is not a number.
dev[query][key] = val
else:
if query not in dev:
dev[query] = []
dev[query].append(data.strip())
else:
if dev:
normalize(dev)
add_scsi_info(dev)
devices.append(dev)
dev = {}
if dev:
normalize(dev)
add_scsi_info(dev)
devices.append(dev)
return devices
def normalize(dev):
"""
Replace list with only one element to the value of the element.
:param dev:
:return:
"""
for sect, val in list(dev.items()):
if isinstance(val, list) and len(val) == 1:
dev[sect] = val[0]
return dev
def add_scsi_info(dev):
"""
Add SCSI info from sysfs
"""
if (
dev.get("E")
and dev.get("E").get("SUBSYSTEM") == "scsi"
and dev.get("E").get("DEVTYPE") == "scsi_device"
):
sysfs_path = dev["P"]
scsi_type = __salt__["cmd.run_all"](
# pylint: disable-next=consider-using-f-string
"cat /sys/{0}/type".format(sysfs_path),
output_loglevel="quiet",
)
if scsi_type["retcode"] != 0:
raise CommandExecutionError(scsi_type["stderr"])
dev["X-Mgr"] = {}
dev["X-Mgr"]["SCSI_SYS_TYPE"] = scsi_type["stdout"]
070701000000CA000081B400000000000000000000000168EFD664000105A7000000000000000000000000000000000000002C00000000susemanager-sls/src/modules/uyuni_config.py# pylint: disable=missing-module-docstring
# coding: utf-8
# SPDX-FileCopyrightText: 2020-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
# pylint: disable-next=unused-import
from typing import Any, Dict, List, Optional, Union, Tuple
import ssl
import xmlrpc.client # type: ignore
import logging
import os
import salt.config
from salt.utils.minions import CkMinions
import datetime
AUTHENTICATION_ERROR = 2950
log = logging.getLogger(__name__)
__pillar__: Dict[str, Any] = {}
__context__: Dict[str, Any] = {}
__virtualname__: str = "uyuni"
class UyuniUsersException(Exception):
"""
Uyuni users Exception
"""
class UyuniChannelsException(Exception):
"""
Uyuni channels Exception
"""
class RPCClient:
"""
RPC Client
"""
def __init__(
self,
user: str = None,
password: str = None,
url: str = "http://localhost/rpc/api",
):
"""
XML-RPC client interface.
:param user: username for the XML-RPC API endpoints
:param password: password credentials for the XML-RPC API endpoints
:param url: URL of the remote host
"""
ctx: ssl.SSLContext = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.conn = xmlrpc.client.ServerProxy(
url, context=ctx, use_datetime=True, use_builtin_types=True
)
if user is None or password is None:
# if user or password not set, fallback to default user defined on pillar data
if "xmlrpc" in (__pillar__ or {}).get("uyuni", {}):
rpc_conf = (__pillar__ or {})["uyuni"]["xmlrpc"] or {}
self._user: str = rpc_conf.get("user", "")
self._password: str = rpc_conf.get("password", "")
else:
raise UyuniUsersException(
"Unable to find Pillar configuration for Uyuni XML-RPC API"
)
else:
self._user: str = user
self._password: str = password
self.token: Optional[str] = None
def get_user(self):
return self._user
def get_token(self, refresh: bool = False) -> Optional[str]:
"""
Authenticate.
        If an authentication token is present in __context__ it will be returned.
        Otherwise get a new authentication token from the XML-RPC API.
If refresh is True, get a new token from the API regardless of prior status.
:param refresh: force token refresh, discarding any cached value
:return: authentication token
"""
if self.token is None or refresh:
try:
auth_token_key = "uyuni.auth_token_" + self._user
if (not auth_token_key in __context__) or refresh:
__context__[auth_token_key] = self.conn.auth.login(
self._user, self._password
)
except Exception as exc:
log.error("Unable to login to the Uyuni server: %s", exc)
raise exc
self.token = __context__[auth_token_key]
return self.token
def __call__(self, method: str, *args, **kwargs) -> Any:
self.get_token()
if self.token is not None:
try:
log.debug("Calling RPC method %s", method)
return getattr(self.conn, method)(*((self.token,) + args))
# pylint: disable-next=broad-exception-caught
except Exception as exc:
if exc.faultCode != AUTHENTICATION_ERROR:
log.error("Unable to call RPC function: %s", str(exc))
raise exc
# pylint: disable-next=pointless-string-statement
"""
Authentication error when using Token, it can have expired.
Call a second time with a new session token
"""
log.warning("Fall back to the second try due to %s", str(exc))
try:
return getattr(self.conn, method)(
*((self.get_token(refresh=True),) + args)
)
# pylint: disable-next=redefined-outer-name
except Exception as exc:
log.error("Unable to call RPC function: %s", str(exc))
raise exc
raise UyuniUsersException("XML-RPC backend authentication error.")
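# Minimal usage sketch (credentials and method name are illustrative):
#   client = RPCClient(user="admin", password="secret")
#   orgs = client("org.listOrgs")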
class UyuniRemoteObject:
"""
RPC client
"""
def __init__(self, user: str = None, password: str = None):
self.client: RPCClient = RPCClient(user=user, password=password)
@staticmethod
def _convert_datetime_str(response: Dict[str, Any]) -> Dict[str, Any]:
"""
modify any key-value pair where value is a datetime object to a string.
:param response: response dictionary to be processed
        :return: new dictionary with datetime objects converted to string
"""
if response:
return dict(
[
# pylint: disable-next=consider-using-f-string
(k, "{0}".format(v)) if isinstance(v, datetime.datetime) else (k, v)
for k, v in response.items()
]
)
return None
@staticmethod
def _convert_datetime_list(response: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
modify any list of key-value pair where value is a datetime object to a string.
:param response: list of dictionaries to be processed
        :return: List of new dictionaries with datetime objects converted to string
"""
if response:
return [
UyuniRemoteObject._convert_datetime_str(value) for value in response
]
return None
@staticmethod
def _convert_bool_response(response: int):
return response == 1
class UyuniUser(UyuniRemoteObject):
"""
CRUD operation on users.
"""
def get_details(self, login: str) -> Dict[str, Any]:
"""
Retrieve details of an Uyuni user.
:param: login: user name to lookup
:return: Dictionary with user details
"""
return self.client("user.getDetails", login)
def list_users(self) -> List[Dict[str, Any]]:
"""
Return all Uyuni users visible to the authenticated user.
:return: all users visible to the authenticated user
"""
return self.client("user.listUsers")
def create(
self,
login: str,
password: str,
email: str,
first_name: str = "",
last_name: str = "",
use_pam_auth: bool = False,
) -> bool:
"""
Create an Uyuni user.
User will be created in the same organization as the authenticated user.
:param login: desired login name
:param password: desired password for the user
:param email: valid email address
:param first_name: First name
:param last_name: Last name
:param use_pam_auth: if you wish to use PAM authentication for this user
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"user.create",
login,
password,
first_name,
last_name,
email,
int(use_pam_auth),
)
)
def set_details(
self,
login: str,
password: str,
email: str,
first_name: str = "",
last_name: str = "",
) -> bool:
"""
Update an Uyuni user information.
:param login: login name
:param password: desired password for the user
:param email: valid email address
:param first_name: First name
:param last_name: Last name
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"user.setDetails",
login,
{
"password": password,
"first_name": first_name,
"last_name": last_name,
"email": email,
},
)
)
def delete(self, login: str) -> bool:
"""
Remove an Uyuni user.
:param login: login of the user
:return: boolean, True indicates success
"""
return self._convert_bool_response(self.client("user.delete", login))
def list_roles(self, login: str) -> List[str]:
"""
Return the list of roles of a user.
:param: login: user name to use on lookup
:return: list of user roles
"""
return self.client("user.listRoles", login)
def add_role(self, login: str, role: str) -> bool:
"""
Add a role to a user
:param login: login of the user
:param role: a new role
:return: boolean, True indicates success
"""
return self._convert_bool_response(self.client("user.addRole", login, role))
def remove_role(self, login: str, role: str) -> bool:
"""
        Remove a role from a user.
:param login: login of the user
:param role: one of uyuni user roles
:return: boolean, True indicates success
"""
return self._convert_bool_response(self.client("user.removeRole", login, role))
def list_assigned_system_groups(
self, login: str
) -> List[Dict[str, Union[int, str]]]:
"""
Returns the system groups that a user can administer.
:param login: login of the user
:return: List of system groups that a user can administer
"""
return self.client("user.listAssignedSystemGroups", login)
def add_assigned_system_groups(
self, login: str, server_group_names: List[str], set_default: bool = False
    ) -> bool:
"""
Add system groups to a user's list of assigned system groups.
:param login: user id to look for
:param server_group_names: system groups to add
:param set_default: True if the system groups should also be added to user's default list.
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"user.addAssignedSystemGroups", login, server_group_names, set_default
)
)
def remove_assigned_system_groups(
self, login: str, server_group_names: List[str], set_default: bool = False
    ) -> bool:
"""
Remove system groups from a user's list of assigned system groups
:param login: user id to look for
:param server_group_names: systems groups to remove from list of assigned system groups
        :param set_default: True if the system groups should also be removed from the user's default list.
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"user.removeAssignedSystemGroups",
login,
server_group_names,
set_default,
)
)
# pylint: disable-next=missing-class-docstring
class UyuniChannel(UyuniRemoteObject):
def list_manageable_channels(self) -> List[Dict[str, Union[int, str]]]:
"""
List all software channels that the user is entitled to manage.
:return: list of manageable channels
"""
return self.client("channel.listManageableChannels")
def list_my_channels(self) -> List[Dict[str, Union[int, str]]]:
"""
        List all software channels that belong to the user's organization.
        :return: list of channels
"""
return self.client("channel.listMyChannels")
# pylint: disable-next=missing-class-docstring
class UyuniChannelSoftware(UyuniRemoteObject):
    def set_user_manageable(self, channel_label: str, login: str, access: bool) -> bool:
"""
Set the manageable flag for a given channel and user.
If access is set to 'true', this method will give the user manage permissions to the channel.
Otherwise, that privilege is revoked.
:param channel_label: label of the channel
:param login: user login id
:param access: True if the user should have management access to channel
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"channel.software.setUserManageable", channel_label, login, access
)
)
def set_user_subscribable(
self, channel_label: str, login: str, access: bool
    ) -> bool:
"""
Set the subscribable flag for a given channel and user.
If value is set to 'true', this method will give the user subscribe permissions to the channel.
Otherwise, that privilege is revoked.
:param channel_label: label of the channel
:param login: user login id
:param access: True if the user should have subscribe permission to the channel
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"channel.software.setUserSubscribable", channel_label, login, access
)
)
def is_user_manageable(self, channel_label: str, login: str) -> bool:
"""
Returns whether the channel may be managed by the given user.
:param channel_label: label of the channel
:param login: user login id
:return: boolean which indicates if user can manage channel or not
"""
return self._convert_bool_response(
self.client("channel.software.isUserManageable", channel_label, login)
)
def is_user_subscribable(self, channel_label: str, login: str) -> bool:
"""
Returns whether the channel may be subscribed to by the given user.
:param channel_label: label of the channel
:param login: user login id
        :return: boolean which indicates if the user may subscribe to the channel or not
"""
return self._convert_bool_response(
self.client("channel.software.isUserSubscribable", channel_label, login)
)
def is_globally_subscribable(self, channel_label: str) -> bool:
"""
        Returns whether the channel is globally subscribable within the organization
:param channel_label: label of the channel
:return: boolean which indicates if channel is globally subscribable
"""
return self._convert_bool_response(
self.client("channel.software.isGloballySubscribable", channel_label)
)
class UyuniOrg(UyuniRemoteObject):
"""
CRUD operations on organizations
"""
    def list_orgs(self) -> List[Dict[str, Union[int, str, bool]]]:
"""
List all organizations.
:return: list of all existing organizations
"""
return self.client("org.listOrgs")
def get_details(self, name: str) -> Dict[str, Union[int, str, bool]]:
"""
Get org data by name.
        :param name: organization name
:return: organization details
"""
return self.client("org.getDetails", name)
def create(
self,
name: str,
org_admin_user: str,
org_admin_password: str,
first_name: str,
last_name: str,
email: str,
admin_prefix: str = "Mr.",
pam: bool = False,
) -> Dict[str, Union[str, int, bool]]:
"""
Create a new Uyuni org.
:param name: organization name
:param org_admin_user: organization admin user
:param org_admin_password: organization admin password
:param first_name: organization admin first name
:param last_name: organization admin last name
:param email: organization admin email
:param admin_prefix: organization admin prefix
        :param pam: organization admin PAM authentication
:return: dictionary with org information
"""
return self.client(
"org.create",
name,
org_admin_user,
org_admin_password,
admin_prefix,
first_name,
last_name,
email,
pam,
)
def delete(self, name: str) -> int:
"""
Delete an Uyuni org.
:param name: organization name
:return: boolean, True indicates success
"""
org_id = int(self.get_details(name=name).get("id", -1))
return self._convert_bool_response(self.client("org.delete", org_id))
def update_name(self, org_id: int, name: str) -> Dict[str, Union[str, int, bool]]:
"""
Update an Uyuni org name.
:param org_id: organization internal id
:param name: new organization name
:return: organization details
"""
return self.client("org.updateName", org_id, name)
# pylint: disable-next=missing-class-docstring
class UyuniOrgTrust(UyuniRemoteObject):
def __init__(self, user: str = None, password: str = None):
UyuniRemoteObject.__init__(self, user, password)
self._org_manager = UyuniOrg(user, password)
def list_orgs(self) -> List[Dict[str, Union[str, int]]]:
"""
List all organizations trusted by the authenticated user organization
:return: List of organization details
"""
return self.client("org.trusts.listOrgs")
def list_trusts(self, org_name: str) -> List[Dict[str, Union[str, int, bool]]]:
"""
        List all trusts for the organization
        :param org_name: name of the organization
        :return: list with all organizations and their trust status
"""
org = self._org_manager.get_details(org_name)
return self.client("org.trusts.listTrusts", org["id"])
def add_trust_by_name(self, org_name: str, org_trust: str) -> int:
"""
        Set an organization as trusted by another
:param org_name: organization name
:param org_trust: name of organization to trust
:return: boolean, True indicates success
"""
this_org = self._org_manager.get_details(org_name)
trust_org = self._org_manager.get_details(org_trust)
return self.add_trust(this_org["id"], trust_org["id"])
    def add_trust(self, org_id: int, org_trust_id: int) -> int:
        """
        Set an organization as trusted by another
:param org_id: organization id
:param org_trust_id: organization id to trust
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("org.trusts.addTrust", org_id, org_trust_id)
)
def remove_trust_by_name(self, org_name: str, org_untrust: str) -> int:
"""
        Set an organization as not trusted by another
:param org_name: organization name
:param org_untrust: organization name to untrust
:return: boolean, True indicates success
"""
this_org = self._org_manager.get_details(org_name)
trust_org = self._org_manager.get_details(org_untrust)
return self.remove_trust(this_org["id"], trust_org["id"])
    def remove_trust(self, org_id: int, org_untrust_id: int) -> int:
        """
        Set an organization as not trusted by another
:param org_id: organization id
:param org_untrust_id: organization id to untrust
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("org.trusts.removeTrust", org_id, org_untrust_id)
)
class UyuniSystemgroup(UyuniRemoteObject):
"""
Provides methods to access and modify system groups.
"""
def list_all_groups(self) -> List[Dict[str, Union[int, str]]]:
"""
Retrieve a list of system groups that are accessible by the user
:return: list with group information
"""
return self.client("systemgroup.listAllGroups")
def get_details(self, name: str) -> Dict[str, Union[int, str]]:
"""
Retrieve details of a system group.
:param name: Name of the system group.
:return: data of the system group.
"""
return self.client("systemgroup.getDetails", name)
def create(self, name: str, description: str) -> Dict[str, Union[int, str]]:
"""
Create a new system group.
:param name: Name of the system group.
:param description: Description of the system group.
:return: data of the system group.
"""
return self.client("systemgroup.create", name, description)
def delete(self, name: str) -> int:
"""
Delete a system group.
:param name: Name of the system group.
:return: boolean, True indicates success
"""
return self._convert_bool_response(self.client("systemgroup.delete", name))
def update(self, name: str, description: str) -> Dict[str, Union[int, str]]:
"""
Update an existing system group.
:param name: Name of the system group.
:param description: Description of the system group.
:return: data of the system group.
"""
return self.client("systemgroup.update", name, description)
def list_systems(self, name: str, minimal: bool = True) -> List[Dict[str, Any]]:
"""
Get information about systems in a group.
:param name: Group name
:param minimal: default True. Only return minimal information about systems, use False to get more details
:return: List of system information
"""
return self._convert_datetime_list(
self.client(
(
"systemgroup.listSystemsMinimal"
if minimal
else "systemgroup.listSystems"
),
name,
)
)
# pylint: disable-next=dangerous-default-value
def add_remove_systems(
self, name: str, add_remove: bool, system_ids: List[int] = []
) -> int:
"""
Add or remove systems from a system group
:param name: Group name
:param add_remove: True to add to the group, False to remove
:param system_ids: List of system ids to add or remove
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("systemgroup.addOrRemoveSystems", name, system_ids, add_remove)
)
# pylint: disable-next=missing-class-docstring
class UyuniSystems(UyuniRemoteObject):
def get_minion_id_map(self, refresh: bool = False) -> Dict[str, int]:
"""
Returns a map from minion ID to Uyuni system ID for all systems a user has access to
This method caches results, in order to avoid multiple XMLRPC calls.
:param refresh: Get new data from server, ignoring values in local context cache
        :return: Map between minion ID and system ID of all systems accessible by the authenticated user
"""
minions_token_key = "uyuni.minions_id_map_" + self.client.get_user()
        if minions_token_key not in __context__ or refresh:
__context__[minions_token_key] = self.client("system.getMinionIdMap")
return __context__[minions_token_key]
class UyuniActivationKey(UyuniRemoteObject):
"""
CRUD operations on Activation Keys.
"""
# pylint: disable-next=redefined-builtin
def get_details(self, id: str) -> Dict[str, Any]:
"""
Get details of an Uyuni Activation Key
:param id: the Activation Key ID
:return: Activation Key information
"""
return self.client("activationkey.getDetails", id)
# pylint: disable-next=redefined-builtin
def delete(self, id: str) -> bool:
"""
Deletes an Uyuni Activation Key
:param id: the Activation Key ID
:return: boolean, True indicates success
"""
return self._convert_bool_response(self.client("activationkey.delete", id))
# pylint: disable-next=dangerous-default-value
def create(
self,
key: str,
description: str,
base_channel_label: str = "",
usage_limit: int = 0,
system_types: List[int] = [],
universal_default: bool = False,
) -> bool:
"""
Creates an Uyuni Activation Key
:param key: activation key name
:param description: activation key description
:param base_channel_label: base channel to be used
:param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
:param system_types: system types to be assigned.
            Can be one of: 'virtualization_host', 'container_build_host',
            'monitoring_entitled', 'osimage_build_host'
:param universal_default: sets this activation key as organization universal default
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client(
"activationkey.create",
key,
description,
base_channel_label,
usage_limit,
system_types,
universal_default,
)
)
def set_details(
self,
key: str,
description: str = None,
contact_method: str = None,
base_channel_label: str = None,
usage_limit: int = None,
universal_default: bool = False,
):
"""
Updates an Uyuni Activation Key
:param key: activation key name
:param description: activation key description
:param base_channel_label: base channel to be used
:param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
:param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
:param universal_default: sets this activation key as organization universal default
:return: boolean, True indicates success
"""
data = {"universal_default": universal_default}
if description:
data["description"] = description
if base_channel_label is not None:
data["base_channel_label"] = base_channel_label
if contact_method:
data["contact_method"] = contact_method
if usage_limit:
data["usage_limit"] = usage_limit
else:
data["unlimited_usage_limit"] = True
return self._convert_bool_response(
self.client("activationkey.setDetails", key, data)
)
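    # Sketch of the payload built by set_details above (derived from the code,
    # shown for illustration only; "key1" is a placeholder):
    #   set_details("key1", usage_limit=10)
    #     -> data == {"universal_default": False, "usage_limit": 10}
    #   set_details("key1")  # falsy usage_limit requests unlimited usage
    #     -> data == {"universal_default": False, "unlimited_usage_limit": True}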
def add_entitlements(self, key: str, system_types: List[str]) -> bool:
"""
Add a list of entitlements to an activation key.
:param key: activation key name
:param system_types: list of system types to be added
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.addEntitlements", key, system_types)
)
def remove_entitlements(self, key: str, system_types: List[str]) -> bool:
"""
Remove a list of entitlements from an activation key.
:param key: activation key name
:param system_types: list of system types to be removed
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.removeEntitlements", key, system_types)
)
def add_child_channels(self, key: str, child_channels: List[str]) -> bool:
"""
Add child channels to an activation key.
:param key: activation key name
:param child_channels: List of child channels to be added
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.addChildChannels", key, child_channels)
)
def remove_child_channels(self, key: str, child_channels: List[str]) -> bool:
"""
Remove child channels from an activation key.
:param key: activation key name
:param child_channels: List of child channels to be removed
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.removeChildChannels", key, child_channels)
)
def check_config_deployment(self, key: str) -> bool:
"""
Return the status of the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
        :return: boolean, true if enabled, false if disabled
"""
return self._convert_bool_response(
self.client("activationkey.checkConfigDeployment", key)
)
def enable_config_deployment(self, key: str) -> bool:
"""
Enables the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.enableConfigDeployment", key)
)
def disable_config_deployment(self, key: str) -> bool:
"""
Disables the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.disableConfigDeployment", key)
)
def add_packages(self, key: str, packages: List[Any]) -> bool:
"""
Add a list of packages to an activation key.
:param key: activation key name
:param packages: list of packages to be added
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.addPackages", key, packages)
)
def remove_packages(self, key: str, packages: List[Any]) -> bool:
"""
Remove a list of packages from an activation key.
:param key: activation key name
:param packages: list of packages to be removed
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.removePackages", key, packages)
)
def add_server_groups(self, key: str, server_groups: List[int]) -> bool:
"""
Add a list of server groups to an activation key.
:param key: activation key name
:param server_groups: list of server groups to be added
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.addServerGroups", key, server_groups)
)
def remove_server_groups(self, key: str, server_groups: List[int]) -> bool:
"""
Remove a list of server groups from an activation key.
:param key: activation key name
:param server_groups: list of server groups to be removed
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.removeServerGroups", key, server_groups)
)
def list_config_channels(self, key: str) -> List[Dict[str, Any]]:
"""
List configuration channels associated to an activation key.
:param key: activation key name
:return: List of configuration channels
"""
return self.client("activationkey.listConfigChannels", key)
def set_config_channels(
self, keys: List[str], config_channel_label: List[str]
) -> bool:
"""
Replace the existing set of configuration channels on the given activation keys.
Channels are ranked by their order in the array.
:param keys: list of activation key names
        :param config_channel_label: list of configuration channel labels
:return: boolean, True indicates success
"""
return self._convert_bool_response(
self.client("activationkey.setConfigChannels", keys, config_channel_label)
)
class UyuniChildMasterIntegration:
"""
Integration with the Salt Master which is running
on the same host as this current Minion.
"""
DEFAULT_MASTER_CONFIG_PATH = "/etc/salt/master"
def __init__(self):
self._minions = CkMinions(salt.config.client_config(self._get_master_config()))
@staticmethod
def _get_master_config() -> str:
"""
Return master config.
:return: path to salt master configuration file
"""
cfg_path = UyuniChildMasterIntegration.DEFAULT_MASTER_CONFIG_PATH
for path in (
__pillar__.get("uyuni", {}).get("masters", {}).get("configs", [cfg_path])
):
if os.path.exists(path):
cfg_path = path
break
return cfg_path
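    # A minimal pillar sketch (hypothetical path) showing how the probe above
    # can be pointed at alternative master configs; the first existing path
    # wins, with DEFAULT_MASTER_CONFIG_PATH as the fallback:
    #
    #   uyuni:
    #     masters:
    #       configs:
    #         - /etc/salt/master.d/custom-master.conf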
def select_minions(
self, target: str, target_type: str = "glob"
) -> Dict[str, Union[List[str], bool]]:
"""
        Select minion IDs that match the target expression.
:param target: target expression to be applied
:param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
:return: list of minions
"""
return self._minions.check_minions(expr=target, tgt_type=target_type)
# pylint: disable-next=invalid-name
def __virtual__():
"""
Provide Uyuni configuration state module.
:return:
"""
return __virtualname__
# Users
def user_get_details(
login, password=None, org_admin_user=None, org_admin_password=None
):
"""
Get details of an Uyuni user
    If a password is provided as a parameter, it will be used to authenticate.
    If no user credentials are provided, organization administrator credentials will be used.
    If neither user credentials nor organization admin credentials are provided, credentials from pillar will be used.
:param login: user id to look for
:param password: password for the user
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: The user information
"""
return UyuniUser(
org_admin_user if password is None else login,
org_admin_password if password is None else password,
).get_details(login)
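# Hedged CLI sketch of the credential fallback above ("jdoe" and "secret" are
# placeholders, and the exact invocation depends on the deployment):
#
#   salt <minion> uyuni.user_get_details jdoe                  # pillar credentials
#   salt <minion> uyuni.user_get_details jdoe password=secret  # user's own login
#   salt <minion> uyuni.user_get_details jdoe org_admin_user=admin \
#       org_admin_password=admin                               # org admin login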
def user_list_users(org_admin_user=None, org_admin_password=None):
"""
Return all Uyuni users visible to the authenticated user.
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: all users visible to the authenticated user
"""
return UyuniUser(org_admin_user, org_admin_password).list_users()
def user_create(
login,
password,
email,
first_name,
last_name,
use_pam_auth=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Create an Uyuni user.
:param login: user id to look for
:param password: password for the user
:param email: user email address
:param first_name: user first name
:param last_name: user last name
:param use_pam_auth: if you wish to use PAM authentication for this user
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).create(
login=login,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
use_pam_auth=use_pam_auth,
)
def user_set_details(
login,
password,
email,
first_name=None,
last_name=None,
org_admin_user=None,
org_admin_password=None,
):
"""
Update an Uyuni user.
:param login: user id to look for
:param password: password for the user
:param email: user email address
:param first_name: user first name
:param last_name: user last name
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).set_details(
login=login,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
)
def user_delete(login, org_admin_user=None, org_admin_password=None):
"""
Deletes an Uyuni user
:param login: user id to look for
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).delete(login)
def user_list_roles(login, password=None, org_admin_user=None, org_admin_password=None):
"""
Returns an Uyuni user roles.
    If a password is provided as a parameter, it will be used to authenticate.
    If no user credentials are provided, organization administrator credentials will be used.
    If neither user credentials nor organization admin credentials are provided, credentials from pillar are used.
:param login: user id to look for
:param password: password for the user
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: List of user roles assigned
"""
return UyuniUser(
org_admin_user if password is None else login,
org_admin_password if password is None else password,
).list_roles(login)
def user_add_role(login, role, org_admin_user=None, org_admin_password=None):
"""
Adds a role to an Uyuni user.
:param login: user id to look for
:param role: role to be added to the user
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).add_role(
login=login, role=role
)
def user_remove_role(login, role, org_admin_user=None, org_admin_password=None):
"""
Remove a role from an Uyuni user.
:param login: user id to look for
:param role: role to be removed from the user
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).remove_role(
login=login, role=role
)
def user_list_assigned_system_groups(
login, org_admin_user=None, org_admin_password=None
):
"""
Returns the system groups that a user can administer.
:param login: user id to look for
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: List of system groups that a user can administer
"""
return UyuniUser(org_admin_user, org_admin_password).list_assigned_system_groups(
login=login
)
def user_add_assigned_system_groups(
login,
server_group_names,
set_default=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Add system groups to user's list of assigned system groups.
:param login: user id to look for
:param server_group_names: systems groups to add to list of assigned system groups
    :param set_default: True if the system groups should also be added to the user's list of default system groups.
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).add_assigned_system_groups(
login=login, server_group_names=server_group_names, set_default=set_default
)
def user_remove_assigned_system_groups(
login,
server_group_names,
set_default=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Remove system groups from a user's list of assigned system groups.
:param login: user id to look for
:param server_group_names: systems groups to remove from list of assigned system groups
    :param set_default: True if the system groups should also be removed from the user's list of default system groups.
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniUser(org_admin_user, org_admin_password).remove_assigned_system_groups(
login=login, server_group_names=server_group_names, set_default=set_default
)
# Software channels
def channel_list_manageable_channels(login, password):
"""
    List all manageable channels for the authenticated user
:param login: user login id
:param password: user password
:return: list of manageable channels for the user
"""
return UyuniChannel(login, password).list_manageable_channels()
def channel_list_my_channels(login, password):
"""
    List all subscribed channels for the authenticated user
:param login: user login id
:param password: user password
:return: list of subscribed channels for the user
"""
return UyuniChannel(login, password).list_my_channels()
def channel_software_set_user_manageable(
channel_label, login, access, org_admin_user=None, org_admin_password=None
):
"""
Set the manageable flag for a given channel and user.
If access is set to 'true', this method will give the user manage permissions to the channel.
Otherwise, that privilege is revoked.
:param channel_label: label of the channel
:param login: user login id
:param access: True if the user should have management access to channel
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniChannelSoftware(org_admin_user, org_admin_password).set_user_manageable(
channel_label, login, access
)
def channel_software_set_user_subscribable(
channel_label, login, access, org_admin_user=None, org_admin_password=None
):
"""
Set the subscribable flag for a given channel and user.
    If access is set to 'true', this method will give the user subscribe permissions to the channel.
Otherwise, that privilege is revoked.
:param channel_label: label of the channel
:param login: user login id
:param access: True if the user should have subscribe access to channel
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniChannelSoftware(
org_admin_user, org_admin_password
).set_user_subscribable(channel_label, login, access)
def channel_software_is_user_manageable(
channel_label, login, org_admin_user=None, org_admin_password=None
):
"""
Returns whether the channel may be managed by the given user.
:param channel_label: label of the channel
:param login: user login id
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean which indicates if user can manage channel or not
"""
return UyuniChannelSoftware(org_admin_user, org_admin_password).is_user_manageable(
channel_label, login
)
def channel_software_is_user_subscribable(
channel_label, login, org_admin_user=None, org_admin_password=None
):
"""
    Returns whether the channel may be subscribed to by the given user.
:param channel_label: label of the channel
:param login: user login id
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
    :return: boolean which indicates whether the user can subscribe to the channel
"""
return UyuniChannelSoftware(
org_admin_user, org_admin_password
).is_user_subscribable(channel_label, login)
def channel_software_is_globally_subscribable(
channel_label, org_admin_user=None, org_admin_password=None
):
"""
Returns whether the channel is globally subscribable on the organization
:param channel_label: label of the channel
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean which indicates if channel is globally subscribable
"""
return UyuniChannelSoftware(
org_admin_user, org_admin_password
).is_globally_subscribable(channel_label)
def org_list_orgs(admin_user=None, admin_password=None):
"""
List all organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: list of all available organizations.
"""
return UyuniOrg(admin_user, admin_password).list_orgs()
def org_get_details(name, admin_user=None, admin_password=None):
"""
Get details of an organization.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
    :param name: organization name
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: organization details
"""
return UyuniOrg(admin_user, admin_password).get_details(name)
def org_delete(name, admin_user=None, admin_password=None):
"""
Delete an organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: organization name
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: boolean, True indicates success
"""
return UyuniOrg(admin_user, admin_password).delete(name)
def org_create(
name,
org_admin_user,
org_admin_password,
first_name,
last_name,
email,
admin_prefix="Mr.",
pam=False,
admin_user=None,
admin_password=None,
):
"""
Create an Uyuni organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: organization name
:param org_admin_user: organization admin user
:param org_admin_password: organization admin password
:param first_name: organization admin first name
:param last_name: organization admin last name
:param email: organization admin email
:param admin_prefix: organization admin prefix
    :param pam: organization admin PAM authentication
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: dictionary with org information
"""
return UyuniOrg(admin_user, admin_password).create(
name=name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
first_name=first_name,
last_name=last_name,
email=email,
admin_prefix=admin_prefix,
pam=pam,
)
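# Hypothetical call sketch (all values are placeholders):
#
#   salt <minion> uyuni.org_create name=Engineering \
#       org_admin_user=eng_admin org_admin_password=secret \
#       first_name=Jane last_name=Doe email=jane@example.com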
def org_update_name(org_id, name, admin_user=None, admin_password=None):
"""
    Update an Uyuni organization name
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param org_id: organization internal id
:param name: new organization name
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: organization details
"""
return UyuniOrg(admin_user, admin_password).update_name(org_id, name)
def org_trust_list_orgs(org_admin_user=None, org_admin_password=None):
"""
List all organizations trusted by the authenticated user organization
:param org_admin_user: organization admin user
:param org_admin_password: organization admin password
:return: List of organization details
"""
return UyuniOrgTrust(org_admin_user, org_admin_password).list_orgs()
def org_trust_list_trusts(org_name, admin_user=None, admin_password=None):
"""
List all trusts for one organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
    :param org_name: name of the organization whose trusts will be listed
:param admin_user: authentication user
:param admin_password: authentication user password
:return: list with all organizations and their trust status
"""
return UyuniOrgTrust(admin_user, admin_password).list_trusts(org_name)
def org_trust_add_trust_by_name(
org_name, org_trust, admin_user=None, admin_password=None
):
"""
Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param org_name: organization name
:param org_trust: Trust organization name
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: boolean, True indicates success
"""
return UyuniOrgTrust(admin_user, admin_password).add_trust_by_name(
org_name, org_trust
)
def org_trust_add_trust(org_id, org_trust_id, admin_user=None, admin_password=None):
"""
Add an organization to the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param org_id: Organization id
:param org_trust_id: Trust organization id
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: boolean, True indicates success
"""
return UyuniOrgTrust(admin_user, admin_password).add_trust(org_id, org_trust_id)
def org_trust_remove_trust_by_name(
org_name, org_untrust, admin_user=None, admin_password=None
):
"""
Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param org_name: organization name
:param org_untrust: organization name to untrust
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: boolean, True indicates success
"""
return UyuniOrgTrust(admin_user, admin_password).remove_trust_by_name(
org_name, org_untrust
)
def org_trust_remove_trust(
org_id, org_untrust_id, admin_user=None, admin_password=None
):
"""
Remove an organization from the list of trusted organizations.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
    :param org_id: organization id
    :param org_untrust_id: organization id to untrust
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: boolean, True indicates success
"""
return UyuniOrgTrust(admin_user, admin_password).remove_trust(
org_id, org_untrust_id
)
# System Groups
def systemgroup_create(name, descr, org_admin_user=None, org_admin_password=None):
"""
Create a system group.
:param name: Name of the system group.
:param descr: Description of the system group.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: details of the system group
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).create(
name=name, description=descr
)
def systemgroup_list_all_groups(username, password):
"""
Retrieve a list of system groups that are accessible by the user
:param username: username to authenticate with
:param password: password to authenticate with
    :return: list of system groups accessible by the user
"""
return UyuniSystemgroup(username, password).list_all_groups()
def systemgroup_get_details(name, org_admin_user=None, org_admin_password=None):
"""
Return system group details.
:param name: Name of the system group.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: details of the system group
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).get_details(name=name)
def systemgroup_update(name, descr, org_admin_user=None, org_admin_password=None):
"""
Update a system group.
:param name: Name of the system group.
:param descr: Description of the system group.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: details of the system group
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).update(
name=name, description=descr
)
def systemgroup_delete(name, org_admin_user=None, org_admin_password=None):
"""
Delete a system group.
:param name: Name of the system group.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: boolean, True indicates success
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).delete(name=name)
def systemgroup_list_systems(
name, minimal=True, org_admin_user=None, org_admin_password=None
):
"""
List systems in a system group
:param name: Name of the system group.
:param minimal: default True. Only return minimal information about systems, use False to get more details
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: List of system information
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).list_systems(
name=name, minimal=minimal
)
# pylint: disable-next=dangerous-default-value
def systemgroup_add_remove_systems(
name, add_remove, system_ids=[], org_admin_user=None, org_admin_password=None
):
"""
Update systems on a system group.
:param name: Name of the system group.
:param add_remove: True to add to the group, False to remove.
:param system_ids: list of system ids to add/remove from group
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: boolean, True indicates success
"""
return UyuniSystemgroup(org_admin_user, org_admin_password).add_remove_systems(
name=name, add_remove=add_remove, system_ids=system_ids
)
def master_select_minions(target=None, target_type="glob"):
"""
    Return the list of minions from the Salt Master configured on the same host that match the given target expression
:param target: target expression to filter minions
:param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
:return: list of minion IDs
"""
cmi = UyuniChildMasterIntegration()
return cmi.select_minions(target=target, target_type=target_type)
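# Usage sketch (targets are placeholders): matching against the co-located
# master's minion list with the default glob and with a grain expression.
#
#   salt <minion> uyuni.master_select_minions 'web*'
#   salt <minion> uyuni.master_select_minions 'os:SUSE' target_type=grain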
def systems_get_minion_id_map(username=None, password=None, refresh=False):
"""
Returns a map from minion ID to Uyuni system ID for all systems a user has access to
:param username: username to authenticate
:param password: password for user
:param refresh: Get new data from server, ignoring values in local context cache
    :return: Map between minion ID and system ID of all systems accessible by the authenticated user
"""
return UyuniSystems(username, password).get_minion_id_map(refresh)
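# Cache behaviour sketch: the map is held in __context__ under a per-user key,
# so repeated calls within the same run avoid extra XMLRPC round trips; pass
# refresh=True to bypass the cached value.
#
#   salt <minion> uyuni.systems_get_minion_id_map refresh=True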
# Activation Keys
# pylint: disable-next=redefined-builtin
def activation_key_get_details(id, org_admin_user=None, org_admin_password=None):
"""
Get details of an Uyuni Activation Key
:param id: the Activation Key ID
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: Activation Key information
"""
return UyuniActivationKey(org_admin_user, org_admin_password).get_details(id)
# pylint: disable-next=redefined-builtin
def activation_key_delete(id, org_admin_user=None, org_admin_password=None):
"""
Deletes an Uyuni Activation Key
:param id: the Activation Key ID
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).delete(id)
# pylint: disable-next=dangerous-default-value
def activation_key_create(
key,
description,
base_channel_label="",
usage_limit=0,
system_types=[],
universal_default=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Creates an Uyuni Activation Key
:param key: activation key name
:param description: activation key description
:param base_channel_label: base channel to be used
:param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
:param system_types: system types to be assigned.
        Can be one of: 'virtualization_host', 'container_build_host',
        'monitoring_entitled', 'osimage_build_host'
:param universal_default: sets this activation key as organization universal default
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).create(
key,
description,
base_channel_label,
usage_limit,
system_types,
universal_default,
)
def activation_key_set_details(
key,
description=None,
contact_method=None,
base_channel_label=None,
usage_limit=None,
universal_default=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Updates an Uyuni Activation Key
:param key: activation key name
:param description: activation key description
:param base_channel_label: base channel to be used
:param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
:param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
:param universal_default: sets this activation key as organization universal default
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).set_details(
key,
description=description,
contact_method=contact_method,
base_channel_label=base_channel_label,
usage_limit=usage_limit,
universal_default=universal_default,
)
def activation_key_add_entitlements(
key, system_types, org_admin_user=None, org_admin_password=None
):
"""
Add a list of entitlements to an activation key.
:param key: activation key name
:param system_types: list of system types to be added
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).add_entitlements(
key, system_types
)
def activation_key_remove_entitlements(
key, system_types, org_admin_user=None, org_admin_password=None
):
"""
Remove a list of entitlements from an activation key.
:param key: activation key name
:param system_types: list of system types to be removed
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).remove_entitlements(
key, system_types
)
def activation_key_add_child_channels(
key, child_channels, org_admin_user=None, org_admin_password=None
):
"""
Add child channels to an activation key.
:param key: activation key name
:param child_channels: List of child channels to be added
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).add_child_channels(
key, child_channels
)
def activation_key_remove_child_channels(
key, child_channels, org_admin_user=None, org_admin_password=None
):
"""
Remove child channels from an activation key.
:param key: activation key name
:param child_channels: List of child channels to be removed
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).remove_child_channels(
key, child_channels
)
def activation_key_check_config_deployment(
key, org_admin_user=None, org_admin_password=None
):
"""
Return the status of the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, true if enabled, false if disabled
"""
return UyuniActivationKey(
org_admin_user, org_admin_password
).check_config_deployment(key)
def activation_key_enable_config_deployment(
key, org_admin_user=None, org_admin_password=None
):
"""
Enables the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(
org_admin_user, org_admin_password
).enable_config_deployment(key)
def activation_key_disable_config_deployment(
key, org_admin_user=None, org_admin_password=None
):
"""
Disables the 'configure_after_registration' flag for an Activation Key.
:param key: activation key name
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(
org_admin_user, org_admin_password
).disable_config_deployment(key)
def activation_key_add_packages(
key, packages, org_admin_user=None, org_admin_password=None
):
"""
Add a list of packages to an activation key.
:param key: activation key name
:param packages: list of packages to be added
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).add_packages(
key, packages
)
def activation_key_remove_packages(
key, packages, org_admin_user=None, org_admin_password=None
):
"""
Remove a list of packages from an activation key.
:param key: activation key name
:param packages: list of packages to be removed
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).remove_packages(
key, packages
)
def activation_key_add_server_groups(
key, server_groups, org_admin_user=None, org_admin_password=None
):
"""
Add a list of server groups to an activation key.
:param key: activation key name
:param server_groups: list of server groups to be added
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).add_server_groups(
key, server_groups
)
def activation_key_remove_server_groups(
key, server_groups, org_admin_user=None, org_admin_password=None
):
"""
Remove a list of server groups from an activation key.
:param key: activation key name
:param server_groups: list of server groups to be removed
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).remove_server_groups(
key, server_groups
)
def activation_key_list_config_channels(
key, org_admin_user=None, org_admin_password=None
):
"""
List configuration channels associated to an activation key.
:param key: activation key name
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: List of configuration channels
"""
return UyuniActivationKey(org_admin_user, org_admin_password).list_config_channels(
key
)
def activation_key_set_config_channels(
keys, config_channel_label, org_admin_user=None, org_admin_password=None
):
"""
Replace the existing set of configuration channels on the given activation keys.
Channels are ranked by their order in the array.
:param keys: list of activation key names
:param config_channel_label: list of configuration channels labels
:param org_admin_user: organization admin username
:param org_admin_password: organization admin password
:return: boolean, True indicates success
"""
return UyuniActivationKey(org_admin_user, org_admin_password).set_config_channels(
keys, config_channel_label
)
070701000000CB000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001B00000000susemanager-sls/src/states070701000000CC000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002700000000susemanager-sls/src/states/__init__.py070701000000CD000081B400000000000000000000000168EFD66400000971000000000000000000000000000000000000002900000000susemanager-sls/src/states/appstreams.py# SPDX-FileCopyrightText: 2024-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Salt Custom State for managing AppStreams configuration
This custom state provides functionality for managing AppStreams modules,
enabling or disabling them as needed.
The 'enabled' and 'disabled' states ensure that specified AppStreams modules
are respectively enabled or disabled.
"""
def enabled(name, appstreams):
"""
Ensure that the appstreams are enabled.
    :param str name:
The name of the state
:param list appstreams:
A list of appstreams to enable in the format module_name:stream
"""
# pylint: disable-next=undefined-variable
if __opts__["test"]:
return _test_mode(name, appstreams, "enable")
if isinstance(appstreams, list) and len(appstreams) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No AppStreams to enable provided",
}
# pylint: disable-next=undefined-variable
result, comment, changes = __salt__["appstreams.enable"](appstreams)
return {"name": name, "changes": changes, "result": result, "comment": comment}
def disabled(name, appstreams):
"""
Ensure that the appstreams are disabled.
    :param str name:
The name of the state
:param list appstreams:
A list of appstreams to disable
"""
# pylint: disable-next=undefined-variable
if __opts__["test"]:
return _test_mode(name, appstreams, "disable")
if isinstance(appstreams, list) and len(appstreams) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No AppStreams to disable provided",
}
# pylint: disable-next=undefined-variable
result, comment, changes = __salt__["appstreams.disable"](appstreams)
return {"name": name, "changes": changes, "result": result, "comment": comment}
def _test_mode(name, appstreams, action):
action_name = f"{action}d"
comment = f"The following appstreams would be {action_name}: {appstreams}"
changes = {}
if appstreams:
changes = {"ret": {}}
changes["ret"][action_name] = appstreams
else:
comment = f"No AppStreams to {action} provided"
return {
"name": name,
"changes": changes,
"result": None,
"comment": comment,
}
070701000000CE000081B400000000000000000000000168EFD66400000B34000000000000000000000000000000000000002800000000susemanager-sls/src/states/mgrcompat.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2019-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
SUSE Multi-Linux Manager custom wrapper for Salt "module.run" state module.
This wrapper determines the syntax to use for calling the Salt "module.run" state
that has changed between different Salt versions.
Using this wrapper we ensure all SUSE Multi-Linux Manager SLS files use the same syntax
regardless of the actual Salt version installed on the minion.
"""
from __future__ import absolute_import
# Import salt libs
from salt.utils.odict import OrderedDict
import logging
log = logging.getLogger(__name__)
__virtualname__ = "mgrcompat"
# pylint: disable-next=invalid-name
def __virtual__():
"""
This module is always enabled while 'module.run' is available.
"""
return __virtualname__
def _tailor_kwargs_to_new_syntax(name, **kwargs):
nkwargs = {}
# pylint: disable-next=invalid-name
_opt_kwargs = None
for k, v in kwargs.items():
if k.startswith("m_"):
nkwargs[k[2:]] = v
elif k == "kwargs":
# pylint: disable-next=invalid-name
_opt_kwargs = kwargs[k]
else:
nkwargs[k] = v
ret = {name: [OrderedDict(nkwargs)]}
if _opt_kwargs:
ret[name].append(OrderedDict(_opt_kwargs))
return ret
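# Illustration of the translation above (a sketch derived from the code):
#
#   _tailor_kwargs_to_new_syntax("pkg.refresh_db", m_name="repo1", failhard=True)
#   -> {"pkg.refresh_db": [OrderedDict([("name", "repo1"), ("failhard", True)])]}
#
# "m_"-prefixed keys lose their prefix, and an optional "kwargs" dict is
# appended as a second entry in the list.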
def module_run(**kwargs):
"""
    This function executes the Salt "module.run" state, passing the arguments
    in the right way according to the syntax supported by the Salt minion
    version and configuration
"""
# We use classic "module.run" syntax by default.
use_new_syntax = False
# pylint: disable-next=undefined-variable
if 2016 < __grains__["saltversioninfo"][0] < 3005 and "module.run" in __opts__.get(
"use_superseded", []
):
        # New syntax - explicitly enabled via 'use_superseded' configuration on 2018.3, 2019.2, 3000.x, 3002.x, 3003.x and 3004.x
use_new_syntax = True
if use_new_syntax:
log.debug(
"Minion is using the new syntax for 'module.run' state. Tailoring parameters."
)
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("Old parameters: {}".format(kwargs))
old_name = kwargs.pop("name")
new_kwargs = _tailor_kwargs_to_new_syntax(old_name, **kwargs)
# pylint: disable-next=logging-format-interpolation,consider-using-f-string
log.debug("New parameters for 'module.run' state: {}".format(new_kwargs))
else:
new_kwargs = kwargs
# pylint: disable-next=undefined-variable
ret = __states__["module.run"](**new_kwargs)
if use_new_syntax:
if ret["changes"]:
changes = ret["changes"].pop(old_name)
ret["changes"]["ret"] = changes
ret["name"] = old_name
return ret
070701000000CF000081B400000000000000000000000168EFD66400000664000000000000000000000000000000000000002700000000susemanager-sls/src/states/mgrutils.py# SPDX-FileCopyrightText: 2021-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Utility states
"""
from salt.exceptions import CommandExecutionError
from salt.states import file
__virtualname__ = "mgrutils"
# pylint: disable-next=invalid-name
def __virtual__():
"""
This module is always enabled while 'file.managed' is available.
"""
# pylint: disable-next=undefined-variable
file.__salt__ = __salt__
# pylint: disable-next=undefined-variable
file.__opts__ = __opts__
# pylint: disable-next=undefined-variable
file.__pillar__ = __pillar__
# pylint: disable-next=undefined-variable
file.__grains__ = __grains__
# pylint: disable-next=undefined-variable
file.__context__ = __context__
# pylint: disable-next=undefined-variable
file.__utils__ = __utils__
return __virtualname__
def cmd_dump(name, cmd):
"""
Dump the output of a command to a file
"""
ret = {
"name": name,
"changes": {},
# pylint: disable-next=undefined-variable
"result": True if not __opts__["test"] else None,
"comment": "",
}
try:
# pylint: disable-next=undefined-variable
cmd_out = __salt__["cmd.run"](cmd, raise_err=True, python_shell=False)
except CommandExecutionError:
ret["result"] = False
# pylint: disable-next=consider-using-f-string
ret["comment"] = "Failed to run command {}".format(cmd)
return ret
# pylint: disable-next=undefined-variable
file_ret = __states__["file.managed"](name, contents=cmd_out)
file_ret["name"] = name
return file_ret
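# SLS usage sketch (path and command are placeholders):
#
#   /var/cache/os-release-dump:
#     mgrutils.cmd_dump:
#       - cmd: cat /etc/os-release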
070701000000D0000081B400000000000000000000000168EFD66400000E2A000000000000000000000000000000000000002600000000susemanager-sls/src/states/product.py# SPDX-FileCopyrightText: 2019-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
Handles installation of SUSE products using zypper
Only supported with :mod:`zypper <salt.modules.zypper>`
"""
import logging
from salt.utils.versions import version_cmp
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "product"
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only work on SUSE platforms with zypper
"""
# pylint: disable-next=undefined-variable
if __grains__.get("os_family", "") != "Suse":
return (False, "Module product: non SUSE OS not supported")
# Not all versions of SUSE use zypper, check that it is available
try:
# pylint: disable-next=undefined-variable
zypp_info = __salt__["pkg.info_installed"]("zypper")["zypper"]
except CommandExecutionError:
return (False, "Module product: zypper package manager not found")
# Minimum version that supports 'zypper search --provides'
if version_cmp(zypp_info["version"], "1.8.13") < 0:
return (False, "Module product: zypper 1.8.13 or greater required")
return __virtualname__
def _get_missing_products(refresh):
# Search for not installed products
products = []
try:
products = list(
# pylint: disable-next=undefined-variable
__salt__["pkg.search"](
"product()",
refresh=refresh,
match="exact",
provides=True,
not_installed_only=True,
)
)
log.debug(
"The following products are not yet installed: %s", ", ".join(products)
)
except CommandExecutionError:
# No search results
return None
# remove unsupported products
if "SLE-Micro-Rancher-release" in products:
products.remove("SLE-Micro-Rancher-release")
# Exclude products that are already provided by another to prevent conflicts
to_install = []
for pkg in products:
try:
# pylint: disable-next=undefined-variable
res = list(__salt__["pkg.search"](pkg, match="exact", provides=True))
if pkg in res:
res.remove(pkg)
if not res:
# No other providers than the package itself
to_install.append(pkg)
else:
log.debug(
"The product '%s' is already provided by '%s'. Skipping.",
pkg,
", ".join(res),
)
except CommandExecutionError:
# No search results
# Not provided by any installed package, add it to the list
to_install.append(pkg)
return to_install
# pylint: disable-next=unused-argument
def all_installed(name, refresh=False, **kwargs):
"""
Ensure that all the subscribed products are installed.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
to_install = _get_missing_products(refresh)
if not to_install:
# All product packages are already installed
ret["comment"] = "All subscribed products are already installed"
ret["result"] = True
log.debug("All products are already installed. Nothing to do.")
return ret
# pylint: disable-next=undefined-variable
return __states__["pkg.installed"](name, pkgs=to_install, no_recommends=True)
070701000000D1000081B400000000000000000000000168EFD6640000127A000000000000000000000000000000000000002C00000000susemanager-sls/src/states/reportdb_user.py# SPDX-FileCopyrightText: 2022-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
reportdb_user functions
"""
import logging
# pylint: disable-next=unused-import
import os
# pylint: disable-next=unused-import
import re
# pylint: disable-next=unused-import
from salt.exceptions import CommandExecutionError
try:
# pylint: disable-next=unused-import
import libvirt
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = "reportdb_user"
# pylint: disable-next=invalid-name
def __virtual__():
"""
Only if the minion is a mgr server and the postgresql module is loaded
"""
# pylint: disable-next=undefined-variable
if not __grains__.get("is_mgr_server"):
return (False, "Minion is not a mgr server")
# pylint: disable-next=undefined-variable
if "postgres.user_exists" not in __salt__:
return (
False,
"Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
)
return __virtualname__
def present(name, password):
"""
Ensure that the named user is present in the configured report database
:param name: the username
:param password: the password
"""
ret = {
"name": name,
"changes": {},
# pylint: disable-next=undefined-variable
"result": True if not __opts__["test"] else None,
"comment": "",
}
# pylint: disable-next=undefined-variable
if __opts__["test"]:
# pylint: disable-next=consider-using-f-string
ret["comment"] = "User {} with password will be set".format(name)
return ret
try:
cmd = [
"uyuni-setup-reportdb-user",
"--non-interactive",
"--dbuser",
name,
"--dbpassword",
password,
]
# pylint: disable-next=undefined-variable
if __salt__["postgres.user_exists"](name):
cmd.append("--modify")
else:
cmd.append("--add")
# pylint: disable-next=undefined-variable
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] != 0:
ret["result"] = False
# pylint: disable-next=consider-using-f-string
ret["comment"] = "Failed to set the user. {}".format(
result["stderr"] or result["stdout"]
)
return ret
# pylint: disable-next=consider-using-f-string
ret["comment"] = "User {} with password set".format(name)
return ret
# pylint: disable-next=broad-exception-caught
except Exception as err:
ret["result"] = False
ret["comment"] = str(err)
return ret
def absent(name, password):
"""
    Ensure that the named user is absent from the configured report database
:param name: the username
:param password: the password
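
    Example (illustrative SLS snippet; values are placeholders):

    .. code-block:: yaml

        reportdb_report_user_removed:
          reportdb_user.absent:
            - name: reportuser
            - password: secret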
"""
ret = {
"name": name,
"changes": {},
# pylint: disable-next=undefined-variable
"result": True if not __opts__["test"] else None,
"comment": "",
}
# pylint: disable-next=undefined-variable
if __opts__["test"]:
# pylint: disable-next=consider-using-f-string
ret["comment"] = "User {} with password will be set".format(name)
return ret
    try:
        # pylint: disable-next=undefined-variable
        if not __salt__["postgres.user_exists"](name):
            # Nothing to delete if the user does not exist
            # pylint: disable-next=consider-using-f-string
            ret["comment"] = "User {} is already absent".format(name)
            return ret
cmd = [
"uyuni-setup-reportdb-user",
"--non-interactive",
"--dbuser",
name,
"--dbpassword",
password,
"--delete",
]
# pylint: disable-next=undefined-variable
result = __salt__["cmd.run_all"](cmd)
if result["retcode"] != 0:
ret["result"] = False
# pylint: disable-next=consider-using-f-string
ret["comment"] = "Failed to delete the user. {}".format(
result["stderr"] or result["stdout"]
)
return ret
# pylint: disable-next=consider-using-f-string
ret["comment"] = "User {} deleted".format(name)
return ret
# pylint: disable-next=broad-exception-caught
except Exception as err:
ret["result"] = False
ret["comment"] = str(err)
return ret
070701000000D2000081B400000000000000000000000168EFD66400010914000000000000000000000000000000000000002B00000000susemanager-sls/src/states/uyuni_config.py# pylint: disable=missing-module-docstring
# SPDX-FileCopyrightText: 2020-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
import logging
# pylint: disable-next=unused-import
from typing import Optional, Dict, Any, List, Tuple
from collections import Counter
SERVER_GROUP_NOT_FOUND_ERROR = 2201
NO_SUCH_USER_ERROR = -213
ORG_NOT_FOUND_ERROR = 2850
ACTIVATION_KEY_NOT_FOUND_ERROR = -212
AUTHENTICATION_ERROR = 2950
log = logging.getLogger(__name__)
__salt__: Dict[str, Any] = {}
__opts__: Dict[str, Any] = {}
__virtualname__ = "uyuni"
class StateResult:
@staticmethod
def state_error(name: str, comment: str = None):
return StateResult.prepare_result(name, False, comment)
@staticmethod
# pylint: disable-next=dangerous-default-value
def prepare_result(
name: str, result: Optional[bool], comment: str = None, changes: Dict = {}
):
return {
"name": name,
"changes": changes,
"result": result,
"comment": comment,
}
# pylint: disable-next=missing-class-docstring
class UyuniUsers:
@staticmethod
# pylint: disable-next=dangerous-default-value
def _update_user_roles(
name: str,
current_roles: List[str] = [],
new_roles: List[str] = [],
org_admin_user: str = None,
org_admin_password: str = None,
):
for role_to_remove in current_roles or []:
if role_to_remove not in (new_roles or []):
__salt__["uyuni.user_remove_role"](
name,
role=role_to_remove,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
for role_to_add in new_roles or []:
if role_to_add not in (current_roles or []):
__salt__["uyuni.user_add_role"](
name,
role=role_to_add,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
# pylint: disable-next=dangerous-default-value
def _update_user_system_groups(
name: str,
current_system_groups: List[str] = [],
system_groups: List[str] = [],
org_admin_user: str = None,
org_admin_password: str = None,
):
systems_groups_add = [
sys
for sys in (system_groups or [])
if sys not in (current_system_groups or [])
]
if systems_groups_add:
__salt__["uyuni.user_add_assigned_system_groups"](
login=name,
server_group_names=systems_groups_add,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
system_groups_remove = [
sys
for sys in (current_system_groups or [])
if sys not in (system_groups or [])
]
if system_groups_remove:
__salt__["uyuni.user_remove_assigned_system_groups"](
login=name,
server_group_names=system_groups_remove,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
def _compute_changes(
user_changes: Dict[str, Any],
current_user: Dict[str, Any],
roles: List[str],
current_roles: List[str],
system_groups: List[str],
current_system_groups: List[str],
use_pam_auth: bool = False,
):
changes = {}
error = None
# user field changes
for field in ["email", "first_name", "last_name"]:
if (current_user or {}).get(field) != user_changes.get(field):
changes[field] = {"new": user_changes[field]}
if current_user:
changes[field]["old"] = (current_user or {}).get(field)
# role changes
if Counter(roles or []) != Counter(current_roles or []):
changes["roles"] = {"new": roles}
if current_roles:
changes["roles"]["old"] = current_roles
# system group changes
if Counter(system_groups or []) != Counter(current_system_groups or []):
changes["system_groups"] = {"new": system_groups}
if current_system_groups:
changes["system_groups"]["old"] = current_system_groups
        # check if the password has changed
if current_user and not use_pam_auth:
try:
__salt__["uyuni.user_get_details"](
user_changes.get("login"), user_changes.get("password")
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
                # check if it's an authentication error; if so, the password has changed
if exc.faultCode == AUTHENTICATION_ERROR:
changes["password"] = {"new": "(hidden)", "old": "(hidden)"}
else:
error = exc
return changes, error
# pylint: disable-next=dangerous-default-value
def manage(
self,
login: str,
password: str,
email: str,
first_name: str,
last_name: str,
use_pam_auth: bool = False,
roles: Optional[List[str]] = [],
system_groups: Optional[List[str]] = [],
org_admin_user: str = None,
org_admin_password: str = None,
) -> Dict[str, Any]:
"""
Ensure a user is present with all specified properties
:param login: user login ID
:param password: desired password for the user
:param email: valid email address
:param first_name: First name
:param last_name: Last name
        :param use_pam_auth: whether to use PAM authentication for this user
        :param roles: roles to assign to the user
        :param system_groups: system groups to assign the user to
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
current_user = None
current_roles = None
current_system_groups_names = None
try:
current_user = __salt__["uyuni.user_get_details"](
login,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
current_roles = __salt__["uyuni.user_list_roles"](
login,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
current_system_groups = __salt__["uyuni.user_list_assigned_system_groups"](
login,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
current_system_groups_names = [
s["name"] for s in (current_system_groups or [])
]
# pylint: disable-next=broad-exception-caught
except Exception as exc:
if exc.faultCode == AUTHENTICATION_ERROR:
# pylint: disable-next=consider-using-f-string
error_message = "Error while retrieving user information (admin credentials error) '{}': {}".format(
login, exc
)
log.warning(error_message)
return StateResult.state_error(login, comment=error_message)
        user_parameters = {
"login": login,
"password": password,
"email": email,
"first_name": first_name,
"last_name": last_name,
"org_admin_user": org_admin_user,
"org_admin_password": org_admin_password,
}
changes, error = self._compute_changes(
            user_parameters,
current_user,
roles,
current_roles,
system_groups,
current_system_groups_names,
use_pam_auth=use_pam_auth,
)
if error:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
"Error computing changes for user '{}': {}".format(login, error),
)
if not changes:
return StateResult.prepare_result(
login,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already in the desired state".format(login),
)
if not current_user:
changes["login"] = {"new": login}
changes["password"] = {"new": "(hidden)"}
if __opts__["test"]:
return StateResult.prepare_result(
login,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be modified".format(login),
changes,
)
try:
if current_user:
__salt__["uyuni.user_set_details"](**user_paramters)
else:
user_paramters["use_pam_auth"] = use_pam_auth
__salt__["uyuni.user_create"](**user_paramters)
self._update_user_roles(
login, current_roles, roles, org_admin_user, org_admin_password
)
self._update_user_system_groups(
login,
current_system_groups_names,
system_groups,
org_admin_user,
org_admin_password,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
"Error modifying user '{}': {}".format(login, exc),
)
else:
return StateResult.prepare_result(
login,
True,
# pylint: disable-next=consider-using-f-string
"{0} user successfully modified".format(login),
changes,
)
def delete(
self, login: str, org_admin_user: str = None, org_admin_password: str = None
) -> Dict[str, Any]:
"""
Remove an Uyuni user
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:param login: login of the user
:return: dict for Salt communication
"""
try:
user = __salt__["uyuni.user_get_details"](
login,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
except Exception as exc:
if exc.faultCode == NO_SUCH_USER_ERROR:
return StateResult.prepare_result(
login,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already absent".format(login),
)
if exc.faultCode == AUTHENTICATION_ERROR:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
"Error deleting user (organization credentials error) '{}': {}".format(
login, exc
),
)
raise exc
else:
changes = {
"login": {"old": login},
"email": {"old": user.get("email")},
"first_name": {"old": user.get("first_name")},
"last_name": {"old": user.get("last_name")},
}
if __opts__["test"]:
return StateResult.prepare_result(
login,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be deleted".format(login),
changes,
)
try:
__salt__["uyuni.user_delete"](
login,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
return StateResult.prepare_result(
login,
True,
# pylint: disable-next=consider-using-f-string
"User {} has been deleted".format(login),
changes,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
"Error deleting user '{}': {}".format(login, exc),
)
# pylint: disable-next=missing-class-docstring
class UyuniUserChannels:
@staticmethod
def process_changes(
current_managed_channels: Optional[List[str]],
new_managed_channels: Optional[List[str]],
current_subscribe_channels: List[str],
new_subscribe_channels: List[str],
org_admin_user: str,
org_admin_password: str,
) -> Dict[str, Dict[str, bool]]:
managed_changes: Dict[str, bool] = {}
managed_changes.update(
{
new_ma: True
for new_ma in (new_managed_channels or [])
if new_ma not in current_managed_channels
}
)
managed_changes.update(
{
old_ma: False
for old_ma in (current_managed_channels or [])
if old_ma not in new_managed_channels
}
)
subscribe_changes: Dict[str, bool] = {}
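        # A desired subscribable channel needs an explicit grant when it is not
        # currently subscribed, or when its manageable flag is being revoked in
        # this run (managed_changes[channel] is False).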
for new_channel in new_subscribe_channels or []:
if new_channel not in (
current_subscribe_channels or []
) or not managed_changes.get(new_channel, True):
subscribe_changes[new_channel] = True
for curr_channel in current_subscribe_channels or []:
if not (
curr_channel in new_subscribe_channels
or curr_channel in new_managed_channels
):
if not __salt__["uyuni.channel_software_is_globally_subscribable"](
curr_channel, org_admin_user, org_admin_password
):
subscribe_changes[curr_channel] = False
changes = {}
if managed_changes:
changes["manageable_channels"] = managed_changes
if subscribe_changes:
changes["subscribable_channels"] = subscribe_changes
return changes
# pylint: disable-next=dangerous-default-value
def manage(
self,
login: str,
password: str,
manageable_channels: Optional[List[str]] = [],
subscribable_channels: Optional[List[str]] = [],
org_admin_user: str = None,
org_admin_password: str = None,
) -> Dict[str, Any]:
"""
Modifies user-channel associations
:param login: user login ID
:param password: user password
        :param manageable_channels: channels the user can manage
        :param subscribable_channels: channels the user can subscribe to
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
try:
current_roles = __salt__["uyuni.user_list_roles"](login, password=password)
current_manageable_channels = __salt__[
"uyuni.channel_list_manageable_channels"
](login, password)
current_subscribe_channels = __salt__["uyuni.channel_list_my_channels"](
login, password
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
comment="Error retrieving information about user channels '{}': {}".format(
login, exc
),
)
if "org_admin" in current_roles or "channel_admin" in current_roles:
return StateResult.state_error(
login,
"Channels access cannot be changed, because "
"the target user can manage all channels in the organization "
'(having an "org_admin" or "channel_admin" role).',
)
current_manageable_channels_list = [
c.get("label") for c in (current_manageable_channels or [])
]
current_subscribe_channels_list = [
c.get("label") for c in (current_subscribe_channels or [])
]
changes = self.process_changes(
current_manageable_channels_list,
manageable_channels,
current_subscribe_channels_list,
subscribable_channels,
org_admin_user,
org_admin_password,
)
if not changes:
return StateResult.prepare_result(
login,
True,
# pylint: disable-next=consider-using-f-string
"{0} channels are already in the desired state".format(login),
)
if __opts__["test"]:
return StateResult.prepare_result(
login,
None,
# pylint: disable-next=consider-using-f-string
"{0} channels would be configured".format(login),
changes,
)
try:
for channel, action in changes.get("manageable_channels", {}).items():
__salt__["uyuni.channel_software_set_user_manageable"](
channel, login, action, org_admin_user, org_admin_password
)
for channel, action in changes.get("subscribable_channels", {}).items():
__salt__["uyuni.channel_software_set_user_subscribable"](
channel, login, action, org_admin_user, org_admin_password
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
login,
# pylint: disable-next=consider-using-f-string
"Error changing channel assignments '{}': {}".format(login, exc),
)
return StateResult.prepare_result(
login, True, "Channel set to the desired state", changes
)
# pylint: disable-next=missing-class-docstring
class UyuniGroups:
@staticmethod
def _update_systems(
name: str,
new_systems: List[int],
current_systems: List[int],
org_admin_user: str = None,
org_admin_password: str = None,
):
remove_systems = [sys for sys in current_systems if sys not in new_systems]
if remove_systems:
__salt__["uyuni.systemgroup_add_remove_systems"](
name,
False,
remove_systems,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
add_systems = [sys for sys in new_systems if sys not in current_systems]
if add_systems:
__salt__["uyuni.systemgroup_add_remove_systems"](
name,
True,
add_systems,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
def _get_systems_for_group(
target: str,
target_type: str = "glob",
org_admin_user: str = None,
org_admin_password: str = None,
):
selected_minions = __salt__["uyuni.master_select_minions"](target, target_type)
available_system_ids = __salt__["uyuni.systems_get_minion_id_map"](
org_admin_user, org_admin_password
)
return [
available_system_ids[minion_id]
for minion_id in selected_minions.get("minions", [])
if minion_id in available_system_ids
]
def manage(
self,
name: str,
description: str,
target: str,
target_type: str = "glob",
org_admin_user: str = None,
org_admin_password: str = None,
) -> Dict[str, Any]:
"""
Create or update a system group
:param name: group name
:param description: group description
:param target: target expression used to filter which minions should be part of the group
:param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
current_group = None
current_systems = None
try:
current_group = __salt__["uyuni.systemgroup_get_details"](
name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
current_systems = __salt__["uyuni.systemgroup_list_systems"](
name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
if exc.faultCode != SERVER_GROUP_NOT_FOUND_ERROR:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error retrieving information about system group '{}': {}".format(
name, exc
),
)
current_systems_ids = [sys["id"] for sys in (current_systems or [])]
systems_to_group = self._get_systems_for_group(
target,
target_type,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
changes = {}
if description != (current_group or {}).get("description"):
changes["description"] = {"new": description}
if current_group:
changes["description"]["old"] = current_group["description"]
if Counter(current_systems_ids or []) != Counter(systems_to_group or []):
changes["systems"] = {"new": systems_to_group}
if current_group:
changes["systems"]["old"] = current_systems_ids
if not changes:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already in the desired state".format(name),
)
if not current_group:
changes["name"] = {"new": name}
if __opts__["test"]:
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be updated".format(name),
changes,
)
try:
if current_group:
__salt__["uyuni.systemgroup_update"](
name,
description,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
self._update_systems(
name,
systems_to_group,
current_systems_ids,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
else:
__salt__["uyuni.systemgroup_create"](
name,
description,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
self._update_systems(
name,
systems_to_group,
current_systems_ids,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error updating group. '{}': {}".format(name, exc),
)
else:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} successfully updated".format(name),
changes,
)
def delete(
self, name: str, org_admin_user: str = None, org_admin_password: str = None
) -> Dict[str, Any]:
"""
Remove an Uyuni system group
:param name: Group Name
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
try:
current_group = __salt__["uyuni.systemgroup_get_details"](
name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
except Exception as exc:
if exc.faultCode == SERVER_GROUP_NOT_FOUND_ERROR:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already absent".format(name),
)
if exc.faultCode == AUTHENTICATION_ERROR:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting group (organization admin credentials error) '{}': {}".format(
name, exc
),
)
raise exc
else:
if __opts__["test"]:
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be removed".format(name),
)
try:
__salt__["uyuni.systemgroup_delete"](
name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"Group {} has been deleted".format(name),
{
"name": {"old": current_group.get("name")},
"description": {"old": current_group.get("description")},
},
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting group '{}': {}".format(name, exc),
)
# pylint: disable-next=missing-class-docstring
class UyuniOrgs:
@staticmethod
def _compute_changes(
user_changes: Dict[str, Any], current_user: Dict[str, Any]
) -> Dict[str, Any]:
changes = {}
for field in ["email", "first_name", "last_name"]:
if (current_user or {}).get(field) != user_changes.get(field):
changes[field] = {"new": user_changes[field]}
if current_user:
changes[field]["old"] = (current_user or {}).get(field)
return changes
def manage(
self,
name: str,
org_admin_user: str,
org_admin_password: str,
first_name: str,
last_name: str,
email: str,
pam: bool = False,
admin_user=None,
admin_password=None,
) -> Dict[str, Any]:
"""
Create or update an Uyuni organization.
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: organization name
:param org_admin_user: organization admin user
:param org_admin_password: organization admin password
:param first_name: organization admin first name
:param last_name: organization admin last name
:param email: organization admin email
:param pam: organization admin pam authentication
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: dict for Salt communication
"""
current_org = None
current_org_admin = None
try:
current_org = __salt__["uyuni.org_get_details"](
name, admin_user=admin_user, admin_password=admin_password
)
current_org_admin = __salt__["uyuni.user_get_details"](
org_admin_user,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
if exc.faultCode != ORG_NOT_FOUND_ERROR:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error retrieving information about organization '{}': {}".format(
name, exc
),
)
        user_parameters = {
"login": org_admin_user,
"password": org_admin_password,
"email": email,
"first_name": first_name,
"last_name": last_name,
"org_admin_user": org_admin_user,
"org_admin_password": org_admin_password,
}
        changes = self._compute_changes(user_parameters, current_org_admin)
if not current_org:
changes["org_name"] = {"new": name}
changes["org_admin_user"] = {"new": org_admin_user}
changes["pam"] = {"new": pam}
if not changes:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already in the desired state".format(name),
)
if __opts__["test"]:
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be updated".format(name),
changes,
)
try:
if current_org:
__salt__["uyuni.user_set_details"](**user_paramters)
else:
__salt__["uyuni.org_create"](
name=name,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
first_name=first_name,
last_name=last_name,
email=email,
admin_user=admin_user,
admin_password=admin_password,
pam=pam,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error updating organization '{}': {}".format(name, exc),
)
else:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} org successfully modified".format(name),
changes,
)
def delete(self, name: str, admin_user=None, admin_password=None) -> Dict[str, Any]:
"""
Remove an Uyuni organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: Organization Name
:param admin_user: administrator username
:param admin_password: administrator password
:return: dict for Salt communication
"""
try:
current_org = __salt__["uyuni.org_get_details"](
name, admin_user=admin_user, admin_password=admin_password
)
except Exception as exc:
if exc.faultCode == ORG_NOT_FOUND_ERROR:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already absent".format(name),
)
if exc.faultCode == AUTHENTICATION_ERROR:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting organization (admin credentials error) '{}': {}".format(
name, exc
),
)
raise exc
else:
if __opts__["test"]:
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be removed".format(name),
)
try:
__salt__["uyuni.org_delete"](
name, admin_user=admin_user, admin_password=admin_password
)
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"Org {} has been deleted".format(name),
{"name": {"old": current_org.get("name")}},
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting Org '{}': {}".format(name, exc),
)
# pylint: disable-next=missing-class-docstring
class UyuniOrgsTrust:
def trust(
self,
name: str,
org_name: str,
trusted_orgs: List[str],
admin_user: str = None,
admin_password: str = None,
) -> Dict[str, Any]:
"""
Establish trust relationships between organizations
:param name: state name
:param org_name: organization name
:param trusted_orgs: list of organization names to trust
:param admin_user: administrator username
:param admin_password: administrator password
:return: dict for Salt communication
"""
try:
current_org_trusts = __salt__["uyuni.org_trust_list_trusts"](
org_name, admin_user=admin_user, admin_password=admin_password
)
current_org = __salt__["uyuni.org_get_details"](
org_name, admin_user=admin_user, admin_password=admin_password
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error retrieving information about an organization trust'{}': {}".format(
org_name, exc
),
)
trusts_to_add = []
trusts_to_remove = []
# pylint: disable-next=redefined-outer-name
for org_trust in current_org_trusts:
if org_trust.get("orgName") in (trusted_orgs or []) and not org_trust.get(
"trustEnabled"
):
trusts_to_add.append(org_trust)
elif org_trust.get("orgName") not in (trusted_orgs or []) and org_trust.get(
"trustEnabled"
):
trusts_to_remove.append(org_trust)
if not trusts_to_add and not trusts_to_remove:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already in the desired state".format(org_name),
)
if __opts__["test"]:
changes = {}
for org_add in trusts_to_add:
changes[org_add.get("orgName")] = {"old": None, "new": True}
for org_remove in trusts_to_remove:
changes[org_remove.get("orgName")] = {"old": True, "new": None}
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be created".format(org_name),
changes,
)
processed_changes = {}
try:
for org_add in trusts_to_add:
__salt__["uyuni.org_trust_add_trust"](
current_org.get("id"),
org_add.get("orgId"),
admin_user=admin_user,
admin_password=admin_password,
)
processed_changes[org_add.get("orgName")] = {"old": None, "new": True}
for org_remove in trusts_to_remove:
__salt__["uyuni.org_trust_remove_trust"](
current_org.get("id"),
org_remove.get("orgId"),
admin_user=admin_user,
admin_password=admin_password,
)
processed_changes[org_remove.get("orgName")] = {
"old": True,
"new": None,
}
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.prepare_result(
name,
False,
# pylint: disable-next=consider-using-f-string
"Error updating organization trusts '{}': {}".format(org_name, exc),
processed_changes,
)
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"Org '{}' trusts successfully modified".format(org_name),
processed_changes,
)
# pylint: disable-next=missing-class-docstring
class UyuniActivationKeys:
@staticmethod
    def _normalize_list_packages(list_packages: List[Any]):
return [(f["name"], f.get("arch", None)) for f in (list_packages or [])]
@staticmethod
def _compute_changes(
ak_parameters: Dict[str, Any],
current_ak: Dict[str, Any],
configure_after_registration: bool,
current_configure_after_registration: bool,
current_config_channels: List[str],
configuration_channels: List[str],
) -> Dict[str, Any]:
changes = {}
for field in [
"description",
"base_channel",
"usage_limit",
"universal_default",
"contact_method",
]:
if current_ak.get(field) != ak_parameters.get(field):
changes[field] = {"new": ak_parameters[field]}
if current_ak:
changes[field]["old"] = current_ak.get(field)
# list fields
for field in ["system_types", "child_channels", "server_groups"]:
if sorted((ak_parameters or {}).get(field) or []) != sorted(
current_ak.get(field) or []
):
changes[field] = {"new": ak_parameters[field]}
if current_ak:
changes[field]["old"] = current_ak.get(field)
new_packages = UyuniActivationKeys._normalize_list_packages(
(ak_parameters or {}).get("packages", [])
)
old_packages = UyuniActivationKeys._normalize_list_packages(
(current_ak or {}).get("packages", [])
)
if sorted(new_packages) != sorted(old_packages):
changes["packages"] = {"new": ak_parameters["packages"]}
if current_ak:
changes["packages"]["old"] = current_ak.get("packages")
if configure_after_registration != current_configure_after_registration:
changes["configure_after_registration"] = {
"new": configure_after_registration
}
if current_configure_after_registration is not None:
changes["configure_after_registration"][
"old"
] = current_configure_after_registration
# we don't want to sort configuration channels since the order matters in this case
if (current_config_channels or []) != (configuration_channels or []):
changes["configuration_channels"] = {"new": configuration_channels}
if current_config_channels:
changes["configuration_channels"]["old"] = current_config_channels
return changes
@staticmethod
def _update_system_type(
current_system_types, new_system_types, key, org_admin_user, org_admin_password
):
add_system_types = [
t for t in new_system_types if t not in current_system_types
]
if add_system_types:
__salt__["uyuni.activation_key_add_entitlements"](
key,
add_system_types,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
remove_system_types = [
t for t in current_system_types if t not in new_system_types
]
if remove_system_types:
__salt__["uyuni.activation_key_remove_entitlements"](
key,
remove_system_types,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
def _update_child_channels(
current_child_channels,
new_child_channels,
key,
org_admin_user,
org_admin_password,
):
add_child_channels = [
t for t in new_child_channels if t not in current_child_channels
]
if add_child_channels:
__salt__["uyuni.activation_key_add_child_channels"](
key,
add_child_channels,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
remove_child_channels = [
t for t in current_child_channels if t not in new_child_channels
]
if remove_child_channels:
__salt__["uyuni.activation_key_remove_child_channels"](
key,
remove_child_channels,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
def _update_server_groups(
current_server_groups,
new_server_groups,
key,
org_admin_user,
org_admin_password,
):
add_server_groups = [
t for t in new_server_groups if t not in current_server_groups
]
if add_server_groups:
__salt__["uyuni.activation_key_add_server_groups"](
key,
add_server_groups,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
remove_server_groups = [
t for t in current_server_groups if t not in new_server_groups
]
if remove_server_groups:
__salt__["uyuni.activation_key_remove_server_groups"](
key,
remove_server_groups,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
@staticmethod
def _format_packages_data(packages):
return [{"name": f[0], **(({"arch": f[1]}) if f[1] else {})} for f in packages]
@staticmethod
def _update_packages(
current_packages, new_packages, key, org_admin_user, org_admin_password
):
new_packages_normalized = UyuniActivationKeys._normalize_list_packages(
new_packages
)
current_packages_normalized = UyuniActivationKeys._normalize_list_packages(
current_packages
)
add_packages = [
t for t in new_packages_normalized if t not in current_packages_normalized
]
if add_packages:
__salt__["uyuni.activation_key_add_packages"](
key,
UyuniActivationKeys._format_packages_data(add_packages),
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
remove_packages = [
t for t in current_packages_normalized if t not in new_packages_normalized
]
if remove_packages:
__salt__["uyuni.activation_key_remove_packages"](
key,
UyuniActivationKeys._format_packages_data(remove_packages),
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
# pylint: disable-next=dangerous-default-value
def manage(
self,
name: str,
description: str,
base_channel: str = "",
usage_limit: int = 0,
contact_method: str = "default",
system_types: List[str] = [],
universal_default: bool = False,
child_channels: List[str] = [],
configuration_channels: List[str] = [],
packages: List[str] = [],
server_groups: List[str] = [],
configure_after_registration: bool = False,
org_admin_user: str = None,
org_admin_password: str = None,
) -> Dict[str, Any]:
"""
Ensure an Uyuni Activation Key is present.
:param name: the Activation Key name
        :param description: the Activation Key description
:param base_channel: base channel to be used
:param usage_limit: activation key usage limit
:param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
:param system_types: system types to be assigned.
            Can be one of: 'virtualization_host', 'container_build_host',
            'monitoring_entitled', 'osimage_build_host'
:param universal_default: sets this activation key as organization universal default
:param child_channels: list of child channels to be assigned
:param configuration_channels: list of configuration channels to be assigned
:param packages: list of packages which will be installed
        :param server_groups: list of server groups to assign to the activation key
:param configure_after_registration: deploy configuration files to systems on registration
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
current_ak = {}
key = None
current_configure_after_registration = None
system_groups_keys = {}
current_config_channels = []
output_field_names = {
"description": "description",
"base_channel_label": "base_channel",
"usage_limit": "usage_limit",
"universal_default": "universal_default",
"contact_method": "contact_method",
"entitlements": "system_types",
"child_channel_labels": "child_channels",
"server_group_ids": "server_groups",
"packages": "packages",
}
try:
all_groups = __salt__["uyuni.systemgroup_list_all_groups"](
org_admin_user, org_admin_password
)
group_id_to_name = {}
for g in all_groups or []:
system_groups_keys[g.get("name")] = g.get("id")
group_id_to_name[g.get("id")] = g.get("name")
current_org_user = __salt__["uyuni.user_get_details"](
org_admin_user, org_admin_password
)
# pylint: disable-next=consider-using-f-string
key = "{}-{}".format(current_org_user["org_id"], name)
returned_ak = __salt__["uyuni.activation_key_get_details"](
key,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
for returned_name, output_name in output_field_names.items():
current_ak[output_name] = returned_ak[returned_name]
current_ak["server_groups"] = [
group_id_to_name[s] for s in (current_ak["server_groups"] or [])
]
if current_ak.get("base_channel", None) == "none":
current_ak["base_channel"] = ""
current_configure_after_registration = __salt__[
"uyuni.activation_key_check_config_deployment"
](key, org_admin_user, org_admin_password)
config_channels_output = __salt__[
"uyuni.activation_key_list_config_channels"
](key, org_admin_user, org_admin_password)
current_config_channels = [
cc["label"] for cc in (config_channels_output or [])
]
# pylint: disable-next=broad-exception-caught
except Exception as exc:
if exc.faultCode != ACTIVATION_KEY_NOT_FOUND_ERROR:
return StateResult.state_error(
key,
# pylint: disable-next=consider-using-f-string
"Error retrieving information about Activation Key '{}': {}".format(
key, exc
),
)
ak_paramters = {
"description": description,
"base_channel": base_channel,
"usage_limit": usage_limit,
"contact_method": contact_method,
"system_types": system_types,
"universal_default": universal_default,
"child_channels": child_channels,
"server_groups": server_groups,
"packages": packages,
}
changes = self._compute_changes(
ak_paramters,
current_ak,
configure_after_registration,
current_configure_after_registration,
current_config_channels,
configuration_channels,
)
if not current_ak:
changes["key"] = {"new": key}
if not changes:
return StateResult.prepare_result(
key,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already in the desired state".format(key),
)
if __opts__["test"]:
return StateResult.prepare_result(
key,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be updated".format(key),
changes,
)
try:
if current_ak:
__salt__["uyuni.activation_key_set_details"](
key,
description=description,
contact_method=contact_method,
base_channel_label=base_channel,
usage_limit=usage_limit,
universal_default=universal_default,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
if changes.get("system_types", False):
self._update_system_type(
current_ak.get("system_types", []),
system_types or [],
key,
org_admin_user,
org_admin_password,
)
else:
__salt__["uyuni.activation_key_create"](
key=name,
description=description,
base_channel_label=base_channel,
usage_limit=usage_limit,
system_types=system_types,
universal_default=universal_default,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
__salt__["uyuni.activation_key_set_details"](
key,
contact_method=contact_method,
usage_limit=usage_limit,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
if changes.get("child_channels", False):
self._update_child_channels(
current_ak.get("child_channels", []),
child_channels or [],
key,
org_admin_user,
org_admin_password,
)
if changes.get("server_groups", False):
old_server_groups_id = [
system_groups_keys[s] for s in current_ak.get("server_groups", [])
]
new_server_groups_id = [
system_groups_keys[s] for s in (server_groups or [])
]
self._update_server_groups(
old_server_groups_id,
new_server_groups_id,
key,
org_admin_user,
org_admin_password,
)
if changes.get("configure_after_registration", False):
if configure_after_registration:
__salt__["uyuni.activation_key_enable_config_deployment"](
key,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
else:
if current_ak:
__salt__["uyuni.activation_key_disable_config_deployment"](
key,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
if changes.get("packages", False):
self._update_packages(
current_ak.get("packages", []),
packages or [],
key,
org_admin_user,
org_admin_password,
)
if changes.get("configuration_channels", False):
__salt__["uyuni.activation_key_set_config_channels"](
[key],
config_channel_label=configuration_channels,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
key,
# pylint: disable-next=consider-using-f-string
"Error updating activation key '{}': {}".format(key, exc),
)
else:
return StateResult.prepare_result(
key,
True,
# pylint: disable-next=consider-using-f-string
"{0} activation key successfully modified".format(key),
changes,
)
def delete(
self, name: str, org_admin_user: str = None, org_admin_password: str = None
) -> Dict[str, Any]:
"""
Remove an Uyuni Activation Key.
:param name: the Activation Key Name
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
"""
        # Fall back to the state name until the org-prefixed key is computed
        key = name
        try:
current_org_user = __salt__["uyuni.user_get_details"](
org_admin_user, org_admin_password
)
# pylint: disable-next=consider-using-f-string
key = "{}-{}".format(current_org_user["org_id"], name)
# pylint: disable-next=unused-variable
ak = __salt__["uyuni.activation_key_get_details"](
key,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
except Exception as exc:
if exc.faultCode == ACTIVATION_KEY_NOT_FOUND_ERROR:
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"{0} is already absent".format(key),
)
if exc.faultCode == AUTHENTICATION_ERROR:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting Activation Key (organization credentials error) '{}': {}".format(
key, exc
),
)
raise exc
else:
changes = {
"id": {"old": key},
}
if __opts__["test"]:
return StateResult.prepare_result(
name,
None,
# pylint: disable-next=consider-using-f-string
"{0} would be deleted".format(key),
changes,
)
try:
__salt__["uyuni.activation_key_delete"](
key,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
return StateResult.prepare_result(
name,
True,
# pylint: disable-next=consider-using-f-string
"Activation Key {} has been deleted".format(key),
changes,
)
# pylint: disable-next=broad-exception-caught
except Exception as exc:
return StateResult.state_error(
name,
# pylint: disable-next=consider-using-f-string
"Error deleting Activation Key '{}': {}".format(key, exc),
)
# pylint: disable-next=invalid-name
def __virtual__():
return __virtualname__
def user_present(
name,
password,
email,
first_name,
last_name,
use_pam_auth=False,
roles=None,
system_groups=None,
org_admin_user=None,
org_admin_password=None,
):
"""
Create or update an Uyuni user
:param name: user login name
:param password: desired password for the user
:param email: valid email address
:param first_name: First name
:param last_name: Last name
    :param use_pam_auth: whether to use PAM authentication for this user
    :param roles: roles to assign to the user
    :param system_groups: system groups to assign the user to
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
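
    Example (illustrative SLS snippet; all values are placeholders):

    .. code-block:: yaml

        user_jdoe:
          uyuni.user_present:
            - name: jdoe
            - password: secret
            - email: jdoe@example.com
            - first_name: John
            - last_name: Doe
            - roles:
              - channel_admin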
"""
return UyuniUsers().manage(
name,
password,
email,
first_name,
last_name,
use_pam_auth,
roles,
system_groups,
org_admin_user,
org_admin_password,
)
# pylint: disable-next=dangerous-default-value
def user_channels(
name,
password,
manageable_channels=[],
subscribable_channels=[],
org_admin_user=None,
org_admin_password=None,
):
"""
Ensure a user has access to the specified channels
:param name: user login name
:param password: user password
    :param manageable_channels: channels the user can manage
    :param subscribable_channels: channels the user can subscribe to
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
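
    Example (illustrative SLS snippet; channel labels are placeholders):

    .. code-block:: yaml

        jdoe_channels:
          uyuni.user_channels:
            - name: jdoe
            - password: secret
            - manageable_channels:
              - test-channel-1
            - subscribable_channels:
              - test-channel-2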
"""
return UyuniUserChannels().manage(
name,
password,
manageable_channels,
subscribable_channels,
org_admin_user,
org_admin_password,
)
def user_absent(name, org_admin_user=None, org_admin_password=None):
"""
Ensure an Uyuni user is not present.
:param name: user login name
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
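
    Example (illustrative SLS snippet):

    .. code-block:: yaml

        user_jdoe_absent:
          uyuni.user_absent:
            - name: jdoe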
"""
return UyuniUsers().delete(name, org_admin_user, org_admin_password)
def org_present(
name,
org_admin_user,
org_admin_password,
first_name,
last_name,
email,
pam=False,
admin_user=None,
admin_password=None,
):
"""
Create or update an Uyuni organization
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: organization name
:param org_admin_user: organization admin user
:param org_admin_password: organization admin password
:param first_name: organization admin first name
:param last_name: organization admin last name
:param email: organization admin email
:param pam: organization admin pam authentication
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: dict for Salt communication
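
    Example (illustrative SLS snippet; all values are placeholders):

    .. code-block:: yaml

        org_acme:
          uyuni.org_present:
            - name: acme
            - org_admin_user: acme_admin
            - org_admin_password: secret
            - first_name: Jane
            - last_name: Doe
            - email: jane@example.com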
"""
return UyuniOrgs().manage(
name,
org_admin_user,
org_admin_password,
first_name,
last_name,
email,
pam,
admin_user,
admin_password,
)
def org_absent(name, admin_user=None, admin_password=None):
"""
Ensure an Uyuni organization is not present
Note: the configured admin user must have the SUSE Multi-Linux Manager/Uyuni Administrator role to perform this action
:param name: organization name
:param admin_user: uyuni admin user
:param admin_password: uyuni admin password
:return: dict for Salt communication
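
    Example (illustrative SLS snippet):

    .. code-block:: yaml

        org_acme_absent:
          uyuni.org_absent:
            - name: acme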
"""
return UyuniOrgs().delete(name, admin_user, admin_password)
def org_trust(name, org_name, trusts, admin_user=None, admin_password=None):
"""
Establish trust relationships between Uyuni organizations.
:param name: state name
:param org_name: Organization name
:param trusts: list of organization names to trust
:param admin_user: administrator username
:param admin_password: administrator password
:return: dict for Salt communication
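
    Example (illustrative SLS snippet; organization names are placeholders):

    .. code-block:: yaml

        acme_trusts:
          uyuni.org_trust:
            - org_name: acme
            - trusts:
              - other_org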
"""
return UyuniOrgsTrust().trust(name, org_name, trusts, admin_user, admin_password)
def group_present(
name,
description,
target=None,
target_type="glob",
org_admin_user=None,
org_admin_password=None,
):
"""
Create or update an Uyuni system group
:param name: group name
:param description: group description
:param target: target expression used to filter which minions should be part of the group
:param target_type: target type, one of the following: glob, grain, grain_pcre, pillar, pillar_pcre,
pillar_exact, compound, compound_pillar_exact. Default: glob.
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
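
    Example (illustrative SLS snippet; the target expression is a placeholder):

    .. code-block:: yaml

        group_web_servers:
          uyuni.group_present:
            - name: web_servers
            - description: All web servers
            - target: 'web*'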
"""
return UyuniGroups().manage(
name, description, target, target_type, org_admin_user, org_admin_password
)
def group_absent(name, org_admin_user=None, org_admin_password=None):
"""
Ensure an Uyuni system group is not present
:param name: Group Name
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
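
    Example (illustrative SLS snippet):

    .. code-block:: yaml

        group_web_servers_absent:
          uyuni.group_absent:
            - name: web_servers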
"""
return UyuniGroups().delete(name, org_admin_user, org_admin_password)
def activation_key_absent(name, org_admin_user=None, org_admin_password=None):
"""
Ensure an Uyuni Activation Key is not present.
:param name: the Activation Key name
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
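
    Example (illustrative SLS snippet):

    .. code-block:: yaml

        ak_sles_key_absent:
          uyuni.activation_key_absent:
            - name: sles-key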
"""
return UyuniActivationKeys().delete(name, org_admin_user, org_admin_password)
# pylint: disable-next=dangerous-default-value
def activation_key_present(
name,
description,
base_channel="",
usage_limit=0,
contact_method="default",
system_types=[],
universal_default=False,
child_channels=[],
configuration_channels=[],
packages=[],
server_groups=[],
configure_after_registration=False,
org_admin_user=None,
org_admin_password=None,
):
"""
Ensure an Uyuni Activation Key is present.
:param name: the Activation Key name
    :param description: the Activation Key description
:param base_channel: base channel to be used
:param usage_limit: activation key usage limit. Default value is 0, which means unlimited usage
:param contact_method: contact method to be used. Can be one of: 'default', 'ssh-push' or 'ssh-push-tunnel'
:param system_types: system types to be assigned.
        Can be one of: 'virtualization_host', 'container_build_host',
        'monitoring_entitled', 'osimage_build_host'
:param universal_default: sets this activation key as organization universal default
:param child_channels: list of child channels to be assigned
:param configuration_channels: list of configuration channels to be assigned
:param packages: list of packages which will be installed
    :param server_groups: list of server groups to assign to the activation key
:param configure_after_registration: deploy configuration files to systems on registration
:param org_admin_user: organization administrator username
:param org_admin_password: organization administrator password
:return: dict for Salt communication
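
    Example (illustrative SLS snippet; channel and group labels are placeholders):

    .. code-block:: yaml

        ak_sles_key:
          uyuni.activation_key_present:
            - name: sles-key
            - description: Key for SLES minions
            - base_channel: sles-pool-x86_64
            - child_channels:
              - sles-updates-x86_64
            - server_groups:
              - web_servers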
"""
return UyuniActivationKeys().manage(
name,
description,
base_channel=base_channel,
usage_limit=usage_limit,
contact_method=contact_method,
system_types=system_types,
universal_default=universal_default,
child_channels=child_channels,
configuration_channels=configuration_channels,
packages=packages,
server_groups=server_groups,
configure_after_registration=configure_after_registration,
org_admin_user=org_admin_user,
org_admin_password=org_admin_password,
)
070701000000D3000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000001A00000000susemanager-sls/src/tests070701000000D4000081B400000000000000000000000168EFD664000000A8000000000000000000000000000000000000002400000000susemanager-sls/src/tests/README.md## Running tests
Run the tests from _this_ directory. PyTest must be installed.
To run the tests, issue the following command:
    py.test
That's all for now.
070701000000D5000081B400000000000000000000000168EFD66400000000000000000000000000000000000000000000002600000000susemanager-sls/src/tests/__init__.py070701000000D6000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000001F00000000susemanager-sls/src/tests/data070701000000D7000081B400000000000000000000000168EFD664000000ED000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/cpuinfo.ppc64le.sampleprocessor : 0
cpu : POWER8E (raw), altivec supported
clock : 3425.000000MHz
revision : 2.1 (pvr 004b 0201)
timebase : 512000000
platform : pSeries
model : IBM pSeries (emulated by qemu)
machine : CHRP IBM pSeries (emulated by qemu)
070701000000D8000081B400000000000000000000000168EFD66400000303000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/cpuinfo.s390.samplevendor_id : IBM/S390
# processors : 1
bogomips per cpu: 2913.00
features : esan3 zarch stfle msa ldisp eimm dfp etf3eh highgprs
cache0 : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1 : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2 : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3 : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4 : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5 : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = FF, identification = 0F9A27, machine = 2827
070701000000D9000081B400000000000000000000000168EFD664000010C4000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/data/cpuinfo.sampleprocessor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 61
model name : Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping : 4
microcode : 0x22
cpu MHz : 1314.117
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 2
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 5187.99
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 61
model name : Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping : 4
microcode : 0x22
cpu MHz : 2100.109
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 2
apicid : 1
initial apicid : 1
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 5187.99
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 61
model name : Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping : 4
microcode : 0x22
cpu MHz : 1718.742
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 2
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 5187.99
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 61
model name : Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
stepping : 4
microcode : 0x22
cpu MHz : 2108.335
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 2
apicid : 3
initial apicid : 3
fpu : yes
fpu_exception : yes
cpuid level : 20
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt dtherm ida arat pln pts
bugs :
bogomips : 5187.99
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
070701000000DA000081B400000000000000000000000168EFD664000006CF000000000000000000000000000000000000003000000000susemanager-sls/src/tests/data/dmidecode.sample# dmidecode 3.0
Getting SMBIOS data from sysfs.
SMBIOS 2.7 present.
Handle 0x0004, DMI type 4, 42 bytes
Processor Information
Socket Designation: U3E1
Type: Central Processor
Family: Core i7
Manufacturer: Intel(R) Corporation
ID: D4 06 03 00 FF FB EB BF
Signature: Type 0, Family 6, Model 61, Stepping 4
Flags:
FPU (Floating-point unit on-chip)
VME (Virtual mode extension)
DE (Debugging extension)
PSE (Page size extension)
TSC (Time stamp counter)
MSR (Model specific registers)
PAE (Physical address extension)
MCE (Machine check exception)
CX8 (CMPXCHG8 instruction supported)
APIC (On-chip APIC hardware supported)
SEP (Fast system call)
MTRR (Memory type range registers)
PGE (Page global enable)
MCA (Machine check architecture)
CMOV (Conditional move instruction supported)
PAT (Page attribute table)
PSE-36 (36-bit page size extension)
CLFSH (CLFLUSH instruction supported)
DS (Debug store)
ACPI (ACPI supported)
MMX (MMX technology supported)
FXSR (FXSAVE and FXSTOR instructions supported)
SSE (Streaming SIMD extensions)
SSE2 (Streaming SIMD extensions 2)
SS (Self-snoop)
HTT (Multi-threading)
TM (Thermal monitor supported)
PBE (Pending break enabled)
Version: Intel(R) Core(TM) i7-5600U CPU @ 2.60GHz
Voltage: 1.1 V
External Clock: 100 MHz
Max Speed: 3600 MHz
Current Speed: 2600 MHz
Status: Populated, Enabled
Upgrade: Socket BGA1168
L1 Cache Handle: 0x0005
L2 Cache Handle: 0x0006
L3 Cache Handle: 0x0007
Serial Number: None
Asset Tag: None
Part Number: None
Core Count: 2
Core Enabled: 2
Thread Count: 4
Characteristics:
64-bit capable
Multi-Core
Hardware Thread
Execute Protection
Enhanced Virtualization
Power/Performance Control
070701000000DB000081B400000000000000000000000168EFD664000000D7000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-1.samplekgraft_patch_1_2_2
active: 1
RPM: kgraft-patch-3_12_62-60_64_8-default-1-2.2.x86_64
CVE: (none - this is an initial kGraft patch)
bug fixes and enhancements: (none)
kgraft_patch_2_2_1
active: 0
070701000000DC000081B400000000000000000000000168EFD664000000CA000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/livepatching-2.samplekgraft_patch_1_2_2
active: 0
kgraft_patch_2_2_1
active: 1
RPM: kgraft-patch-3_12_62-60_64_8-default-2-2.1.x86_64
CVE: CVE-2016-8666 CVE-2016-6480
bug fixes and enhancements: (none)
070701000000DD000081B400000000000000000000000168EFD664000000CD000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/lscpu-json.aarch64.out{
"cpu_model": "Cortex-A72",
"cpu_vendor": "ARM",
"cpu_numanodes": "4",
"cpu_sockets": "16",
"cpu_stepping": "r0p2",
"cpu_sum": "64",
"cpu_threads": "1",
"cpu_cores": "4"
}
070701000000DE000081B400000000000000000000000168EFD66400000206000000000000000000000000000000000000003900000000susemanager-sls/src/tests/data/lscpu-json.aarch64.sampleArchitecture: aarch64
Byte Order: Little Endian
CPU(s): 64
On-line CPU(s) list: 0-63
Thread(s) per core: 1
Core(s) per socket: 4
Socket(s): 16
NUMA node(s): 4
Vendor ID: ARM
Model: 2
Model name: Cortex-A72
Stepping: r0p2
BogoMIPS: 100.00
NUMA node0 CPU(s): 0-15
NUMA node1 CPU(s): 16-31
NUMA node2 CPU(s): 32-47
NUMA node3 CPU(s): 48-63
Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
070701000000DF000081B400000000000000000000000168EFD664000000B3000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu-json.ppc64.out{
"cpu_model": "POWER9 (architected), altivec supported",
"cpu_numanodes": "1",
"cpu_cores": "1",
"cpu_sockets": "1",
"cpu_sum": "8",
"cpu_threads": "8"
}
070701000000E0000081B400000000000000000000000168EFD664000004F8000000000000000000000000000000000000003700000000susemanager-sls/src/tests/data/lscpu-json.ppc64.sampleArchitecture: ppc64le
Byte Order: Little Endian
CPU(s): 8
On-line CPU(s) list: 0-7
Model name: POWER9 (architected), altivec supported
Model: 2.2 (pvr 004e 0202)
Thread(s) per core: 8
Core(s) per socket: 1
Socket(s): 1
Physical sockets: 2
Physical chips: 1
Physical cores/chip: 4
Virtualization features:
Hypervisor vendor: pHyp
Virtualization type: para
Caches (sum of all):
L1d: 64 KiB (2 instances)
L1i: 64 KiB (2 instances)
NUMA:
NUMA node(s): 1
NUMA node1 CPU(s): 0-7
Vulnerabilities:
Itlb multihit: Not affected
L1tf: Mitigation; RFI Flush, L1D private per thread
Mds: Not affected
Meltdown: Mitigation; RFI Flush, L1D private per thread
Spec store bypass: Mitigation; Kernel entry/exit barrier (eieio)
Spectre v1: Mitigation; __user pointer sanitization, ori31 speculation barrier enabled
Spectre v2: Mitigation; Indirect branch cache disabled, Software link stack flush
Srbds: Not affected
Tsx async abort: Not affected
070701000000E1000081B400000000000000000000000168EFD6640000007D000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/lscpu-json.s390.out{
"cpu_vendor": "IBM/S390",
"cpu_numanodes": "1",
"cpu_cores": "1",
"cpu_sum": "2",
"cpu_threads": "1"
}
070701000000E2000081B400000000000000000000000168EFD6640000064C000000000000000000000000000000000000003600000000susemanager-sls/src/tests/data/lscpu-json.s390.sampleArchitecture: s390x
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Big Endian
CPU(s): 2
On-line CPU(s) list: 0,1
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s) per book: 1
Book(s) per drawer: 1
Drawer(s): 2
NUMA node(s): 1
Vendor ID: IBM/S390
Machine type: 2964
CPU dynamic MHz: 5000
CPU static MHz: 5000
BogoMIPS: 3033.00
Hypervisor: z/VM 6.4.0
Hypervisor vendor: IBM
Virtualization type: full
Dispatching mode: horizontal
L1d cache: 256 KiB
L1i cache: 192 KiB
L2d cache: 4 MiB
L2i cache: 4 MiB
L3 cache: 64 MiB
L4 cache: 480 MiB
NUMA node0 CPU(s): 0,1
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Spec store bypass: Not affected
Vulnerability Spectre v1: Mitigation; __user pointer sanitization
Vulnerability Spectre v2: Mitigation; execute trampolines
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Flags: esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx sie
070701000000E3000081B400000000000000000000000168EFD664000000F0000000000000000000000000000000000000003500000000susemanager-sls/src/tests/data/lscpu-json.x86_64.out{
"cpu_model": "Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz",
"cpu_vendor": "GenuineIntel",
"cpu_numanodes": "1",
"cpu_sockets": "1",
"cpu_stepping": "3",
"cpu_sum": "8",
"cpu_threads": "2",
"cpu_cores": "4"
}
070701000000E4000081B400000000000000000000000168EFD664000009B3000000000000000000000000000000000000003800000000susemanager-sls/src/tests/data/lscpu-json.x86_64.sampleArchitecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 39 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 8
On-line CPU(s) list: 0-7
Vendor ID: GenuineIntel
Model name: Intel(R) Core(TM) i7-6820HQ CPU @ 2.70GHz
CPU family: 6
Model: 94
Thread(s) per core: 2
Core(s) per socket: 4
Socket(s): 1
Stepping: 3
CPU max MHz: 3600.0000
CPU min MHz: 800.0000
BogoMIPS: 5399.81
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bt
s rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadli
ne_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1
avx2 smep bmi2 erms invpcid mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
Virtualization features:
Virtualization: VT-x
Caches (sum of all):
L1d: 128 KiB (4 instances)
L1i: 128 KiB (4 instances)
L2: 1 MiB (4 instances)
L3: 8 MiB (1 instance)
NUMA:
NUMA node(s): 1
NUMA node0 CPU(s): 0-7
Vulnerabilities:
Itlb multihit: KVM: Mitigation: Split huge pages
L1tf: Mitigation; PTE Inversion; VMX conditional cache flushes, SMT vulnerable
Mds: Mitigation; Clear CPU buffers; SMT vulnerable
Meltdown: Mitigation; PTI
Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Spectre v2: Mitigation; Full generic retpoline, IBPB conditional, IBRS_FW, STIBP conditional, RSB filling
Srbds: Mitigation; Microcode
Tsx async abort: Mitigation; TSX disabled
070701000000E5000081B400000000000000000000000168EFD664000000C7000000000000000000000000000000000000003400000000susemanager-sls/src/tests/data/lscpu.ppc64le.sample# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i
0,0,0,0,,0,0
070701000000E6000081B400000000000000000000000168EFD664000000D1000000000000000000000000000000000000003100000000susemanager-sls/src/tests/data/lscpu.s390.sample# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2d,L2i
0,0,0,,,0,0,0,0
070701000000E7000081B400000000000000000000000168EFD66400000103000000000000000000000000000000000000003300000000susemanager-sls/src/tests/data/lscpu.x86_64.sample# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node,,L1d,L1i,L2,L3
0,0,0,0,,0,0,0,0
1,0,0,0,,0,0,0,0
2,1,0,0,,1,1,1,0
3,1,0,0,,1,1,1,0
070701000000E8000081B400000000000000000000000168EFD66400000256000000000000000000000000000000000000002E00000000susemanager-sls/src/tests/data/rhnconf.samplemount_point = /var/spacewalk
kickstart_mount_point = /var/spacewalk
repomd_cache_mount_point = /var/cache
# db settings
db_backend = postgresql
db_user = spacewalk
db_password = spacewalk
db_name = susemanager
db_host = localhost
db_port = 5432
db_ssl_enabled =
# reportdb settings
report_db_backend = postgresql
report_db_user = pythia_susemanager
report_db_password = secret
report_db_name = reportdb
report_db_host = localhost
report_db_port = 5432
report_db_ssl_enabled = 1
report_db_sslrootcert = /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
web.satellite = 1
web.satellite_install =
070701000000E9000081B400000000000000000000000168EFD66400000226000000000000000000000000000000000000002F00000000susemanager-sls/src/tests/data/rhnconf2.samplemount_point = /var/spacewalk
kickstart_mount_point = /var/spacewalk
repomd_cache_mount_point = /var/cache
# db settings
db_backend = postgresql
db_user = spacewalk
db_password = spacewalk
db_name = susemanager
db_host = localhost
db_port = 5432
db_ssl_enabled =
# reportdb settings
report_db_backend = postgresql
report_db_user =
report_db_password =
report_db_name =
report_db_host =
report_db_port =
report_db_ssl_enabled = 1
report_db_sslrootcert = /etc/pki/trust/anchors/RHN-ORG-TRUSTED-SSL-CERT
web.satellite = 1
web.satellite_install =
070701000000EA000081B400000000000000000000000168EFD66400000015000000000000000000000000000000000000003100000000susemanager-sls/src/tests/data/rhnconfdef.sampleproduct_name = Uyuni
070701000000EB000081B400000000000000000000000168EFD664000004D9000000000000000000000000000000000000002B00000000susemanager-sls/src/tests/data/udev.sampleP: /devices/LNXSYSTM:00/LNXPWRBN:00
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00
E: DRIVER=button
E: MODALIAS=acpi:LNXPWRBN:
E: SUBSYSTEM=acpi
P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2
E: EV=3
E: ID_FOR_SEAT=input-acpi-LNXPWRBN_00
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: KEY=10000000000000 0
E: MODALIAS=input:b0019v0000p0001e0000-e0,1,k74,ramlsfw
E: NAME="Power Button"
E: PHYS="LNXPWRBN/button/input0"
E: PRODUCT=19/0/1/0
E: PROP=0
E: SUBSYSTEM=input
E: TAGS=:seat:
E: USEC_INITIALIZED=2010022
P: /devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
N: input/event2
E: BACKSPACE=guess
E: DEVNAME=/dev/input/event2
E: DEVPATH=/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2
E: ID_INPUT=1
E: ID_INPUT_KEY=1
E: ID_PATH=acpi-LNXPWRBN:00
E: ID_PATH_TAG=acpi-LNXPWRBN_00
E: MAJOR=13
E: MINOR=66
E: SUBSYSTEM=input
E: TAGS=:power-switch:
E: USEC_INITIALIZED=2076101
E: XKBLAYOUT=us
E: XKBMODEL=pc105
P: /devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVPATH=/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0
E: DEVTYPE=scsi_device
E: DRIVER=sd
E: MODALIAS=scsi:t-0x00
E: SUBSYSTEM=scsi
070701000000EC000081B400000000000000000000000168EFD6640000053C000000000000000000000000000000000000002500000000susemanager-sls/src/tests/mockery.py# pylint: disable=missing-module-docstring
import sys
import os
try:
# pylint: disable-next=unused-import
from cStringIO import StringIO
except ImportError:
from io import StringIO
from unittest.mock import MagicMock
def setup_environment():
"""
Mock the environment.
:return:
"""
if "salt" not in sys.modules or not isinstance(sys.modules["salt"], MagicMock):
sys.modules["salt"] = MagicMock()
sys.modules["salt.cache"] = MagicMock()
sys.modules["salt.config"] = MagicMock()
sys.modules["salt.utils"] = MagicMock()
sys.modules["salt.utils.versions"] = MagicMock()
sys.modules["salt.utils.odict"] = MagicMock()
sys.modules["salt.utils.minions"] = MagicMock()
sys.modules["salt.utils.network"] = MagicMock()
sys.modules["salt.modules"] = MagicMock()
sys.modules["salt.modules.cmdmod"] = MagicMock()
sys.modules["salt.modules.virt"] = MagicMock()
sys.modules["salt.states"] = MagicMock()
sys.modules["salt.exceptions"] = MagicMock(CommandExecutionError=Exception)
def get_test_data(filename):
    """
    Read and return the contents of a test data file.
    :param filename:
    :return:
    """
    # pylint: disable-next=unspecified-encoding
    with open(os.path.sep.join([os.path.abspath(""), "data", filename]), "r") as src:
        return src.read()
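# Typical usage from a test module:
#     from . import mockery
#     mockery.setup_environment()
#     sample = mockery.get_test_data("dmidecode.sample")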
070701000000ED000081B400000000000000000000000168EFD66400000BEA000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_beacon_pkgset.py"""
Author: Bo Maryniuk <bo@suse.de>
"""
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
with patch(
"salt.config.minion_config", return_value={"cachedir": "/var/cache/salt/minion"}
):
from ..beacons import pkgset
pkgset.__context__ = dict()
@patch.object(pkgset.os.path, "exists", MagicMock(return_value=True))
@patch.object(pkgset, "__context__", {pkgset.__virtualname__: ""})
@patch.object(pkgset, "CACHE", MagicMock())
def test_beacon():
"""
Test beacon functionality.
"""
mock_content = MagicMock(
**{
"return_value.__enter__.return_value.read.return_value.strip.return_value": "test"
}
)
# pylint: disable-next=pointless-string-statement
"""
    The __context__ has no pkgset data; the cache contains the same data as the cookie.
"""
with patch.object(pkgset, "open", mock_content), patch.object(
pkgset, "__context__", {}
) as mock_context, patch.object(
pkgset.CACHE, "fetch", return_value={}
), patch.object(
pkgset.CACHE, "store"
) as mock_cache_store:
data = pkgset.beacon({})
assert mock_context["pkgset"] == "test"
# pylint: disable-next=use-implicit-booleaness-not-comparison
assert data == []
mock_cache_store.assert_called_once()
# pylint: disable-next=pointless-string-statement
"""
    The __context__ has no pkgset data; the cache contains different data than the cookie.
"""
with patch.object(pkgset, "open", mock_content), patch.object(
pkgset, "__context__", {}
) as mock_context, patch.object(
pkgset.CACHE, "fetch", return_value={"data": "other"}
), patch.object(
pkgset.CACHE, "store"
) as mock_cache_store:
data = pkgset.beacon({})
assert mock_context["pkgset"] == "test"
assert data == [{"tag": "changed"}]
mock_cache_store.assert_called_once()
# pylint: disable-next=pointless-string-statement
"""
    The __context__ has pkgset data, but the data differs from the cookie.
"""
with patch.object(pkgset, "open", mock_content), patch.object(
pkgset, "__context__", {"pkgset": "other"}
) as mock_context, patch.object(pkgset.CACHE, "store") as mock_cache_store:
data = pkgset.beacon({})
assert mock_context["pkgset"] == "test"
assert data == [{"tag": "changed"}]
mock_cache_store.assert_called_once()
# pylint: disable-next=pointless-string-statement
"""
    The __context__ has pkgset data; the data is the same as the cookie.
"""
with patch.object(pkgset, "open", mock_content), patch.object(
pkgset, "__context__", {"pkgset": "test"}
) as mock_context, patch.object(pkgset.CACHE, "store") as mock_cache_store:
data = pkgset.beacon({})
assert mock_context["pkgset"] == "test"
# pylint: disable-next=use-implicit-booleaness-not-comparison
assert data == []
mock_cache_store.assert_not_called()
070701000000EE000081B400000000000000000000000168EFD6640000067E000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_beacon_reboot_info.py# pylint: disable=missing-module-docstring
from ..beacons import reboot_info
import pytest
def _reboot_not_required():
return {"reboot_required": False}
def _reboot_required():
return {"reboot_required": True}
context_reboot_required = {"reboot_needed": True}
context_reboot_not_required = {"reboot_needed": False}
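# Each case gives the beacon's saved __context__, the mocked
# reboot_info.reboot_required result, and whether an event should fire.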
@pytest.mark.parametrize(
"context, module_fn, fire_event",
[
(
            # The __context__ is empty and a reboot is not required; don't fire an event.
{},
_reboot_not_required,
False,
),
(
            # The __context__ is empty and a reboot is required; fire an event.
{},
_reboot_required,
True,
),
(
            # The __context__ already records that a reboot is required and that state persists; don't fire again.
context_reboot_required,
_reboot_required,
False,
),
(
            # The __context__ records that a reboot is required and the state changes to not required; don't fire an event.
context_reboot_required,
_reboot_not_required,
False,
),
(
            # The __context__ records that a reboot isn't required but the state changed to required; fire an event.
context_reboot_not_required,
_reboot_required,
True,
),
],
)
def test_beacon(context, module_fn, fire_event):
reboot_info.__context__ = context
reboot_info.__salt__ = {"reboot_info.reboot_required": module_fn}
ret = reboot_info.beacon({})
expected_result = [{"reboot_needed": True}] if fire_event else []
assert ret == expected_result
070701000000EF000081B400000000000000000000000168EFD66400001FAB000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_grains_cpuinfo.py"""
Author: bo@suse.de
"""
import json
import pytest
from unittest.mock import MagicMock, patch, mock_open
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..grains import cpuinfo
def test_total_num_cpus():
"""
Test total_num_cpus function.
:return:
"""
os_listdir = [
"cpu0",
"cpu1",
"cpu2",
"cpu3",
"cpufreq",
"cpuidle",
"power",
"modalias",
"kernel_max",
"possible",
"online",
"offline",
"isolated",
"uevent",
"intel_pstate",
"microcode",
"present",
]
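    # Only the cpu<N> entries should be counted; the other sysfs entries
    # (cpufreq, cpuidle, power, ...) must be ignored by total_num_cpus().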
with patch("os.path.exists", MagicMock(return_value=True)):
with patch("os.listdir", MagicMock(return_value=os_listdir)):
cpus = cpuinfo.total_num_cpus()
# pylint: disable-next=unidiomatic-typecheck
assert type(cpus) == dict
assert "total_num_cpus" in cpus
assert cpus["total_num_cpus"] == 4
def test_cpusockets_dmidecode_count_sockets():
"""
Test _dmidecode_count_sockets sub in cpusockets function.
:return:
"""
sample = mockery.get_test_data("dmidecode.sample")
cpuinfo.log = MagicMock()
with patch.dict(
cpuinfo.__salt__,
{"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": sample})},
):
# pylint: disable-next=protected-access
out = cpuinfo._dmidecode_count_sockets([])
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
assert "cpusockets" in out
assert out["cpusockets"] == 1
def test_cpusockets_cpuinfo_count_sockets():
"""
Test _cpuinfo_count_sockets sub in cpusockets function.
:return:
"""
cpuinfo.log = MagicMock()
    # The cpuinfo parser is not applicable to non-Intel architectures, so it should return nothing.
for sample_name in ["cpuinfo.s390.sample", "cpuinfo.ppc64le.sample"]:
with patch("os.access", MagicMock(return_value=True)):
with patch.object(
cpuinfo,
"open",
mock_open(read_data=mockery.get_test_data(sample_name)),
create=True,
):
# pylint: disable-next=protected-access
assert cpuinfo._cpuinfo_count_sockets([]) is None
with patch("os.access", MagicMock(return_value=True)):
with patch.object(
cpuinfo,
"open",
mock_open(read_data=mockery.get_test_data("cpuinfo.sample")),
create=True,
):
# pylint: disable-next=protected-access
out = cpuinfo._cpuinfo_count_sockets([])
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
assert "cpusockets" in out
assert out["cpusockets"] == 1
@pytest.mark.parametrize("arch", ["ppc64le", "s390", "x86_64"])
def test_cpusockets_lscpu_count_sockets(arch):
"""
Test _lscpu_count_sockets sub in cpusockets function.
:return:
"""
# pylint: disable-next=consider-using-f-string
fn_smpl = "lscpu.{}.sample".format(arch)
cpuinfo.log = MagicMock()
with patch.dict(
cpuinfo.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": 0, "stdout": mockery.get_test_data(fn_smpl)}
)
},
):
# pylint: disable-next=protected-access
out = cpuinfo._lscpu_count_sockets([])
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
assert "cpusockets" in out
assert out["cpusockets"] == 1
@pytest.mark.parametrize("arch", ["x86_64", "aarch64", "s390", "ppc64"])
def test_cpusockets_cpu_data(arch):
"""
Test lscpu -J data extraction function.
:return:
"""
cpuinfo.log = MagicMock()
# pylint: disable-next=consider-using-f-string
sample_data = mockery.get_test_data("lscpu-json.{}.sample".format(arch))
with patch.dict(
cpuinfo.__salt__,
{"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": sample_data})},
):
out = cpuinfo.cpu_data()
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
# pylint: disable-next=consider-using-f-string
expected = json.loads(mockery.get_test_data("lscpu-json.{}.out".format(arch)))
assert out == expected
def test_arch_specs_unknown():
with patch("src.grains.cpuinfo._get_architecture", return_value="unknown"), patch(
"src.grains.cpuinfo._add_ppc64_extras"
) as mock_ppc64, patch("src.grains.cpuinfo._add_arm64_extras") as mock_arm64, patch(
"src.grains.cpuinfo._add_z_systems_extras"
) as mock_z:
specs = cpuinfo.arch_specs()
assert mock_ppc64.call_count == 0
assert mock_arm64.call_count == 0
assert mock_z.call_count == 0
assert specs == {"cpu_arch_specs": {}}
def test_arch_specs_ppc64():
# pylint: disable-next=protected-access
cpuinfo._get_architecture = MagicMock(return_value="ppc64")
# pylint: disable-next=protected-access
cpuinfo._read_file = MagicMock(
side_effect=lambda path: (
"shared_processor_mode = 1"
if path == "/proc/ppc64/lparcfg"
else "device tree content"
)
)
specs = cpuinfo.arch_specs()
assert specs == {
"cpu_arch_specs": {"lpar_mode": "shared", "device_tree": "device tree content"}
}
def test_arch_specs_arm64():
# pylint: disable-next=protected-access
cpuinfo._get_architecture = MagicMock(return_value="arm64")
# pylint: disable-next=protected-access
cpuinfo._read_file = MagicMock(return_value="")
with patch.dict(cpuinfo.__salt__, {"cmd.run_all": MagicMock()}), patch.object(
cpuinfo, "_which_bin"
) as mock_which_bin:
mock_which_bin.return_value = "/usr/bin/dmidecode"
dmi_output = "Family: test_family\nManufacturer: test_manufacturer\nSignature: test_signature"
cpuinfo.__salt__["cmd.run_all"].return_value = {
"retcode": 0,
"stdout": dmi_output,
}
specs = cpuinfo.arch_specs()
assert specs == {
"cpu_arch_specs": {
"family": "test_family",
"manufacturer": "test_manufacturer",
"signature": "test_signature",
}
}
@pytest.mark.parametrize(
"output, expected_specs",
[
(
"VM00 Type: test_type\nType Name: test_model\nVM00 Name: test_layer\nSockets: test_sockets",
{
"type": "test_type",
"type_name": "test_model",
"layer_type": "test_layer",
},
),
(
"LPAR Type: lpar_type\nType Name: lpar_model\nLPAR Name: lpar_layer\nSockets: lpar_sockets",
{
"type": "lpar_type",
"type_name": "lpar_model",
"layer_type": "lpar_layer",
},
),
],
)
def test_add_z_systems_extras(output, expected_specs):
specs = {}
with patch.dict(cpuinfo.__salt__, {"cmd.run_all": MagicMock()}), patch.object(
cpuinfo, "_which_bin"
) as mock_which_bin:
mock_which_bin.return_value = "/usr/bin/read_values"
cpuinfo.__salt__["cmd.run_all"].return_value = {
"retcode": 0,
"stdout": output,
}
# pylint: disable-next=protected-access
cpuinfo._add_z_systems_extras(specs)
assert specs == expected_specs
def test_exact_string_match():
text = "Family: test_family\nManufacturer: test_manufacturer\nSignature: test_signature"
# pylint: disable-next=protected-access
result = cpuinfo._exact_string_match("Family", text)
assert result == "test_family"
def test_read_file_failure():
# pylint: disable-next=protected-access
cpuinfo._read_file = MagicMock(return_value="")
# pylint: disable-next=protected-access
result = cpuinfo._read_file("/path/to/nonexistent/file")
assert result == ""
070701000000F0000081B400000000000000000000000168EFD664000007CC000000000000000000000000000000000000003400000000susemanager-sls/src/tests/test_grains_mgr_server.py"""
Author: mc@suse.com
"""
# pylint: disable-next=unused-import
import sys
import os
# pylint: disable-next=unused-import
import json
# pylint: disable-next=unused-import
import pytest
# pylint: disable-next=unused-import
from unittest.mock import MagicMock, patch, mock_open
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..grains import mgr_server
def test_server():
mgr_server.RHNCONF = os.path.join(os.path.abspath(""), "data", "rhnconf.sample")
mgr_server.RHNCONFDEF = os.path.join(
os.path.abspath(""), "data", "rhnconfdef.sample"
)
grains = mgr_server.server_grains()
# pylint: disable-next=unidiomatic-typecheck
assert type(grains) == dict
assert "is_mgr_server" in grains
assert "has_report_db" in grains
assert "is_uyuni" in grains
assert grains["is_mgr_server"]
assert grains["has_report_db"]
assert grains["report_db_name"] == "reportdb"
assert grains["report_db_host"] == "localhost"
assert grains["report_db_port"] == "5432"
assert grains["is_uyuni"]
def test_server_no_reportdb():
mgr_server.RHNCONF = os.path.join(os.path.abspath(""), "data", "rhnconf2.sample")
mgr_server.RHNCONFDEF = os.path.join(
os.path.abspath(""), "data", "rhnconfdef.sample"
)
grains = mgr_server.server_grains()
# pylint: disable-next=unidiomatic-typecheck
assert type(grains) == dict
assert "is_mgr_server" in grains
assert "has_report_db" in grains
assert "is_uyuni" in grains
assert grains["is_mgr_server"]
assert not grains["has_report_db"]
assert grains["is_uyuni"]
def test_no_server():
mgr_server.RHNCONF = "/etc/rhn/rhn.conf"
grains = mgr_server.server_grains()
# pylint: disable-next=unidiomatic-typecheck
assert type(grains) == dict
assert "is_mgr_server" in grains
assert "has_report_db" not in grains
assert "is_uyuni" not in grains
assert not grains["is_mgr_server"]
070701000000F1000081B400000000000000000000000168EFD66400000730000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_mgr_master_tops.py# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2025 SUSE LLC
#
# SPDX-License-Identifier: Apache-2.0
"""
:codeauthor: Pablo Suárez Hernández <psuarezhernandez@suse.de>
"""
# pylint: disable-next=unused-import
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
import sys
sys.path.append("../../modules/tops")
# pylint: disable-next=wrong-import-position
import mgr_master_tops
TEST_MANAGER_STATIC_TOP = {
"base": [
"channels",
"certs",
"packages",
"custom",
"custom_groups",
"custom_org",
"formulas",
"services.salt-minion",
"services.docker",
"services.kiwi-image-server",
"ansible",
"switch_to_bundle.mgr_switch_to_venv_minion",
]
}
def test_virtual():
"""
    Test that __virtual__ returns the module name.
"""
assert mgr_master_tops.__virtual__() == "mgr_master_tops"
def test_top_default_saltenv():
"""
    Test that the top function returns the static Uyuni top state
    for the base environment when no environment has been specified.
"""
kwargs = {"opts": {"environment": None}}
assert mgr_master_tops.top(**kwargs) == TEST_MANAGER_STATIC_TOP
def test_top_base_saltenv():
"""
    Test that the top function returns the static Uyuni top state
    for the base environment when the environment is set to "base".
"""
kwargs = {"opts": {"environment": "base"}}
assert mgr_master_tops.top(**kwargs) == TEST_MANAGER_STATIC_TOP
def test_top_unknown_saltenv():
"""
    Test that the top function returns None for unknown salt environments.
"""
kwargs = {"opts": {"environment": "otherenv"}}
    assert mgr_master_tops.top(**kwargs) is None
070701000000F2000081B400000000000000000000000168EFD66400001649000000000000000000000000000000000000003400000000susemanager-sls/src/tests/test_module_appstreams.py# pylint: disable=missing-module-docstring
import pytest
from unittest.mock import patch
from ..modules.appstreams import (
_parse_nsvca,
_get_module_info,
_get_enabled_module_names,
)
from collections import namedtuple
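# Minimal stand-in for the subprocess.CompletedProcess object returned by
# subprocess.run, carrying just the fields the module under test reads.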
MockDNFCommandResult = namedtuple("MockObject", ["returncode", "stdout"])
@pytest.mark.parametrize(
"module_info_output, expected_result",
[
(
[
"Name : maven",
"Stream : 3.8 [e] [a]",
"Version : 9020020230511160017",
"Context : 4b0b4b45",
"Architecture : x86_64",
],
{
"name": "maven",
"stream": "3.8",
"version": "9020020230511160017",
"context": "4b0b4b45",
"architecture": "x86_64",
},
),
(
[
"Name : ruby",
"Stream : 3.1 [e] [a]",
"Version : 9010020221119221509",
"Context : 8d1baf64",
"Architecture : x86_64",
],
{
"name": "ruby",
"stream": "3.1",
"version": "9010020221119221509",
"context": "8d1baf64",
"architecture": "x86_64",
},
),
(["Context : 8d1baf64", "Architecture : x86_64"], None),
([], None),
],
)
def test_parse_nsvca(module_info_output, expected_result):
assert _parse_nsvca(module_info_output) == expected_result
sample_maven_ruby_module_info_result = """
Name : maven
Stream : 3.8 [e] [a]
Version : 9020020230511160017
Context : 4b0b4b45
Architecture : x86_64
Profiles : common [d]
Default profiles : common
Repo : susemanager:rockylinux-9-appstream-x86_64
Summary : Java project management and project comprehension tool
Description : Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information.
Requires : platform:[el9]
Artifacts : apache-commons-cli-0:1.5.0-4.module+el9.2.0+14755+4b0b4b45.noarch
: apache-commons-cli-0:1.5.0-4.module+el9.2.0+14755+4b0b4b45.src
Name : ruby
Stream : 3.1 [e] [a]
Version : 9010020221119221509
Context : 8d1baf64
Architecture : x86_64
Profiles : common [d]
Default profiles : common
Repo : susemanager:rockylinux-9-appstream-x86_64
Summary : An interpreter of object-oriented scripting language
Description : Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible.
Requires :
Artifacts : ruby-0:3.1.2-141.module+el9.1.0+13172+8d1baf64.i686
: ruby-0:3.1.2-141.module+el9.1.0+13172+8d1baf64.src
"""
def test_get_module_info():
module_names = ["maven", "ruby"]
mocked_command_result = MockDNFCommandResult(
returncode=0, stdout=sample_maven_ruby_module_info_result
)
expected_result = [
{
"name": "maven",
"stream": "3.8",
"version": "9020020230511160017",
"context": "4b0b4b45",
"architecture": "x86_64",
},
{
"name": "ruby",
"stream": "3.1",
"version": "9010020221119221509",
"context": "8d1baf64",
"architecture": "x86_64",
},
]
with patch("subprocess.run", return_value=mocked_command_result):
assert _get_module_info(module_names) == expected_result
sample_dnf_enabled_modules_result = """rockylinux-9-appstream for x86_64
Name Stream Profiles Summary
maven 3.8 [e] common [d] Java project management and project comprehension tool
nginx 1.22 [e] common [d] nginx webserver
nodejs 18 [e] common [d], development, minimal, s2i Javascript runtime
ruby 3.1 [e] common [d] An interpreter of object-oriented scripting language
Rocky Linux 9 CRB (x86_64)
Name Stream Profiles Summary
swig 4.1 [e] common [d], complete Connects C/C++/Objective C to some high-level programming languages
"""
def test_get_enabled_module_names():
mocked_command_result = MockDNFCommandResult(
returncode=0, stdout=sample_dnf_enabled_modules_result
)
expected_result = ["maven:3.8", "nginx:1.22", "nodejs:18", "ruby:3.1", "swig:4.1"]
with patch("subprocess.run", return_value=mocked_command_result):
assert _get_enabled_module_names() == expected_result
070701000000F3000081B400000000000000000000000168EFD6640000085B000000000000000000000000000000000000003B00000000susemanager-sls/src/tests/test_module_container_runtime.py"""
Unit tests for the container_runtime module
"""
# pylint: disable-next=unused-import
import os
from ..modules import container_runtime
# pylint: disable-next=unused-import
from unittest.mock import patch, mock_open, MagicMock
import pytest
@pytest.fixture(autouse=True)
def stripped_env(monkeypatch):
"""Strip env variables that are use by the system under test."""
monkeypatch.delenv("container", raising=False)
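# Each case gives the mocked _read_file content, a map of os.path.exists
# results per path, and the runtime get_container_runtime() should detect.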
@pytest.mark.parametrize(
"mock_read_file_return, mock_exists_return, expected_result",
[
("docker", {"/proc/self/cgroup": True, "/.dockerenv": True}, "docker"),
("", {"/proc/vz": True, "/proc/bc": False}, "openvz"),
("podman", {"/run/.containerenv": True}, "podman"),
("", {"/__runsc_containers__": True}, "gvisor"),
("", {"/run/.containerenv": False}, None),
],
)
def test_get_container_runtime(
mock_read_file_return, mock_exists_return, expected_result
):
mock_read_file = MagicMock(return_value=mock_read_file_return)
mock_exists = MagicMock(
side_effect=lambda path: mock_exists_return.get(path, False)
)
# pylint: disable-next=protected-access
container_runtime._read_file = mock_read_file
with patch("os.path.exists", mock_exists):
assert container_runtime.get_container_runtime() == expected_result
@pytest.mark.parametrize(
"file_name, expected_result",
[
("/run/.containerenv", "podman"),
("/.dockerenv", "docker"),
("/var/run/secrets/kubernetes.io/serviceaccount", "kube"),
],
)
def test_detect_container_files(file_name, expected_result):
mock_exists = MagicMock(side_effect=lambda path: path == file_name)
with patch("os.path.exists", mock_exists):
# pylint: disable-next=protected-access
assert container_runtime._detect_container_files() == expected_result
def test_detect_container_files_not_found():
mock_exists = MagicMock(side_effect=lambda path: False)
with patch("os.path.exists", mock_exists):
# pylint: disable-next=protected-access
assert container_runtime._detect_container_files() == "not-found"
070701000000F4000081B400000000000000000000000168EFD664000004F4000000000000000000000000000000000000003A00000000susemanager-sls/src/tests/test_module_mainframesysinfo.py"""
Author: Bo Maryniuk <bo@suse.de>
"""
import pytest
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..modules import mainframesysinfo
def test_virtual():
"""
    Test that __virtual__ returns True if os.access returns a positive result, and False otherwise.
:return:
"""
with patch("os.access", MagicMock(return_value=True)):
assert mainframesysinfo.__virtual__() is True
with patch("os.access", MagicMock(return_value=False)):
assert mainframesysinfo.__virtual__() is False
def test_read_values():
"""
Test the read_values method.
:return:
"""
bogus_data = "bogus data"
run_all = {"stdout": bogus_data, "retcode": 0, "stderr": ""}
with patch.dict(
mainframesysinfo.__salt__, {"cmd.run_all": MagicMock(return_value=run_all)}
):
assert mainframesysinfo.read_values() == bogus_data
run_all["retcode"] = 1
run_all["stderr"] = "error here"
with patch.dict(
mainframesysinfo.__salt__, {"cmd.run_all": MagicMock(return_value=run_all)}
):
with pytest.raises(Exception) as x:
mainframesysinfo.read_values()
assert str(x.value) == run_all["stderr"]
070701000000F5000081B400000000000000000000000168EFD66400001032000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_mgrnet.py# pylint: disable=missing-module-docstring,unused-import
import sys
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..modules import mgrnet
mgrnet.__salt__ = {}
def test_mgrnet_virtual():
"""
Test __virtual__ function for the possible cases
    when either 'host' or 'nslookup' is available, or neither of them
"""
with patch.object(
mgrnet,
"_which",
MagicMock(side_effect=[True, False, True, False, False]),
):
ret = mgrnet.__virtual__()
assert ret is True
ret = mgrnet.__virtual__()
assert ret is True
ret = mgrnet.__virtual__()
assert ret[0] is False
def test_mgrnet_dns_fqdns():
"""
Test getting possible FQDNs with DNS tools
"""
check_calls = {"host": [], "nslookup": []}
ipv4_addresses = ["10.0.0.1", "172.16.0.1", "192.168.0.1", "10.10.1.1"]
ipv6_addresses = ["fd12:3456:789a:1::1", "fd12:abcd:1234:1::1"]
names = {
"10.0.0.1": "host10.example.org",
"172.16.0.1": "host172.example.org",
"192.168.0.1": "host10.example.org",
"fd12:3456:789a:1::1": "ipv6host3456.example.org",
"fd12:abcd:1234:1::1": "ipv6hostabcd.example.org",
}
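    # 192.168.0.1 intentionally maps to the same name as 10.0.0.1, and
    # 10.10.1.1 maps to nothing, so dns_fqdns() must deduplicate results
    # and tolerate NXDOMAIN answers.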
# pylint: disable-next=unused-argument
def _cmd_run_host_nslookup(cmd, ignore_retcode=False):
"""
        This function emulates the output of 'host' or 'nslookup'.
"""
ip = cmd[1]
cmd = cmd[0]
check_calls[cmd].append(ip)
rc = 0
if ":" in ip:
# the conversion is not very accurate here, but it's enough for testing
# pylint: disable-next=consider-using-f-string
ptr = "{}.ip6.arpa".format(".".join(reversed([*ip.replace(":", "")])))
else:
# pylint: disable-next=consider-using-f-string
ptr = "{}.in-addr.arpa".format(".".join(reversed(ip.split())))
if cmd == "host":
if ip in names:
# pylint: disable-next=consider-using-f-string
out = "{} domain name pointer {}.\n".format(ptr, names[ip])
else:
# pylint: disable-next=consider-using-f-string
out = "Host {}. not found: 3(NXDOMAIN)\n".format(ptr)
rc = 1
else:
if ip in names:
# pylint: disable-next=consider-using-f-string
out = "{}\tname = {}.\n".format(ptr, names[ip])
else:
# pylint: disable-next=consider-using-f-string
out = "** server can't find {}: NXDOMAIN\n".format(ptr)
rc = 1
return {"retcode": rc, "stdout": out}
with patch.dict(
mgrnet.__salt__, {"cmd.run_all": _cmd_run_host_nslookup}
), patch.object(
mgrnet,
"_which",
MagicMock(side_effect=[True, False, True, False, False]),
), patch.object(
mgrnet.salt.utils.network,
"ip_addrs",
MagicMock(side_effect=[ipv4_addresses.copy(), ipv4_addresses.copy()]),
), patch.object(
mgrnet.salt.utils.network,
"ip_addrs6",
MagicMock(side_effect=[ipv6_addresses.copy(), ipv6_addresses.copy()]),
):
# Test 'host' util output
ret = mgrnet.dns_fqdns()
assert sorted(ret["dns_fqdns"]) == sorted(set(names.values()))
# Test 'nslookup' util output
ret = mgrnet.dns_fqdns()
assert sorted(ret["dns_fqdns"]) == sorted(set(names.values()))
        # Check that 'host' and 'nslookup' were called for all of the IPv4 and IPv6 addresses
for ip in ipv4_addresses:
assert ip in check_calls["host"]
assert ip in check_calls["nslookup"]
for ip in ipv6_addresses:
assert ip in check_calls["host"]
assert ip in check_calls["nslookup"]
assert len(check_calls["host"]) == len(ipv4_addresses) + len(ipv6_addresses)
# Test the case when neither 'host' nor 'nslookup' is present on the system
ret = mgrnet.dns_fqdns()
assert ret == {"dns_fqdns": []}
070701000000F6000081B400000000000000000000000168EFD66400000C47000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_module_reboot_info.py# pylint: disable=missing-module-docstring
from ..modules import reboot_info
from unittest.mock import MagicMock, patch
import pytest
@pytest.mark.parametrize(
"os_family, expected_result",
[
("Debian", True),
("Suse", True),
("RedHat", True),
("Windows", False),
],
)
def test_virtual(os_family, expected_result):
reboot_info.__grains__ = {"os_family": os_family}
assert reboot_info.__virtual__() == expected_result
@pytest.mark.parametrize(
"exit_code_to_check, real_exit_code, result",
[(0, 0, True), (0, 1, False)],
)
def test_check_cmd_exit_code(exit_code_to_check, real_exit_code, result):
mock_run_all = MagicMock(return_value={"stderr": None, "retcode": real_exit_code})
with patch.dict(reboot_info.__salt__, {"cmd.run_all": mock_run_all}):
assert (
# pylint: disable-next=protected-access
reboot_info._check_cmd_exit_code("fake command", exit_code_to_check)
== result
)
@pytest.mark.parametrize(
"file_exists, result",
[(True, True), (False, False)],
)
def test_reboot_required_debian(file_exists, result):
reboot_info.__grains__["os_family"] = "Debian"
with patch("os.path.exists", return_value=file_exists):
assert reboot_info.reboot_required()["reboot_required"] == result
@pytest.mark.parametrize(
"os_major_release, file_exists, result",
[
(15, True, True),
(15, False, False),
(11, True, True),
(11, False, False),
],
)
def test_reboot_required_suse(os_major_release, file_exists, result):
reboot_info.__grains__["os_family"] = "Suse"
reboot_info.__grains__["osmajorrelease"] = os_major_release
with patch("os.path.exists", return_value=file_exists):
assert reboot_info.reboot_required()["reboot_required"] == result
@pytest.mark.parametrize(
"os_major_release, cmd, exit_code, result",
[
(7, "needs-restarting -r", 1, True),
(7, "needs-restarting -r", 0, False),
(8, "dnf -q needs-restarting -r", 1, True),
(8, "dnf -q needs-restarting -r", 99, False),
],
)
def test_reboot_required_redhat(os_major_release, cmd, exit_code, result):
reboot_info.__grains__["os_family"] = "RedHat"
reboot_info.__grains__["osmajorrelease"] = os_major_release
# pylint: disable-next=protected-access
reboot_info._check_cmd_exit_code = MagicMock(return_value=exit_code == 1)
assert reboot_info.reboot_required()["reboot_required"] == result
# pylint: disable-next=protected-access
reboot_info._check_cmd_exit_code.assert_called_once_with(cmd, 1)
@pytest.mark.parametrize(
"pending_transaction, result",
[(True, True), (False, False)],
)
def test_reboot_required_transactional(pending_transaction, result):
reboot_info.__grains__["transactional"] = True
mock_pending_transactions = MagicMock(return_value=pending_transaction)
with patch.dict(
reboot_info.__salt__,
{"transactional_update.pending_transaction": mock_pending_transactions},
):
assert reboot_info.reboot_required()["reboot_required"] == result
070701000000F7000081B400000000000000000000000168EFD66400000725000000000000000000000000000000000000002D00000000susemanager-sls/src/tests/test_module_sap.py"""
Unit tests for the sap module
"""
from ..modules import sap
from unittest.mock import patch, MagicMock
import pytest
@pytest.mark.parametrize(
"dir_struct, expected_result",
[
(
[
("F1B", ("ASCS00", "DVEBMGS00", "AZCC00")),
("F3C", ("HBA00",)),
("F2C", ("HDB00",)),
],
[
{"system_id": "F1B", "instance_type": "ASCS"},
{"system_id": "F1B", "instance_type": "AZCC"},
{"system_id": "F1B", "instance_type": "DVEBMGS"},
{"system_id": "F2C", "instance_type": "HDB"},
{"system_id": "F3C", "instance_type": "HBA"},
],
),
(
[
("F2C", ("HDB00",)),
],
[
{"system_id": "F2C", "instance_type": "HDB"},
],
),
([], []),
],
)
def test_get_workloads(tmpdir, dir_struct, expected_result):
for sap_dir1, sap_dirs2 in dir_struct:
tmp_sap_dir1 = tmpdir.mkdir(sap_dir1)
for sap_dir2 in sap_dirs2:
tmp_sap_dir1.mkdir(sap_dir2)
orig_sap_regex = sap.SAP_REGEX
tmpdir_path = str(tmpdir)
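    # SAP_REGEX expects real /usr/sap paths, so rewrite the tmpdir prefix
    # back to /usr/sap before delegating to the original regex.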
def mock_match(s):
if s.startswith(tmpdir_path):
s = "/usr/sap" + s[len(tmpdir_path) :]
return orig_sap_regex.match(s)
mock_sap_regex = MagicMock()
mock_sap_regex.match = mock_match
with patch.object(sap, "SAP_BASE_PATH", tmpdir), patch.object(
sap, "SAP_REGEX", mock_sap_regex
):
assert sap.get_workloads() == expected_result
def test_no_sap_directory():
with patch("os.path.exists", return_value=False):
result = sap.get_workloads()
        assert not result
070701000000F8000081B400000000000000000000000168EFD6640000072D000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_module_sumautil.py"""
Author: mc@suse.com
"""
# pylint: disable-next=unused-import
import sys
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..modules import sumautil
def test_livepatching_kernelliveversion():
"""
    Test get_kernel_live_version.
:return:
"""
sumautil.log = MagicMock()
with patch.object(sumautil, "_which_bin", return_value="/bogus/path"):
mock = MagicMock(
side_effect=[
{"retcode": 0, "stdout": "ready"},
{
"retcode": 0,
"stdout": mockery.get_test_data("livepatching-1.sample"),
},
]
)
with patch.dict(sumautil.__salt__, {"cmd.run_all": mock}):
out = sumautil.get_kernel_live_version()
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
assert "mgr_kernel_live_version" in out
assert out["mgr_kernel_live_version"] == "kgraft_patch_1_2_2"
mock = MagicMock(
side_effect=[
{"retcode": 0, "stdout": "ready"},
{
"retcode": 0,
"stdout": mockery.get_test_data("livepatching-2.sample"),
},
]
)
with patch.dict(sumautil.__salt__, {"cmd.run_all": mock}):
out = sumautil.get_kernel_live_version()
# pylint: disable-next=unidiomatic-typecheck
assert type(out) == dict
assert "mgr_kernel_live_version" in out
assert out["mgr_kernel_live_version"] == "kgraft_patch_2_2_1"
with patch.object(sumautil, "_which_bin", return_value=None):
out = sumautil.get_kernel_live_version()
assert out is None
070701000000F9000081B400000000000000000000000168EFD66400001649000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_module_supportdata.py"""
Author: mc@suse.com
"""
import pytest
from ..modules import supportdata
from unittest.mock import MagicMock, patch
@pytest.fixture(autouse=True)
def patch_salt():
"""Fixture that patches supportdata.__salt__"""
with patch.dict(
supportdata.__salt__,
{
"cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": "sample"}),
"cp.push_dir": MagicMock(return_value=False),
},
):
with patch(
"src.modules.supportdata._get_supportdata_dir",
MagicMock(return_value="/var/log/supportdata"),
), patch(
"src.modules.supportdata._cleanup_outdated_data",
MagicMock(),
), patch(
"os.makedirs", MagicMock(return_value=True)
):
yield
def test_supportdata_suse():
"""
Test getting supportdata on a standard SUSE system
:return:
"""
supportdata.__grains__["os_family"] = "Suse"
with patch("os.path.exists", MagicMock(side_effect=[False, False, True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
["/sbin/supportconfig", "-R", "/var/log/supportdata"],
runas="root",
)
def test_supportdata_suse_extra_args():
"""
    Test getting supportdata on a standard SUSE system with extra arguments.
:return:
"""
supportdata.__grains__["os_family"] = "Suse"
with patch("os.path.exists", MagicMock(side_effect=[False, False, True])):
out = supportdata.get("-o X,WEB -l 10000")
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
[
"/sbin/supportconfig",
"-R",
"/var/log/supportdata",
"-o",
"X,WEB",
"-l",
"10000",
],
runas="root",
)
def test_supportdata_mlm_proxy():
"""
    Test getting supportdata on an MLM Proxy.
:return:
"""
supportdata.__grains__["os_family"] = "Suse"
with patch("os.path.exists", MagicMock(side_effect=[False, True, True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
[
"/usr/bin/mgrpxy",
"support",
"config",
"--output",
"/var/log/supportdata",
],
runas="root",
)
def test_supportdata_mlm_server():
"""
    Test getting supportdata on an MLM Server.
:return:
"""
supportdata.__grains__["os_family"] = "Suse"
with patch("os.path.exists", MagicMock(side_effect=[True, False, True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
[
"/usr/bin/mgradm",
"support",
"config",
"--output",
"/var/log/supportdata",
],
runas="root",
)
def test_supportdata_redhat():
"""
Test getting supportdata on a RedHat Server
:return:
"""
supportdata.__grains__["os_family"] = "RedHat"
with patch("os.path.exists", MagicMock(side_effect=[True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
[
"/usr/sbin/sosreport",
"--batch",
"--tmp-dir",
"/var/log/supportdata",
],
runas="root",
)
def test_supportdata_debian():
"""
Test getting supportdata on a Debian Server
:return:
"""
supportdata.__grains__["os_family"] = "Debian"
supportdata.__grains__["os"] = "Debian 12"
with patch("os.path.exists", MagicMock(side_effect=[True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is True
assert out["supportdata_dir"] == "/var/log/supportdata"
supportdata.__salt__["cmd.run_all"].assert_called_once_with(
[
"/usr/bin/sosreport",
"--batch",
"--tmp-dir",
"/var/log/supportdata",
],
runas="root",
)
def test_supportdata_unsupported():
"""
    Test getting supportdata from an unsupported server.
:return:
"""
supportdata.__grains__["os_family"] = "Unsupported"
supportdata.__grains__["os"] = "Unsupported 0.1"
with patch("os.path.exists", MagicMock(side_effect=[True])):
out = supportdata.get()
assert isinstance(out, dict)
assert "success" in out
assert out["success"] is False
assert out["supportdata_dir"] == ""
assert out["error"] == "Getting supportdata not supported for Unsupported 0.1"
070701000000FA000081B400000000000000000000000168EFD66400001037000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_module_udevdb.py"""
Author: Bo Maryniuk <bo@suse.de>
"""
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..modules import udevdb
def test_virtual():
"""
    Test that __virtual__ returns True if 'udevadm' is available in the environment.
:return:
"""
with patch("src.modules.udevdb._which_bin", MagicMock(return_value=None)):
assert udevdb.__virtual__() is False
with patch("src.modules.udevdb._which_bin", MagicMock(return_value="/bogus/path")):
assert udevdb.__virtual__() is True
def test_normalize():
"""
    Test that udevdb.normalize does not return nested lists that contain only one item.
:return:
"""
data = {"key": ["value", "here"], "foo": ["bar"], "some": "data"}
assert udevdb.normalize(data) == {
"foo": "bar",
"some": "data",
"key": ["value", "here"],
}
def test_exportdb():
"""
Test udevdb.exportdb method.
:return:
"""
udev_data = mockery.get_test_data("udev.sample")
out = [
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00",
"E": {
"MODALIAS": "acpi:LNXPWRBN:",
"SUBSYSTEM": "acpi",
"DRIVER": "button",
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00",
},
},
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2",
"E": {
"SUBSYSTEM": "input",
"PRODUCT": "19/0/1/0",
"PHYS": '"LNXPWRBN/button/input0"',
"NAME": '"Power Button"',
"ID_INPUT": 1,
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2",
"MODALIAS": "input:b0019v0000p0001e0000-e0,1,k74,ramlsfw",
"ID_PATH_TAG": "acpi-LNXPWRBN_00",
"TAGS": ":seat:",
"PROP": 0,
"ID_FOR_SEAT": "input-acpi-LNXPWRBN_00",
"KEY": "10000000000000 0",
"USEC_INITIALIZED": 2010022,
"ID_PATH": "acpi-LNXPWRBN:00",
"EV": 3,
"ID_INPUT_KEY": 1,
},
},
{
"P": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2",
"E": {
"SUBSYSTEM": "input",
"XKBLAYOUT": "us",
"MAJOR": 13,
"ID_INPUT": 1,
"DEVPATH": "/devices/LNXSYSTM:00/LNXPWRBN:00/input/input2/event2",
"ID_PATH_TAG": "acpi-LNXPWRBN_00",
"DEVNAME": "/dev/input/event2",
"TAGS": ":power-switch:",
"BACKSPACE": "guess",
"MINOR": 66,
"USEC_INITIALIZED": 2076101,
"ID_PATH": "acpi-LNXPWRBN:00",
"XKBMODEL": "pc105",
"ID_INPUT_KEY": 1,
},
"N": "input/event2",
},
{
"P": "/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0",
"E": {
"MODALIAS": "scsi:t-0x00",
"SUBSYSTEM": "scsi",
"DEVTYPE": "scsi_device",
"DRIVER": "sd",
"DEVPATH": "/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0",
},
"X-Mgr": {"SCSI_SYS_TYPE": "0"},
},
]
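    # Two queued cmd.run_all results: first the udevadm database export,
    # then the SCSI device type ("0") that ends up in the X-Mgr section.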
with patch.dict(
udevdb.__salt__,
{
"cmd.run_all": MagicMock(
side_effect=[
{"retcode": 0, "stdout": udev_data},
{"retcode": 0, "stdout": "0"},
]
)
},
):
data = udevdb.exportdb()
# pylint: disable-next=invalid-name
assert data == [_f for _f in data if _f]
for d_idx, d_section in enumerate(data):
assert out[d_idx]["P"] == d_section["P"]
assert out[d_idx].get("N") == d_section.get("N")
assert out[d_idx].get("X-Mgr") == d_section.get("X-Mgr")
for key, value in list(d_section["E"].items()):
assert out[d_idx]["E"][key] == value
070701000000FB000081B400000000000000000000000168EFD66400001CD8000000000000000000000000000000000000003600000000susemanager-sls/src/tests/test_module_uyuni_config.py"""
Author: Ricardo Mateus <rmateus@suse.com>
"""
import pytest
from unittest.mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position,unused-import
import sys
# pylint: disable-next=wrong-import-position
from ..modules import uyuni_config
# pylint: disable-next=wrong-import-position,unused-import
from ..modules.uyuni_config import (
RPCClient,
UyuniChannelsException,
UyuniUsersException,
)
class TestRPCClient:
"""
Test RPCClient object
"""
rpc_client = None
@patch("src.modules.uyuni_config.ssl", MagicMock())
@patch("src.modules.uyuni_config.xmlrpc", MagicMock())
# pylint: disable-next=unused-argument
def setup_method(self, method):
"""
Setup state per test.
:param method:
:return:
"""
self.rpc_client = RPCClient(
user="user", password="password", url="https://somewhere"
)
self.rpc_client.conn.auth.login = MagicMock(return_value="My_token")
self.rpc_client.conn = MagicMock()
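        # Note: conn is replaced wholesale here, so every RPC endpoint becomes
        # an auto-created MagicMock; tests re-mock auth.login where needed.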
# pylint: disable-next=unused-argument
def teardown_method(self, method):
"""
Tear-down state per test.
:param method:
:return:
"""
self.rpc_client = None
uyuni_config.__pillar__ = {}
def test_init_called(self):
"""
Init method called
:return:
"""
assert self.rpc_client.get_user() == "user"
assert self.rpc_client.token is None
def test_init_called_without_pillar(self):
"""
        Init method called without a user password and without any pillar data.
:return:
"""
with pytest.raises(UyuniUsersException):
RPCClient(user="user")
def test_init_called_with_pillar(self):
"""
        Init method called without a user password but with pillar data defined.
:return:
"""
uyuni_config.__pillar__ = {
"uyuni": {"xmlrpc": {"user": "admin_user", "password": "password_user"}}
}
rpc_client = RPCClient(user="user")
assert rpc_client.get_user() == "admin_user"
# pylint: disable-next=protected-access
assert rpc_client._user == "admin_user"
# pylint: disable-next=protected-access
assert rpc_client._password == "password_user"
assert rpc_client.token is None
def test_get_token(self):
"""
        Test the get_token method with token reuse.
:return:
"""
my_mock1 = MagicMock(return_value="My_Special_Token")
my_mock2 = MagicMock(return_value="My_Special_Token_2")
self.rpc_client.conn.auth.login = my_mock1
token = self.rpc_client.get_token()
assert my_mock1.call_count == 1
assert token == "My_Special_Token"
assert (
uyuni_config.__context__.get("uyuni.auth_token_user") == "My_Special_Token"
)
self.rpc_client.get_token()
assert my_mock1.call_count == 1
self.rpc_client.conn.auth.login = my_mock2
self.rpc_client.get_token()
assert my_mock1.call_count == 1
assert my_mock2.call_count == 0
token = self.rpc_client.get_token(True)
assert my_mock1.call_count == 1
assert my_mock2.call_count == 1
assert token == "My_Special_Token_2"
assert (
uyuni_config.__context__.get("uyuni.auth_token_user")
== "My_Special_Token_2"
)
def test_call_rpc(self):
"""
Call any XML-RPC method.
:return:
"""
self.rpc_client.token = "My_token"
out = self.rpc_client("uyuni.some_method")
mo = getattr(self.rpc_client.conn, "uyuni.some_method")
assert out is not None
assert mo.called
mo.assert_called_with("My_token")
out2 = self.rpc_client("uyuni.some_method_2", "my_arg")
mo2 = getattr(self.rpc_client.conn, "uyuni.some_method_2")
assert out2 is not None
assert mo2.called
mo2.assert_called_with("My_token", "my_arg")
def test_call_rpc_crash_handle_generic(self):
"""
        Handle an XML-RPC method crash with a generic error.
:return:
"""
self.rpc_client.token = "the_token"
exc = Exception("generic error when processing")
exc.faultCode = 2951
setattr(self.rpc_client.conn, "uyuni.some_method", MagicMock(side_effect=exc))
with patch("src.modules.uyuni_config.log") as logger:
with pytest.raises(Exception):
self.rpc_client("uyuni.some_method")
mo = getattr(self.rpc_client.conn, "uyuni.some_method")
assert mo.called
mo.assert_called_with("the_token")
assert logger.error.call_args[0] == (
"Unable to call RPC function: %s",
"generic error when processing",
)
def test_call_rpc_crash_handle_reauthenticate_error(self):
"""
        Handle XML-RPC method crash with a reauthentication error
:return:
"""
self.rpc_client.token = "the_token"
self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")
exc = Exception("generic error when processing")
exc.faultCode = 2950
setattr(self.rpc_client.conn, "uyuni.some_method", MagicMock(side_effect=exc))
with patch("src.modules.uyuni_config.log") as logger:
with pytest.raises(Exception):
self.rpc_client("uyuni.some_method")
mo = getattr(self.rpc_client.conn, "uyuni.some_method")
assert mo.call_count == 2
mo.assert_has_calls([call("the_token"), call("the_token_new")])
self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
assert self.rpc_client.get_token() == "the_token_new"
assert logger.error.call_args[0] == (
"Unable to call RPC function: %s",
"generic error when processing",
)
def test_call_rpc_handle_reauthenticate(self):
"""
Handle XML-RPC method and reauthenticate
:return:
"""
self.rpc_client.token = "the_token"
self.rpc_client.conn.auth.login = MagicMock(return_value="the_token_new")
exc = Exception("generic error when processing")
exc.faultCode = 2950
setattr(
self.rpc_client.conn,
"uyuni.some_method",
MagicMock(side_effect=[exc, "return string"]),
)
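        # side_effect: the first call fails with the auth fault, the retry
        # with the freshly obtained token returns the payload.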
assert self.rpc_client.get_token() == "the_token"
with patch("src.modules.uyuni_config.log") as logger:
out = self.rpc_client("uyuni.some_method")
mo = getattr(self.rpc_client.conn, "uyuni.some_method")
assert out is not None
assert out == "return string"
assert mo.call_count == 2
mo.assert_has_calls([call("the_token"), call("the_token_new")])
self.rpc_client.conn.auth.login.assert_called_once_with("user", "password")
assert self.rpc_client.get_token() == "the_token_new"
assert logger.warning.call_args[0] == (
"Fall back to the second try due to %s",
"generic error when processing",
)
070701000000FC000081B400000000000000000000000168EFD66400000EAF000000000000000000000000000000000000003300000000susemanager-sls/src/tests/test_state_appstreams.py# pylint: disable=missing-module-docstring
import pytest
# pylint: disable-next=unused-import
from unittest.mock import patch
from ..states import appstreams
# pylint: disable-next=redefined-outer-name
def mock_enable(appstreams):
return True, "AppStreams enabled.", {"enabled": appstreams}
# pylint: disable-next=redefined-outer-name
def mock_disable(appstreams):
return True, "AppStreams disabled.", {"disabled": appstreams}
def mock_get_currently_enabled():
return ["module1", "module2"]
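# Pattern for both parametrized tests below: with test=True the state only
# reports the changes it would make (result None); with test=False it calls
# the mocked execution module and returns its outcome.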
@pytest.mark.parametrize(
"test_mode, appstreams_to_enable, expected_result",
[
(
True,
["maven", "nginx"],
{
"name": "test_state",
"result": None,
"changes": {"ret": {"enabled": ["maven", "nginx"]}},
"comment": "The following appstreams would be enabled: ['maven', 'nginx']",
},
),
(
True,
[],
{
"name": "test_state",
"result": None,
"changes": {},
"comment": "No AppStreams to enable provided",
},
),
(
False,
["maven"],
{
"name": "test_state",
"result": True,
"changes": {
"enabled": ["maven"],
},
"comment": "AppStreams enabled.",
},
),
(
False,
[],
{
"name": "test_state",
"result": True,
"changes": {},
"comment": "No AppStreams to enable provided",
},
),
],
)
def test_enabled(test_mode, appstreams_to_enable, expected_result):
appstreams.__salt__ = {
"appstreams.get_enabled_modules": mock_get_currently_enabled,
"appstreams.enable": mock_enable,
"appstreams.disable": mock_disable,
}
appstreams.__opts__ = {"test": test_mode}
assert appstreams.enabled("test_state", appstreams_to_enable) == expected_result
@pytest.mark.parametrize(
"test_mode, appstreams_to_disable, expected_result",
[
(
True,
["ruby", "php"],
{
"name": "test_state",
"result": None,
"changes": {"ret": {"disabled": ["ruby", "php"]}},
"comment": "The following appstreams would be disabled: ['ruby', 'php']",
},
),
(
True,
[],
{
"name": "test_state",
"result": None,
"changes": {},
"comment": "No AppStreams to disable provided",
},
),
(
False,
["postgresql"],
{
"name": "test_state",
"result": True,
"changes": {
"disabled": ["postgresql"],
},
"comment": "AppStreams disabled.",
},
),
(
False,
[],
{
"name": "test_state",
"result": True,
"changes": {},
"comment": "No AppStreams to disable provided",
},
),
],
)
def test_disabled(test_mode, appstreams_to_disable, expected_result):
appstreams.__salt__ = {
"appstreams.get_enabled_modules": mock_get_currently_enabled,
"appstreams.enable": mock_enable,
"appstreams.disable": mock_disable,
}
appstreams.__opts__ = {"test": test_mode}
assert appstreams.disabled("test_state", appstreams_to_disable) == expected_result
070701000000FD000081B400000000000000000000000168EFD66400001B11000000000000000000000000000000000000003200000000susemanager-sls/src/tests/test_state_mgrcompat.py# -*- coding: utf-8 -*-
"""
Test custom wrapper for "module.run" state module.
Author: Pablo Suárez Hernández <psuarezhernandez@suse.com>
"""
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..states import mgrcompat
TAILORED_MODULE_RUN_KWARGS = {
"service.running": [{"text": "superseded", "name": "salt-minion"}, {"foo": "bar"}]
}
MGRCOMPAT_MODULE_RUN_KWARGS = {
"name": "service.running",
"text": "superseded",
"m_name": "salt-minion",
"kwargs": {"foo": "bar"},
}
mgrcompat.log = MagicMock()
mgrcompat.OrderedDict = dict
mgrcompat.__opts__ = {}
mgrcompat.__grains__ = {}
mgrcompat.__states__ = {}
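# The test names refer to Salt release codenames: 3005 Phosphorus,
# 3004 Silicon, 3003 Aluminium, 3002 Magnesium, 3001 Sodium, 3000 Neon.
# On releases where "module.run" still defaults to the legacy signature,
# opting in via the "use_superseded" option makes mgrcompat translate the
# kwargs into the new-style form (TAILORED_MODULE_RUN_KWARGS).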
def test_module_run_on_phosphorus():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3005, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_silicon():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3004, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_silicon_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3004, None, None, None]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_aluminum():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3003, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_aluminum_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3003, None, None, None]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_magnesium():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3002, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_magnesium_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3002, None, None, None]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_sodium():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3001, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_sodium_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3001, None, None, None]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_neon():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3000, None, None, None]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_neon_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [3000, None, None, None]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_2019_2_0_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [2019, 2, 0, 0]}
), patch.dict(mgrcompat.__opts__, {"use_superseded": ["module.run"]}), patch.dict(
mgrcompat.__states__, {"module.run": mock}
):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**TAILORED_MODULE_RUN_KWARGS)
def test_module_run_on_2019_2_0_without_use_superseded():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [2019, 2, 0, 0]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
def test_module_run_on_2016_11_4():
mock = MagicMock(return_value={"changes": {"service.running": "foobar"}})
with patch.dict(
mgrcompat.__grains__, {"saltversioninfo": [2016, 11, 4, 0]}
), patch.dict(mgrcompat.__states__, {"module.run": mock}):
mgrcompat.module_run(**MGRCOMPAT_MODULE_RUN_KWARGS)
mock.assert_called_once_with(**MGRCOMPAT_MODULE_RUN_KWARGS)
070701000000FE000081B400000000000000000000000168EFD6640000043E000000000000000000000000000000000000003100000000susemanager-sls/src/tests/test_state_mgrutils.py# -*- coding: utf-8 -*-
"""
Test for mgrutils states
"""
from unittest.mock import MagicMock, patch
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..states import mgrutils
mgrutils.__opts__ = {"test": False}
mgrutils.__grains__ = {}
mgrutils.__salt__ = {}
mgrutils.__states__ = {}
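# mgrutils.cmd_dump() is expected to run the given command via cmd.run and
# write its output to the target path through file.managed, as mocked here.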
def test_cmd_dump():
"""
Test cmd_dump()
"""
mock_managed = MagicMock(return_value={"comment": "dummy"})
with patch.dict(mgrutils.__states__, {"file.managed": mock_managed}):
mock_run = MagicMock(return_value="output content")
with patch.dict(mgrutils.__salt__, {"cmd.run": mock_run}):
ret = mgrutils.cmd_dump("/path/to/out", "/bin/bar --out xml")
mock_run.assert_called_once_with(
"/bin/bar --out xml", raise_err=True, python_shell=False
)
mock_managed.assert_called_once_with(
"/path/to/out", contents="output content"
)
assert ret["comment"] == "dummy"
assert ret["name"] == "/path/to/out"
070701000000FF000081B400000000000000000000000168EFD66400001399000000000000000000000000000000000000003000000000susemanager-sls/src/tests/test_state_product.py"""
Author: cbbayburt@suse.com
"""
import sys
from unittest.mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position
from ..states import product
# Mock globals
product.log = MagicMock()
product.__salt__ = {}
product.__grains__ = {}
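# product.__virtual__() gates the module on a SUSE os_family and on zypper
# >= 1.8.13; the tests below cover the supported, unsupported and missing
# zypper cases as well as non-SUSE systems.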
@patch.dict(product.__grains__, {"os_family": "Suse"})
def test_suse_with_zypper():
"""
    Test that the state module is only available on SUSE systems with a
    supported zypper version (>= 1.8.13).
"""
# Supported zypper version
with patch.dict(
product.__salt__,
{
"pkg.info_installed": MagicMock(
return_value={"zypper": {"version": "1.9.0"}}
)
},
):
with patch.object(product, "version_cmp", MagicMock(return_value=1)):
            assert product.__virtual__() == "product"
product.version_cmp.assert_called_once_with("1.9.0", "1.8.13")
# Unsupported zypper version
with patch.dict(
product.__salt__,
{
"pkg.info_installed": MagicMock(
return_value={"zypper": {"version": "1.8.0"}}
)
},
):
with patch.object(product, "version_cmp", MagicMock(return_value=-1)):
assert product.__virtual__() == (
False,
"Module product: zypper 1.8.13 or greater required",
)
product.version_cmp.assert_called_once_with("1.8.0", "1.8.13")
# No zypper available
with patch.dict(
product.__salt__,
{
"pkg.info_installed": MagicMock(
return_value=sys.modules["salt.exceptions"].CommandExecutionError
)
},
):
assert product.__virtual__() == (
False,
"Module product: zypper package manager not found",
)
@patch.dict(product.__grains__, {"os_family": "Non-Suse"})
def test_non_suse():
"""
Test if the state module is unavailable for Non-SUSE OS
"""
assert product.__virtual__() == (False, "Module product: non SUSE OS not supported")
def test_get_missing_products():
"""
Test if the missing products are returned correctly, excluding
the ones that are provided by another installed product.
"""
test_data = {
"not_installed": {"product1": True, "product2": True},
"provides-product1": {"product1": True, "this-provides-product1": True},
"provides-product2": {"product2": True},
}
pkg_search_mock = MagicMock(
side_effect=[
test_data["not_installed"],
test_data["provides-product1"],
test_data["provides-product2"],
]
)
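    # side_effect order: the first pkg.search call lists not-installed
    # products, the following calls look up what provides each of them.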
with patch.dict(product.__salt__, {"pkg.search": pkg_search_mock}):
# pylint: disable-next=protected-access
res = product._get_missing_products(False)
# Expected pkg.search calls
calls = [
call(
"product()",
refresh=False,
match="exact",
provides=True,
not_installed_only=True,
),
call("product1", match="exact", provides=True),
call("product2", match="exact", provides=True),
]
pkg_search_mock.assert_has_calls(calls)
assert pkg_search_mock.call_count == 3
# Assert that only the non-provided product is returned
assert res == ["product2"]
def test_not_installed_provides():
"""
Test if the provided packages are correctly excluded when
provided by another missing product.
"""
test_data = {
"not_installed": {"product1": True, "this-provides-product1": True},
"provides-product1": {"product1": True, "this-provides-product1": True},
"provides-product2": {"this-provides-product1": True},
}
pkg_search_mock = MagicMock(
side_effect=[
test_data["not_installed"],
test_data["provides-product1"],
test_data["provides-product2"],
]
)
with patch.dict(product.__salt__, {"pkg.search": pkg_search_mock}):
# pylint: disable-next=protected-access
res = product._get_missing_products(False)
# Expected pkg.search calls
calls = [
call(
"product()",
refresh=False,
match="exact",
provides=True,
not_installed_only=True,
),
call("product1", match="exact", provides=True),
call("this-provides-product1", match="exact", provides=True),
]
pkg_search_mock.assert_has_calls(calls)
assert pkg_search_mock.call_count == 3
# Assert that not both products are returned
assert len(res) == 1
# Assert that the provided product is not returned
assert "product1" not in res
# Assert that the providing product is returned
assert "this-provides-product1" in res
07070100000100000081B400000000000000000000000168EFD664000145FD000000000000000000000000000000000000003500000000susemanager-sls/src/tests/test_state_uyuni_config.py# pylint: disable=missing-module-docstring
import pytest
from unittest.mock import MagicMock, patch, call
from . import mockery
mockery.setup_environment()
# pylint: disable-next=wrong-import-position,unused-import
import sys
# pylint: disable-next=wrong-import-position
from ..states import uyuni_config
# Mock globals
uyuni_config.log = MagicMock()
uyuni_config.__salt__ = {}
uyuni_config.__opts__ = {"test": False}
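# Fault codes used throughout these tests, matching what uyuni_config
# reacts to: 2950 authentication error, 2951 generic error, -213 user not
# found, 2201 system group not found, 2850 org not found, -212 activation
# key not found.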
# pylint: disable-next=missing-class-docstring
class TestManageUser:
def test_user_present_new_user_test(self):
exc = Exception("user not found")
exc.faultCode = 2951
with patch.dict(
uyuni_config.__salt__,
{"uyuni.user_get_details": MagicMock(side_effect=exc)},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.user_present(
"username",
"password",
"mail@mail.com",
"first_name",
"last_name",
False,
["role"],
["group"],
"org_admin_user",
"org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is None
assert result["comment"] == "username would be modified"
assert result["changes"] == {
"login": {"new": "username"},
"password": {"new": "(hidden)"},
"email": {"new": "mail@mail.com"},
"first_name": {"new": "first_name"},
"last_name": {"new": "last_name"},
"roles": {"new": ["role"]},
"system_groups": {"new": ["group"]},
}
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_user_present_new_user_minimal(self):
exc = Exception("user not found")
exc.faultCode = 2951
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(side_effect=exc),
"uyuni.user_create": MagicMock(return_value=True),
},
):
result = uyuni_config.user_present(
"username", "password", "mail@mail.com", "first_name", "last_name"
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is True
assert result["comment"] == "username user successfully modified"
assert result["changes"] == {
"login": {"new": "username"},
"password": {"new": "(hidden)"},
"email": {"new": "mail@mail.com"},
"first_name": {"new": "first_name"},
"last_name": {"new": "last_name"},
}
## verify mock calls
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"username", org_admin_user=None, org_admin_password=None
)
uyuni_config.__salt__["uyuni.user_create"].assert_called_once_with(
email="mail@mail.com",
first_name="first_name",
last_name="last_name",
use_pam_auth=False,
org_admin_password=None,
org_admin_user=None,
password="password",
login="username",
)
def test_user_present_new_user_complete(self):
exc = Exception("user not found")
exc.faultCode = 2951
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(side_effect=exc),
"uyuni.user_create": MagicMock(return_value=True),
"uyuni.user_add_role": MagicMock(return_value=True),
"uyuni.user_add_assigned_system_groups": MagicMock(return_value=1),
},
):
result = uyuni_config.user_present(
"username",
"password",
"mail@mail.com",
"first_name",
"last_name",
False,
["role"],
["group"],
"org_admin_user",
"org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is True
assert result["comment"] == "username user successfully modified"
assert result["changes"] == {
"login": {"new": "username"},
"password": {"new": "(hidden)"},
"email": {"new": "mail@mail.com"},
"first_name": {"new": "first_name"},
"last_name": {"new": "last_name"},
"roles": {"new": ["role"]},
"system_groups": {"new": ["group"]},
}
## verify mock calls
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.user_create"].assert_called_once_with(
email="mail@mail.com",
first_name="first_name",
last_name="last_name",
use_pam_auth=False,
org_admin_password="org_admin_password",
org_admin_user="org_admin_user",
password="password",
login="username",
)
uyuni_config.__salt__["uyuni.user_add_role"].assert_called_once_with(
"username",
role="role",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.user_add_assigned_system_groups"
].assert_called_once_with(
login="username",
server_group_names=["group"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_user_present_update_user(self):
exc = Exception("user not found")
exc.faultCode = 2950
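        # The second user_get_details call (made with the new password)
        # raising an auth fault tells the state the password differs, so
        # it is reported in the changes below.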
current_user = {
"uui": "username",
"email": "mail@mail.com",
"first_name": "first",
"last_name": "last",
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(side_effect=[current_user, exc]),
"uyuni.user_list_roles": MagicMock(return_value=["role1", "role2"]),
"uyuni.user_list_assigned_system_groups": MagicMock(
return_value=[{"name": "group1"}, {"name": "group2"}]
),
"uyuni.user_set_details": MagicMock(return_value=True),
"uyuni.user_remove_role": MagicMock(return_value=True),
"uyuni.user_add_role": MagicMock(return_value=True),
"uyuni.user_remove_assigned_system_groups": MagicMock(return_value=1),
"uyuni.user_add_assigned_system_groups": MagicMock(return_value=1),
},
):
result = uyuni_config.user_present(
"username",
"new_password",
"new_mail@mail.com",
"new_first",
"new_last",
False,
["role1", "role3"],
["group2", "group3"],
"org_admin_user",
"org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is True
assert result["comment"] == "username user successfully modified"
assert result["changes"] == {
"password": {"new": "(hidden)", "old": "(hidden)"},
"email": {"new": "new_mail@mail.com", "old": "mail@mail.com"},
"first_name": {"new": "new_first", "old": "first"},
"last_name": {"new": "new_last", "old": "last"},
"roles": {"new": ["role1", "role3"], "old": ["role1", "role2"]},
"system_groups": {
"new": ["group2", "group3"],
"old": ["group1", "group2"],
},
}
## verify mock calls
uyuni_config.__salt__["uyuni.user_get_details"].assert_has_calls(
[
call(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
),
call("username", "new_password"),
]
)
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.user_list_assigned_system_groups"
].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.user_set_details"].assert_called_once_with(
email="new_mail@mail.com",
first_name="new_first",
last_name="new_last",
org_admin_password="org_admin_password",
org_admin_user="org_admin_user",
password="new_password",
login="username",
)
uyuni_config.__salt__["uyuni.user_remove_role"].assert_called_once_with(
"username",
role="role2",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.user_add_role"].assert_called_once_with(
"username",
role="role3",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.user_remove_assigned_system_groups"
].assert_called_once_with(
login="username",
server_group_names=["group1"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.user_add_assigned_system_groups"
].assert_called_once_with(
login="username",
server_group_names=["group3"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_user_absent_auth_error(self):
exc = Exception("Auth error")
exc.faultCode = 2950
with patch.dict(
uyuni_config.__salt__,
{"uyuni.user_get_details": MagicMock(side_effect=exc)},
):
result = uyuni_config.user_absent(
"username", "org_admin_user", "org_admin_password"
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is False
assert (
result["comment"]
== "Error deleting user (organization credentials error) 'username': Auth error"
)
assert result["changes"] == {}
    def test_user_absent_user_not_exists(self):
exc = Exception("User not found")
exc.faultCode = -213
with patch.dict(
uyuni_config.__salt__,
{"uyuni.user_get_details": MagicMock(side_effect=exc)},
):
result = uyuni_config.user_absent(
"username", "org_admin_user", "org_admin_password"
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is True
assert result["comment"] == "username is already absent"
assert result["changes"] == {}
def test_user_absent_generic_error(self):
exc = Exception("generic error")
exc.faultCode = 2951
with patch.dict(
uyuni_config.__salt__,
{"uyuni.user_get_details": MagicMock(side_effect=exc)},
):
with pytest.raises(Exception) as e:
uyuni_config.user_absent(
"username", "org_admin_user", "org_admin_password"
)
assert e.value.faultCode == 2951
assert e.value.args[0] == "generic error"
def test_user_absent_exists_test(self):
current_user = {
"uui": "username",
"email": "mail@mail.com",
"first_name": "first",
"last_name": "last",
}
with patch.dict(
uyuni_config.__salt__,
{"uyuni.user_get_details": MagicMock(return_value=current_user)},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.user_absent(
"username", "org_admin_user", "org_admin_password"
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is None
assert result["comment"] == "username would be deleted"
assert result["changes"] == {
"login": {"old": "username"},
"email": {"old": "mail@mail.com"},
"first_name": {"old": "first"},
"last_name": {"old": "last"},
}
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_user_absent_exist_user(self):
current_user = {
"uui": "username",
"email": "mail@mail.com",
"first_name": "first",
"last_name": "last",
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(return_value=current_user),
"uyuni.user_delete": MagicMock(return_value=True),
},
):
result = uyuni_config.user_absent(
"username", "org_admin_user", "org_admin_password"
)
assert result is not None
assert result["name"] == "username"
assert result["result"] is True
assert result["comment"] == "User username has been deleted"
assert result["changes"] == {
"login": {"old": "username"},
"email": {"old": "mail@mail.com"},
"first_name": {"old": "first"},
"last_name": {"old": "last"},
}
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.user_delete"].assert_called_once_with(
"username",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
# pylint: disable-next=missing-class-docstring
class TestManageUserChannels:
def test_user_channels_org_admin(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_list_roles": MagicMock(return_value=["channel_admin"]),
"uyuni.channel_list_manageable_channels": MagicMock(),
"uyuni.channel_list_my_channels": MagicMock(),
},
):
result = uyuni_config.user_channels(
"username",
"password",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert not result["result"]
assert result["changes"] == {}
assert "org_admin" in result["comment"]
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username", password="password"
)
uyuni_config.__salt__[
"uyuni.channel_list_manageable_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_list_my_channels"
].assert_called_once_with("username", "password")
def test_user_channels_channel_admin(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_list_roles": MagicMock(return_value=["channel_admin"]),
"uyuni.channel_list_manageable_channels": MagicMock(),
"uyuni.channel_list_my_channels": MagicMock(),
},
):
result = uyuni_config.user_channels(
"username",
"password",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert not result["result"]
assert result["changes"] == {}
assert "channel_admin" in result["comment"]
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username", password="password"
)
uyuni_config.__salt__[
"uyuni.channel_list_manageable_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_list_my_channels"
].assert_called_once_with("username", "password")
def test_user_channels_add_all(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_list_roles": MagicMock(return_value=[]),
"uyuni.channel_list_manageable_channels": MagicMock(return_value=[]),
"uyuni.channel_list_my_channels": MagicMock(return_value=[]),
"uyuni.channel_software_set_user_manageable": MagicMock(),
"uyuni.channel_software_set_user_subscribable": MagicMock(),
},
):
result = uyuni_config.user_channels(
"username",
"password",
manageable_channels=["manage1"],
subscribable_channels=["subscribe1"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"]
assert result["changes"] == {
"manageable_channels": {"manage1": True},
"subscribable_channels": {"subscribe1": True},
}
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username", password="password"
)
uyuni_config.__salt__[
"uyuni.channel_list_manageable_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_list_my_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_software_set_user_manageable"
].assert_called_once_with(
"manage1", "username", True, "org_admin_user", "org_admin_password"
)
uyuni_config.__salt__[
"uyuni.channel_software_set_user_subscribable"
].assert_called_once_with(
"subscribe1", "username", True, "org_admin_user", "org_admin_password"
)
def test_user_channels_no_changes(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_list_roles": MagicMock(return_value=[]),
"uyuni.channel_list_manageable_channels": MagicMock(
return_value=[{"label": "manage1"}]
),
"uyuni.channel_list_my_channels": MagicMock(
return_value=[{"label": "subscribe1"}]
),
"uyuni.channel_software_set_user_manageable": MagicMock(),
"uyuni.channel_software_set_user_subscribable": MagicMock(),
},
):
result = uyuni_config.user_channels(
"username",
"password",
manageable_channels=["manage1"],
subscribable_channels=["subscribe1"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"]
assert result["changes"] == {}
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username", password="password"
)
uyuni_config.__salt__[
"uyuni.channel_list_manageable_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_list_my_channels"
].assert_called_once_with("username", "password")
def test_user_channels_managed_subscribe_change(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_list_roles": MagicMock(return_value=[]),
"uyuni.channel_list_manageable_channels": MagicMock(
return_value=[{"label": "manage1"}]
),
"uyuni.channel_list_my_channels": MagicMock(
return_value=[{"label": "manage1"}]
),
"uyuni.channel_software_set_user_manageable": MagicMock(),
"uyuni.channel_software_set_user_subscribable": MagicMock(),
},
):
result = uyuni_config.user_channels(
"username",
"password",
manageable_channels=[],
subscribable_channels=["manage1"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "username"
assert result["result"]
assert result["changes"] == {
"manageable_channels": {"manage1": False},
"subscribable_channels": {"manage1": True},
}
uyuni_config.__salt__["uyuni.user_list_roles"].assert_called_once_with(
"username", password="password"
)
uyuni_config.__salt__[
"uyuni.channel_list_manageable_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_list_my_channels"
].assert_called_once_with("username", "password")
uyuni_config.__salt__[
"uyuni.channel_software_set_user_manageable"
].assert_called_once_with(
"manage1", "username", False, "org_admin_user", "org_admin_password"
)
uyuni_config.__salt__[
"uyuni.channel_software_set_user_subscribable"
].assert_called_once_with(
"manage1", "username", True, "org_admin_user", "org_admin_password"
)
# pylint: disable-next=missing-class-docstring
class TestManageGroups:
def test_group_present_new_group_test_no_systems(self):
exc = Exception("Group not found")
exc.faultCode = 2201
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(side_effect=exc),
"uyuni.master_select_minions": MagicMock(),
"uyuni.systems_get_minion_id_map": MagicMock(),
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.group_present(
"my_group",
"my group description",
target="*http*",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"] is None
assert result["comment"] == "my_group would be updated"
assert result["changes"] == {
"description": {"new": "my group description"},
"name": {"new": "my_group"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.master_select_minions"
].assert_called_once_with("*http*", "glob")
uyuni_config.__salt__[
"uyuni.systems_get_minion_id_map"
].assert_called_once_with("org_admin_user", "org_admin_password")
def test_group_present_new_group_test(self):
exc = Exception("Group not found")
exc.faultCode = 2201
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(side_effect=exc),
"uyuni.master_select_minions": MagicMock(
return_value={"minions": ["my_minion_1", "my_minion_2"]}
),
"uyuni.systems_get_minion_id_map": MagicMock(
return_value={"my_minion_1": "10001"}
),
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.group_present(
"my_group",
"my group description",
target="*http*",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"] is None
assert result["comment"] == "my_group would be updated"
assert result["changes"] == {
"description": {"new": "my group description"},
"systems": {"new": ["10001"]},
"name": {"new": "my_group"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.master_select_minions"
].assert_called_once_with("*http*", "glob")
uyuni_config.__salt__[
"uyuni.systems_get_minion_id_map"
].assert_called_once_with("org_admin_user", "org_admin_password")
def test_group_present_new_group(self):
exc = Exception("Group not found")
exc.faultCode = 2201
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(side_effect=exc),
"uyuni.master_select_minions": MagicMock(
return_value={"minions": ["my_minion_1", "my_minion_2"]}
),
"uyuni.systems_get_minion_id_map": MagicMock(
return_value={"my_minion_1": "10001"}
),
"uyuni.systemgroup_create": MagicMock(),
"uyuni.systemgroup_add_remove_systems": MagicMock(),
},
):
result = uyuni_config.group_present(
"my_group",
"my group description",
target="*http*",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"] is True
assert result["comment"] == "my_group successfully updated"
assert result["changes"] == {
"description": {"new": "my group description"},
"systems": {"new": ["10001"]},
"name": {"new": "my_group"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.master_select_minions"
].assert_called_once_with("*http*", "glob")
uyuni_config.__salt__[
"uyuni.systems_get_minion_id_map"
].assert_called_once_with("org_admin_user", "org_admin_password")
uyuni_config.__salt__["uyuni.systemgroup_create"].assert_called_once_with(
"my_group",
"my group description",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.systemgroup_add_remove_systems"
].assert_called_once_with(
"my_group",
True,
["10001"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_group_present_update_group(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(
return_value={"description": "old description", "name": "my_group"}
),
"uyuni.systemgroup_list_systems": MagicMock(
return_value=[{"id": "10001"}, {"id": "10003"}]
),
"uyuni.master_select_minions": MagicMock(
return_value={
"minions": ["my_minion_1", "my_minion_2", "my_minion_4"]
}
),
"uyuni.systems_get_minion_id_map": MagicMock(
return_value={"my_minion_1": "10001", "my_minion_2": "10002"}
),
"uyuni.systemgroup_update": MagicMock(),
"uyuni.systemgroup_add_remove_systems": MagicMock(),
},
):
result = uyuni_config.group_present(
"my_group",
"my group description",
target="*http*",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"]
assert result["comment"] == "my_group successfully updated"
assert result["changes"] == {
"description": {
"new": "my group description",
"old": "old description",
},
"systems": {"new": ["10001", "10002"], "old": ["10001", "10003"]},
}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.systemgroup_list_systems"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.master_select_minions"
].assert_called_once_with("*http*", "glob")
uyuni_config.__salt__[
"uyuni.systems_get_minion_id_map"
].assert_called_once_with("org_admin_user", "org_admin_password")
uyuni_config.__salt__["uyuni.systemgroup_update"].assert_called_once_with(
"my_group",
"my group description",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.systemgroup_add_remove_systems"
].assert_has_calls(
[
call(
"my_group",
False,
["10003"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
),
call(
"my_group",
True,
["10002"],
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
),
]
)
def test_group_absent_success_test(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(
return_value={"description": "description", "name": "my_group"}
)
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.group_absent(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"] is None
assert result["comment"] == "my_group would be removed"
assert result["changes"] == {}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_group_absent_success(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_get_details": MagicMock(
return_value={"description": "description", "name": "my_group"}
),
"uyuni.systemgroup_delete": MagicMock(return_value=True),
},
):
result = uyuni_config.group_absent(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"]
assert result["comment"] == "Group my_group has been deleted"
assert result["changes"] == {
"description": {"old": "description"},
"name": {"old": "my_group"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.systemgroup_delete"].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_group_absent_already_removed(self):
exc = Exception("Group not found")
exc.faultCode = 2201
with patch.dict(
uyuni_config.__salt__,
{"uyuni.systemgroup_get_details": MagicMock(side_effect=exc)},
):
result = uyuni_config.group_absent(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "my_group"
assert result["result"]
assert result["comment"] == "my_group is already absent"
assert result["changes"] == {}
uyuni_config.__salt__[
"uyuni.systemgroup_get_details"
].assert_called_once_with(
"my_group",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
# pylint: disable-next=missing-class-docstring
class TestManageOrgs:
def test_org_present_new_org_test(self):
exc = Exception("org not found")
exc.faultCode = 2850
with patch.dict(
uyuni_config.__salt__, {"uyuni.org_get_details": MagicMock(side_effect=exc)}
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.org_present(
"my_org",
"org_admin_user",
"org_admin_password",
"First Name",
"Last Name",
"email@email.com",
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"] is None
assert result["comment"] == "my_org would be updated"
assert result["changes"] == {
"email": {"new": "email@email.com"},
"first_name": {"new": "First Name"},
"last_name": {"new": "Last Name"},
"org_admin_user": {"new": "org_admin_user"},
"org_name": {"new": "my_org"},
"pam": {"new": False},
}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
def test_org_present_new_org(self):
exc = Exception("org not found")
exc.faultCode = 2850
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_get_details": MagicMock(side_effect=exc),
"uyuni.org_create": MagicMock(),
},
):
result = uyuni_config.org_present(
"my_org",
"org_admin_user",
"org_admin_password",
"First Name",
"Last Name",
"email@email.com",
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"]
assert result["comment"] == "my_org org successfully modified"
assert result["changes"] == {
"email": {"new": "email@email.com"},
"first_name": {"new": "First Name"},
"last_name": {"new": "Last Name"},
"org_admin_user": {"new": "org_admin_user"},
"org_name": {"new": "my_org"},
"pam": {"new": False},
}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_create"].assert_called_once_with(
name="my_org",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
first_name="First Name",
last_name="Last Name",
email="email@email.com",
admin_user="admin_user",
admin_password="admin_password",
pam=False,
)
def test_org_present_update_org(self):
current_user = {
"uui": "org_admin_user",
"email": "old_mail@mail.com",
"first_name": "first",
"last_name": "last",
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_get_details": MagicMock(
return_value={"id": 100, "name": "my_org"}
),
"uyuni.user_get_details": MagicMock(return_value=current_user),
"uyuni.user_set_details": MagicMock(),
},
):
result = uyuni_config.org_present(
"my_org",
"org_admin_user",
"org_admin_password",
"First Name",
"Last Name",
"email@email.com",
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"]
assert result["comment"] == "my_org org successfully modified"
assert result["changes"] == {
"email": {"new": "email@email.com", "old": "old_mail@mail.com"},
"first_name": {"new": "First Name", "old": "first"},
"last_name": {"new": "Last Name", "old": "last"},
}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"org_admin_user",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__["uyuni.user_set_details"].assert_called_once_with(
login="org_admin_user",
password="org_admin_password",
email="email@email.com",
first_name="First Name",
last_name="Last Name",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_org_present_no_changes(self):
current_user = {
"uui": "org_admin_user",
"email": "email@email.com",
"first_name": "First Name",
"last_name": "Last Name",
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_get_details": MagicMock(
return_value={"id": 100, "name": "my_org"}
),
"uyuni.user_get_details": MagicMock(return_value=current_user),
"uyuni.user_set_details": MagicMock(),
},
):
result = uyuni_config.org_present(
"my_org",
"org_admin_user",
"org_admin_password",
"First Name",
"Last Name",
"email@email.com",
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"]
assert result["comment"] == "my_org is already in the desired state"
assert result["changes"] == {}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"org_admin_user",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
def test_org_absent_success_test(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_get_details": MagicMock(
return_value={"id": 100, "name": "my_org"}
)
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.org_absent(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"] is None
assert result["comment"] == "my_org would be removed"
assert result["changes"] == {}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
def test_org_absent_success(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_get_details": MagicMock(
return_value={"id": 100, "name": "my_org"}
),
"uyuni.org_delete": MagicMock(),
},
):
result = uyuni_config.org_absent(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"]
assert result["comment"] == "Org my_org has been deleted"
assert result["changes"] == {"name": {"old": "my_org"}}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_delete"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
def test_org_absent_not_present(self):
exc = Exception("org not found")
exc.faultCode = 2850
with patch.dict(
uyuni_config.__salt__, {"uyuni.org_get_details": MagicMock(side_effect=exc)}
):
result = uyuni_config.org_absent(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
assert result is not None
assert result["name"] == "my_org"
assert result["result"]
assert result["comment"] == "my_org is already absent"
assert result["changes"] == {}
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
# pylint: disable-next=missing-class-docstring
class TestManageOrgsTrust:
def test_org_trust_test(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_trust_list_trusts": MagicMock(
return_value=[
{"orgId": 2, "orgName": "new_org_1", "trustEnabled": True},
{"orgId": 3, "orgName": "new_org_2", "trustEnabled": False},
]
),
"uyuni.org_get_details": MagicMock(
return_value={"id": 1, "name": "my_org"}
),
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.org_trust(
"state_name",
"my_org",
["new_org_1", "new_org_2"],
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "state_name"
assert result["result"] is None
assert result["comment"] == "my_org would be created"
assert result["changes"] == {"new_org_2": {"new": True, "old": None}}
uyuni_config.__salt__[
"uyuni.org_trust_list_trusts"
].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
def test_org_trust_update(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_trust_list_trusts": MagicMock(
return_value=[
{"orgId": 2, "orgName": "new_org_1", "trustEnabled": True},
{"orgId": 3, "orgName": "new_org_2", "trustEnabled": False},
{"orgId": 4, "orgName": "new_org_3", "trustEnabled": True},
]
),
"uyuni.org_get_details": MagicMock(
return_value={"id": 1, "name": "my_org"}
),
"uyuni.org_trust_add_trust": MagicMock(return_value=True),
"uyuni.org_trust_remove_trust": MagicMock(return_value=True),
},
):
result = uyuni_config.org_trust(
"state_name",
"my_org",
["new_org_1", "new_org_2"],
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "state_name"
assert result["result"]
assert result["comment"] == "Org 'my_org' trusts successfully modified"
assert result["changes"] == {
"new_org_2": {"new": True, "old": None},
"new_org_3": {"new": None, "old": True},
}
uyuni_config.__salt__[
"uyuni.org_trust_list_trusts"
].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_trust_add_trust"].assert_called_once_with(
1, 3, admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__[
"uyuni.org_trust_remove_trust"
].assert_called_once_with(
1, 4, admin_user="admin_user", admin_password="admin_password"
)
def test_org_trust_no_changes(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.org_trust_list_trusts": MagicMock(
return_value=[
{"orgId": 2, "orgName": "new_org_1", "trustEnabled": True},
{"orgId": 3, "orgName": "new_org_2", "trustEnabled": True},
{"orgId": 4, "orgName": "new_org_3", "trustEnabled": False},
]
),
"uyuni.org_get_details": MagicMock(
return_value={"id": 1, "name": "my_org"}
),
},
):
result = uyuni_config.org_trust(
"state_name",
"my_org",
["new_org_1", "new_org_2"],
admin_user="admin_user",
admin_password="admin_password",
)
assert result is not None
assert result["name"] == "state_name"
assert result["result"]
assert result["comment"] == "my_org is already in the desired state"
assert result["changes"] == {}
uyuni_config.__salt__[
"uyuni.org_trust_list_trusts"
].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
uyuni_config.__salt__["uyuni.org_get_details"].assert_called_once_with(
"my_org", admin_user="admin_user", admin_password="admin_password"
)
# pylint: disable-next=missing-class-docstring
class TestUyuniActivationKeys:
MINIMAL_AK_PRESENT = {
"name": "ak",
"description": "ak description",
"org_admin_user": "admin",
"org_admin_password": "admin",
}
FULL_AK_PRESENT = {
**MINIMAL_AK_PRESENT,
"base_channel": "sles15SP2",
"usage_limit": 10,
"contact_method": "ssh-push",
"system_types": ["virtualization_host"],
"universal_default": True,
"child_channels": ["sles15SP2-tools"],
"configuration_channels": ["my-channel"],
"packages": [{"name": "vim"}, {"name": "emacs", "arch": "x86_64"}],
"server_groups": ["my-group"],
"configure_after_registration": True,
}
ORG_USER_DETAILS = {"org_id": 1}
ALL_GROUPS = [{"name": "my-group", "id": 1}, {"name": "old_group", "id": 2}]
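    # Activation keys are addressed by their org-prefixed form: with
    # org_id 1 the key "ak" becomes "1-ak" in results and RPC calls.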
def test_ak_present_create_minimal_data(self):
exc = Exception("ak not found")
exc.faultCode = -212
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(side_effect=exc),
"uyuni.activation_key_create": MagicMock(),
"uyuni.activation_key_set_details": MagicMock(),
},
):
result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak activation key successfully modified"
assert result["changes"] == {
"description": {"new": "ak description"},
"base_channel": {"new": ""},
"usage_limit": {"new": 0},
"universal_default": {"new": False},
"contact_method": {"new": "default"},
"configure_after_registration": {"new": False},
"key": {"new": "1-ak"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
call_values = {
"description": self.MINIMAL_AK_PRESENT["description"],
"key": self.MINIMAL_AK_PRESENT["name"],
"base_channel_label": "",
"usage_limit": 0,
"system_types": [],
"universal_default": False,
"org_admin_user": self.MINIMAL_AK_PRESENT["org_admin_user"],
"org_admin_password": self.MINIMAL_AK_PRESENT["org_admin_password"],
}
uyuni_config.__salt__[
"uyuni.activation_key_create"
].assert_called_once_with(**call_values)
uyuni_config.__salt__[
"uyuni.activation_key_set_details"
].assert_called_once_with(
"1-ak",
contact_method="default",
usage_limit=0,
org_admin_user="admin",
org_admin_password="admin",
)
def test_ak_present_create_full_data(self):
exc = Exception("ak not found")
exc.faultCode = -212
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(side_effect=exc),
"uyuni.activation_key_create": MagicMock(),
"uyuni.activation_key_set_details": MagicMock(),
"uyuni.activation_key_add_child_channels": MagicMock(),
"uyuni.activation_key_add_server_groups": MagicMock(),
"uyuni.activation_key_add_packages": MagicMock(),
"uyuni.activation_key_enable_config_deployment": MagicMock(),
"uyuni.activation_key_set_config_channels": MagicMock(),
},
):
result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak activation key successfully modified"
assert result["changes"] == {
"description": {"new": "ak description"},
"base_channel": {"new": "sles15SP2"},
"usage_limit": {"new": 10},
"universal_default": {"new": True},
"contact_method": {"new": "ssh-push"},
"system_types": {"new": ["virtualization_host"]},
"child_channels": {"new": ["sles15SP2-tools"]},
"server_groups": {"new": ["my-group"]},
"packages": {
"new": [{"name": "vim"}, {"name": "emacs", "arch": "x86_64"}]
},
"configure_after_registration": {"new": True},
"configuration_channels": {"new": ["my-channel"]},
"key": {"new": "1-ak"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_create"
].assert_called_once_with(
description=self.FULL_AK_PRESENT["description"],
key=self.FULL_AK_PRESENT["name"],
base_channel_label=self.FULL_AK_PRESENT["base_channel"],
usage_limit=self.FULL_AK_PRESENT["usage_limit"],
system_types=self.FULL_AK_PRESENT["system_types"],
universal_default=self.FULL_AK_PRESENT["universal_default"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_set_details"
].assert_called_once_with(
"1-ak",
contact_method=self.FULL_AK_PRESENT["contact_method"],
usage_limit=self.FULL_AK_PRESENT["usage_limit"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_child_channels"
].assert_called_once_with(
"1-ak",
self.FULL_AK_PRESENT["child_channels"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_server_groups"
].assert_called_once_with(
"1-ak",
[1],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_packages"
].assert_called_once_with(
"1-ak",
self.FULL_AK_PRESENT["packages"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_enable_config_deployment"
].assert_called_once_with(
"1-ak",
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_set_config_channels"
].assert_called_once_with(
["1-ak"],
config_channel_label=self.FULL_AK_PRESENT["configuration_channels"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
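# Dry run (test=True): the state reports the pending changes with result None and a
# "would be updated" comment; only the three lookup calls are mocked, so no
# create/set module may be invoked.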
def test_ak_present_create_full_data_test(self):
exc = Exception("ak not found")
exc.faultCode = -212
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(side_effect=exc),
},
):
with patch.dict(uyuni_config.__opts__, {"test": True}):
result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"] is None
assert result["comment"] == "1-ak would be updated"
assert result["changes"] == {
"description": {"new": "ak description"},
"base_channel": {"new": "sles15SP2"},
"usage_limit": {"new": 10},
"universal_default": {"new": True},
"contact_method": {"new": "ssh-push"},
"system_types": {"new": ["virtualization_host"]},
"child_channels": {"new": ["sles15SP2-tools"]},
"server_groups": {"new": ["my-group"]},
"packages": {
"new": [{"name": "vim"}, {"name": "emacs", "arch": "x86_64"}]
},
"configure_after_registration": {"new": True},
"configuration_channels": {"new": ["my-channel"]},
"key": {"new": "1-ak"},
}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
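# The key already exists and only the description differs, so "changes" contains just
# the description while activation_key_set_details is still called with the complete
# set of (default) values.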
def test_ak_present_update_minimal_data(self):
return_ak = {
"description": "old description",
"base_channel_label": "none",
"usage_limit": 0,
"universal_default": False,
"contact_method": "default",
"entitlements": [],
"child_channel_labels": [],
"server_group_ids": [],
"packages": [],
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(return_value=return_ak),
"uyuni.activation_key_check_config_deployment": MagicMock(
return_value=False
),
"uyuni.activation_key_list_config_channels": MagicMock(return_value=[]),
"uyuni.activation_key_set_details": MagicMock(),
},
):
result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak activation key successfully modified"
assert result["changes"] == {
"description": {"new": "ak description", "old": "old description"}
}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_check_config_deployment"
].assert_called_once_with("1-ak", "admin", "admin")
uyuni_config.__salt__[
"uyuni.activation_key_list_config_channels"
].assert_called_once_with("1-ak", "admin", "admin")
uyuni_config.__salt__[
"uyuni.activation_key_set_details"
].assert_called_once_with(
"1-ak",
description=self.MINIMAL_AK_PRESENT["description"],
contact_method="default",
base_channel_label="",
usage_limit=0,
universal_default=False,
org_admin_user=self.MINIMAL_AK_PRESENT["org_admin_user"],
org_admin_password=self.MINIMAL_AK_PRESENT["org_admin_password"],
)
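# The existing key already matches the minimal input: the state reports it as already
# in the desired state and returns an empty changes dictionary.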
def test_ak_present_no_changes_minimal_data(self):
return_ak = {
"description": self.MINIMAL_AK_PRESENT["description"],
"base_channel_label": "none",
"usage_limit": 0,
"universal_default": False,
"contact_method": "default",
"entitlements": [],
"child_channel_labels": [],
"server_group_ids": [],
"packages": [],
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(return_value=return_ak),
"uyuni.activation_key_check_config_deployment": MagicMock(
return_value=False
),
"uyuni.activation_key_list_config_channels": MagicMock(return_value=[]),
"uyuni.activation_key_set_details": MagicMock(),
},
):
result = uyuni_config.activation_key_present(**self.MINIMAL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak is already in the desired state"
assert result["changes"] == {}
def test_ak_present_update_full_data(self):
return_ak = {
"description": "old description",
"base_channel_label": "base_channel",
"usage_limit": 0,
"universal_default": False,
"contact_method": "default",
"entitlements": ["container_build_host"],
"child_channel_labels": ["child_channel"],
"server_group_ids": [2],
"packages": [{"name": "pkg", "arch": "x86_63"}],
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(return_value=return_ak),
"uyuni.activation_key_check_config_deployment": MagicMock(
return_value=False
),
"uyuni.activation_key_list_config_channels": MagicMock(
return_value=[{"label": "old_config"}]
),
"uyuni.activation_key_set_details": MagicMock(),
"uyuni.activation_key_add_entitlements": MagicMock(),
"uyuni.activation_key_remove_entitlements": MagicMock(),
"uyuni.activation_key_add_child_channels": MagicMock(),
"uyuni.activation_key_remove_child_channels": MagicMock(),
"uyuni.activation_key_add_server_groups": MagicMock(),
"uyuni.activation_key_remove_server_groups": MagicMock(),
"uyuni.activation_key_add_packages": MagicMock(),
"uyuni.activation_key_remove_packages": MagicMock(),
"uyuni.activation_key_enable_config_deployment": MagicMock(),
"uyuni.activation_key_set_config_channels": MagicMock(),
},
):
result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak activation key successfully modified"
assert result["changes"] == {
"description": {"new": "ak description", "old": "old description"},
"base_channel": {"new": "sles15SP2", "old": "base_channel"},
"usage_limit": {"new": 10, "old": 0},
"universal_default": {"new": True, "old": False},
"contact_method": {"new": "ssh-push", "old": "default"},
"system_types": {
"new": ["virtualization_host"],
"old": ["container_build_host"],
},
"child_channels": {
"new": ["sles15SP2-tools"],
"old": ["child_channel"],
},
"server_groups": {"new": ["my-group"], "old": ["old_group"]},
"packages": {
"new": [{"name": "vim"}, {"name": "emacs", "arch": "x86_64"}],
"old": [{"name": "pkg", "arch": "x86_63"}],
},
"configure_after_registration": {"new": True, "old": False},
"configuration_channels": {
"new": ["my-channel"],
"old": ["old_config"],
},
}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_check_config_deployment"
].assert_called_once_with("1-ak", "admin", "admin")
uyuni_config.__salt__[
"uyuni.activation_key_list_config_channels"
].assert_called_once_with("1-ak", "admin", "admin")
uyuni_config.__salt__[
"uyuni.activation_key_set_details"
].assert_called_once_with(
"1-ak",
description=self.FULL_AK_PRESENT["description"],
contact_method=self.FULL_AK_PRESENT["contact_method"],
base_channel_label=self.FULL_AK_PRESENT["base_channel"],
usage_limit=self.FULL_AK_PRESENT["usage_limit"],
universal_default=self.FULL_AK_PRESENT["universal_default"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_entitlements"
].assert_called_once_with(
"1-ak",
self.FULL_AK_PRESENT["system_types"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_remove_entitlements"
].assert_called_once_with(
"1-ak",
["container_build_host"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_child_channels"
].assert_called_once_with(
"1-ak",
self.FULL_AK_PRESENT["child_channels"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_remove_child_channels"
].assert_called_once_with(
"1-ak",
["child_channel"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_server_groups"
].assert_called_once_with(
"1-ak",
[1],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_remove_server_groups"
].assert_called_once_with(
"1-ak",
[2],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_add_packages"
].assert_called_once_with(
"1-ak",
self.FULL_AK_PRESENT["packages"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_remove_packages"
].assert_called_once_with(
"1-ak",
[{"name": "pkg", "arch": "x86_63"}],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_enable_config_deployment"
].assert_called_once_with(
"1-ak",
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
uyuni_config.__salt__[
"uyuni.activation_key_set_config_channels"
].assert_called_once_with(
["1-ak"],
config_channel_label=self.FULL_AK_PRESENT["configuration_channels"],
org_admin_user=self.FULL_AK_PRESENT["org_admin_user"],
org_admin_password=self.FULL_AK_PRESENT["org_admin_password"],
)
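# The existing key already matches the full input (config deployment enabled and the
# configuration channel assigned); none of the modifying modules is mocked, so any
# call to them would fail the test.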
def test_ak_present_no_changes_full_data(self):
return_ak = {
"description": self.FULL_AK_PRESENT["description"],
"base_channel_label": self.FULL_AK_PRESENT["base_channel"],
"usage_limit": self.FULL_AK_PRESENT["usage_limit"],
"universal_default": self.FULL_AK_PRESENT["universal_default"],
"contact_method": self.FULL_AK_PRESENT["contact_method"],
"entitlements": self.FULL_AK_PRESENT["system_types"],
"child_channel_labels": self.FULL_AK_PRESENT["child_channels"],
"server_group_ids": [1],
"packages": self.FULL_AK_PRESENT["packages"],
}
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.systemgroup_list_all_groups": MagicMock(
return_value=self.ALL_GROUPS
),
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(return_value=return_ak),
"uyuni.activation_key_check_config_deployment": MagicMock(
return_value=True
),
"uyuni.activation_key_list_config_channels": MagicMock(
return_value=[{"label": "my-channel"}]
),
},
):
result = uyuni_config.activation_key_present(**self.FULL_AK_PRESENT)
assert result is not None
assert result["name"] == "1-ak"
assert result["result"]
assert result["comment"] == "1-ak is already in the desired state"
assert result["changes"] == {}
uyuni_config.__salt__[
"uyuni.systemgroup_list_all_groups"
].assert_called_once_with("admin", "admin")
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"admin", "admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak", org_admin_user="admin", org_admin_password="admin"
)
uyuni_config.__salt__[
"uyuni.activation_key_check_config_deployment"
].assert_called_once_with("1-ak", "admin", "admin")
uyuni_config.__salt__[
"uyuni.activation_key_list_config_channels"
].assert_called_once_with("1-ak", "admin", "admin")
def test_ak_absent_not_present(self):
exc = Exception("ak not found")
exc.faultCode = -212
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(side_effect=exc),
},
):
result = uyuni_config.activation_key_absent(
"ak",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "ak"
assert result["result"]
assert result["comment"] == "1-ak is already absent"
assert result["changes"] == {}
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"org_admin_user", "org_admin_password"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
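# Removing an existing key: activation_key_delete is called with the org-prefixed id
# "1-ak" and the old id is recorded in the change set.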
def test_ak_absent_present(self):
with patch.dict(
uyuni_config.__salt__,
{
"uyuni.user_get_details": MagicMock(return_value=self.ORG_USER_DETAILS),
"uyuni.activation_key_get_details": MagicMock(return_value={}),
"uyuni.activation_key_delete": MagicMock(),
},
):
result = uyuni_config.activation_key_absent(
"ak",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
assert result is not None
assert result["name"] == "ak"
assert result["result"]
assert result["comment"] == "Activation Key 1-ak has been deleted"
assert result["changes"] == {"id": {"old": "1-ak"}}
uyuni_config.__salt__["uyuni.user_get_details"].assert_called_once_with(
"org_admin_user", "org_admin_password"
)
uyuni_config.__salt__[
"uyuni.activation_key_get_details"
].assert_called_once_with(
"1-ak",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
uyuni_config.__salt__[
"uyuni.activation_key_delete"
].assert_called_once_with(
"1-ak",
org_admin_user="org_admin_user",
org_admin_password="org_admin_password",
)
07070100000101000081B400000000000000000000000168EFD66400000043000000000000000000000000000000000000002A00000000susemanager-sls/susemanager-sls-rpmlintrcaddFilter("non-executable-script .*/usr/share/susemanager/salt.*")
07070100000102000081B400000000000000000000000168EFD6640000E529000000000000000000000000000000000000002800000000susemanager-sls/susemanager-sls.changes-------------------------------------------------------------------
Wed Jul 30 13:37:08 CEST 2025 - michael.calmer@suse.com
- version 5.1.12-0
* Fix using 3rd party intermediate certificates
(bsc#1247083)
-------------------------------------------------------------------
Tue Jun 17 19:01:59 CEST 2025 - marina.latini@suse.com
- version 5.1.11-0
* Read CWD for remote commands from pillar (bsc#1238173)
* For GPG keys using file URL, check if the file exists before
importing it
* mgr-events: use sha256 instead of md5 to determine queue
* Fix openEuler bootstrap so it is identified as RedHat family
* Optimize sap module to prevent high IO workload (bsc#1241455)
* Provide token through a query parameter instead of relying on a
plugin for DNF versions supporting it (bsc#1241307)
* Define Apache-2.0 as the license for Salt modules
* Rename references to old SUSE Manager name
-------------------------------------------------------------------
Mon May 12 23:06:47 CEST 2025 - marina.latini@suse.com
- version 5.1.10-0
* Replace server hostname in chained proxy config (bsc#1236166)
* Move Prometheus PostgreSQL exporter configuration to
the persisted volume /etc/sysconfig (bsc#1239903)
-------------------------------------------------------------------
Fri Apr 11 17:28:26 CEST 2025 - marina.latini@suse.com
- version 5.1.9-0
* Use the new product name SUSE Multi-Linux Manager
* Change uptodate recurring action to use dist-upgrade
instead of upgrade for Deb systems (bsc#1237060)
* Adjust SLS files for SUSE Linux Enterprise 15 SP7 and other
systems running higher Python version
* Display raw playbook output from Ansible in event details
* Add proxy onboarding feature
-------------------------------------------------------------------
Thu Mar 27 16:36:06 CET 2025 - marina.latini@suse.com
- version 5.1.8-0
* Add psycopg2 dependency to susemanager-sls (bsc#1240165)
-------------------------------------------------------------------
Thu Mar 27 07:36:39 CET 2025 - marina.latini@suse.com
- version 5.1.7-0
* mgrutil runner: use mgr-ssl-cert-tool CLI to generate container
configuration (bsc#1240041)
-------------------------------------------------------------------
Mon Mar 17 19:25:01 CET 2025 - marina.latini@suse.com
- version 5.1.6-0
* Use Python3.11 Salt module version
-------------------------------------------------------------------
Fri Feb 28 11:54:23 CET 2025 - rosuna@suse.com
- version 5.1.5-0
* Remove unnecessary Salt Minion upgrade cleanup from highstate
* Collect uname, SAP workloads and container runtime data
on hardware profile update (jsc#SUMA-406)
-------------------------------------------------------------------
Fri Jan 24 13:28:27 CET 2025 - marina.latini@suse.com
- version 5.1.4-0
* Implement SUSE Linux Enterprise Server to
SUSE Linux Enterprise Server for SAP migration
* Remove virtualization features
-------------------------------------------------------------------
Fri Dec 13 15:01:05 CET 2024 - rosuna@suse.com
- version 5.1.3-0
* Prevent warning message for unsigned Debian repositories
when using new deb822 format (bsc#1234251)
-------------------------------------------------------------------
Thu Dec 12 10:08:16 CET 2024 - rosuna@suse.com
- version 5.1.2-0
* suma_minion: prevent issues when calling Salt runners (bsc#1228232)
* Prevent a crash on "reboot_info" module for Liberty 6, RHEL 6 and
clones (bsc#1231404)
* Remove Master public key on bootstrapping to prevent possible issues.
* Fix rebootifneeded state which was missing a variable definition
(bsc#1233426)
* Do not set "Trusted" for Debian repositories when the repo
should be signed
* Improve appstreams context selection (bsc#1231459)
-------------------------------------------------------------------
Mon Oct 14 16:01:34 CEST 2024 - rosuna@suse.com
- version 5.1.1-0
* Implement product migration from RHEL and Clones to SUSE Liberty
Linux
* Bump version to 5.1.0
-------------------------------------------------------------------
Thu Aug 01 09:30:50 CEST 2024 - marina.latini@suse.com
- version 5.0.10-0
* Speed-up mgrutil.remove_ssh_known_host runner (bsc#1223312)
* Start using DEB822 format for repository sources beginning with
Ubuntu 24.04
* Disable transactional-update.timer on SUSE Linux Enterprise
Micro at bootstrap
* sumautil: properly detect bridge interfaces (bsc#1226461)
* Fix typo in directories to clean up when deleting a system
(bsc#1228101)
* Translate GPG URL if URL has server name and client is behind
a proxy (bsc#1223988)
* Fix yum-utils package missing on CentOS7 minions (bsc#1227133)
* Remove reboot from uptodate state, introduce reboot and
rebootifneeded states
* Fix package profile update on CentOS 7 when yum-utils is not
installed (bsc#1227133)
-------------------------------------------------------------------
Tue Jun 18 18:11:01 CEST 2024 - marina.latini@suse.com
- version 5.0.9-0
* Implement IMDSv2 for AWS instance detection (bsc#1226090)
-------------------------------------------------------------------
Mon Jun 10 16:40:05 CEST 2024 - marina.latini@suse.com
- version 5.0.8-0
* Define bootstrap repo data for SL Micro 6.0
* Calculate bootstrap repo path for SL Micro 6.0
* Exclude AppStream information from package profile update for
CentOS 7 and older (bsc#1224476)
* Explicitly remove old venv-minion environment when updating
Python versions
* Fix bootstrapping transactional systems if Salt Minion package
is already installed.
* Avoid sending empty events with reboot_info beacon
* Fix parsing passwords with special characters for PostgreSQL
exporter
* Fix PAYG client detection during Hardware refresh (bsc#1225940)
* Fix hairpin problem in SSH push with tunnel preventing
bootstrapping (bsc#1226098, bsc#1223970)
-------------------------------------------------------------------
Fri May 03 14:42:58 CEST 2024 - marina.latini@suse.com
- version 5.0.7-0
* Fix SUSE Liberty Linux bootstrapping when Zypper is installed
(bsc#1222347)
* Recognize .tbz image type (bsc#1216085)
* Native support for AppSream repositories
* Introduce Salt module for configuring AppStreams
-------------------------------------------------------------------
Thu Apr 04 18:57:37 CEST 2024 - marina.latini@suse.com
- version 5.0.6-0
* Distinguish between different SUSE versions when detecting if a
reboot is needed (bsc#1220903, bsc#1221571)
* Add custom grain module for CPE
* Use java.hostname conf instead of cobbler.host
* Replace Uyuni configuration formula with an API call
* Fix the case of missing requisites on bootstrap (bsc#1220705)
* Avoid issues on reactivating traditional clients as Salt managed
* Don't log dnf needs-restarting output in Salt's log (bsc#1220194)
* Use execution module call to detect client instance flavor
(PAYG/BYOS) in public cloud (bsc#1218805)
* Migrate all Salt versions to Salt Bundle
* Implement state for confidential compute attestation
* Recognize squashfs build results from kiwi (bsc#1216085)
* Switch the default release version to 0 and update the source
URL
-------------------------------------------------------------------
Wed Feb 28 16:59:17 CET 2024 - marina.latini@suse.com
- version 5.0.5-0
* Remove automatic reboot from transactional systems bootstrap (bsc#1218146)
-------------------------------------------------------------------
Fri Feb 16 10:11:46 CET 2024 - rosuna@suse.com
- version 5.0.4-1
* Fix issue with Salt SSH keys for Salt SSH Minions CVE-2023-32189 (bsc#1170848)
-------------------------------------------------------------------
Tue Feb 13 17:21:41 CET 2024 - marina.latini@suse.com
- version 5.0.3-1
* Fix reboot needed detection for SUSE systems
* Change certs/RHN-ORG-TRUSTED-SSL-CERT from symlink into a real file
(bsc#1219577)
* Improve updatestack update in uptodate state
* Add a standalone update-salt state
* Add pillar check to skip reboot_if_needed state
-------------------------------------------------------------------
Mon Jan 29 11:54:33 CET 2024 - rosuna@suse.com
- version 5.0.2-1
* Include reboot required indication for non-SUSE distros
-------------------------------------------------------------------
Tue Jan 16 08:28:35 CET 2024 - jgonzalez@suse.com
- version 5.0.1-1
* improve PAYG instance detection (bsc#1217784)
* Remove includesls macro usage as not relevant anymore
* Change openeuler.sls to a symlink to redhat.sls
* Drop handling of legacy pillar and formula files
* Dynamically load an SELinux policy for "Push via SSH tunnel" for SELinux
enabled clients. This policy allows communication over a custom SSH port.
-------------------------------------------------------------------
Fri Dec 15 17:32:36 CET 2023 - rosuna@suse.com
- version 4.4.10-1
* Configure reboot method for SUSE Linux Enterprise Micro when
applying bootstrap state (bsc#1213981)
* Allow openSUSE Tumbleweed bootstrapping
* Move all files managed by RPM from /srv to /usr/share/susemanager
* Fix the evaluation of mgr_install_flavor_check salt state for
openSUSE
* Add Raspberry Pi OS 12 support
* Revert hardcoded symbolic link and renamed variable for clarity
-------------------------------------------------------------------
Wed Nov 01 19:46:00 CET 2023 - marina.latini@suse.com
- version 4.4.9-1
* Use HTTP for connections to localhost
* Disable dnf_rhui_plugin as it breaks our susemanagerplugin (bsc#1214601)
* Fix susemanagerplugin to not overwrite header fields set by other plugins
* Let the dnf plugin log when a token was set
* Do not install instance-flavor-check tool on openSUSE
-------------------------------------------------------------------
Mon Sep 18 14:41:33 CEST 2023 - rosuna@suse.com
- version 4.4.8-1
* Integrate instance-flavor-check to detect if the instance is PAYG
* do not disable salt-minion on salt-ssh managed clients
* Mask uyuni roster module password on logs
* Don't install product packages on openSUSE clients
* keep original traditional stack tools for RHEL7 RHUI connection
* Retry loading of pillars from DB on connection error (bsc#1214186)
* Use recurse strategy to merge formula pillar with existing pillars
* Prevent product installation from being executed before executing product migration (bsc#1210475)
* Include automatic migration from Salt 3000 to Salt Bundle in highstate
* fix duplicate packages in state
* Fix enabling bundle build via custom info
* Rename internal state 'synccustomall' to 'syncall'
* Recurring custom states
* Do not include pillar_only formulas in highstate
* Allow KiwiNG to be used on SLE12 buildhosts (bsc#1204089)
* Add kiwi supported disk images to be collectable (bsc#1208522)
* disable salt-minion and remove its config file on cleanup (bsc#1209277)
-------------------------------------------------------------------
Wed Apr 19 12:56:27 CEST 2023 - marina.latini@suse.com
- version 4.4.7-1
* To update everything on a Debian system, call dist-upgrade to
be able to install and remove packages
* Add openEuler 22.03 support
* support multiple gpgkey urls for a channel (bsc#1208540)
* make SUSE Addon GPG key available on all instances (bsc#1208540)
* Improve error handling in mgr_events.py (bsc#1208687)
-------------------------------------------------------------------
Tue Feb 21 14:10:36 CET 2023 - jgonzalez@suse.com
- version 4.4.6-1
* get uptime with package profile update
* Fix missing module when bootstrapping transactional systems (bsc#1207792)
* Install the reboot info beacon using a conf file instead of using pillars
* add CPU sockets, threads and total number to standard CPU grains
* Fix mgrnet custom module to be compatible with old Python 2.6 (bsc#1206979) (bsc#1206981)
* Fix current limitation on Action Chains for SLE Micro
* Support SLE Micro migration (bsc#1205011)
* Add data for openSUSE Leap Micro 5.3 and openSUSE MicroOS bootstrapping
-------------------------------------------------------------------
Thu Jan 26 12:27:21 CET 2023 - jgonzalez@suse.com
- version 4.4.5-1
* Prevent possible errors from "mgractionschains" module when there
is no action chain to resume
-------------------------------------------------------------------
Mon Jan 23 08:30:18 CET 2023 - jgonzalez@suse.com
- version 4.4.4-1
* Do not pass server grains to minions (bsc#1207087)
* Reuse DB connection on compiling pillar with suma_minion
* Do not use non-compatible unique filter in old jinja2 (bsc#1206979) (bsc#1206981)
* Fix custom "mgrcompat.module_run" state module to work with Salt 3005.1
* Add missing transactional_update.conf for SLE Micro
* filter out libvirt engine events (bsc#1206146)
* Improve _mgractionchains.conf logs
* install SUSE Liberty v2 GPG key
* Detect bootstrap repository path for SLE Micro (bsc#1206294)
* Fix reboot info beacon installation
* Add state to properly configure the reboot action for transactional systems
* enforce installation of the PTF GPG key package
* Optimize the number of salt calls on minion startup (bsc#1203532)
* Updated logrotate configuration (bsc#1206470)
-------------------------------------------------------------------
Wed Dec 14 14:15:05 CET 2022 - jgonzalez@suse.com
- version 4.4.3-1
* Fix server error while bootstrapping SSH-managed Red Hat-like minion (bsc#1205890)
* drop legacy way to prevent disabling local repos
-------------------------------------------------------------------
Fri Nov 18 15:13:42 CET 2022 - jgonzalez@suse.com
- version 4.4.2-1
* Avoid installing recommended packages from assigned products (bsc#1204330)
* Add beacon to check if a reboot is required in transactional systems
* Manage reboot in transactional update action chain (bsc#1201476)
* Fix kiwi inspect regexp to allow image names with "-" (bsc#1204541)
* Use the actual sudo user home directory for salt ssh
clients on bootstrap and clean up (bsc#1202093)
* dnf repo definition does not support multiline gpgkeys
(bsc#1204444)
* remove forced refresh in channel state as gpg key trust is now
handled in a different way (bsc#1204061)
* import gpg keys directly to prevent using gpg-auto-import-keys
on package operations (bsc#1203580)
* Perform refresh with packages.pkgupdate state (bsc#1203884)
-------------------------------------------------------------------
Wed Sep 28 11:14:52 CEST 2022 - jgonzalez@suse.com
- version 4.4.1-1
* Prevent possible tracebacks on reading postgres opts
with suma_minion salt pillar extension module (bsc#1205255)
* Fix mgrnet availability check
* Remove dependence on Kiwi libraries
* always disable the bootstrap repository, even when
"mgr_disable_local_repos" is set to False
* Use mgrnet.dns_fqdns module to improve FQDN detection (bsc#1199726)
* fix syntax error - remove trailing colon (bsc#1203049)
* Add mgrnet salt module with mgrnet.dns_fqdns function implementation
allowing to get all possible FQDNs from DNS (bsc#1199726)
* Copy grains file with util.mgr_switch_to_venv_minion state apply (bsc#1203056)
* Remove the message 'rpm: command not found' on using Salt SSH
with Debian based systems which have no Salt Bundle
-------------------------------------------------------------------
Wed Jul 27 14:17:08 CEST 2022 - jgonzalez@suse.com
- version 4.3.24-1
* Fix bootstrap issue with Debian 9 caused by missing python3-contextvars (bsc#1201782)
* Fix deploy of SLE Micro CA Certificate (bsc#1200276)
* disable local repos before bootstrap and at highstate (bsc#1191925)
* deploy GPG keys to the clients and define trust in channels (bsc#1199984)
* Enable basic support for Ubuntu 22.04
* Add port parameter to mgrutil.remove_ssh_known_host
* Prevent possible tracebacks on calling module.run from mgrcompat
by setting proper globals with using LazyLoader
-------------------------------------------------------------------
Tue Jun 21 18:39:32 CEST 2022 - jgonzalez@suse.com
- version 4.3.23-1
* Fix bootstrapping for Ubuntu 18.04 with classic Salt package (bsc#1200707)
* create CA certificate symlink on Proxies which might get lost due
to de-installation of the ca package
-------------------------------------------------------------------
Wed Jun 01 11:48:26 CEST 2022 - jgonzalez@suse.com
- version 4.3.22-1
* Use checksum function with reasonable RAM requirements (bsc#1200101)
-------------------------------------------------------------------
Wed Jun 01 10:03:00 CEST 2022 - jgonzalez@suse.com
- version 4.3.21-1
* use RES bootstrap repo as a fallback for Red Hat
downstream OS (bsc#1200087)
-------------------------------------------------------------------
Tue May 31 16:59:26 CEST 2022 - jgonzalez@suse.com
- version 4.3.20-1
* Image checksum was missing type prefix (bsc#1199983)
-------------------------------------------------------------------
Mon May 30 14:59:57 CEST 2022 - jgonzalez@suse.com
- version 4.3.19-1
* Fix missing checksum and add support for qcow2 images (bsc#1199983)
-------------------------------------------------------------------
Fri May 20 00:14:13 CEST 2022 - jgonzalez@suse.com
- version 4.3.18-1
* Add support to packages.pkgremove to deal with duplicated pkg names (bsc#1198686)
* do not install products and gpg keys when performing distupgrade
dry-run (bsc#1199466)
* remove unknown repository flags on EL
-------------------------------------------------------------------
Fri May 06 16:30:23 CEST 2022 - jgonzalez@suse.com
- version 4.3.17-1
* Fix bootstrap repository URL resolution for Yum based clients
with preflight script for Salt SSH
-------------------------------------------------------------------
Wed May 04 15:26:22 CEST 2022 - jgonzalez@suse.com
- version 4.3.16-1
* Use recursive merge for legacy image pillars (bsc#1199157)
* Fix log file name for legacy Kiwi (bsc#1199094)
* Collect logs from docker.build
* add packages.pkgupdate state (bsc#1197507)
* Uninstall the products with no successors after migration
-------------------------------------------------------------------
Fri Apr 22 22:11:02 CEST 2022 - jgonzalez@suse.com
- version 4.3.15-1
* Fix bootstrap repository path resolution for Oracle Linux
-------------------------------------------------------------------
Tue Apr 19 12:13:41 CEST 2022 - jgonzalez@suse.com
- version 4.3.14-1
* Flush uyuni roster cache if the config has changed
* Remove kiwi_info annotations, salt doesn't support them (bsc#1198480)
* implement grains module for mgr_server to expose report database
settings and more
* Build bundle-less images and adapt inspection of such images,
part of the saltboot containerization workflow
* Handle salt bundle in set_proxy.sls
* Accept non standard proxy SSH port
-------------------------------------------------------------------
Mon Apr 04 11:02:56 CEST 2022 - jgonzalez@suse.com
- version 4.3.13-1
* Fix the improper condition for checking if the Salt Bundle
was extracted with the preflight script
-------------------------------------------------------------------
Thu Mar 31 15:55:44 CEST 2022 - jgonzalez@suse.com
- version 4.3.12-1
* Use _arch instead of _host_cpu macro to detect the arch
of the Salt Bundle to be deployed (bsc#1197759)
-------------------------------------------------------------------
Thu Mar 31 12:22:55 CEST 2022 - jgonzalez@suse.com
- version 4.3.11-1
* Use dpkg-deb to extract deb files instead of ar
with salt ssh preflight on Debian based distros
-------------------------------------------------------------------
Fri Mar 11 16:49:07 CET 2022 - jgonzalez@suse.com
- version 4.3.10-1
* Expose SSL certificate check to Salt runner
-------------------------------------------------------------------
Fri Mar 11 15:42:20 CET 2022 - jgonzalez@suse.com
- version 4.3.9-1
* Fix possible traceback in uyuni roster if no ssh port in the DB
* Virtualization fixes for python2
* Fix how the return code is returned in the mgrutil runner (bsc#1194909)
* Use /var/lib/susemanager/formula_data if /srv/susemanager/formula_data is missing.
* Avoid using lscpu -J option in grains (bsc#1195920)
-------------------------------------------------------------------
Mon Feb 21 22:48:33 CET 2022 - jgonzalez@suse.com
- version 4.3.8-1
* Postgres exporter package was renamed
-------------------------------------------------------------------
Tue Feb 15 10:07:24 CET 2022 - jgonzalez@suse.com
- version 4.3.7-1
* Improve `pkgset` beacon by using `salt.cache`
to notify about the changes made while the minion was stopped
* Add dnfnotify plugin support for pkgset beacon
* Handle multiple Kiwi bundles (bsc#1194905)
* fix deprecation warnings
* Implement uyuni roster module for Salt
* enforce correct minion configuration similar to bootstrapping
(bsc#1192510)
* Fix issues running mgr_events on Salt 3004
-------------------------------------------------------------------
Tue Jan 18 14:08:20 CET 2022 - jgonzalez@suse.com
- version 4.3.6-1
* Remove cluster management feature
* Fix dnf plugin path calculation when using Salt Bundle
* Use global import for which_bin in sumautil module
* Get the formula pillar data from the database
* Use flat repositories format for Debian based systems
* Add checking for libvirtd binary to grains.virt module
* Fix errors on calling sed -E ... by force_restart_minion
with action chains
* Fix problem installing/removing packages using action chains
in transactional systems
* Add state for changing proxy
* Enable basic support for Debian 11
-------------------------------------------------------------------
Fri Dec 03 12:33:13 CET 2021 - jgonzalez@suse.com
- version 4.3.5-1
* fix openscap scan with tailoring options (bsc#1192321)
* Align allow_vendor_change pillar name across SLS files
* Use venv-salt-minion instead of salt for docker states
* Fix libvirt engine config destination for Salt Bundle
* Allow "mgr_force_venv_salt_minion" as pillar when bootstrapping
in order to force venv-salt-minion installation
-------------------------------------------------------------------
Tue Nov 16 10:09:40 CET 2021 - jgonzalez@suse.com
- version 4.3.4-1
* Implement using re-activation keys when bootstrapping
* Add missing compressed_hash value from Kiwi inspect (bsc#1191702)
-------------------------------------------------------------------
Fri Nov 05 14:07:05 CET 2021 - jgonzalez@suse.com
- version 4.3.3-1
* revert disabling inaccessible local repos before bootstrapping (bsc#1186405)
* Don't create skeleton /srv/salt/top.sls
* Replace FileNotFoundError by python2-compatible OSError (bsc#1191139)
* Run Prometheus JMX exporter as Java agent (bsc#1184617)
* Fix virt_utils module python 2.6 compatibility (bsc#1191123)
* Update proxy path on minion connection
* deploy certificate on SLE Micro 5.1
* Fix cpuinfo grain and virt_utils state python2 compatibility
(bsc#1191139, bsc#1191123)
* Fix pkgset beacon to work with salt-minion 2016.11.10 (bsc#1189260)
* Add 'flush_cache' flag to 'ansible.playbooks' call (bsc#1190405)
* Update kernel live patch version on minion startup (bsc#1190276)
* Fix virt grain python2 compatibility
* disable inaccessible local repos before bootstrapping (bsc#1186405)
* Fix mgrcompat state module to work with Salt 3003 and 3004
-------------------------------------------------------------------
Fri Sep 17 12:17:49 CEST 2021 - jgonzalez@suse.com
- version 4.3.2-1
* don't use libvirt API to get its version for the virt features grain
* implement package locking for salt minions
-------------------------------------------------------------------
Mon Aug 09 11:11:17 CEST 2021 - jgonzalez@suse.com
- version 4.3.1-1
- Enable logrotate configuration for Salt SSH minion logs
- States and pkgset beacon modified for new salt bundle file placement
- Handle more ocfs2 setups in virt_utils module
- Add UEFI support for VM creation
- Add virt-tuner templates to VM creation
- Add missing symlinks to generate the "certs" state for
SLE Micro 5.0 and openSUSE MicroOS minions (bsc#1188503)
- Remove systemid file on salt client cleanup
- Skip 'update-ca-certificates' run if the certs are updated automatically
- Fix parameters for 'runplaybook' state (bsc#1188395)
- Parameterised apache document root.
- Add support for bootstrapping Raspbian 9 and 10
- Add support for bootstrapping with salt bundle
- Add Rocky Linux 8 support
- Use lscpu to provide more CPU grains for all architectures
- Add findutils to Kiwi bootstrap packages
- Add support for Kiwi options
- Fix Salt scap state to use new 'xccdf_eval' function
- Fix deleting stopped virtual network (bsc#1186281)
- Handle virtual machines running on pacemaker cluster
- fix product detection for native RHEL products (bsc#1187397)
- when bootstrapping with ssh-push with tunnel use the port number
for fetching GPG keys from the server (bsc#1187441)
-------------------------------------------------------------------
Thu Jun 10 13:46:47 CEST 2021 - jgonzalez@suse.com
- version 4.2.14-1
- exclude openSUSE Leap 15.3 from product installation (bsc#1186858)
- Accept GPG key in Amazon Linux 2 for res7tools channel (bsc#1187102)
-------------------------------------------------------------------
Thu Jun 03 13:56:59 CEST 2021 - jgonzalez@suse.com
- version 4.2.13-1
- Enable certificate deployment for Leap 15.3 clients which is needed for
bootstrapping (bsc#1186765)
-------------------------------------------------------------------
Tue Jun 01 17:36:54 CEST 2021 - jgonzalez@suse.com
- version 4.2.12-1
- Do not assume Amazon bootstrap repo on RHEL and AlmaLinux instances (bsc#1186703)
-------------------------------------------------------------------
Mon May 24 12:42:03 CEST 2021 - jgonzalez@suse.com
- version 4.2.11-1
- fix installation of gnupg on Debian 10
- Fix deleting stopped virtual network (bsc#1186281)
- Do not install python2-salt on Salt 3002.2 Docker build hosts (bsc#1185506)
- Add support for 'disable_local_repos' salt minion config parameter
(bsc#1185568)
-------------------------------------------------------------------
Mon May 10 17:46:51 CEST 2021 - jgonzalez@suse.com
- version 4.2.10-1
- fix product detection while bootstrapping RedHat like products (bsc#1185846)
-------------------------------------------------------------------
Wed May 05 16:44:00 CEST 2021 - jgonzalez@suse.com
- version 4.2.9-1
- Always create systemid file to indicate minion is managed by Uyuni
- Switch from GPLv2 to Apache 2.0.
- Add support of salt bundle to pkgset notify beacon
- Add automatic cookie file selection for pkgset beacon
- Ansible integration: new SLS files to operate the Ansible control node
- provide details when bootstrap query is missing 'status'
- add virtual network edit action
-------------------------------------------------------------------
Thu Apr 29 11:51:00 CEST 2021 - jgonzalez@suse.com
- version 4.2.8-1
- Do not assume AmazonLinux bootstrap repo for CentOS (bsc#1185421)
-------------------------------------------------------------------
Fri Apr 16 13:35:25 CEST 2021 - jgonzalez@suse.com
- version 4.2.7-1
- Fix insecure JMX configuration (bsc#1184617)
- Add support for notify beacon for Debian/Ubuntu systems
- Automatically start needed networks and storage pools when creating/starting a VM
- Avoid conflicts with running ioloop on mgr_events engine (bsc#1172711)
- Require new kiwi-systemdeps packages (bsc#1184271)
- keep salt-minion when it is installed to prevent update problems with
dependent packages not available in the bootstrap repo (bsc#1183573)
- Add support for AlmaLinux 8
- Provide Custom Info as Pillar data
- Add support for Amazon Linux 2
- Add support for Alibaba Cloud Linux 2
- add allow vendor change option to patching via salt
- Prevent useless package list refresh actions on zypper minions (bsc#1183661)
- Skip removed product classes with satellite-sync
- add grain for virt module features
- add virtual network creation action
-------------------------------------------------------------------
Fri Mar 05 15:45:18 CET 2021 - jgonzalez@suse.com
- version 4.2.6-1
- handle GPG keys when bootstrapping ssh minions (bsc#1181847)
-------------------------------------------------------------------
Thu Feb 25 12:12:31 CET 2021 - jgonzalez@suse.com
- version 4.2.5-1
- Ubuntu 18 has a version of apt which does not correctly support the
auth.conf.d directory. Detect the working version and use this feature
only when we have a higher version installed
-------------------------------------------------------------------
Wed Jan 27 13:11:15 CET 2021 - jgonzalez@suse.com
- version 4.2.4-1
- fix apt login for similar channel labels (bsc#1180803)
- Change behavior of mgrcompat wrapper after deprecation changes on Salt 3002
- Remove the virtpoller beacon
- Make autoinstallation provisioning compatible with GRUB and ELILO
in addition to GRUB2 (bsc#1164227)
-------------------------------------------------------------------
Thu Dec 03 13:58:41 CET 2020 - jgonzalez@suse.com
- version 4.2.3-1
- Added RHEL support.
-------------------------------------------------------------------
Wed Nov 25 12:32:54 CET 2020 - jgonzalez@suse.com
- version 4.2.2-1
- Fix: sync before start action chains (bsc#1177336)
- Revert: Sync state modules when starting action chain execution (bsc#1177336)
- Sync state modules when starting action chain execution (bsc#1177336)
- Handle group- and org-specific image pillars
- Remove hostname from /var/lib/salt/.ssh/known_hosts when deleting system (bsc#1176159)
- Fix grub2 autoinstall kernel path (bsc#1178060)
- use require in reboot trigger (bsc#1177767)
- add pillar option to get allowVendorChange option during dist upgrade
- Change VM creation state to handle installation from kernel, PXE or CDROM
- Fix action chain resuming when patches updating salt-minion don't cause service to be
restarted (bsc#1144447)
- Make grub2 autoinstall kernel path relative to the boot partition root (bsc#1175876)
- Fix: do not break when pod status is empty (bsc#1161903)
- Move channel token information from sources.list to auth.conf on Debian 10 and Ubuntu 18 and newer
- Add support for activation keys on server configuration Salt modules
- ensure the yum/dnf plugins are enabled
-------------------------------------------------------------------
Fri Sep 18 12:29:55 CEST 2020 - jgonzalez@suse.com
- version 4.2.1-1
- Add uyuni-config-modules subpackage with Salt modules to configure
Servers
- Fix the dnf plugin to add the token to the HTTP header (bsc#1175724)
- Fix reporting of missing products in product.all_installed (bsc#1165829)
- Fix: supply a dnf base when dealing w/repos (bsc#1172504)
- Fix: autorefresh in repos is zypper-only
- Add virtual network state change state to handle start, stop and delete
- Add virtual network state change state to handle start and stop
- Update package version to 4.2.0
-------------------------------------------------------------------
Thu Jul 23 13:41:10 CEST 2020 - jgonzalez@suse.com
- version 4.1.12-1
- fetch oracle-release when looking for RedHat Product Info (bsc#1173584)
- Force a refresh after deleting a virtual storage volume
- Prevent stuck Hardware Refresh actions on Salt 2016.11.10 based SSH minions (bsc#1173169)
- Require PyYAML version >= 5.1
- Log out of Docker registries after image build (bsc#1165572)
- Prevent "module.run" deprecation warnings by using custom mgrcompat module
-------------------------------------------------------------------
Wed Jul 01 16:13:07 CEST 2020 - jgonzalez@suse.com
- version 4.1.11-1
- Fix detection of CentOS systems to properly set bootstrap repo (bsc#1173556)
- Do not produce syntax error on custom ssh_agent Salt module when
executing on Python 2 instance.
-------------------------------------------------------------------
Tue Jun 23 17:24:45 CEST 2020 - jgonzalez@suse.com
- version 4.1.10-1
- Remove VM disk type attribute
- Merge virtualization fragment into suma-minion pillar (bsc#1172962)
-------------------------------------------------------------------
Wed Jun 17 16:21:24 CEST 2020 - jgonzalez@suse.com
- version 4.1.9-1
- Add ssh_agent for CaaSP management
-------------------------------------------------------------------
Wed Jun 10 12:41:08 CEST 2020 - jgonzalez@suse.com
- version 4.1.8-1
- Avoid SSL certificate issue when bootstrapping OpenSUSE Leap 15.2 (bsc#1172712)
- Add Salt states for CaaSP cluster management
- Use minion fqdn instead of minion id as target in kiwi_collect_image
runner. If fqdn is not present or is localhost, use minion ip as
fallback (bsc#1170737)
- trust customer gpg key when metadata signing is enabled
- specify gpg key for RH systems in repo file (bsc#1172286)
- Implement CaaSP cluster upgrade procedure in cluster provider module.
- handle GPG check flags differently for yum/dnf (bsc#1171859)
- Enable bootstrapping for Oracle Linux 6, 7 and 8
- Set YAML loader to fix deprecation warnings
-------------------------------------------------------------------
Wed May 20 11:06:24 CEST 2020 - jgonzalez@suse.com
- version 4.1.7-1
- Fix failing "Hardware Refresh" actions because wrong "instance_id" reported
from minion due a captive portal on the network (bsc#1171491)
- Remove suseRegisterInfo package only if it's plain client (bsc#1171262)
- On Debian-like systems, install only required dependencies when installing salt
- Enable support for bootstrapping Ubuntu 20.04 LTS
- Pass image profile custom info values as Docker buildargs during image build
- Cluster Awareness: Introduce generic SLS files for Cluster Management
and CaaSP Cluster Provider custom Salt module.
- Add virtual volume delete action
- Ubuntu no longer shows removed packages as installed (bsc#1171461)
-------------------------------------------------------------------
Mon Apr 13 09:37:50 CEST 2020 - jgonzalez@suse.com
- version 4.1.6-1
- Fix virt.deleted state dependency
- Make 'product' state module only available for minions with zypper >= 1.8.13 (bsc#1166699)
- Use saltutil states if available on the minion (bsc#1167556)
- Enable support for bootstrapping Astra Linux CE "Orel"
- remove key grains only when file and grain exist (bsc#1167237)
- Add virtual storage pool actions
-------------------------------------------------------------------
Thu Mar 19 12:17:47 CET 2020 - jgonzalez@suse.com
- version 4.1.5-1
- Enable support for bootstrapping Debian 9 and 10
- Adapt 'mgractionchains' module to work with Salt 3000
-------------------------------------------------------------------
Wed Mar 11 11:03:06 CET 2020 - jgonzalez@suse.com
- version 4.1.4-1
- cleanup key grains after usage
- Disable modularity failsafe mechanism for RHEL 8 repos (bsc#1164875)
- install dmidecode before HW profile update when missing
- Add mgr_start_event_grains.sls to update minion config
- Add 'product' custom state module to handle installation of
SUSE products at client side (bsc#1157447)
- Support reading of pillar data for minions from multiple files (bsc#1158754)
-------------------------------------------------------------------
Mon Feb 17 12:56:29 CET 2020 - jgonzalez@suse.com
- version 4.1.3-1
- Do not workaround util.syncmodules for SSH minions (bsc#1162609)
- Force to run util.synccustomall when triggering action chains on SSH minions (bsc#1162683).
- Adapt sls file for pre-downloading in Ubuntu minions
- Add custom 'is_payg_instance' grain when instance is PAYG and not BYOS.
-------------------------------------------------------------------
Wed Jan 22 12:25:10 CET 2020 - jgonzalez@suse.com
- version 4.1.2-1
- Only install python2-salt on buildhosts if it is available
- sort formulas by execution order (bsc#1083326)
- split remove_traditional_stack into two parts. One for all systems and
another for clients not being a Uyuni Server or Proxy (bsc#1121640)
- Change the order to check the version correctly for RES (bsc#1152795)
- Remove the virt-poller cache when applying Virtualization entitlement
- Force HTTP request timeout on public cloud grain (bsc#1157975)
-------------------------------------------------------------------
Wed Nov 27 17:08:25 CET 2019 - jgonzalez@suse.com
- version 4.1.1-1
- dockerhost: install python2 salt packages only when python2
is available (bsc#1129627)
- Support license entry in kiwi image packages list
- Install yum plugin only for yum < 4 (bsc#1156173)
- Add self monitoring to Admin Monitoring UI (bsc#1143638)
- configure GPG keys and SSL Certificates for RHEL8 and ES8
- Always run Kiwi with empty cache (bsc#1155899)
- Do not show errors when polling internal metadata API (bsc#1155794)
- Avoid traceback error due to lazy loading of which_bin (bsc#1155794)
- Add missing "public_cloud" custom grain (bsc#1155656)
- Consider timeout value in salt remote script (bsc#1153181)
- Using new module path for which_bin to get rid of DeprecationWarning
- Fix: match `image_id` with newer k8s (bsc#1149741)
- Bump version to 4.1.0 (bsc#1154940)
- Always install latest available salt during bootstrap
- Create Kiwi cache dir if not present
- Require pmtools only for SLE11 i586 and x86_64 (bsc#1150314)
- do not break Servers registering to a Server
- Introduce dnf-susemanager-plugin for RHEL8 minions
- Provide custom grain to report "instance id" when running on Public Cloud instances
- enable Kiwi NG on SLE15
- disable legacy startup events for new minions
- implement provisioning for salt clients
- Bootstrapping RES6/RHEL6/SLE11 with TLS1.2 now shows an error message (bsc#1147126)
- Fix for issue with bootstrapping RES minions (bsc#1147126)
- dmidecode does not exist on ppc64le and s390x (bsc#1145119)
- update susemanager.conf to use adler32 for computing the server_id for new minions
-------------------------------------------------------------------
Wed Jul 31 17:42:04 CEST 2019 - jgonzalez@suse.com
- version 4.0.13-1
- Check for result of image rsync transfer to catch failures early (bsc#1104949)
- Force VM off before deleting it (bsc#1138127)
- Allow forcing off or resetting VMs
- Fix the indentation so that custom formulas can be read correctly (bsc#1136937)
- Make sure dmidecode is installed during bootstrap to ensure that hardware
refresh works for all operating systems (bsc#1137952)
- Prevent stuck Actions when onboarding KVM host minions (bsc#1137888)
- Fix formula name encoding on Python 3 (bsc#1137533)
- Adapt tests for SUSE manager 4.0
- More thoroughly disable the Salt mine in util.mgr_mine_config_clean_up (bsc#1135075)
-------------------------------------------------------------------
Wed May 15 15:35:23 CEST 2019 - jgonzalez@suse.com
- version 4.0.12-1
- SPEC cleanup
- Enabling certificate deployment for Leap 15.1 clients which is
needed for bootstrapping
- States to enable/disable server monitoring
- Improve salt events processing performance (bsc#1125097)
-------------------------------------------------------------------
Mon Apr 22 12:23:43 CEST 2019 - jgonzalez@suse.com
- version 4.0.11-1
- Enable SLES11 OS Image Build Host
- Add support for Salt batch execution mode
- Do not configure Salt Mine in newly registered minions (bsc#1122837)
- use default 'master' branch in OSImage profile URL (bsc#1108218)
- Add Python linting makefile and PyLint configuration file
-------------------------------------------------------------------
Thu Apr 04 14:43:04 CEST 2019 - jgonzalez@suse.com
- version 4.0.10-1
- Update get_kernel_live_version module to support older Salt versions (bsc#1131490)
-------------------------------------------------------------------
Fri Mar 29 10:37:42 CET 2019 - jgonzalez@suse.com
- version 4.0.9-1
- Update get_kernel_live_version module to support SLES 15 live patches
- Support registering minions using bootstrap repos for 18.04 and 16.04.
-------------------------------------------------------------------
Mon Mar 25 17:04:34 CET 2019 - jgonzalez@suse.com
- version 4.0.8-1
- Fix Salt error related to remove_traditional_stack when bootstrapping an Ubuntu
minion (bsc#1128724)
- Adapt disablelocalrepos.sls syntax for Salt 2016.10 (rhel6, sle11) (bsc#1127706)
- Automatically trust SUSE GPG key for client tools channels on Ubuntu systems
- util.systeminfo sls has been added to perform different actions at minion startup (bsc#1122381)
-------------------------------------------------------------------
Sat Mar 02 00:16:05 CET 2019 - jgonzalez@suse.com
- version 4.0.7-1
- Add support for Ubuntu minions
- Add Ubuntu SSL-Cert SLS-Files
-------------------------------------------------------------------
Wed Feb 27 13:17:30 CET 2019 - jgonzalez@suse.com
- version 4.0.6-1
- Fix mgr_events to use current ioloop (bsc#1126280)
- add states for virtual machine actions
- Added option to read 'pkg_download_point_...' pillar values and use them in the repo URL
-------------------------------------------------------------------
Thu Jan 31 09:45:42 CET 2019 - jgonzalez@suse.com
- version 4.0.5-1
- prevent the pkgset beacon from firing during onboarding (bsc#1122896)
- Prevent excessive DEBUG logging from mgr_events engine
-------------------------------------------------------------------
Wed Jan 16 12:27:07 CET 2019 - jgonzalez@suse.com
- version 4.0.4-1
- Allow bootstrapping minions with a pending minion key being present (bsc#1119727)
-------------------------------------------------------------------
Mon Dec 17 14:46:00 CET 2018 - jgonzalez@suse.com
- version 4.0.3-1
- enhance bootstrap-repo URLs for CentOS and openSUSE
- use a Salt engine to process return results (bsc#1099988)
-------------------------------------------------------------------
Fri Oct 26 10:52:53 CEST 2018 - jgonzalez@suse.com
- version 4.0.2-1
- deploy SSL certificate during onboarding of openSUSE Leap 15.0 (bsc#1112163)
- install all available known kiwi boot descriptions
- Fix: Cleanup Kiwi cache in highstate (bsc#1109892)
- removed the ssl certificate verification while checking bootstrap repo URL (bsc#1095220)
- Removed the need for curl to be present at bootstrap phase (bsc#1095220)
- Migrate Python code to be Python 2/3 compatible
- Fix merging of image pillars
- Fix: delete old custom OS images pillar before generation (bsc#1105107)
- Generate OS image pillars via Java
- Store activation key in the Kiwi built image
- Implement the 2-phase registration of saltbooted minions (SUMA for Retail)
-------------------------------------------------------------------
Fri Aug 10 15:45:45 CEST 2018 - jgonzalez@suse.com
- version 4.0.1-1
- Bump version to 4.0.0 (bsc#1104034)
- Fix copyright for the package specfile (bsc#1103696)
- Feat: add OS Image building with Kiwi FATE#322959 FATE#323057 FATE#323056
- Use custom Salt capabilities to prevent breaking backward compatibility (bsc#1096514)
- Update profileupdate.sls to report all versions installed (bsc#1089526)
- Do not install 'python-salt' on container build hosts with older Salt versions
(bsc#1097699)
- Fix bootstrap error when removing traditional stack (bsc#1096009)
-------------------------------------------------------------------
Wed May 23 09:03:37 CEST 2018 - jgonzalez@suse.com
- version 3.2.13-1
- Changes to mgractionchains module in order to support action chains on
minions using ssh-push connection method.
- Fix migration from traditional stack to salt registration (bsc#1093825)
-------------------------------------------------------------------
Wed May 16 17:38:30 CEST 2018 - jgonzalez@suse.com
- version 3.2.12-1
- Fix external pillar formula "ifempty" and "namespace" handling
- Fix profileupdate sls to execute retrieval of kernel live patching info (bsc#1091052)
- Use recursive merge on form pillars
- install python2/3 salt flavours on build hosts to generate a compatible
thin for the docker image being built (bsc#1092161)
- docker.login requires a list as input (bsc#1092161)
-------------------------------------------------------------------
Mon May 07 15:31:50 CEST 2018 - jgonzalez@suse.com
- version 3.2.11-1
- fix hardware refresh when FQDN changes (bsc#1073267)
- Handle empty values. Do not pass optional fields to pillar in
formulas if field is empty and no ifempty attr defined.
- Fixed processing of formulas with $scope: group
- Preserve order of formulas (bsc#1083326)
-------------------------------------------------------------------
Wed Apr 25 12:13:25 CEST 2018 - jgonzalez@suse.com
- version 3.2.10-1
- create bootstrap repo only if it exists on the server (bsc#1087840)
-------------------------------------------------------------------
Mon Apr 23 09:26:09 CEST 2018 - jgonzalez@suse.com
- version 3.2.9-1
- Enqueue states applied from 'mgractionchains' to avoid failures when
other states are already running at that time (bsc#1090502)
-------------------------------------------------------------------
Wed Apr 04 12:14:25 CEST 2018 - jgonzalez@suse.com
- version 3.2.8-1
- Fix 'mgractionchains.resume' output when nothing to resume (bsc#1087401)
-------------------------------------------------------------------
Thu Mar 29 01:28:50 CEST 2018 - jgonzalez@suse.com
- version 3.2.7-1
- Do not execute sumautil.get_kernel_live_version when inspecting an image
-------------------------------------------------------------------
Mon Mar 26 09:15:31 CEST 2018 - jgonzalez@suse.com
- version 3.2.6-1
- Provide new Salt module and Reactor to handle Action Chains on Minions
- use dockermod with new salt and user repository/tag option for build
- adapt names for gpg keys which have been changed
- perform docker login before building and inspecting images (bsc#1085635)
-------------------------------------------------------------------
Mon Mar 05 09:09:19 CET 2018 - jgonzalez@suse.com
- version 3.2.5-1
- support SLE15 product family
-------------------------------------------------------------------
Wed Feb 28 10:15:38 CET 2018 - jgonzalez@suse.com
- version 3.2.4-1
- Remove SUSE Manager repositories when deleting salt minions
(bsc#1079847)
- Fix master tops merging when running salt>=2018
-------------------------------------------------------------------
Mon Feb 05 12:53:28 CET 2018 - jgonzalez@suse.com
- version 3.2.3-1
- Allow scheduling software channel changes as an action. The previous
channels remain accessible to the registered system until the action
is executed.
-------------------------------------------------------------------
Fri Feb 02 13:06:31 CET 2018 - jgonzalez@suse.com
- version 3.2.2-1
- always compare osmajorrelease in jinja as an integer
-------------------------------------------------------------------
Wed Jan 17 13:31:27 CET 2018 - jgonzalez@suse.com
- version 3.2.1-1
- addition of parameters to package manipulation states to improve
SUSE Manager performance
- python3 compatibility fixes in modules and states
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)
- Fix image inspect when entrypoint is used by overwriting it
(bsc#1070782)
-------------------------------------------------------------------
Tue Dec 12 12:05:09 CET 2017 - jgonzalez@suse.com
- version 3.1.13-1
- fix Salt version detection for patches (bsc#1072350)
-------------------------------------------------------------------
Wed Nov 29 10:15:59 CET 2017 - jgonzalez@suse.com
- version 3.1.12-1
- Fix cleanup state error when deleting ssh-push minion (bsc#1070161)
-------------------------------------------------------------------
Tue Nov 28 15:18:20 CET 2017 - jgonzalez@suse.com
- version 3.1.11-1
- Added state templates for deploying/comparing config channels for Salt
- Fix failing certs state for Tumbleweed (bsc#970630)
- Fix deprecated SLS files to avoid deprecation warnings during highstate (bsc#1041993)
- Support xccdf 1.2 namespace in openscap result file (bsc#1059319)
- ensure correct ordering of patches (bsc#1059801)
- fix create empty top.sls with no-op (bsc#1053038)
- Enabling certificate deployment for Leap 42.3 clients which is
needed for bootstrapping
- fix Salt version detection for patches (bsc#1072350)
-------------------------------------------------------------------
Thu Sep 14 11:41:56 CEST 2017 - mc@suse.de
- version 3.1.10-1
- Kubernetes runner implementation
- addition of parameters to package manipulation states to improve
SUSE Manager performance
-------------------------------------------------------------------
Fri Jul 21 12:02:24 CEST 2017 - mc@suse.de
- version 3.1.9-1
- disable gpgcheck for bootstrap repo to work with new libzypp (bsc#1049670)
- Remove spacewalk:* repos when removing traditional stack (bsc#1024267)
- susemanager-sls: fix certs state for Tumbleweed (bsc#970630)
- susemanager-sls: fix certs state for Leap 42.2 (bsc#970630)
- Make sumautil.get_kernel_live_version accept any kgr output 'active: NUM'
where NUM > 0 (bsc#1044074)
-------------------------------------------------------------------
Mon Jun 19 16:37:53 CEST 2017 - mc@suse.de
- version 3.1.8-1
- Avoid formulas leaking into pillar data (bsc#1044236)
-------------------------------------------------------------------
Mon May 29 15:53:51 CEST 2017 - mc@suse.de
- version 3.1.7-1
- fix yum plugin when installing patches on RHEL6 (bsc#1039294)
- Remove suseRegisterInfo in a separate yum transaction so that
it's not called by yum plugin (bsc#1038732)
- Refactoring formulas in suma_minion external pillar (bsc#1033825)
- configure mime also during bootstrapping
- add missing file name attr to yum plugin state
- Encode formula to str (bsc#1033825)
- update yum on RedHat-like systems
- update basic packages when bootstrapping with salt
- use include instead of state.apply channels to fix salt-ssh issue
(bsc#1036268)
-------------------------------------------------------------------
Wed May 03 15:55:46 CEST 2017 - michele.bologna@suse.com
- version 3.1.6-1
- Targeting patches instead of packages for non-Zypper patch installation
- add certificate state for CAASP
- add certificate state for SLES for SAP (bsc#1031659)
-------------------------------------------------------------------
Mon Apr 03 14:47:46 CEST 2017 - mc@suse.de
- version 3.1.5-1
- patch application pre-download
- pre-download packages scheduled for install
-------------------------------------------------------------------
Fri Mar 31 09:48:52 CEST 2017 - mc@suse.de
- version 3.1.4-1
- Fix mainframesysinfo module to use /proc/sysinfo on SLES11
(bsc#1025758)
- take care that containers and images are removed after inspection
- add name to Bootstrap repo
- Pre-create empty top.sls with no-op (bsc#1017754)
- create a random container name
- Fix pkgset beacon (bsc#1029350)
- set minion own key owner to bootstrap ssh_push_sudo_user
- runner to generate ssh key and execute cmd via proxies
- change ssh bootstrap state to generate and auth keys for
salt-ssh push with tunnel
-------------------------------------------------------------------
Tue Mar 07 14:55:32 CET 2017 - mc@suse.de
- version 3.1.3-1
- add xccdf result xslt
- move move_minion_uploaded_files runner
- call docker inspect for additional data
- remove the container after inspecting it
- do not call image profile automatically after build
- Add state for image profileupdate
- add SUSE Manager prefix to state ids
-------------------------------------------------------------------
Tue Feb 07 15:12:30 CET 2017 - michele.bologna@suse.com
- version 3.1.2-1
- Configure mine.update to submit a job return event (bsc#1022735)
- Disable spacewalksd and spacewalk-update-status when switching to salt
registration (bsc#1020902)
- Fix timezone handling for rpm installtime (bsc#1017078)
- Push build images into registry
- Configure a Docker build host
- Salt version update
-------------------------------------------------------------------
Wed Jan 11 16:57:58 CET 2017 - michele.bologna@suse.com
- version 3.1.1-1
- Version bump to 3.1
-------------------------------------------------------------------
Fri Dec 16 12:14:52 CET 2016 - michele.bologna@suse.com
- version 0.1.18-1
- Rename 'master' pillar to 'mgr_server'
- Add tunneling to salt-ssh support
- Provide SUMA static pillar data for unregistered minions (bsc#1015122)
- implement fetching kernel live version as module (FATE#319519)
- Removing '/usr/share/susemanager/pillar' path
- Retrieving SUMA static pillar data from ext_pillar (bsc#1010674)
- Bugfix: Prevent salt-master ERROR messages if formulas files are missing
(bsc#1009004)
- fall back to major OS release version for cert names (bsc#1009749)
-------------------------------------------------------------------
Mon Nov 07 11:37:52 CET 2016 - michele.bologna@suse.com
- version 0.1.17-1
- Always sync custom modules, grains, and beacons before pkg and hw profileupdate
(bsc#1004725)
- Write distupgrade state for SP migration via salt
- New location of the salt-ssh key/cert pair. The previous location wasn't
writable by the salt user
-------------------------------------------------------------------
Thu Oct 13 12:50:28 CEST 2016 - mc@suse.de
- version 0.1.16-1
- Only normalize lists (bsc#1004456)
- Call normalize() before add_scsi_info() (bsc#1004456)
-------------------------------------------------------------------
Thu Oct 06 14:51:43 CEST 2016 - mc@suse.de
- version 0.1.15-1
- Fixed bug with numbers in FormulaForm and improved ext_pillar script
- Added formula directories and formulas.sls to setup script
- External pillar script now also includes formula pillars
- Rename symlinks according to changed 'os' grain for Expanded Support
- Adding certs states for RHEL minion based on SLES-ES
- Rename udevdb scsi info json key
- Add support for mapping mainframe sysinfo
- Implement isX86() in jinja more correctly
- Initial support for querying and saving DMI info
- Add support for mapping the devices
- Actually handle incoming hardware details
- Initial version of the hardware.profileupdate sls
- Added pkgset beacon support in susemanager yum plugin
- trust also RES GPG key on all RedHat minions
- trust GPG keys for SUSE Manager Tools channel on RES
- configure bootstrap repository for RES
- Always enable salt-minion service while bootstrapping (bsc#990202)
- CentOS cert state symlinks and fixes
- states for installing certificate on redhat minions
- pkg.list_products only on Suse
- yum plugin to add jwt token as http header
- Generate SLE 12 bootstrap repo path correctly (bsc#994578)
- Merging top.sls files in base env (bsc#986770)
- Watch files instead of require
-------------------------------------------------------------------
Mon Jul 18 14:23:32 CEST 2016 - jrenner@suse.com
- version 0.1.14-1
- Initial version of the bootstrap sls file
- update trust store when multiple certs in one file are available on SLE11
- update ca certificates only when they have changed
- assume no pillar data if the yml file for the minion does not exist
(bsc#980354)
- Add distributable pkgset beacon for RPM database notifications
-------------------------------------------------------------------
Tue May 24 16:04:20 CEST 2016 - kwalter@suse.com
- version 0.1.13-1
- require refresh channels before pkg states (bsc#975424)
- use pillar and static states to install/remove packages (bsc#975424)
-------------------------------------------------------------------
Tue Apr 12 17:15:01 CEST 2016 - mc@suse.de
- version 0.1.12-1
- Add external pillar minion data resolver (bsc#974853)
- Add readme about ext_pillars
- remove pillar top.sls (bsc#974853)
-------------------------------------------------------------------
Wed Apr 06 08:46:20 CEST 2016 - mc@suse.de
- version 0.1.11-1
- generate include only if group_ids not empty
- use state names in custom_groups (bsc#973452)
- rename pillar group_id to group_ids
- Fix generating blank repositories caused by hitting the salt file list cache
(bsc#971004)
- package pillar/top.sls (bsc#973569)
- pre require coreutils to create the cert symlink in post (bsc#972160)
- disable local repositories on registration (bnc#971788)
-------------------------------------------------------------------
Mon Mar 21 17:38:33 CET 2016 - mc@suse.de
- version 0.1.10-1
- remove unused ext_pillar
- ignore missing .sls to include in certs/init.sls
- ignore packages_{machine_id}.sls if it's missing
- ignore missing pillar files at minion level
- ignore missing sls or pillars in custom_XXX/init.sls
(bnc#970461, bnc#970316)
- Include minion custom_<machine_id>.sls only if it exists (bnc#970461)
- Ignore missing org custom state (bnc#970461)
- refactor in python (bnc#970316) (bnc#970461)
-------------------------------------------------------------------
Wed Mar 09 11:29:45 CET 2016 - mc@suse.de
- version 0.1.9-1
- include org and groups separately in top.sls
- refresh pillar on remove from group
- initial suma groups external pillar
-------------------------------------------------------------------
Wed Mar 02 12:09:13 CET 2016 - mc@suse.de
- version 0.1.8-1
- rename tables
-------------------------------------------------------------------
Tue Jan 26 14:07:41 CET 2016 - mc@suse.de
- version 0.1.7-1
- cleanup python code according to PR review
- reworked sumautil network utils to be more pythonic
- remove commented code
- get network if modules, checkstyle cleanup
- get minion primary ips
-------------------------------------------------------------------
Sat Jan 16 11:38:17 CET 2016 - mc@suse.de
- version 0.1.6-1
- custom grain for total num of cpus
-------------------------------------------------------------------
Thu Jan 14 13:30:59 CET 2016 - mc@suse.de
- version 0.1.5-1
- Port client python HW handling to server side java
- CPU socket count: try also lscpu and dmidecode
-------------------------------------------------------------------
Tue Jan 05 15:55:57 CET 2016 - mc@suse.de
- version 0.1.4-1
- Fill General and DMI hw info on minion registration
-------------------------------------------------------------------
Wed Dec 16 11:28:21 CET 2015 - mc@suse.de
- version 0.1.3-1
- Add static sls for package management
-------------------------------------------------------------------
Mon Nov 30 11:15:47 CET 2015 - mc@suse.de
- version 0.1.2-1
- force link creation
- use osfullname instead of os
- Cover sles12 machines reporting os grain SUSE
- Add support for deploying certificates to SLES11 minions
-------------------------------------------------------------------
Tue Nov 17 09:35:38 CET 2015 - jrenner@suse.com
- version 0.1.1-1
- Initial package release
07070100000103000081B400000000000000000000000168EFD66400000021000000000000000000000000000000000000004100000000susemanager-sls/susemanager-sls.changes.agraul.pylint-everything- Allow existing pylint failures
07070100000104000081B400000000000000000000000168EFD66400000022000000000000000000000000000000000000003E00000000susemanager-sls/susemanager-sls.changes.agraul.reformat-black- Reformat Python code with black
07070100000105000081B400000000000000000000000168EFD6640000005D000000000000000000000000000000000000003E00000000susemanager-sls/susemanager-sls.changes.carlo.uyuni-coco-vlek- Add VLEK certificate creation and collection
in confidential computing report generation
07070100000106000081B400000000000000000000000168EFD66400000035000000000000000000000000000000000000005200000000susemanager-sls/susemanager-sls.changes.mackdk.avoid-executing-commands-from-PATH- Use absolute paths when invoking external commands
07070100000107000081B400000000000000000000000168EFD66400000042000000000000000000000000000000000000003F00000000susemanager-sls/susemanager-sls.changes.mackdk.deploy-ibm-keys- Automatically deploy IBM GPG keys to SUSE minions (bsc#1246421)
07070100000108000081B400000000000000000000000168EFD6640000003F000000000000000000000000000000000000004400000000susemanager-sls/susemanager-sls.changes.mbussolotto.jmx_refactoring- Move jmx configuration to a persisted folder
(bsc#1244219)
07070100000109000081B400000000000000000000000168EFD6640000005E000000000000000000000000000000000000005F00000000susemanager-sls/susemanager-sls.changes.mcalmer.Manager-5.1-fix-liberate-via-product-migration- Succeed liberate product migration also when reinstall
packages is disabled (bsc#1248804)
0707010000010A000081B400000000000000000000000168EFD66400000057000000000000000000000000000000000000004B00000000susemanager-sls/susemanager-sls.changes.mcalmer.Manager-5.1-ignore-rancher- Ignore unsupported products when installing missing
release packages (bsc#1251278)
0707010000010B000081B400000000000000000000000168EFD66400000090000000000000000000000000000000000000004D00000000susemanager-sls/susemanager-sls.changes.mcalmer.change-salt-queue-allocation- Assign a salt event not anymore to a fix queue but choose the
least used one by keeping all events from the same minion in
the same queue
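  Illustrative sketch of the described strategy only (minion_to_queue and
  queue_sizes are hypothetical names, not the actual engine code):
      minion_to_queue = {}    # minion id -> queue index (sticky mapping)
      queue_sizes = [0] * 4   # events currently assigned per queue
      def pick_queue(minion_id):
          # events from the same minion always land in the same queue
          if minion_id in minion_to_queue:
              return minion_to_queue[minion_id]
          # otherwise choose the currently least used queue
          queue = min(range(len(queue_sizes)), key=queue_sizes.__getitem__)
          minion_to_queue[minion_id] = queue
          queue_sizes[queue] += 1
          return queue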
0707010000010C000081B400000000000000000000000168EFD6640000004F000000000000000000000000000000000000005700000000susemanager-sls/susemanager-sls.changes.mcalmer.fix-micro-product-install-in-highstate- Install products automatically on SLE Micro 5 and SL Micro 6
(bsc#1243486)
0707010000010D000081B400000000000000000000000168EFD6640000003C000000000000000000000000000000000000005900000000susemanager-sls/susemanager-sls.changes.mcalmer.fix-rebootneeded-detection-transactional- Fix reebot needed detection of transaction update systems
0707010000010E000081B400000000000000000000000168EFD6640000003D000000000000000000000000000000000000004B00000000susemanager-sls/susemanager-sls.changes.mcalmer.gather-virtualization-data- Add state to get virtual instance info from physical hosts
0707010000010F000081B400000000000000000000000168EFD66400000065000000000000000000000000000000000000005100000000susemanager-sls/susemanager-sls.changes.mcalmer.remove-duplicate-product-install- Remove product installation state from packages as it is
already available in the channels state
07070100000110000081B400000000000000000000000168EFD6640000003F000000000000000000000000000000000000004800000000susemanager-sls/susemanager-sls.changes.mcalmer.supportdata-salt-module- Add a salt execution module to get support data from clients
07070100000111000081B400000000000000000000000168EFD66400000036000000000000000000000000000000000000004900000000susemanager-sls/susemanager-sls.changes.oholecek.add_new_kiwi_imagetypes- Add all image types supported by kiwi (bsc#1246663)
07070100000112000081B400000000000000000000000168EFD66400000082000000000000000000000000000000000000004C00000000susemanager-sls/susemanager-sls.changes.oholecek.add_podman_based_kiwibuild- Add container based kiwi10 build system
- Do not use pkg search in jinja (bsc#1240882)
- Remove SLE11 kiwi bundle build support
07070100000113000081B400000000000000000000000168EFD66400000041000000000000000000000000000000000000004200000000susemanager-sls/susemanager-sls.changes.oholecek.allow_Mm_version- Allow OS image templates to have just two numbers as a version
07070100000114000081B400000000000000000000000168EFD66400000046000000000000000000000000000000000000005300000000susemanager-sls/susemanager-sls.changes.rjpmestre.proxy-configuration-install-rpms- Install proxy related packages for RPM installation
(bsc#1246654)
07070100000115000081B400000000000000000000000168EFD66400000055000000000000000000000000000000000000004600000000susemanager-sls/susemanager-sls.changes.welder.scc-cpu-telemetry-data- Collect CPU architecture specific data on hardware
profile update (jsc#SUMA-406)
07070100000116000081B400000000000000000000000168EFD66400002013000000000000000000000000000000000000002500000000susemanager-sls/susemanager-sls.spec#
# spec file for package susemanager-sls
#
# Copyright (c) 2025 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
#
# The productprettyname macro is controlled in the prjconf. If not defined, we fall back here
%{!?productprettyname: %global productprettyname Uyuni}
# Keep in sync with salt/salt.spec
%if 0%{?suse_version} == 1500 && 0%{?sle_version} >= 150700
%global use_python python311
%else
%global use_python python3
%endif
%if 0%{?suse_version} > 1320 || 0%{?rhel}
# SLE15 builds on Python 3
%global build_py3 1
%endif
%if 0%{?suse_version}
%global serverdir /srv
%global wwwpubroot %{serverdir}/www/htdocs
%else
%global serverdir %{_localstatedir}
%global wwwpubroot %{serverdir}/www/html
%endif
Name: susemanager-sls
Version: 5.2.0
Release: 0
Summary: Static Salt state files for %{productprettyname}
URL: https://github.com/uyuni-project/uyuni
License: Apache-2.0 AND LGPL-2.1-only
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group: Applications/Internet
#!CreateArchive: %{name}
Source: %{name}-%{version}.tar.gz
Source1: https://raw.githubusercontent.com/uyuni-project/uyuni/%{name}-%{version}-0/susemanager-utils/susemanager-sls/%{name}-rpmlintrc
Requires(pre): coreutils
Requires(posttrans): spacewalk-admin
Requires: susemanager-build-keys-web >= 15.4.2
%if 0%{?build_py3}
BuildRequires: %{use_python}-pytest
BuildRequires: %{use_python}-salt
BuildRequires: python3-spacewalk-certs-tools
# Different package names for SUSE and RHEL:
Requires: (python3-PyYAML >= 5.1 or python3-pyyaml >= 5.1)
Requires: %{use_python}-psycopg2
%else
BuildRequires: python-mock
BuildRequires: python-pytest
BuildRequires: python-salt
Requires: python-PyYAML >= 5.1
%endif
BuildArch: noarch
%description
Static Salt state files for %{productprettyname}, providing generic
operations for the integration between infrastructure components.
%package -n uyuni-config-modules
Summary: Salt modules to configure a %{productprettyname} Server
# FIXME: use correct group or remove it, see "https://en.opensuse.org/openSUSE:Package_group_guidelines"
Group: Applications/Internet
%description -n uyuni-config-modules
This package contains Salt execution and state modules that can be used
to configure %{productprettyname} Server.
%prep
%setup -q
%build
%install
mkdir -p %{buildroot}%{_datadir}/susemanager/salt/_grains
mkdir -p %{buildroot}%{_datadir}/susemanager/salt/_beacons
mkdir -p %{buildroot}%{_datadir}/susemanager/salt/_modules
mkdir -p %{buildroot}%{_datadir}/susemanager/salt/_states
mkdir -p %{buildroot}%{_datadir}/susemanager/salt-ssh
mkdir -p %{buildroot}%{_datadir}/susemanager/modules/pillar
mkdir -p %{buildroot}%{_datadir}/susemanager/modules/tops
mkdir -p %{buildroot}%{_datadir}/susemanager/modules/runners
mkdir -p %{buildroot}%{_datadir}/susemanager/modules/engines
mkdir -p %{buildroot}%{_datadir}/susemanager/modules/roster
mkdir -p %{buildroot}%{_datadir}/susemanager/pillar_data
mkdir -p %{buildroot}%{_datadir}/susemanager/formulas
mkdir -p %{buildroot}%{_datadir}/susemanager/formulas/metadata
mkdir -p %{buildroot}%{_datadir}/susemanager/reactor
mkdir -p %{buildroot}%{_datadir}/susemanager/scap
mkdir -p %{buildroot}/srv/formula_metadata
cp -R salt/* %{buildroot}%{_datadir}/susemanager/salt
cp -R salt-ssh/* %{buildroot}%{_datadir}/susemanager/salt-ssh
cp -R modules/pillar/* %{buildroot}%{_datadir}/susemanager/modules/pillar
cp -R modules/tops/* %{buildroot}%{_datadir}/susemanager/modules/tops
cp -R modules/runners/* %{buildroot}%{_datadir}/susemanager/modules/runners
cp -R modules/engines/* %{buildroot}%{_datadir}/susemanager/modules/engines
cp -R modules/roster/* %{buildroot}%{_datadir}/susemanager/modules/roster
cp -R formulas/* %{buildroot}%{_datadir}/susemanager/formulas
cp -R formula_metadata/* %{buildroot}/srv/formula_metadata
cp -R reactor/* %{buildroot}%{_datadir}/susemanager/reactor
cp -R scap/* %{buildroot}%{_datadir}/susemanager/scap
# Manually install the Python part into the already prepared structure
cp src/beacons/*.py %{buildroot}%{_datadir}/susemanager/salt/_beacons
cp src/grains/*.py %{buildroot}%{_datadir}/susemanager/salt/_grains/
rm %{buildroot}%{_datadir}/susemanager/salt/_grains/__init__.py
cp src/modules/*.py %{buildroot}%{_datadir}/susemanager/salt/_modules
rm %{buildroot}%{_datadir}/susemanager/salt/_modules/__init__.py
cp src/states/*.py %{buildroot}%{_datadir}/susemanager/salt/_states
rm %{buildroot}%{_datadir}/susemanager/salt/_states/__init__.py
# Install doc, examples
mkdir -p %{buildroot}%{_docdir}/uyuni-config-modules/examples/ldap
cp src/doc/* %{buildroot}%{_docdir}/uyuni-config-modules/
cp src/examples/uyuni_config_hardcode.sls %{buildroot}%{_docdir}/uyuni-config-modules/examples
cp src/examples/ldap/* %{buildroot}%{_docdir}/uyuni-config-modules/examples/ldap
%check
cd test
# Run py.test-3 for rhel
py.test%{?rhel:-3} test_pillar_suma_minion.py
cd ../src/tests
py.test%{?rhel:-3}
# Check that SLS files don't contain any call to "module.run", which has
# been replaced by "mgrcompat.module_run".
! grep --include "*.sls" -r "module\.run" %{buildroot}%{_datadir}/susemanager/salt || exit 1
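# For illustration only (a hypothetical snippet, not part of the build):
# an SLS state following this convention would read e.g.
#   sync_grains:
#     mgrcompat.module_run:
#       - name: saltutil.sync_grains
# rather than calling the plain "module.run" state.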
%pre
# change /usr/share/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
# from symlink into a real file
if [ -L %{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT ]; then
rm -f %{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
if [ -f %{_sysconfdir}/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT ]; then
cp %{_sysconfdir}/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT \
%{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
elif [ -f %{_sysconfdir}/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT ]; then
cp %{_sysconfdir}/pki/ca-trust/source/anchors/RHN-ORG-TRUSTED-SSL-CERT \
%{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
fi
fi
%post
# when the uyuni roster module has changed, we need to remove the cache
rm -f %{_localstatedir}/cache/salt/master/roster/uyuni/minions.p
# this will be filled with content when a certificate gets deployed
if [ ! -e %{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT ]; then
touch %{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
fi
%posttrans
# Run JMX exporter as Java Agent (bsc#1184617)
if grep -q 'prometheus_monitoring_enabled\s*=\s*1\s*$' %{_sysconfdir}/rhn/rhn.conf; then
%{_sbindir}/mgr-monitoring-ctl enable
fi
%files
%defattr(-,root,root)
%dir %{_datadir}/susemanager
%{_datadir}/susemanager/salt
%{_datadir}/susemanager/salt-ssh
%{_datadir}/susemanager/pillar_data
%{_datadir}/susemanager/modules
%{_datadir}/susemanager/modules/pillar
%{_datadir}/susemanager/modules/tops
%{_datadir}/susemanager/modules/runners
%{_datadir}/susemanager/modules/engines
%{_datadir}/susemanager/modules/roster
%{_datadir}/susemanager/formulas
%{_datadir}/susemanager/reactor
%{_datadir}/susemanager/scap
/srv/formula_metadata
%exclude %{_datadir}/susemanager/salt/_modules/uyuni_config.py
%exclude %{_datadir}/susemanager/salt/_states/uyuni_config.py
%ghost %{_datadir}/susemanager/salt/certs/RHN-ORG-TRUSTED-SSL-CERT
%files -n uyuni-config-modules
%defattr(-,root,root)
%dir %{_datadir}/susemanager
%{_datadir}/susemanager/salt/_modules/uyuni_config.py
%{_datadir}/susemanager/salt/_states/uyuni_config.py
%dir %{_docdir}/uyuni-config-modules
%doc %{_docdir}/uyuni-config-modules/*
%doc %{_docdir}/uyuni-config-modules/examples/*
%doc %{_docdir}/uyuni-config-modules/examples/ldap/*
%changelog
07070100000117000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000001500000000susemanager-sls/test07070100000118000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000001A00000000susemanager-sls/test/data07070100000119000081B400000000000000000000000168EFD664000000B6000000000000000000000000000000000000002D00000000susemanager-sls/test/data/formula_order.json["branch-network","cpu-mitigations","dhcpd","grafana","image-synchronize","locale","prometheus","prometheus-exporters","pxe","saltboot","tftpd","virtualization-host","vsftpd","bind"]0707010000011A000041FD00000000000000000000000368EFD66400000000000000000000000000000000000000000000002300000000susemanager-sls/test/data/formulas0707010000011B000041FD00000000000000000000001068EFD66400000000000000000000000000000000000000000000002C00000000susemanager-sls/test/data/formulas/metadata0707010000011C000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003100000000susemanager-sls/test/data/formulas/metadata/bind0707010000011D000081B400000000000000000000000168EFD66400000A23000000000000000000000000000000000000003A00000000susemanager-sls/test/data/formulas/metadata/bind/form.ymlbind:
$type: hidden-group
config:
$type: group
options:
$type: edit-group
$optional: True
$prototype:
$type: text
$key:
$type: text
$name: Option
include_forwarders:
$type: boolean
$default: false
configured_zones:
$type: edit-group
$minItems: 1
$itemName: Zone ${i}
$prototype:
$type: group
$key:
$type: text
$name: Name
type:
$type: select
$values: ["master", "slave"]
$default: master
notify:
$type: boolean
$default: False
available_zones:
$type: edit-group
$minItems: 1
$itemName: Zone ${i}
$prototype:
$type: group
$key:
$type: text
$name: Name
file:
$type: text
soa:
$name: SOA
$type: group
ns:
$name: NS
$type: text
$placeholder: ns@zone
$ifEmpty: ns
contact:
$type: text
$placeholder: admin@domain
$ifEmpty: root@localhost
serial:
$default: auto
$ifEmpty: auto
class:
$default: IN
refresh:
$default: 8600
$type: number
retry:
$default: 900
$type: number
expiry:
$default: 86000
$type: number
nxdomain:
$name: NXDOMAIN
$default: 500
$type: number
ttl:
$name: TTL
$default: 8600
$type: number
records:
$type: group
A:
$type: edit-group
$optional: true
$minItems: 0
$prototype:
$key:
$type: text
$name: Hostname
$type: text
$name: IP address
NS:
$name: NS
$type: group
$optional: true
'@':
$type: edit-group
$minItems: 0
$prototype:
$type: text
CNAME:
$name: CNAME
$type: edit-group
$optional: true
$minItems: 0
$prototype:
$key:
$type: text
$name: Alias
$type: text
$name: Hostname
generate_reverse:
$type: group
$optional: true
net:
$name: Network
$optional: true
for_zones:
$type: edit-group
$optional: true
$minItems: 0
$prototype:
$type: text
0707010000011E000081B400000000000000000000000168EFD66400000069000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/bind/metadata.ymldescription:
Settings for bind nameserver
group: general_system_configuration
after:
- branch-network0707010000011F000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/branch-network07070100000120000081B400000000000000000000000168EFD6640000093C000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/branch-network/form.ymlbranch_network:
$type: hidden-group
dedicated_NIC:
$type: boolean
$default: True
nic:
$default: eth1
$visibleIf: .dedicated_NIC == true
ip:
$default: 192.168.128.1
$visibleIf: .dedicated_NIC == true
netmask:
$default: 255.255.255.0
$visibleIf: .dedicated_NIC == true
configure_firewall:
$type: boolean
$default: true
$help: Uncheck to configure firewall manually.
firewall:
$type: group
$visibleIf: .configure_firewall == true
enable_route:
$type: boolean
$default: True
$visibleIf: ..dedicated_NIC == true
enable_NAT:
$type: boolean
$default: True
$visibleIf: ..dedicated_NIC == true
enable_SLAAC_with_routing:
$type: boolean
$default: False
$visibleIf: .enable_NAT == true
$name: Force enable IPv6 SLAAC together with forwarding
      $help: Check to enable IPv6 autoconfiguration (SLAAC) even when the Branch acts as a router.
open_dhcp_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_dns_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_tftp_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_ftp_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_http_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_https_port:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_salt_ports:
$visibleIf: ..dedicated_NIC == false
$type: boolean
$default: True
open_ssh_port:
$type: boolean
$default: True
open_xmpp_server_port:
$type: boolean
$default: True
open_xmpp_client_port:
$type: boolean
$default: True
forwarder:
$type: select
$values:
- resolver
- bind
- dnsmasq
$default: bind
forwarder_fallback:
$type: boolean
$default: True
srv_directory:
$name: 'server directory'
$type: text
$default: '/srv/saltboot'
srv_directory_user:
$name: 'server directory user'
$type: text
$default: 'saltboot'
srv_directory_group:
$name: 'server directory group'
$type: text
$default: 'saltboot'
07070100000121000081B400000000000000000000000168EFD6640000005C000000000000000000000000000000000000004800000000susemanager-sls/test/data/formulas/metadata/branch-network/metadata.ymldescription:
Configuration of Branch Server proxy networks
group: SUSE_manager_for_retail
07070100000122000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations07070100000123000081B400000000000000000000000168EFD664000000BA000000000000000000000000000000000000004500000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/form.ymlmitigations:
$type: group
name:
$type: select
$values: ["Auto",
"Auto + No SMT",
"Off",
"Manual"
]
$default: Auto
07070100000124000081B400000000000000000000000168EFD66400000063000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/cpu-mitigations/metadata.ymldescription:
Settings for kernel options for performance/security.
group: security_configuration
07070100000125000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/dhcpd07070100000126000081B400000000000000000000000168EFD66400001284000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/dhcpd/form.ymldhcpd:
$type: namespace
domain_name:
$placeholder: Enter domain name for managed LAN
domain_name_servers:
$type: edit-group
$minItems: 1
$prototype:
$type: text
listen_interfaces:
$type: edit-group
$minItems: 1
$prototype:
$type: text
$help: List of interfaces to listen on
$default:
- eth1
authoritative:
$type: boolean
$default: True
max_lease_time:
$default: 20001
$type: number
default_lease_time:
$default: 20000
$type: number
subnets:
$type: edit-group
$minItems: 1
$name: Network Configuration (subnet)
$itemName: Network ${i}
$prototype:
$type: group
$key:
$type: text
$name: Network IP
$default: 192.168.1.0
netmask:
$type: text
$default: 255.255.255.0
domain_name:
$type: text
$optional: true
comment:
$type: text
$optional: true
range:
$type: edit-group
$name: Dynamic IP Range
$minItems: 2
$maxItems: 2
$prototype:
$type: text
$default:
- 192.168.1.51
- 192.168.1.151
broadcast_address:
$type: text
$default: 192.168.1.255
routers:
$type: edit-group
$minItems: 1
$prototype:
$type: text
$default:
- 192.168.1.1
next_server:
$type: text
$default: 192.168.1.1
$help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
$optional: true
filename:
$type: text
$visibleIf: .next_server != ''
$default: boot/pxelinux.0
$help: Specify the name of the initial boot file which is to be loaded by a client
$optional: true
filename_efi:
$type: text
$visibleIf: .next_server != ''
$default: boot/grub.efi
$help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
$optional: true
hosts:
$type: edit-group
$minItems: 0
$itemName: Host ${i}
$name: Hosts with Static IP Addresses (with Defaults from Subnet)
$optional: true
$prototype:
$key:
$type: text
$name: Hostname
fixed_address:
$type: text
$optional: true
$name: IP Address
hardware:
$type: text
$name: Hardware Type and Address
$placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
$help: Hardware Identifier - ethernet prefix is mandatory
next_server:
$type: text
$default:
$help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
$optional: true
filename:
$type: text
$visibleIf: .next_server != ''
$default:
$help: Specify the name of the initial boot file which is to be loaded by a client
$optional: true
filename_efi:
$type: text
$visibleIf: .next_server != ''
$default:
$help: Specify the name of the initial boot file which is to be loaded by a client in EFI mode
$optional: true
comment:
$type: text
hosts:
$type: edit-group
$minItems: 0
$itemName: Host ${i}
$name: Hosts with static IP addresses (with global defaults)
$optional: true
$prototype:
$key:
$type: text
$name: Hostname
fixed_address:
$type: text
$optional: true
$name: IP address
hardware:
$type: text
$name: Hardware Type and Address
$placeholder: Enter hardware-type hardware-address (e.g. "ethernet AA:BB:CC:DD:EE:FF")
$help: Hardware Identifier - ethernet prefix is mandatory
next_server:
$type: text
$default:
$help: IP address or hostname of the server from which the initial boot file (specified in the filename statement) is to be loaded
$optional: true
filename:
$type: text
$visibleIf: .next_server != ''
$default:
$help: Specify the name of the initial boot file which is to be loaded by a client
$optional: true
comment:
$type: text
07070100000127000081B400000000000000000000000168EFD66400000065000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/dhcpd/metadata.ymldescription:
Settings for DHCP server
group: general_system_configuration
after:
- branch-network07070100000128000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003400000000susemanager-sls/test/data/formulas/metadata/grafana07070100000129000081B400000000000000000000000168EFD6640000074B000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/grafana/form.ymlgrafana:
$type: namespace
enabled:
$type: boolean
$default: True
    $help: disabled grafana
admin_user:
$type: text
$name: Default admin user
$required: true
$disabled: "!formValues.grafana.enabled"
admin_pass:
$type: password
$name: Default admin password
$required: true
$disabled: "!formValues.grafana.enabled"
datasources:
$type: group
$disabled: "!formValues.grafana.enabled"
$help: Configure the data sources used by Grafana.
prometheus:
$type: edit-group
$minItems: 1
$name: Prometheus
$help: Configure Prometheus data sources.
$itemName: Prometheus data source ${i}
$prototype:
$type: group
$disabled: "!formValues.grafana.enabled"
$key:
$type: text
$name: Datasource name
$default: Prometheus
$help: Data source name
url:
$type: url
$default: http://localhost:9080
$required: true
$name: Prometheus URL
$help: URL of a Prometheus instance
dashboards:
$type: group
$disabled: "!formValues.grafana.enabled"
$help: Dashboards to install.
add_uyuni_dashboard:
$type: boolean
$name: Uyuni server dashboard
$help: Add dashboard for monitoring an Uyuni server
$default: True
add_uyuni_clients_dashboard:
$type: boolean
$name: Uyuni clients dashboard
$help: Add dashboard for monitoring Uyuni clients
$default: True
add_postgresql_dasboard:
$type: boolean
$name: PostgreSQL dashboard
$help: Add dashboard for monitoring a PostgreSQL database
$default: True
add_apache_dashboard:
$type: boolean
$name: Apache HTTPD dashboard
$help: Add dashboard for monitoring an Apache HTTPD server
$default: True
0707010000012A000081B400000000000000000000000168EFD6640000003F000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/grafana/metadata.ymldescription:
Enable and configure Grafana.
group: monitoring
0707010000012B000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/image-synchronize0707010000012C000081B400000000000000000000000168EFD66400000206000000000000000000000000000000000000004700000000susemanager-sls/test/data/formulas/metadata/image-synchronize/form.ymlimage-synchronize:
$type: hidden-group
in_highstate:
$name: Include Image Synchronization in Highstate
$type: boolean
$default: false
whitelist:
$type: edit-group
$name: Synchronize only the listed images
$minItems: 0
$prototype:
$type: text
$help: Image name (without version)
default_boot_image:
$type: text
$name: Default boot image
$help: Default boot image used for first boot of a terminal
0707010000012D000081B400000000000000000000000168EFD66400000051000000000000000000000000000000000000004B00000000susemanager-sls/test/data/formulas/metadata/image-synchronize/metadata.ymldescription:
Settings for image synchronization
group: SUSE_manager_for_retail
0707010000012E000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/locale0707010000012F000081B400000000000000000000000168EFD66400001537000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/locale/form.yml# This file is part of locale-formula.
#
# locale-formula is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# locale-formula is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with locale-formula. If not, see <http://www.gnu.org/licenses/>.
timezone:
$type: group
name:
$type: select
$values: ["CET",
"CST6CDT",
"EET",
"EST",
"EST5EDT",
"GMT",
"GMT+0",
"GMT-0",
"GMT0",
"Greenwich",
"HST",
"MET",
"MST",
"MST7MDT",
"NZ",
"NZ-CHAT",
"Navajo",
"PST8PDT",
"UCT",
"UTC",
"Universal",
"W-SU",
"WET",
"Zulu",
"Etc/GMT+1",
"Etc/GMT+2",
"Etc/GMT+3",
"Etc/GMT+4",
"Etc/GMT+5",
"Etc/GMT+6",
"Etc/GMT+7",
"Etc/GMT+8",
"Etc/GMT+9",
"Etc/GMT+10",
"Etc/GMT+11",
"Etc/GMT+12",
"Etc/GMT-1",
"Etc/GMT-2",
"Etc/GMT-3",
"Etc/GMT-4",
"Etc/GMT-5",
"Etc/GMT-6",
"Etc/GMT-7",
"Etc/GMT-8",
"Etc/GMT-9",
"Etc/GMT-10",
"Etc/GMT-11",
"Etc/GMT-12",
"Etc/GMT-13",
"Etc/GMT-14",
"Etc/GMT",
"Etc/GMT+0",
"Etc/GMT-0",
"Etc/GMT0",
"Etc/Greenwich",
"Etc/UCT",
"Etc/UTC",
"Etc/Universal",
"Etc/Zulu"
]
$default: CET
hardware_clock_set_to_utc:
$type: boolean
$default: True
keyboard_and_language:
$type: group
language:
$type: select
$values: ["Afrikaans",
"Arabic",
"Asturian",
"Bulgarian",
"Bengali",
"Bosnian",
"Catalan",
"Czech",
"Welsh",
"Danish",
"German",
"Greek",
"English (UK)",
"English (US)",
"Spanish",
"Estonian",
"Finnish",
"French",
"Galician",
"Gujarati",
"Hebrew",
"Hindi",
"Croatian",
"Hungarian",
"Indonesian",
"Italian",
"Japanese",
"Georgian",
"Khmer",
"Korean",
"Lithuanian",
"Macedonian",
"Marathi",
"Norwegian",
"Dutch",
"Nynorsk",
"Punjabi",
"Polish",
"Portuguese (Brazilian)",
"Portuguese",
"Romanian",
"Russian",
"Sinhala",
"Slovak",
"Slovenian",
"Serbian",
"Swedish",
"Tamil",
"Tajik",
"Thai",
"Turkish",
"Ukrainian",
"Vietnamese",
"Walloon",
"Xhosa",
"Simplified Chinese",
"Traditional Chinese",
"Zulu"
]
$default: English (US)
keyboard_layout:
$type: select
$values: ["Arabic",
"Belgian",
"Canadian (Multilingual)",
"Croatian",
"Czech",
"Czech (qwerty)",
"Danish",
"Dutch",
"Dvorak",
"English (UK)",
"English (US)",
"Estonian",
"Finnish",
"French",
"French (Canada)",
"French (Switzerland)",
"German",
"German (Switzerland)",
"German (with deadkeys)",
"Greek",
"Hungarian",
"Icelandic",
"Italian",
"Japanese",
"Khmer",
"Korean",
"Lithuanian",
"Norwegian",
"Polish",
"Portuguese",
"Portuguese (Brazil)",
"Portuguese (Brazil US accents)",
"Romanian",
"Russian",
"Serbian",
"Simplified Chinese",
"Slovak",
"Slovak (qwerty)",
"Slovene",
"Spanish",
"Spanish (Asturian variant)",
"Spanish (CP 850)",
"Spanish (Latin America)",
"Swedish",
"Tajik",
"Traditional Chinese",
"Turkish",
"Ukrainian",
"US International"
]
$default: English (US)
07070100000130000081B400000000000000000000000168EFD66400000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/locale/metadata.ymldescription:
Settings for language, keyboard, and timezone
group: general_system_configuration
after:
- users07070100000131000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000004100000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters07070100000132000081B400000000000000000000000168EFD664000003FF000000000000000000000000000000000000004A00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/form.ymlnode_exporter:
$type: group
$help: Prometheus exporter for hardware and OS metrics.
enabled:
$type: boolean
$default: True
args:
$name: "Arguments"
$type: text
$default: --web.listen-address=":9100"
$help: Please refer to the documentation for available options.
apache_exporter:
$type: group
$help: Prometheus exporter for apache mod_status statistics.
enabled:
$type: boolean
$default: False
args:
$name: "Arguments"
$type: text
$default: --telemetry.address=":9117"
$help: Please refer to the documentation for available options.
postgres_exporter:
$type: group
$help: Prometheus exporter for PostgreSQL server metrics.
enabled:
$type: boolean
$default: False
data_source_name:
$type: text
$default: postgresql://user:passwd@localhost:5432/database?sslmode=disable
args:
$name: "Arguments"
$type: text
$default: --web.listen-address=":9187"
$help: Please refer to the documentation for available options.
07070100000133000081B400000000000000000000000168EFD66400000061000000000000000000000000000000000000004E00000000susemanager-sls/test/data/formulas/metadata/prometheus-exporters/metadata.ymldescription:
Enable and configure Prometheus exporters for managed systems.
group: monitoring
07070100000134000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003700000000susemanager-sls/test/data/formulas/metadata/prometheus07070100000135000081B400000000000000000000000168EFD6640000093C000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/prometheus/form.ymlprometheus:
$type: namespace
enabled:
$type: boolean
$default: True
scrape_interval:
$type: number
$name: Scrape interval (s)
$default: 15
$disabled: "!prometheus.enabled"
$required: true
evaluation_interval:
$type: number
$name: Evaluation interval (s)
$default: 15
$disabled: "!prometheus.enabled"
$required: true
mgr:
$type: group
$name: Uyuni Server
$disabled: "!prometheus.enabled"
monitor_server:
$name: Monitor this server
$type: boolean
$default: True
autodiscover_clients:
$name: Autodiscover clients
$type: boolean
$default: True
sd_username:
$type: text
$name: Username
$help: Username for auto-discovering clients
$default: admin
$visibleIf: .autodiscover_clients == true
$required: true
sd_password:
$type: password
$name: Password
$help: Password for auto-discovering clients
$visibleIf: .autodiscover_clients == true
$required: true
alerting:
$type: group
$disabled: "!prometheus.enabled"
alertmanager_service:
$type: boolean
$default: True
$name: Enable local Alertmanager service
use_local_alertmanager:
$type: boolean
$name: Use local Alertmanager
$help: Use local Alertmanager for this Prometheus instance
$visibleIf: .alertmanager_service == true
$default: True
alertmanagers:
$type: edit-group
$minItems: 0
$itemName: Target ${i}
$prototype:
$type: group
$key:
$type: text
$name: "IP Address : Port"
$default: localhost:9093
$match: ".*\\:\\d{1,5}"
rule_files:
$type: edit-group
$minItems: 0
$prototype:
$type: text
$default: /etc/prometheus/my-rules.yml
$required: true
scrape_configs:
$type: edit-group
$name: User defined scrape configurations
$minItems: 0
$itemName: File-based service discovery ${i}
$disabled: "!prometheus.enabled"
$prototype:
$type: group
$key:
$type: text
$name: "Job name"
files:
$type: edit-group
$minItems: 1
$prototype:
$type: text
$default: /etc/prometheus/my-scrape-config.yml
$required: true
07070100000136000081B400000000000000000000000168EFD66400000042000000000000000000000000000000000000004400000000susemanager-sls/test/data/formulas/metadata/prometheus/metadata.ymldescription:
Enable and configure Prometheus
group: monitoring
07070100000137000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003000000000susemanager-sls/test/data/formulas/metadata/pxe07070100000138000081B400000000000000000000000168EFD664000002B4000000000000000000000000000000000000003900000000susemanager-sls/test/data/formulas/metadata/pxe/form.ymlpxe:
$type: hidden-group
kernel_name:
$name: 'Kernel Filename'
$type: text
$default: 'linux'
initrd_name:
$name: 'Initrd Filename'
$type: text
$default: 'initrd.gz'
default_kernel_parameters:
$name: 'Kernel Command Line Parameters'
$type: text
$default: 'panic=60 ramdisk_size=710000 ramdisk_blocksize=4096 vga=0x317 splash=silent kiwidebug=0'
pxe_root_directory:
$name: 'PXE Root Directory'
$type: text
$default: '/srv/saltboot'
branch_id:
$name: 'Branch Id'
$type: text
$placeholder: 'Enter unique Branch server ID (e.g. "B0001")'
$help: 'Branch server ID is used as a prefix in terminal ID'
07070100000139000081B400000000000000000000000168EFD66400000067000000000000000000000000000000000000003D00000000susemanager-sls/test/data/formulas/metadata/pxe/metadata.ymldescription:
PXE settings for branch server
group: SUSE_manager_for_retail
after:
- branch-network
0707010000013A000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003500000000susemanager-sls/test/data/formulas/metadata/saltboot0707010000013B000081B400000000000000000000000168EFD6640000157C000000000000000000000000000000000000003E00000000susemanager-sls/test/data/formulas/metadata/saltboot/form.ymlpartitioning:
$name: Disk Partitioning
$type: edit-group
$itemName: Disk ${i}
$minItems: 1
$prototype:
$type: group
$key:
$type: text
$name: Disk Symbolic ID
$placeholder: Enter disk symbolic ID (e.g. disk1, disk2, md0 for RAID devices)
$help: Disk Symbolic ID is used together with Partition Symbolic ID for RAID completion.
type:
$type: select
$name: Device Type
$values:
- RAID
- DISK
$default: DISK
device:
$type: text
$visibleIf: .type == DISK
$name: Disk Device
$placeholder: Enter target disk device name (e.g. /dev/sda)
$optional: true
level:
$visibleIf: .type == RAID
$type: select
$name: RAID Level
$values:
-
- 0
- 1
- 4
- 5
- 6
- 10
- linear
- multipath
$default:
$optional: true
devices:
$visibleIf: .type == RAID
$type: edit-group
        $name: Symbolic IDs of devices to be used for RAID device type
$minItems: 0
$prototype:
$type: text
$help: E.g. disk1p1, disk2p1, ... Combination of Disk symbolic ID and Partition symbolic ID to describe devices/partitions used to build RAID device.
$placeholder: Enter combination of Disk and Partition symbolic ID (e.g. disk1part1, disk2part1, ...)
$optional: True
disklabel:
$type: select
$name: Partition table type
$values:
- gpt
- msdos
- none
partitions:
$type: edit-group
$itemName: Partition ${i}
$minItems: 1
$optional: True
$visibleIf: .disklabel != "none"
$prototype:
$type: group
$key:
$type: text
$name: Partition Symbolic ID
          $help: E.g. p1, p2, ... Together with the Disk symbolic ID it is used for RAID completion.
$placeholder: Enter partition symbolic ID (e.g. part1, part2, ...)
size_MiB:
$type: number
$name: Partition Size (MiB)
$help: Leave blank to acquire remaining empty space on the disk.
$optional: True
mountpoint:
$type: text
$name: Device Mount Point
            $help: What should the partition be mounted as - /, swap, /var, ...
$optional: True
format:
$type: select
$name: Filesystem Format
$values:
-
- btrfs
- ext4
- xfs
- vfat
- swap
$optional: True
image:
$type: text
$name: OS Image to Deploy
$help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
$optional: True
image_version:
$visibleIf: .image != ''
$type: text
$help: Version of OS Image. Leave blank for most recent.
$optional: True
luks_pass:
$optional: True
$type: text
$name: Partition Encryption Password
            $help: Password for the encrypted partition. Leave blank for unencrypted. The image itself can still be encrypted.
flags:
$type: select
$name: Partition Flags
$values:
-
- swap
- raid
- bios_grub
- esp
- boot
$default:
mountpoint:
$type: text
$name: Device Mount Point
      $help: What should the partition be mounted as - /, swap, /var, ...
$optional: True
$visibleIf: .disklabel == "none"
format:
$type: select
$name: Filesystem Format
$visibleIf: .disklabel == "none"
$values:
-
- btrfs
- ext4
- xfs
- vfat
- swap
$optional: True
image:
$visibleIf: .disklabel == "none"
$type: text
$name: OS Image to Deploy
$help: Name of the OS Image. Leave blank if no image should be deployed on this partition.
$optional: True
image_version:
$visibleIf: .image != ''
$type: text
$help: Version of OS Image. Leave blank for most recent.
$optional: True
luks_pass:
$visibleIf: .disklabel == "none"
$optional: True
$type: text
$name: Partition Encryption Password
      $help: Password for the encrypted partition. Leave blank for unencrypted. The image itself can still be encrypted.
0707010000013C000081B400000000000000000000000168EFD6640000005B000000000000000000000000000000000000004200000000susemanager-sls/test/data/formulas/metadata/saltboot/metadata.ymldescription:
  Control deployment and boot of POS terminals
group: SUSE_manager_for_retail
0707010000013D000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003200000000susemanager-sls/test/data/formulas/metadata/tftpd0707010000013E000081B400000000000000000000000168EFD66400000137000000000000000000000000000000000000003B00000000susemanager-sls/test/data/formulas/metadata/tftpd/form.ymltftpd:
  $type: hidden-group
  listen_ip:
    $name: 'Internal Network Address'
    $type: text
    $optional: True
  root_dir:
    $name: 'TFTP base directory'
    $type: text
    $default: '/srv/tftpboot'
  tftpd_user:
    $name: 'run TFTP under user'
    $type: text
    $default: 'tftp'
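# Example pillar data this form could produce once filled in (the listen_ip
# value is an illustrative assumption; the other two show the defaults above):
#
# tftpd:
#   listen_ip: 192.168.1.1
#   root_dir: /srv/tftpboot
#   tftpd_user: tftp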
0707010000013F000081B400000000000000000000000168EFD66400000068000000000000000000000000000000000000003F00000000susemanager-sls/test/data/formulas/metadata/tftpd/metadata.ymldescription:
  Settings for tftpd service
group: general_system_configuration
after:
  - branch-network
07070100000140000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/virtualization-host07070100000141000081B400000000000000000000000168EFD6640000005F000000000000000000000000000000000000004900000000susemanager-sls/test/data/formulas/metadata/virtualization-host/form.ymlhypervisor:
  $type: select
  $values: ["KVM", "Xen"]
  $default: KVM
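# Example pillar data (illustrative): the form reduces to a single key, e.g.
#
# hypervisor: KVM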
07070100000142000081B400000000000000000000000168EFD66400000055000000000000000000000000000000000000004D00000000susemanager-sls/test/data/formulas/metadata/virtualization-host/metadata.ymldescription:
  Settings for virtualization host.
group: general_system_configuration
07070100000143000041FD00000000000000000000000268EFD66400000000000000000000000000000000000000000000003300000000susemanager-sls/test/data/formulas/metadata/vsftpd07070100000144000081B400000000000000000000000168EFD66400000604000000000000000000000000000000000000003C00000000susemanager-sls/test/data/formulas/metadata/vsftpd/form.ymlvsftpd_config:
  $type: hidden-group
  anon_root:
    $name: 'FTP server directory'
    $type: text
    $default: '/srv/ftp'
  listen_address:
    $name: 'Internal Network Address'
    $type: text
    $optional: True
  ssl_enable:
    $name: 'Enable ssl'
    $type: boolean
    $default: false
  secure_chroot_dir:
    $name: 'Chroot dir'
    $type: text
    $default: '/usr/share/empty'
  anonymous_enable:
    $name: 'Allow anonymous FTP'
    $type: boolean
    $default: true
  allow_anon_ssl:
    $name: 'Allow SSL for anonymous'
    $type: boolean
    $default: true
  listen:
    $name: 'Run standalone'
    $type: boolean
    $default: true
  local_enable:
    $name: 'Allow local users'
    $type: boolean
    $default: true
  dirmessage_enable:
    $name: 'Activate directory messages'
    $type: boolean
    $default: true
  use_localtime:
    $name: 'Use localtime'
    $type: boolean
    $default: true
  xferlog_enable:
    $name: 'Activate logging of transfers'
    $type: boolean
    $default: true
  connect_from_port_20:
    $name: 'Connect from port 20'
    $type: boolean
    $default: true
  pam_service_name:
    $name: 'PAM service name'
    $type: text
    $default: 'vsftpd'
  rsa_cert_file:
    $name: 'RSA certificate file'
    $type: text
    $default: '/etc/ssl/certs/[ssl-cert-file].pem'
  rsa_private_key_file:
    $name: 'RSA private key file'
    $type: text
    $default: '/etc/ssl/private/[ssl-cert-file].key'
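# Example pillar data this form could produce (illustrative; only a few of
# the settings above are shown, with their defaults):
#
# vsftpd_config:
#   anon_root: /srv/ftp
#   ssl_enable: false
#   anonymous_enable: true
#   pam_service_name: vsftpd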
07070100000145000081B400000000000000000000000168EFD66400000071000000000000000000000000000000000000004000000000susemanager-sls/test/data/formulas/metadata/vsftpd/metadata.ymldescription:
  Settings for vsftpd for branchserver
group: general_system_configuration
after:
- branch-network07070100000146000081B400000000000000000000000168EFD66400000018000000000000000000000000000000000000002E00000000susemanager-sls/test/data/group_formulas.json{"9":["locale","tftpd"]}07070100000147000081B400000000000000000000000168EFD66400000077000000000000000000000000000000000000002F00000000susemanager-sls/test/data/minion_formulas.json{"suma-refhead-min-centos7.mgr.suse.de":["branch-network"],"suma-refhead-min-sles12sp4.mgr.suse.de":["branch-network"]}07070100000148000081B400000000000000000000000168EFD664000025D4000000000000000000000000000000000000002400000000susemanager-sls/test/test_engine.py# pylint: disable=missing-module-docstring
import logging
import pytest
import psycopg2
import shlex
import subprocess

from mgr_events import Responder, DEFAULT_COMMIT_BURST
from unittest.mock import MagicMock, patch, call
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database, drop_database

ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log = logging.getLogger("mgr_events")
log.setLevel(logging.DEBUG)
log.addHandler(ch)


@pytest.fixture(scope="session")
def postgres(request):
    proc = subprocess.Popen(
        shlex.split('su postgres -c "pg_ctl -D ~/data -l ~/logfile start"')
    )

    def finalizer():
        subprocess.Popen(
            shlex.split('su postgres -c "pg_ctl stop -D /var/lib/pgsql/data"')
        )

    request.addfinalizer(finalizer)
    # pylint: disable-next=unused-variable
    outs, errs = proc.communicate(timeout=15)
    yield proc


@pytest.fixture(scope="session")
# pylint: disable-next=redefined-outer-name,unused-argument
def db_engine(postgres):
    return create_engine("postgresql://postgres@/test")


@pytest.fixture
# pylint: disable-next=redefined-outer-name
def db_connection(db_engine):
    if not database_exists(db_engine.url):
        create_database(db_engine.url)
    with psycopg2.connect(
        user="postgres", host="localhost", dbname="test"
    ) as connection:
        yield connection
    drop_database(db_engine.url)


def new_connection():
    return psycopg2.connect(user="postgres", host="localhost", dbname="test")


@pytest.fixture
# pylint: disable-next=redefined-outer-name
def create_tables(db_connection):
    sql = """CREATE TABLE suseSaltEvent (
        id SERIAL PRIMARY KEY,
        minion_id CHARACTER VARYING(256),
        data TEXT NOT NULL,
        queue NUMERIC NOT NULL
    );"""
    db_connection.cursor().execute(sql)
    db_connection.commit()


def delete_table(conn, table):
    # pylint: disable-next=consider-using-f-string
    conn.cursor().execute("DELETE FROM %s" % table)
    conn.commit()


@pytest.fixture
# pylint: disable-next=redefined-outer-name,unused-argument
def responder(db_connection, create_tables):
    with patch("mgr_events.psycopg2") as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        return Responder(
            MagicMock(),  # mock event_bus
            {
                "postgres_db": {
                    "dbname": "tests",
                    "user": "postgres",
                    "password": "",
                    "host": "localhost",
                    "notify_channel": "suseSaltEvent",
                },
                "events": {"thread_pool_size": 3},
            },
        )


# pylint: disable-next=redefined-outer-name
def test_connection_recovery_on_insert(db_connection, responder):
    disposable_connection = new_connection()
    responder.connection = disposable_connection
    # pylint: disable-next=protected-access
    responder._insert("salt/minion/1/start", {"value": 1})
    responder.connection.close()
    with patch("mgr_events.psycopg2") as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        # pylint: disable-next=protected-access
        responder._insert("salt/minion/2/start", {"value": 2})
        responder.connection.commit()
        responder.cursor.execute("SELECT * FROM suseSaltEvent")
        resp = responder.cursor.fetchall()
        assert len(resp) == 2


# pylint: disable-next=redefined-outer-name
def test_connection_recovery_on_commit(db_connection, responder):
    responder.connection = new_connection()
    # pylint: disable-next=protected-access
    responder._insert("salt/minion/1/start", {"value": 1})
    responder.connection.close()
    with patch("mgr_events.psycopg2") as mock_psycopg2:
        mock_psycopg2.connect.return_value = db_connection
        responder.attempt_commit()
        responder.connection.commit()
        responder.cursor.execute("SELECT * FROM suseSaltEvent")
        resp = responder.cursor.fetchall()
        assert len(resp) == 1


# pylint: disable-next=redefined-outer-name,unused-argument
def test_insert_start_event(responder, db_connection):
    responder.event_bus.unpack.return_value = ("salt/minion/12345/start", {"value": 1})
    responder.add_event_to_queue("")
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


# pylint: disable-next=redefined-outer-name
def test_insert_job_return_event(responder):
    responder.event_bus.unpack.return_value = ("salt/job/12345/ret/6789", {"value": 1})
    responder.add_event_to_queue("")
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


# pylint: disable-next=redefined-outer-name
def test_insert_batch_start_event(responder):
    responder.event_bus.unpack.return_value = ("salt/batch/12345/start", {"value": 1})
    responder.add_event_to_queue("")
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert resp
    assert responder.tokens == DEFAULT_COMMIT_BURST - 1


# pylint: disable-next=redefined-outer-name
def test_discard_batch_presence_ping_event(responder):
    responder.event_bus.unpack.return_value = (
        "salt/job/12345/ret/6789",
        {"value": 1, "fun": "test.ping", "metadata": {"batch-mode": True}},
    )
    responder.add_event_to_queue("")
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert len(resp) == 0


# pylint: disable-next=redefined-outer-name
def test_keep_presence_ping_event_without_batch(responder):
    responder.event_bus.unpack.return_value = (
        "salt/job/12345/ret/6789",
        {"value": 1, "fun": "test.ping", "id": "testminion"},
    )
    responder.add_event_to_queue("")
    responder.cursor.execute("SELECT * FROM suseSaltEvent;")
    resp = responder.cursor.fetchall()
    assert len(resp) == 1


# pylint: disable-next=redefined-outer-name
def test_commit_scheduled_on_init(responder):
    assert responder.event_bus.io_loop.call_later.call_count == 1


# pylint: disable-next=redefined-outer-name
def test_commit_empty_queue(responder):
    responder.counters = [0, 0, 0, 0]
    with patch.object(responder, "event_bus", MagicMock()):
        with patch.object(responder, "connection") as mock_connection:
            mock_connection.closed = False
            responder.attempt_commit()
            assert responder.connection.commit.call_count == 0
            assert responder.tokens == DEFAULT_COMMIT_BURST


# pylint: disable-next=redefined-outer-name
def test_postgres_notification(responder):
    with patch.object(responder, "cursor"):
        # pylint: disable-next=protected-access
        responder._insert("salt/minion/1/start", {"value": 1, "id": "testminion"})
        assert responder.counters == [0, 0, 0, 0]
        assert responder.tokens == DEFAULT_COMMIT_BURST - 1
        assert responder.cursor.execute.mock_calls[-1:] == [
            call("NOTIFY suseSaltEvent, '0,0,1,0';")
        ]


# pylint: disable-next=redefined-outer-name
def test_add_token(responder):
    responder.tokens = 0
    responder.add_token()
    assert responder.tokens == 1


# pylint: disable-next=redefined-outer-name
def test_add_token_max(responder):
    responder.add_token()
    assert responder.tokens == DEFAULT_COMMIT_BURST


# pylint: disable-next=redefined-outer-name
def test_commit_avoidance_without_tokens(responder):
    with patch.object(responder, "cursor"):
        with patch.object(responder, "connection") as mock_connection:
            mock_connection.closed = False
            mock_connection.encoding = "utf-8"
            responder.tokens = 0
            # pylint: disable-next=protected-access
            responder._insert("salt/minion/1/start", {"id": "testminion", "value": 1})
            assert responder.counters == [0, 0, 1, 0]
            assert responder.tokens == 0
            assert responder.connection.commit.call_count == 0
            assert responder.cursor.execute.mock_calls == [
                call(
                    "INSERT INTO suseSaltEvent (minion_id, data, queue) VALUES (%s, %s, %s);",
                    (
                        "testminion",
                        '{"tag": "salt/minion/1/start", "data": {"id": "testminion", "value": 1}}',
                        2,
                    ),
                )
            ]


# pylint: disable-next=redefined-outer-name
def test_postgres_connect(db_connection, responder):
    disposable_connection = new_connection()
    disposable_connection.close()
    responder.connection = disposable_connection
    with patch("mgr_events.time") as mock_time:
        with patch("mgr_events.psycopg2") as mock_psycopg2:
            mock_psycopg2.connect.side_effect = [
                psycopg2.OperationalError,
                db_connection,
            ]
            mock_psycopg2.OperationalError = psycopg2.OperationalError
            responder.db_keepalive()
            assert mock_psycopg2.connect.call_count == 2
            mock_time.sleep.assert_called_once_with(5)


# pylint: disable-next=redefined-outer-name
def test_postgres_connect_with_port(responder):
    responder.config["postgres_db"]["port"] = "1234"
    with patch("mgr_events.psycopg2") as mock_psycopg2:
        # pylint: disable-next=protected-access
        responder._connect_to_database()
        mock_psycopg2.connect.assert_called_once_with(
            "dbname='tests' user='postgres' host='localhost' port='1234' password=''"
        )
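

# A short additional check, sketched only from what the assertions above
# already imply (the token bucket starts at DEFAULT_COMMIT_BURST, each
# committed event consumes one token, and refills via add_token() are mocked
# out through the io_loop): queueing a second event should consume a second
# token.
# pylint: disable-next=redefined-outer-name
def test_insert_two_events_consume_two_tokens(responder):
    responder.event_bus.unpack.return_value = ("salt/minion/1/start", {"value": 1})
    responder.add_event_to_queue("")
    responder.event_bus.unpack.return_value = ("salt/minion/2/start", {"value": 2})
    responder.add_event_to_queue("")
    assert responder.tokens == DEFAULT_COMMIT_BURST - 2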
07070100000149000081B400000000000000000000000168EFD66400001C7F000000000000000000000000000000000000003000000000susemanager-sls/test/test_pillar_suma_minion.py# -*- coding: utf-8 -*-
"""
:codeauthor: Michael Calmer <Michael.Calmer@suse.com>
"""
from unittest.mock import MagicMock, patch
import pytest
import sys
sys.path.append("../modules/pillar")
# pylint: disable-next=wrong-import-position
import os
# pylint: disable-next=wrong-import-position
import suma_minion
suma_minion.__opts__ = {}
suma_minion.__context__ = {}
suma_minion.psycopg2 = MagicMock()
TEST_FORMULA_ORDER = [
"branch-network",
"cpu-mitigations",
"dhcpd",
"grafana",
"image-synchronize",
"locale",
"prometheus",
"prometheus-exporters",
"pxe",
"saltboot",
"tftpd",
"virtualization-host",
"vsftpd",
"bind",
]
def cursor_callback(cursor):
assert cursor is not None
@pytest.fixture(autouse=True)
def data_paths():
"""
Set the test data paths
"""
suma_minion.FORMULAS_DATA_PATH = os.path.sep.join([os.path.abspath(""), "data"])
suma_minion.FORMULA_ORDER_FILE = os.path.sep.join(
[os.path.abspath(""), "data", "formula_order.json"]
)
suma_minion.MANAGER_FORMULAS_METADATA_MANAGER_PATH = os.path.sep.join(
[os.path.abspath(""), "data", "formulas", "metadata"]
)
@pytest.mark.parametrize("has_psycopg2", [True, False])
def test_virtual(has_psycopg2):
"""
Test virtual returns the module name
"""
with patch("suma_minion.HAS_POSTGRES", has_psycopg2):
assert suma_minion.__virtual__() == has_psycopg2
def test_formula_pillars_db():
"""
Test getting the formulas from the database
"""
minion_id = "suma-refhead-min-sles12sp4.mgr.suse.de"
pillar = {"group_ids": [9]}
cursor = MagicMock()
cursor.fetchall.return_value = [({"formula_order": TEST_FORMULA_ORDER},)]
pillar = suma_minion.load_global_pillars(cursor, pillar)
cursor.fetchall.return_value = [("formula-locale", {}), ("formula-tftpd", {})]
group_formulas, pillar = suma_minion.load_group_pillars(minion_id, cursor, pillar)
cursor.fetchall.return_value = [("formula-branch-network", {})]
system_formulas, pillar = suma_minion.load_system_pillars(minion_id, cursor, pillar)
pillar = suma_minion.formula_pillars(system_formulas, group_formulas, pillar)
assert "formulas" in pillar
assert pillar["formulas"] == ["branch-network", "locale", "tftpd"]
def test_reading_postgres_opts_in__get_cursor():
"""
Test reading proper postgres opts in _get_cursor
"""
pg_connect_mock = MagicMock(return_value=MagicMock())
test_opts = {
"postgres": {
"host": "test_host",
"user": "test_user",
"pass": "test_pass",
"db": "test_db",
"port": 1234,
}
}
with patch.object(suma_minion, "__opts__", test_opts), patch(
"suma_minion.psycopg2.connect", pg_connect_mock
), patch.dict(suma_minion.__context__, {}):
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
pg_connect_mock.reset_mock()
with patch.object(suma_minion, "__opts__", {"__master_opts__": test_opts}), patch(
"suma_minion.psycopg2.connect", pg_connect_mock
), patch.dict(suma_minion.__context__, {}):
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
def test_using_context_in__get_cursor():
"""
Test using context to store postgres postgres connection in _get_cursor
"""
pg_connect_mock = MagicMock(return_value=MagicMock())
test_opts = {
"postgres": {
"host": "test_host",
"user": "test_user",
"pass": "test_pass",
"db": "test_db",
"port": 1234,
}
}
with patch.object(
suma_minion, "__opts__", {"id": "foobar_master", **test_opts}
), patch("suma_minion.psycopg2.connect", pg_connect_mock), patch.dict(
suma_minion.__context__, {}
):
# Check if it creates new connection if it's not in the context
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
pg_connect_mock.reset_mock()
# Check if it reuses the connection from the context
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
# pylint: disable-next=unnecessary-negation
assert not "suma_minion_cnx" in suma_minion.__context__
pg_connect_mock.reset_mock()
with patch.object(suma_minion, "__opts__", test_opts), patch(
"suma_minion.psycopg2.connect", pg_connect_mock
), patch.dict(suma_minion.__context__, {}):
# Check if it creates new connection if it's not in the context
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
pg_connect_mock.reset_mock()
# Check if it reuses the connection from the context
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
pg_connect_mock.assert_not_called()
assert "suma_minion_cnx" in suma_minion.__context__
pg_connect_mock.reset_mock()
pg_cnx_mock = MagicMock()
pg_cnx_mock.cursor = MagicMock(side_effect=[True, Exception])
with patch.object(suma_minion, "__opts__", test_opts), patch(
"suma_minion.psycopg2.connect", pg_connect_mock
), patch.object(suma_minion.psycopg2, "InterfaceError", Exception), patch.dict(
suma_minion.__context__, {"suma_minion_cnx": pg_cnx_mock}
):
# Check if it reuses the connection from the context
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
pg_cnx_mock.cursor.assert_called_once()
pg_connect_mock.assert_not_called()
# Check if it tries to recoonect if the connection in the context is not alive
# pylint: disable-next=protected-access
suma_minion._get_cursor(cursor_callback)
assert pg_connect_mock.call_args_list[0][1] == {
"host": "test_host",
"user": "test_user",
"password": "test_pass",
"dbname": "test_db",
"port": 1234,
}
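

# A hedged restatement of the ordering rule that test_formula_pillars_db
# exercises: formula names are stripped of their "formula-" prefix and sorted
# by the saved formula order. expected_formula_list is a hypothetical helper
# written for this test only; it is not part of suma_minion.
def expected_formula_list(system, group, order):
    combined = {name.replace("formula-", "") for name in system + group}
    return [name for name in order if name in combined]


def test_formula_order_restatement():
    assert expected_formula_list(
        ["formula-branch-network"],
        ["formula-locale", "formula-tftpd"],
        TEST_FORMULA_ORDER,
    ) == ["branch-network", "locale", "tftpd"]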
07070100000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000B00000000TRAILER!!!